Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- drivers/net/ethernet/8390/ax88796.c | 2
-rw-r--r-- drivers/net/ethernet/8390/mcf8390.c | 2
-rw-r--r-- drivers/net/ethernet/8390/ne.c | 2
-rw-r--r-- drivers/net/ethernet/actions/owl-emac.c | 2
-rw-r--r-- drivers/net/ethernet/aeroflex/greth.c | 2
-rw-r--r-- drivers/net/ethernet/allwinner/sun4i-emac.c | 2
-rw-r--r-- drivers/net/ethernet/altera/altera_tse_main.c | 2
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_com.c | 58
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_com.h | 32
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_ethtool.c | 14
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 42
-rw-r--r-- drivers/net/ethernet/amd/amd8111e.h | 1
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 2
-rw-r--r-- drivers/net/ethernet/amd/sunlance.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 22
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-platform.c | 2
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/main.c | 2
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 2
-rw-r--r-- drivers/net/ethernet/apple/macmace.c | 2
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 73
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h | 8
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 3
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 6
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 132
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c | 43
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h | 21
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 32
-rw-r--r-- drivers/net/ethernet/arc/emac_rockchip.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/ag71xx.c | 37
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 3
-rw-r--r-- drivers/net/ethernet/broadcom/asp2/bcmasp.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bcm4908_enet.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 16
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 48
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.h | 23
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-platform.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 68
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 452
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 58
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c | 160
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h | 43
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 163
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 173
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 132
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h | 43
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 10
-rw-r--r-- drivers/net/ethernet/broadcom/sb1250-mac.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 80
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.h | 2
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad.h | 1
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad_debugfs.c | 31
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 28
-rw-r--r-- drivers/net/ethernet/calxeda/xgmac.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 169
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h | 2
-rw-r--r-- drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 39
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 23
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 12
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c | 98
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/l2t.c | 19
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/l2t.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 16
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/srq.c | 58
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/srq.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c | 9
-rw-r--r-- drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c | 4
-rw-r--r-- drivers/net/ethernet/cirrus/cs89x0.c | 2
-rw-r--r-- drivers/net/ethernet/cirrus/ep93xx_eth.c | 2
-rw-r--r-- drivers/net/ethernet/cirrus/mac89x0.c | 2
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic.h | 62
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_ethtool.c | 8
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 386
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_res.c | 42
-rw-r--r-- drivers/net/ethernet/cortina/gemini.c | 4
-rw-r--r-- drivers/net/ethernet/davicom/dm9000.c | 2
-rw-r--r-- drivers/net/ethernet/dlink/Kconfig | 20
-rw-r--r-- drivers/net/ethernet/dlink/Makefile | 1
-rw-r--r-- drivers/net/ethernet/dlink/sundance.c | 1985
-rw-r--r-- drivers/net/ethernet/dnet.c | 2
-rw-r--r-- drivers/net/ethernet/engleder/tsnep_main.c | 2
-rw-r--r-- drivers/net/ethernet/ethoc.c | 2
-rw-r--r-- drivers/net/ethernet/ezchip/nps_enet.c | 2
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 32
-rw-r--r-- drivers/net/ethernet/faraday/ftmac100.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 48
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 40
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 15
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 9
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h | 2
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/freescale/enetc/Kconfig | 40
-rw-r--r-- drivers/net/ethernet/freescale/enetc/Makefile | 9
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.c | 271
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.h | 30
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc4_hw.h | 155
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc4_pf.c | 756
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 70
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_hw.h | 53
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c | 31
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pf.c | 314
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pf.h | 21
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pf_common.c | 336
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pf_common.h | 19
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_qos.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_vf.c | 8
-rw-r--r-- drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c | 445
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/fec_mpc52xx.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/fec_mpc52xx_phy.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/fec_ptp.c | 11
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman.h | 3
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_dtsec.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_memac.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_port.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_tgec.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/fman/mac.c | 49
-rw-r--r-- drivers/net/ethernet/freescale/fman/mac.h | 2
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/fsl_pq_mdio.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 9
-rw-r--r-- drivers/net/ethernet/freescale/gianfar_ethtool.c | 8
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.c | 36
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 21
-rw-r--r-- drivers/net/ethernet/fungible/funcore/fun_queue.c | 65
-rw-r--r-- drivers/net/ethernet/fungible/funcore/fun_queue.h | 1
-rw-r--r-- drivers/net/ethernet/google/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/google/gve/Makefile | 3
-rw-r--r-- drivers/net/ethernet/google/gve/gve.h | 36
-rw-r--r-- drivers/net/ethernet/google/gve/gve_adminq.c | 4
-rw-r--r-- drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c | 311
-rw-r--r-- drivers/net/ethernet/google/gve/gve_main.c | 66
-rw-r--r-- drivers/net/ethernet/google/gve/gve_rx_dqo.c | 314
-rw-r--r-- drivers/net/ethernet/google/gve/gve_utils.c | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/Kconfig | 18
-rw-r--r-- drivers/net/ethernet/hisilicon/Makefile | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/Makefile | 8
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h | 131
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c | 17
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h | 11
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c | 271
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h | 59
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c | 127
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.h | 11
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c | 253
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c | 222
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h | 12
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h | 143
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c | 409
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h | 39
-rw-r--r-- drivers/net/ethernet/hisilicon/hip04_eth.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hisi_femac.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hnae.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 20
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 13
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 72
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 31
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 66
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 67
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c | 11
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 54
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 50
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns_mdio.c | 2
-rw-r--r-- drivers/net/ethernet/i825xx/sni_82596.c | 2
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_main.c | 2
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 44
-rw-r--r-- drivers/net/ethernet/ibm/emac/mal.c | 90
-rw-r--r-- drivers/net/ethernet/ibm/emac/rgmii.c | 49
-rw-r--r-- drivers/net/ethernet/ibm/emac/tah.c | 49
-rw-r--r-- drivers/net/ethernet/ibm/emac/zmii.c | 49
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 45
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.h | 3
-rw-r--r-- drivers/net/ethernet/intel/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 15
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 17
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 3
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf.h | 23
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_main.c | 161
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_prototype.h | 3
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_txrx.h | 2
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 157
-rw-r--r-- drivers/net/ethernet/intel/ice/ice.h | 17
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_adapter.c | 22
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_adapter.h | 22
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 26
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_base.c | 39
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.c | 21
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ddp.c | 302
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ddp.h | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_eswitch.h | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 187
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.h | 39
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_gnss.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 11
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 9
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 68
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp.c | 1487
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp.h | 143
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_consts.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 125
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 80
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sriov.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.h | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx_lib.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_type.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib.c | 26
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib.h | 8
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_mbx.c | 32
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_mbx.h | 9
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl.c | 428
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl.h | 11
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c | 6
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_txrx.c | 4
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_txrx.h | 3
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mac.h | 1
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_nvm.h | 1
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 6
-rw-r--r-- drivers/net/ethernet/intel/igbvf/igbvf.h | 3
-rw-r--r-- drivers/net/ethernet/intel/igbvf/netdev.c | 3
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_diag.c | 3
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_ethtool.c | 13
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_hw.h | 1
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_mac.c | 316
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_main.c | 1
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_phy.c | 24
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 16
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 15
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 1
-rw-r--r-- drivers/net/ethernet/korina.c | 2
-rw-r--r-- drivers/net/ethernet/lantiq_etop.c | 2
-rw-r--r-- drivers/net/ethernet/lantiq_xrx200.c | 2
-rw-r--r-- drivers/net/ethernet/litex/litex_liteeth.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 42
-rw-r--r-- drivers/net/ethernet/marvell/mvmdio.c | 13
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 6
-rw-r--r-- drivers/net/ethernet/marvell/mvneta_bm.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 41
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c | 31
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c | 31
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/Kconfig | 8
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/Makefile | 3
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/common.h | 1
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 75
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 38
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 41
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 35
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 49
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 132
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 50
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 1
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c | 468
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 26
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c | 20
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 9
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 62
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 90
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c | 5
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c | 49
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c | 9
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 88
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 15
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 303
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 25
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 31
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 3
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 19
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/rep.c | 864
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/rep.h | 54
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/skge.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 3
-rw-r--r-- drivers/net/ethernet/mediatek/airoha_eth.c | 141
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 63
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cq.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/dpll.c | 81
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 96
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 127
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h | 86
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c | 33
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 1072
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 34
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 387
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/qos.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/rl.c | 58
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c) | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h) | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c) | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h) | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c) | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h) | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c) | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h) | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c) | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h) | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c) | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h) | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c) | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h) | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c) | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h) | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h) | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c) | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h) | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c) | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c) | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c) | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c) | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c) | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c) | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_buddy.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_definer.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_fw.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_icm_pool.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_matcher.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ptrn.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_rule.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_table.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c) | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr_ste_v1.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h) | 0
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/wq.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c | 66
-rw-r--r-- drivers/net/ethernet/meta/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/meta/fbnic/Makefile | 8
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic.h | 26
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_csr.c | 148
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_csr.h | 122
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c | 68
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c | 145
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_fw.h | 7
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c | 193
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h | 28
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c | 81
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_mac.c | 22
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_mac.h | 7
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_netdev.c | 92
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_netdev.h | 18
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_pci.c | 30
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_rpc.c | 141
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_rpc.h | 4
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_time.c | 303
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_txrx.c | 168
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_txrx.h | 3
-rw-r--r-- drivers/net/ethernet/micrel/ks8842.c | 2
-rw-r--r-- drivers/net/ethernet/micrel/ks8851_common.c | 20
-rw-r--r-- drivers/net/ethernet/micrel/ks8851_par.c | 2
-rw-r--r-- drivers/net/ethernet/microchip/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/microchip/Makefile | 1
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 2
-rw-r--r-- drivers/net/ethernet/microchip/lan969x/Kconfig | 5
-rw-r--r-- drivers/net/ethernet/microchip/lan969x/Makefile | 13
-rw-r--r-- drivers/net/ethernet/microchip/lan969x/lan969x.c | 353
-rw-r--r-- drivers/net/ethernet/microchip/lan969x/lan969x.h | 65
-rw-r--r-- drivers/net/ethernet/microchip/lan969x/lan969x_calendar.c | 191
-rw-r--r-- drivers/net/ethernet/microchip/lan969x/lan969x_regs.c | 222
-rw-r--r-- drivers/net/ethernet/microchip/lan969x/lan969x_vcap_ag_api.c | 3843
-rw-r--r-- drivers/net/ethernet/microchip/lan969x/lan969x_vcap_impl.c | 85
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/Makefile | 2
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c | 128
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c | 5
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c | 34
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c | 12
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c | 10
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_main.c | 307
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_main.h | 208
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h | 4603
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c | 10
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c | 39
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_packet.c | 24
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c | 15
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_police.c | 3
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_port.c | 122
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_port.h | 23
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c | 49
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c | 59
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_qos.c | 11
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_qos.h | 2
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_regs.c | 222
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_regs.h | 247
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c | 25
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c | 33
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_tc.c | 8
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c | 9
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h | 2
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c | 48
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h | 21
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c | 47
-rw-r--r-- drivers/net/ethernet/microsoft/mana/gdma_main.c | 43
-rw-r--r-- drivers/net/ethernet/microsoft/mana/mana_en.c | 105
-rw-r--r-- drivers/net/ethernet/microsoft/mana/mana_ethtool.c | 66
-rw-r--r-- drivers/net/ethernet/moxa/moxart_ether.c | 2
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_flower.c | 54
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_net.c | 4
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_vsc7514.c | 2
-rw-r--r-- drivers/net/ethernet/natsemi/jazzsonic.c | 2
-rw-r--r-- drivers/net/ethernet/natsemi/macsonic.c | 2
-rw-r--r-- drivers/net/ethernet/natsemi/ns83820.c | 2
-rw-r--r-- drivers/net/ethernet/natsemi/xtsonic.c | 2
-rw-r--r-- drivers/net/ethernet/neterion/s2io.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfd3/dp.c | 4
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfdk/dp.c | 4
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 4
-rw-r--r-- drivers/net/ethernet/ni/nixge.c | 2
-rw-r--r-- drivers/net/ethernet/nxp/lpc_eth.c | 2
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c | 14
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_debug.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hw.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 45
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 34
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 60
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 4
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac-sgmii.c | 22
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac.c | 2
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_debug.c | 4
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_spi.c | 30
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_spi.h | 2
-rw-r--r-- drivers/net/ethernet/realtek/r8169.h | 1
-rw-r--r-- drivers/net/ethernet/realtek/r8169_firmware.c | 6
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 436
-rw-r--r-- drivers/net/ethernet/realtek/r8169_phy_config.c | 36
-rw-r--r-- drivers/net/ethernet/realtek/rtase/rtase.h | 2
-rw-r--r-- drivers/net/ethernet/realtek/rtase/rtase_main.c | 10
-rw-r--r-- drivers/net/ethernet/renesas/ravb.h | 6
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 103
-rw-r--r-- drivers/net/ethernet/renesas/rswitch.c | 2
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 2
-rw-r--r-- drivers/net/ethernet/rocker/rocker_main.c | 2
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c | 2
-rw-r--r-- drivers/net/ethernet/seeq/sgiseeq.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/ef100_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/sfc/ef100_nic.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/ef100_rx.c | 5
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 117
-rw-r--r-- drivers/net/ethernet/sfc/efx.h | 1
-rw-r--r-- drivers/net/ethernet/sfc/efx_channels.c | 6
-rw-r--r-- drivers/net/ethernet/sfc/efx_channels.h | 7
-rw-r--r-- drivers/net/ethernet/sfc/efx_common.c | 16
-rw-r--r-- drivers/net/ethernet/sfc/efx_common.h | 1
-rw-r--r-- drivers/net/ethernet/sfc/ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/sfc/ethtool_common.c | 49
-rw-r--r-- drivers/net/ethernet/sfc/falcon/efx.c | 8
-rw-r--r-- drivers/net/ethernet/sfc/falcon/efx.h | 1
-rw-r--r-- drivers/net/ethernet/sfc/falcon/ethtool.c | 34
-rw-r--r-- drivers/net/ethernet/sfc/falcon/falcon.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/falcon/farch.c | 22
-rw-r--r-- drivers/net/ethernet/sfc/falcon/net_driver.h | 2
-rw-r--r-- drivers/net/ethernet/sfc/falcon/nic.c | 20
-rw-r--r-- drivers/net/ethernet/sfc/falcon/nic.h | 7
-rw-r--r-- drivers/net/ethernet/sfc/falcon/tx.c | 8
-rw-r--r-- drivers/net/ethernet/sfc/falcon/tx.h | 3
-rw-r--r-- drivers/net/ethernet/sfc/mae.c | 11
-rw-r--r-- drivers/net/ethernet/sfc/mae.h | 1
-rw-r--r-- drivers/net/ethernet/sfc/mcdi.c | 76
-rw-r--r-- drivers/net/ethernet/sfc/mcdi.h | 10
-rw-r--r-- drivers/net/ethernet/sfc/net_driver.h | 49
-rw-r--r-- drivers/net/ethernet/sfc/nic.c | 9
-rw-r--r-- drivers/net/ethernet/sfc/nic_common.h | 2
-rw-r--r-- drivers/net/ethernet/sfc/ptp.c | 7
-rw-r--r-- drivers/net/ethernet/sfc/ptp.h | 3
-rw-r--r-- drivers/net/ethernet/sfc/rx.c | 5
-rw-r--r-- drivers/net/ethernet/sfc/rx_common.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/siena/ethtool_common.c | 46
-rw-r--r-- drivers/net/ethernet/sfc/siena/net_driver.h | 2
-rw-r--r-- drivers/net/ethernet/sfc/siena/nic.c | 14
-rw-r--r-- drivers/net/ethernet/sfc/siena/nic_common.h | 5
-rw-r--r-- drivers/net/ethernet/sfc/siena/ptp.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/siena/ptp.h | 2
-rw-r--r-- drivers/net/ethernet/sfc/siena/siena.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 14
-rw-r--r-- drivers/net/ethernet/sfc/tx.h | 3
-rw-r--r-- drivers/net/ethernet/sfc/tx_common.c | 33
-rw-r--r-- drivers/net/ethernet/sfc/tx_common.h | 4
-rw-r--r-- drivers/net/ethernet/sgi/ioc3-eth.c | 2
-rw-r--r-- drivers/net/ethernet/sgi/meth.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 2
-rw-r--r-- drivers/net/ethernet/socionext/netsec.c | 2
-rw-r--r-- drivers/net/ethernet/socionext/sni_ave.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Kconfig | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Makefile | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c | 55
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c | 273
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 12
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 101
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 11
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 9
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac5.c | 150
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 26
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 31
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.c | 22
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.h | 20
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 11
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c | 413
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h | 33
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 26
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 165
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 38
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 4
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 2
-rw-r--r-- drivers/net/ethernet/sun/sunbmac.c | 2
-rw-r--r-- drivers/net/ethernet/sun/sunqe.c | 2
-rw-r--r-- drivers/net/ethernet/sunplus/spl2sw_driver.c | 2
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 198
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.h | 9
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpsw_ale.c | 66
-rw-r--r-- drivers/net/ethernet/ti/cpsw_new.c | 2
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 2
-rw-r--r-- drivers/net/ethernet/ti/davinci_mdio.c | 2
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.c | 47
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c | 2
-rw-r--r-- drivers/net/ethernet/ti/netcp_core.c | 2
-rw-r--r-- drivers/net/ethernet/toshiba/ps3_gelic_wireless.c | 1
-rw-r--r-- drivers/net/ethernet/toshiba/ps3_gelic_wireless.h | 1
-rw-r--r-- drivers/net/ethernet/tundra/tsi108_eth.c | 2
-rw-r--r-- drivers/net/ethernet/via/via-rhine.c | 2
-rw-r--r-- drivers/net/ethernet/via/via-velocity.c | 2
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c | 24
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 1
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c | 188
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h | 2
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_type.h | 9
-rw-r--r-- drivers/net/ethernet/wiznet/w5100.c | 2
-rw-r--r-- drivers/net/ethernet/wiznet/w5300.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/ll_temac_main.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 23
-rw-r--r-- drivers/net/ethernet/xscale/ixp4xx_eth.c | 2
642 files changed, 25270 insertions(+), 11980 deletions(-)
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 2874680ef24d..e1695d0fbd8b 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -1009,7 +1009,7 @@ static struct platform_driver axdrv = {
.name = "ax88796",
},
.probe = ax_probe,
- .remove_new = ax_remove,
+ .remove = ax_remove,
.suspend = ax_suspend,
.resume = ax_resume,
};
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 5a0fa995e643..94ff8364cdf0 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -457,7 +457,7 @@ static struct platform_driver mcf8390_drv = {
.name = "mcf8390",
},
.probe = mcf8390_probe,
- .remove_new = mcf8390_remove,
+ .remove = mcf8390_remove,
};
module_platform_driver(mcf8390_drv);
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 350683a09d2e..961019c32842 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -894,7 +894,7 @@ static int ne_drv_resume(struct platform_device *pdev)
#endif
static struct platform_driver ne_driver = {
- .remove_new = ne_drv_remove,
+ .remove = ne_drv_remove,
.suspend = ne_drv_suspend,
.resume = ne_drv_resume,
.driver = {
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index e03193da5874..115f48b3342c 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -1607,7 +1607,7 @@ static struct platform_driver owl_emac_driver = {
.pm = &owl_emac_pm_ops,
},
.probe = owl_emac_probe,
- .remove_new = owl_emac_remove,
+ .remove = owl_emac_remove,
};
module_platform_driver(owl_emac_driver);
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index adf6f67c5fcb..a593adc16c78 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1565,7 +1565,7 @@ static struct platform_driver greth_of_driver = {
.of_match_table = greth_of_match,
},
.probe = greth_of_probe,
- .remove_new = greth_of_remove,
+ .remove = greth_of_remove,
};
module_platform_driver(greth_of_driver);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index d761c08fe5c1..2f516b950f4e 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -1142,7 +1142,7 @@ static struct platform_driver emac_driver = {
.of_match_table = emac_of_match,
},
.probe = emac_probe,
- .remove_new = emac_remove,
+ .remove = emac_remove,
.suspend = emac_suspend,
.resume = emac_resume,
};
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 3c112c18ae6a..3f6204de9e6b 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1519,7 +1519,7 @@ MODULE_DEVICE_TABLE(of, altera_tse_ids);
static struct platform_driver altera_tse_driver = {
.probe = altera_tse_probe,
- .remove_new = altera_tse_remove,
+ .remove = altera_tse_remove,
.suspend = NULL,
.resume = NULL,
.driver = {
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index d958cda9e58b..66445617fbfb 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -763,25 +763,16 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
if (comp_ctx->status == ENA_CMD_COMPLETED) {
netdev_err(admin_queue->ena_dev->net_device,
- "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
- comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
- /* Check if fallback to polling is enabled */
- if (admin_queue->auto_polling)
- admin_queue->polling = true;
+ "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d)\n",
+ comp_ctx->cmd_opcode);
} else {
netdev_err(admin_queue->ena_dev->net_device,
"The ena device didn't send a completion for the admin cmd %d status %d\n",
comp_ctx->cmd_opcode, comp_ctx->status);
}
- /* Check if shifted to polling mode.
- * This will happen if there is a completion without an interrupt
- * and autopolling mode is enabled. Continuing normal execution in such case
- */
- if (!admin_queue->polling) {
- admin_queue->running_state = false;
- ret = -ETIME;
- goto err;
- }
+ admin_queue->running_state = false;
+ ret = -ETIME;
+ goto err;
}
ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
@@ -1650,12 +1641,6 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
ena_dev->admin_queue.polling = polling;
}
-void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
- bool polling)
-{
- ena_dev->admin_queue.auto_polling = polling;
-}
-
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
@@ -2198,21 +2183,6 @@ int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
return ret;
}
-int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
- struct ena_admin_basic_stats *stats)
-{
- struct ena_com_stats_ctx ctx;
- int ret;
-
- memset(&ctx, 0x0, sizeof(ctx));
- ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
- if (likely(ret == 0))
- memcpy(stats, &ctx.get_resp.u.basic_stats,
- sizeof(ctx.get_resp.u.basic_stats));
-
- return ret;
-}
-
int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len)
{
struct ena_admin_aq_get_stats_cmd *get_cmd;
@@ -2289,24 +2259,6 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
return ret;
}
-int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
- struct ena_admin_feature_offload_desc *offload)
-{
- int ret;
- struct ena_admin_get_feat_resp resp;
-
- ret = ena_com_get_feature(ena_dev, &resp,
- ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
- if (unlikely(ret)) {
- netdev_err(ena_dev->net_device, "Failed to get offload capabilities %d\n", ret);
- return ret;
- }
-
- memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
-
- return 0;
-}
-
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index a372c5e768a7..9414e93d107b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -224,9 +224,6 @@ struct ena_com_admin_queue {
/* Indicate if the admin queue should poll for completion */
bool polling;
- /* Define if fallback to polling mode should occur */
- bool auto_polling;
-
u16 curr_cmd_id;
/* Indicate that the ena was initialized and can
@@ -493,17 +490,6 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
*/
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
-/* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode
- * @ena_dev: ENA communication layer struct
- * @polling: Enable/Disable polling mode
- *
- * Set the autopolling mode.
- * If autopolling is on:
- * In case of missing interrupt when data is available switch to polling.
- */
-void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
- bool polling);
-
/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
* @ena_dev: ENA communication layer struct
*
@@ -591,15 +577,6 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
struct ena_com_dev_get_features_ctx *get_feat_ctx);
-/* ena_com_get_dev_basic_stats - Get device basic statistics
- * @ena_dev: ENA communication layer struct
- * @stats: stats return value
- *
- * @return: 0 on Success and negative value otherwise.
- */
-int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
- struct ena_admin_basic_stats *stats);
-
/* ena_com_get_eni_stats - Get extended network interface statistics
* @ena_dev: ENA communication layer struct
* @stats: stats return value
@@ -635,15 +612,6 @@ int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32
*/
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu);
-/* ena_com_get_offload_settings - Retrieve the device offloads capabilities
- * @ena_dev: ENA communication layer struct
- * @offlad: offload return value
- *
- * @return: 0 on Success and negative value otherwise.
- */
-int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
- struct ena_admin_feature_offload_desc *offload);
-
/* ena_com_rss_init - Init RSS
* @ena_dev: ENA communication layer struct
* @log_size: indirection log size
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 60fb35ec4b15..a3c934c3de71 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -1129,22 +1129,18 @@ static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
return;
}
- strings_buf = devm_kcalloc(&adapter->pdev->dev,
- ETH_GSTRING_LEN, strings_num,
- GFP_ATOMIC);
+ strings_buf = kcalloc(strings_num, ETH_GSTRING_LEN, GFP_ATOMIC);
if (!strings_buf) {
netif_err(adapter, drv, netdev,
"Failed to allocate strings_buf\n");
return;
}
- data_buf = devm_kcalloc(&adapter->pdev->dev,
- strings_num, sizeof(u64),
- GFP_ATOMIC);
+ data_buf = kcalloc(strings_num, sizeof(u64), GFP_ATOMIC);
if (!data_buf) {
netif_err(adapter, drv, netdev,
"Failed to allocate data buf\n");
- devm_kfree(&adapter->pdev->dev, strings_buf);
+ kfree(strings_buf);
return;
}
@@ -1166,8 +1162,8 @@ static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
strings_buf + i * ETH_GSTRING_LEN,
data_buf[i]);
- devm_kfree(&adapter->pdev->dev, strings_buf);
- devm_kfree(&adapter->pdev->dev, data_buf);
+ kfree(strings_buf);
+ kfree(data_buf);
}
void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index c5b50cfa935a..63c8a2328142 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1383,7 +1383,7 @@ static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
rx_ring->rx_stats.bytes,
&dim_sample);
- net_dim(&ena_napi->dim, dim_sample);
+ net_dim(&ena_napi->dim, &dim_sample);
rx_ring->per_napi_packets = 0;
}
@@ -1677,9 +1677,9 @@ static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
static int ena_request_io_irq(struct ena_adapter *adapter)
{
u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
+ int rc = 0, i, k, irq_idx;
unsigned long flags = 0;
struct ena_irq *irq;
- int rc = 0, i, k;
if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
netif_err(adapter, ifup, adapter->netdev,
@@ -1705,6 +1705,16 @@ static int ena_request_io_irq(struct ena_adapter *adapter)
irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
}
+ /* Now that IO IRQs have been successfully allocated map them to the
+ * corresponding IO NAPI instance. Note that the mgmnt IRQ does not
+ * have a NAPI, so care must be taken to correctly map IRQs to NAPIs.
+ */
+ for (i = 0; i < io_queue_count; i++) {
+ irq_idx = ENA_IO_IRQ_IDX(i);
+ irq = &adapter->irq_tbl[irq_idx];
+ netif_napi_set_irq(&adapter->ena_napi[i].napi, irq->vector);
+ }
+
return rc;
err:
@@ -1811,20 +1821,40 @@ static void ena_napi_disable_in_range(struct ena_adapter *adapter,
int first_index,
int count)
{
+ struct napi_struct *napi;
int i;
- for (i = first_index; i < first_index + count; i++)
- napi_disable(&adapter->ena_napi[i].napi);
+ for (i = first_index; i < first_index + count; i++) {
+ napi = &adapter->ena_napi[i].napi;
+ if (!ENA_IS_XDP_INDEX(adapter, i)) {
+ /* This API is supported for non-XDP queues only */
+ netif_queue_set_napi(adapter->netdev, i,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ netif_queue_set_napi(adapter->netdev, i,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ }
+ napi_disable(napi);
+ }
}
static void ena_napi_enable_in_range(struct ena_adapter *adapter,
int first_index,
int count)
{
+ struct napi_struct *napi;
int i;
- for (i = first_index; i < first_index + count; i++)
- napi_enable(&adapter->ena_napi[i].napi);
+ for (i = first_index; i < first_index + count; i++) {
+ napi = &adapter->ena_napi[i].napi;
+ napi_enable(napi);
+ if (!ENA_IS_XDP_INDEX(adapter, i)) {
+ /* This API is supported for non-XDP queues only */
+ netif_queue_set_napi(adapter->netdev, i,
+ NETDEV_QUEUE_TYPE_RX, napi);
+ netif_queue_set_napi(adapter->netdev, i,
+ NETDEV_QUEUE_TYPE_TX, napi);
+ }
+ }
}
/* Configure the Rx forwarding */
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 305232f5476d..e4ee4c28800c 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -550,7 +550,6 @@ typedef enum {
/* Driver definitions */
-#define PCI_VENDOR_ID_AMD 0x1022
#define PCI_DEVICE_ID_AMD8111E_7462 0x7462
#define MAX_UNITS 8 /* Maximum number of devices possible */
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 85c978149bf6..0671a066913b 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1363,7 +1363,7 @@ static void au1000_remove(struct platform_device *pdev)
static struct platform_driver au1000_eth_driver = {
.probe = au1000_probe,
- .remove_new = au1000_remove,
+ .remove = au1000_remove,
.driver = {
.name = "au1000-eth",
},
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index c78706d21a6a..0f98b92408ed 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1514,7 +1514,7 @@ static struct platform_driver sunlance_sbus_driver = {
.of_match_table = sunlance_sbus_match,
},
.probe = sunlance_sbus_probe,
- .remove_new = sunlance_sbus_remove,
+ .remove = sunlance_sbus_remove,
};
module_platform_driver(sunlance_sbus_driver);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 5fc94c2f638e..4431ab1c18b3 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -195,23 +195,19 @@ static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < XGBE_STATS_COUNT; i++) {
- memcpy(data, xgbe_gstring_stats[i].stat_string,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < XGBE_STATS_COUNT; i++)
+ ethtool_puts(&data, xgbe_gstring_stats[i].stat_string);
+
for (i = 0; i < pdata->tx_ring_count; i++) {
- sprintf(data, "txq_%u_packets", i);
- data += ETH_GSTRING_LEN;
- sprintf(data, "txq_%u_bytes", i);
- data += ETH_GSTRING_LEN;
+ ethtool_sprintf(&data, "txq_%u_packets", i);
+ ethtool_sprintf(&data, "txq_%u_bytes", i);
}
+
for (i = 0; i < pdata->rx_ring_count; i++) {
- sprintf(data, "rxq_%u_packets", i);
- data += ETH_GSTRING_LEN;
- sprintf(data, "rxq_%u_bytes", i);
- data += ETH_GSTRING_LEN;
+ ethtool_sprintf(&data, "rxq_%u_packets", i);
+ ethtool_sprintf(&data, "rxq_%u_bytes", i);
}
+
break;
}
}
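For reference, ethtool_puts() and ethtool_sprintf() take a pointer to the string-table cursor and advance it by ETH_GSTRING_LEN per entry, which is what lets the manual `data += ETH_GSTRING_LEN` bookkeeping above be dropped. A minimal sketch of the resulting get_strings shape (FOO_* and foo_stats are hypothetical):

	static void foo_get_strings(struct net_device *dev, u32 stringset, u8 *data)
	{
		unsigned int i;

		if (stringset != ETH_SS_STATS)
			return;

		for (i = 0; i < FOO_STATS_COUNT; i++)
			ethtool_puts(&data, foo_stats[i].name);	   /* copy + advance */
		for (i = 0; i < FOO_TXQ_COUNT; i++)
			ethtool_sprintf(&data, "txq_%u_bytes", i); /* format + advance */
	}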
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index 7912b3b45148..4365bd62942c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -565,7 +565,7 @@ static struct platform_driver xgbe_driver = {
.pm = &xgbe_platform_pm_ops,
},
.probe = xgbe_platform_probe,
- .remove_new = xgbe_platform_remove,
+ .remove = xgbe_platform_remove,
};
int xgbe_platform_init(void)
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 9e90c2381491..2a91c84aebdb 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -734,7 +734,7 @@ static struct platform_driver xge_driver = {
.acpi_match_table = ACPI_PTR(xge_acpi_match),
},
.probe = xge_probe,
- .remove_new = xge_remove,
+ .remove = xge_remove,
.shutdown = xge_shutdown,
};
module_platform_driver(xge_driver);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 4af9d89d5f88..3b2951030a38 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -2159,7 +2159,7 @@ static struct platform_driver xgene_enet_driver = {
.acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
},
.probe = xgene_enet_probe,
- .remove_new = xgene_enet_remove,
+ .remove = xgene_enet_remove,
.shutdown = xgene_enet_shutdown,
};
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 766ab78256fe..8989506e6248 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -759,7 +759,7 @@ static void mac_mace_device_remove(struct platform_device *pdev)
static struct platform_driver mac_mace_driver = {
.probe = mace_probe,
- .remove_new = mac_mace_device_remove,
+ .remove = mac_mace_device_remove,
.driver = {
.name = mac_mace_string,
},
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 440ff4616fec..6fef47ba0a59 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -15,6 +15,7 @@
#include "aq_macsec.h"
#include "aq_main.h"
+#include <linux/ethtool.h>
#include <linux/linkmode.h>
#include <linux/ptp_clock_kernel.h>
@@ -977,6 +978,76 @@ static int aq_ethtool_set_phy_tunable(struct net_device *ndev,
return err;
}
+static int aq_ethtool_get_module_info(struct net_device *ndev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ u8 compliance_val, dom_type;
+ int err;
+
+ /* Module EEPROM is only supported for controllers with external PHY */
+ if (aq_nic->aq_nic_cfg.aq_hw_caps->media_type != AQ_HW_MEDIA_TYPE_FIBRE ||
+ !aq_nic->aq_hw_ops->hw_read_module_eeprom)
+ return -EOPNOTSUPP;
+
+ err = aq_nic->aq_hw_ops->hw_read_module_eeprom(aq_nic->aq_hw,
+ SFF_8472_ID_ADDR, SFF_8472_COMP_ADDR, 1, &compliance_val);
+ if (err)
+ return err;
+
+ err = aq_nic->aq_hw_ops->hw_read_module_eeprom(aq_nic->aq_hw,
+ SFF_8472_ID_ADDR, SFF_8472_DOM_TYPE_ADDR, 1, &dom_type);
+ if (err)
+ return err;
+
+ if (dom_type & SFF_8472_ADDRESS_CHANGE_REQ_MASK || compliance_val == 0x00) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+ return 0;
+}
+
+static int aq_ethtool_get_module_eeprom(struct net_device *ndev,
+ struct ethtool_eeprom *ee, unsigned char *data)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ unsigned int first, last, len;
+ int err;
+
+ if (!aq_nic->aq_hw_ops->hw_read_module_eeprom)
+ return -EOPNOTSUPP;
+
+ first = ee->offset;
+ last = ee->offset + ee->len;
+
+ if (first < ETH_MODULE_SFF_8079_LEN) {
+ len = min(last, ETH_MODULE_SFF_8079_LEN);
+ len -= first;
+
+ err = aq_nic->aq_hw_ops->hw_read_module_eeprom(aq_nic->aq_hw,
+ SFF_8472_ID_ADDR, first, len, data);
+ if (err)
+ return err;
+
+ first += len;
+ data += len;
+ }
+ if (first < ETH_MODULE_SFF_8472_LEN && last > ETH_MODULE_SFF_8079_LEN) {
+ len = min(last, ETH_MODULE_SFF_8472_LEN);
+ len -= first;
+ first -= ETH_MODULE_SFF_8079_LEN;
+
+ err = aq_nic->aq_hw_ops->hw_read_module_eeprom(aq_nic->aq_hw,
+ SFF_8472_DIAGNOSTICS_ADDR, first, len, data);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
const struct ethtool_ops aq_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -1014,4 +1085,6 @@ const struct ethtool_ops aq_ethtool_ops = {
.get_ts_info = aq_ethtool_get_ts_info,
.get_phy_tunable = aq_ethtool_get_phy_tunable,
.set_phy_tunable = aq_ethtool_set_phy_tunable,
+ .get_module_info = aq_ethtool_get_module_info,
+ .get_module_eeprom = aq_ethtool_get_module_eeprom,
};
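To make the page split in aq_ethtool_get_module_eeprom() concrete: ETH_MODULE_SFF_8079_LEN is 256 and ETH_MODULE_SFF_8472_LEN is 512, so a request with offset 200 and length 200 straddles both pages and is served by two reads (worked through the code above):

	/* ee->offset = 200, ee->len = 200  ->  first = 200, last = 400     */
	len = min(last, ETH_MODULE_SFF_8079_LEN) - first; /* 256 - 200 = 56 */
	/* read 56 bytes from device 0x50, register 200 (tail of page A0h) */
	first += len;					  /* 256            */
	len = min(last, ETH_MODULE_SFF_8472_LEN) - first; /* 400 - 256 = 144 */
	first -= ETH_MODULE_SFF_8079_LEN;		  /* 0              */
	/* read 144 bytes from device 0x51, register 0 (head of page A2h)  */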
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
index 6d5be5ebeb13..f26fe1a75539 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
@@ -14,4 +14,12 @@
extern const struct ethtool_ops aq_ethtool_ops;
#define AQ_PRIV_FLAGS_MASK (AQ_HW_LOOPBACK_MASK)
+#define SFF_8472_ID_ADDR 0x50
+#define SFF_8472_DIAGNOSTICS_ADDR 0x51
+
+#define SFF_8472_COMP_ADDR 0x5e
+#define SFF_8472_DOM_TYPE_ADDR 0x5c
+
+#define SFF_8472_ADDRESS_CHANGE_REQ_MASK 0x4
+
#endif /* AQ_ETHTOOL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index f010bda61c96..42c0efc1b455 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -340,6 +340,9 @@ struct aq_hw_ops {
int (*hw_set_loopback)(struct aq_hw_s *self, u32 mode, bool enable);
int (*hw_get_mac_temp)(struct aq_hw_s *self, u32 *temp);
+
+ int (*hw_read_module_eeprom)(struct aq_hw_s *self, u8 dev_addr,
+ u8 reg_start_addr, int len, u8 *data);
};
struct aq_fw_ops {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 43c71f6b314f..08630ee94251 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -162,8 +162,8 @@ int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
self->msix_entry_mask |= (1 << i);
if (pdev->msix_enabled && affinity_mask)
- irq_set_affinity_hint(pci_irq_vector(pdev, i),
- affinity_mask);
+ irq_update_affinity_hint(pci_irq_vector(pdev, i),
+ affinity_mask);
}
return err;
@@ -187,7 +187,7 @@ void aq_pci_func_free_irqs(struct aq_nic_s *self)
continue;
if (pdev->msix_enabled)
- irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
+ irq_update_affinity_hint(pci_irq_vector(pdev, i), NULL);
free_irq(pci_irq_vector(pdev, i), irq_data);
self->msix_entry_mask &= ~(1U << i);
}
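The two helpers differ only in whether the mask is enforced: irq_set_affinity_hint() stores the hint and also applies it as the IRQ's affinity, while irq_update_affinity_hint() only stores the hint (exposed via /proc/irq/<n>/affinity_hint), leaving the actual placement to user space such as irqbalance. In other words, the old behaviour decomposes roughly as:

	/* rough equivalent of the old irq_set_affinity_hint(irq, mask): */
	irq_update_affinity_hint(irq, mask);	/* publish the hint        */
	irq_set_affinity(irq, mask);		/* ...and force it, which the
						 * driver no longer does   */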
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 56c46266bb0a..493432d036b9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -1654,6 +1654,137 @@ static int hw_atl_b0_get_mac_temp(struct aq_hw_s *self, u32 *temp)
return 0;
}
+#define START_TRANSMIT 0x5001
+#define START_READ_TRANSMIT 0x5101
+#define STOP_TRANSMIT 0x3001
+#define REPEAT_TRANSMIT 0x1001
+#define REPEAT_NACK_TRANSMIT 0x1011
+
+static int hw_atl_b0_smb0_wait_result(struct aq_hw_s *self, bool expect_ack)
+{
+ int err;
+ u32 val;
+
+ err = readx_poll_timeout(hw_atl_smb0_byte_transfer_complete_get,
+ self, val, val == 1, 100U, 10000U);
+ if (err)
+ return err;
+ if (hw_atl_smb0_receive_acknowledged_get(self) != expect_ack)
+ return -EIO;
+ return 0;
+}
+
+/* Starts an I2C/SMBUS write to a given address. addr is in 7-bit format,
+ * the read/write bit is not part of it.
+ */
+static int hw_atl_b0_smb0_start_write(struct aq_hw_s *self, u32 addr)
+{
+ hw_atl_smb0_tx_data_set(self, (addr << 1) | 0);
+ hw_atl_smb0_provisioning2_set(self, START_TRANSMIT);
+ return hw_atl_b0_smb0_wait_result(self, 0);
+}
+
+/* Writes a single byte as part of an ongoing write started by start_write. */
+static int hw_atl_b0_smb0_write_byte(struct aq_hw_s *self, u32 data)
+{
+ hw_atl_smb0_tx_data_set(self, data);
+ hw_atl_smb0_provisioning2_set(self, REPEAT_TRANSMIT);
+ return hw_atl_b0_smb0_wait_result(self, 0);
+}
+
+/* Starts an I2C/SMBUS read to a given address. addr is in 7-bit format,
+ * the read/write bit is not part of it.
+ */
+static int hw_atl_b0_smb0_start_read(struct aq_hw_s *self, u32 addr)
+{
+ int err;
+
+ hw_atl_smb0_tx_data_set(self, (addr << 1) | 1);
+ hw_atl_smb0_provisioning2_set(self, START_READ_TRANSMIT);
+ err = hw_atl_b0_smb0_wait_result(self, 0);
+ if (err)
+ return err;
+ if (hw_atl_smb0_repeated_start_detect_get(self) == 0)
+ return -EIO;
+ return 0;
+}
+
+/* Reads a single byte as part of an ongoing read started by start_read. */
+static int hw_atl_b0_smb0_read_byte(struct aq_hw_s *self)
+{
+ int err;
+
+ hw_atl_smb0_provisioning2_set(self, REPEAT_TRANSMIT);
+ err = hw_atl_b0_smb0_wait_result(self, 0);
+ if (err)
+ return err;
+ return hw_atl_smb0_rx_data_get(self);
+}
+
+/* Reads the last byte of an ongoing read. */
+static int hw_atl_b0_smb0_read_byte_nack(struct aq_hw_s *self)
+{
+ int err;
+
+ hw_atl_smb0_provisioning2_set(self, REPEAT_NACK_TRANSMIT);
+ err = hw_atl_b0_smb0_wait_result(self, 1);
+ if (err)
+ return err;
+ return hw_atl_smb0_rx_data_get(self);
+}
+
+/* Sends a stop condition and ends a transfer. */
+static void hw_atl_b0_smb0_stop(struct aq_hw_s *self)
+{
+ hw_atl_smb0_provisioning2_set(self, STOP_TRANSMIT);
+}
+
+static int hw_atl_b0_read_module_eeprom(struct aq_hw_s *self, u8 dev_addr,
+ u8 reg_start_addr, int len, u8 *data)
+{
+ int i, b;
+ int err;
+ u32 val;
+
+ /* Wait for SMBUS0 to be idle */
+ err = readx_poll_timeout(hw_atl_smb0_bus_busy_get, self,
+ val, val == 0, 100U, 10000U);
+ if (err)
+ return err;
+
+ err = hw_atl_b0_smb0_start_write(self, dev_addr);
+ if (err)
+ goto out;
+
+ err = hw_atl_b0_smb0_write_byte(self, reg_start_addr);
+ if (err)
+ goto out;
+
+ err = hw_atl_b0_smb0_start_read(self, dev_addr);
+ if (err)
+ goto out;
+
+ for (i = 0; i < len - 1; i++) {
+ b = hw_atl_b0_smb0_read_byte(self);
+ if (b < 0) {
+ err = b;
+ goto out;
+ }
+ data[i] = (u8)b;
+ }
+
+ b = hw_atl_b0_smb0_read_byte_nack(self);
+ if (b < 0) {
+ err = b;
+ goto out;
+ }
+ data[i] = (u8)b;
+
+out:
+ hw_atl_b0_smb0_stop(self);
+ return err;
+}
+
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_soft_reset = hw_atl_utils_soft_reset,
.hw_prepare = hw_atl_utils_initfw,
@@ -1712,4 +1843,5 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_fc = hw_atl_b0_set_fc,
.hw_get_mac_temp = hw_atl_b0_get_mac_temp,
+ .hw_read_module_eeprom = hw_atl_b0_read_module_eeprom,
};
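Taken together, the helpers implement the standard combined SMBus/I2C transaction for SFF module EEPROMs: a one-byte register-pointer write, a repeated-start read, ACKs on all but the final byte, then a stop. On the wire (S = start, Sr = repeated start, A/NA = ack/nack, P = stop):

	S 0x50 W [A] reg [A]  Sr 0x50 R [A] b0 [A] ... bN-1 [NA] P

A hypothetical caller (SFF_8472_ID_ADDR is the 0x50 ID page from aq_ethtool.h; bytes 20-35 of that page hold the vendor name per SFF-8472):

	u8 vendor_name[16];
	int err;

	err = hw_atl_b0_read_module_eeprom(self, SFF_8472_ID_ADDR,
					   20, sizeof(vendor_name),
					   vendor_name);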
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 7b67bdd8a258..d07af1271d59 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -57,6 +57,49 @@ u32 hw_atl_ts_data_get(struct aq_hw_s *aq_hw)
HW_ATL_TS_DATA_OUT_SHIFT);
}
+u32 hw_atl_smb0_bus_busy_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_SMB0_BUS_BUSY_ADR,
+ HW_ATL_SMB0_BUS_BUSY_MSK,
+ HW_ATL_SMB0_BUS_BUSY_SHIFT);
+}
+
+u32 hw_atl_smb0_byte_transfer_complete_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_ADR,
+ HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_MSK,
+ HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_SHIFT);
+}
+
+u32 hw_atl_smb0_receive_acknowledged_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_SMB0_RX_ACKNOWLEDGED_ADR,
+ HW_ATL_SMB0_RX_ACKNOWLEDGED_MSK,
+ HW_ATL_SMB0_RX_ACKNOWLEDGED_SHIFT);
+}
+
+u32 hw_atl_smb0_repeated_start_detect_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_SMB0_REPEATED_START_DETECT_ADR,
+ HW_ATL_SMB0_REPEATED_START_DETECT_MSK,
+ HW_ATL_SMB0_REPEATED_START_DETECT_SHIFT);
+}
+
+u32 hw_atl_smb0_rx_data_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_SMB0_RECEIVED_DATA_ADR);
+}
+
+void hw_atl_smb0_tx_data_set(struct aq_hw_s *aq_hw, u32 data)
+{
+ return aq_hw_write_reg(aq_hw, HW_ATL_SMB0_TRANSMITTED_DATA_ADR, data);
+}
+
+void hw_atl_smb0_provisioning2_set(struct aq_hw_s *aq_hw, u32 data)
+{
+ return aq_hw_write_reg(aq_hw, HW_ATL_SMB0_PROVISIONING2_ADR, data);
+}
+
/* global */
void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
u32 semaphore)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 58f5ee0a6214..5fd506acacb5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -34,6 +34,27 @@ u32 hw_atl_ts_ready_latch_high_get(struct aq_hw_s *aq_hw);
/* get temperature sense data */
u32 hw_atl_ts_data_get(struct aq_hw_s *aq_hw);
+/* SMBUS0 bus busy */
+u32 hw_atl_smb0_bus_busy_get(struct aq_hw_s *aq_hw);
+
+/* SMBUS0 byte transfer complete */
+u32 hw_atl_smb0_byte_transfer_complete_get(struct aq_hw_s *aq_hw);
+
+/* SMBUS0 receive acknowledged */
+u32 hw_atl_smb0_receive_acknowledged_get(struct aq_hw_s *aq_hw);
+
+/* SMBUS0 set transmitted data (only leftmost byte of data valid) */
+void hw_atl_smb0_tx_data_set(struct aq_hw_s *aq_hw, u32 data);
+
+/* SMBUS0 provisioning2 command register */
+void hw_atl_smb0_provisioning2_set(struct aq_hw_s *aq_hw, u32 data);
+
+/* SMBUS0 repeated start detect */
+u32 hw_atl_smb0_repeated_start_detect_get(struct aq_hw_s *aq_hw);
+
+/* SMBUS0 received data register */
+u32 hw_atl_smb0_rx_data_get(struct aq_hw_s *aq_hw);
+
/* global */
/* set global microprocessor semaphore */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 4a6467031b9e..fce30d90b6cb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -42,6 +42,38 @@
#define HW_ATL_TS_DATA_OUT_SHIFT 0
#define HW_ATL_TS_DATA_OUT_WIDTH 12
+/* SMBUS0 Received Data register */
+#define HW_ATL_SMB0_RECEIVED_DATA_ADR 0x00000748
+/* SMBUS0 Transmitted Data register */
+#define HW_ATL_SMB0_TRANSMITTED_DATA_ADR 0x00000608
+
+/* SMBUS0 Global Provisioning 2 register */
+#define HW_ATL_SMB0_PROVISIONING2_ADR 0x00000604
+
+/* SMBUS0 Bus Busy Bitfield Definitions */
+#define HW_ATL_SMB0_BUS_BUSY_ADR 0x00000744
+#define HW_ATL_SMB0_BUS_BUSY_MSK 0x00000080
+#define HW_ATL_SMB0_BUS_BUSY_SHIFT 7
+#define HW_ATL_SMB0_BUS_BUSY_WIDTH 1
+
+/* SMBUS0 Byte Transfer Complete Bitfield Definitions */
+#define HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_ADR 0x00000744
+#define HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_MSK 0x00000002
+#define HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_SHIFT 1
+#define HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_WIDTH 1
+
+/* SMBUS0 Receive Acknowledge Bitfield Definitions */
+#define HW_ATL_SMB0_RX_ACKNOWLEDGED_ADR 0x00000744
+#define HW_ATL_SMB0_RX_ACKNOWLEDGED_MSK 0x00000100
+#define HW_ATL_SMB0_RX_ACKNOWLEDGED_SHIFT 8
+#define HW_ATL_SMB0_RX_ACKNOWLEDGED_WIDTH 1
+
+/* SMBUS0 Repeated Start Detect Bitfield Definitions */
+#define HW_ATL_SMB0_REPEATED_START_DETECT_ADR 0x00000744
+#define HW_ATL_SMB0_REPEATED_START_DETECT_MSK 0x00000004
+#define HW_ATL_SMB0_REPEATED_START_DETECT_SHIFT 2
+#define HW_ATL_SMB0_REPEATED_START_DETECT_WIDTH 1
+
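Assuming the usual aq_hw_read_reg_bit() semantics (read the register, mask, shift), each SMB0 status getter above reduces to a pattern like this (hw_base is a stand-in for the mapped register space):

	u32 reg = readl(hw_base + HW_ATL_SMB0_BUS_BUSY_ADR);
	u32 busy = (reg & HW_ATL_SMB0_BUS_BUSY_MSK) >>
		   HW_ATL_SMB0_BUS_BUSY_SHIFT;	/* bit 7 -> 0 or 1 */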
/* global microprocessor semaphore definitions
* base address: 0x000003a0
* parameter: semaphore {s} | stride size 0x4 | range [0, 15]
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index 493d6356c8ca..780e70ea1c22 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -264,7 +264,7 @@ static void emac_rockchip_remove(struct platform_device *pdev)
static struct platform_driver emac_rockchip_driver = {
.probe = emac_rockchip_probe,
- .remove_new = emac_rockchip_remove,
+ .remove = emac_rockchip_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = emac_rockchip_dt_ids,
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 9586b6894f7e..3d4c3d8698e2 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1598,8 +1598,8 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
int ring_mask, ring_size, done = 0;
unsigned int pktlen_mask, offset;
struct ag71xx_ring *ring;
- struct list_head rx_list;
struct sk_buff *skb;
+ LIST_HEAD(rx_list);
ring = &ag->rx_ring;
pktlen_mask = ag->dcfg->desc_pktlen_mask;
@@ -1610,8 +1610,6 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
limit, ring->curr, ring->dirty);
- INIT_LIST_HEAD(&rx_list);
-
while (done < limit) {
unsigned int i = ring->curr & ring_mask;
struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
@@ -1648,6 +1646,7 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
skb->dev = ndev;
skb->ip_summed = CHECKSUM_NONE;
+ skb->protocol = eth_type_trans(skb, ndev);
list_add_tail(&skb->list, &rx_list);
next:
@@ -1659,8 +1658,6 @@ next:
ag71xx_ring_rx_refill(ag);
- list_for_each_entry(skb, &rx_list, list)
- skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb_list(&rx_list);
netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
@@ -1822,10 +1819,9 @@ static int ag71xx_probe(struct platform_device *pdev)
}
clk_eth = devm_clk_get_enabled(&pdev->dev, "eth");
- if (IS_ERR(clk_eth)) {
- netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
- return PTR_ERR(clk_eth);
- }
+ if (IS_ERR(clk_eth))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk_eth),
+ "Failed to get eth clk.");
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -1836,14 +1832,13 @@ static int ag71xx_probe(struct platform_device *pdev)
memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));
ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
- if (IS_ERR(ag->mac_reset)) {
- netif_err(ag, probe, ndev, "missing mac reset\n");
- return PTR_ERR(ag->mac_reset);
- }
+ if (IS_ERR(ag->mac_reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ag->mac_reset),
+ "missing mac reset");
- ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!ag->mac_base)
- return -ENOMEM;
+ ag->mac_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ag->mac_base))
+ return PTR_ERR(ag->mac_base);
/* ensure that HW is in manual polling mode before interrupts are
* activated. Otherwise ag71xx_interrupt might call napi_schedule
@@ -1917,18 +1912,14 @@ static int ag71xx_probe(struct platform_device *pdev)
if (err)
return err;
- platform_set_drvdata(pdev, ndev);
-
err = ag71xx_phylink_setup(ag);
- if (err) {
- netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to setup phylink");
err = devm_register_netdev(&pdev->dev, ndev);
if (err) {
netif_err(ag, probe, ndev, "unable to register net device\n");
- platform_set_drvdata(pdev, NULL);
return err;
}
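dev_err_probe() returns the error code it is given, logs with dev_err() for real failures, and for -EPROBE_DEFER records the reason (visible in the devices_deferred debugfs file) instead of spamming the log; each three-line error branch therefore collapses to a single statement, e.g. (devm_foo_get() is a placeholder for any resource getter):

	res = devm_foo_get(&pdev->dev);
	if (IS_ERR(res))
		return dev_err_probe(&pdev->dev, PTR_ERR(res),
				     "failed to get foo resource");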
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 75ca3ddda1f5..eeec8bf17cf4 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -72,7 +72,6 @@ config BCMGENET
tristate "Broadcom GENET internal MAC support"
depends on HAS_IOMEM
depends on PTP_1588_CLOCK_OPTIONAL || !ARCH_BCM2835
- select MII
select PHYLIB
select FIXED_PHY
select BCM7XXX_PHY
@@ -195,7 +194,6 @@ config SYSTEMPORT
tristate "Broadcom SYSTEMPORT internal MAC support"
depends on HAS_IOMEM
depends on NET_DSA || !NET_DSA
- select MII
select PHYLIB
select FIXED_PHY
select DIMLIB
@@ -260,7 +258,6 @@ config BCMASP
depends on ARCH_BRCMSTB || COMPILE_TEST
default ARCH_BRCMSTB
depends on OF
- select MII
select PHYLIB
select MDIO_BCM_UNIMAC
help
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index 297c2682a9cf..a68fab1b05f0 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -1500,7 +1500,7 @@ static SIMPLE_DEV_PM_OPS(bcmasp_pm_ops,
static struct platform_driver bcmasp_driver = {
.probe = bcmasp_probe,
- .remove_new = bcmasp_remove,
+ .remove = bcmasp_remove,
.shutdown = bcmasp_shutdown,
.driver = {
.name = "brcm,asp-v2",
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index ca163c8e3729..9da5ae29a105 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -101,14 +101,14 @@ static int bcmasp_get_sset_count(struct net_device *dev, int string_set)
static void bcmasp_get_strings(struct net_device *dev, u32 stringset,
u8 *data)
{
+ const char *str;
unsigned int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCMASP_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcmasp_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ str = bcmasp_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
default:
@@ -496,4 +496,5 @@ const struct ethtool_ops bcmasp_ethtool_ops = {
.get_strings = bcmasp_get_strings,
.get_ethtool_stats = bcmasp_get_ethtool_stats,
.get_sset_count = bcmasp_get_sset_count,
+ .get_ts_info = ethtool_op_get_ts_info,
};
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index 9ea16ef4139d..cfd50efbdbc0 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -365,6 +365,9 @@ static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
intf->tx_spb_index = spb_index;
intf->tx_spb_dma_valid = valid;
+
+ skb_tx_timestamp(skb);
+
bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid);
if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1))
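Calling skb_tx_timestamp() immediately before the descriptor is handed to hardware pairs with the ethtool_op_get_ts_info hook added in the ethtool hunk above: that stock helper advertises software timestamping only, reporting roughly:

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;	/* no PTP hardware clock */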
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 72df1bb10172..203e8d0dd04b 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -789,7 +789,7 @@ static struct platform_driver bcm4908_enet_driver = {
.of_match_table = bcm4908_enet_of_match,
},
.probe = bcm4908_enet_probe,
- .remove_new = bcm4908_enet_remove,
+ .remove = bcm4908_enet_remove,
};
module_platform_driver(bcm4908_enet_driver);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 3c0e3b9828be..65e3a0656a4c 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1339,14 +1339,14 @@ static int bcm_enet_get_sset_count(struct net_device *netdev,
static void bcm_enet_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
+ const char *str;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcm_enet_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ str = bcm_enet_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
}
@@ -1936,7 +1936,7 @@ static void bcm_enet_remove(struct platform_device *pdev)
static struct platform_driver bcm63xx_enet_driver = {
.probe = bcm_enet_probe,
- .remove_new = bcm_enet_remove,
+ .remove = bcm_enet_remove,
.driver = {
.name = "bcm63xx_enet",
},
@@ -2503,14 +2503,14 @@ static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
static void bcm_enetsw_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
+ const char *str;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcm_enetsw_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ str = bcm_enetsw_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
}
@@ -2755,7 +2755,7 @@ static void bcm_enetsw_remove(struct platform_device *pdev)
static struct platform_driver bcm63xx_enetsw_driver = {
.probe = bcm_enetsw_probe,
- .remove_new = bcm_enetsw_remove,
+ .remove = bcm_enetsw_remove,
.driver = {
.name = "bcm63xx_enetsw",
},
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 0a68b526e4a8..42672c63f108 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -27,30 +27,6 @@
#include "bcmsysport.h"
-/* I/O accessors register helpers */
-#define BCM_SYSPORT_IO_MACRO(name, offset) \
-static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
-{ \
- u32 reg = readl_relaxed(priv->base + offset + off); \
- return reg; \
-} \
-static inline void name##_writel(struct bcm_sysport_priv *priv, \
- u32 val, u32 off) \
-{ \
- writel_relaxed(val, priv->base + offset + off); \
-} \
-
-BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
-BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
-BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
-BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
-BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
-BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
-BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
-BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
-BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
-BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
-
/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
* same layout, except it has been moved by 4 bytes up, *sigh*
*/
@@ -370,32 +346,22 @@ static void bcm_sysport_get_strings(struct net_device *dev,
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
const struct bcm_sysport_stats *s;
- char buf[128];
- int i, j;
+ int i;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
s = &bcm_sysport_gstrings_stats[i];
if (priv->is_lite &&
!bcm_sysport_lite_stat_valid(s->type))
continue;
- memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
- ETH_GSTRING_LEN);
- j++;
+ ethtool_puts(&data, s->stat_string);
}
for (i = 0; i < dev->num_tx_queues; i++) {
- snprintf(buf, sizeof(buf), "txq%d_packets", i);
- memcpy(data + j * ETH_GSTRING_LEN, buf,
- ETH_GSTRING_LEN);
- j++;
-
- snprintf(buf, sizeof(buf), "txq%d_bytes", i);
- memcpy(data + j * ETH_GSTRING_LEN, buf,
- ETH_GSTRING_LEN);
- j++;
+ ethtool_sprintf(&data, "txq%d_packets", i);
+ ethtool_sprintf(&data, "txq%d_bytes", i);
}
break;
default:
@@ -1053,7 +1019,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
if (priv->dim.use_dim) {
dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
priv->dim.bytes, &dim_sample);
- net_dim(&priv->dim.dim, dim_sample);
+ net_dim(&priv->dim.dim, &dim_sample);
}
return work_done;
@@ -2900,7 +2866,7 @@ static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
static struct platform_driver bcm_sysport_driver = {
.probe = bcm_sysport_probe,
- .remove_new = bcm_sysport_remove,
+ .remove = bcm_sysport_remove,
.driver = {
.name = "brcm-systemport",
.of_match_table = bcm_sysport_of_match,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 335cf6631db5..a34296f989f1 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -773,4 +773,27 @@ struct bcm_sysport_priv {
struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8];
};
+
+/* I/O accessors register helpers */
+#define BCM_SYSPORT_IO_MACRO(name, offset) \
+static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
+{ \
+ u32 reg = readl_relaxed(priv->base + (offset) + off); \
+ return reg; \
+} \
+static inline void name##_writel(struct bcm_sysport_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, priv->base + (offset) + off); \
+} \
+
+BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
+BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
+BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
+BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
+BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
+
#endif /* __BCM_SYSPORT_H */
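Note that the accessors gained parentheses around offset on the move; without them a non-trivial macro argument would parse incorrectly. A hypothetical illustration:

	#define BAD_READL(priv, offset, off) \
		readl_relaxed((priv)->base + offset + (off))

	/* BAD_READL(p, cond ? A : B, 4) expands so that '+' binds tighter
	 * than '?:', giving ((p)->base + cond) ? A : (B + 4) -- the wrong
	 * address entirely.
	 */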
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 77425c7a32db..ecce23cecbea 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -294,7 +294,7 @@ static struct platform_driver bgmac_enet_driver = {
.pm = BGMAC_PM_OPS
},
.probe = bgmac_probe,
- .remove_new = bgmac_remove,
+ .remove = bgmac_remove,
};
module_platform_driver(bgmac_enet_driver);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 6ffdc4229407..a461ec612e95 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1367,8 +1367,7 @@ static void bgmac_get_strings(struct net_device *dev, u32 stringset,
return;
for (i = 0; i < BGMAC_STATS_LEN; i++)
- strscpy(data + i * ETH_GSTRING_LEN,
- bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, bgmac_get_strings_stats[i].name);
}
static void bgmac_get_ethtool_stats(struct net_device *dev,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index adf7b6b94941..44199855ebfb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -39,34 +39,34 @@ static const struct {
int size;
char string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[] = {
-/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
+/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
- 8, "[%s]: rx_ucast_packets" },
+ 8, "[%d]: rx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
- 8, "[%s]: rx_mcast_packets" },
+ 8, "[%d]: rx_mcast_packets" },
{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
- 8, "[%s]: rx_bcast_packets" },
- { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" },
+ 8, "[%d]: rx_bcast_packets" },
+ { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
{ Q_STATS_OFFSET32(rx_err_discard_pkt),
- 4, "[%s]: rx_phy_ip_err_discards"},
+ 4, "[%d]: rx_phy_ip_err_discards"},
{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
- 4, "[%s]: rx_skb_alloc_discard" },
- { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
- { Q_STATS_OFFSET32(driver_xoff), 4, "[%s]: tx_exhaustion_events" },
- { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
+ 4, "[%d]: rx_skb_alloc_discard" },
+ { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
+ { Q_STATS_OFFSET32(driver_xoff), 4, "[%d]: tx_exhaustion_events" },
+ { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
- 8, "[%s]: tx_ucast_packets" },
+ 8, "[%d]: tx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
- 8, "[%s]: tx_mcast_packets" },
+ 8, "[%d]: tx_mcast_packets" },
{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
- 8, "[%s]: tx_bcast_packets" },
+ 8, "[%d]: tx_bcast_packets" },
{ Q_STATS_OFFSET32(total_tpa_aggregations_hi),
- 8, "[%s]: tpa_aggregations" },
+ 8, "[%d]: tpa_aggregations" },
{ Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
- 8, "[%s]: tpa_aggregated_frames"},
- { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"},
+ 8, "[%d]: tpa_aggregated_frames"},
+ { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%d]: tpa_bytes"},
{ Q_STATS_OFFSET32(driver_filtered_tx_pkt),
- 4, "[%s]: driver_filtered_tx_pkt" }
+ 4, "[%d]: driver_filtered_tx_pkt" }
};
#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
@@ -3184,49 +3184,43 @@ static u32 bnx2x_get_private_flags(struct net_device *dev)
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
- int i, j, k, start;
- char queue_name[MAX_QUEUE_NAME_LEN+1];
+ const char *str;
+ int i, j, start;
switch (stringset) {
case ETH_SS_STATS:
- k = 0;
if (is_multi(bp)) {
for_each_eth_queue(bp, i) {
- memset(queue_name, 0, sizeof(queue_name));
- snprintf(queue_name, sizeof(queue_name),
- "%d", i);
- for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
- snprintf(buf + (k + j)*ETH_GSTRING_LEN,
- ETH_GSTRING_LEN,
- bnx2x_q_stats_arr[j].string,
- queue_name);
- k += BNX2X_NUM_Q_STATS;
+ for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
+ str = bnx2x_q_stats_arr[j].string;
+ ethtool_sprintf(&buf, str, i);
+ }
}
}
- for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+ for (i = 0; i < BNX2X_NUM_STATS; i++) {
if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
continue;
- strcpy(buf + (k + j)*ETH_GSTRING_LEN,
- bnx2x_stats_arr[i].string);
- j++;
+ ethtool_puts(&buf, bnx2x_stats_arr[i].string);
}
break;
case ETH_SS_TEST:
+ if (IS_VF(bp))
+ break;
/* First 4 tests cannot be done in MF mode */
if (!IS_MF(bp))
start = 0;
else
start = 4;
- memcpy(buf, bnx2x_tests_str_arr + start,
- ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
+ for (i = start; i < BNX2X_NUM_TESTS_SF; i++)
+ ethtool_puts(&buf, bnx2x_tests_str_arr[i]);
break;
case ETH_SS_PRIV_FLAGS:
- memcpy(buf, bnx2x_private_arr,
- ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN);
+ for (i = 0; i < BNX2X_PRI_FLAG_LEN; i++)
+ ethtool_puts(&buf, bnx2x_private_arr[i]);
break;
}
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 99d025b69079..5f7bdafcf05d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -245,6 +245,21 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
+ ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER,
+};
+
+const u16 bnxt_bstore_to_trace[] = {
+ [BNXT_CTX_SRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE,
+ [BNXT_CTX_SRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE,
+ [BNXT_CTX_CRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE,
+ [BNXT_CTX_CRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE,
+ [BNXT_CTX_RIGP0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE,
+ [BNXT_CTX_L2HWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE,
+ [BNXT_CTX_REHWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE,
+ [BNXT_CTX_CA0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE,
+ [BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
+ [BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
+ [BNXT_CTX_RIGP1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -864,6 +879,11 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
bnapi->events &= ~BNXT_TX_CMP_EVENT;
}
+static bool bnxt_separate_head_pool(void)
+{
+ return PAGE_SIZE > BNXT_RX_PAGE_SIZE;
+}
+
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
struct bnxt_rx_ring_info *rxr,
unsigned int *offset,
@@ -886,27 +906,19 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
}
static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
+ struct bnxt_rx_ring_info *rxr,
gfp_t gfp)
{
- u8 *data;
- struct pci_dev *pdev = bp->pdev;
+ unsigned int offset;
+ struct page *page;
- if (gfp == GFP_ATOMIC)
- data = napi_alloc_frag(bp->rx_buf_size);
- else
- data = netdev_alloc_frag(bp->rx_buf_size);
- if (!data)
+ page = page_pool_alloc_frag(rxr->head_pool, &offset,
+ bp->rx_buf_size, gfp);
+ if (!page)
return NULL;
- *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
-
- if (dma_mapping_error(&pdev->dev, *mapping)) {
- skb_free_frag(data);
- data = NULL;
- }
- return data;
+ *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
+ return page_address(page) + offset;
}
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
@@ -928,7 +940,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
rx_buf->data = page;
rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
} else {
- u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
+ u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);
if (!data)
return -ENOMEM;
@@ -1179,13 +1191,14 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
}
skb = napi_build_skb(data, bp->rx_buf_size);
- dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
- bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+ dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+ bp->rx_dir);
if (!skb) {
- skb_free_frag(data);
+ page_pool_free_va(rxr->head_pool, data, true);
return NULL;
}
+ skb_mark_for_recycle(skb);
skb_reserve(skb, bp->rx_offset);
skb_put(skb, offset_and_len & 0xffff);
return skb;
@@ -1840,7 +1853,8 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
u8 *new_data;
dma_addr_t new_mapping;
- new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
+ new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr,
+ GFP_ATOMIC);
if (!new_data) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
cpr->sw_stats->rx.rx_oom_discards += 1;
@@ -1852,16 +1866,16 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
tpa_info->mapping = new_mapping;
skb = napi_build_skb(data, bp->rx_buf_size);
- dma_unmap_single_attrs(&bp->pdev->dev, mapping,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
+ dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
+ bp->rx_buf_use_size, bp->rx_dir);
if (!skb) {
- skb_free_frag(data);
+ page_pool_free_va(rxr->head_pool, data, true);
bnxt_abort_tpa(cpr, idx, agg_bufs);
cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
+ skb_mark_for_recycle(skb);
skb_reserve(skb, bp->rx_offset);
skb_put(skb, len);
}
@@ -2254,11 +2268,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- unsigned long flags;
- spin_lock_irqsave(&ptp->ptp_lock, flags);
- ns = timecounter_cyc2time(&ptp->tc, ts);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ ns = bnxt_timecounter_cyc2time(ptp, ts);
memset(skb_hwtstamps(skb), 0,
sizeof(*skb_hwtstamps(skb)));
skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
@@ -2465,6 +2476,59 @@ static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
return false;
}
+bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type)
+{
+ u32 flags = bp->ctx->ctx_arr[type].flags;
+
+ return (flags & BNXT_CTX_MEM_TYPE_VALID) &&
+ ((flags & BNXT_CTX_MEM_FW_TRACE) ||
+ (flags & BNXT_CTX_MEM_FW_BIN_TRACE));
+}
+
+static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm)
+{
+ u32 mem_size, pages, rem_bytes, magic_byte_offset;
+ u16 trace_type = bnxt_bstore_to_trace[ctxm->type];
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl;
+ struct bnxt_bs_trace_info *bs_trace;
+ int last_pg;
+
+ if (ctxm->instance_bmap && ctxm->instance_bmap > 1)
+ return;
+
+ mem_size = ctxm->max_entries * ctxm->entry_size;
+ rem_bytes = mem_size % BNXT_PAGE_SIZE;
+ pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
+
+ last_pg = (pages - 1) & (MAX_CTX_PAGES - 1);
+ magic_byte_offset = (rem_bytes ? rem_bytes : BNXT_PAGE_SIZE) - 1;
+
+ rmem = &ctx_pg[0].ring_mem;
+ bs_trace = &bp->bs_trace[trace_type];
+ bs_trace->ctx_type = ctxm->type;
+ bs_trace->trace_type = trace_type;
+ if (pages > MAX_CTX_PAGES) {
+ int last_pg_dir = rmem->nr_pages - 1;
+
+ rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem;
+ bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg];
+ } else {
+ bs_trace->magic_byte = rmem->pg_arr[last_pg];
+ }
+ bs_trace->magic_byte += magic_byte_offset;
+ *bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE;
+}
+
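The magic byte seeded at the very last byte of the trace buffer is presumably what bnxt_bs_trace_check_wrap() consults when a DBG_BUF_PRODUCER event arrives: once firmware has written past the end of the ring, the sentinel is overwritten and the buffer must be treated as wrapped. A sketch under that assumption (illustrative only, not the driver's actual helper):

	static bool bnxt_bs_trace_wrapped(struct bnxt_bs_trace_info *bs_trace)
	{
		/* firmware clobbered the sentinel -> producer wrapped */
		return *bs_trace->magic_byte != BNXT_TRACE_BUF_MAGIC_BYTE;
	}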
+#define BNXT_EVENT_BUF_PRODUCER_TYPE(data1) \
+ (((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\
+ ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT)
+
+#define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2) \
+ (((data2) & \
+ ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\
+ ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT)
+
#define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \
((data2) & \
ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
@@ -2764,12 +2828,12 @@ static int bnxt_async_event_process(struct bnxt *bp,
if (!ptp)
goto async_event_process_exit;
- spin_lock_irqsave(&ptp->ptp_lock, flags);
bnxt_ptp_update_current_time(bp);
ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
BNXT_PHC_BITS) | ptp->current_time);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
bnxt_ptp_rtc_timecounter_init(ptp, ns);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
}
break;
}
@@ -2781,6 +2845,13 @@ static int bnxt_async_event_process(struct bnxt *bp,
hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: {
+ u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
+ u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);
+
+ bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
+ goto async_event_process_exit;
+ }
default:
goto async_event_process_exit;
}
@@ -3102,7 +3173,7 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
cpr->rx_packets,
cpr->rx_bytes,
&dim_sample);
- net_dim(&cpr->dim, dim_sample);
+ net_dim(&cpr->dim, &dim_sample);
}
return work_done;
}
@@ -3233,7 +3304,7 @@ poll_done:
cpr_rx->rx_packets,
cpr_rx->rx_bytes,
&dim_sample);
- net_dim(&cpr->dim, dim_sample);
+ net_dim(&cpr->dim, &dim_sample);
}
return work_done;
}
@@ -3311,28 +3382,22 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
- struct pci_dev *pdev = bp->pdev;
int i, max_idx;
max_idx = bp->rx_nr_pages * RX_DESC_CNT;
for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
- dma_addr_t mapping = rx_buf->mapping;
void *data = rx_buf->data;
if (!data)
continue;
rx_buf->data = NULL;
- if (BNXT_RX_PAGE_MODE(bp)) {
+ if (BNXT_RX_PAGE_MODE(bp))
page_pool_recycle_direct(rxr->page_pool, data);
- } else {
- dma_unmap_single_attrs(&pdev->dev, mapping,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
- skb_free_frag(data);
- }
+ else
+ page_pool_free_va(rxr->head_pool, data, true);
}
}
@@ -3359,7 +3424,6 @@ static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info
static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
{
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
- struct pci_dev *pdev = bp->pdev;
struct bnxt_tpa_idx_map *map;
int i;
@@ -3373,13 +3437,8 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
if (!data)
continue;
- dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
-
tpa_info->data = NULL;
-
- skb_free_frag(data);
+ page_pool_free_va(rxr->head_pool, data, false);
}
skip_rx_tpa_free:
@@ -3434,6 +3493,35 @@ static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
*(p2 + i + offset) = init_val;
}
+static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem,
+ void *buf, size_t offset, size_t head,
+ size_t tail)
+{
+ int i, head_page, start_idx, source_offset;
+ size_t len, rem_len, total_len, max_bytes;
+
+ head_page = head / rmem->page_size;
+ source_offset = head % rmem->page_size;
+ total_len = (tail - head) & MAX_CTX_BYTES_MASK;
+ if (!total_len)
+ total_len = MAX_CTX_BYTES;
+ start_idx = head_page % MAX_CTX_PAGES;
+ max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size -
+ source_offset;
+ total_len = min(total_len, max_bytes);
+ rem_len = total_len;
+
+ for (i = start_idx; rem_len; i++, source_offset = 0) {
+ len = min((size_t)(rmem->page_size - source_offset), rem_len);
+ if (buf)
+ memcpy(buf + offset, rmem->pg_arr[i] + source_offset,
+ len);
+ offset += len;
+ rem_len -= len;
+ }
+ return total_len;
+}
+
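A worked example of the head/tail arithmetic, assuming for illustration that MAX_CTX_BYTES equals the buffer capacity -- say a 4-page ring with 4096-byte pages (16384 bytes):

	/* head = 15000, tail = 1000 (producer has wrapped):
	 *   total_len = (1000 - 15000) & (16384 - 1) = 2384 bytes
	 * head == tail yields 0, which the code re-reads as "completely
	 * full" and replaces with the full MAX_CTX_BYTES.
	 */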
static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
struct pci_dev *pdev = bp->pdev;
@@ -3595,7 +3683,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
xdp_rxq_info_unreg(&rxr->xdp_rxq);
page_pool_destroy(rxr->page_pool);
- rxr->page_pool = NULL;
+ if (rxr->page_pool != rxr->head_pool)
+ page_pool_destroy(rxr->head_pool);
+ rxr->page_pool = rxr->head_pool = NULL;
kfree(rxr->rx_agg_bmap);
rxr->rx_agg_bmap = NULL;
@@ -3613,6 +3703,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
int numa_node)
{
struct page_pool_params pp = { 0 };
+ struct page_pool *pool;
pp.pool_size = bp->rx_agg_ring_size;
if (BNXT_RX_PAGE_MODE(bp))
@@ -3625,14 +3716,25 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
pp.max_len = PAGE_SIZE;
pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
- rxr->page_pool = page_pool_create(&pp);
- if (IS_ERR(rxr->page_pool)) {
- int err = PTR_ERR(rxr->page_pool);
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+ rxr->page_pool = pool;
- rxr->page_pool = NULL;
- return err;
+ if (bnxt_separate_head_pool()) {
+ pp.pool_size = max(bp->rx_ring_size, 1024);
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ goto err_destroy_pp;
}
+ rxr->head_pool = pool;
+
return 0;
+
+err_destroy_pp:
+ page_pool_destroy(rxr->page_pool);
+ rxr->page_pool = NULL;
+ return PTR_ERR(pool);
}
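With bnxt_separate_head_pool() true (system PAGE_SIZE larger than BNXT_RX_PAGE_SIZE), header buffers come from their own frag-capable pool so one large page can be sliced into many rx_buf_size chunks. The allocation/free pairing this enables, as used in the hunks above:

	unsigned int offset;
	struct page *page;
	u8 *data;

	/* carve an rx_buf_size slice out of a (possibly shared) page */
	page = page_pool_alloc_frag(rxr->head_pool, &offset,
				    bp->rx_buf_size, GFP_ATOMIC);
	if (page)
		data = page_address(page) + offset;

	/* release by virtual address; true = direct recycling allowed */
	page_pool_free_va(rxr->head_pool, data, true);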
static int bnxt_alloc_rx_rings(struct bnxt *bp)
@@ -4183,7 +4285,8 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
u8 *data;
for (i = 0; i < bp->max_tpa; i++) {
- data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
+ data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -8226,6 +8329,9 @@ static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
return 0;
}
+static void bnxt_free_one_ctx_mem(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm, bool force);
+
#define BNXT_CTX_INIT_VALID(flags) \
(!!((flags) & \
FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
@@ -8234,7 +8340,7 @@ static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
{
struct hwrm_func_backing_store_qcaps_v2_output *resp;
struct hwrm_func_backing_store_qcaps_v2_input *req;
- struct bnxt_ctx_mem_info *ctx;
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
u16 type;
int rc;
@@ -8242,16 +8348,20 @@ static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
if (rc)
return rc;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
- bp->ctx = ctx;
+ if (!ctx) {
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ bp->ctx = ctx;
+ }
resp = hwrm_req_hold(bp, req);
for (type = 0; type < BNXT_CTX_V2_MAX; ) {
struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
u8 init_val, init_off, i;
+ u32 max_entries;
+ u16 entry_size;
__le32 *p;
u32 flags;
@@ -8261,15 +8371,26 @@ static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
goto ctx_done;
flags = le32_to_cpu(resp->flags);
type = le16_to_cpu(resp->next_valid_type);
- if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
+ if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) {
+ bnxt_free_one_ctx_mem(bp, ctxm, true);
continue;
-
+ }
+ entry_size = le16_to_cpu(resp->entry_size);
+ max_entries = le32_to_cpu(resp->max_num_entries);
+ if (ctxm->mem_valid) {
+ if (!(flags & BNXT_CTX_MEM_PERSIST) ||
+ ctxm->entry_size != entry_size ||
+ ctxm->max_entries != max_entries)
+ bnxt_free_one_ctx_mem(bp, ctxm, true);
+ else
+ continue;
+ }
ctxm->type = le16_to_cpu(resp->type);
- ctxm->entry_size = le16_to_cpu(resp->entry_size);
+ ctxm->entry_size = entry_size;
ctxm->flags = flags;
ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
ctxm->entry_multiple = resp->entry_multiple;
- ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
+ ctxm->max_entries = max_entries;
ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
init_val = resp->ctx_init_value;
init_off = resp->ctx_init_offset;
@@ -8294,7 +8415,8 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
struct hwrm_func_backing_store_qcaps_input *req;
int rc;
- if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
+ if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) ||
+ (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED))
return 0;
if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
@@ -8635,6 +8757,36 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
return rc;
}
+static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp,
+ struct bnxt_ctx_pg_info *ctx_pg,
+ void *buf, size_t offset, size_t head,
+ size_t tail)
+{
+ struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
+ size_t nr_pages = ctx_pg->nr_pages;
+ int page_size = rmem->page_size;
+ size_t len = 0, total_len = 0;
+ u16 depth = rmem->depth;
+
+ tail %= nr_pages * page_size;
+ do {
+ if (depth > 1) {
+ int i = head / (page_size * MAX_CTX_PAGES);
+ struct bnxt_ctx_pg_info *pg_tbl;
+
+ pg_tbl = ctx_pg->ctx_pg_tbl[i];
+ rmem = &pg_tbl->ring_mem;
+ }
+ len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail);
+ head += len;
+ offset += len;
+ total_len += len;
+ if (head >= nr_pages * page_size)
+ head = 0;
+ } while (head != tail);
+ return total_len;
+}
+
static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg)
{
@@ -8685,6 +8837,8 @@ static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
ctxm->init_value ? ctxm : NULL);
}
+ if (!rc)
+ ctxm->mem_valid = 1;
return rc;
}
@@ -8711,6 +8865,16 @@ static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
hwrm_req_hold(bp, req);
req->type = cpu_to_le16(ctxm->type);
req->entry_size = cpu_to_le16(ctxm->entry_size);
+ if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) &&
+ bnxt_bs_trace_avail(bp, ctxm->type)) {
+ struct bnxt_bs_trace_info *bs_trace;
+ u32 enables;
+
+ enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET;
+ req->enables = cpu_to_le32(enables);
+ bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]];
+ req->next_bs_offset = cpu_to_le32(bs_trace->last_offset);
+ }
req->subtype_valid_cnt = ctxm->split_entry_cnt;
for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
p[i] = cpu_to_le32(ctxm->split[i]);
@@ -8740,21 +8904,42 @@ static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
{
struct bnxt_ctx_mem_info *ctx = bp->ctx;
struct bnxt_ctx_mem_type *ctxm;
- u16 last_type;
+ u16 last_type = BNXT_CTX_INV;
int rc = 0;
u16 type;
- if (!ena)
- return 0;
- else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
- last_type = BNXT_CTX_MAX - 1;
- else
- last_type = BNXT_CTX_L2_MAX - 1;
+ for (type = BNXT_CTX_SRT; type <= BNXT_CTX_RIGP1; type++) {
+ ctxm = &ctx->ctx_arr[type];
+ if (!bnxt_bs_trace_avail(bp, type))
+ continue;
+ if (!ctxm->mem_valid) {
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm,
+ ctxm->max_entries, 1);
+ if (rc) {
+ netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n",
+ type);
+ continue;
+ }
+ bnxt_bs_trace_init(bp, ctxm);
+ last_type = type;
+ }
+ }
+
+ if (last_type == BNXT_CTX_INV) {
+ if (!ena)
+ return 0;
+ else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
+ last_type = BNXT_CTX_MAX - 1;
+ else
+ last_type = BNXT_CTX_L2_MAX - 1;
+ }
ctx->ctx_arr[last_type].last = 1;
for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
ctxm = &ctx->ctx_arr[type];
+ if (!ctxm->mem_valid)
+ continue;
rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
if (rc)
return rc;
@@ -8762,21 +8947,63 @@ static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
return 0;
}
-void bnxt_free_ctx_mem(struct bnxt *bp)
+/**
+ * __bnxt_copy_ctx_mem - copy host context memory
+ * @bp: The driver context
+ * @ctxm: The pointer to the context memory type
+ * @buf: The destination buffer or NULL to just obtain the length
+ * @offset: The buffer offset to copy the data to
+ * @head: The head offset of context memory to copy from
+ * @tail: The tail offset (last byte + 1) of context memory to end the copy
+ *
+ * This function is called for debugging purposes to dump the host context
+ * used by the chip.
+ *
+ * Return: Length of memory copied
+ */
+static size_t __bnxt_copy_ctx_mem(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm, void *buf,
+ size_t offset, size_t head, size_t tail)
{
- struct bnxt_ctx_mem_info *ctx = bp->ctx;
- u16 type;
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ size_t len = 0, total_len = 0;
+ int i, n = 1;
- if (!ctx)
- return;
+ if (!ctx_pg)
+ return 0;
- for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
- struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
- struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
- int i, n = 1;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ for (i = 0; i < n; i++) {
+ len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head,
+ tail);
+ offset += len;
+ total_len += len;
+ }
+ return total_len;
+}
- if (!ctx_pg)
- continue;
+size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
+ void *buf, size_t offset)
+{
+ size_t tail = ctxm->max_entries * ctxm->entry_size;
+
+ return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail);
+}
+
+static void bnxt_free_one_ctx_mem(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm, bool force)
+{
+ struct bnxt_ctx_pg_info *ctx_pg;
+ int i, n = 1;
+
+ ctxm->last = 0;
+
+ if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST))
+ return;
+
+ ctx_pg = ctxm->pg_info;
+ if (ctx_pg) {
if (ctxm->instance_bmap)
n = hweight32(ctxm->instance_bmap);
for (i = 0; i < n; i++)
@@ -8784,11 +9011,27 @@ void bnxt_free_ctx_mem(struct bnxt *bp)
kfree(ctx_pg);
ctxm->pg_info = NULL;
+ ctxm->mem_valid = 0;
}
+ memset(ctxm, 0, sizeof(*ctxm));
+}
+
+void bnxt_free_ctx_mem(struct bnxt *bp, bool force)
+{
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ u16 type;
+
+ if (!ctx)
+ return;
+
+ for (type = 0; type < BNXT_CTX_V2_MAX; type++)
+ bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force);
ctx->flags &= ~BNXT_CTX_FLAG_INITED;
- kfree(ctx);
- bp->ctx = NULL;
+ if (force) {
+ kfree(ctx);
+ bp->ctx = NULL;
+ }
}
static int bnxt_alloc_ctx_mem(struct bnxt *bp)
@@ -10885,7 +11128,7 @@ static void bnxt_free_irq(struct bnxt *bp)
irq = &bp->irq_tbl[map_idx];
if (irq->requested) {
if (irq->have_cpumask) {
- irq_set_affinity_hint(irq->vector, NULL);
+ irq_update_affinity_hint(irq->vector, NULL);
free_cpumask_var(irq->cpu_mask);
irq->have_cpumask = 0;
}
@@ -10940,10 +11183,10 @@ static int bnxt_request_irq(struct bnxt *bp)
irq->have_cpumask = 1;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
irq->cpu_mask);
- rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
+ rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
if (rc) {
netdev_warn(bp->dev,
- "Set affinity failed, IRQ = %d\n",
+ "Update affinity hint failed, IRQ = %d\n",
irq->vector);
break;
}
@@ -10988,7 +11231,8 @@ static void bnxt_init_napi(struct bnxt *bp)
cp_nr_rings--;
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
- netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
+ netif_napi_add_config(bp->dev, &bnapi->napi, poll_fn,
+ bnapi->index);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bnapi = bp->bnapi[cp_nr_rings];
@@ -11748,7 +11992,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
bnxt_ulp_irq_stop(bp);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, false);
bnxt_dcb_free(bp);
rc = bnxt_fw_init_one(bp);
if (rc) {
@@ -12882,7 +13126,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if (features & NETIF_F_GRO_HW)
features &= ~NETIF_F_LRO;
- /* Both CTAG and STAG VLAN accelaration on the RX side have to be
+ /* Both CTAG and STAG VLAN acceleration on the RX side have to be
* turned on or off together.
*/
vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
@@ -13461,7 +13705,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
bnxt_hwrm_func_drv_unrgtr(bp);
if (pci_is_enabled(bp->pdev))
pci_disable_device(bp->pdev);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, false);
}
static bool is_bnxt_fw_ok(struct bnxt *bp)
@@ -13495,12 +13739,13 @@ static void bnxt_force_fw_reset(struct bnxt *bp)
test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return;
+ /* We have to serialize with bnxt_refclk_read() */
if (ptp) {
unsigned long flags;
- spin_lock_irqsave(&ptp->ptp_lock, flags);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
} else {
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
}
@@ -13564,12 +13809,13 @@ void bnxt_fw_reset(struct bnxt *bp)
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
int n = 0, tmo;
+ /* We have to serialize with bnxt_refclk_read() */
if (ptp) {
unsigned long flags;
- spin_lock_irqsave(&ptp->ptp_lock, flags);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
} else {
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
}
@@ -15316,7 +15562,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, true);
bnxt_free_crash_dump_mem(bp);
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
@@ -15958,7 +16204,7 @@ init_err_pci_clean:
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, true);
bnxt_free_crash_dump_mem(bp);
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
@@ -16012,7 +16258,7 @@ static int bnxt_suspend(struct device *device)
}
bnxt_hwrm_func_drv_unrgtr(bp);
pci_disable_device(bp->pdev);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, false);
rtnl_unlock();
return rc;
}
@@ -16124,7 +16370,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, false);
rtnl_unlock();
/* Request a slot reset. */
@@ -16136,7 +16382,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot.
- * At this point, the card has exprienced a hard reset,
+ * At this point, the card has experienced a hard reset,
* followed by fixups by BIOS, and has its config space
* set up identically to what it was at cold boot.
*/
@@ -16164,7 +16410,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
/* Upon fatal error, our device internal logic that latches to
* BAR value is getting reset and will restore only upon
- * rewritting the BARs.
+ * rewriting the BARs.
*
* As pci_restore_state() does not re-write the BARs if the
* value is same as saved value earlier, driver needs to
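The new bnxt_copy_ctx_mem()/__bnxt_copy_ctx_mem() pair above walks the paged
backing store and copies the [head, tail) byte range to a caller buffer at the
given offset, returning the length either way so callers can run a sizing pass
with a NULL buffer first. A minimal userspace sketch of that contract, with the
page-table walk collapsed into one flat buffer (ctx_copy() is a hypothetical
stand-in, not a driver function):

#include <stddef.h>
#include <string.h>

/* Copy the [head, tail) slice of a flat context area into buf + offset.
 * Returns the number of bytes copied; with buf == NULL it only sizes.
 */
size_t ctx_copy(const void *ctx, size_t ctx_size, void *buf,
		size_t offset, size_t head, size_t tail)
{
	size_t len;

	if (tail > ctx_size || head > tail)
		return 0;
	len = tail - head;
	if (buf)
		memcpy((char *)buf + offset, (const char *)ctx + head, len);
	return len;
}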
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 69231e85140b..231e38933984 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1105,6 +1105,7 @@ struct bnxt_rx_ring_info {
struct bnxt_ring_struct rx_agg_ring_struct;
struct xdp_rxq_info xdp_rxq;
struct page_pool *page_pool;
+ struct page_pool *head_pool;
};
struct bnxt_rx_sw_stats {
@@ -1848,6 +1849,8 @@ struct bnxt_vf_rep {
#define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8)
#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES)
+#define MAX_CTX_BYTES ((size_t)MAX_CTX_TOTAL_PAGES * BNXT_PAGE_SIZE)
+#define MAX_CTX_BYTES_MASK (MAX_CTX_BYTES - 1)
struct bnxt_ctx_pg_info {
u32 entries;
@@ -1880,6 +1883,13 @@ struct bnxt_ctx_mem_type {
u16 entry_size;
u32 flags;
#define BNXT_CTX_MEM_TYPE_VALID FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID
+#define BNXT_CTX_MEM_FW_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE
+#define BNXT_CTX_MEM_FW_BIN_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_BIN_DBG_TRACE
+#define BNXT_CTX_MEM_PERSIST \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_NEXT_BS_OFFSET
+
u32 instance_bmap;
u8 init_value;
u8 entry_multiple;
@@ -1888,6 +1898,7 @@ struct bnxt_ctx_mem_type {
u32 max_entries;
u32 min_entries;
u8 last:1;
+ u8 mem_valid:1;
u8 split_entry_cnt;
#define BNXT_MAX_SPLIT_ENTRY 4
union {
@@ -1919,21 +1930,30 @@ struct bnxt_ctx_mem_type {
#define BNXT_CTX_FTQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING
#define BNXT_CTX_MRAV FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV
#define BNXT_CTX_TIM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM
-#define BNXT_CTX_TKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC
-#define BNXT_CTX_RKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC
+#define BNXT_CTX_TCK FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK
+#define BNXT_CTX_RCK FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK
#define BNXT_CTX_MTQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING
#define BNXT_CTX_SQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW
#define BNXT_CTX_RQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW
#define BNXT_CTX_SRQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW
#define BNXT_CTX_CQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW
-#define BNXT_CTX_QTKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC
-#define BNXT_CTX_QRKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC
#define BNXT_CTX_TBLSC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE
#define BNXT_CTX_XPAR FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION
+#define BNXT_CTX_SRT FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE
+#define BNXT_CTX_SRT2 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE
+#define BNXT_CTX_CRT FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE
+#define BNXT_CTX_CRT2 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE
+#define BNXT_CTX_RIGP0 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE
+#define BNXT_CTX_L2HWRM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE
+#define BNXT_CTX_REHWRM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE
+#define BNXT_CTX_CA0 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA0_TRACE
+#define BNXT_CTX_CA1 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE
+#define BNXT_CTX_CA2 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE
+#define BNXT_CTX_RIGP1 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE
#define BNXT_CTX_MAX (BNXT_CTX_TIM + 1)
#define BNXT_CTX_L2_MAX (BNXT_CTX_FTQM + 1)
-#define BNXT_CTX_V2_MAX (BNXT_CTX_XPAR + 1)
+#define BNXT_CTX_V2_MAX (BNXT_CTX_RIGP1 + 1)
#define BNXT_CTX_INV ((u16)-1)
struct bnxt_ctx_mem_info {
@@ -2094,6 +2114,26 @@ enum board_idx {
NETXTREME_E_P7_VF,
};
+#define BNXT_TRACE_BUF_MAGIC_BYTE ((u8)0xbc)
+#define BNXT_TRACE_MAX 11
+
+struct bnxt_bs_trace_info {
+ u8 *magic_byte;
+ u32 last_offset;
+ u8 wrapped:1;
+ u16 ctx_type;
+ u16 trace_type;
+};
+
+static inline void bnxt_bs_trace_check_wrap(struct bnxt_bs_trace_info *bs_trace,
+ u32 offset)
+{
+ if (!bs_trace->wrapped &&
+ *bs_trace->magic_byte != BNXT_TRACE_BUF_MAGIC_BYTE)
+ bs_trace->wrapped = 1;
+ bs_trace->last_offset = offset;
+}
+
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
@@ -2622,6 +2662,7 @@ struct bnxt {
u16 dump_flag;
#define BNXT_DUMP_LIVE 0
#define BNXT_DUMP_CRASH 1
+#define BNXT_DUMP_DRIVER 2
struct bpf_prog *xdp_prog;
@@ -2650,6 +2691,7 @@ struct bnxt {
struct bnxt_ctx_pg_info *fw_crash_mem;
u32 fw_crash_len;
+ struct bnxt_bs_trace_info bs_trace[BNXT_TRACE_MAX];
};
#define BNXT_NUM_RX_RING_STATS 8
@@ -2785,12 +2827,14 @@ static inline bool bnxt_sriov_cfg(struct bnxt *bp)
#endif
}
+extern const u16 bnxt_bstore_to_trace[];
extern const u16 bnxt_lhint_arr[];
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 prod, gfp_t gfp);
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data);
u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
+bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
@@ -2822,7 +2866,9 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_nq_rings_in_use(struct bnxt *bp);
int bnxt_hwrm_set_coal(struct bnxt *);
-void bnxt_free_ctx_mem(struct bnxt *bp);
+size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
+ void *buf, size_t offset);
+void bnxt_free_ctx_mem(struct bnxt *bp, bool force);
int bnxt_num_tx_to_cp(struct bnxt *bp, int tx);
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
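bnxt_bs_trace_check_wrap() above detects ring wrap-around with a sentinel: the
driver seeds the last byte of the trace buffer with BNXT_TRACE_BUF_MAGIC_BYTE,
and once firmware has written far enough to clobber it, the ring is known to
have wrapped. A self-contained model of the scheme (names are illustrative,
not the driver's):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define TRACE_MAGIC 0xbc

struct trace_ring {
	uint8_t *buf;
	size_t size;
	uint32_t last_offset;
	bool wrapped;
};

void trace_ring_init(struct trace_ring *r, uint8_t *buf, size_t size)
{
	r->buf = buf;
	r->size = size;
	r->last_offset = 0;
	r->wrapped = false;
	buf[size - 1] = TRACE_MAGIC;	/* sentinel in the final byte */
}

void trace_ring_check_wrap(struct trace_ring *r, uint32_t offset)
{
	if (!r->wrapped && r->buf[r->size - 1] != TRACE_MAGIC)
		r->wrapped = true;	/* sentinel clobbered: producer looped */
	r->last_offset = offset;
}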
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index 4e2b938ed1f7..7236d8e548ab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -15,6 +15,50 @@
#include "bnxt_hwrm.h"
#include "bnxt_coredump.h"
+static const u16 bnxt_bstore_to_seg_id[] = {
+ [BNXT_CTX_QP] = BNXT_CTX_MEM_SEG_QP,
+ [BNXT_CTX_SRQ] = BNXT_CTX_MEM_SEG_SRQ,
+ [BNXT_CTX_CQ] = BNXT_CTX_MEM_SEG_CQ,
+ [BNXT_CTX_VNIC] = BNXT_CTX_MEM_SEG_VNIC,
+ [BNXT_CTX_STAT] = BNXT_CTX_MEM_SEG_STAT,
+ [BNXT_CTX_STQM] = BNXT_CTX_MEM_SEG_STQM,
+ [BNXT_CTX_FTQM] = BNXT_CTX_MEM_SEG_FTQM,
+ [BNXT_CTX_MRAV] = BNXT_CTX_MEM_SEG_MRAV,
+ [BNXT_CTX_TIM] = BNXT_CTX_MEM_SEG_TIM,
+ [BNXT_CTX_SRT] = BNXT_CTX_MEM_SEG_SRT,
+ [BNXT_CTX_SRT2] = BNXT_CTX_MEM_SEG_SRT2,
+ [BNXT_CTX_CRT] = BNXT_CTX_MEM_SEG_CRT,
+ [BNXT_CTX_CRT2] = BNXT_CTX_MEM_SEG_CRT2,
+ [BNXT_CTX_RIGP0] = BNXT_CTX_MEM_SEG_RIGP0,
+ [BNXT_CTX_L2HWRM] = BNXT_CTX_MEM_SEG_L2HWRM,
+ [BNXT_CTX_REHWRM] = BNXT_CTX_MEM_SEG_REHWRM,
+ [BNXT_CTX_CA0] = BNXT_CTX_MEM_SEG_CA0,
+ [BNXT_CTX_CA1] = BNXT_CTX_MEM_SEG_CA1,
+ [BNXT_CTX_CA2] = BNXT_CTX_MEM_SEG_CA2,
+ [BNXT_CTX_RIGP1] = BNXT_CTX_MEM_SEG_RIGP1,
+};
+
+static int bnxt_dbg_hwrm_log_buffer_flush(struct bnxt *bp, u16 type, u32 flags,
+ u32 *offset)
+{
+ struct hwrm_dbg_log_buffer_flush_output *resp;
+ struct hwrm_dbg_log_buffer_flush_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_LOG_BUFFER_FLUSH);
+ if (rc)
+ return rc;
+
+ req->flags = cpu_to_le32(flags);
+ req->type = cpu_to_le16(type);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc)
+ *offset = le32_to_cpu(resp->current_buffer_offset);
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
struct bnxt_hwrm_dbg_dma_info *info)
{
@@ -165,11 +209,12 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
return rc;
}
-static void
+void
bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
struct bnxt_coredump_segment_hdr *seg_hdr,
struct coredump_segment_record *seg_rec, u32 seg_len,
- int status, u32 duration, u32 instance)
+ int status, u32 duration, u32 instance, u32 comp_id,
+ u32 seg_id)
{
memset(seg_hdr, 0, sizeof(*seg_hdr));
memcpy(seg_hdr->signature, "sEgM", 4);
@@ -180,11 +225,8 @@ bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
seg_hdr->high_version = seg_rec->version_hi;
seg_hdr->flags = cpu_to_le32(seg_rec->compress_flags);
} else {
- /* For hwrm_ver_get response Component id = 2
- * and Segment id = 0
- */
- seg_hdr->component_id = cpu_to_le32(2);
- seg_hdr->segment_id = 0;
+ seg_hdr->component_id = cpu_to_le32(comp_id);
+ seg_hdr->segment_id = cpu_to_le32(seg_id);
}
seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
seg_hdr->length = cpu_to_le32(seg_len);
@@ -269,7 +311,78 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
record->ioctl_high_version = 0;
}
-static int __bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
+static void bnxt_fill_drv_seg_record(struct bnxt *bp,
+ struct bnxt_driver_segment_record *record,
+ struct bnxt_ctx_mem_type *ctxm, u16 type)
+{
+ struct bnxt_bs_trace_info *bs_trace = &bp->bs_trace[type];
+ u32 offset = 0;
+ int rc = 0;
+
+ rc = bnxt_dbg_hwrm_log_buffer_flush(bp, type, 0, &offset);
+ if (rc)
+ return;
+
+ bnxt_bs_trace_check_wrap(bs_trace, offset);
+ record->max_entries = cpu_to_le32(ctxm->max_entries);
+ record->entry_size = cpu_to_le32(ctxm->entry_size);
+ record->offset = cpu_to_le32(bs_trace->last_offset);
+ record->wrapped = bs_trace->wrapped;
+}
+
+static u32 bnxt_get_ctx_coredump(struct bnxt *bp, void *buf, u32 offset,
+ u32 *segs)
+{
+ struct bnxt_driver_segment_record record = {};
+ struct bnxt_coredump_segment_hdr seg_hdr;
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ u32 comp_id = BNXT_DRV_COMP_ID;
+ void *data = NULL;
+ size_t len = 0;
+ u16 type;
+
+ *segs = 0;
+ if (!ctx)
+ return 0;
+
+ if (buf)
+ buf += offset;
+ for (type = 0; type <= BNXT_CTX_RIGP1; type++) {
+ struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ bool trace = bnxt_bs_trace_avail(bp, type);
+ u32 seg_id = bnxt_bstore_to_seg_id[type];
+ size_t seg_len, extra_hlen = 0;
+
+ if (!ctxm->mem_valid || !seg_id)
+ continue;
+
+ if (trace)
+ extra_hlen = BNXT_SEG_RCD_LEN;
+ if (buf)
+ data = buf + BNXT_SEG_HDR_LEN + extra_hlen;
+ seg_len = bnxt_copy_ctx_mem(bp, ctxm, data, 0) + extra_hlen;
+ if (buf) {
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, seg_len,
+ 0, 0, 0, comp_id, seg_id);
+ memcpy(buf, &seg_hdr, BNXT_SEG_HDR_LEN);
+ buf += BNXT_SEG_HDR_LEN;
+ if (trace) {
+ u16 trace_type = bnxt_bstore_to_trace[type];
+
+ bnxt_fill_drv_seg_record(bp, &record, ctxm,
+ trace_type);
+ memcpy(buf, &record, BNXT_SEG_RCD_LEN);
+ }
+ buf += seg_len;
+ }
+ len += BNXT_SEG_HDR_LEN + seg_len;
+ *segs += 1;
+ }
+ return len;
+}
+
+static int __bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf,
+ u32 *dump_len)
{
u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
@@ -287,17 +400,31 @@ static int __bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
start_utc = sys_tz.tz_minuteswest * 60;
seg_hdr_len = sizeof(seg_hdr);
- /* First segment should be hwrm_ver_get response */
+ /* First segment should be hwrm_ver_get response.
+ * For the hwrm_ver_get response, Component id = 2 and Segment id = 0.
+ */
*dump_len = seg_hdr_len + ver_get_resp_len;
if (buf) {
bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
- 0, 0, 0);
+ 0, 0, 0, BNXT_VER_GET_COMP_ID, 0);
memcpy(buf + offset, &seg_hdr, seg_hdr_len);
offset += seg_hdr_len;
memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
offset += ver_get_resp_len;
}
+ if (dump_type == BNXT_DUMP_DRIVER) {
+ u32 drv_len, segs = 0;
+
+ drv_len = bnxt_get_ctx_coredump(bp, buf, offset, &segs);
+ *dump_len += drv_len;
+ offset += drv_len;
+ if (buf)
+ coredump.total_segs += segs;
+ goto err;
+ }
+
+ seg_record_len = sizeof(*seg_record);
rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
if (rc) {
netdev_err(bp->dev, "Failed to get coredump segment list\n");
@@ -346,7 +473,7 @@ next_seg:
end = jiffies;
duration = jiffies_to_msecs(end - start);
bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
- rc, duration, 0);
+ rc, duration, 0, 0, 0);
if (buf) {
/* Write segment header into the buffer */
@@ -442,7 +569,7 @@ int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len)
else
return -EOPNOTSUPP;
} else {
- return __bnxt_get_coredump(bp, buf, dump_len);
+ return __bnxt_get_coredump(bp, dump_type, buf, dump_len);
}
}
@@ -512,9 +639,12 @@ u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type)
return bp->fw_crash_len;
}
- if (bnxt_hwrm_get_dump_len(bp, dump_type, &len)) {
- if (dump_type != BNXT_DUMP_CRASH)
- __bnxt_get_coredump(bp, NULL, &len);
+ if (dump_type != BNXT_DUMP_DRIVER) {
+ if (!bnxt_hwrm_get_dump_len(bp, dump_type, &len))
+ return len;
}
+ if (dump_type != BNXT_DUMP_CRASH)
+ __bnxt_get_coredump(bp, dump_type, NULL, &len);
+
return len;
}
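__bnxt_get_coredump() keeps the driver's two-pass convention: called with
buf == NULL it only accumulates *dump_len, and the caller then allocates and
calls again to fill. A compilable model of that calling pattern (build_dump()
is a hypothetical stand-in):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int build_dump(void *buf, uint32_t *len)
{
	static const char seg0[] = "version-segment";
	static const char seg1[] = "context-segment";

	*len = sizeof(seg0) + sizeof(seg1);
	if (buf) {
		memcpy(buf, seg0, sizeof(seg0));
		memcpy((char *)buf + sizeof(seg0), seg1, sizeof(seg1));
	}
	return 0;
}

int main(void)
{
	uint32_t len = 0;
	void *buf;

	build_dump(NULL, &len);		/* pass 1: size only */
	buf = malloc(len);
	if (!buf)
		return 1;
	build_dump(buf, &len);		/* pass 2: fill */
	printf("dump is %u bytes\n", len);
	free(buf);
	return 0;
}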
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
index a76d5c281413..d1cd6387f3ab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
@@ -68,11 +68,49 @@ struct bnxt_coredump_record {
__le16 rsvd3[313];
};
+struct bnxt_driver_segment_record {
+ __le32 max_entries;
+ __le32 entry_size;
+ __le32 offset;
+ __u8 wrapped:1;
+ __u8 unused[3];
+};
+
+#define BNXT_VER_GET_COMP_ID 2
+#define BNXT_DRV_COMP_ID 0xd
+
+#define BNXT_CTX_MEM_SEG_ID_START 0x200
+
+#define BNXT_CTX_MEM_SEG_QP (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_QP)
+#define BNXT_CTX_MEM_SEG_SRQ (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_SRQ)
+#define BNXT_CTX_MEM_SEG_CQ (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_CQ)
+#define BNXT_CTX_MEM_SEG_VNIC (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_VNIC)
+#define BNXT_CTX_MEM_SEG_STAT (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_STAT)
+#define BNXT_CTX_MEM_SEG_STQM (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_STQM)
+#define BNXT_CTX_MEM_SEG_FTQM (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_FTQM)
+#define BNXT_CTX_MEM_SEG_MRAV (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_MRAV)
+#define BNXT_CTX_MEM_SEG_TIM (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_TIM)
+
+#define BNXT_CTX_MEM_SEG_SRT 0x1
+#define BNXT_CTX_MEM_SEG_SRT2 0x2
+#define BNXT_CTX_MEM_SEG_CRT 0x3
+#define BNXT_CTX_MEM_SEG_CRT2 0x4
+#define BNXT_CTX_MEM_SEG_RIGP0 0x5
+#define BNXT_CTX_MEM_SEG_L2HWRM 0x6
+#define BNXT_CTX_MEM_SEG_REHWRM 0x7
+#define BNXT_CTX_MEM_SEG_CA0 0x8
+#define BNXT_CTX_MEM_SEG_CA1 0x9
+#define BNXT_CTX_MEM_SEG_CA2 0xa
+#define BNXT_CTX_MEM_SEG_RIGP1 0xb
+
#define BNXT_CRASH_DUMP_LEN (8 << 20)
#define COREDUMP_LIST_BUF_LEN 2048
#define COREDUMP_RETRIEVE_BUF_LEN 4096
+#define BNXT_SEG_HDR_LEN sizeof(struct bnxt_coredump_segment_hdr)
+#define BNXT_SEG_RCD_LEN sizeof(struct bnxt_driver_segment_record)
+
struct bnxt_coredump {
void *data;
int data_size;
@@ -118,6 +156,11 @@ struct hwrm_dbg_cmn_output {
#define BNXT_DBG_CR_DUMP_MDM_CFG_DDR \
DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR
+void bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
+ struct bnxt_coredump_segment_hdr *seg_hdr,
+ struct coredump_segment_record *seg_rec,
+ u32 seg_len, int status, u32 duration,
+ u32 instance, u32 comp_id, u32 seg_id);
int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len);
int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len);
u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type);
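Each context-memory segment is emitted as a fixed header, an optional driver
record for trace-type segments, then the raw data, so the length bookkeeping in
bnxt_get_ctx_coredump() reduces to the arithmetic below (the two sizes are
placeholders for BNXT_SEG_HDR_LEN and BNXT_SEG_RCD_LEN):

#include <stdbool.h>
#include <stddef.h>

#define SEG_HDR_LEN 24u	/* placeholder for sizeof(struct bnxt_coredump_segment_hdr) */
#define SEG_RCD_LEN 16u	/* placeholder for sizeof(struct bnxt_driver_segment_record) */

size_t seg_total_len(size_t data_len, bool has_trace_record)
{
	/* the driver record counts as part of the segment payload */
	size_t payload = data_len + (has_trace_record ? SEG_RCD_LEN : 0);

	return SEG_HDR_LEN + payload;
}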
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 4cb0fabf977e..ef8288fd68f4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -463,7 +463,7 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
break;
}
bnxt_cancel_reservations(bp, false);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, false);
break;
}
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index f71cc8188b4e..2f4987ec7464 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -705,112 +705,105 @@ skip_ring_stats:
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnxt *bp = netdev_priv(dev);
- static const char * const *str;
u32 i, j, num_str;
+ const char *str;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < bp->cp_nr_rings; i++) {
- if (is_rx_ring(bp, i)) {
- num_str = NUM_RING_RX_HW_STATS;
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_ring_rx_stats_str[j]);
- buf += ETH_GSTRING_LEN;
+ if (is_rx_ring(bp, i))
+ for (j = 0; j < NUM_RING_RX_HW_STATS; j++) {
+ str = bnxt_ring_rx_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
}
- }
- if (is_tx_ring(bp, i)) {
- num_str = NUM_RING_TX_HW_STATS;
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_ring_tx_stats_str[j]);
- buf += ETH_GSTRING_LEN;
+ if (is_tx_ring(bp, i))
+ for (j = 0; j < NUM_RING_TX_HW_STATS; j++) {
+ str = bnxt_ring_tx_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
}
- }
num_str = bnxt_get_num_tpa_ring_stats(bp);
if (!num_str || !is_rx_ring(bp, i))
goto skip_tpa_stats;
if (bp->max_tpa_v2)
- str = bnxt_ring_tpa2_stats_str;
+ for (j = 0; j < num_str; j++) {
+ str = bnxt_ring_tpa2_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
+ }
else
- str = bnxt_ring_tpa_stats_str;
-
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i, str[j]);
- buf += ETH_GSTRING_LEN;
- }
-skip_tpa_stats:
- if (is_rx_ring(bp, i)) {
- num_str = NUM_RING_RX_SW_STATS;
for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_rx_sw_stats_str[j]);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_ring_tpa_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
}
+skip_tpa_stats:
+ if (is_rx_ring(bp, i))
+ for (j = 0; j < NUM_RING_RX_SW_STATS; j++) {
+ str = bnxt_rx_sw_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
+ }
+ for (j = 0; j < NUM_RING_CMN_SW_STATS; j++) {
+ str = bnxt_cmn_sw_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i, str);
}
- num_str = NUM_RING_CMN_SW_STATS;
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_cmn_sw_stats_str[j]);
- buf += ETH_GSTRING_LEN;
- }
- }
- for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++) {
- strscpy(buf, bnxt_ring_err_stats_arr[i], ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
}
+ for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++)
+ ethtool_puts(&buf, bnxt_ring_err_stats_arr[i]);
- if (bp->flags & BNXT_FLAG_PORT_STATS) {
+ if (bp->flags & BNXT_FLAG_PORT_STATS)
for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
- strcpy(buf, bnxt_port_stats_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_port_stats_arr[i].string;
+ ethtool_puts(&buf, str);
}
- }
+
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
u32 len;
len = min_t(u32, bp->fw_rx_stats_ext_size,
ARRAY_SIZE(bnxt_port_stats_ext_arr));
for (i = 0; i < len; i++) {
- strcpy(buf, bnxt_port_stats_ext_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_port_stats_ext_arr[i].string;
+ ethtool_puts(&buf, str);
}
+
len = min_t(u32, bp->fw_tx_stats_ext_size,
ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
for (i = 0; i < len; i++) {
- strcpy(buf,
- bnxt_tx_port_stats_ext_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_tx_port_stats_ext_arr[i].string;
+ ethtool_puts(&buf, str);
}
+
if (bp->pri2cos_valid) {
for (i = 0; i < 8; i++) {
- strcpy(buf,
- bnxt_rx_bytes_pri_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_rx_bytes_pri_arr[i].string;
+ ethtool_puts(&buf, str);
}
+
for (i = 0; i < 8; i++) {
- strcpy(buf,
- bnxt_rx_pkts_pri_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_rx_pkts_pri_arr[i].string;
+ ethtool_puts(&buf, str);
}
+
for (i = 0; i < 8; i++) {
- strcpy(buf,
- bnxt_tx_bytes_pri_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_tx_bytes_pri_arr[i].string;
+ ethtool_puts(&buf, str);
}
+
for (i = 0; i < 8; i++) {
- strcpy(buf,
- bnxt_tx_pkts_pri_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_tx_pkts_pri_arr[i].string;
+ ethtool_puts(&buf, str);
}
}
}
break;
case ETH_SS_TEST:
if (bp->num_tests)
- memcpy(buf, bp->test_info->string,
- bp->num_tests * ETH_GSTRING_LEN);
+ for (i = 0; i < bp->num_tests; i++)
+ ethtool_puts(&buf, bp->test_info->string[i]);
break;
default:
netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
@@ -1131,14 +1124,15 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
fkeys = &fltr->fkeys;
fmasks = &fltr->fmasks;
if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
- if (fkeys->basic.ip_proto == IPPROTO_ICMP ||
- fkeys->basic.ip_proto == IPPROTO_RAW) {
+ if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) {
fs->flow_type = IP_USER_FLOW;
fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
- if (fkeys->basic.ip_proto == IPPROTO_ICMP)
- fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
- else
- fs->h_u.usr_ip4_spec.proto = IPPROTO_RAW;
+ fs->h_u.usr_ip4_spec.proto = BNXT_IP_PROTO_WILDCARD;
+ fs->m_u.usr_ip4_spec.proto = 0;
+ } else if (fkeys->basic.ip_proto == IPPROTO_ICMP) {
+ fs->flow_type = IP_USER_FLOW;
+ fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK;
} else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
fs->flow_type = TCP_V4_FLOW;
@@ -1160,13 +1154,13 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst;
}
} else {
- if (fkeys->basic.ip_proto == IPPROTO_ICMPV6 ||
- fkeys->basic.ip_proto == IPPROTO_RAW) {
+ if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) {
fs->flow_type = IPV6_USER_FLOW;
- if (fkeys->basic.ip_proto == IPPROTO_ICMPV6)
- fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6;
- else
- fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_RAW;
+ fs->h_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_WILDCARD;
+ fs->m_u.usr_ip6_spec.l4_proto = 0;
+ } else if (fkeys->basic.ip_proto == IPPROTO_ICMPV6) {
+ fs->flow_type = IPV6_USER_FLOW;
+ fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6;
fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK;
} else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
fs->flow_type = TCP_V6_FLOW;
@@ -1289,10 +1283,12 @@ static int bnxt_add_l2_cls_rule(struct bnxt *bp,
static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec,
struct ethtool_usrip4_spec *ip_mask)
{
+ u8 mproto = ip_mask->proto;
+ u8 sproto = ip_spec->proto;
+
if (ip_mask->l4_4_bytes || ip_mask->tos ||
ip_spec->ip_ver != ETH_RX_NFC_IP4 ||
- ip_mask->proto != BNXT_IP_PROTO_FULL_MASK ||
- (ip_spec->proto != IPPROTO_RAW && ip_spec->proto != IPPROTO_ICMP))
+ (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMP)))
return false;
return true;
}
@@ -1300,10 +1296,11 @@ static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec,
static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec,
struct ethtool_usrip6_spec *ip_mask)
{
+ u8 mproto = ip_mask->l4_proto;
+ u8 sproto = ip_spec->l4_proto;
+
if (ip_mask->l4_4_bytes || ip_mask->tclass ||
- ip_mask->l4_proto != BNXT_IP_PROTO_FULL_MASK ||
- (ip_spec->l4_proto != IPPROTO_RAW &&
- ip_spec->l4_proto != IPPROTO_ICMPV6))
+ (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMPV6)))
return false;
return true;
}
@@ -1357,7 +1354,8 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec;
struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec;
- fkeys->basic.ip_proto = ip_spec->proto;
+ fkeys->basic.ip_proto = ip_mask->proto ? ip_spec->proto
+ : BNXT_IP_PROTO_WILDCARD;
fkeys->basic.n_proto = htons(ETH_P_IP);
fkeys->addrs.v4addrs.src = ip_spec->ip4src;
fmasks->addrs.v4addrs.src = ip_mask->ip4src;
@@ -1388,7 +1386,8 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec;
struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec;
- fkeys->basic.ip_proto = ip_spec->l4_proto;
+ fkeys->basic.ip_proto = ip_mask->l4_proto ? ip_spec->l4_proto
+ : BNXT_IP_PROTO_WILDCARD;
fkeys->basic.n_proto = htons(ETH_P_IPV6);
fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
@@ -4985,8 +4984,8 @@ static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
{
struct bnxt *bp = netdev_priv(dev);
- if (dump->flag > BNXT_DUMP_CRASH) {
- netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
+ if (dump->flag > BNXT_DUMP_DRIVER) {
+ netdev_info(dev, "Supports only Live(0), Crash(1), Driver(2) dumps.\n");
return -EINVAL;
}
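The get_strings rework above swaps manual "sprintf(); buf += ETH_GSTRING_LEN"
bookkeeping for ethtool_puts()/ethtool_sprintf(), which fill one fixed-width
string slot and advance the cursor in a single call. A userspace model of the
ethtool_puts() semantics being relied on (GSTRING_LEN stands in for
ETH_GSTRING_LEN):

#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32	/* stand-in for ETH_GSTRING_LEN */

/* copy str into one fixed-width slot, then advance past the slot */
void gstring_puts(char **buf, const char *str)
{
	strncpy(*buf, str, GSTRING_LEN - 1);
	(*buf)[GSTRING_LEN - 1] = '\0';
	*buf += GSTRING_LEN;
}

int main(void)
{
	char table[3 * GSTRING_LEN] = { 0 };
	char *cur = table;

	gstring_puts(&cur, "rx_ucast_packets");
	gstring_puts(&cur, "rx_mcast_packets");
	gstring_puts(&cur, "rx_bcast_packets");
	printf("%s / %s\n", table, table + GSTRING_LEN);
	return 0;
}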
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index e2ee030237d4..33b86ede1ce5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -44,6 +44,7 @@ struct bnxt_led_cfg {
#define BNXT_PXP_REG_LEN 0x3110
#define BNXT_IP_PROTO_FULL_MASK 0xFF
+#define BNXT_IP_PROTO_WILDCARD 0x0
extern const struct ethtool_ops bnxt_ethtool_ops;
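With BNXT_IP_PROTO_WILDCARD, an all-zero user protocol mask now means "match
any L4 protocol", while a BNXT_IP_PROTO_FULL_MASK mask still requires an exact
ICMP match. The protocol portion of bnxt_verify_ntuple_ip4_flow() distills to
roughly this (a sketch of the mask logic only, not the full flow validation):

#include <stdbool.h>
#include <stdint.h>

#define IP_PROTO_FULL_MASK 0xFF
#define IP_PROTO_ICMP      1

bool proto_rule_ok(uint8_t spec, uint8_t mask)
{
	if (!mask)
		return true;			/* wildcard: any protocol */
	return mask == IP_PROTO_FULL_MASK &&	/* only full masks allowed */
	       spec == IP_PROTO_ICMP;		/* and only exact ICMP */
}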
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index f8ef6f1a1964..5f8de1634378 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -42,6 +42,10 @@ struct hwrm_resp_hdr {
#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
#define TLV_TYPE_QUERY_ROCE_CC_GEN2 0x6UL
#define TLV_TYPE_MODIFY_ROCE_CC_GEN2 0x7UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1_EXT 0x8UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1_EXT 0x9UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2_EXT 0xaUL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2_EXT 0xbUL
#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
@@ -509,6 +513,7 @@ struct cmd_nums {
#define HWRM_TFC_IF_TBL_GET 0x399UL
#define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
#define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
+ #define HWRM_TFC_GLOBAL_ID_FREE 0x39cUL
#define HWRM_SV 0x400UL
#define HWRM_DBG_SERDES_TEST 0xff0eUL
#define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
@@ -624,8 +629,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 68
-#define HWRM_VERSION_STR "1.10.3.68"
+#define HWRM_VERSION_RSVD 85
+#define HWRM_VERSION_STR "1.10.3.85"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -1302,6 +1307,43 @@ struct hwrm_async_event_cmpl_error_report {
#define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_SFT 0
};
+/* hwrm_async_event_cmpl_dbg_buf_producer (size:128b/16B) */
+struct hwrm_async_event_cmpl_dbg_buf_producer {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT_TRACE 0x0UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT2_TRACE 0x1UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT_TRACE 0x2UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT2_TRACE 0x3UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP0_TRACE 0x4UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_L2_HWRM_TRACE 0x5UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ROCE_HWRM_TRACE 0x6UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA0_TRACE 0x7UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA1_TRACE 0x8UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA2_TRACE 0x9UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP1_TRACE 0xaUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_AFM_KONG_HWRM_TRACE 0xbUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_AFM_KONG_HWRM_TRACE
+};
+
/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
struct hwrm_async_event_cmpl_hwrm_error {
__le16 type;
@@ -1864,7 +1906,10 @@ struct hwrm_func_qcaps_output {
__le32 roce_vf_max_gid;
__le32 flags_ext3;
#define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
- u8 unused_3[7];
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL
+ __le16 max_roce_vfs;
+ u8 unused_3[5];
u8 valid;
};
@@ -2253,17 +2298,18 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS2_KTLS_KEY_CTX_ASSETS_TEST 0x1UL
#define FUNC_CFG_REQ_FLAGS2_QUIC_KEY_CTX_ASSETS_TEST 0x2UL
__le32 enables2;
- #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
- #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
- #define FUNC_CFG_REQ_ENABLES2_QUIC_TX_KEY_CTXS 0x4UL
- #define FUNC_CFG_REQ_ENABLES2_QUIC_RX_KEY_CTXS 0x8UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF 0x10UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF 0x20UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF 0x40UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF 0x80UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF 0x100UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF 0x200UL
- #define FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG 0x400UL
+ #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
+ #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_TX_KEY_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_RX_KEY_CTXS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF 0x10UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF 0x20UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF 0x40UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF 0x80UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF 0x100UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF 0x200UL
+ #define FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG 0x400UL
+ #define FUNC_CFG_REQ_ENABLES2_PHYSICAL_SLOT_NUMBER 0x800UL
u8 port_kdnet_mode;
#define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
#define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
@@ -2281,7 +2327,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL
#define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL
#define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB
- u8 unused_1[2];
+ __le16 physical_slot_number;
__le32 num_ktls_tx_key_ctxs;
__le32 num_ktls_rx_key_ctxs;
__le32 num_quic_tx_key_ctxs;
@@ -3683,7 +3729,7 @@ struct hwrm_func_ptp_ext_qcfg_output {
u8 valid;
};
-/* hwrm_func_backing_store_cfg_v2_input (size:448b/56B) */
+/* hwrm_func_backing_store_cfg_v2_input (size:512b/64B) */
struct hwrm_func_backing_store_cfg_v2_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3721,6 +3767,7 @@ struct hwrm_func_backing_store_cfg_v2_input {
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
__le16 instance;
@@ -3752,6 +3799,9 @@ struct hwrm_func_backing_store_cfg_v2_input {
__le32 split_entry_1;
__le32 split_entry_2;
__le32 split_entry_3;
+ __le32 enables;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET 0x1UL
+ __le32 next_bs_offset;
};
/* hwrm_func_backing_store_cfg_v2_output (size:128b/16B) */
@@ -3802,6 +3852,7 @@ struct hwrm_func_backing_store_qcfg_v2_input {
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
__le16 instance;
@@ -3963,6 +4014,7 @@ struct hwrm_func_backing_store_qcaps_v2_input {
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE 0x27UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE 0x28UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
u8 rsvd[6];
@@ -4005,6 +4057,7 @@ struct hwrm_func_backing_store_qcaps_v2_output {
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA1_TRACE 0x27UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA2_TRACE 0x28UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
__le16 entry_size;
@@ -4014,6 +4067,8 @@ struct hwrm_func_backing_store_qcaps_v2_output {
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE 0x10UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_BIN_DBG_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_NEXT_BS_OFFSET 0x40UL
__le32 instance_bit_map;
u8 ctx_init_value;
u8 ctx_init_offset;
@@ -4034,7 +4089,8 @@ struct hwrm_func_backing_store_qcaps_v2_output {
__le32 split_entry_1;
__le32 split_entry_2;
__le32 split_entry_3;
- u8 rsvd3[3];
+ __le16 max_instance_count;
+ u8 rsvd3;
u8 valid;
};
@@ -4535,11 +4591,12 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8
u8 media_type;
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE 0x4UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE
u8 xcvr_pkg_type;
#define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
#define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
@@ -4654,7 +4711,8 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
u8 link_down_reason;
- #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION 0x2UL
__le16 support_speeds2;
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB 0x1UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB 0x2UL
@@ -9241,20 +9299,22 @@ struct hwrm_fw_set_time_output {
/* hwrm_struct_hdr (size:128b/16B) */
struct hwrm_struct_hdr {
__le16 struct_id;
- #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
- #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
- #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
- #define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL
- #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
- #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
- #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
- #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
- #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_MSIX_PER_VF
+ #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
+ #define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL
+ #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
+ #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
+ #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
+ #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
+ #define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_COUNT 0x12cUL
+ #define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND 0x12dUL
+ #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND
__le16 len;
u8 version;
u8 count;
@@ -9756,6 +9816,7 @@ struct hwrm_dbg_qcaps_output {
#define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_DDR 0x10UL
#define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_CAPTURE 0x20UL
#define DBG_QCAPS_RESP_FLAGS_PTRACE 0x40UL
+ #define DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED 0x80UL
u8 unused_1[3];
u8 valid;
};
@@ -9996,6 +10057,43 @@ struct hwrm_dbg_ring_info_get_output {
u8 valid;
};
+/* hwrm_dbg_log_buffer_flush_input (size:192b/24B) */
+struct hwrm_dbg_log_buffer_flush_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE 0x0UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE 0x1UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE 0x2UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE 0x3UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE 0x4UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE 0x5UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE 0x6UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE 0x7UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE 0x8UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE 0x9UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE 0xaUL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE 0xbUL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_LAST DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE
+ u8 unused_1[2];
+ __le32 flags;
+ #define DBG_LOG_BUFFER_FLUSH_REQ_FLAGS_FLUSH_ALL_BUFFERS 0x1UL
+};
+
+/* hwrm_dbg_log_buffer_flush_output (size:128b/16B) */
+struct hwrm_dbg_log_buffer_flush_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 current_buffer_offset;
+ u8 unused_1[3];
+ u8 valid;
+};
+
/* hwrm_nvm_read_input (size:320b/40B) */
struct hwrm_nvm_read_input {
__le16 req_type;
@@ -10080,6 +10178,7 @@ struct hwrm_nvm_write_input {
#define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
#define NVM_WRITE_REQ_FLAGS_BATCH_MODE 0x2UL
#define NVM_WRITE_REQ_FLAGS_BATCH_LAST 0x4UL
+ #define NVM_WRITE_REQ_FLAGS_SKIP_CRID_CHECK 0x8UL
__le32 dir_item_length;
__le32 offset;
__le32 len;
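Decoding the new dbg_buf_producer async event is plain mask-and-shift work:
the trace type sits in the low 16 bits of event_data1 and the producer offset
fills all of event_data2, per the masks defined above. For example:

#include <stdint.h>

#define DBG_BUF_PRODUCER_DATA1_TYPE_MASK 0xffffu
#define DBG_BUF_PRODUCER_DATA1_TYPE_SFT  0

uint16_t dbg_buf_event_type(uint32_t event_data1)
{
	return (event_data1 & DBG_BUF_PRODUCER_DATA1_TYPE_MASK) >>
	       DBG_BUF_PRODUCER_DATA1_TYPE_SFT;	/* shift is 0, kept for symmetry */
}

uint32_t dbg_buf_event_offset(uint32_t event_data2)
{
	return event_data2;	/* CURR_OFF mask covers the whole word */
}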
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index fa514be87650..075ccd589845 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -67,15 +67,15 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info,
if (BNXT_PTP_USE_RTC(ptp->bp))
return bnxt_ptp_cfg_settime(ptp->bp, ns);
- spin_lock_irqsave(&ptp->ptp_lock, flags);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_init(&ptp->tc, &ptp->cc, ns);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
return 0;
}
/* Caller holds ptp_lock */
-static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
- u64 *ns)
+static int __bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
+ u64 *ns)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
u32 high_before, high_now, low;
@@ -98,17 +98,50 @@ static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
return 0;
}
-static void bnxt_ptp_get_current_time(struct bnxt *bp)
+static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
+ u64 *ns)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
unsigned long flags;
+ int rc;
+
+ /* We have to serialize reg access and FW reset */
+ read_seqlock_excl_irqsave(&ptp->ptp_lock, flags);
+ rc = __bnxt_refclk_read(bp, sts, ns);
+ read_sequnlock_excl_irqrestore(&ptp->ptp_lock, flags);
+ return rc;
+}
+
+static int bnxt_refclk_read_low(struct bnxt *bp, struct ptp_system_timestamp *sts,
+ u32 *low)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ unsigned long flags;
+
+ /* We have to serialize reg access and FW reset */
+ read_seqlock_excl_irqsave(&ptp->ptp_lock, flags);
+
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ read_sequnlock_excl_irqrestore(&ptp->ptp_lock, flags);
+ return -EIO;
+ }
+
+ ptp_read_system_prets(sts);
+ *low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ ptp_read_system_postts(sts);
+
+ read_sequnlock_excl_irqrestore(&ptp->ptp_lock, flags);
+ return 0;
+}
+
+static void bnxt_ptp_get_current_time(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (!ptp)
return;
- spin_lock_irqsave(&ptp->ptp_lock, flags);
- WRITE_ONCE(ptp->old_time, ptp->current_time);
+ WRITE_ONCE(ptp->old_time, ptp->current_time >> BNXT_HI_TIMER_SHIFT);
bnxt_refclk_read(bp, NULL, &ptp->current_time);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
}
static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts,
@@ -151,36 +184,32 @@ static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info,
{
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
- unsigned long flags;
u64 ns, cycles;
+ u32 low;
int rc;
- spin_lock_irqsave(&ptp->ptp_lock, flags);
- rc = bnxt_refclk_read(ptp->bp, sts, &cycles);
- if (rc) {
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ rc = bnxt_refclk_read_low(ptp->bp, sts, &low);
+ if (rc)
return rc;
- }
- ns = timecounter_cyc2time(&ptp->tc, cycles);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+
+ cycles = bnxt_extend_cycles_32b_to_48b(ptp, low);
+ ns = bnxt_timecounter_cyc2time(ptp, cycles);
*ts = ns_to_timespec64(ns);
return 0;
}
-/* Caller holds ptp_lock */
void bnxt_ptp_update_current_time(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
bnxt_refclk_read(ptp->bp, NULL, &ptp->current_time);
- WRITE_ONCE(ptp->old_time, ptp->current_time);
+ WRITE_ONCE(ptp->old_time, ptp->current_time >> BNXT_HI_TIMER_SHIFT);
}
static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta)
{
struct hwrm_port_mac_cfg_input *req;
- unsigned long flags;
int rc;
rc = hwrm_req_init(ptp->bp, req, HWRM_PORT_MAC_CFG);
@@ -194,9 +223,7 @@ static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta)
if (rc) {
netdev_err(ptp->bp->dev, "ptp adjphc failed. rc = %x\n", rc);
} else {
- spin_lock_irqsave(&ptp->ptp_lock, flags);
bnxt_ptp_update_current_time(ptp->bp);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
}
return rc;
@@ -211,9 +238,9 @@ static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
if (BNXT_PTP_USE_RTC(ptp->bp))
return bnxt_ptp_adjphc(ptp, delta);
- spin_lock_irqsave(&ptp->ptp_lock, flags);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_adjtime(&ptp->tc, delta);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
return 0;
}
@@ -246,10 +273,10 @@ static int bnxt_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
if (!BNXT_MH(bp))
return bnxt_ptp_adjfine_rtc(bp, scaled_ppm);
- spin_lock_irqsave(&ptp->ptp_lock, flags);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_read(&ptp->tc);
ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
return 0;
}
@@ -257,13 +284,10 @@ void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct ptp_clock_event event;
- unsigned long flags;
u64 ns, pps_ts;
pps_ts = EVENT_PPS_TS(data2, data1);
- spin_lock_irqsave(&ptp->ptp_lock, flags);
- ns = timecounter_cyc2time(&ptp->tc, pps_ts);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ ns = bnxt_timecounter_cyc2time(ptp, pps_ts);
switch (EVENT_DATA2_PPS_EVENT_TYPE(data2)) {
case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL:
@@ -400,17 +424,13 @@ static int bnxt_get_target_cycles(struct bnxt_ptp_cfg *ptp, u64 target_ns,
{
u64 cycles_now;
u64 nsec_now, nsec_delta;
- unsigned long flags;
int rc;
- spin_lock_irqsave(&ptp->ptp_lock, flags);
rc = bnxt_refclk_read(ptp->bp, NULL, &cycles_now);
- if (rc) {
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ if (rc)
return rc;
- }
- nsec_now = timecounter_cyc2time(&ptp->tc, cycles_now);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+
+ nsec_now = bnxt_timecounter_cyc2time(ptp, cycles_now);
nsec_delta = target_ns - nsec_now;
*cycles_delta = div64_u64(nsec_delta << ptp->cc.shift, ptp->cc.mult);
@@ -687,7 +707,7 @@ static u64 bnxt_cc_read(const struct cyclecounter *cc)
struct bnxt_ptp_cfg *ptp = container_of(cc, struct bnxt_ptp_cfg, cc);
u64 ns = 0;
- bnxt_refclk_read(ptp->bp, NULL, &ns);
+ __bnxt_refclk_read(ptp->bp, NULL, &ns);
return ns;
}
@@ -697,7 +717,6 @@ static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot)
struct skb_shared_hwtstamps timestamp;
struct bnxt_ptp_tx_req *txts_req;
unsigned long now = jiffies;
- unsigned long flags;
u64 ts = 0, ns = 0;
u32 tmo = 0;
int rc;
@@ -711,9 +730,7 @@ static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot)
tmo, slot);
if (!rc) {
memset(&timestamp, 0, sizeof(timestamp));
- spin_lock_irqsave(&ptp->ptp_lock, flags);
- ns = timecounter_cyc2time(&ptp->tc, ts);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ ns = bnxt_timecounter_cyc2time(ptp, ts);
timestamp.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(txts_req->tx_skb, &timestamp);
ptp->stats.ts_pkts++;
@@ -767,9 +784,9 @@ next_slot:
bnxt_ptp_get_current_time(bp);
ptp->next_period = now + HZ;
if (time_after_eq(now, ptp->next_overflow_check)) {
- spin_lock_irqsave(&ptp->ptp_lock, flags);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_read(&ptp->tc);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD;
}
if (rc == -EAGAIN)
@@ -808,15 +825,11 @@ void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod)
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- u64 time;
if (!ptp)
return -ENODEV;
- BNXT_READ_TIME64(ptp, time, ptp->old_time);
- *ts = (time & BNXT_HI_TIMER_MASK) | pkt_ts;
- if (pkt_ts < (time & BNXT_LO_TIMER_MASK))
- *ts += BNXT_LO_TIMER_MASK + 1;
+ *ts = bnxt_extend_cycles_32b_to_48b(ptp, pkt_ts);
return 0;
}
@@ -829,7 +842,6 @@ void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
u32 opaque = tscmp->tx_ts_cmp_opaque;
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_tx_bd *tx_buf;
- unsigned long flags;
u64 ts, ns;
u16 cons;
@@ -844,9 +856,7 @@ void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
le32_to_cpu(tscmp->tx_ts_cmp_flags_type),
le32_to_cpu(tscmp->tx_ts_cmp_errors_v));
} else {
- spin_lock_irqsave(&ptp->ptp_lock, flags);
- ns = timecounter_cyc2time(&ptp->tc, ts);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ ns = bnxt_timecounter_cyc2time(ptp, ts);
timestamp.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(tx_buf->skb, &timestamp);
}
@@ -955,6 +965,7 @@ static bool bnxt_pps_config_ok(struct bnxt *bp)
static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ unsigned long flags;
if (!ptp->ptp_clock) {
memset(&ptp->cc, 0, sizeof(ptp->cc));
@@ -971,8 +982,11 @@ static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc)
}
ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
}
- if (init_tc)
+ if (init_tc) {
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
+ }
}
/* Caller holds ptp_lock */
@@ -1005,9 +1019,9 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
if (rc)
return rc;
}
- spin_lock_irqsave(&bp->ptp_cfg->ptp_lock, flags);
+ write_seqlock_irqsave(&bp->ptp_cfg->ptp_lock, flags);
bnxt_ptp_rtc_timecounter_init(bp->ptp_cfg, ns);
- spin_unlock_irqrestore(&bp->ptp_cfg->ptp_lock, flags);
+ write_sequnlock_irqrestore(&bp->ptp_cfg->ptp_lock, flags);
return 0;
}
@@ -1042,7 +1056,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
bnxt_ptp_free(bp);
WRITE_ONCE(ptp->tx_avail, BNXT_MAX_TX_TS);
- spin_lock_init(&ptp->ptp_lock);
+ seqlock_init(&ptp->ptp_lock);
spin_lock_init(&ptp->ptp_tx_lock);
if (BNXT_PTP_USE_RTC(bp)) {
@@ -1075,12 +1089,8 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
atomic64_set(&ptp->stats.ts_err, 0);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- unsigned long flags;
-
- spin_lock_irqsave(&ptp->ptp_lock, flags);
bnxt_refclk_read(bp, NULL, &ptp->current_time);
- WRITE_ONCE(ptp->old_time, ptp->current_time);
- spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+ WRITE_ONCE(ptp->old_time, ptp->current_time >> BNXT_HI_TIMER_SHIFT);
ptp_schedule_worker(ptp->ptp_clock, 0);
}
ptp->txts_tmo = BNXT_PTP_DFLT_TX_TMO;
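Converting ptp_lock from a spinlock to a seqlock lets hot readers such as
bnxt_timecounter_cyc2time() run lock-free, retrying only if a writer raced
them, while bnxt_refclk_read() still serializes against FW reset on the
exclusive read_seqlock_excl_* side. A simplified C11 userspace model of the
reader/writer protocol (it omits the finer fencing and lockdep annotations of
the kernel's seqlock_t):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct seq_clock {
	atomic_uint seq;		/* odd while a writer is mid-update */
	_Atomic uint64_t base_ns;
	_Atomic uint64_t mult;
};

uint64_t seq_clock_cyc2ns(struct seq_clock *c, uint64_t cycles)
{
	unsigned int seq;
	uint64_t ns;

	do {
		while ((seq = atomic_load(&c->seq)) & 1)
			;	/* writer in progress: spin */
		ns = atomic_load(&c->base_ns) + cycles * atomic_load(&c->mult);
	} while (atomic_load(&c->seq) != seq);	/* raced a writer: retry */

	return ns;
}

void seq_clock_update(struct seq_clock *c, uint64_t base_ns, uint64_t mult)
{
	atomic_fetch_add(&c->seq, 1);	/* enter write side (seq goes odd) */
	atomic_store(&c->base_ns, base_ns);
	atomic_store(&c->mult, mult);
	atomic_fetch_add(&c->seq, 1);	/* publish (seq even again) */
}

int main(void)
{
	struct seq_clock c = { .seq = 0 };

	seq_clock_update(&c, 1000, 2);
	printf("%llu ns\n", (unsigned long long)seq_clock_cyc2ns(&c, 21));
	return 0;
}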
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index f322466ecad3..c7851f8c971c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -21,6 +21,7 @@
#define BNXT_DEVCLK_FREQ 1000000
#define BNXT_LO_TIMER_MASK 0x0000ffffffffUL
#define BNXT_HI_TIMER_MASK 0xffff00000000UL
+#define BNXT_HI_TIMER_SHIFT 24
#define BNXT_PTP_DFLT_TX_TMO 1000 /* ms */
#define BNXT_PTP_QTS_TIMEOUT 1000
@@ -102,14 +103,15 @@ struct bnxt_ptp_cfg {
struct timecounter tc;
struct bnxt_pps pps_info;
/* serialize timecounter access */
- spinlock_t ptp_lock;
+ seqlock_t ptp_lock;
/* serialize ts tx request queuing */
spinlock_t ptp_tx_lock;
u64 current_time;
- u64 old_time;
unsigned long next_period;
unsigned long next_overflow_check;
u32 cmult;
+ /* cache of upper 24 bits of cyclecounter. 8 bits are used to check for roll-over */
+ u32 old_time;
/* a 23b shift cyclecounter will overflow in ~36 mins. Check overflow every 18 mins. */
#define BNXT_PHC_OVERFLOW_PERIOD (18 * 60 * HZ)
@@ -145,20 +147,6 @@ struct bnxt_ptp_cfg {
struct bnxt_ptp_stats stats;
};
-#if BITS_PER_LONG == 32
-#define BNXT_READ_TIME64(ptp, dst, src) \
-do { \
- unsigned long flags; \
- \
- spin_lock_irqsave(&(ptp)->ptp_lock, flags); \
- (dst) = (src); \
- spin_unlock_irqrestore(&(ptp)->ptp_lock, flags); \
-} while (0)
-#else
-#define BNXT_READ_TIME64(ptp, dst, src) \
- ((dst) = READ_ONCE(src))
-#endif
-
#define BNXT_PTP_INC_TX_AVAIL(ptp) \
do { \
spin_lock_bh(&(ptp)->ptp_tx_lock); \
@@ -182,4 +170,27 @@ void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns);
int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg);
int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg);
void bnxt_ptp_clear(struct bnxt *bp);
+static inline u64 bnxt_timecounter_cyc2time(struct bnxt_ptp_cfg *ptp, u64 ts)
+{
+ unsigned int seq;
+ u64 ns;
+
+ do {
+ seq = read_seqbegin(&ptp->ptp_lock);
+ ns = timecounter_cyc2time(&ptp->tc, ts);
+ } while (read_seqretry(&ptp->ptp_lock, seq));
+
+ return ns;
+}
+
+static inline u64 bnxt_extend_cycles_32b_to_48b(struct bnxt_ptp_cfg *ptp, u32 ts)
+{
+ u64 time, cycles;
+
+ time = (u64)READ_ONCE(ptp->old_time) << BNXT_HI_TIMER_SHIFT;
+ cycles = (time & BNXT_HI_TIMER_MASK) | ts;
+ if (ts < (time & BNXT_LO_TIMER_MASK))
+ cycles += BNXT_LO_TIMER_MASK + 1;
+ return cycles;
+}
#endif
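A worked example of the bnxt_extend_cycles_32b_to_48b() arithmetic: old_time
caches bits 47..24 of the last full counter read, the fresh 32-bit sample
supplies bits 31..0, and the upper 16 bits are bumped by one when the low word
has already rolled past the cached snapshot:

#include <assert.h>
#include <stdint.h>

#define LO_TIMER_MASK  0x0000ffffffffULL
#define HI_TIMER_MASK  0xffff00000000ULL
#define HI_TIMER_SHIFT 24

uint64_t extend_32b_to_48b(uint32_t old_time_hi24, uint32_t ts)
{
	uint64_t time = (uint64_t)old_time_hi24 << HI_TIMER_SHIFT;
	uint64_t cycles = (time & HI_TIMER_MASK) | ts;

	if (ts < (time & LO_TIMER_MASK))
		cycles += LO_TIMER_MASK + 1;	/* low 32 bits wrapped */
	return cycles;
}

int main(void)
{
	/* snapshot 0x000123456789: old_time caches bits 47..24 (0x000123) */
	uint32_t hi24 = (uint32_t)(0x000123456789ULL >> HI_TIMER_SHIFT);

	/* low word still ahead of the snapshot: no wrap correction */
	assert(extend_32b_to_48b(hi24, 0x6789abcd) == 0x00016789abcdULL);
	/* low word behind the snapshot: counter wrapped, bump bit 32 */
	assert(extend_32b_to_48b(hi24, 0x00000010) == 0x000200000010ULL);
	return 0;
}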
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index f7be886570d8..3e93f957430b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1144,14 +1144,14 @@ static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
u8 *data)
{
+ const char *str;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCMGENET_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcmgenet_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ str = bcmgenet_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
}
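
ethtool_puts() copies one string into the ethtool string table and advances the caller's cursor by a fixed ETH_GSTRING_LEN slot, replacing the open-coded memcpy-with-index pattern. A userspace model of that contract (model_ethtool_puts is a local stand-in; the kernel helper additionally guards the copy length):

#include <stdio.h>

#define ETH_GSTRING_LEN 32

/* model of the ethtool_puts() contract: write one NUL-terminated name,
 * then advance the caller's cursor to the next fixed-width slot
 */
static void model_ethtool_puts(unsigned char **data, const char *str)
{
	snprintf((char *)*data, ETH_GSTRING_LEN, "%s", str);
	*data += ETH_GSTRING_LEN;
}

int main(void)
{
	const char *names[] = { "rx_packets", "rx_bytes", "rx_errors" };
	unsigned char table[3 * ETH_GSTRING_LEN] = { 0 };
	unsigned char *cursor = table;
	int i;

	for (i = 0; i < 3; i++)
		model_ethtool_puts(&cursor, names[i]);
	for (i = 0; i < 3; i++)
		printf("slot %d: %s\n", i, (char *)(table + i * ETH_GSTRING_LEN));
	return 0;
}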
@@ -2405,7 +2405,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
if (ring->dim.use_dim) {
dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
ring->dim.bytes, &dim_sample);
- net_dim(&ring->dim.dim, dim_sample);
+ net_dim(&ring->dim.dim, &dim_sample);
}
return work_done;
@@ -4350,7 +4350,7 @@ MODULE_DEVICE_TABLE(acpi, genet_acpi_match);
static struct platform_driver bcmgenet_driver = {
.probe = bcmgenet_probe,
- .remove_new = bcmgenet_remove,
+ .remove = bcmgenet_remove,
.shutdown = bcmgenet_shutdown,
.driver = {
.name = "bcmgenet",
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index fcf8485f3446..30865fe03eeb 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2608,7 +2608,7 @@ static void sbmac_remove(struct platform_device *pldev)
static struct platform_driver sbmac_driver = {
.probe = sbmac_probe,
- .remove_new = sbmac_remove,
+ .remove = sbmac_remove,
.driver = {
.name = sbmac_string,
},
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 378815917741..01dfec115942 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3737,7 +3737,7 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
}
do {
- u32 *fw_data = (u32 *)(fw_hdr + 1);
+ __be32 *fw_data = (__be32 *)(fw_hdr + 1);
for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
write_op(tp, cpu_scratch_base +
(be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
@@ -7395,27 +7395,60 @@ tx_recovery:
static void tg3_napi_disable(struct tg3 *tp)
{
+ int txq_idx = tp->txq_cnt - 1;
+ int rxq_idx = tp->rxq_cnt - 1;
+ struct tg3_napi *tnapi;
int i;
- for (i = tp->irq_cnt - 1; i >= 0; i--)
- napi_disable(&tp->napi[i].napi);
+ for (i = tp->irq_cnt - 1; i >= 0; i--) {
+ tnapi = &tp->napi[i];
+ if (tnapi->tx_buffers) {
+ netif_queue_set_napi(tp->dev, txq_idx,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ txq_idx--;
+ }
+ if (tnapi->rx_rcb) {
+ netif_queue_set_napi(tp->dev, rxq_idx,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ rxq_idx--;
+ }
+ napi_disable(&tnapi->napi);
+ }
}
static void tg3_napi_enable(struct tg3 *tp)
{
+ int txq_idx = 0, rxq_idx = 0;
+ struct tg3_napi *tnapi;
int i;
- for (i = 0; i < tp->irq_cnt; i++)
- napi_enable(&tp->napi[i].napi);
+ for (i = 0; i < tp->irq_cnt; i++) {
+ tnapi = &tp->napi[i];
+ napi_enable(&tnapi->napi);
+ if (tnapi->tx_buffers) {
+ netif_queue_set_napi(tp->dev, txq_idx,
+ NETDEV_QUEUE_TYPE_TX,
+ &tnapi->napi);
+ txq_idx++;
+ }
+ if (tnapi->rx_rcb) {
+ netif_queue_set_napi(tp->dev, rxq_idx,
+ NETDEV_QUEUE_TYPE_RX,
+ &tnapi->napi);
+ rxq_idx++;
+ }
+ }
}
static void tg3_napi_init(struct tg3 *tp)
{
int i;
- netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
- for (i = 1; i < tp->irq_cnt; i++)
- netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
+ for (i = 0; i < tp->irq_cnt; i++) {
+ netif_napi_add(tp->dev, &tp->napi[i].napi,
+ i ? tg3_poll_msix : tg3_poll);
+ netif_napi_set_irq(&tp->napi[i].napi, tp->napi[i].irq_vec);
+ }
}
static void tg3_napi_fini(struct tg3 *tp)
@@ -11309,18 +11342,17 @@ static int tg3_request_irq(struct tg3 *tp, int irq_num)
else {
name = &tnapi->irq_lbl[0];
if (tnapi->tx_buffers && tnapi->rx_rcb)
- snprintf(name, IFNAMSIZ,
+ snprintf(name, sizeof(tnapi->irq_lbl),
"%s-txrx-%d", tp->dev->name, irq_num);
else if (tnapi->tx_buffers)
- snprintf(name, IFNAMSIZ,
+ snprintf(name, sizeof(tnapi->irq_lbl),
"%s-tx-%d", tp->dev->name, irq_num);
else if (tnapi->rx_rcb)
- snprintf(name, IFNAMSIZ,
+ snprintf(name, sizeof(tnapi->irq_lbl),
"%s-rx-%d", tp->dev->name, irq_num);
else
- snprintf(name, IFNAMSIZ,
+ snprintf(name, sizeof(tnapi->irq_lbl),
"%s-%d", tp->dev->name, irq_num);
- name[IFNAMSIZ-1] = 0;
}
if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
@@ -13093,12 +13125,16 @@ static int tg3_test_nvram(struct tg3 *tp)
/* Bootstrap checksum at offset 0x10 */
csum = calc_crc((unsigned char *) buf, 0x10);
- if (csum != le32_to_cpu(buf[0x10/4]))
+
+ /* The type of buf is __be32 *, but this value is __le32 */
+ if (csum != le32_to_cpu((__force __le32)buf[0x10 / 4]))
goto out;
/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
- csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
- if (csum != le32_to_cpu(buf[0xfc/4]))
+ csum = calc_crc((unsigned char *)&buf[0x74 / 4], 0x88);
+
+ /* The type of buf is __be32 *, but this value is __le32 */
+ if (csum != le32_to_cpu((__force __le32)buf[0xfc / 4]))
goto out;
kfree(buf);
@@ -17065,12 +17101,14 @@ static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
addr_ok = is_valid_ether_addr(addr);
}
if (!addr_ok) {
+ __be32 be_hi, be_lo;
+
/* Next, try NVRAM. */
if (!tg3_flag(tp, NO_NVRAM) &&
- !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
- !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
- memcpy(&addr[0], ((char *)&hi) + 2, 2);
- memcpy(&addr[2], (char *)&lo, sizeof(lo));
+ !tg3_nvram_read_be32(tp, mac_offset + 0, &be_hi) &&
+ !tg3_nvram_read_be32(tp, mac_offset + 4, &be_lo)) {
+ memcpy(&addr[0], ((char *)&be_hi) + 2, 2);
+ memcpy(&addr[2], (char *)&be_lo, sizeof(be_lo));
}
/* Finally just fetch it out of the MAC control regs. */
else {
@@ -18237,7 +18275,7 @@ done:
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot.
- * At this point, the card has exprienced a hard reset,
+ * At this point, the card has experienced a hard reset,
* followed by fixups by BIOS, and has its config space
* set up identically to what it was at cold boot.
*/
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index cf1b2b123c7e..b473f8014d9c 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -3033,7 +3033,7 @@ struct tg3_napi {
dma_addr_t rx_rcb_mapping;
dma_addr_t tx_desc_mapping;
- char irq_lbl[IFNAMSIZ];
+ char irq_lbl[IFNAMSIZ + 6 + 10]; /* name + "-txrx-" + %d */
unsigned int irq_vec;
};
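
The new buffer size is exact for the worst case the snprintf() calls above can produce: IFNAMSIZ is 16 including the NUL, so a name contributes at most 15 characters, "-txrx-" adds 6, and a non-negative int at most 10 digits. Since snprintf() always NUL-terminates within sizeof(tnapi->irq_lbl), the old manual name[IFNAMSIZ - 1] = 0 becomes redundant. A quick standalone check of the arithmetic, with an invented 15-character interface name:

#include <stdio.h>

#define IFNAMSIZ 16

int main(void)
{
	char irq_lbl[IFNAMSIZ + 6 + 10];	/* name + "-txrx-" + %d */
	/* worst case: 15-char name and the largest positive int */
	int n = snprintf(irq_lbl, sizeof(irq_lbl), "%s-txrx-%d",
			 "enp123s0f1np1x4", 2147483647);

	/* prints: needs 31 chars, buffer holds 31 (plus the NUL) */
	printf("\"%s\" needs %d chars, buffer holds %zu\n",
	       irq_lbl, n, sizeof(irq_lbl) - 1);
	return 0;
}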
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 10b1e534030e..4396997c59d0 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -351,7 +351,6 @@ struct bnad {
/* debugfs specific data */
char *regdata;
u32 reglen;
- struct dentry *bnad_dentry_files[5];
struct dentry *port_debugfs_root;
};
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 97291bfbeea5..8f0972e6737c 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -500,11 +500,6 @@ bnad_debugfs_init(struct bnad *bnad)
if (!bna_debugfs_root) {
bna_debugfs_root = debugfs_create_dir("bna", NULL);
atomic_set(&bna_debugfs_port_count, 0);
- if (!bna_debugfs_root) {
- netdev_warn(bnad->netdev,
- "debugfs root dir creation failed\n");
- return;
- }
}
/* Setup the pci_dev debugfs directory for the port */
@@ -517,18 +512,11 @@ bnad_debugfs_init(struct bnad *bnad)
for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
file = &bnad_debugfs_files[i];
- bnad->bnad_dentry_files[i] =
- debugfs_create_file(file->name,
- file->mode,
- bnad->port_debugfs_root,
- bnad,
- file->fops);
- if (!bnad->bnad_dentry_files[i]) {
- netdev_warn(bnad->netdev,
- "create %s entry failed\n",
- file->name);
- return;
- }
+ debugfs_create_file(file->name,
+ file->mode,
+ bnad->port_debugfs_root,
+ bnad,
+ file->fops);
}
}
}
@@ -537,15 +525,6 @@ bnad_debugfs_init(struct bnad *bnad)
void
bnad_debugfs_uninit(struct bnad *bnad)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
- if (bnad->bnad_dentry_files[i]) {
- debugfs_remove(bnad->bnad_dentry_files[i]);
- bnad->bnad_dentry_files[i] = NULL;
- }
- }
-
/* Remove the pci_dev debugfs directory for the port */
if (bnad->port_debugfs_root) {
debugfs_remove(bnad->port_debugfs_root);
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 56901280ba04..daa416fb1724 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -915,20 +915,15 @@ static int macb_mii_probe(struct net_device *dev)
return 0;
}
-static int macb_mdiobus_register(struct macb *bp)
+static int macb_mdiobus_register(struct macb *bp, struct device_node *mdio_np)
{
struct device_node *child, *np = bp->pdev->dev.of_node;
/* If we have a child named mdio, probe it instead of looking for PHYs
* directly under the MAC node
*/
- child = of_get_child_by_name(np, "mdio");
- if (child) {
- int ret = of_mdiobus_register(bp->mii_bus, child);
-
- of_node_put(child);
- return ret;
- }
+ if (mdio_np)
+ return of_mdiobus_register(bp->mii_bus, mdio_np);
/* Only create the PHY from the device tree if at least one PHY is
* described. Otherwise scan the entire MDIO bus. We do this to support
@@ -950,17 +945,15 @@ static int macb_mdiobus_register(struct macb *bp)
static int macb_mii_init(struct macb *bp)
{
- struct device_node *child, *np = bp->pdev->dev.of_node;
+ struct device_node *mdio_np, *np = bp->pdev->dev.of_node;
int err = -ENXIO;
/* With fixed-link, we don't need to register the MDIO bus,
* except if we have a child named "mdio" in the device tree.
* In that case, some devices may be attached to the MACB's MDIO bus.
*/
- child = of_get_child_by_name(np, "mdio");
- if (child)
- of_node_put(child);
- else if (of_phy_is_fixed_link(np))
+ mdio_np = of_get_child_by_name(np, "mdio");
+ if (!mdio_np && of_phy_is_fixed_link(np))
return macb_mii_probe(bp->dev);
/* Enable management port */
@@ -984,7 +977,7 @@ static int macb_mii_init(struct macb *bp)
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
- err = macb_mdiobus_register(bp);
+ err = macb_mdiobus_register(bp, mdio_np);
if (err)
goto err_out_free_mdiobus;
@@ -999,6 +992,8 @@ err_out_unregister_bus:
err_out_free_mdiobus:
mdiobus_free(bp->mii_bus);
err_out:
+ of_node_put(mdio_np);
+
return err;
}
@@ -4849,10 +4844,11 @@ static const struct macb_config pc302gem_config = {
};
static const struct macb_config sama5d2_config = {
- .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+ .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
+ .jumbo_max_len = 10240,
.usrio = &macb_default_usrio,
};
@@ -5490,7 +5486,7 @@ static const struct dev_pm_ops macb_pm_ops = {
static struct platform_driver macb_driver = {
.probe = macb_probe,
- .remove_new = macb_remove,
+ .remove = macb_remove,
.driver = {
.name = "macb",
.of_match_table = of_match_ptr(macb_dt_ids),
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index a71b320fd030..331ac6a3dc38 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1919,7 +1919,7 @@ static struct platform_driver xgmac_driver = {
.pm = &xgmac_pm_ops,
},
.probe = xgmac_probe,
- .remove_new = xgmac_remove,
+ .remove = xgmac_remove,
};
module_platform_driver(xgmac_driver);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index b3c81a2e9d46..9ad49aea2673 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -36,175 +36,6 @@
*/
#define CN23XX_INPUT_JABBER 64600
-void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct)
-{
- int i = 0;
- u32 regval = 0;
- struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
-
- /*In cn23xx_soft_reset*/
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%llx\n",
- "CN23XX_WIN_WR_MASK_REG", CVM_CAST64(CN23XX_WIN_WR_MASK_REG),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_WIN_WR_MASK_REG)));
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_SLI_SCRATCH1", CVM_CAST64(CN23XX_SLI_SCRATCH1),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)));
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_RST_SOFT_RST", CN23XX_RST_SOFT_RST,
- lio_pci_readq(oct, CN23XX_RST_SOFT_RST));
-
- /*In cn23xx_set_dpi_regs*/
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_DPI_DMA_CONTROL", CN23XX_DPI_DMA_CONTROL,
- lio_pci_readq(oct, CN23XX_DPI_DMA_CONTROL));
-
- for (i = 0; i < 6; i++) {
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_DPI_DMA_ENG_ENB", i,
- CN23XX_DPI_DMA_ENG_ENB(i),
- lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_ENB(i)));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_DPI_DMA_ENG_BUF", i,
- CN23XX_DPI_DMA_ENG_BUF(i),
- lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_BUF(i)));
- }
-
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", "CN23XX_DPI_CTL",
- CN23XX_DPI_CTL, lio_pci_readq(oct, CN23XX_DPI_CTL));
-
- /*In cn23xx_setup_pcie_mps and cn23xx_setup_pcie_mrrs */
- pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_CONFIG_PCIE_DEVCTL",
- CVM_CAST64(CN23XX_CONFIG_PCIE_DEVCTL), CVM_CAST64(regval));
-
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_DPI_SLI_PRTX_CFG", oct->pcie_port,
- CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
- lio_pci_readq(oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port)));
-
- /*In cn23xx_specific_regs_setup */
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_S2M_PORTX_CTL", oct->pcie_port,
- CVM_CAST64(CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));
-
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_SLI_RING_RST", CVM_CAST64(CN23XX_SLI_PKT_IOQ_RING_RST),
- (u64)octeon_read_csr64(oct, CN23XX_SLI_PKT_IOQ_RING_RST));
-
- /*In cn23xx_setup_global_mac_regs*/
- for (i = 0; i < CN23XX_MAX_MACS; i++) {
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_PKT_MAC_RINFO64", i,
- CVM_CAST64(CN23XX_SLI_PKT_MAC_RINFO64(i, oct->pf_num)),
- CVM_CAST64(octeon_read_csr64
- (oct, CN23XX_SLI_PKT_MAC_RINFO64
- (i, oct->pf_num))));
- }
-
- /*In cn23xx_setup_global_input_regs*/
- for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_IQ_PKT_CONTROL64", i,
- CVM_CAST64(CN23XX_SLI_IQ_PKT_CONTROL64(i)),
- CVM_CAST64(octeon_read_csr64
- (oct, CN23XX_SLI_IQ_PKT_CONTROL64(i))));
- }
-
- /*In cn23xx_setup_global_output_regs*/
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_WMARK", CVM_CAST64(CN23XX_SLI_OQ_WMARK),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_OQ_WMARK)));
-
- for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_PKT_CONTROL", i,
- CVM_CAST64(CN23XX_SLI_OQ_PKT_CONTROL(i)),
- CVM_CAST64(octeon_read_csr(
- oct, CN23XX_SLI_OQ_PKT_CONTROL(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_PKT_INT_LEVELS", i,
- CVM_CAST64(CN23XX_SLI_OQ_PKT_INT_LEVELS(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(i))));
- }
-
- /*In cn23xx_enable_interrupt and cn23xx_disable_interrupt*/
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "cn23xx->intr_enb_reg64",
- CVM_CAST64((long)(cn23xx->intr_enb_reg64)),
- CVM_CAST64(readq(cn23xx->intr_enb_reg64)));
-
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "cn23xx->intr_sum_reg64",
- CVM_CAST64((long)(cn23xx->intr_sum_reg64)),
- CVM_CAST64(readq(cn23xx->intr_sum_reg64)));
-
- /*In cn23xx_setup_iq_regs*/
- for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_IQ_BASE_ADDR64", i,
- CVM_CAST64(CN23XX_SLI_IQ_BASE_ADDR64(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_IQ_BASE_ADDR64(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_IQ_SIZE", i,
- CVM_CAST64(CN23XX_SLI_IQ_SIZE(i)),
- CVM_CAST64(octeon_read_csr
- (oct, CN23XX_SLI_IQ_SIZE(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_IQ_DOORBELL", i,
- CVM_CAST64(CN23XX_SLI_IQ_DOORBELL(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_IQ_DOORBELL(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_IQ_INSTR_COUNT64", i,
- CVM_CAST64(CN23XX_SLI_IQ_INSTR_COUNT64(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_IQ_INSTR_COUNT64(i))));
- }
-
- /*In cn23xx_setup_oq_regs*/
- for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_BASE_ADDR64", i,
- CVM_CAST64(CN23XX_SLI_OQ_BASE_ADDR64(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_OQ_BASE_ADDR64(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_SIZE", i,
- CVM_CAST64(CN23XX_SLI_OQ_SIZE(i)),
- CVM_CAST64(octeon_read_csr
- (oct, CN23XX_SLI_OQ_SIZE(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_BUFF_INFO_SIZE", i,
- CVM_CAST64(CN23XX_SLI_OQ_BUFF_INFO_SIZE(i)),
- CVM_CAST64(octeon_read_csr(
- oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_PKTS_SENT", i,
- CVM_CAST64(CN23XX_SLI_OQ_PKTS_SENT(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_OQ_PKTS_SENT(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_PKTS_CREDIT", i,
- CVM_CAST64(CN23XX_SLI_OQ_PKTS_CREDIT(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_OQ_PKTS_CREDIT(i))));
- }
-
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_SLI_PKT_TIME_INT",
- CVM_CAST64(CN23XX_SLI_PKT_TIME_INT),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_TIME_INT)));
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_SLI_PKT_CNT_INT",
- CVM_CAST64(CN23XX_SLI_PKT_CNT_INT),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_CNT_INT)));
-}
-
static int cn23xx_pf_soft_reset(struct octeon_device *oct)
{
octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
index e6f31d0d5c0b..234b96b4f488 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
@@ -59,8 +59,6 @@ int validate_cn23xx_pf_config_info(struct octeon_device *oct,
u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
-void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct);
-
int cn23xx_sriov_config(struct octeon_device *oct);
int cn23xx_fw_loaded(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 744f2434f7fa..393b9951490a 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1545,7 +1545,7 @@ static struct platform_driver octeon_mgmt_driver = {
.of_match_table = octeon_mgmt_match,
},
.probe = octeon_mgmt_probe,
- .remove_new = octeon_mgmt_remove,
+ .remove = octeon_mgmt_remove,
};
module_platform_driver(octeon_mgmt_driver);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 89256b866840..5a9f6925e1fa 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -515,23 +515,6 @@ void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
EXPORT_SYMBOL(cxgb3_free_atid);
-/*
- * Free a server TID and return it to the free pool.
- */
-void cxgb3_free_stid(struct t3cdev *tdev, int stid)
-{
- struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
- union listen_entry *p = stid2entry(t, stid);
-
- spin_lock_bh(&t->stid_lock);
- p->next = t->sfree;
- t->sfree = p;
- t->stids_in_use--;
- spin_unlock_bh(&t->stid_lock);
-}
-
-EXPORT_SYMBOL(cxgb3_free_stid);
-
void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
void *ctx, unsigned int tid)
{
@@ -671,28 +654,6 @@ int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
EXPORT_SYMBOL(cxgb3_alloc_atid);
-int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
- void *ctx)
-{
- int stid = -1;
- struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
-
- spin_lock_bh(&t->stid_lock);
- if (t->sfree) {
- union listen_entry *p = t->sfree;
-
- stid = (p - t->stid_tab) + t->stid_base;
- t->sfree = p->next;
- p->t3c_tid.ctx = ctx;
- p->t3c_tid.client = client;
- t->stids_in_use++;
- }
- spin_unlock_bh(&t->stid_lock);
- return stid;
-}
-
-EXPORT_SYMBOL(cxgb3_alloc_stid);
-
/* Get the t3cdev associated with a net_device */
struct t3cdev *dev2t3cdev(struct net_device *dev)
{
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h
index 929c298115ca..7419824f9926 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h
@@ -95,10 +95,7 @@ struct cxgb3_client {
*/
int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client,
void *ctx);
-int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client,
- void *ctx);
void *cxgb3_free_atid(struct t3cdev *dev, int atid);
-void cxgb3_free_stid(struct t3cdev *dev, int stid);
void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client,
void *ctx, unsigned int tid);
void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 163efab27e9b..5060d3998889 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -120,7 +120,7 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
write_unlock_bh(&ctbl->lock);
dev_err(adap->pdev_dev,
"CLIP FW cmd failed with error %d, "
- "Connections using %pI6c wont be "
+ "Connections using %pI6c won't be "
"offloaded",
ret, ce->addr6.sin6_addr.s6_addr);
return ret;
@@ -133,7 +133,7 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
} else {
write_unlock_bh(&ctbl->lock);
dev_info(adap->pdev_dev, "CLIP table overflow, "
- "Connections using %pI6c wont be offloaded",
+ "Connections using %pI6c won't be offloaded",
(void *)lip);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index bbf7641a0fc7..75bd69ff61a8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1608,7 +1608,6 @@ void t4_os_portmod_changed(struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
void t4_free_sge_resources(struct adapter *adap);
-void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
irq_handler_t t4_intr_handler(struct adapter *adap);
netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev);
int cxgb4_selftest_lb_pkt(struct net_device *netdev);
@@ -2141,28 +2140,6 @@ int cxgb4_free_mac_filt(struct adapter *adap, unsigned int viid,
unsigned int naddr, const u8 **addr, bool sleep_ok);
int cxgb4_init_mps_ref_entries(struct adapter *adap);
void cxgb4_free_mps_ref_entries(struct adapter *adap);
-int cxgb4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
- const u8 *addr, const u8 *mask,
- unsigned int vni, unsigned int vni_mask,
- u8 dip_hit, u8 lookup_type, bool sleep_ok);
-int cxgb4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
- int idx, bool sleep_ok);
-int cxgb4_free_raw_mac_filt(struct adapter *adap,
- unsigned int viid,
- const u8 *addr,
- const u8 *mask,
- unsigned int idx,
- u8 lookup_type,
- u8 port_id,
- bool sleep_ok);
-int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
- unsigned int viid,
- const u8 *addr,
- const u8 *mask,
- unsigned int idx,
- u8 lookup_type,
- u8 port_id,
- bool sleep_ok);
int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
int *tcam_idx, const u8 *addr,
bool persistent, u8 *smt_idx);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 2418645c8823..97a261d5357e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2188,18 +2188,6 @@ void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);
-void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
- const unsigned int *pgsz_order)
-{
- struct adapter *adap = netdev2adap(dev);
-
- t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
- t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
- HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
- HPZ3_V(pgsz_order[3]));
-}
-EXPORT_SYMBOL(cxgb4_iscsi_init);
-
int cxgb4_flush_eq_cache(struct net_device *dev)
{
struct adapter *adap = netdev2adap(dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
index a020e8490681..60f4d5b5eb3a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
@@ -28,28 +28,6 @@ static int cxgb4_mps_ref_dec_by_mac(struct adapter *adap,
return ret;
}
-static int cxgb4_mps_ref_dec(struct adapter *adap, u16 idx)
-{
- struct mps_entries_ref *mps_entry, *tmp;
- int ret = -EINVAL;
-
- spin_lock(&adap->mps_ref_lock);
- list_for_each_entry_safe(mps_entry, tmp, &adap->mps_ref, list) {
- if (mps_entry->idx == idx) {
- if (!refcount_dec_and_test(&mps_entry->refcnt)) {
- spin_unlock(&adap->mps_ref_lock);
- return -EBUSY;
- }
- list_del(&mps_entry->list);
- kfree(mps_entry);
- ret = 0;
- break;
- }
- }
- spin_unlock(&adap->mps_ref_lock);
- return ret;
-}
-
static int cxgb4_mps_ref_inc(struct adapter *adap, const u8 *mac_addr,
u16 idx, const u8 *mask)
{
@@ -141,82 +119,6 @@ int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
return ret;
}
-int cxgb4_free_raw_mac_filt(struct adapter *adap,
- unsigned int viid,
- const u8 *addr,
- const u8 *mask,
- unsigned int idx,
- u8 lookup_type,
- u8 port_id,
- bool sleep_ok)
-{
- int ret = 0;
-
- if (!cxgb4_mps_ref_dec(adap, idx))
- ret = t4_free_raw_mac_filt(adap, viid, addr,
- mask, idx, lookup_type,
- port_id, sleep_ok);
-
- return ret;
-}
-
-int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
- unsigned int viid,
- const u8 *addr,
- const u8 *mask,
- unsigned int idx,
- u8 lookup_type,
- u8 port_id,
- bool sleep_ok)
-{
- int ret;
-
- ret = t4_alloc_raw_mac_filt(adap, viid, addr,
- mask, idx, lookup_type,
- port_id, sleep_ok);
- if (ret < 0)
- return ret;
-
- if (cxgb4_mps_ref_inc(adap, addr, ret, mask)) {
- ret = -ENOMEM;
- t4_free_raw_mac_filt(adap, viid, addr,
- mask, idx, lookup_type,
- port_id, sleep_ok);
- }
-
- return ret;
-}
-
-int cxgb4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
- int idx, bool sleep_ok)
-{
- int ret = 0;
-
- if (!cxgb4_mps_ref_dec(adap, idx))
- ret = t4_free_encap_mac_filt(adap, viid, idx, sleep_ok);
-
- return ret;
-}
-
-int cxgb4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
- const u8 *addr, const u8 *mask,
- unsigned int vni, unsigned int vni_mask,
- u8 dip_hit, u8 lookup_type, bool sleep_ok)
-{
- int ret;
-
- ret = t4_alloc_encap_mac_filt(adap, viid, addr, mask, vni, vni_mask,
- dip_hit, lookup_type, sleep_ok);
- if (ret < 0)
- return ret;
-
- if (cxgb4_mps_ref_inc(adap, addr, ret, mask)) {
- ret = -ENOMEM;
- t4_free_encap_mac_filt(adap, viid, ret, sleep_ok);
- }
- return ret;
-}
-
int cxgb4_init_mps_ref_entries(struct adapter *adap)
{
spin_lock_init(&adap->mps_ref_lock);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index d8cafaa7ddb4..d7713038386c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -518,8 +518,6 @@ unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
unsigned int *mtu_idxp);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6);
-void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
- const unsigned int *pgsz_order);
struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
unsigned int skb_len, unsigned int pull_len);
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 1e5f5b1a22a6..c02b4e9c06b2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -608,25 +608,6 @@ struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
return e;
}
-/**
- * cxgb4_l2t_alloc_switching - Allocates an L2T entry for switch filters
- * @dev: net_device pointer
- * @vlan: VLAN Id
- * @port: Associated port
- * @dmac: Destination MAC address to add to L2T
- * Returns pointer to the allocated l2t entry
- *
- * Allocates an L2T entry for use by switching rule of a filter
- */
-struct l2t_entry *cxgb4_l2t_alloc_switching(struct net_device *dev, u16 vlan,
- u8 port, u8 *dmac)
-{
- struct adapter *adap = netdev2adap(dev);
-
- return t4_l2t_alloc_switching(adap, vlan, port, dmac);
-}
-EXPORT_SYMBOL(cxgb4_l2t_alloc_switching);
-
struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
{
unsigned int l2t_size;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 340fecb28a13..8aad7e9dee6d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -115,8 +115,6 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
unsigned int priority);
u64 cxgb4_select_ntuple(struct net_device *dev,
const struct l2t_entry *l2t);
-struct l2t_entry *cxgb4_l2t_alloc_switching(struct net_device *dev, u16 vlan,
- u8 port, u8 *dmac);
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
u8 port, u8 *dmac);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index de52bcb884c4..a7d76a8ed050 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -4874,22 +4874,6 @@ void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
}
}
-/**
- * t4_free_ofld_rxqs - free a block of consecutive Rx queues
- * @adap: the adapter
- * @n: number of queues
- * @q: pointer to first queue
- *
- * Release the resources of a consecutive block of offload Rx queues.
- */
-void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
-{
- for ( ; n; n--, q++)
- if (q->rspq.desc)
- free_rspq_fl(adap, &q->rspq,
- q->fl.size ? &q->fl : NULL);
-}
-
void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
{
if (txq->q.desc) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/srq.c b/drivers/net/ethernet/chelsio/cxgb4/srq.c
index 9a54302bb046..a77d6ac1ee8c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/srq.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/srq.c
@@ -51,64 +51,6 @@ struct srq_data *t4_init_srq(int srq_size)
return s;
}
-/* cxgb4_get_srq_entry: read the SRQ table entry
- * @dev: Pointer to the net_device
- * @idx: Index to the srq
- * @entryp: pointer to the srq entry
- *
- * Sends CPL_SRQ_TABLE_REQ message for the given index.
- * Contents will be returned in CPL_SRQ_TABLE_RPL message.
- *
- * Returns zero if the read is successful, else a error
- * number will be returned. Caller should not use the srq
- * entry if the return value is non-zero.
- *
- *
- */
-int cxgb4_get_srq_entry(struct net_device *dev,
- int srq_idx, struct srq_entry *entryp)
-{
- struct cpl_srq_table_req *req;
- struct adapter *adap;
- struct sk_buff *skb;
- struct srq_data *s;
- int rc = -ENODEV;
-
- adap = netdev2adap(dev);
- s = adap->srq;
-
- if (!(adap->flags & CXGB4_FULL_INIT_DONE) || !s)
- goto out;
-
- skb = alloc_skb(sizeof(*req), GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
- req = (struct cpl_srq_table_req *)
- __skb_put_zero(skb, sizeof(*req));
- INIT_TP_WR(req, 0);
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SRQ_TABLE_REQ,
- TID_TID_V(srq_idx) |
- TID_QID_V(adap->sge.fw_evtq.abs_id)));
- req->idx = srq_idx;
-
- mutex_lock(&s->lock);
-
- s->entryp = entryp;
- t4_mgmt_tx(adap, skb);
-
- rc = wait_for_completion_timeout(&s->comp, SRQ_WAIT_TO);
- if (rc)
- rc = 0;
- else /* !rc means we timed out */
- rc = -ETIMEDOUT;
-
- WARN_ON_ONCE(entryp->idx != srq_idx);
- mutex_unlock(&s->lock);
-out:
- return rc;
-}
-EXPORT_SYMBOL(cxgb4_get_srq_entry);
-
void do_srq_table_rpl(struct adapter *adap,
const struct cpl_srq_table_rpl *rpl)
{
diff --git a/drivers/net/ethernet/chelsio/cxgb4/srq.h b/drivers/net/ethernet/chelsio/cxgb4/srq.h
index ec85cf93865a..d9f04bd5ffa3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/srq.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/srq.h
@@ -58,8 +58,6 @@ struct srq_data {
};
struct srq_data *t4_init_srq(int srq_size);
-int cxgb4_get_srq_entry(struct net_device *dev,
- int srq_idx, struct srq_entry *entryp);
void do_srq_table_rpl(struct adapter *adap,
const struct cpl_srq_table_rpl *rpl);
#endif /* __CXGB4_SRQ_H */
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
index 7ff82b6778ba..21e0dfeff158 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
@@ -573,7 +573,6 @@ int send_tx_flowc_wr(struct sock *sk, int compl,
u32 snd_nxt, u32 rcv_nxt);
void chtls_tcp_push(struct sock *sk, int flags);
int chtls_push_frames(struct chtls_sock *csk, int comp);
-int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
u64 mask, u64 val, u8 cookie,
int through_l2t);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
index 1e67140b0f80..fab6df21f01c 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
@@ -106,15 +106,6 @@ void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
send_or_defer(sk, tcp_sk(sk), skb, through_l2t);
}
-/*
- * Set one of the t_flags bits in the TCB.
- */
-int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val)
-{
- return chtls_set_tcb_field(sk, 1, 1ULL << bit_pos,
- (u64)val << bit_pos);
-}
-
static int chtls_set_tcb_keyid(struct sock *sk, int keyid)
{
return chtls_set_tcb_field(sk, 31, 0xFFFFFFFFULL, keyid);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
index 455a54708be4..96fd31d75dfd 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
@@ -342,8 +342,8 @@ static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
{
struct sk_buff *skb;
- /* Allocate space for cpl_pass_accpet_req which will be synthesized by
- * driver. Once driver synthesizes cpl_pass_accpet_req the skb will go
+ /* Allocate space for cpl_pass_accept_req which will be synthesized by
+ * driver. Once driver synthesizes cpl_pass_accept_req the skb will go
* through the regular cpl_pass_accept_req processing in TOM.
*/
skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 0a21a10a791c..fa5857923db4 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1903,7 +1903,7 @@ static struct platform_driver cs89x0_driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(cs89x0_match),
},
- .remove_new = cs89x0_platform_remove,
+ .remove = cs89x0_platform_remove,
};
module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index c2007cd86416..a4972457edd9 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -862,7 +862,7 @@ MODULE_DEVICE_TABLE(of, ep93xx_eth_of_ids);
static struct platform_driver ep93xx_eth_driver = {
.probe = ep93xx_eth_probe,
- .remove_new = ep93xx_eth_remove,
+ .remove = ep93xx_eth_remove,
.driver = {
.name = "ep93xx-eth",
.of_match_table = ep93xx_eth_of_ids,
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 84b300fee2bb..6723df9b65d9 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -568,7 +568,7 @@ static void mac89x0_device_remove(struct platform_device *pdev)
static struct platform_driver mac89x0_platform_driver = {
.probe = mac89x0_device_probe,
- .remove_new = mac89x0_device_remove,
+ .remove = mac89x0_device_remove,
.driver = {
.name = "mac89x0",
},
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 0cc3644ee855..10b7e02ba4d0 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -23,10 +23,8 @@
#define ENIC_BARS_MAX 6
-#define ENIC_WQ_MAX 8
-#define ENIC_RQ_MAX 8
-#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
-#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
+#define ENIC_WQ_MAX 256
+#define ENIC_RQ_MAX 256
#define ENIC_WQ_NAPI_BUDGET 256
@@ -162,6 +160,17 @@ struct enic_rq_stats {
u64 desc_skip; /* Rx pkt went into later buffer */
};
+struct enic_wq {
+ spinlock_t lock; /* spinlock for wq */
+ struct vnic_wq vwq;
+ struct enic_wq_stats stats;
+} ____cacheline_aligned;
+
+struct enic_rq {
+ struct vnic_rq vrq;
+ struct enic_rq_stats stats;
+} ____cacheline_aligned;
+
/* Per-instance private data structure */
struct enic {
struct net_device *netdev;
@@ -173,8 +182,8 @@ struct enic {
struct work_struct reset;
struct work_struct tx_hang_reset;
struct work_struct change_mtu_work;
- struct msix_entry msix_entry[ENIC_INTR_MAX];
- struct enic_msix_entry msix[ENIC_INTR_MAX];
+ struct msix_entry *msix_entry;
+ struct enic_msix_entry *msix;
u32 msg_enable;
spinlock_t devcmd_lock;
u8 mac_addr[ETH_ALEN];
@@ -193,28 +202,25 @@ struct enic {
bool enic_api_busy;
struct enic_port_profile *pp;
- /* work queue cache line section */
- ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
- spinlock_t wq_lock[ENIC_WQ_MAX];
- struct enic_wq_stats wq_stats[ENIC_WQ_MAX];
+ struct enic_wq *wq;
+ unsigned int wq_avail;
unsigned int wq_count;
u16 loop_enable;
u16 loop_tag;
- /* receive queue cache line section */
- ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
- struct enic_rq_stats rq_stats[ENIC_RQ_MAX];
+ struct enic_rq *rq;
+ unsigned int rq_avail;
unsigned int rq_count;
struct vxlan_offload vxlan;
- struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX];
+ struct napi_struct *napi;
- /* interrupt resource cache line section */
- ____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX];
+ struct vnic_intr *intr;
+ unsigned int intr_avail;
unsigned int intr_count;
u32 __iomem *legacy_pba; /* memory-mapped */
- /* completion queue cache line section */
- ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
+ struct vnic_cq *cq;
+ unsigned int cq_avail;
unsigned int cq_count;
struct enic_rfs_flw_tbl rfs_h;
u32 rx_copybreak;
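
struct enic now holds runtime-sized arrays of per-queue bundles instead of fixed ENIC_*_MAX members, and each bundle is ____cacheline_aligned so one queue's ring, lock, and counters stay off its neighbours' cache lines. A toy sketch of the layout change, with stand-in types rather than the driver's:

#include <stdio.h>
#include <stdlib.h>

struct vnic_wq { int ring; };			/* stand-in for the real ring */
struct wq_stats { unsigned long packets; };	/* stand-in for the stats */

/* new layout: everything one TX queue touches lives together, and the
 * array is one runtime-sized allocation instead of a fixed [8] member
 */
struct toy_enic_wq {
	struct vnic_wq vwq;
	struct wq_stats stats;
};

int main(void)
{
	unsigned int wq_count = 256;	/* ENIC_WQ_MAX is now just an upper bound */
	struct toy_enic_wq *wq = calloc(wq_count, sizeof(*wq));

	if (!wq)
		return 1;
	wq[5].stats.packets++;		/* was enic->wq_stats[5].packets++ */
	printf("%lu\n", wq[5].stats.packets);
	free(wq);
	return 0;
}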
@@ -272,18 +278,28 @@ static inline unsigned int enic_msix_wq_intr(struct enic *enic,
return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
}
-static inline unsigned int enic_msix_err_intr(struct enic *enic)
-{
- return enic->rq_count + enic->wq_count;
-}
+/* MSIX interrupts are organized as the error interrupt, then the notify
+ * interrupt, followed by all the I/O interrupts. The error interrupt needs
+ * to fit in 7 bits due to hardware constraints.
+ */
+#define ENIC_MSIX_RESERVED_INTR 2
+#define ENIC_MSIX_ERR_INTR 0
+#define ENIC_MSIX_NOTIFY_INTR 1
+#define ENIC_MSIX_IO_INTR_BASE ENIC_MSIX_RESERVED_INTR
+#define ENIC_MSIX_MIN_INTR (ENIC_MSIX_RESERVED_INTR + 2)
#define ENIC_LEGACY_IO_INTR 0
#define ENIC_LEGACY_ERR_INTR 1
#define ENIC_LEGACY_NOTIFY_INTR 2
+static inline unsigned int enic_msix_err_intr(struct enic *enic)
+{
+ return ENIC_MSIX_ERR_INTR;
+}
+
static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
- return enic->rq_count + enic->wq_count + 1;
+ return ENIC_MSIX_NOTIFY_INTR;
}
static inline bool enic_is_err_intr(struct enic *enic, int intr)
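
Under the old scheme the error and notify vectors sat after the I/O vectors (rq_count + wq_count and rq_count + wq_count + 1), so they moved whenever queue counts changed; the defines above pin them at 0 and 1 with I/O vectors starting at 2. A small enumeration of the resulting layout for an assumed 4 RQ + 4 WQ configuration (invented counts, not read from hardware):

#include <stdio.h>

#define ENIC_MSIX_ERR_INTR	0
#define ENIC_MSIX_NOTIFY_INTR	1
#define ENIC_MSIX_IO_INTR_BASE	2	/* == ENIC_MSIX_RESERVED_INTR */

int main(void)
{
	unsigned int rq_count = 4, wq_count = 4, i;

	printf("vector %d: error (fixed, fits in 7 bits)\n", ENIC_MSIX_ERR_INTR);
	printf("vector %d: notify (fixed)\n", ENIC_MSIX_NOTIFY_INTR);
	for (i = 0; i < rq_count + wq_count; i++)
		printf("vector %u: I/O for cq %u\n", ENIC_MSIX_IO_INTR_BASE + i, i);
	/* the old scheme computed the error vector as rq_count + wq_count
	 * (8 here), which moved whenever the queue counts changed
	 */
	return 0;
}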
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index f7986f2b6a17..d607b4f0542c 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -337,7 +337,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
for (i = 0; i < NUM_ENIC_GEN_STATS; i++)
*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
for (i = 0; i < enic->rq_count; i++) {
- struct enic_rq_stats *rqstats = &enic->rq_stats[i];
+ struct enic_rq_stats *rqstats = &enic->rq[i].stats;
int index;
for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) {
@@ -346,7 +346,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
}
}
for (i = 0; i < enic->wq_count; i++) {
- struct enic_wq_stats *wqstats = &enic->wq_stats[i];
+ struct enic_wq_stats *wqstats = &enic->wq[i].stats;
int index;
for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) {
@@ -695,8 +695,8 @@ static void enic_get_channels(struct net_device *netdev,
switch (vnic_dev_get_intr_mode(enic->vdev)) {
case VNIC_DEV_INTR_MODE_MSIX:
- channels->max_rx = ENIC_RQ_MAX;
- channels->max_tx = ENIC_WQ_MAX;
+ channels->max_rx = min(enic->rq_avail, ENIC_RQ_MAX);
+ channels->max_tx = min(enic->wq_avail, ENIC_WQ_MAX);
channels->rx_count = enic->rq_count;
channels->tx_count = enic->wq_count;
break;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index ffed14b63d41..9913952ccb42 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -342,8 +342,8 @@ static void enic_wq_free_buf(struct vnic_wq *wq,
{
struct enic *enic = vnic_dev_priv(wq->vdev);
- enic->wq_stats[wq->index].cq_work++;
- enic->wq_stats[wq->index].cq_bytes += buf->len;
+ enic->wq[wq->index].stats.cq_work++;
+ enic->wq[wq->index].stats.cq_bytes += buf->len;
enic_free_wq_buf(wq, buf);
}
@@ -352,20 +352,20 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
{
struct enic *enic = vnic_dev_priv(vdev);
- spin_lock(&enic->wq_lock[q_number]);
+ spin_lock(&enic->wq[q_number].lock);
- vnic_wq_service(&enic->wq[q_number], cq_desc,
+ vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
completed_index, enic_wq_free_buf,
opaque);
if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
- vnic_wq_desc_avail(&enic->wq[q_number]) >=
+ vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
(MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
netif_wake_subqueue(enic->netdev, q_number);
- enic->wq_stats[q_number].wake++;
+ enic->wq[q_number].stats.wake++;
}
- spin_unlock(&enic->wq_lock[q_number]);
+ spin_unlock(&enic->wq[q_number].lock);
return 0;
}
@@ -377,7 +377,7 @@ static bool enic_log_q_error(struct enic *enic)
bool err = false;
for (i = 0; i < enic->wq_count; i++) {
- error_status = vnic_wq_error_status(&enic->wq[i]);
+ error_status = vnic_wq_error_status(&enic->wq[i].vwq);
err |= error_status;
if (error_status)
netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
@@ -385,7 +385,7 @@ static bool enic_log_q_error(struct enic *enic)
}
for (i = 0; i < enic->rq_count; i++) {
- error_status = vnic_rq_error_status(&enic->rq[i]);
+ error_status = vnic_rq_error_status(&enic->rq[i].vrq);
err |= error_status;
if (error_status)
netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
@@ -598,9 +598,9 @@ static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
/* The enic_queue_wq_desc() above does not do HW checksum */
- enic->wq_stats[wq->index].csum_none++;
- enic->wq_stats[wq->index].packets++;
- enic->wq_stats[wq->index].bytes += skb->len;
+ enic->wq[wq->index].stats.csum_none++;
+ enic->wq[wq->index].stats.packets++;
+ enic->wq[wq->index].stats.bytes += skb->len;
return err;
}
@@ -634,9 +634,9 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
if (!eop)
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
- enic->wq_stats[wq->index].csum_partial++;
- enic->wq_stats[wq->index].packets++;
- enic->wq_stats[wq->index].bytes += skb->len;
+ enic->wq[wq->index].stats.csum_partial++;
+ enic->wq[wq->index].stats.packets++;
+ enic->wq[wq->index].stats.bytes += skb->len;
return err;
}
@@ -699,11 +699,11 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
if (skb->encapsulation) {
hdr_len = skb_inner_tcp_all_headers(skb);
enic_preload_tcp_csum_encap(skb);
- enic->wq_stats[wq->index].encap_tso++;
+ enic->wq[wq->index].stats.encap_tso++;
} else {
hdr_len = skb_tcp_all_headers(skb);
enic_preload_tcp_csum(skb);
- enic->wq_stats[wq->index].tso++;
+ enic->wq[wq->index].stats.tso++;
}
/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
@@ -757,8 +757,8 @@ tso_out_stats:
pkts = len / mss;
if ((len % mss) > 0)
pkts++;
- enic->wq_stats[wq->index].packets += pkts;
- enic->wq_stats[wq->index].bytes += (len + (pkts * hdr_len));
+ enic->wq[wq->index].stats.packets += pkts;
+ enic->wq[wq->index].stats.bytes += (len + (pkts * hdr_len));
return 0;
}
@@ -792,9 +792,9 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
if (!eop)
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
- enic->wq_stats[wq->index].encap_csum++;
- enic->wq_stats[wq->index].packets++;
- enic->wq_stats[wq->index].bytes += skb->len;
+ enic->wq[wq->index].stats.encap_csum++;
+ enic->wq[wq->index].stats.packets++;
+ enic->wq[wq->index].stats.bytes += skb->len;
return err;
}
@@ -812,7 +812,7 @@ static inline int enic_queue_wq_skb(struct enic *enic,
/* VLAN tag from trunking driver */
vlan_tag_insert = 1;
vlan_tag = skb_vlan_tag_get(skb);
- enic->wq_stats[wq->index].add_vlan++;
+ enic->wq[wq->index].stats.add_vlan++;
} else if (enic->loop_enable) {
vlan_tag = enic->loop_tag;
loopback = 1;
@@ -859,11 +859,11 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
struct netdev_queue *txq;
txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
- wq = &enic->wq[txq_map];
+ wq = &enic->wq[txq_map].vwq;
if (skb->len <= 0) {
dev_kfree_skb_any(skb);
- enic->wq_stats[wq->index].null_pkt++;
+ enic->wq[wq->index].stats.null_pkt++;
return NETDEV_TX_OK;
}
@@ -878,19 +878,19 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
skb_linearize(skb)) {
dev_kfree_skb_any(skb);
- enic->wq_stats[wq->index].skb_linear_fail++;
+ enic->wq[wq->index].stats.skb_linear_fail++;
return NETDEV_TX_OK;
}
- spin_lock(&enic->wq_lock[txq_map]);
+ spin_lock(&enic->wq[txq_map].lock);
if (vnic_wq_desc_avail(wq) <
skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
netif_tx_stop_queue(txq);
/* This is a hard error, log it */
netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
- spin_unlock(&enic->wq_lock[txq_map]);
- enic->wq_stats[wq->index].desc_full_awake++;
+ spin_unlock(&enic->wq[txq_map].lock);
+ enic->wq[wq->index].stats.desc_full_awake++;
return NETDEV_TX_BUSY;
}
@@ -899,14 +899,14 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) {
netif_tx_stop_queue(txq);
- enic->wq_stats[wq->index].stopped++;
+ enic->wq[wq->index].stats.stopped++;
}
skb_tx_timestamp(skb);
if (!netdev_xmit_more() || netif_xmit_stopped(txq))
vnic_wq_doorbell(wq);
error:
- spin_unlock(&enic->wq_lock[txq_map]);
+ spin_unlock(&enic->wq[txq_map].lock);
return NETDEV_TX_OK;
}
@@ -940,10 +940,10 @@ static void enic_get_stats(struct net_device *netdev,
net_stats->rx_errors = stats->rx.rx_errors;
net_stats->multicast = stats->rx.rx_multicast_frames_ok;
- for (i = 0; i < ENIC_RQ_MAX; i++) {
- struct enic_rq_stats *rqs = &enic->rq_stats[i];
+ for (i = 0; i < enic->rq_count; i++) {
+ struct enic_rq_stats *rqs = &enic->rq[i].stats;
- if (!enic->rq->ctrl)
+ if (!enic->rq[i].vrq.ctrl)
break;
pkt_truncated += rqs->pkt_truncated;
bad_fcs += rqs->bad_fcs;
@@ -1313,7 +1313,7 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
}
skb = netdev_alloc_skb_ip_align(netdev, len);
if (!skb) {
- enic->rq_stats[rq->index].no_skb++;
+ enic->rq[rq->index].stats.no_skb++;
return -ENOMEM;
}
@@ -1366,7 +1366,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
struct net_device *netdev = enic->netdev;
struct sk_buff *skb;
struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
- struct enic_rq_stats *rqstats = &enic->rq_stats[rq->index];
+ struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
u8 type, color, eop, sop, ingress_port, vlan_stripped;
u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
@@ -1512,7 +1512,7 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
{
struct enic *enic = vnic_dev_priv(vdev);
- vnic_rq_service(&enic->rq[q_number], cq_desc,
+ vnic_rq_service(&enic->rq[q_number].vrq, cq_desc,
completed_index, VNIC_RQ_RETURN_DESC,
enic_rq_indicate_buf, opaque);
@@ -1609,7 +1609,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+ err = vnic_rq_fill(&enic->rq[0].vrq, enic_rq_alloc_buf);
/* Buffer allocation failed. Stay in polling
* mode so we can try to fill the ring again.
@@ -1621,7 +1621,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
/* Call the function which refreshes the intr coalescing timer
* value based on the traffic.
*/
- enic_calc_int_moderation(enic, &enic->rq[0]);
+ enic_calc_int_moderation(enic, &enic->rq[0].vrq);
if ((rq_work_done < budget) && napi_complete_done(napi, rq_work_done)) {
@@ -1630,11 +1630,11 @@ static int enic_poll(struct napi_struct *napi, int budget)
*/
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- enic_set_int_moderation(enic, &enic->rq[0]);
+ enic_set_int_moderation(enic, &enic->rq[0].vrq);
vnic_intr_unmask(&enic->intr[intr]);
- enic->rq_stats[0].napi_complete++;
+ enic->rq[0].stats.napi_complete++;
} else {
- enic->rq_stats[0].napi_repoll++;
+ enic->rq[0].stats.napi_repoll++;
}
return rq_work_done;
@@ -1683,7 +1683,7 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
struct net_device *netdev = napi->dev;
struct enic *enic = netdev_priv(netdev);
unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
- struct vnic_wq *wq = &enic->wq[wq_index];
+ struct vnic_wq *wq = &enic->wq[wq_index].vwq;
unsigned int cq;
unsigned int intr;
unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
@@ -1737,7 +1737,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
+ err = vnic_rq_fill(&enic->rq[rq].vrq, enic_rq_alloc_buf);
/* Buffer allocation failed. Stay in polling mode
* so we can try to fill the ring again.
@@ -1749,7 +1749,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
/* Call the function which refreshes the intr coalescing timer
* value based on the traffic.
*/
- enic_calc_int_moderation(enic, &enic->rq[rq]);
+ enic_calc_int_moderation(enic, &enic->rq[rq].vrq);
if ((work_done < budget) && napi_complete_done(napi, work_done)) {
@@ -1758,11 +1758,11 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
*/
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- enic_set_int_moderation(enic, &enic->rq[rq]);
+ enic_set_int_moderation(enic, &enic->rq[rq].vrq);
vnic_intr_unmask(&enic->intr[intr]);
- enic->rq_stats[rq].napi_complete++;
+ enic->rq[rq].stats.napi_complete++;
} else {
- enic->rq_stats[rq].napi_repoll++;
+ enic->rq[rq].stats.napi_repoll++;
}
return work_done;
@@ -1792,7 +1792,7 @@ static void enic_free_intr(struct enic *enic)
free_irq(enic->pdev->irq, enic);
break;
case VNIC_DEV_INTR_MODE_MSIX:
- for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
+ for (i = 0; i < enic->intr_count; i++)
if (enic->msix[i].requested)
free_irq(enic->msix_entry[i].vector,
enic->msix[i].devid);
@@ -1859,7 +1859,7 @@ static int enic_request_intr(struct enic *enic)
enic->msix[intr].isr = enic_isr_msix_notify;
enic->msix[intr].devid = enic;
- for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
+ for (i = 0; i < enic->intr_count; i++)
enic->msix[i].requested = 0;
for (i = 0; i < enic->intr_count; i++) {
@@ -1989,10 +1989,10 @@ static int enic_open(struct net_device *netdev)
for (i = 0; i < enic->rq_count; i++) {
/* enable rq before updating rq desc */
- vnic_rq_enable(&enic->rq[i]);
- vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
+ vnic_rq_enable(&enic->rq[i].vrq);
+ vnic_rq_fill(&enic->rq[i].vrq, enic_rq_alloc_buf);
/* Need at least one buffer on ring to get going */
- if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
+ if (vnic_rq_desc_used(&enic->rq[i].vrq) == 0) {
netdev_err(netdev, "Unable to alloc receive buffers\n");
err = -ENOMEM;
goto err_out_free_rq;
@@ -2000,7 +2000,7 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->wq_count; i++)
- vnic_wq_enable(&enic->wq[i]);
+ vnic_wq_enable(&enic->wq[i].vwq);
if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
enic_dev_add_station_addr(enic);
@@ -2027,9 +2027,9 @@ static int enic_open(struct net_device *netdev)
err_out_free_rq:
for (i = 0; i < enic->rq_count; i++) {
- ret = vnic_rq_disable(&enic->rq[i]);
+ ret = vnic_rq_disable(&enic->rq[i].vrq);
if (!ret)
- vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+ vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
}
enic_dev_notify_unset(enic);
err_out_free_intr:
@@ -2071,12 +2071,12 @@ static int enic_stop(struct net_device *netdev)
enic_dev_del_station_addr(enic);
for (i = 0; i < enic->wq_count; i++) {
- err = vnic_wq_disable(&enic->wq[i]);
+ err = vnic_wq_disable(&enic->wq[i].vwq);
if (err)
return err;
}
for (i = 0; i < enic->rq_count; i++) {
- err = vnic_rq_disable(&enic->rq[i]);
+ err = vnic_rq_disable(&enic->rq[i].vrq);
if (err)
return err;
}
@@ -2086,9 +2086,9 @@ static int enic_stop(struct net_device *netdev)
enic_free_intr(enic);
for (i = 0; i < enic->wq_count; i++)
- vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
+ vnic_wq_clean(&enic->wq[i].vwq, enic_free_wq_buf);
for (i = 0; i < enic->rq_count; i++)
- vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+ vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
for (i = 0; i < enic->cq_count; i++)
vnic_cq_clean(&enic->cq[i]);
for (i = 0; i < enic->intr_count; i++)
@@ -2442,112 +2442,56 @@ static void enic_tx_hang_reset(struct work_struct *work)
static int enic_set_intr_mode(struct enic *enic)
{
- unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
- unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
unsigned int i;
+ int num_intr;
/* Set interrupt mode (INTx, MSI, MSI-X) depending
* on system capabilities.
*
* Try MSI-X first
- *
- * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
- * (the second to last INTR is used for WQ/RQ errors)
- * (the last INTR is used for notifications)
- */
-
- BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
- for (i = 0; i < n + m + 2; i++)
- enic->msix_entry[i].entry = i;
-
- /* Use multiple RQs if RSS is enabled
*/
- if (ENIC_SETTING(enic, RSS) &&
- enic->config.intr_mode < 1 &&
- enic->rq_count >= n &&
- enic->wq_count >= m &&
- enic->cq_count >= n + m &&
- enic->intr_count >= n + m + 2) {
-
- if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
- n + m + 2, n + m + 2) > 0) {
-
- enic->rq_count = n;
- enic->wq_count = m;
- enic->cq_count = n + m;
- enic->intr_count = n + m + 2;
-
- vnic_dev_set_intr_mode(enic->vdev,
- VNIC_DEV_INTR_MODE_MSIX);
-
- return 0;
- }
- }
-
if (enic->config.intr_mode < 1 &&
- enic->rq_count >= 1 &&
- enic->wq_count >= m &&
- enic->cq_count >= 1 + m &&
- enic->intr_count >= 1 + m + 2) {
- if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
- 1 + m + 2, 1 + m + 2) > 0) {
-
- enic->rq_count = 1;
- enic->wq_count = m;
- enic->cq_count = 1 + m;
- enic->intr_count = 1 + m + 2;
-
+ enic->intr_avail >= ENIC_MSIX_MIN_INTR) {
+ for (i = 0; i < enic->intr_avail; i++)
+ enic->msix_entry[i].entry = i;
+
+ num_intr = pci_enable_msix_range(enic->pdev, enic->msix_entry,
+ ENIC_MSIX_MIN_INTR,
+ enic->intr_avail);
+ if (num_intr > 0) {
vnic_dev_set_intr_mode(enic->vdev,
- VNIC_DEV_INTR_MODE_MSIX);
-
+ VNIC_DEV_INTR_MODE_MSIX);
+ enic->intr_avail = num_intr;
return 0;
}
}
/* Next try MSI
*
- * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
+ * We need 1 INTR
*/
if (enic->config.intr_mode < 2 &&
- enic->rq_count >= 1 &&
- enic->wq_count >= 1 &&
- enic->cq_count >= 2 &&
- enic->intr_count >= 1 &&
+ enic->intr_avail >= 1 &&
!pci_enable_msi(enic->pdev)) {
-
- enic->rq_count = 1;
- enic->wq_count = 1;
- enic->cq_count = 2;
- enic->intr_count = 1;
-
+ enic->intr_avail = 1;
vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
-
return 0;
}
/* Next try INTx
*
- * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
+ * We need 3 INTRs
* (the first INTR is used for WQ/RQ)
* (the second INTR is used for WQ/RQ errors)
* (the last INTR is used for notifications)
*/
if (enic->config.intr_mode < 3 &&
- enic->rq_count >= 1 &&
- enic->wq_count >= 1 &&
- enic->cq_count >= 2 &&
- enic->intr_count >= 3) {
-
- enic->rq_count = 1;
- enic->wq_count = 1;
- enic->cq_count = 2;
- enic->intr_count = 3;
-
+ enic->intr_avail >= 3) {
+ enic->intr_avail = 3;
vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
-
return 0;
}
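The rewritten ladder above is the standard PCI interrupt-mode fallback: request an MSI-X vector range, fall back to single-vector MSI, then to legacy INTx. A self-contained sketch of the pattern (names are illustrative, not from this driver):

/* MSI-X -> MSI -> INTx fallback (sketch); returns vectors granted. */
static int example_enable_irqs(struct pci_dev *pdev,
			       struct msix_entry *entries,
			       int min_vecs, int max_vecs)
{
	int i, got;

	for (i = 0; i < max_vecs; i++)
		entries[i].entry = i;

	/* Succeeds with anywhere from min_vecs to max_vecs vectors. */
	got = pci_enable_msix_range(pdev, entries, min_vecs, max_vecs);
	if (got > 0)
		return got;		/* MSI-X */

	if (!pci_enable_msi(pdev))
		return 1;		/* MSI: always one vector */

	return 0;			/* legacy INTx */
}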
@@ -2572,11 +2516,81 @@ static void enic_clear_intr_mode(struct enic *enic)
vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
+static int enic_adjust_resources(struct enic *enic)
+{
+ unsigned int max_queues;
+ unsigned int rq_default;
+ unsigned int rq_avail;
+ unsigned int wq_avail;
+
+ if (enic->rq_avail < 1 || enic->wq_avail < 1 || enic->cq_avail < 2) {
+ dev_err(enic_get_dev(enic),
+ "Not enough resources available rq: %d wq: %d cq: %d\n",
+ enic->rq_avail, enic->wq_avail,
+ enic->cq_avail);
+ return -ENOSPC;
+ }
+
+ if (is_kdump_kernel()) {
+ dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
+ enic->rq_avail = 1;
+ enic->wq_avail = 1;
+ enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
+ enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
+ enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
+ }
+
+ /* if RSS isn't set, then we can only use one RQ */
+ if (!ENIC_SETTING(enic, RSS))
+ enic->rq_avail = 1;
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ case VNIC_DEV_INTR_MODE_MSI:
+ enic->rq_count = 1;
+ enic->wq_count = 1;
+ enic->cq_count = 2;
+ enic->intr_count = enic->intr_avail;
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ /* Adjust the number of wqs/rqs/cqs/interrupts that will be
+ * used based on which resource is the most constrained
+ */
+ wq_avail = min(enic->wq_avail, ENIC_WQ_MAX);
+ rq_default = netif_get_num_default_rss_queues();
+ rq_avail = min3(enic->rq_avail, ENIC_RQ_MAX, rq_default);
+ max_queues = min(enic->cq_avail,
+ enic->intr_avail - ENIC_MSIX_RESERVED_INTR);
+ if (wq_avail + rq_avail <= max_queues) {
+ enic->rq_count = rq_avail;
+ enic->wq_count = wq_avail;
+ } else {
+ /* recalculate wq/rq count */
+ if (rq_avail < wq_avail) {
+ enic->rq_count = min(rq_avail, max_queues / 2);
+ enic->wq_count = max_queues - enic->rq_count;
+ } else {
+ enic->wq_count = min(wq_avail, max_queues / 2);
+ enic->rq_count = max_queues - enic->wq_count;
+ }
+ }
+ enic->cq_count = enic->rq_count + enic->wq_count;
+ enic->intr_count = enic->cq_count + ENIC_MSIX_RESERVED_INTR;
+
+ break;
+ default:
+ dev_err(enic_get_dev(enic), "Unknown interrupt mode\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
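A worked example of the MSI-X branch above, assuming ENIC_MSIX_RESERVED_INTR is 2 (the error and notification vectors the old n+m+2 comment described): with wq_avail = 8, rq_avail clamped to 8 by min3(), cq_avail = 16 and intr_avail = 10, max_queues = min(16, 10 - 2) = 8. Since 8 + 8 > 8 and neither side is smaller, the else branch yields wq_count = min(8, 8 / 2) = 4 and rq_count = 8 - 4 = 4, so cq_count = 8 and intr_count = 10, exactly the vectors available.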
static void enic_get_queue_stats_rx(struct net_device *dev, int idx,
struct netdev_queue_stats_rx *rxs)
{
struct enic *enic = netdev_priv(dev);
- struct enic_rq_stats *rqstats = &enic->rq_stats[idx];
+ struct enic_rq_stats *rqstats = &enic->rq[idx].stats;
rxs->bytes = rqstats->bytes;
rxs->packets = rqstats->packets;
@@ -2590,7 +2604,7 @@ static void enic_get_queue_stats_tx(struct net_device *dev, int idx,
struct netdev_queue_stats_tx *txs)
{
struct enic *enic = netdev_priv(dev);
- struct enic_wq_stats *wqstats = &enic->wq_stats[idx];
+ struct enic_wq_stats *wqstats = &enic->wq[idx].stats;
txs->bytes = wqstats->bytes;
txs->packets = wqstats->packets;
@@ -2674,6 +2688,71 @@ static const struct netdev_stat_ops enic_netdev_stat_ops = {
.get_base_stats = enic_get_base_stats,
};
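These callbacks take effect only once the ops table is attached to the net_device, presumably during probe (the assignment itself is outside this hunk); a sketch of the hookup:

	/* Sketch: attach the per-queue stats ops so the core can query
	 * rq/wq counters through the netdev queue-stats interface. */
	netdev->stat_ops = &enic_netdev_stat_ops;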
+static void enic_free_enic_resources(struct enic *enic)
+{
+ kfree(enic->wq);
+ enic->wq = NULL;
+
+ kfree(enic->rq);
+ enic->rq = NULL;
+
+ kfree(enic->cq);
+ enic->cq = NULL;
+
+ kfree(enic->napi);
+ enic->napi = NULL;
+
+ kfree(enic->msix_entry);
+ enic->msix_entry = NULL;
+
+ kfree(enic->msix);
+ enic->msix = NULL;
+
+ kfree(enic->intr);
+ enic->intr = NULL;
+}
+
+static int enic_alloc_enic_resources(struct enic *enic)
+{
+ enic->wq = kcalloc(enic->wq_avail, sizeof(struct enic_wq), GFP_KERNEL);
+ if (!enic->wq)
+ goto free_queues;
+
+ enic->rq = kcalloc(enic->rq_avail, sizeof(struct enic_rq), GFP_KERNEL);
+ if (!enic->rq)
+ goto free_queues;
+
+ enic->cq = kcalloc(enic->cq_avail, sizeof(struct vnic_cq), GFP_KERNEL);
+ if (!enic->cq)
+ goto free_queues;
+
+ enic->napi = kcalloc(enic->wq_avail + enic->rq_avail,
+ sizeof(struct napi_struct), GFP_KERNEL);
+ if (!enic->napi)
+ goto free_queues;
+
+ enic->msix_entry = kcalloc(enic->intr_avail, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!enic->msix_entry)
+ goto free_queues;
+
+ enic->msix = kcalloc(enic->intr_avail, sizeof(struct enic_msix_entry),
+ GFP_KERNEL);
+ if (!enic->msix)
+ goto free_queues;
+
+ enic->intr = kcalloc(enic->intr_avail, sizeof(struct vnic_intr),
+ GFP_KERNEL);
+ if (!enic->intr)
+ goto free_queues;
+
+ return 0;
+
+free_queues:
+ enic_free_enic_resources(enic);
+ return -ENOMEM;
+}
+
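A note on the unwind above: a single free_queues label is safe because the enic private area starts zeroed and kfree(NULL) is a no-op, so enic_free_enic_resources() may free every member unconditionally no matter which allocation failed. The idiom in miniature (illustrative names):

	/* kfree(NULL) is a no-op, so one teardown path covers every
	 * partially-completed allocation. */
	a = kcalloc(n, sizeof(*a), GFP_KERNEL);
	b = kcalloc(n, sizeof(*b), GFP_KERNEL);
	if (!a || !b) {
		kfree(b);	/* may be NULL */
		kfree(a);	/* may be NULL */
		return -ENOMEM;
	}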
static void enic_dev_deinit(struct enic *enic)
{
unsigned int i;
@@ -2691,18 +2770,7 @@ static void enic_dev_deinit(struct enic *enic)
enic_free_vnic_resources(enic);
enic_clear_intr_mode(enic);
enic_free_affinity_hint(enic);
-}
-
-static void enic_kdump_kernel_config(struct enic *enic)
-{
- if (is_kdump_kernel()) {
- dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
- enic->rq_count = 1;
- enic->wq_count = 1;
- enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
- enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
- enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
- }
+ enic_free_enic_resources(enic);
}
static int enic_dev_init(struct enic *enic)
@@ -2734,19 +2802,26 @@ static int enic_dev_init(struct enic *enic)
enic_get_res_counts(enic);
- /* modify resource count if we are in kdump_kernel
- */
- enic_kdump_kernel_config(enic);
+ err = enic_alloc_enic_resources(enic);
+ if (err) {
+ dev_err(dev, "Failed to allocate enic resources\n");
+ return err;
+ }
- /* Set interrupt mode based on resource counts and system
- * capabilities
- */
+ /* Set interrupt mode based on system capabilities */
err = enic_set_intr_mode(enic);
if (err) {
dev_err(dev, "Failed to set intr mode based on resource "
"counts and system capabilities, aborting\n");
- return err;
+ goto err_out_free_vnic_resources;
+ }
+
+ /* Adjust resource counts based on most constrained resources */
+ err = enic_adjust_resources(enic);
+ if (err) {
+ dev_err(dev, "Failed to adjust resources\n");
+ goto err_out_free_vnic_resources;
}
/* Allocate and configure vNIC resources
@@ -2788,6 +2863,7 @@ err_out_free_vnic_resources:
enic_free_affinity_hint(enic);
enic_clear_intr_mode(enic);
enic_free_vnic_resources(enic);
+ enic_free_enic_resources(enic);
return err;
}
@@ -2993,7 +3069,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
for (i = 0; i < enic->wq_count; i++)
- spin_lock_init(&enic->wq_lock[i]);
+ spin_lock_init(&enic->wq[i].lock);
/* Register net device
*/
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c
index 1c48aebdbab0..126125199833 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.c
+++ b/drivers/net/ethernet/cisco/enic/enic_res.c
@@ -176,9 +176,9 @@ void enic_free_vnic_resources(struct enic *enic)
unsigned int i;
for (i = 0; i < enic->wq_count; i++)
- vnic_wq_free(&enic->wq[i]);
+ vnic_wq_free(&enic->wq[i].vwq);
for (i = 0; i < enic->rq_count; i++)
- vnic_rq_free(&enic->rq[i]);
+ vnic_rq_free(&enic->rq[i].vrq);
for (i = 0; i < enic->cq_count; i++)
vnic_cq_free(&enic->cq[i]);
for (i = 0; i < enic->intr_count; i++)
@@ -187,16 +187,21 @@ void enic_free_vnic_resources(struct enic *enic)
void enic_get_res_counts(struct enic *enic)
{
- enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
- enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
- enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
- enic->intr_count = vnic_dev_get_res_count(enic->vdev,
- RES_TYPE_INTR_CTRL);
+ enic->wq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
+ enic->rq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
+ enic->cq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
+ enic->intr_avail = vnic_dev_get_res_count(enic->vdev,
+ RES_TYPE_INTR_CTRL);
+
+ enic->wq_count = enic->wq_avail;
+ enic->rq_count = enic->rq_avail;
+ enic->cq_count = enic->cq_avail;
+ enic->intr_count = enic->intr_avail;
dev_info(enic_get_dev(enic),
"vNIC resources avail: wq %d rq %d cq %d intr %d\n",
- enic->wq_count, enic->rq_count,
- enic->cq_count, enic->intr_count);
+ enic->wq_avail, enic->rq_avail,
+ enic->cq_avail, enic->intr_avail);
}
void enic_init_vnic_resources(struct enic *enic)
@@ -221,9 +226,12 @@ void enic_init_vnic_resources(struct enic *enic)
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_INTX:
+ error_interrupt_enable = 1;
+ error_interrupt_offset = ENIC_LEGACY_ERR_INTR;
+ break;
case VNIC_DEV_INTR_MODE_MSIX:
error_interrupt_enable = 1;
- error_interrupt_offset = enic->intr_count - 2;
+ error_interrupt_offset = enic_msix_err_intr(enic);
break;
default:
error_interrupt_enable = 0;
@@ -233,7 +241,7 @@ void enic_init_vnic_resources(struct enic *enic)
for (i = 0; i < enic->rq_count; i++) {
cq_index = i;
- vnic_rq_init(&enic->rq[i],
+ vnic_rq_init(&enic->rq[i].vrq,
cq_index,
error_interrupt_enable,
error_interrupt_offset);
@@ -241,7 +249,7 @@ void enic_init_vnic_resources(struct enic *enic)
for (i = 0; i < enic->wq_count; i++) {
cq_index = enic->rq_count + i;
- vnic_wq_init(&enic->wq[i],
+ vnic_wq_init(&enic->wq[i].vwq,
cq_index,
error_interrupt_enable,
error_interrupt_offset);
@@ -249,15 +257,15 @@ void enic_init_vnic_resources(struct enic *enic)
/* Init CQ resources
*
- * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI
- * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X
+ * All CQs point to INTR[0] for INTx, MSI
+ * CQ[i] point to INTR[ENIC_MSIX_IO_INTR_BASE + i] for MSI-X
*/
for (i = 0; i < enic->cq_count; i++) {
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_MSIX:
- interrupt_offset = i;
+ interrupt_offset = ENIC_MSIX_IO_INTR_BASE + i;
break;
default:
interrupt_offset = 0;
@@ -322,7 +330,7 @@ int enic_alloc_vnic_resources(struct enic *enic)
*/
for (i = 0; i < enic->wq_count; i++) {
- err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,
+ err = vnic_wq_alloc(enic->vdev, &enic->wq[i].vwq, i,
enic->config.wq_desc_count,
sizeof(struct wq_enet_desc));
if (err)
@@ -330,7 +338,7 @@ int enic_alloc_vnic_resources(struct enic *enic)
}
for (i = 0; i < enic->rq_count; i++) {
- err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i,
+ err = vnic_rq_alloc(enic->vdev, &enic->rq[i].vrq, i,
enic->config.rq_desc_count,
sizeof(struct rq_enet_desc));
if (err)
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 73e1c71c5092..991e3839858b 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2573,7 +2573,7 @@ static struct platform_driver gemini_ethernet_port_driver = {
.of_match_table = gemini_ethernet_port_of_match,
},
.probe = gemini_ethernet_port_probe,
- .remove_new = gemini_ethernet_port_remove,
+ .remove = gemini_ethernet_port_remove,
};
static int gemini_ethernet_probe(struct platform_device *pdev)
@@ -2637,7 +2637,7 @@ static struct platform_driver gemini_ethernet_driver = {
.of_match_table = gemini_ethernet_of_match,
},
.probe = gemini_ethernet_probe,
- .remove_new = gemini_ethernet_remove,
+ .remove = gemini_ethernet_remove,
};
static int __init gemini_ethernet_module_init(void)
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 150cc94ae9f8..8735e333034c 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1799,7 +1799,7 @@ static struct platform_driver dm9000_driver = {
.of_match_table = of_match_ptr(dm9000_of_matches),
},
.probe = dm9000_probe,
- .remove_new = dm9000_drv_remove,
+ .remove = dm9000_drv_remove,
};
module_platform_driver(dm9000_driver);
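The .remove_new -> .remove renames here and in gemini.c above are mechanical: .remove_new was the transitional name for the void-returning remove callback, and after struct platform_driver's .remove itself was converted to return void, drivers moved back to the canonical member. The callback shape is unchanged (sketch):

/* Only the struct member name changes; the callback already returns void. */
static void dm9000_drv_remove(struct platform_device *pdev)
{
	/* release resources; a remove callback cannot fail */
}

static struct platform_driver dm9000_driver = {
	.probe  = dm9000_probe,
	.remove = dm9000_drv_remove,	/* was .remove_new */
};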
diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig
index 0d77f84c8e7b..e9e13654812c 100644
--- a/drivers/net/ethernet/dlink/Kconfig
+++ b/drivers/net/ethernet/dlink/Kconfig
@@ -32,24 +32,4 @@ config DL2K
To compile this driver as a module, choose M here: the
module will be called dl2k.
-config SUNDANCE
- tristate "Sundance Alta support"
- depends on PCI
- select CRC32
- select MII
- help
- This driver is for the Sundance "Alta" chip.
- More specific information and updates are available from
- <http://www.scyld.com/network/sundance.html>.
-
-config SUNDANCE_MMIO
- bool "Use MMIO instead of PIO"
- depends on SUNDANCE
- help
- Enable memory-mapped I/O for interaction with Sundance NIC registers.
- Do NOT enable this by default, PIO (enabled when MMIO is disabled)
- is known to solve bugs on certain chips.
-
- If unsure, say N.
-
endif # NET_VENDOR_DLINK
diff --git a/drivers/net/ethernet/dlink/Makefile b/drivers/net/ethernet/dlink/Makefile
index 3ff503c747db..38c236eb6007 100644
--- a/drivers/net/ethernet/dlink/Makefile
+++ b/drivers/net/ethernet/dlink/Makefile
@@ -4,4 +4,3 @@
#
obj-$(CONFIG_DL2K) += dl2k.o
-obj-$(CONFIG_SUNDANCE) += sundance.o
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
deleted file mode 100644
index 8af5ecec7d61..000000000000
--- a/drivers/net/ethernet/dlink/sundance.c
+++ /dev/null
@@ -1,1985 +0,0 @@
-/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
-/*
- Written 1999-2000 by Donald Becker.
-
- This software may be used and distributed according to the terms of
- the GNU General Public License (GPL), incorporated herein by reference.
- Drivers based on or derived from this code fall under the GPL and must
- retain the authorship, copyright and license notice. This file is not
- a complete program and may only be used when the entire operating
- system is licensed under the GPL.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- Support and updates available at
- http://www.scyld.com/network/sundance.html
- [link no longer provides useful info -jgarzik]
- Archives of the mailing list are still available at
- https://www.beowulf.org/pipermail/netdrivers/
-
-*/
-
-#define DRV_NAME "sundance"
-
-/* The user-configurable values.
- These may be modified when a driver module is loaded.*/
-static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
-/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
- Typical is a 64 element hash table based on the Ethernet CRC. */
-static const int multicast_filter_limit = 32;
-
-/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
- Setting to > 1518 effectively disables this feature.
- This chip can receive into offset buffers, so the Alpha does not
- need a copy-align. */
-static int rx_copybreak;
-static int flowctrl=1;
-
-/* media[] specifies the media type the NIC operates at.
- autosense Autosensing active media.
- 10mbps_hd 10Mbps half duplex.
- 10mbps_fd 10Mbps full duplex.
- 100mbps_hd 100Mbps half duplex.
- 100mbps_fd 100Mbps full duplex.
- 0 Autosensing active media.
- 1 10Mbps half duplex.
- 2 10Mbps full duplex.
- 3 100Mbps half duplex.
- 4 100Mbps full duplex.
-*/
-#define MAX_UNITS 8
-static char *media[MAX_UNITS];
-
-
-/* Operational parameters that are set at compile time. */
-
-/* Keep the ring sizes a power of two for compile efficiency.
- The compiler will convert <unsigned>'%'<2^N> into a bit mask.
- Making the Tx ring too large decreases the effectiveness of channel
- bonding and packet priority, and more than 128 requires modifying the
- Tx error recovery.
- Large receive rings merely waste memory. */
-#define TX_RING_SIZE 32
-#define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
-#define RX_RING_SIZE 64
-#define RX_BUDGET 32
-#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
-#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
-
-/* Operational parameters that usually are not changed. */
-/* Time in jiffies before concluding the transmitter is hung. */
-#define TX_TIMEOUT (4*HZ)
-#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
-
-/* Include files, designed to support most kernel versions 2.0.0 and later. */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/uaccess.h>
-#include <asm/processor.h> /* Processor type for cache alignment. */
-#include <asm/io.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/dma-mapping.h>
-#include <linux/crc32.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-
-MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
-MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
-MODULE_LICENSE("GPL");
-
-module_param(debug, int, 0);
-module_param(rx_copybreak, int, 0);
-module_param_array(media, charp, NULL, 0);
-module_param(flowctrl, int, 0);
-MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
-MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
-MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
-
-/*
- Theory of Operation
-
-I. Board Compatibility
-
-This driver is designed for the Sundance Technologies "Alta" ST201 chip.
-
-II. Board-specific settings
-
-III. Driver operation
-
-IIIa. Ring buffers
-
-This driver uses two statically allocated fixed-size descriptor lists
-formed into rings by a branch from the final descriptor to the beginning of
-the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
-Some chips explicitly use only 2^N sized rings, while others use a
-'next descriptor' pointer that the driver forms into rings.
-
-IIIb/c. Transmit/Receive Structure
-
-This driver uses a zero-copy receive and transmit scheme.
-The driver allocates full frame size skbuffs for the Rx ring buffers at
-open() time and passes the skb->data field to the chip as receive data
-buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
-a fresh skbuff is allocated and the frame is copied to the new skbuff.
-When the incoming frame is larger, the skbuff is passed directly up the
-protocol stack. Buffers consumed this way are replaced by newly allocated
-skbuffs in a later phase of receives.
-
-The RX_COPYBREAK value is chosen to trade off the memory wasted by
-using a full-sized skbuff for small frames vs. the copying costs of larger
-frames. New boards are typically used in generously configured machines
-and the underfilled buffers have negligible impact compared to the benefit of
-a single allocation size, so the default value of zero results in never
-copying packets. When copying is done, the cost is usually mitigated by using
-a combined copy/checksum routine. Copying also preloads the cache, which is
-most useful with small frames.
-
-A subtle aspect of the operation is that the IP header at offset 14 in an
-ethernet frame isn't longword aligned for further processing.
-Unaligned buffers are permitted by the Sundance hardware, so
-frames are received into the skbuff at an offset of "+2", 16-byte aligning
-the IP header.
-
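A condensed sketch of the two receive-path decisions described above, the "+2" alignment reserve and the copybreak test, as rx_poll() and init_ring() below implement them (DMA sync/unmap and error paths elided):

static struct sk_buff *copybreak_rx(struct net_device *dev,
				    struct sk_buff *ring_skb, int pkt_len)
{
	struct sk_buff *skb;

	if (pkt_len < rx_copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	/* 16-byte align the IP header */
		skb_copy_to_linear_data(skb, ring_skb->data, pkt_len);
		skb_put(skb, pkt_len);
		return skb;		/* ring buffer stays in place */
	}
	skb_put(ring_skb, pkt_len);	/* hand the ring buffer up whole */
	return ring_skb;
}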
-IIId. Synchronization
-
-The driver runs as two independent, single-threaded flows of control. One
-is the send-packet routine, which enforces single-threaded use by the
-dev->tbusy flag. The other thread is the interrupt handler, which is single
-threaded by the hardware and interrupt handling software.
-
-The send packet thread has partial control over the Tx ring and 'dev->tbusy'
-flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
-queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
-the 'lp->tx_full' flag.
-
-The interrupt handler has exclusive control over the Rx ring and records stats
-from the Tx ring. After reaping the stats, it marks the Tx queue entry as
-empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
-clears both the tx_full and tbusy flags.
-
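The tbusy/tx_full handshake described above maps onto the modern queue API in start_tx() and intr_handler() below; in miniature:

	/* Producer (start_tx): stop the queue when the ring is nearly full. */
	if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
		netif_stop_queue(dev);

	/* Consumer (intr_handler): after reaping finished slots, wake it. */
	if (netif_queue_stopped(dev) &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);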
-IV. Notes
-
-IVb. References
-
-The Sundance ST201 datasheet, preliminary version.
-The Kendin KS8723 datasheet, preliminary version.
-The ICplus IP100 datasheet, preliminary version.
-http://www.scyld.com/expert/100mbps.html
-http://www.scyld.com/expert/NWay.html
-
-IVc. Errata
-
-*/
-
-/* Work-around for Kendin chip bugs. */
-#ifndef CONFIG_SUNDANCE_MMIO
-#define USE_IO_OPS 1
-#endif
-
-static const struct pci_device_id sundance_pci_tbl[] = {
- { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
- { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
- { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
- { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
- { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
- { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
- { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
- { }
-};
-MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
-
-enum {
- netdev_io_size = 128
-};
-
-struct pci_id_info {
- const char *name;
-};
-static const struct pci_id_info pci_id_tbl[] = {
- {"D-Link DFE-550TX FAST Ethernet Adapter"},
- {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
- {"D-Link DFE-580TX 4 port Server Adapter"},
- {"D-Link DFE-530TXS FAST Ethernet Adapter"},
- {"D-Link DL10050-based FAST Ethernet Adapter"},
- {"Sundance Technology Alta"},
- {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
- { } /* terminate list. */
-};
-
-/* This driver was written to use PCI memory space; however, x86-oriented
- hardware often uses I/O space accesses. */
-
-/* Offsets to the device registers.
- Unlike software-only systems, device drivers interact with complex hardware.
- It's not useful to define symbolic names for every register bit in the
- device. The name can only partially document the semantics and make
- the driver longer and more difficult to read.
- In general, only the important configuration values or bits changed
- multiple times should be defined symbolically.
-*/
-enum alta_offsets {
- DMACtrl = 0x00,
- TxListPtr = 0x04,
- TxDMABurstThresh = 0x08,
- TxDMAUrgentThresh = 0x09,
- TxDMAPollPeriod = 0x0a,
- RxDMAStatus = 0x0c,
- RxListPtr = 0x10,
- DebugCtrl0 = 0x1a,
- DebugCtrl1 = 0x1c,
- RxDMABurstThresh = 0x14,
- RxDMAUrgentThresh = 0x15,
- RxDMAPollPeriod = 0x16,
- LEDCtrl = 0x1a,
- ASICCtrl = 0x30,
- EEData = 0x34,
- EECtrl = 0x36,
- FlashAddr = 0x40,
- FlashData = 0x44,
- WakeEvent = 0x45,
- TxStatus = 0x46,
- TxFrameId = 0x47,
- DownCounter = 0x18,
- IntrClear = 0x4a,
- IntrEnable = 0x4c,
- IntrStatus = 0x4e,
- MACCtrl0 = 0x50,
- MACCtrl1 = 0x52,
- StationAddr = 0x54,
- MaxFrameSize = 0x5A,
- RxMode = 0x5c,
- MIICtrl = 0x5e,
- MulticastFilter0 = 0x60,
- MulticastFilter1 = 0x64,
- RxOctetsLow = 0x68,
- RxOctetsHigh = 0x6a,
- TxOctetsLow = 0x6c,
- TxOctetsHigh = 0x6e,
- TxFramesOK = 0x70,
- RxFramesOK = 0x72,
- StatsCarrierError = 0x74,
- StatsLateColl = 0x75,
- StatsMultiColl = 0x76,
- StatsOneColl = 0x77,
- StatsTxDefer = 0x78,
- RxMissed = 0x79,
- StatsTxXSDefer = 0x7a,
- StatsTxAbort = 0x7b,
- StatsBcastTx = 0x7c,
- StatsBcastRx = 0x7d,
- StatsMcastTx = 0x7e,
- StatsMcastRx = 0x7f,
- /* Aliased and bogus values! */
- RxStatus = 0x0c,
-};
-
-#define ASIC_HI_WORD(x) ((x) + 2)
-
-enum ASICCtrl_HiWord_bit {
- GlobalReset = 0x0001,
- RxReset = 0x0002,
- TxReset = 0x0004,
- DMAReset = 0x0008,
- FIFOReset = 0x0010,
- NetworkReset = 0x0020,
- HostReset = 0x0040,
- ResetBusy = 0x0400,
-};
-
-/* Bits in the interrupt status/mask registers. */
-enum intr_status_bits {
- IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
- IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
- IntrDrvRqst=0x0040,
- StatsMax=0x0080, LinkChange=0x0100,
- IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
-};
-
-/* Bits in the RxMode register. */
-enum rx_mode_bits {
- AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
- AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
-};
-/* Bits in MACCtrl. */
-enum mac_ctrl0_bits {
- EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
- EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
-};
-enum mac_ctrl1_bits {
- StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
- TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
- RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
-};
-
-/* Bits in WakeEvent register. */
-enum wake_event_bits {
- WakePktEnable = 0x01,
- MagicPktEnable = 0x02,
- LinkEventEnable = 0x04,
- WolEnable = 0x80,
-};
-
-/* The Rx and Tx buffer descriptors. */
-/* Note that using only 32 bit fields simplifies conversion to big-endian
- architectures. */
-struct netdev_desc {
- __le32 next_desc;
- __le32 status;
- struct desc_frag { __le32 addr, length; } frag;
-};
-
-/* Bits in netdev_desc.status */
-enum desc_status_bits {
- DescOwn=0x8000,
- DescEndPacket=0x4000,
- DescEndRing=0x2000,
- LastFrag=0x80000000,
- DescIntrOnTx=0x8000,
- DescIntrOnDMADone=0x80000000,
- DisableAlign = 0x00000001,
-};
-
-#define PRIV_ALIGN 15 /* Required alignment mask */
-/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
- within the structure. */
-#define MII_CNT 4
-struct netdev_private {
- /* Descriptor rings first for alignment. */
- struct netdev_desc *rx_ring;
- struct netdev_desc *tx_ring;
- struct sk_buff* rx_skbuff[RX_RING_SIZE];
- struct sk_buff* tx_skbuff[TX_RING_SIZE];
- dma_addr_t tx_ring_dma;
- dma_addr_t rx_ring_dma;
- struct timer_list timer; /* Media monitoring timer. */
- struct net_device *ndev; /* backpointer */
- /* ethtool extra stats */
- struct {
- u64 tx_multiple_collisions;
- u64 tx_single_collisions;
- u64 tx_late_collisions;
- u64 tx_deferred;
- u64 tx_deferred_excessive;
- u64 tx_aborted;
- u64 tx_bcasts;
- u64 rx_bcasts;
- u64 tx_mcasts;
- u64 rx_mcasts;
- } xstats;
- /* Frequently used values: keep some adjacent for cache effect. */
- spinlock_t lock;
- int msg_enable;
- int chip_id;
- unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
- unsigned int rx_buf_sz; /* Based on MTU+slack. */
- struct netdev_desc *last_tx; /* Last Tx descriptor used. */
- unsigned int cur_tx, dirty_tx;
- /* These values keep track of the transceiver/media in use. */
- unsigned int flowctrl:1;
- unsigned int default_port:4; /* Last dev->if_port value. */
- unsigned int an_enable:1;
- unsigned int speed;
- unsigned int wol_enabled:1; /* Wake on LAN enabled */
- struct tasklet_struct rx_tasklet;
- struct tasklet_struct tx_tasklet;
- int budget;
- int cur_task;
- /* Multicast and receive mode. */
- spinlock_t mcastlock; /* SMP lock multicast updates. */
- u16 mcast_filter[4];
- /* MII transceiver section. */
- struct mii_if_info mii_if;
- int mii_preamble_required;
- unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
- struct pci_dev *pci_dev;
- void __iomem *base;
- spinlock_t statlock;
-};
-
-/* The station address location in the EEPROM. */
-#define EEPROM_SA_OFFSET 0x10
-#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
- IntrDrvRqst | IntrTxDone | StatsMax | \
- LinkChange)
-
-static int change_mtu(struct net_device *dev, int new_mtu);
-static int eeprom_read(void __iomem *ioaddr, int location);
-static int mdio_read(struct net_device *dev, int phy_id, int location);
-static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
-static int mdio_wait_link(struct net_device *dev, int wait);
-static int netdev_open(struct net_device *dev);
-static void check_duplex(struct net_device *dev);
-static void netdev_timer(struct timer_list *t);
-static void tx_timeout(struct net_device *dev, unsigned int txqueue);
-static void init_ring(struct net_device *dev);
-static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
-static int reset_tx (struct net_device *dev);
-static irqreturn_t intr_handler(int irq, void *dev_instance);
-static void rx_poll(struct tasklet_struct *t);
-static void tx_poll(struct tasklet_struct *t);
-static void refill_rx (struct net_device *dev);
-static void netdev_error(struct net_device *dev, int intr_status);
-static void set_rx_mode(struct net_device *dev);
-static int __set_mac_addr(struct net_device *dev);
-static int sundance_set_mac_addr(struct net_device *dev, void *data);
-static struct net_device_stats *get_stats(struct net_device *dev);
-static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static int netdev_close(struct net_device *dev);
-static const struct ethtool_ops ethtool_ops;
-
-static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->base + ASICCtrl;
- int countdown;
-
- /* ST201 documentation states ASICCtrl is a 32bit register */
- iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
- /* ST201 documentation states reset can take up to 1 ms */
- countdown = 10 + 1;
- while (ioread32 (ioaddr) & (ResetBusy << 16)) {
- if (--countdown == 0) {
- printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
- break;
- }
- udelay(100);
- }
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void sundance_poll_controller(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
-
- disable_irq(np->pci_dev->irq);
- intr_handler(np->pci_dev->irq, dev);
- enable_irq(np->pci_dev->irq);
-}
-#endif
-
-static const struct net_device_ops netdev_ops = {
- .ndo_open = netdev_open,
- .ndo_stop = netdev_close,
- .ndo_start_xmit = start_tx,
- .ndo_get_stats = get_stats,
- .ndo_set_rx_mode = set_rx_mode,
- .ndo_eth_ioctl = netdev_ioctl,
- .ndo_tx_timeout = tx_timeout,
- .ndo_change_mtu = change_mtu,
- .ndo_set_mac_address = sundance_set_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = sundance_poll_controller,
-#endif
-};
-
-static int sundance_probe1(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct net_device *dev;
- struct netdev_private *np;
- static int card_idx;
- int chip_idx = ent->driver_data;
- int irq;
- int i;
- void __iomem *ioaddr;
- u16 mii_ctl;
- void *ring_space;
- dma_addr_t ring_dma;
-#ifdef USE_IO_OPS
- int bar = 0;
-#else
- int bar = 1;
-#endif
- int phy, phy_end, phy_idx = 0;
- __le16 addr[ETH_ALEN / 2];
-
- if (pci_enable_device(pdev))
- return -EIO;
- pci_set_master(pdev);
-
- irq = pdev->irq;
-
- dev = alloc_etherdev(sizeof(*np));
- if (!dev)
- return -ENOMEM;
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- if (pci_request_regions(pdev, DRV_NAME))
- goto err_out_netdev;
-
- ioaddr = pci_iomap(pdev, bar, netdev_io_size);
- if (!ioaddr)
- goto err_out_res;
-
- for (i = 0; i < 3; i++)
- addr[i] =
- cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
- eth_hw_addr_set(dev, (u8 *)addr);
-
- np = netdev_priv(dev);
- np->ndev = dev;
- np->base = ioaddr;
- np->pci_dev = pdev;
- np->chip_id = chip_idx;
- np->msg_enable = (1 << debug) - 1;
- spin_lock_init(&np->lock);
- spin_lock_init(&np->statlock);
- tasklet_setup(&np->rx_tasklet, rx_poll);
- tasklet_setup(&np->tx_tasklet, tx_poll);
-
- ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
- &ring_dma, GFP_KERNEL);
- if (!ring_space)
- goto err_out_cleardev;
- np->tx_ring = (struct netdev_desc *)ring_space;
- np->tx_ring_dma = ring_dma;
-
- ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
- &ring_dma, GFP_KERNEL);
- if (!ring_space)
- goto err_out_unmap_tx;
- np->rx_ring = (struct netdev_desc *)ring_space;
- np->rx_ring_dma = ring_dma;
-
- np->mii_if.dev = dev;
- np->mii_if.mdio_read = mdio_read;
- np->mii_if.mdio_write = mdio_write;
- np->mii_if.phy_id_mask = 0x1f;
- np->mii_if.reg_num_mask = 0x1f;
-
- /* The chip-specific entries in the device structure. */
- dev->netdev_ops = &netdev_ops;
- dev->ethtool_ops = &ethtool_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
-
- /* MTU range: 68 - 8191 */
- dev->min_mtu = ETH_MIN_MTU;
- dev->max_mtu = 8191;
-
- pci_set_drvdata(pdev, dev);
-
- i = register_netdev(dev);
- if (i)
- goto err_out_unmap_rx;
-
- printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
- dev->name, pci_id_tbl[chip_idx].name, ioaddr,
- dev->dev_addr, irq);
-
- np->phys[0] = 1; /* Default setting */
- np->mii_preamble_required++;
-
- /*
- * It seems some PHYs don't deal well with address 0 being accessed
- * first.
- */
- if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
- phy = 0;
- phy_end = 31;
- } else {
- phy = 1;
- phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */
- }
- for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
- int phyx = phy & 0x1f;
- int mii_status = mdio_read(dev, phyx, MII_BMSR);
- if (mii_status != 0xffff && mii_status != 0x0000) {
- np->phys[phy_idx++] = phyx;
- np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
- if ((mii_status & 0x0040) == 0)
- np->mii_preamble_required++;
- printk(KERN_INFO "%s: MII PHY found at address %d, status "
- "0x%4.4x advertising %4.4x.\n",
- dev->name, phyx, mii_status, np->mii_if.advertising);
- }
- }
- np->mii_preamble_required--;
-
- if (phy_idx == 0) {
- printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
- dev->name, ioread32(ioaddr + ASICCtrl));
- goto err_out_unregister;
- }
-
- np->mii_if.phy_id = np->phys[0];
-
- /* Parse override configuration */
- np->an_enable = 1;
- if (card_idx < MAX_UNITS) {
- if (media[card_idx] != NULL) {
- np->an_enable = 0;
- if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
- strcmp (media[card_idx], "4") == 0) {
- np->speed = 100;
- np->mii_if.full_duplex = 1;
- } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
- strcmp (media[card_idx], "3") == 0) {
- np->speed = 100;
- np->mii_if.full_duplex = 0;
- } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
- strcmp (media[card_idx], "2") == 0) {
- np->speed = 10;
- np->mii_if.full_duplex = 1;
- } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
- strcmp (media[card_idx], "1") == 0) {
- np->speed = 10;
- np->mii_if.full_duplex = 0;
- } else {
- np->an_enable = 1;
- }
- }
- if (flowctrl == 1)
- np->flowctrl = 1;
- }
-
- /* Fibre PHY? */
- if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
- /* Default 100Mbps Full */
- if (np->an_enable) {
- np->speed = 100;
- np->mii_if.full_duplex = 1;
- np->an_enable = 0;
- }
- }
- /* Reset PHY */
- mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
- mdelay (300);
- /* If flow control enabled, we need to advertise it.*/
- if (np->flowctrl)
- mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
- mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
- /* Force media type */
- if (!np->an_enable) {
- mii_ctl = 0;
- mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
- mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
- mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
- printk (KERN_INFO "Override speed=%d, %s duplex\n",
- np->speed, np->mii_if.full_duplex ? "Full" : "Half");
-
- }
-
- /* Perhaps move the reset here? */
- /* Reset the chip to erase previous misconfiguration. */
- if (netif_msg_hw(np))
- printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
- sundance_reset(dev, 0x00ff << 16);
- if (netif_msg_hw(np))
- printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
-
- card_idx++;
- return 0;
-
-err_out_unregister:
- unregister_netdev(dev);
-err_out_unmap_rx:
- dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
- np->rx_ring, np->rx_ring_dma);
-err_out_unmap_tx:
- dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
- np->tx_ring, np->tx_ring_dma);
-err_out_cleardev:
- pci_iounmap(pdev, ioaddr);
-err_out_res:
- pci_release_regions(pdev);
-err_out_netdev:
- free_netdev (dev);
- return -ENODEV;
-}
-
-static int change_mtu(struct net_device *dev, int new_mtu)
-{
- if (netif_running(dev))
- return -EBUSY;
- WRITE_ONCE(dev->mtu, new_mtu);
- return 0;
-}
-
-#define eeprom_delay(ee_addr) ioread32(ee_addr)
-/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
-static int eeprom_read(void __iomem *ioaddr, int location)
-{
- int boguscnt = 10000; /* Typical 1900 ticks. */
- iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
- do {
- eeprom_delay(ioaddr + EECtrl);
- if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
- return ioread16(ioaddr + EEData);
- }
- } while (--boguscnt > 0);
- return 0;
-}
-
-/* MII transceiver control section.
- Read and write the MII registers using software-generated serial
- MDIO protocol. See the MII specifications or DP83840A data sheet
- for details.
-
- The maximum data clock rate is 2.5 MHz. The minimum timing is usually
- met by back-to-back 33 MHz PCI cycles. */
-#define mdio_delay() ioread8(mdio_addr)
-
-enum mii_reg_bits {
- MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
-};
-#define MDIO_EnbIn (0)
-#define MDIO_WRITE0 (MDIO_EnbOutput)
-#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
-
-/* Generate the preamble required for initial synchronization and
- a few older transceivers. */
-static void mdio_sync(void __iomem *mdio_addr)
-{
- int bits = 32;
-
- /* Establish sync by sending at least 32 logic ones. */
- while (--bits >= 0) {
- iowrite8(MDIO_WRITE1, mdio_addr);
- mdio_delay();
- iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
- mdio_delay();
- }
-}
-
-static int mdio_read(struct net_device *dev, int phy_id, int location)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *mdio_addr = np->base + MIICtrl;
- int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
- int i, retval = 0;
-
- if (np->mii_preamble_required)
- mdio_sync(mdio_addr);
-
- /* Shift the read command bits out. */
- for (i = 15; i >= 0; i--) {
- int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
-
- iowrite8(dataval, mdio_addr);
- mdio_delay();
- iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
- mdio_delay();
- }
- /* Read the two transition, 16 data, and wire-idle bits. */
- for (i = 19; i > 0; i--) {
- iowrite8(MDIO_EnbIn, mdio_addr);
- mdio_delay();
- retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
- iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
- mdio_delay();
- }
- return (retval>>1) & 0xffff;
-}
-
-static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *mdio_addr = np->base + MIICtrl;
- int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
- int i;
-
- if (np->mii_preamble_required)
- mdio_sync(mdio_addr);
-
- /* Shift the command bits out. */
- for (i = 31; i >= 0; i--) {
- int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
-
- iowrite8(dataval, mdio_addr);
- mdio_delay();
- iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
- mdio_delay();
- }
- /* Clear out extra bits. */
- for (i = 2; i > 0; i--) {
- iowrite8(MDIO_EnbIn, mdio_addr);
- mdio_delay();
- iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
- mdio_delay();
- }
-}
-
-static int mdio_wait_link(struct net_device *dev, int wait)
-{
- int bmsr;
- int phy_id;
- struct netdev_private *np;
-
- np = netdev_priv(dev);
- phy_id = np->phys[0];
-
- do {
- bmsr = mdio_read(dev, phy_id, MII_BMSR);
- if (bmsr & 0x0004)
- return 0;
- mdelay(1);
- } while (--wait > 0);
- return -1;
-}
-
-static int netdev_open(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->base;
- const int irq = np->pci_dev->irq;
- unsigned long flags;
- int i;
-
- sundance_reset(dev, 0x00ff << 16);
-
- i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
- if (i)
- return i;
-
- if (netif_msg_ifup(np))
- printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
-
- init_ring(dev);
-
- iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
- /* The Tx list pointer is written as packets are queued. */
-
- /* Initialize other registers. */
- __set_mac_addr(dev);
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
- iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
-#else
- iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
-#endif
- if (dev->mtu > 2047)
- iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
-
- /* Configure the PCI bus bursts and FIFO thresholds. */
-
- if (dev->if_port == 0)
- dev->if_port = np->default_port;
-
- spin_lock_init(&np->mcastlock);
-
- set_rx_mode(dev);
- iowrite16(0, ioaddr + IntrEnable);
- iowrite16(0, ioaddr + DownCounter);
- /* Set the chip to poll every N*320nsec. */
- iowrite8(100, ioaddr + RxDMAPollPeriod);
- iowrite8(127, ioaddr + TxDMAPollPeriod);
- /* Fix DFE-580TX packet drop issue */
- if (np->pci_dev->revision >= 0x14)
- iowrite8(0x01, ioaddr + DebugCtrl1);
- netif_start_queue(dev);
-
- spin_lock_irqsave(&np->lock, flags);
- reset_tx(dev);
- spin_unlock_irqrestore(&np->lock, flags);
-
- iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
-
- /* Disable Wol */
- iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
- np->wol_enabled = 0;
-
- if (netif_msg_ifup(np))
- printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
- "MAC Control %x, %4.4x %4.4x.\n",
- dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
- ioread32(ioaddr + MACCtrl0),
- ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
-
- /* Set the timer to check for link beat. */
- timer_setup(&np->timer, netdev_timer, 0);
- np->timer.expires = jiffies + 3*HZ;
- add_timer(&np->timer);
-
- /* Enable interrupts by setting the interrupt mask. */
- iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
-
- return 0;
-}
-
-static void check_duplex(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->base;
- int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
- int negotiated = mii_lpa & np->mii_if.advertising;
- int duplex;
-
- /* Force media */
- if (!np->an_enable || mii_lpa == 0xffff) {
- if (np->mii_if.full_duplex)
- iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
- ioaddr + MACCtrl0);
- return;
- }
-
- /* Autonegotiation */
- duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
- if (np->mii_if.full_duplex != duplex) {
- np->mii_if.full_duplex = duplex;
- if (netif_msg_link(np))
- printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
- "negotiated capability %4.4x.\n", dev->name,
- duplex ? "full" : "half", np->phys[0], negotiated);
- iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
- }
-}
-
-static void netdev_timer(struct timer_list *t)
-{
- struct netdev_private *np = from_timer(np, t, timer);
- struct net_device *dev = np->mii_if.dev;
- void __iomem *ioaddr = np->base;
- int next_tick = 10*HZ;
-
- if (netif_msg_timer(np)) {
- printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
- "Tx %x Rx %x.\n",
- dev->name, ioread16(ioaddr + IntrEnable),
- ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
- }
- check_duplex(dev);
- np->timer.expires = jiffies + next_tick;
- add_timer(&np->timer);
-}
-
-static void tx_timeout(struct net_device *dev, unsigned int txqueue)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->base;
- unsigned long flag;
-
- netif_stop_queue(dev);
- tasklet_disable_in_atomic(&np->tx_tasklet);
- iowrite16(0, ioaddr + IntrEnable);
- printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
- "TxFrameId %2.2x,"
- " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
- ioread8(ioaddr + TxFrameId));
-
- {
- int i;
- for (i=0; i<TX_RING_SIZE; i++) {
- printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
- (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
- le32_to_cpu(np->tx_ring[i].next_desc),
- le32_to_cpu(np->tx_ring[i].status),
- (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
- le32_to_cpu(np->tx_ring[i].frag.addr),
- le32_to_cpu(np->tx_ring[i].frag.length));
- }
- printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
- ioread32(np->base + TxListPtr),
- netif_queue_stopped(dev));
- printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
- np->cur_tx, np->cur_tx % TX_RING_SIZE,
- np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
- printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
- printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
- }
- spin_lock_irqsave(&np->lock, flag);
-
- /* Stop and restart the chip's Tx processes. */
- reset_tx(dev);
- spin_unlock_irqrestore(&np->lock, flag);
-
- dev->if_port = 0;
-
- netif_trans_update(dev); /* prevent tx timeout */
- dev->stats.tx_errors++;
- if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
- netif_wake_queue(dev);
- }
- iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
- tasklet_enable(&np->tx_tasklet);
-}
-
-
-/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
-static void init_ring(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- int i;
-
- np->cur_rx = np->cur_tx = 0;
- np->dirty_rx = np->dirty_tx = 0;
- np->cur_task = 0;
-
- np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
-
- /* Initialize all Rx descriptors. */
- for (i = 0; i < RX_RING_SIZE; i++) {
- np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
- ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
- np->rx_ring[i].status = 0;
- np->rx_ring[i].frag.length = 0;
- np->rx_skbuff[i] = NULL;
- }
-
- /* Fill in the Rx buffers. Handle allocation failure gracefully. */
- for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb =
- netdev_alloc_skb(dev, np->rx_buf_sz + 2);
- np->rx_skbuff[i] = skb;
- if (skb == NULL)
- break;
- skb_reserve(skb, 2); /* 16 byte align the IP header. */
- np->rx_ring[i].frag.addr = cpu_to_le32(
- dma_map_single(&np->pci_dev->dev, skb->data,
- np->rx_buf_sz, DMA_FROM_DEVICE));
- if (dma_mapping_error(&np->pci_dev->dev,
- np->rx_ring[i].frag.addr)) {
- dev_kfree_skb(skb);
- np->rx_skbuff[i] = NULL;
- break;
- }
- np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag);
- }
- np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
-
- for (i = 0; i < TX_RING_SIZE; i++) {
- np->tx_skbuff[i] = NULL;
- np->tx_ring[i].status = 0;
- }
-}
-
-static void tx_poll(struct tasklet_struct *t)
-{
- struct netdev_private *np = from_tasklet(np, t, tx_tasklet);
- unsigned head = np->cur_task % TX_RING_SIZE;
- struct netdev_desc *txdesc =
- &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
-
- /* Chain the next pointer */
- for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
- int entry = np->cur_task % TX_RING_SIZE;
- txdesc = &np->tx_ring[entry];
- if (np->last_tx) {
- np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
- entry*sizeof(struct netdev_desc));
- }
- np->last_tx = txdesc;
- }
- /* Indicate the latest descriptor of tx ring */
- txdesc->status |= cpu_to_le32(DescIntrOnTx);
-
- if (ioread32 (np->base + TxListPtr) == 0)
- iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
- np->base + TxListPtr);
-}
-
-static netdev_tx_t
-start_tx (struct sk_buff *skb, struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- struct netdev_desc *txdesc;
- unsigned entry;
-
- /* Calculate the next Tx descriptor entry. */
- entry = np->cur_tx % TX_RING_SIZE;
- np->tx_skbuff[entry] = skb;
- txdesc = &np->tx_ring[entry];
-
- txdesc->next_desc = 0;
- txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
- txdesc->frag.addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
- skb->data, skb->len, DMA_TO_DEVICE));
- if (dma_mapping_error(&np->pci_dev->dev,
- txdesc->frag.addr))
- goto drop_frame;
- txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag);
-
- /* Increment cur_tx before tasklet_schedule() */
- np->cur_tx++;
- mb();
- /* Schedule a tx_poll() task */
- tasklet_schedule(&np->tx_tasklet);
-
- /* On some architectures: explicitly flush cache lines here. */
- if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
- !netif_queue_stopped(dev)) {
- /* do nothing */
- } else {
- netif_stop_queue (dev);
- }
- if (netif_msg_tx_queued(np)) {
- printk (KERN_DEBUG
- "%s: Transmit frame #%d queued in slot %d.\n",
- dev->name, np->cur_tx, entry);
- }
- return NETDEV_TX_OK;
-
-drop_frame:
- dev_kfree_skb_any(skb);
- np->tx_skbuff[entry] = NULL;
- dev->stats.tx_dropped++;
- return NETDEV_TX_OK;
-}
-
-/* Reset hardware tx and free all of tx buffers */
-static int
-reset_tx (struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->base;
- struct sk_buff *skb;
- int i;
-
- /* Reset tx logic, TxListPtr will be cleaned */
- iowrite16 (TxDisable, ioaddr + MACCtrl1);
- sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
-
- /* free all tx skbuff */
- for (i = 0; i < TX_RING_SIZE; i++) {
- np->tx_ring[i].next_desc = 0;
-
- skb = np->tx_skbuff[i];
- if (skb) {
- dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[i].frag.addr),
- skb->len, DMA_TO_DEVICE);
- dev_kfree_skb_any(skb);
- np->tx_skbuff[i] = NULL;
- dev->stats.tx_dropped++;
- }
- }
- np->cur_tx = np->dirty_tx = 0;
- np->cur_task = 0;
-
- np->last_tx = NULL;
- iowrite8(127, ioaddr + TxDMAPollPeriod);
-
- iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
- return 0;
-}
-
-/* The interrupt handler cleans up after the Tx thread,
- and schedules Rx thread work */
-static irqreturn_t intr_handler(int irq, void *dev_instance)
-{
- struct net_device *dev = (struct net_device *)dev_instance;
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->base;
- int hw_frame_id;
- int tx_cnt;
- int tx_status;
- int handled = 0;
- int i;
-
- do {
- int intr_status = ioread16(ioaddr + IntrStatus);
- iowrite16(intr_status, ioaddr + IntrStatus);
-
- if (netif_msg_intr(np))
- printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
- dev->name, intr_status);
-
- if (!(intr_status & DEFAULT_INTR))
- break;
-
- handled = 1;
-
- if (intr_status & (IntrRxDMADone)) {
- iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
- ioaddr + IntrEnable);
- if (np->budget < 0)
- np->budget = RX_BUDGET;
- tasklet_schedule(&np->rx_tasklet);
- }
- if (intr_status & (IntrTxDone | IntrDrvRqst)) {
- tx_status = ioread16 (ioaddr + TxStatus);
- for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
- if (netif_msg_tx_done(np))
- printk
- ("%s: Transmit status is %2.2x.\n",
- dev->name, tx_status);
- if (tx_status & 0x1e) {
- if (netif_msg_tx_err(np))
- printk("%s: Transmit error status %4.4x.\n",
- dev->name, tx_status);
- dev->stats.tx_errors++;
- if (tx_status & 0x10)
- dev->stats.tx_fifo_errors++;
- if (tx_status & 0x08)
- dev->stats.collisions++;
- if (tx_status & 0x04)
- dev->stats.tx_fifo_errors++;
- if (tx_status & 0x02)
- dev->stats.tx_window_errors++;
-
- /*
- ** This reset has been verified on
- ** DFE-580TX boards ! phdm@macqel.be.
- */
- if (tx_status & 0x10) { /* TxUnderrun */
- /* Restart Tx FIFO and transmitter */
- sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
- /* No need to reset the Tx pointer here */
- }
- /* Restart the Tx. Need to make sure Tx is enabled */
- i = 10;
- do {
- iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
- if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
- break;
- mdelay(1);
- } while (--i);
- }
- /* Yup, this is a documentation bug. It cost me *hours*. */
- iowrite16 (0, ioaddr + TxStatus);
- if (tx_cnt < 0) {
- iowrite32(5000, ioaddr + DownCounter);
- break;
- }
- tx_status = ioread16 (ioaddr + TxStatus);
- }
- hw_frame_id = (tx_status >> 8) & 0xff;
- } else {
- hw_frame_id = ioread8(ioaddr + TxFrameId);
- }
-
- if (np->pci_dev->revision >= 0x14) {
- spin_lock(&np->lock);
- for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
- int entry = np->dirty_tx % TX_RING_SIZE;
- struct sk_buff *skb;
- int sw_frame_id;
- sw_frame_id = (le32_to_cpu(
- np->tx_ring[entry].status) >> 2) & 0xff;
- if (sw_frame_id == hw_frame_id &&
- !(le32_to_cpu(np->tx_ring[entry].status)
- & 0x00010000))
- break;
- if (sw_frame_id == (hw_frame_id + 1) %
- TX_RING_SIZE)
- break;
- skb = np->tx_skbuff[entry];
- /* Free the original skb. */
- dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[entry].frag.addr),
- skb->len, DMA_TO_DEVICE);
- dev_consume_skb_irq(np->tx_skbuff[entry]);
- np->tx_skbuff[entry] = NULL;
- np->tx_ring[entry].frag.addr = 0;
- np->tx_ring[entry].frag.length = 0;
- }
- spin_unlock(&np->lock);
- } else {
- spin_lock(&np->lock);
- for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
- int entry = np->dirty_tx % TX_RING_SIZE;
- struct sk_buff *skb;
- if (!(le32_to_cpu(np->tx_ring[entry].status)
- & 0x00010000))
- break;
- skb = np->tx_skbuff[entry];
- /* Free the original skb. */
- dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[entry].frag.addr),
- skb->len, DMA_TO_DEVICE);
- dev_consume_skb_irq(np->tx_skbuff[entry]);
- np->tx_skbuff[entry] = NULL;
- np->tx_ring[entry].frag.addr = 0;
- np->tx_ring[entry].frag.length = 0;
- }
- spin_unlock(&np->lock);
- }
-
- if (netif_queue_stopped(dev) &&
- np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
- /* The ring is no longer full, clear busy flag. */
- netif_wake_queue (dev);
- }
- /* Abnormal error summary/uncommon events handlers. */
- if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
- netdev_error(dev, intr_status);
- } while (0);
- if (netif_msg_intr(np))
- printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
- dev->name, ioread16(ioaddr + IntrStatus));
- return IRQ_RETVAL(handled);
-}
-
-static void rx_poll(struct tasklet_struct *t)
-{
- struct netdev_private *np = from_tasklet(np, t, rx_tasklet);
- struct net_device *dev = np->ndev;
- int entry = np->cur_rx % RX_RING_SIZE;
- int boguscnt = np->budget;
- void __iomem *ioaddr = np->base;
- int received = 0;
-
- /* If EOP is set on the next entry, it's a new packet. Send it up. */
- while (1) {
- struct netdev_desc *desc = &(np->rx_ring[entry]);
- u32 frame_status = le32_to_cpu(desc->status);
- int pkt_len;
-
- if (--boguscnt < 0) {
- goto not_done;
- }
- if (!(frame_status & DescOwn))
- break;
- pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
- if (netif_msg_rx_status(np))
- printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
- frame_status);
- if (frame_status & 0x001f4000) {
- /* There was an error. */
- if (netif_msg_rx_err(np))
- printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
- frame_status);
- dev->stats.rx_errors++;
- if (frame_status & 0x00100000)
- dev->stats.rx_length_errors++;
- if (frame_status & 0x00010000)
- dev->stats.rx_fifo_errors++;
- if (frame_status & 0x00060000)
- dev->stats.rx_frame_errors++;
- if (frame_status & 0x00080000)
- dev->stats.rx_crc_errors++;
- if (frame_status & 0x00100000) {
- printk(KERN_WARNING "%s: Oversized Ethernet frame,"
- " status %8.8x.\n",
- dev->name, frame_status);
- }
- } else {
- struct sk_buff *skb;
-#ifndef final_version
- if (netif_msg_rx_status(np))
- printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
- ", bogus_cnt %d.\n",
- pkt_len, boguscnt);
-#endif
- /* Check if the packet is long enough to accept without copying
- to a minimally-sized skbuff. */
- if (pkt_len < rx_copybreak &&
- (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
- skb_reserve(skb, 2); /* 16 byte align the IP header */
- dma_sync_single_for_cpu(&np->pci_dev->dev,
- le32_to_cpu(desc->frag.addr),
- np->rx_buf_sz, DMA_FROM_DEVICE);
- skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
- dma_sync_single_for_device(&np->pci_dev->dev,
- le32_to_cpu(desc->frag.addr),
- np->rx_buf_sz, DMA_FROM_DEVICE);
- skb_put(skb, pkt_len);
- } else {
- dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(desc->frag.addr),
- np->rx_buf_sz, DMA_FROM_DEVICE);
- skb_put(skb = np->rx_skbuff[entry], pkt_len);
- np->rx_skbuff[entry] = NULL;
- }
- skb->protocol = eth_type_trans(skb, dev);
- /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
- netif_rx(skb);
- }
- entry = (entry + 1) % RX_RING_SIZE;
- received++;
- }
- np->cur_rx = entry;
- refill_rx (dev);
- np->budget -= received;
- iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
- return;
-
-not_done:
- np->cur_rx = entry;
- refill_rx (dev);
- if (!received)
- received = 1;
- np->budget -= received;
- if (np->budget <= 0)
- np->budget = RX_BUDGET;
- tasklet_schedule(&np->rx_tasklet);
-}
-
-static void refill_rx (struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- int entry;
-
- /* Refill the Rx ring buffers. */
- for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
- np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
- struct sk_buff *skb;
- entry = np->dirty_rx % RX_RING_SIZE;
- if (np->rx_skbuff[entry] == NULL) {
- skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
- np->rx_skbuff[entry] = skb;
- if (skb == NULL)
- break; /* Better luck next round. */
- skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- np->rx_ring[entry].frag.addr = cpu_to_le32(
- dma_map_single(&np->pci_dev->dev, skb->data,
- np->rx_buf_sz, DMA_FROM_DEVICE));
- if (dma_mapping_error(&np->pci_dev->dev,
- np->rx_ring[entry].frag.addr)) {
- dev_kfree_skb_irq(skb);
- np->rx_skbuff[entry] = NULL;
- break;
- }
- }
- /* Perhaps we need not reset this field. */
- np->rx_ring[entry].frag.length =
- cpu_to_le32(np->rx_buf_sz | LastFrag);
- np->rx_ring[entry].status = 0;
- }
-}
-static void netdev_error(struct net_device *dev, int intr_status)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->base;
- u16 mii_ctl, mii_advertise, mii_lpa;
- int speed;
-
- if (intr_status & LinkChange) {
- if (mdio_wait_link(dev, 10) == 0) {
- printk(KERN_INFO "%s: Link up\n", dev->name);
- if (np->an_enable) {
- mii_advertise = mdio_read(dev, np->phys[0],
- MII_ADVERTISE);
- mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
- mii_advertise &= mii_lpa;
- printk(KERN_INFO "%s: Link changed: ",
- dev->name);
- if (mii_advertise & ADVERTISE_100FULL) {
- np->speed = 100;
- printk("100Mbps, full duplex\n");
- } else if (mii_advertise & ADVERTISE_100HALF) {
- np->speed = 100;
- printk("100Mbps, half duplex\n");
- } else if (mii_advertise & ADVERTISE_10FULL) {
- np->speed = 10;
- printk("10Mbps, full duplex\n");
- } else if (mii_advertise & ADVERTISE_10HALF) {
- np->speed = 10;
- printk("10Mbps, half duplex\n");
- } else
- printk("\n");
-
- } else {
- mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
- speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
- np->speed = speed;
- printk(KERN_INFO "%s: Link changed: %dMbps ,",
- dev->name, speed);
- printk("%s duplex.\n",
- (mii_ctl & BMCR_FULLDPLX) ?
- "full" : "half");
- }
- check_duplex(dev);
- if (np->flowctrl && np->mii_if.full_duplex) {
- iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
- ioaddr + MulticastFilter1+2);
- iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
- ioaddr + MACCtrl0);
- }
- netif_carrier_on(dev);
- } else {
- printk(KERN_INFO "%s: Link down\n", dev->name);
- netif_carrier_off(dev);
- }
- }
- if (intr_status & StatsMax) {
- get_stats(dev);
- }
- if (intr_status & IntrPCIErr) {
- printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
- dev->name, intr_status);
- /* We must do a global reset of DMA to continue. */
- }
-}
-
-static struct net_device_stats *get_stats(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->base;
- unsigned long flags;
- u8 late_coll, single_coll, mult_coll;
-
- spin_lock_irqsave(&np->statlock, flags);
- /* The chip only needs to report frames it silently dropped. */
- dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
- dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
- dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
- dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
-
- mult_coll = ioread8(ioaddr + StatsMultiColl);
- np->xstats.tx_multiple_collisions += mult_coll;
- single_coll = ioread8(ioaddr + StatsOneColl);
- np->xstats.tx_single_collisions += single_coll;
- late_coll = ioread8(ioaddr + StatsLateColl);
- np->xstats.tx_late_collisions += late_coll;
- dev->stats.collisions += mult_coll
- + single_coll
- + late_coll;
-
- np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
- np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
- np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
- np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
- np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
- np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
- np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
-
- dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
- dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
- dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
- dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
-
- spin_unlock_irqrestore(&np->statlock, flags);
-
- return &dev->stats;
-}
-
-static void set_rx_mode(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->base;
- u16 mc_filter[4]; /* Multicast hash filter */
- u32 rx_mode;
- int i;
-
- if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
- memset(mc_filter, 0xff, sizeof(mc_filter));
- rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
- } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
- (dev->flags & IFF_ALLMULTI)) {
- /* Too many to match, or accept all multicasts. */
- memset(mc_filter, 0xff, sizeof(mc_filter));
- rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
- } else if (!netdev_mc_empty(dev)) {
- struct netdev_hw_addr *ha;
- int bit;
- int index;
- int crc;
- memset (mc_filter, 0, sizeof (mc_filter));
- netdev_for_each_mc_addr(ha, dev) {
- crc = ether_crc_le(ETH_ALEN, ha->addr);
- for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
- if (crc & 0x80000000) index |= 1 << bit;
- mc_filter[index/16] |= (1 << (index % 16));
- }
- rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
- } else {
- iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
- return;
- }
- if (np->mii_if.full_duplex && np->flowctrl)
- mc_filter[3] |= 0x0200;
-
- for (i = 0; i < 4; i++)
- iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
- iowrite8(rx_mode, ioaddr + RxMode);
-}
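
set_rx_mode() above derives a 64-bin multicast hash from the little-endian CRC-32 of each address: the six most significant CRC bits select one bit across the four 16-bit filter words. A standalone sketch of that computation; the CRC routine below is a plain bitwise CRC-32 intended to mirror the kernel's ether_crc_le():

#include <stdint.h>
#include <stdio.h>

/* Bitwise little-endian CRC-32 (polynomial 0xedb88320). */
static uint32_t ether_crc_le(int len, const uint8_t *addr)
{
	uint32_t crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= addr[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint16_t mc_filter[4] = { 0 };
	uint32_t crc = ether_crc_le(6, mac);
	int index = 0, bit;

	/* Same loop as the driver: peel off the six high CRC bits. */
	for (bit = 0; bit < 6; bit++, crc <<= 1)
		if (crc & 0x80000000)
			index |= 1 << bit;

	mc_filter[index / 16] |= 1 << (index % 16);
	printf("index %d -> word %d, bit %d\n", index, index / 16, index % 16);
	return 0;
}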
-
-static int __set_mac_addr(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- u16 addr16;
-
- addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
- iowrite16(addr16, np->base + StationAddr);
- addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
- iowrite16(addr16, np->base + StationAddr+2);
- addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
- iowrite16(addr16, np->base + StationAddr+4);
- return 0;
-}
-
-/* Invoked with rtnl_lock held */
-static int sundance_set_mac_addr(struct net_device *dev, void *data)
-{
- const struct sockaddr *addr = data;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
- eth_hw_addr_set(dev, addr->sa_data);
- __set_mac_addr(dev);
-
- return 0;
-}
-
-static const struct {
- const char name[ETH_GSTRING_LEN];
-} sundance_stats[] = {
- { "tx_multiple_collisions" },
- { "tx_single_collisions" },
- { "tx_late_collisions" },
- { "tx_deferred" },
- { "tx_deferred_excessive" },
- { "tx_aborted" },
- { "tx_bcasts" },
- { "rx_bcasts" },
- { "tx_mcasts" },
- { "rx_mcasts" },
-};
-
-static int check_if_running(struct net_device *dev)
-{
- if (!netif_running(dev))
- return -EINVAL;
- return 0;
-}
-
-static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- struct netdev_private *np = netdev_priv(dev);
- strscpy(info->driver, DRV_NAME, sizeof(info->driver));
- strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
-}
-
-static int get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct netdev_private *np = netdev_priv(dev);
- spin_lock_irq(&np->lock);
- mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
- spin_unlock_irq(&np->lock);
- return 0;
-}
-
-static int set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct netdev_private *np = netdev_priv(dev);
- int res;
- spin_lock_irq(&np->lock);
- res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
- spin_unlock_irq(&np->lock);
- return res;
-}
-
-static int nway_reset(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- return mii_nway_restart(&np->mii_if);
-}
-
-static u32 get_link(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- return mii_link_ok(&np->mii_if);
-}
-
-static u32 get_msglevel(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- return np->msg_enable;
-}
-
-static void set_msglevel(struct net_device *dev, u32 val)
-{
- struct netdev_private *np = netdev_priv(dev);
- np->msg_enable = val;
-}
-
-static void get_strings(struct net_device *dev, u32 stringset,
- u8 *data)
-{
- if (stringset == ETH_SS_STATS)
- memcpy(data, sundance_stats, sizeof(sundance_stats));
-}
-
-static int get_sset_count(struct net_device *dev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return ARRAY_SIZE(sundance_stats);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct netdev_private *np = netdev_priv(dev);
- int i = 0;
-
- get_stats(dev);
- data[i++] = np->xstats.tx_multiple_collisions;
- data[i++] = np->xstats.tx_single_collisions;
- data[i++] = np->xstats.tx_late_collisions;
- data[i++] = np->xstats.tx_deferred;
- data[i++] = np->xstats.tx_deferred_excessive;
- data[i++] = np->xstats.tx_aborted;
- data[i++] = np->xstats.tx_bcasts;
- data[i++] = np->xstats.rx_bcasts;
- data[i++] = np->xstats.tx_mcasts;
- data[i++] = np->xstats.rx_mcasts;
-}
-
-#ifdef CONFIG_PM
-
-static void sundance_get_wol(struct net_device *dev,
- struct ethtool_wolinfo *wol)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->base;
- u8 wol_bits;
-
- wol->wolopts = 0;
-
- wol->supported = (WAKE_PHY | WAKE_MAGIC);
- if (!np->wol_enabled)
- return;
-
- wol_bits = ioread8(ioaddr + WakeEvent);
- if (wol_bits & MagicPktEnable)
- wol->wolopts |= WAKE_MAGIC;
- if (wol_bits & LinkEventEnable)
- wol->wolopts |= WAKE_PHY;
-}
-
-static int sundance_set_wol(struct net_device *dev,
- struct ethtool_wolinfo *wol)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->base;
- u8 wol_bits;
-
- if (!device_can_wakeup(&np->pci_dev->dev))
- return -EOPNOTSUPP;
-
- np->wol_enabled = !!(wol->wolopts);
- wol_bits = ioread8(ioaddr + WakeEvent);
- wol_bits &= ~(WakePktEnable | MagicPktEnable |
- LinkEventEnable | WolEnable);
-
- if (np->wol_enabled) {
- if (wol->wolopts & WAKE_MAGIC)
- wol_bits |= (MagicPktEnable | WolEnable);
- if (wol->wolopts & WAKE_PHY)
- wol_bits |= (LinkEventEnable | WolEnable);
- }
- iowrite8(wol_bits, ioaddr + WakeEvent);
-
- device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
-
- return 0;
-}
-#else
-#define sundance_get_wol NULL
-#define sundance_set_wol NULL
-#endif /* CONFIG_PM */
-
-static const struct ethtool_ops ethtool_ops = {
- .begin = check_if_running,
- .get_drvinfo = get_drvinfo,
- .nway_reset = nway_reset,
- .get_link = get_link,
- .get_wol = sundance_get_wol,
- .set_wol = sundance_set_wol,
- .get_msglevel = get_msglevel,
- .set_msglevel = set_msglevel,
- .get_strings = get_strings,
- .get_sset_count = get_sset_count,
- .get_ethtool_stats = get_ethtool_stats,
- .get_link_ksettings = get_link_ksettings,
- .set_link_ksettings = set_link_ksettings,
-};
-
-static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct netdev_private *np = netdev_priv(dev);
- int rc;
-
- if (!netif_running(dev))
- return -EINVAL;
-
- spin_lock_irq(&np->lock);
- rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
- spin_unlock_irq(&np->lock);
-
- return rc;
-}
-
-static int netdev_close(struct net_device *dev)
-{
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->base;
- struct sk_buff *skb;
- int i;
-
- /* Wait and kill tasklet */
- tasklet_kill(&np->rx_tasklet);
- tasklet_kill(&np->tx_tasklet);
- np->cur_tx = 0;
- np->dirty_tx = 0;
- np->cur_task = 0;
- np->last_tx = NULL;
-
- netif_stop_queue(dev);
-
- if (netif_msg_ifdown(np)) {
- printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
- "Rx %4.4x Int %2.2x.\n",
- dev->name, ioread8(ioaddr + TxStatus),
- ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
- printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
- dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
- }
-
- /* Disable interrupts by clearing the interrupt mask. */
- iowrite16(0x0000, ioaddr + IntrEnable);
-
- /* Disable Rx and Tx DMA to safely release resources */
- iowrite32(0x500, ioaddr + DMACtrl);
-
- /* Stop the chip's Tx and Rx processes. */
- iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
-
- for (i = 2000; i > 0; i--) {
- if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
- break;
- mdelay(1);
- }
-
- iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
- ioaddr + ASIC_HI_WORD(ASICCtrl));
-
- for (i = 2000; i > 0; i--) {
- if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
- break;
- mdelay(1);
- }
-
-#ifdef __i386__
- if (netif_msg_hw(np)) {
- printk(KERN_DEBUG " Tx ring at %8.8x:\n",
- (int)(np->tx_ring_dma));
- for (i = 0; i < TX_RING_SIZE; i++)
- printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
- i, np->tx_ring[i].status, np->tx_ring[i].frag.addr,
- np->tx_ring[i].frag.length);
- printk(KERN_DEBUG " Rx ring %8.8x:\n",
- (int)(np->rx_ring_dma));
- for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
- printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
- i, np->rx_ring[i].status, np->rx_ring[i].frag.addr,
- np->rx_ring[i].frag.length);
- }
- }
-#endif /* __i386__ debugging only */
-
- free_irq(np->pci_dev->irq, dev);
-
- del_timer_sync(&np->timer);
-
- /* Free all the skbuffs in the Rx queue. */
- for (i = 0; i < RX_RING_SIZE; i++) {
- np->rx_ring[i].status = 0;
- skb = np->rx_skbuff[i];
- if (skb) {
- dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->rx_ring[i].frag.addr),
- np->rx_buf_sz, DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
- np->rx_skbuff[i] = NULL;
- }
- np->rx_ring[i].frag.addr = cpu_to_le32(0xBADF00D0); /* poison */
- }
- for (i = 0; i < TX_RING_SIZE; i++) {
- np->tx_ring[i].next_desc = 0;
- skb = np->tx_skbuff[i];
- if (skb) {
- dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[i].frag.addr),
- skb->len, DMA_TO_DEVICE);
- dev_kfree_skb(skb);
- np->tx_skbuff[i] = NULL;
- }
- }
-
- return 0;
-}
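
netdev_close() above open-codes two bounded waits (2000 iterations of mdelay(1)) for DMA stop and reset completion. Current kernel code usually expresses this with readl_poll_timeout() from <linux/iopoll.h>; a hedged sketch of the DMA wait, where the helper name is hypothetical and the 0xc000 mask is taken from the loop above:

#include <linux/iopoll.h>

/* Hypothetical replacement for the first mdelay() loop: poll every
 * 1 ms, give up after 2 s, succeed once the DMA-busy bits clear.
 * readl_poll_timeout() may sleep, so this only fits process
 * context, which netdev_close() is.
 */
static int sundance_wait_dma_idle(void __iomem *dmactrl)
{
	u32 val;

	return readl_poll_timeout(dmactrl, val, !(val & 0xc000),
				  1000, 2000 * 1000);
}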
-
-static void sundance_remove1(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
-
- if (dev) {
- struct netdev_private *np = netdev_priv(dev);
- unregister_netdev(dev);
- dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
- np->rx_ring, np->rx_ring_dma);
- dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
- np->tx_ring, np->tx_ring_dma);
- pci_iounmap(pdev, np->base);
- pci_release_regions(pdev);
- free_netdev(dev);
- }
-}
-
-static int __maybe_unused sundance_suspend(struct device *dev_d)
-{
- struct net_device *dev = dev_get_drvdata(dev_d);
- struct netdev_private *np = netdev_priv(dev);
- void __iomem *ioaddr = np->base;
-
- if (!netif_running(dev))
- return 0;
-
- netdev_close(dev);
- netif_device_detach(dev);
-
- if (np->wol_enabled) {
- iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
- iowrite16(RxEnable, ioaddr + MACCtrl1);
- }
-
- device_set_wakeup_enable(dev_d, np->wol_enabled);
-
- return 0;
-}
-
-static int __maybe_unused sundance_resume(struct device *dev_d)
-{
- struct net_device *dev = dev_get_drvdata(dev_d);
- int err = 0;
-
- if (!netif_running(dev))
- return 0;
-
- err = netdev_open(dev);
- if (err) {
- printk(KERN_ERR "%s: Can't resume interface!\n",
- dev->name);
- goto out;
- }
-
- netif_device_attach(dev);
-
-out:
- return err;
-}
-
-static SIMPLE_DEV_PM_OPS(sundance_pm_ops, sundance_suspend, sundance_resume);
-
-static struct pci_driver sundance_driver = {
- .name = DRV_NAME,
- .id_table = sundance_pci_tbl,
- .probe = sundance_probe1,
- .remove = sundance_remove1,
- .driver.pm = &sundance_pm_ops,
-};
-
-module_pci_driver(sundance_driver);
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 2a18df3605f1..0de3cd660ec8 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -863,7 +863,7 @@ static void dnet_remove(struct platform_device *pdev)
static struct platform_driver dnet_driver = {
.probe = dnet_probe,
- .remove_new = dnet_remove,
+ .remove = dnet_remove,
.driver = {
.name = "dnet",
},
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 44da335d66bd..95a5295d0361 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -2689,7 +2689,7 @@ static struct platform_driver tsnep_driver = {
.of_match_table = tsnep_of_match,
},
.probe = tsnep_probe,
- .remove_new = tsnep_remove,
+ .remove = tsnep_remove,
};
module_platform_driver(tsnep_driver);
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index ad41c9019018..0c418557264c 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1296,7 +1296,7 @@ MODULE_DEVICE_TABLE(of, ethoc_match);
static struct platform_driver ethoc_driver = {
.probe = ethoc_probe,
- .remove_new = ethoc_remove,
+ .remove = ethoc_remove,
.suspend = ethoc_suspend,
.resume = ethoc_resume,
.driver = {
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 9ebe751c1df0..5cb478e98697 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -651,7 +651,7 @@ MODULE_DEVICE_TABLE(of, nps_enet_dt_ids);
static struct platform_driver nps_enet_driver = {
.probe = nps_enet_probe,
- .remove_new = nps_enet_remove,
+ .remove = nps_enet_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = nps_enet_dt_ids,
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 0b61f548fd18..17ec35e75a65 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1913,40 +1913,22 @@ static int ftgmac100_probe(struct platform_device *pdev)
goto err_phy_connect;
}
err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link,
- PHY_INTERFACE_MODE_MII);
+ PHY_INTERFACE_MODE_RMII);
if (err) {
dev_err(&pdev->dev, "Connecting PHY failed\n");
goto err_phy_connect;
}
- } else if (np && of_phy_is_fixed_link(np)) {
- struct phy_device *phy;
-
- err = of_phy_register_fixed_link(np);
- if (err) {
- dev_err(&pdev->dev, "Failed to register fixed PHY\n");
- goto err_phy_connect;
- }
-
- phy = of_phy_get_and_connect(priv->netdev, np,
- &ftgmac100_adjust_link);
- if (!phy) {
- dev_err(&pdev->dev, "Failed to connect to fixed PHY\n");
- of_phy_deregister_fixed_link(np);
- err = -EINVAL;
- goto err_phy_connect;
- }
-
- /* Display what we found */
- phy_attached_info(phy);
- } else if (np && of_get_property(np, "phy-handle", NULL)) {
+ } else if (np && (of_phy_is_fixed_link(np) ||
+ of_get_property(np, "phy-handle", NULL))) {
struct phy_device *phy;
/* Support "mdio"/"phy" child nodes for ast2400/2500 with
* an embedded MDIO controller. Automatically scan the DTS for
* available PHYs and register them.
*/
- if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
- of_device_is_compatible(np, "aspeed,ast2500-mac")) {
+ if (of_get_property(np, "phy-handle", NULL) &&
+ (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
+ of_device_is_compatible(np, "aspeed,ast2500-mac"))) {
err = ftgmac100_setup_mdio(netdev);
if (err)
goto err_setup_mdio;
@@ -2089,7 +2071,7 @@ MODULE_DEVICE_TABLE(of, ftgmac100_of_match);
static struct platform_driver ftgmac100_driver = {
.probe = ftgmac100_probe,
- .remove_new = ftgmac100_remove,
+ .remove = ftgmac100_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = ftgmac100_of_match,
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 1047c805054e..5803a382f0ba 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1243,7 +1243,7 @@ static const struct of_device_id ftmac100_of_ids[] = {
static struct platform_driver ftmac100_driver = {
.probe = ftmac100_probe,
- .remove_new = ftmac100_remove,
+ .remove = ftmac100_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = ftmac100_of_ids
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index e15dd3d858df..bf5baef5c3e0 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -463,6 +463,22 @@ static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
return 0;
}
+static int dpaa_addr_sync(struct net_device *net_dev, const u8 *addr)
+{
+ const struct dpaa_priv *priv = netdev_priv(net_dev);
+
+ return priv->mac_dev->add_hash_mac_addr(priv->mac_dev->fman_mac,
+ (enet_addr_t *)addr);
+}
+
+static int dpaa_addr_unsync(struct net_device *net_dev, const u8 *addr)
+{
+ const struct dpaa_priv *priv = netdev_priv(net_dev);
+
+ return priv->mac_dev->remove_hash_mac_addr(priv->mac_dev->fman_mac,
+ (enet_addr_t *)addr);
+}
+
static void dpaa_set_rx_mode(struct net_device *net_dev)
{
const struct dpaa_priv *priv;
@@ -490,9 +506,9 @@ static void dpaa_set_rx_mode(struct net_device *net_dev)
err);
}
- err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
+ err = __dev_mc_sync(net_dev, dpaa_addr_sync, dpaa_addr_unsync);
if (err < 0)
- netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
+ netif_err(priv, drv, net_dev, "dpaa_addr_sync() = %d\n",
err);
}
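
The hunk above swaps the driver-private set_multi() callback for the core __dev_mc_sync() helper, which diffs the device's multicast list against the last synced state and invokes the sync/unsync callbacks only for addresses added or removed in between. A minimal sketch of the pattern with stubbed hardware writes:

#include <linux/netdevice.h>

static int my_mc_sync(struct net_device *ndev, const u8 *addr)
{
	/* program one multicast address into the hardware filter */
	return 0;
}

static int my_mc_unsync(struct net_device *ndev, const u8 *addr)
{
	/* drop one multicast address from the hardware filter */
	return 0;
}

static void my_set_rx_mode(struct net_device *ndev)
{
	/* Only newly added / newly removed addresses reach the
	 * callbacks; unchanged entries cost nothing. */
	__dev_mc_sync(ndev, my_mc_sync, my_mc_unsync);
}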
@@ -1804,7 +1820,6 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
struct page *page, *head_page;
struct dpaa_bp *dpaa_bp;
void *vaddr, *sg_vaddr;
- int frag_off, frag_len;
struct sk_buff *skb;
dma_addr_t sg_addr;
int page_offset;
@@ -1847,6 +1862,11 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
* on Tx, if extra headers are added.
*/
WARN_ON(fd_off != priv->rx_headroom);
+ /* The offset to data start within the buffer holding
+ * the SGT should always be equal to the offset to data
+ * start within the first buffer holding the frame.
+ */
+ WARN_ON_ONCE(fd_off != qm_sg_entry_get_off(&sgt[i]));
skb_reserve(skb, fd_off);
skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
} else {
@@ -1860,21 +1880,23 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
page = virt_to_page(sg_vaddr);
head_page = virt_to_head_page(sg_vaddr);
- /* Compute offset in (possibly tail) page */
+ /* Compute offset of sg_vaddr in (possibly tail) page */
page_offset = ((unsigned long)sg_vaddr &
(PAGE_SIZE - 1)) +
(page_address(page) - page_address(head_page));
- /* page_offset only refers to the beginning of sgt[i];
- * but the buffer itself may have an internal offset.
+
+ /* Non-initial SGT entries should not have a buffer
+ * offset.
*/
- frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
- frag_len = qm_sg_entry_get_len(&sgt[i]);
+ WARN_ON_ONCE(qm_sg_entry_get_off(&sgt[i]));
+
/* skb_add_rx_frag() does no checking on the page; if
* we pass it a tail page, we'll end up with
- * bad page accounting and eventually with segafults.
+ * bad page accounting and eventually with segfaults.
*/
- skb_add_rx_frag(skb, i - 1, head_page, frag_off,
- frag_len, dpaa_bp->size);
+ skb_add_rx_frag(skb, i - 1, head_page, page_offset,
+ qm_sg_entry_get_len(&sgt[i]),
+ dpaa_bp->size);
}
/* Update the pool count for the current {cpu x bpool} */
@@ -2750,7 +2772,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
!fman_port_get_hash_result_offset(priv->mac_dev->port[RX],
&hash_offset)) {
- hash = be32_to_cpu(*(u32 *)(vaddr + hash_offset));
+ hash = be32_to_cpu(*(__be32 *)(vaddr + hash_offset));
hash_valid = true;
}
@@ -3571,7 +3593,7 @@ static struct platform_driver dpaa_driver = {
},
.id_table = dpaa_devtype,
.probe = dpaa_eth_probe,
- .remove_new = dpaa_remove
+ .remove = dpaa_remove
};
static int __init dpaa_load(void)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index b0060cf96090..9986f6e1f587 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -243,38 +243,24 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev,
static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
u8 *data)
{
- unsigned int i, j, num_cpus, size;
- char string_cpu[ETH_GSTRING_LEN];
- u8 *strings;
+ unsigned int i, j, num_cpus;
- memset(string_cpu, 0, sizeof(string_cpu));
- strings = data;
- num_cpus = num_online_cpus();
- size = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
+ num_cpus = num_online_cpus();
for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
- for (j = 0; j < num_cpus; j++) {
- snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
- dpaa_stats_percpu[i], j);
- memcpy(strings, string_cpu, ETH_GSTRING_LEN);
- strings += ETH_GSTRING_LEN;
- }
- snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
- dpaa_stats_percpu[i]);
- memcpy(strings, string_cpu, ETH_GSTRING_LEN);
- strings += ETH_GSTRING_LEN;
- }
- for (j = 0; j < num_cpus; j++) {
- snprintf(string_cpu, ETH_GSTRING_LEN,
- "bpool [CPU %d]", j);
- memcpy(strings, string_cpu, ETH_GSTRING_LEN);
- strings += ETH_GSTRING_LEN;
+ for (j = 0; j < num_cpus; j++)
+ ethtool_sprintf(&data, "%s [CPU %d]",
+ dpaa_stats_percpu[i], j);
+
+ ethtool_sprintf(&data, "%s [TOTAL]", dpaa_stats_percpu[i]);
}
- snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]");
- memcpy(strings, string_cpu, ETH_GSTRING_LEN);
- strings += ETH_GSTRING_LEN;
+ for (i = 0; i < num_cpus; i++)
+ ethtool_sprintf(&data, "bpool [CPU %d]", i);
+
+ ethtool_puts(&data, "bpool [TOTAL]");
- memcpy(strings, dpaa_stats_global, size);
+ for (i = 0; i < DPAA_STATS_GLOBAL_LEN; i++)
+ ethtool_puts(&data, dpaa_stats_global[i]);
}
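
The conversion above relies on ethtool_sprintf() and ethtool_puts() taking a u8 ** cursor: each call fills one fixed-width ETH_GSTRING_LEN slot and advances the cursor, removing the manual snprintf/memcpy/pointer bookkeeping. A short sketch of the idiom with made-up stat names:

#include <linux/ethtool.h>
#include <linux/kernel.h>

static void demo_get_strings(u8 *data)
{
	static const char * const names[] = { "rx_packets", "tx_packets" };
	int i;

	for (i = 0; i < ARRAY_SIZE(names); i++)
		ethtool_puts(&data, names[i]);		/* literal string */

	ethtool_sprintf(&data, "ring_%d_drops", 0);	/* formatted */
}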
static int dpaa_get_hash_opts(struct net_device *dev,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 7f476519b7ad..74ef77cb7078 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -217,20 +217,15 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
- u8 *p = data;
int i;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
- strscpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
- strscpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- dpaa2_mac_get_strings(p);
+ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++)
+ ethtool_puts(&data, dpaa2_ethtool_stats[i]);
+ for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++)
+ ethtool_puts(&data, dpaa2_ethtool_extras[i]);
+ dpaa2_mac_get_strings(&data);
break;
}
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index a69bb22c37ea..422ce13a7c94 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -558,15 +558,12 @@ int dpaa2_mac_get_sset_count(void)
return DPAA2_MAC_NUM_STATS;
}
-void dpaa2_mac_get_strings(u8 *data)
+void dpaa2_mac_get_strings(u8 **data)
{
- u8 *p = data;
int i;
- for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) {
- strscpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < DPAA2_MAC_NUM_STATS; i++)
+ ethtool_puts(data, dpaa2_mac_ethtool_stats[i]);
}
void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index c1ec9efd413a..53f8d106d11e 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -49,7 +49,7 @@ void dpaa2_mac_disconnect(struct dpaa2_mac *mac);
int dpaa2_mac_get_sset_count(void);
-void dpaa2_mac_get_strings(u8 *data);
+void dpaa2_mac_get_strings(u8 **data);
void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
index 6bc1988be311..a888f6e6e9b0 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
@@ -170,17 +170,16 @@ dpaa2_switch_ethtool_get_sset_count(struct net_device *netdev, int sset)
static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
- u8 *p = data;
+ const char *str;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < DPAA2_SWITCH_NUM_COUNTERS; i++) {
- memcpy(p, dpaa2_switch_ethtool_counters[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = dpaa2_switch_ethtool_counters[i].name;
+ ethtool_puts(&data, str);
}
- dpaa2_mac_get_strings(p);
+ dpaa2_mac_get_strings(&data);
break;
}
}
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 4d75e6807e92..6c2779047dcd 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -7,6 +7,14 @@ config FSL_ENETC_CORE
If compiled as module (M), the module name is fsl-enetc-core.
+config NXP_ENETC_PF_COMMON
+ tristate
+ help
+ This module provides the functionality shared by the drivers for
+ different versions of NXP ENETC PF controllers.
+
+ If compiled as module (M), the module name is nxp-enetc-pf-common.
+
config FSL_ENETC
tristate "ENETC PF driver"
depends on PCI_MSI
@@ -14,6 +22,7 @@ config FSL_ENETC
select FSL_ENETC_CORE
select FSL_ENETC_IERB
select FSL_ENETC_MDIO
+ select NXP_ENETC_PF_COMMON
select PHYLINK
select PCS_LYNX
select DIMLIB
@@ -24,6 +33,23 @@ config FSL_ENETC
If compiled as module (M), the module name is fsl-enetc.
+config NXP_ENETC4
+ tristate "ENETC4 PF driver"
+ depends on PCI_MSI
+ select MDIO_DEVRES
+ select FSL_ENETC_CORE
+ select FSL_ENETC_MDIO
+ select NXP_ENETC_PF_COMMON
+ select PHYLINK
+ select DIMLIB
+ help
+ This driver supports NXP ENETC devices with major revision 4. ENETC
+ provides the NIC functionality in NETC; it supports
+ virtualization/isolation based on PCIe Single Root IO Virtualization
+ (SR-IOV) and a full range of TSN standards and NIC offload
+ capabilities.
+
+ If compiled as module (M), the module name is nxp-enetc4.
+
config FSL_ENETC_VF
tristate "ENETC VF driver"
depends on PCI_MSI
@@ -75,3 +101,17 @@ config FSL_ENETC_QOS
enable/disable from user space via QoS commands (tc). On the kernel
side, it can be loaded by the QoS driver. Currently, only
taprio (802.1Qbv) and Credit Based Shaper (802.1Qbu) are supported.
+
+config NXP_NETC_BLK_CTRL
+ tristate "NETC blocks control driver"
+ help
+ This driver configures Integrated Endpoint Register Block (IERB) and
+ Privileged Register Block (PRB) of NETC. For i.MX platforms, it also
+ includes the configuration of NETCMIX block.
+ The IERB contains registers that are used for pre-boot initialization,
+ debug, and non-customer configuration. The PRB controls global reset
+ and global error handling for NETC. The NETCMIX block is mainly used
+ to set MII protocol and PCS protocol of the links, it also contains
+ settings for some other functions.
+
+ If compiled as module (M), the module name is nxp-netc-blk-ctrl.
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index b13cbbabb2ea..6fd27ee4fcd1 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -3,11 +3,17 @@
obj-$(CONFIG_FSL_ENETC_CORE) += fsl-enetc-core.o
fsl-enetc-core-y := enetc.o enetc_cbdr.o enetc_ethtool.o
+obj-$(CONFIG_NXP_ENETC_PF_COMMON) += nxp-enetc-pf-common.o
+nxp-enetc-pf-common-y := enetc_pf_common.o
+
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
fsl-enetc-y := enetc_pf.o
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
+obj-$(CONFIG_NXP_ENETC4) += nxp-enetc4.o
+nxp-enetc4-y := enetc4_pf.o
+
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
fsl-enetc-vf-y := enetc_vf.o
@@ -19,3 +25,6 @@ fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o
obj-$(CONFIG_FSL_ENETC_PTP_CLOCK) += fsl-enetc-ptp.o
fsl-enetc-ptp-y := enetc_ptp.o
+
+obj-$(CONFIG_NXP_NETC_BLK_CTRL) += nxp-netc-blk-ctrl.o
+nxp-netc-blk-ctrl-y := netc_blk_ctrl.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index c09370eab319..35634c516e26 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -3,6 +3,7 @@
#include "enetc.h"
#include <linux/bpf_trace.h>
+#include <linux/clk.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/vmalloc.h>
@@ -21,7 +22,7 @@ void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val)
{
enetc_port_wr(&si->hw, reg, val);
if (si->hw_features & ENETC_SI_F_QBU)
- enetc_port_wr(&si->hw, reg + ENETC_PMAC_OFFSET, val);
+ enetc_port_wr(&si->hw, reg + si->drvdata->pmac_offset, val);
}
EXPORT_SYMBOL_GPL(enetc_port_mac_wr);
@@ -700,8 +701,9 @@ static void enetc_rx_dim_work(struct work_struct *w)
net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
struct enetc_int_vector *v =
container_of(dim, struct enetc_int_vector, rx_dim);
+ struct enetc_ndev_priv *priv = netdev_priv(v->rx_ring.ndev);
- v->rx_ictt = enetc_usecs_to_cycles(moder.usec);
+ v->rx_ictt = enetc_usecs_to_cycles(moder.usec, priv->sysclk_freq);
dim->state = DIM_START_MEASURE;
}
@@ -718,7 +720,7 @@ static void enetc_rx_net_dim(struct enetc_int_vector *v)
v->rx_ring.stats.packets,
v->rx_ring.stats.bytes,
&dim_sample);
- net_dim(&v->rx_dim, dim_sample);
+ net_dim(&v->rx_dim, &dim_sample);
}
static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
@@ -1736,9 +1738,15 @@ void enetc_get_si_caps(struct enetc_si *si)
si->num_rx_rings = (val >> 16) & 0xff;
si->num_tx_rings = val & 0xff;
- val = enetc_rd(hw, ENETC_SIRFSCAPR);
- si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
- si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
+ val = enetc_rd(hw, ENETC_SIPCAPR0);
+ if (val & ENETC_SIPCAPR0_RFS) {
+ val = enetc_rd(hw, ENETC_SIRFSCAPR);
+ si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
+ si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
+ } else {
+ /* ENETC that does not support RFS */
+ si->num_fs_entries = 0;
+ }
si->num_rss = 0;
val = enetc_rd(hw, ENETC_SIPCAPR0);
@@ -2066,7 +2074,10 @@ int enetc_configure_si(struct enetc_ndev_priv *priv)
/* enable SI */
enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
- if (si->num_rss) {
+ /* TODO: RSS for i.MX95 will be added later, and the
+ * is_enetc_rev1() condition will be removed
+ */
+ if (si->num_rss && is_enetc_rev1(si)) {
err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
if (err)
return err;
@@ -2090,9 +2101,9 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
*/
priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
priv->num_tx_rings = si->num_tx_rings;
- priv->bdr_int_num = cpus;
+ priv->bdr_int_num = priv->num_rx_rings;
priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
- priv->tx_ictt = ENETC_TXIC_TIMETHR;
+ priv->tx_ictt = enetc_usecs_to_cycles(600, priv->sysclk_freq);
}
EXPORT_SYMBOL_GPL(enetc_init_si_rings_params);
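
The hunk above turns the fixed ENETC_TXIC_TIMETHR into a runtime conversion against priv->sysclk_freq. Assuming enetc_usecs_to_cycles() scales microseconds by the clock frequency (its body is not shown in this patch), the arithmetic comes out as in this standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Assumed conversion: cycles = usecs * clk_hz / 10^6. */
static uint32_t usecs_to_cycles(uint32_t usecs, uint64_t clk_hz)
{
	return (uint32_t)(usecs * clk_hz / 1000000ULL);
}

int main(void)
{
	/* 600 us at the two clock rates named in this patch. */
	printf("%u\n", usecs_to_cycles(600, 400000000ULL)); /* 240000 */
	printf("%u\n", usecs_to_cycles(600, 333000000ULL)); /* 199800 */
	return 0;
}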
@@ -2501,10 +2512,14 @@ int enetc_open(struct net_device *ndev)
extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
- err = enetc_setup_irqs(priv);
+ err = clk_prepare_enable(priv->ref_clk);
if (err)
return err;
+ err = enetc_setup_irqs(priv);
+ if (err)
+ goto err_setup_irqs;
+
err = enetc_phylink_connect(ndev);
if (err)
goto err_phy_connect;
@@ -2536,6 +2551,8 @@ err_alloc_tx:
phylink_disconnect_phy(priv->phylink);
err_phy_connect:
enetc_free_irqs(priv);
+err_setup_irqs:
+ clk_disable_unprepare(priv->ref_clk);
return err;
}
@@ -2589,6 +2606,7 @@ int enetc_close(struct net_device *ndev)
enetc_assign_tx_resources(priv, NULL);
enetc_free_irqs(priv);
+ clk_disable_unprepare(priv->ref_clk);
return 0;
}
@@ -2995,13 +3013,99 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
}
EXPORT_SYMBOL_GPL(enetc_ioctl);
+static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
+ int v_tx_rings)
+{
+ struct enetc_int_vector *v;
+ struct enetc_bdr *bdr;
+ int j, err;
+
+ v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+
+ priv->int_vector[i] = v;
+ bdr = &v->rx_ring;
+ bdr->index = i;
+ bdr->ndev = priv->ndev;
+ bdr->dev = priv->dev;
+ bdr->bd_count = priv->rx_bd_count;
+ bdr->buffer_offset = ENETC_RXB_PAD;
+ priv->rx_ring[i] = bdr;
+
+ err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
+ if (err)
+ goto free_vector;
+
+ err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq, MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err) {
+ xdp_rxq_info_unreg(&bdr->xdp.rxq);
+ goto free_vector;
+ }
+
+ /* init defaults for adaptive IC */
+ if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
+ v->rx_ictt = 0x1;
+ v->rx_dim_en = true;
+ }
+
+ INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
+ netif_napi_add(priv->ndev, &v->napi, enetc_poll);
+ v->count_tx_rings = v_tx_rings;
+
+ for (j = 0; j < v_tx_rings; j++) {
+ int idx;
+
+ /* default tx ring mapping policy */
+ idx = priv->bdr_int_num * j + i;
+ __set_bit(idx, &v->tx_rings_map);
+ bdr = &v->tx_ring[j];
+ bdr->index = idx;
+ bdr->ndev = priv->ndev;
+ bdr->dev = priv->dev;
+ bdr->bd_count = priv->tx_bd_count;
+ priv->tx_ring[idx] = bdr;
+ }
+
+ return 0;
+
+free_vector:
+ priv->rx_ring[i] = NULL;
+ priv->int_vector[i] = NULL;
+ kfree(v);
+
+ return err;
+}
+
+static void enetc_int_vector_destroy(struct enetc_ndev_priv *priv, int i)
+{
+ struct enetc_int_vector *v = priv->int_vector[i];
+ struct enetc_bdr *rx_ring = &v->rx_ring;
+ int j, tx_ring_index;
+
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
+ netif_napi_del(&v->napi);
+ cancel_work_sync(&v->rx_dim.work);
+
+ for (j = 0; j < v->count_tx_rings; j++) {
+ tx_ring_index = priv->bdr_int_num * j + i;
+ priv->tx_ring[tx_ring_index] = NULL;
+ }
+
+ priv->rx_ring[i] = NULL;
+ priv->int_vector[i] = NULL;
+ kfree(v);
+}
+
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
+ int v_tx_rings, v_remainder;
int num_stack_tx_queues;
int first_xdp_tx_ring;
int i, n, err, nvec;
- int v_tx_rings;
nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
/* allocate MSIX for both messaging and Rx/Tx interrupts */
@@ -3015,64 +3119,17 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
/* # of tx rings per int vector */
v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
+ v_remainder = priv->num_tx_rings % priv->bdr_int_num;
for (i = 0; i < priv->bdr_int_num; i++) {
- struct enetc_int_vector *v;
- struct enetc_bdr *bdr;
- int j;
-
- v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
- if (!v) {
- err = -ENOMEM;
- goto fail;
- }
-
- priv->int_vector[i] = v;
-
- bdr = &v->rx_ring;
- bdr->index = i;
- bdr->ndev = priv->ndev;
- bdr->dev = priv->dev;
- bdr->bd_count = priv->rx_bd_count;
- bdr->buffer_offset = ENETC_RXB_PAD;
- priv->rx_ring[i] = bdr;
-
- err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
- if (err) {
- kfree(v);
- goto fail;
- }
+ /* Distribute the remaining TX rings to the first v_remainder
+ * interrupt vectors
+ */
+ int num_tx_rings = i < v_remainder ? v_tx_rings + 1 : v_tx_rings;
- err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
- MEM_TYPE_PAGE_SHARED, NULL);
- if (err) {
- xdp_rxq_info_unreg(&bdr->xdp.rxq);
- kfree(v);
+ err = enetc_int_vector_init(priv, i, num_tx_rings);
+ if (err)
goto fail;
- }
-
- /* init defaults for adaptive IC */
- if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
- v->rx_ictt = 0x1;
- v->rx_dim_en = true;
- }
- INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
- netif_napi_add(priv->ndev, &v->napi, enetc_poll);
- v->count_tx_rings = v_tx_rings;
-
- for (j = 0; j < v_tx_rings; j++) {
- int idx;
-
- /* default tx ring mapping policy */
- idx = priv->bdr_int_num * j + i;
- __set_bit(idx, &v->tx_rings_map);
- bdr = &v->tx_ring[j];
- bdr->index = idx;
- bdr->ndev = priv->ndev;
- bdr->dev = priv->dev;
- bdr->bd_count = priv->tx_bd_count;
- priv->tx_ring[idx] = bdr;
- }
}
num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
@@ -3092,16 +3149,8 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
return 0;
fail:
- while (i--) {
- struct enetc_int_vector *v = priv->int_vector[i];
- struct enetc_bdr *rx_ring = &v->rx_ring;
-
- xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
- xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
- netif_napi_del(&v->napi);
- cancel_work_sync(&v->rx_dim.work);
- kfree(v);
- }
+ while (i--)
+ enetc_int_vector_destroy(priv, i);
pci_free_irq_vectors(pdev);
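
Besides factoring the vector setup into enetc_int_vector_init()/destroy(), the hunk above removes the implicit assumption that the TX ring count divides evenly by the vector count: the first v_remainder vectors each take one extra ring. A standalone sketch of the split:

#include <stdio.h>

int main(void)
{
	int num_tx_rings = 10, bdr_int_num = 4;
	int base = num_tx_rings / bdr_int_num;	/* 2 */
	int rem = num_tx_rings % bdr_int_num;	/* 2 */
	int i;

	/* Prints 3, 3, 2, 2: the remainder lands on vectors 0 and 1. */
	for (i = 0; i < bdr_int_num; i++)
		printf("vector %d: %d rings\n", i,
		       i < rem ? base + 1 : base);
	return 0;
}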
@@ -3113,26 +3162,8 @@ void enetc_free_msix(struct enetc_ndev_priv *priv)
{
int i;
- for (i = 0; i < priv->bdr_int_num; i++) {
- struct enetc_int_vector *v = priv->int_vector[i];
- struct enetc_bdr *rx_ring = &v->rx_ring;
-
- xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
- xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
- netif_napi_del(&v->napi);
- cancel_work_sync(&v->rx_dim.work);
- }
-
- for (i = 0; i < priv->num_rx_rings; i++)
- priv->rx_ring[i] = NULL;
-
- for (i = 0; i < priv->num_tx_rings; i++)
- priv->tx_ring[i] = NULL;
-
- for (i = 0; i < priv->bdr_int_num; i++) {
- kfree(priv->int_vector[i]);
- priv->int_vector[i] = NULL;
- }
+ for (i = 0; i < priv->bdr_int_num; i++)
+ enetc_int_vector_destroy(priv, i);
/* disable all MSIX for this device */
pci_free_irq_vectors(priv->si->pdev);
@@ -3241,5 +3272,55 @@ void enetc_pci_remove(struct pci_dev *pdev)
}
EXPORT_SYMBOL_GPL(enetc_pci_remove);
+static const struct enetc_drvdata enetc_pf_data = {
+ .sysclk_freq = ENETC_CLK_400M,
+ .pmac_offset = ENETC_PMAC_OFFSET,
+ .eth_ops = &enetc_pf_ethtool_ops,
+};
+
+static const struct enetc_drvdata enetc4_pf_data = {
+ .sysclk_freq = ENETC_CLK_333M,
+ .pmac_offset = ENETC4_PMAC_OFFSET,
+ .eth_ops = &enetc4_pf_ethtool_ops,
+};
+
+static const struct enetc_drvdata enetc_vf_data = {
+ .sysclk_freq = ENETC_CLK_400M,
+ .eth_ops = &enetc_vf_ethtool_ops,
+};
+
+static const struct enetc_platform_info enetc_info[] = {
+ { .revision = ENETC_REV_1_0,
+ .dev_id = ENETC_DEV_ID_PF,
+ .data = &enetc_pf_data,
+ },
+ { .revision = ENETC_REV_4_1,
+ .dev_id = NXP_ENETC_PF_DEV_ID,
+ .data = &enetc4_pf_data,
+ },
+ { .revision = ENETC_REV_1_0,
+ .dev_id = ENETC_DEV_ID_VF,
+ .data = &enetc_vf_data,
+ },
+};
+
+int enetc_get_driver_data(struct enetc_si *si)
+{
+ u16 dev_id = si->pdev->device;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_info); i++) {
+ if (si->revision == enetc_info[i].revision &&
+ dev_id == enetc_info[i].dev_id) {
+ si->drvdata = enetc_info[i].data;
+
+ return 0;
+ }
+ }
+
+ return -ERANGE;
+}
+EXPORT_SYMBOL_GPL(enetc_get_driver_data);
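
Callers are expected to run enetc_get_driver_data() once the SI revision and PCI device ID are known; si->drvdata then supplies the clock frequency, pMAC offset and ethtool ops for the detected silicon. A hedged sketch of such a consumer, not part of this patch, with the surrounding probe steps elided:

#include <linux/netdevice.h>
#include "enetc.h"

static int demo_apply_drvdata(struct enetc_si *si,
			      struct enetc_ndev_priv *priv,
			      struct net_device *ndev)
{
	int err = enetc_get_driver_data(si);

	if (err)
		return err;	/* -ERANGE: unknown revision/dev_id pair */

	priv->sysclk_freq = si->drvdata->sysclk_freq;
	ndev->ethtool_ops = si->drvdata->eth_ops;
	return 0;
}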
+
MODULE_DESCRIPTION("NXP ENETC Ethernet driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index fb7d98d57783..72fa03dbc2dd 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -14,6 +14,7 @@
#include <net/xdp.h>
#include "enetc_hw.h"
+#include "enetc4_hw.h"
#define ENETC_MAC_MAXFRM_SIZE 9600
#define ENETC_MAX_MTU (ENETC_MAC_MAXFRM_SIZE - \
@@ -231,6 +232,18 @@ enum enetc_errata {
#define ENETC_SI_F_QBV BIT(1)
#define ENETC_SI_F_QBU BIT(2)
+struct enetc_drvdata {
+ u32 pmac_offset; /* Only valid for PSIs that support 802.1Qbu */
+ u64 sysclk_freq;
+ const struct ethtool_ops *eth_ops;
+};
+
+struct enetc_platform_info {
+ u16 revision;
+ u16 dev_id;
+ const struct enetc_drvdata *data;
+};
+
/* PCI IEP device data */
struct enetc_si {
struct pci_dev *pdev;
@@ -246,11 +259,18 @@ struct enetc_si {
int num_fs_entries;
int num_rss; /* number of RSS buckets */
unsigned short pad;
+ u16 revision;
int hw_features;
+ const struct enetc_drvdata *drvdata;
};
#define ENETC_SI_ALIGN 32
+static inline bool is_enetc_rev1(struct enetc_si *si)
+{
+ return si->pdev->revision == ENETC_REV1;
+}
+
static inline void *enetc_si_priv(const struct enetc_si *si)
{
return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN);
@@ -302,7 +322,7 @@ struct enetc_cls_rule {
int used;
};
-#define ENETC_MAX_BDR_INT 2 /* fixed to max # of available cpus */
+#define ENETC_MAX_BDR_INT 6 /* fixed to max # of available cpus */
struct psfp_cap {
u32 max_streamid;
u32 max_psfp_filter;
@@ -341,7 +361,6 @@ enum enetc_ic_mode {
#define ENETC_RXIC_PKTTHR min_t(u32, 256, ENETC_RX_RING_DEFAULT_SIZE / 2)
#define ENETC_TXIC_PKTTHR min_t(u32, 128, ENETC_TX_RING_DEFAULT_SIZE / 2)
-#define ENETC_TXIC_TIMETHR enetc_usecs_to_cycles(600)
struct enetc_ndev_priv {
struct net_device *ndev;
@@ -389,6 +408,9 @@ struct enetc_ndev_priv {
* and link state updates
*/
struct mutex mm_lock;
+
+ struct clk *ref_clk; /* RGMII/RMII reference clock */
+ u64 sysclk_freq; /* NETC system clock frequency */
};
/* Messaging */
@@ -418,6 +440,7 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv);
int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
void enetc_free_si_resources(struct enetc_ndev_priv *priv);
int enetc_configure_si(struct enetc_ndev_priv *priv);
+int enetc_get_driver_data(struct enetc_si *si);
int enetc_open(struct net_device *ndev);
int enetc_close(struct net_device *ndev);
@@ -434,6 +457,9 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
struct xdp_frame **frames, u32 flags);
/* ethtool */
+extern const struct ethtool_ops enetc_pf_ethtool_ops;
+extern const struct ethtool_ops enetc4_pf_ethtool_ops;
+extern const struct ethtool_ops enetc_vf_ethtool_ops;
void enetc_set_ethtool_ops(struct net_device *ndev);
void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link);
void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
new file mode 100644
index 000000000000..26b220677448
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * This header file defines the register offsets and bit fields
+ * of ENETC4 PF and VFs. Note that the same registers as ENETC
+ * version 1.0 are defined in the enetc_hw.h file.
+ *
+ * Copyright 2024 NXP
+ */
+#ifndef __ENETC4_HW_H_
+#define __ENETC4_HW_H_
+
+#define NXP_ENETC_VENDOR_ID 0x1131
+#define NXP_ENETC_PF_DEV_ID 0xe101
+
+/***************************ENETC port registers**************************/
+#define ENETC4_ECAPR0 0x0
+#define ECAPR0_RFS BIT(2)
+#define ECAPR0_TSD BIT(5)
+#define ECAPR0_RSS BIT(8)
+#define ECAPR0_RSC BIT(9)
+#define ECAPR0_LSO BIT(10)
+#define ECAPR0_WO BIT(13)
+
+#define ENETC4_ECAPR1 0x4
+#define ECAPR1_NUM_TCS GENMASK(6, 4)
+#define ECAPR1_NUM_MCH GENMASK(9, 8)
+#define ECAPR1_NUM_UCH GENMASK(11, 10)
+#define ECAPR1_NUM_MSIX GENMASK(22, 12)
+#define ECAPR1_NUM_VSI GENMASK(27, 24)
+#define ECAPR1_NUM_IPV BIT(31)
+
+#define ENETC4_ECAPR2 0x8
+#define ECAPR2_NUM_TX_BDR GENMASK(9, 0)
+#define ECAPR2_NUM_RX_BDR GENMASK(25, 16)
+
+#define ENETC4_PMR 0x10
+#define PMR_SI_EN(a) BIT((16 + (a)))
+
+/* Port Pause ON/OFF threshold register */
+#define ENETC4_PPAUONTR 0x108
+#define ENETC4_PPAUOFFTR 0x10c
+
+/* Port Station interface promiscuous MAC mode register */
+#define ENETC4_PSIPMMR 0x200
+#define PSIPMMR_SI_MAC_UP(a) BIT(a) /* a = SI index */
+#define PSIPMMR_SI_MAC_MP(a) BIT((a) + 16)
+
+/* Port Station interface promiscuous VLAN mode register */
+#define ENETC4_PSIPVMR 0x204
+
+/* Port RSS key register n. n = 0,1,2,...,9 */
+#define ENETC4_PRSSKR(n) ((n) * 0x4 + 0x250)
+
+/* Port station interface MAC address filtering capability register */
+#define ENETC4_PSIMAFCAPR 0x280
+#define PSIMAFCAPR_NUM_MAC_AFTE GENMASK(11, 0)
+
+/* Port station interface VLAN filtering capability register */
+#define ENETC4_PSIVLANFCAPR 0x2c0
+#define PSIVLANFCAPR_NUM_VLAN_FTE GENMASK(11, 0)
+
+/* Port station interface VLAN filtering mode register */
+#define ENETC4_PSIVLANFMR 0x2c4
+#define PSIVLANFMR_VS BIT(0)
+
+/* Port Station interface a primary MAC address registers */
+#define ENETC4_PSIPMAR0(a) ((a) * 0x80 + 0x2000)
+#define ENETC4_PSIPMAR1(a) ((a) * 0x80 + 0x2004)
+
+/* Port station interface a configuration register 0/2 */
+#define ENETC4_PSICFGR0(a) ((a) * 0x80 + 0x2010)
+#define PSICFGR0_VASE BIT(13)
+#define PSICFGR0_ASE BIT(15)
+#define PSICFGR0_ANTI_SPOOFING (PSICFGR0_VASE | PSICFGR0_ASE)
+
+#define ENETC4_PSICFGR2(a) ((a) * 0x80 + 0x2018)
+#define PSICFGR2_NUM_MSIX GENMASK(5, 0)
+
+#define ENETC4_PMCAPR 0x4004
+#define PMCAPR_HD BIT(8)
+#define PMCAPR_FP GENMASK(10, 9)
+
+/* Port configuration register */
+#define ENETC4_PCR 0x4010
+#define PCR_HDR_FMT BIT(0)
+#define PCR_L2DOSE BIT(4)
+#define PCR_TIMER_CS BIT(8)
+#define PCR_PSPEED GENMASK(29, 16)
+#define PCR_PSPEED_VAL(speed) (((speed) / 10 - 1) << 16)
+
+/* Port MAC address register 0/1 */
+#define ENETC4_PMAR0 0x4020
+#define ENETC4_PMAR1 0x4024
+
+/* Port operational register */
+#define ENETC4_POR 0x4100
+
+/* Port traffic class a transmit maximum SDU register */
+#define ENETC4_PTCTMSDUR(a) ((a) * 0x20 + 0x4208)
+#define PTCTMSDUR_MAXSDU GENMASK(15, 0)
+#define PTCTMSDUR_SDU_TYPE GENMASK(17, 16)
+#define SDU_TYPE_PPDU 0
+#define SDU_TYPE_MPDU 1
+#define SDU_TYPE_MSDU 2
+
+#define ENETC4_PMAC_OFFSET 0x400
+#define ENETC4_PM_CMD_CFG(mac) (0x5008 + (mac) * 0x400)
+#define PM_CMD_CFG_TX_EN BIT(0)
+#define PM_CMD_CFG_RX_EN BIT(1)
+#define PM_CMD_CFG_PAUSE_FWD BIT(7)
+#define PM_CMD_CFG_PAUSE_IGN BIT(8)
+#define PM_CMD_CFG_TX_ADDR_INS BIT(9)
+#define PM_CMD_CFG_LOOP_EN BIT(10)
+#define PM_CMD_CFG_LPBK_MODE GENMASK(12, 11)
+#define LPBCK_MODE_EXT_TX_CLK 0
+#define LPBCK_MODE_MAC_LEVEL 1
+#define LPBCK_MODE_INT_TX_CLK 2
+#define PM_CMD_CFG_CNT_FRM_EN BIT(13)
+#define PM_CMD_CFG_TXP BIT(15)
+#define PM_CMD_CFG_SEND_IDLE BIT(16)
+#define PM_CMD_CFG_HD_FCEN BIT(18)
+#define PM_CMD_CFG_SFD BIT(21)
+#define PM_CMD_CFG_TX_FLUSH BIT(22)
+#define PM_CMD_CFG_TX_LOWP_EN BIT(23)
+#define PM_CMD_CFG_RX_LOWP_EMPTY BIT(24)
+#define PM_CMD_CFG_SWR BIT(26)
+#define PM_CMD_CFG_TS_MODE BIT(30)
+#define PM_CMD_CFG_MG BIT(31)
+
+/* Port MAC 0/1 Maximum Frame Length Register */
+#define ENETC4_PM_MAXFRM(mac) (0x5014 + (mac) * 0x400)
+
+/* Port MAC 0/1 Pause Quanta Register */
+#define ENETC4_PM_PAUSE_QUANTA(mac) (0x5054 + (mac) * 0x400)
+
+/* Port MAC 0/1 Pause Quanta Threshold Register */
+#define ENETC4_PM_PAUSE_THRESH(mac) (0x5064 + (mac) * 0x400)
+
+/* Port MAC 0 Interface Mode Control Register */
+#define ENETC4_PM_IF_MODE(mac) (0x5300 + (mac) * 0x400)
+#define PM_IF_MODE_IFMODE GENMASK(2, 0)
+#define IFMODE_XGMII 0
+#define IFMODE_RMII 3
+#define IFMODE_RGMII 4
+#define IFMODE_SGMII 5
+#define PM_IF_MODE_REVMII BIT(3)
+#define PM_IF_MODE_M10 BIT(4)
+#define PM_IF_MODE_HD BIT(6)
+#define PM_IF_MODE_SSP GENMASK(14, 13)
+#define SSP_100M 0
+#define SSP_10M 1
+#define SSP_1G 2
+#define PM_IF_MODE_ENA BIT(15)
+
+#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
new file mode 100644
index 000000000000..fc41078c4f5d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
@@ -0,0 +1,756 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2024 NXP */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/unaligned.h>
+
+#include "enetc_pf_common.h"
+
+#define ENETC_SI_MAX_RING_NUM 8
+
+static void enetc4_get_port_caps(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC4_ECAPR1);
+ pf->caps.num_vsi = (val & ECAPR1_NUM_VSI) >> 24;
+ pf->caps.num_msix = ((val & ECAPR1_NUM_MSIX) >> 12) + 1;
+
+ val = enetc_port_rd(hw, ENETC4_ECAPR2);
+ pf->caps.num_rx_bdr = (val & ECAPR2_NUM_RX_BDR) >> 16;
+ pf->caps.num_tx_bdr = val & ECAPR2_NUM_TX_BDR;
+
+ val = enetc_port_rd(hw, ENETC4_PMCAPR);
+ pf->caps.half_duplex = (val & PMCAPR_HD) ? 1 : 0;
+}
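+
The capability reads above extract GENMASK()-defined fields with open-coded mask-and-shift. The same extraction can be written with FIELD_GET() from <linux/bitfield.h>, which derives the shift from the mask at compile time; shown here only as an equivalent sketch:

#include <linux/bitfield.h>

/* Equivalent to ((val & ECAPR1_NUM_MSIX) >> 12) + 1 above. */
static inline u32 demo_num_msix(u32 ecapr1)
{
	return FIELD_GET(ECAPR1_NUM_MSIX, ecapr1) + 1;
}

/* Equivalent to (val & ECAPR2_NUM_RX_BDR) >> 16 above. */
static inline u32 demo_num_rx_bdr(u32 ecapr2)
{
	return FIELD_GET(ECAPR2_NUM_RX_BDR, ecapr2);
}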
+
+static void enetc4_pf_set_si_primary_mac(struct enetc_hw *hw, int si,
+ const u8 *addr)
+{
+ u16 lower = get_unaligned_le16(addr + 4);
+ u32 upper = get_unaligned_le32(addr);
+
+ if (si != 0) {
+ __raw_writel(upper, hw->port + ENETC4_PSIPMAR0(si));
+ __raw_writew(lower, hw->port + ENETC4_PSIPMAR1(si));
+ } else {
+ __raw_writel(upper, hw->port + ENETC4_PMAR0);
+ __raw_writew(lower, hw->port + ENETC4_PMAR1);
+ }
+}
+
+static void enetc4_pf_get_si_primary_mac(struct enetc_hw *hw, int si,
+ u8 *addr)
+{
+ u32 upper;
+ u16 lower;
+
+ upper = __raw_readl(hw->port + ENETC4_PSIPMAR0(si));
+ lower = __raw_readw(hw->port + ENETC4_PSIPMAR1(si));
+
+ put_unaligned_le32(upper, addr);
+ put_unaligned_le16(lower, addr + 4);
+}
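+
enetc4_pf_set/get_si_primary_mac() above split the six address bytes across one 32-bit and one 16-bit register in little-endian order, letting get/put_unaligned_le*() handle byte order and alignment. A standalone sketch of the resulting layout, using explicit shifts instead of the kernel helpers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };

	/* get_unaligned_le32(addr): bytes 0..3, LSB first */
	uint32_t upper = (uint32_t)mac[0] | (uint32_t)mac[1] << 8 |
			 (uint32_t)mac[2] << 16 | (uint32_t)mac[3] << 24;
	/* get_unaligned_le16(addr + 4): bytes 4..5, LSB first */
	uint16_t lower = (uint16_t)(mac[4] | mac[5] << 8);

	/* Prints PMAR0=019f0400 PMAR1=0302 for this address. */
	printf("PMAR0=%08x PMAR1=%04x\n", upper, lower);
	return 0;
}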
+
+static const struct enetc_pf_ops enetc4_pf_ops = {
+ .set_si_primary_mac = enetc4_pf_set_si_primary_mac,
+ .get_si_primary_mac = enetc4_pf_get_si_primary_mac,
+};
+
+static int enetc4_pf_struct_init(struct enetc_si *si)
+{
+ struct enetc_pf *pf = enetc_si_priv(si);
+
+ pf->si = si;
+ pf->total_vfs = pci_sriov_get_totalvfs(si->pdev);
+ pf->ops = &enetc4_pf_ops;
+
+ enetc4_get_port_caps(pf);
+
+ return 0;
+}
+
+static u32 enetc4_psicfgr0_val_construct(bool is_vf, u32 num_tx_bdr, u32 num_rx_bdr)
+{
+ u32 val;
+
+ val = ENETC_PSICFGR0_SET_TXBDR(num_tx_bdr);
+ val |= ENETC_PSICFGR0_SET_RXBDR(num_rx_bdr);
+ val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+
+ if (is_vf)
+ val |= ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE;
+
+ return val;
+}
+
+static void enetc4_default_rings_allocation(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ u32 num_rx_bdr, num_tx_bdr, val;
+ u32 vf_tx_bdr, vf_rx_bdr;
+ int i, rx_rem, tx_rem;
+
+ if (pf->caps.num_rx_bdr < ENETC_SI_MAX_RING_NUM + pf->caps.num_vsi)
+ num_rx_bdr = pf->caps.num_rx_bdr - pf->caps.num_vsi;
+ else
+ num_rx_bdr = ENETC_SI_MAX_RING_NUM;
+
+ if (pf->caps.num_tx_bdr < ENETC_SI_MAX_RING_NUM + pf->caps.num_vsi)
+ num_tx_bdr = pf->caps.num_tx_bdr - pf->caps.num_vsi;
+ else
+ num_tx_bdr = ENETC_SI_MAX_RING_NUM;
+
+ val = enetc4_psicfgr0_val_construct(false, num_tx_bdr, num_rx_bdr);
+ enetc_port_wr(hw, ENETC4_PSICFGR0(0), val);
+
+ num_rx_bdr = pf->caps.num_rx_bdr - num_rx_bdr;
+ rx_rem = num_rx_bdr % pf->caps.num_vsi;
+ num_rx_bdr = num_rx_bdr / pf->caps.num_vsi;
+
+ num_tx_bdr = pf->caps.num_tx_bdr - num_tx_bdr;
+ tx_rem = num_tx_bdr % pf->caps.num_vsi;
+ num_tx_bdr = num_tx_bdr / pf->caps.num_vsi;
+
+ for (i = 0; i < pf->caps.num_vsi; i++) {
+ vf_tx_bdr = (i < tx_rem) ? num_tx_bdr + 1 : num_tx_bdr;
+ vf_rx_bdr = (i < rx_rem) ? num_rx_bdr + 1 : num_rx_bdr;
+ val = enetc4_psicfgr0_val_construct(true, vf_tx_bdr, vf_rx_bdr);
+ enetc_port_wr(hw, ENETC4_PSICFGR0(i + 1), val);
+ }
+}
+
+static void enetc4_allocate_si_rings(struct enetc_pf *pf)
+{
+ enetc4_default_rings_allocation(pf);
+}
+
+static void enetc4_pf_set_si_vlan_promisc(struct enetc_hw *hw, int si, bool en)
+{
+ u32 val = enetc_port_rd(hw, ENETC4_PSIPVMR);
+
+ if (en)
+ val |= BIT(si);
+ else
+ val &= ~BIT(si);
+
+ enetc_port_wr(hw, ENETC4_PSIPVMR, val);
+}
+
+static void enetc4_set_default_si_vlan_promisc(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ int num_si = pf->caps.num_vsi + 1;
+ int i;
+
+ /* enforce VLAN promiscuous mode for all SIs */
+ for (i = 0; i < num_si; i++)
+ enetc4_pf_set_si_vlan_promisc(hw, i, true);
+}
+
+/* Allocate the number of MSI-X vectors for per SI. */
+static void enetc4_set_si_msix_num(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ int i, num_msix, total_si;
+ u32 val;
+
+ total_si = pf->caps.num_vsi + 1;
+
+ num_msix = pf->caps.num_msix / total_si +
+ pf->caps.num_msix % total_si - 1;
+ val = num_msix & PSICFGR2_NUM_MSIX;
+ enetc_port_wr(hw, ENETC4_PSICFGR2(0), val);
+
+ num_msix = pf->caps.num_msix / total_si - 1;
+ val = num_msix & PSICFGR2_NUM_MSIX;
+ for (i = 0; i < pf->caps.num_vsi; i++)
+ enetc_port_wr(hw, ENETC4_PSICFGR2(i + 1), val);
+}
+
+static void enetc4_enable_all_si(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ int num_si = pf->caps.num_vsi + 1;
+ u32 si_bitmap = 0;
+ int i;
+
+ /* Master enable for all SIs */
+ for (i = 0; i < num_si; i++)
+ si_bitmap |= PMR_SI_EN(i);
+
+ enetc_port_wr(hw, ENETC4_PMR, si_bitmap);
+}
+
+static void enetc4_configure_port_si(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+
+ enetc4_allocate_si_rings(pf);
+
+ /* Outer VLAN tag will be used for VLAN filtering */
+ enetc_port_wr(hw, ENETC4_PSIVLANFMR, PSIVLANFMR_VS);
+
+ enetc4_set_default_si_vlan_promisc(pf);
+
+ /* Disable SI MAC multicast & unicast promiscuous */
+ enetc_port_wr(hw, ENETC4_PSIPMMR, 0);
+
+ enetc4_set_si_msix_num(pf);
+
+ enetc4_enable_all_si(pf);
+}
+
+static void enetc4_pf_reset_tc_msdu(struct enetc_hw *hw)
+{
+ u32 val = ENETC_MAC_MAXFRM_SIZE;
+ int tc;
+
+ val = u32_replace_bits(val, SDU_TYPE_MPDU, PTCTMSDUR_SDU_TYPE);
+
+ for (tc = 0; tc < ENETC_NUM_TC; tc++)
+ enetc_port_wr(hw, ENETC4_PTCTMSDUR(tc), val);
+}
+
+static void enetc4_set_trx_frame_size(struct enetc_pf *pf)
+{
+ struct enetc_si *si = pf->si;
+
+ enetc_port_mac_wr(si, ENETC4_PM_MAXFRM(0),
+ ENETC_SET_MAXFRM(ENETC_MAC_MAXFRM_SIZE));
+
+ enetc4_pf_reset_tc_msdu(&si->hw);
+}
+
+static void enetc4_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
+{
+ int i;
+
+ for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
+ enetc_port_wr(hw, ENETC4_PRSSKR(i), ((u32 *)bytes)[i]);
+}
+
+static void enetc4_set_default_rss_key(struct enetc_pf *pf)
+{
+ u8 hash_key[ENETC_RSSHASH_KEY_SIZE] = {0};
+ struct enetc_hw *hw = &pf->si->hw;
+
+ /* set up hash key */
+ get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
+ enetc4_set_rss_key(hw, hash_key);
+}
+
+static void enetc4_enable_trx(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+
+ /* Enable port transmit/receive */
+ enetc_port_wr(hw, ENETC4_POR, 0);
+}
+
+static void enetc4_configure_port(struct enetc_pf *pf)
+{
+ enetc4_configure_port_si(pf);
+ enetc4_set_trx_frame_size(pf);
+ enetc4_set_default_rss_key(pf);
+ enetc4_enable_trx(pf);
+}
+
+static int enetc4_pf_init(struct enetc_pf *pf)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ int err;
+
+ /* Initialize the MAC address for PF and VFs */
+ err = enetc_setup_mac_addresses(dev->of_node, pf);
+ if (err) {
+ dev_err(dev, "Failed to set MAC addresses\n");
+ return err;
+ }
+
+ enetc4_configure_port(pf);
+
+ return 0;
+}
+
+static const struct net_device_ops enetc4_ndev_ops = {
+ .ndo_open = enetc_open,
+ .ndo_stop = enetc_close,
+ .ndo_start_xmit = enetc_xmit,
+ .ndo_get_stats = enetc_get_stats,
+ .ndo_set_mac_address = enetc_pf_set_mac_addr,
+};
+
+static struct phylink_pcs *
+enetc4_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ return pf->pcs;
+}
+
+static void enetc4_mac_config(struct enetc_pf *pf, unsigned int mode,
+ phy_interface_t phy_mode)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(pf->si->ndev);
+ struct enetc_si *si = pf->si;
+ u32 val;
+
+ val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
+ val &= ~(PM_IF_MODE_IFMODE | PM_IF_MODE_ENA);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val |= IFMODE_RGMII;
+ /* We need to enable auto-negotiation for the MAC
+ * if its RGMII interface supports in-band status.
+ */
+ if (phylink_autoneg_inband(mode))
+ val |= PM_IF_MODE_ENA;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ val |= IFMODE_RMII;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ val |= IFMODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ val |= IFMODE_XGMII;
+ break;
+ default:
+ dev_err(priv->dev,
+ "Unsupported PHY mode:%d\n", phy_mode);
+ return;
+ }
+
+ enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
+}
+
+static void enetc4_pl_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ enetc4_mac_config(pf, mode, state->interface);
+}
+
+static void enetc4_set_port_speed(struct enetc_ndev_priv *priv, int speed)
+{
+ u32 old_speed = priv->speed;
+ u32 val;
+
+ if (speed == old_speed)
+ return;
+
+ val = enetc_port_rd(&priv->si->hw, ENETC4_PCR);
+ val &= ~PCR_PSPEED;
+
+ switch (speed) {
+ case SPEED_100:
+ case SPEED_1000:
+ case SPEED_2500:
+ case SPEED_10000:
+ val |= (PCR_PSPEED & PCR_PSPEED_VAL(speed));
+ break;
+ case SPEED_10:
+ default:
+ val |= (PCR_PSPEED & PCR_PSPEED_VAL(SPEED_10));
+ }
+
+ priv->speed = speed;
+ enetc_port_wr(&priv->si->hw, ENETC4_PCR, val);
+}
+
+static void enetc4_set_rgmii_mac(struct enetc_pf *pf, int speed, int duplex)
+{
+ struct enetc_si *si = pf->si;
+ u32 old_val, val;
+
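+ /* Speed and duplex are forced here (mac_link_up only calls this when
+ * in-band autoneg is off), so PM_IF_MODE_ENA is cleared along with the
+ * speed-select bits before reprogramming.
+ */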
+ old_val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
+ val = old_val & ~(PM_IF_MODE_ENA | PM_IF_MODE_M10 | PM_IF_MODE_REVMII);
+
+ switch (speed) {
+ case SPEED_1000:
+ val = u32_replace_bits(val, SSP_1G, PM_IF_MODE_SSP);
+ break;
+ case SPEED_100:
+ val = u32_replace_bits(val, SSP_100M, PM_IF_MODE_SSP);
+ break;
+ case SPEED_10:
+ val = u32_replace_bits(val, SSP_10M, PM_IF_MODE_SSP);
+ }
+
+ val = u32_replace_bits(val, duplex == DUPLEX_FULL ? 0 : 1,
+ PM_IF_MODE_HD);
+
+ if (val == old_val)
+ return;
+
+ enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
+}
+
+static void enetc4_set_rmii_mac(struct enetc_pf *pf, int speed, int duplex)
+{
+ struct enetc_si *si = pf->si;
+ u32 old_val, val;
+
+ old_val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
+ val = old_val & ~(PM_IF_MODE_ENA | PM_IF_MODE_SSP);
+
+ switch (speed) {
+ case SPEED_100:
+ val &= ~PM_IF_MODE_M10;
+ break;
+ case SPEED_10:
+ val |= PM_IF_MODE_M10;
+ }
+
+ val = u32_replace_bits(val, duplex == DUPLEX_FULL ? 0 : 1,
+ PM_IF_MODE_HD);
+
+ if (val == old_val)
+ return;
+
+ enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
+}
+
+static void enetc4_set_hd_flow_control(struct enetc_pf *pf, bool enable)
+{
+ struct enetc_si *si = pf->si;
+ u32 old_val, val;
+
+ if (!pf->caps.half_duplex)
+ return;
+
+ old_val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
+ val = u32_replace_bits(old_val, enable ? 1 : 0, PM_CMD_CFG_HD_FCEN);
+ if (val == old_val)
+ return;
+
+ enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
+}
+
+static void enetc4_set_rx_pause(struct enetc_pf *pf, bool rx_pause)
+{
+ struct enetc_si *si = pf->si;
+ u32 old_val, val;
+
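+ /* PM_CMD_CFG_PAUSE_IGN is an "ignore PAUSE" bit: it is cleared when
+ * RX pause is enabled and set when it is not, hence the inverted logic.
+ */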
+ old_val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
+ val = u32_replace_bits(old_val, rx_pause ? 0 : 1, PM_CMD_CFG_PAUSE_IGN);
+ if (val == old_val)
+ return;
+
+ enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
+}
+
+static void enetc4_set_tx_pause(struct enetc_pf *pf, int num_rxbdr, bool tx_pause)
+{
+ u32 pause_off_thresh = 0, pause_on_thresh = 0;
+ u32 init_quanta = 0, refresh_quanta = 0;
+ struct enetc_hw *hw = &pf->si->hw;
+ u32 rbmr, old_rbmr;
+ int i;
+
+ for (i = 0; i < num_rxbdr; i++) {
+ old_rbmr = enetc_rxbdr_rd(hw, i, ENETC_RBMR);
+ rbmr = u32_replace_bits(old_rbmr, tx_pause ? 1 : 0, ENETC_RBMR_CM);
+ if (rbmr == old_rbmr)
+ continue;
+
+ enetc_rxbdr_wr(hw, i, ENETC_RBMR, rbmr);
+ }
+
+ if (tx_pause) {
+ /* When the port first enters congestion, send a PAUSE request
+ * with the maximum number of quanta. When the port exits
+ * congestion, it will automatically send a PAUSE frame with
+ * zero quanta.
+ */
+ init_quanta = 0xffff;
+
+ /* Also, set up the refresh timer to send follow-up PAUSE
+ * frames at half the quanta value, in case the congestion
+ * condition persists.
+ */
+ refresh_quanta = 0xffff / 2;
+
+ /* Start emitting PAUSE frames when 3 large frames (or more
+ * smaller frames) have accumulated in the FIFO waiting to be
+ * DMAed to the RX ring.
+ */
+ pause_on_thresh = 3 * ENETC_MAC_MAXFRM_SIZE;
+ pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE;
+ }
+
+ enetc_port_mac_wr(pf->si, ENETC4_PM_PAUSE_QUANTA(0), init_quanta);
+ enetc_port_mac_wr(pf->si, ENETC4_PM_PAUSE_THRESH(0), refresh_quanta);
+ enetc_port_wr(hw, ENETC4_PPAUONTR, pause_on_thresh);
+ enetc_port_wr(hw, ENETC4_PPAUOFFTR, pause_off_thresh);
+}
+
+static void enetc4_enable_mac(struct enetc_pf *pf, bool en)
+{
+ struct enetc_si *si = pf->si;
+ u32 val;
+
+ val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
+ val &= ~(PM_CMD_CFG_TX_EN | PM_CMD_CFG_RX_EN);
+ val |= en ? (PM_CMD_CFG_TX_EN | PM_CMD_CFG_RX_EN) : 0;
+
+ enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
+}
+
+static void enetc4_pl_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+ struct enetc_si *si = pf->si;
+ struct enetc_ndev_priv *priv;
+ bool hd_fc = false;
+
+ priv = netdev_priv(si->ndev);
+ enetc4_set_port_speed(priv, speed);
+
+ if (!phylink_autoneg_inband(mode) &&
+ phy_interface_mode_is_rgmii(interface))
+ enetc4_set_rgmii_mac(pf, speed, duplex);
+
+ if (interface == PHY_INTERFACE_MODE_RMII)
+ enetc4_set_rmii_mac(pf, speed, duplex);
+
+ if (duplex == DUPLEX_FULL) {
+ /* When preemption is enabled, generation of PAUSE frames
+ * must be disabled, as stated in the IEEE 802.3 standard.
+ */
+ if (priv->active_offloads & ENETC_F_QBU)
+ tx_pause = false;
+ } else { /* DUPLEX_HALF */
+ if (tx_pause || rx_pause)
+ hd_fc = true;
+
+ /* As per 802.3 annex 31B, PAUSE frames are only supported
+ * when the link is configured for full duplex operation.
+ */
+ tx_pause = false;
+ rx_pause = false;
+ }
+
+ enetc4_set_hd_flow_control(pf, hd_fc);
+ enetc4_set_tx_pause(pf, priv->num_rx_rings, tx_pause);
+ enetc4_set_rx_pause(pf, rx_pause);
+ enetc4_enable_mac(pf, true);
+}
+
+static void enetc4_pl_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ enetc4_enable_mac(pf, false);
+}
+
+static const struct phylink_mac_ops enetc_pl_mac_ops = {
+ .mac_select_pcs = enetc4_pl_mac_select_pcs,
+ .mac_config = enetc4_pl_mac_config,
+ .mac_link_up = enetc4_pl_mac_link_up,
+ .mac_link_down = enetc4_pl_mac_link_down,
+};
+
+static void enetc4_pci_remove(void *data)
+{
+ struct pci_dev *pdev = data;
+
+ enetc_pci_remove(pdev);
+}
+
+static int enetc4_link_init(struct enetc_ndev_priv *priv,
+ struct device_node *node)
+{
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct device *dev = priv->dev;
+ int err;
+
+ err = of_get_phy_mode(node, &pf->if_mode);
+ if (err) {
+ dev_err(dev, "Failed to get PHY mode\n");
+ return err;
+ }
+
+ err = enetc_mdiobus_create(pf, node);
+ if (err) {
+ dev_err(dev, "Failed to create MDIO bus\n");
+ return err;
+ }
+
+ err = enetc_phylink_create(priv, node, &enetc_pl_mac_ops);
+ if (err) {
+ dev_err(dev, "Failed to create phylink\n");
+ goto err_phylink_create;
+ }
+
+ return 0;
+
+err_phylink_create:
+ enetc_mdiobus_destroy(pf);
+
+ return err;
+}
+
+static void enetc4_link_deinit(struct enetc_ndev_priv *priv)
+{
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ enetc_phylink_destroy(priv);
+ enetc_mdiobus_destroy(pf);
+}
+
+static int enetc4_pf_netdev_create(struct enetc_si *si)
+{
+ struct device *dev = &si->pdev->dev;
+ struct enetc_ndev_priv *priv;
+ struct net_device *ndev;
+ int err;
+
+ ndev = alloc_etherdev_mqs(sizeof(struct enetc_ndev_priv),
+ si->num_tx_rings, si->num_rx_rings);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
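+ /* The "ref" clock is optional: devm_clk_get_optional() returns NULL
+ * rather than an error when the device tree does not describe it.
+ */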
+ priv->ref_clk = devm_clk_get_optional(dev, "ref");
+ if (IS_ERR(priv->ref_clk)) {
+ dev_err(dev, "Get reference clock failed\n");
+ err = PTR_ERR(priv->ref_clk);
+ goto err_clk_get;
+ }
+
+ enetc_pf_netdev_setup(si, ndev, &enetc4_ndev_ops);
+
+ enetc_init_si_rings_params(priv);
+
+ err = enetc_configure_si(priv);
+ if (err) {
+ dev_err(dev, "Failed to configure SI\n");
+ goto err_config_si;
+ }
+
+ err = enetc_alloc_msix(priv);
+ if (err) {
+ dev_err(dev, "Failed to alloc MSI-X\n");
+ goto err_alloc_msix;
+ }
+
+ err = enetc4_link_init(priv, dev->of_node);
+ if (err)
+ goto err_link_init;
+
+ err = register_netdev(ndev);
+ if (err) {
+ dev_err(dev, "Failed to register netdev\n");
+ goto err_reg_netdev;
+ }
+
+ return 0;
+
+err_reg_netdev:
+ enetc4_link_deinit(priv);
+err_link_init:
+ enetc_free_msix(priv);
+err_alloc_msix:
+err_config_si:
+err_clk_get:
+ mutex_destroy(&priv->mm_lock);
+ free_netdev(ndev);
+
+ return err;
+}
+
+static void enetc4_pf_netdev_destroy(struct enetc_si *si)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(si->ndev);
+ struct net_device *ndev = si->ndev;
+
+ unregister_netdev(ndev);
+ enetc_free_msix(priv);
+ free_netdev(ndev);
+}
+
+static int enetc4_pf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct enetc_si *si;
+ struct enetc_pf *pf;
+ int err;
+
+ err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
+ if (err)
+ return dev_err_probe(dev, err, "PCIe probing failed\n");
+
+ err = devm_add_action_or_reset(dev, enetc4_pci_remove, pdev);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Add enetc4_pci_remove() action failed\n");
+
+ /* si is the private data. */
+ si = pci_get_drvdata(pdev);
+ if (!si->hw.port || !si->hw.global)
+ return dev_err_probe(dev, -ENODEV,
+ "Couldn't map PF only space\n");
+
+ si->revision = enetc_get_ip_revision(&si->hw);
+ err = enetc_get_driver_data(si);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Could not get VF driver data\n");
+
+ err = enetc4_pf_struct_init(si);
+ if (err)
+ return err;
+
+ pf = enetc_si_priv(si);
+ err = enetc4_pf_init(pf);
+ if (err)
+ return err;
+
+ enetc_get_si_caps(si);
+
+ return enetc4_pf_netdev_create(si);
+}
+
+static void enetc4_pf_remove(struct pci_dev *pdev)
+{
+ struct enetc_si *si = pci_get_drvdata(pdev);
+
+ enetc4_pf_netdev_destroy(si);
+}
+
+static const struct pci_device_id enetc4_pf_id_table[] = {
+ { PCI_DEVICE(NXP_ENETC_VENDOR_ID, NXP_ENETC_PF_DEV_ID) },
+ { 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc4_pf_id_table);
+
+static struct pci_driver enetc4_pf_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = enetc4_pf_id_table,
+ .probe = enetc4_pf_probe,
+ .remove = enetc4_pf_remove,
+};
+module_pci_driver(enetc4_pf_driver);
+
+MODULE_DESCRIPTION("ENETC4 PF Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 2563eb8ac7b6..bf34b5bb1e35 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -247,38 +247,25 @@ static int enetc_get_sset_count(struct net_device *ndev, int sset)
static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- u8 *p = data;
int i, j;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) {
- strscpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < priv->num_tx_rings; i++) {
- for (j = 0; j < ARRAY_SIZE(tx_ring_stats); j++) {
- snprintf(p, ETH_GSTRING_LEN, tx_ring_stats[j],
- i);
- p += ETH_GSTRING_LEN;
- }
- }
- for (i = 0; i < priv->num_rx_rings; i++) {
- for (j = 0; j < ARRAY_SIZE(rx_ring_stats); j++) {
- snprintf(p, ETH_GSTRING_LEN, rx_ring_stats[j],
- i);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++)
+ ethtool_puts(&data, enetc_si_counters[i].name);
+ for (i = 0; i < priv->num_tx_rings; i++)
+ for (j = 0; j < ARRAY_SIZE(tx_ring_stats); j++)
+ ethtool_sprintf(&data, tx_ring_stats[j], i);
+ for (i = 0; i < priv->num_rx_rings; i++)
+ for (j = 0; j < ARRAY_SIZE(rx_ring_stats); j++)
+ ethtool_sprintf(&data, rx_ring_stats[j], i);
if (!enetc_si_is_pf(priv->si))
break;
- for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) {
- strscpy(p, enetc_port_counters[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
+ ethtool_puts(&data, enetc_port_counters[i].name);
+
break;
}
}
@@ -775,9 +762,10 @@ static int enetc_get_coalesce(struct net_device *ndev,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_int_vector *v = priv->int_vector[0];
+ u64 clk_freq = priv->sysclk_freq;
- ic->tx_coalesce_usecs = enetc_cycles_to_usecs(priv->tx_ictt);
- ic->rx_coalesce_usecs = enetc_cycles_to_usecs(v->rx_ictt);
+ ic->tx_coalesce_usecs = enetc_cycles_to_usecs(priv->tx_ictt, clk_freq);
+ ic->rx_coalesce_usecs = enetc_cycles_to_usecs(v->rx_ictt, clk_freq);
ic->tx_max_coalesced_frames = ENETC_TXIC_PKTTHR;
ic->rx_max_coalesced_frames = ENETC_RXIC_PKTTHR;
@@ -793,12 +781,13 @@ static int enetc_set_coalesce(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ u64 clk_freq = priv->sysclk_freq;
u32 rx_ictt, tx_ictt;
int i, ic_mode;
bool changed;
- tx_ictt = enetc_usecs_to_cycles(ic->tx_coalesce_usecs);
- rx_ictt = enetc_usecs_to_cycles(ic->rx_coalesce_usecs);
+ tx_ictt = enetc_usecs_to_cycles(ic->tx_coalesce_usecs, clk_freq);
+ rx_ictt = enetc_usecs_to_cycles(ic->rx_coalesce_usecs, clk_freq);
if (ic->rx_max_coalesced_frames != ENETC_RXIC_PKTTHR)
return -EOPNOTSUPP;
@@ -1178,7 +1167,7 @@ void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link)
}
EXPORT_SYMBOL_GPL(enetc_mm_link_state_update);
-static const struct ethtool_ops enetc_pf_ethtool_ops = {
+const struct ethtool_ops enetc_pf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -1213,7 +1202,7 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_mm_stats = enetc_get_mm_stats,
};
-static const struct ethtool_ops enetc_vf_ethtool_ops = {
+const struct ethtool_ops enetc_vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -1234,13 +1223,26 @@ static const struct ethtool_ops enetc_vf_ethtool_ops = {
.get_ts_info = enetc_get_ts_info,
};
+const struct ethtool_ops enetc4_pf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+ .get_ringparam = enetc_get_ringparam,
+ .get_coalesce = enetc_get_coalesce,
+ .set_coalesce = enetc_set_coalesce,
+ .get_link_ksettings = enetc_get_link_ksettings,
+ .set_link_ksettings = enetc_set_link_ksettings,
+ .get_link = ethtool_op_get_link,
+ .get_wol = enetc_get_wol,
+ .set_wol = enetc_set_wol,
+ .get_pauseparam = enetc_get_pauseparam,
+ .set_pauseparam = enetc_set_pauseparam,
+};
+
void enetc_set_ethtool_ops(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- if (enetc_si_is_pf(priv->si))
- ndev->ethtool_ops = &enetc_pf_ethtool_ops;
- else
- ndev->ethtool_ops = &enetc_vf_ethtool_ops;
+ ndev->ethtool_ops = priv->si->drvdata->eth_ops;
}
EXPORT_SYMBOL_GPL(enetc_set_ethtool_ops);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 1619943fb263..7c3285584f8a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -6,6 +6,8 @@
#define ENETC_MM_VERIFY_SLEEP_US USEC_PER_MSEC
#define ENETC_MM_VERIFY_RETRIES 3
+#define ENETC_NUM_TC 8
+
/* ENETC device IDs */
#define ENETC_DEV_ID_PF 0xe100
#define ENETC_DEV_ID_VF 0xef00
@@ -25,6 +27,7 @@
#define ENETC_SIPCAPR0_RSS BIT(8)
#define ENETC_SIPCAPR0_QBV BIT(4)
#define ENETC_SIPCAPR0_QBU BIT(3)
+#define ENETC_SIPCAPR0_RFS BIT(2)
#define ENETC_SIPCAPR1 0x24
#define ENETC_SITGTGR 0x30
#define ENETC_SIRBGCR 0x38
@@ -368,6 +371,10 @@ enum enetc_bdr_type {TX, RX};
/** Global regs, offset: 2_0000h */
#define ENETC_GLOBAL_BASE 0x20000
#define ENETC_G_EIPBRR0 0x0bf8
+#define EIPBRR0_REVISION GENMASK(15, 0)
+#define ENETC_REV_1_0 0x0100
+#define ENETC_REV_4_1 0x0401
+
#define ENETC_G_EIPBRR1 0x0bfc
#define ENETC_G_EPFBLPR(n) (0xd00 + 4 * (n))
#define ENETC_G_EPFBLPR1_XGMII 0x80000000
@@ -396,18 +403,22 @@ struct enetc_hw {
*/
extern rwlock_t enetc_mdio_lock;
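+/* Static key set by the ENETC MDIO driver on parts affected by the
+ * ERR050089 erratum; everywhere else the branch keeps the register
+ * fast path lock-free.
+ */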
+DECLARE_STATIC_KEY_FALSE(enetc_has_err050089);
+
/* use this locking primitive only on the fast datapath to
* group together multiple non-MDIO register accesses to
* minimize the overhead of the lock
*/
static inline void enetc_lock_mdio(void)
{
- read_lock(&enetc_mdio_lock);
+ if (static_branch_unlikely(&enetc_has_err050089))
+ read_lock(&enetc_mdio_lock);
}
static inline void enetc_unlock_mdio(void)
{
- read_unlock(&enetc_mdio_lock);
+ if (static_branch_unlikely(&enetc_has_err050089))
+ read_unlock(&enetc_mdio_lock);
}
/* use these accessors only on the fast datapath under
@@ -416,14 +427,16 @@ static inline void enetc_unlock_mdio(void)
*/
static inline u32 enetc_rd_reg_hot(void __iomem *reg)
{
- lockdep_assert_held(&enetc_mdio_lock);
+ if (static_branch_unlikely(&enetc_has_err050089))
+ lockdep_assert_held(&enetc_mdio_lock);
return ioread32(reg);
}
static inline void enetc_wr_reg_hot(void __iomem *reg, u32 val)
{
- lockdep_assert_held(&enetc_mdio_lock);
+ if (static_branch_unlikely(&enetc_has_err050089))
+ lockdep_assert_held(&enetc_mdio_lock);
iowrite32(val, reg);
}
@@ -452,9 +465,13 @@ static inline u32 _enetc_rd_mdio_reg_wa(void __iomem *reg)
unsigned long flags;
u32 val;
- write_lock_irqsave(&enetc_mdio_lock, flags);
- val = ioread32(reg);
- write_unlock_irqrestore(&enetc_mdio_lock, flags);
+ if (static_branch_unlikely(&enetc_has_err050089)) {
+ write_lock_irqsave(&enetc_mdio_lock, flags);
+ val = ioread32(reg);
+ write_unlock_irqrestore(&enetc_mdio_lock, flags);
+ } else {
+ val = ioread32(reg);
+ }
return val;
}
@@ -463,9 +480,13 @@ static inline void _enetc_wr_mdio_reg_wa(void __iomem *reg, u32 val)
{
unsigned long flags;
- write_lock_irqsave(&enetc_mdio_lock, flags);
- iowrite32(val, reg);
- write_unlock_irqrestore(&enetc_mdio_lock, flags);
+ if (static_branch_unlikely(&enetc_has_err050089)) {
+ write_lock_irqsave(&enetc_mdio_lock, flags);
+ iowrite32(val, reg);
+ write_unlock_irqrestore(&enetc_mdio_lock, flags);
+ } else {
+ iowrite32(val, reg);
+ }
}
#ifdef ioread64
@@ -957,15 +978,17 @@ struct enetc_cbd {
u8 status_flags;
};
-#define ENETC_CLK 400000000ULL
-static inline u32 enetc_cycles_to_usecs(u32 cycles)
+#define ENETC_CLK_400M 400000000ULL
+#define ENETC_CLK_333M 333000000ULL
+
+static inline u32 enetc_cycles_to_usecs(u32 cycles, u64 clk_freq)
{
- return (u32)div_u64(cycles * 1000000ULL, ENETC_CLK);
+ return (u32)div_u64(cycles * 1000000ULL, clk_freq);
}
-static inline u32 enetc_usecs_to_cycles(u32 usecs)
+static inline u32 enetc_usecs_to_cycles(u32 usecs, u64 clk_freq)
{
- return (u32)div_u64(usecs * ENETC_CLK, 1000000ULL);
+ return (u32)div_u64(usecs * clk_freq, 1000000ULL);
}
/* Port traffic class frame preemption register */
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
index a1b595bd7993..e108cac8288d 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -4,11 +4,35 @@
#include <linux/of_mdio.h>
#include "enetc_pf.h"
+#define NETC_EMDIO_VEN_ID 0x1131
+#define NETC_EMDIO_DEV_ID 0xee00
#define ENETC_MDIO_DEV_ID 0xee01
#define ENETC_MDIO_DEV_NAME "FSL PCIe IE Central MDIO"
#define ENETC_MDIO_BUS_NAME ENETC_MDIO_DEV_NAME " Bus"
#define ENETC_MDIO_DRV_NAME ENETC_MDIO_DEV_NAME " driver"
+DEFINE_STATIC_KEY_FALSE(enetc_has_err050089);
+EXPORT_SYMBOL_GPL(enetc_has_err050089);
+
+static void enetc_emdio_enable_err050089(struct pci_dev *pdev)
+{
+ if (pdev->vendor == PCI_VENDOR_ID_FREESCALE &&
+ pdev->device == ENETC_MDIO_DEV_ID) {
+ static_branch_inc(&enetc_has_err050089);
+ dev_info(&pdev->dev, "Enabled ERR050089 workaround\n");
+ }
+}
+
+static void enetc_emdio_disable_err050089(struct pci_dev *pdev)
+{
+ if (pdev->vendor == PCI_VENDOR_ID_FREESCALE &&
+ pdev->device == ENETC_MDIO_DEV_ID) {
+ static_branch_dec(&enetc_has_err050089);
+ if (!static_key_enabled(&enetc_has_err050089.key))
+ dev_info(&pdev->dev, "Disabled ERR050089 workaround\n");
+ }
+}
+
static int enetc_pci_mdio_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -62,6 +86,8 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
goto err_pci_mem_reg;
}
+ enetc_emdio_enable_err050089(pdev);
+
err = of_mdiobus_register(bus, dev->of_node);
if (err)
goto err_mdiobus_reg;
@@ -71,6 +97,7 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
return 0;
err_mdiobus_reg:
+ enetc_emdio_disable_err050089(pdev);
pci_release_region(pdev, 0);
err_pci_mem_reg:
pci_disable_device(pdev);
@@ -88,6 +115,9 @@ static void enetc_pci_mdio_remove(struct pci_dev *pdev)
struct enetc_mdio_priv *mdio_priv;
mdiobus_unregister(bus);
+
+ enetc_emdio_disable_err050089(pdev);
+
mdio_priv = bus->priv;
iounmap(mdio_priv->hw->port);
pci_release_region(pdev, 0);
@@ -96,6 +126,7 @@ static void enetc_pci_mdio_remove(struct pci_dev *pdev)
static const struct pci_device_id enetc_pci_mdio_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_MDIO_DEV_ID) },
+ { PCI_DEVICE(NETC_EMDIO_VEN_ID, NETC_EMDIO_DEV_ID) },
{ 0, } /* End of table. */
};
MODULE_DEVICE_TABLE(pci, enetc_pci_mdio_id_table);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index c95a7c083b0f..c47b4a743d93 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -2,15 +2,13 @@
/* Copyright 2017-2019 NXP */
#include <linux/unaligned.h>
-#include <linux/mdio.h>
#include <linux/module.h>
-#include <linux/fsl/enetc_mdio.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pcs-lynx.h>
#include "enetc_ierb.h"
-#include "enetc_pf.h"
+#include "enetc_pf_common.h"
#define ENETC_DRV_NAME_STR "ENETC PF driver"
@@ -33,18 +31,15 @@ static void enetc_pf_set_primary_mac_addr(struct enetc_hw *hw, int si,
__raw_writew(lower, hw->port + ENETC_PSIPMAR1(si));
}
-static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr)
+static struct phylink_pcs *enetc_pf_create_pcs(struct enetc_pf *pf,
+ struct mii_bus *bus)
{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct sockaddr *saddr = addr;
-
- if (!is_valid_ether_addr(saddr->sa_data))
- return -EADDRNOTAVAIL;
-
- eth_hw_addr_set(ndev, saddr->sa_data);
- enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data);
+ return lynx_pcs_create_mdiodev(bus, 0);
+}
- return 0;
+static void enetc_pf_destroy_pcs(struct phylink_pcs *pcs)
+{
+ lynx_pcs_destroy(pcs);
}
static void enetc_set_vlan_promisc(struct enetc_hw *hw, char si_map)
@@ -393,56 +388,6 @@ static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en)
return 0;
}
-static int enetc_setup_mac_address(struct device_node *np, struct enetc_pf *pf,
- int si)
-{
- struct device *dev = &pf->si->pdev->dev;
- struct enetc_hw *hw = &pf->si->hw;
- u8 mac_addr[ETH_ALEN] = { 0 };
- int err;
-
- /* (1) try to get the MAC address from the device tree */
- if (np) {
- err = of_get_mac_address(np, mac_addr);
- if (err == -EPROBE_DEFER)
- return err;
- }
-
- /* (2) bootloader supplied MAC address */
- if (is_zero_ether_addr(mac_addr))
- enetc_pf_get_primary_mac_addr(hw, si, mac_addr);
-
- /* (3) choose a random one */
- if (is_zero_ether_addr(mac_addr)) {
- eth_random_addr(mac_addr);
- dev_info(dev, "no MAC address specified for SI%d, using %pM\n",
- si, mac_addr);
- }
-
- enetc_pf_set_primary_mac_addr(hw, si, mac_addr);
-
- return 0;
-}
-
-static int enetc_setup_mac_addresses(struct device_node *np,
- struct enetc_pf *pf)
-{
- int err, i;
-
- /* The PF might take its MAC from the device tree */
- err = enetc_setup_mac_address(np, pf, 0);
- if (err)
- return err;
-
- for (i = 0; i < pf->total_vfs; i++) {
- err = enetc_setup_mac_address(NULL, pf, i + 1);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static void enetc_port_assign_rfs_entries(struct enetc_si *si)
{
struct enetc_pf *pf = enetc_si_priv(si);
@@ -766,187 +711,6 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_xdp_xmit = enetc_xdp_xmit,
};
-static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
- const struct net_device_ops *ndev_ops)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
-
- SET_NETDEV_DEV(ndev, &si->pdev->dev);
- priv->ndev = ndev;
- priv->si = si;
- priv->dev = &si->pdev->dev;
- si->ndev = ndev;
-
- priv->msg_enable = (NETIF_MSG_WOL << 1) - 1;
- ndev->netdev_ops = ndev_ops;
- enetc_set_ethtool_ops(ndev);
- ndev->watchdog_timeo = 5 * HZ;
- ndev->max_mtu = ENETC_MAX_MTU;
-
- ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
- ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
- ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6;
-
- if (si->num_rss)
- ndev->hw_features |= NETIF_F_RXHASH;
-
- ndev->priv_flags |= IFF_UNICAST_FLT;
- ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
- NETDEV_XDP_ACT_NDO_XMIT_SG;
-
- if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) {
- priv->active_offloads |= ENETC_F_QCI;
- ndev->features |= NETIF_F_HW_TC;
- ndev->hw_features |= NETIF_F_HW_TC;
- }
-
- /* pick up primary MAC address from SI */
- enetc_load_primary_mac_addr(&si->hw, ndev);
-}
-
-static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
-{
- struct device *dev = &pf->si->pdev->dev;
- struct enetc_mdio_priv *mdio_priv;
- struct mii_bus *bus;
- int err;
-
- bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
- if (!bus)
- return -ENOMEM;
-
- bus->name = "Freescale ENETC MDIO Bus";
- bus->read = enetc_mdio_read_c22;
- bus->write = enetc_mdio_write_c22;
- bus->read_c45 = enetc_mdio_read_c45;
- bus->write_c45 = enetc_mdio_write_c45;
- bus->parent = dev;
- mdio_priv = bus->priv;
- mdio_priv->hw = &pf->si->hw;
- mdio_priv->mdio_base = ENETC_EMDIO_BASE;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
-
- err = of_mdiobus_register(bus, np);
- if (err)
- return dev_err_probe(dev, err, "cannot register MDIO bus\n");
-
- pf->mdio = bus;
-
- return 0;
-}
-
-static void enetc_mdio_remove(struct enetc_pf *pf)
-{
- if (pf->mdio)
- mdiobus_unregister(pf->mdio);
-}
-
-static int enetc_imdio_create(struct enetc_pf *pf)
-{
- struct device *dev = &pf->si->pdev->dev;
- struct enetc_mdio_priv *mdio_priv;
- struct phylink_pcs *phylink_pcs;
- struct mii_bus *bus;
- int err;
-
- bus = mdiobus_alloc_size(sizeof(*mdio_priv));
- if (!bus)
- return -ENOMEM;
-
- bus->name = "Freescale ENETC internal MDIO Bus";
- bus->read = enetc_mdio_read_c22;
- bus->write = enetc_mdio_write_c22;
- bus->read_c45 = enetc_mdio_read_c45;
- bus->write_c45 = enetc_mdio_write_c45;
- bus->parent = dev;
- bus->phy_mask = ~0;
- mdio_priv = bus->priv;
- mdio_priv->hw = &pf->si->hw;
- mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
-
- err = mdiobus_register(bus);
- if (err) {
- dev_err(dev, "cannot register internal MDIO bus (%d)\n", err);
- goto free_mdio_bus;
- }
-
- phylink_pcs = lynx_pcs_create_mdiodev(bus, 0);
- if (IS_ERR(phylink_pcs)) {
- err = PTR_ERR(phylink_pcs);
- dev_err(dev, "cannot create lynx pcs (%d)\n", err);
- goto unregister_mdiobus;
- }
-
- pf->imdio = bus;
- pf->pcs = phylink_pcs;
-
- return 0;
-
-unregister_mdiobus:
- mdiobus_unregister(bus);
-free_mdio_bus:
- mdiobus_free(bus);
- return err;
-}
-
-static void enetc_imdio_remove(struct enetc_pf *pf)
-{
- if (pf->pcs)
- lynx_pcs_destroy(pf->pcs);
- if (pf->imdio) {
- mdiobus_unregister(pf->imdio);
- mdiobus_free(pf->imdio);
- }
-}
-
-static bool enetc_port_has_pcs(struct enetc_pf *pf)
-{
- return (pf->if_mode == PHY_INTERFACE_MODE_SGMII ||
- pf->if_mode == PHY_INTERFACE_MODE_1000BASEX ||
- pf->if_mode == PHY_INTERFACE_MODE_2500BASEX ||
- pf->if_mode == PHY_INTERFACE_MODE_USXGMII);
-}
-
-static int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node)
-{
- struct device_node *mdio_np;
- int err;
-
- mdio_np = of_get_child_by_name(node, "mdio");
- if (mdio_np) {
- err = enetc_mdio_probe(pf, mdio_np);
-
- of_node_put(mdio_np);
- if (err)
- return err;
- }
-
- if (enetc_port_has_pcs(pf)) {
- err = enetc_imdio_create(pf);
- if (err) {
- enetc_mdio_remove(pf);
- return err;
- }
- }
-
- return 0;
-}
-
-static void enetc_mdiobus_destroy(struct enetc_pf *pf)
-{
- enetc_mdio_remove(pf);
- enetc_imdio_remove(pf);
-}
-
static struct phylink_pcs *
enetc_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
{
@@ -1092,47 +856,6 @@ static const struct phylink_mac_ops enetc_mac_phylink_ops = {
.mac_link_down = enetc_pl_mac_link_down,
};
-static int enetc_phylink_create(struct enetc_ndev_priv *priv,
- struct device_node *node)
-{
- struct enetc_pf *pf = enetc_si_priv(priv->si);
- struct phylink *phylink;
- int err;
-
- pf->phylink_config.dev = &priv->ndev->dev;
- pf->phylink_config.type = PHYLINK_NETDEV;
- pf->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
-
- __set_bit(PHY_INTERFACE_MODE_INTERNAL,
- pf->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_SGMII,
- pf->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_1000BASEX,
- pf->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_2500BASEX,
- pf->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_USXGMII,
- pf->phylink_config.supported_interfaces);
- phy_interface_set_rgmii(pf->phylink_config.supported_interfaces);
-
- phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node),
- pf->if_mode, &enetc_mac_phylink_ops);
- if (IS_ERR(phylink)) {
- err = PTR_ERR(phylink);
- return err;
- }
-
- priv->phylink = phylink;
-
- return 0;
-}
-
-static void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
-{
- phylink_destroy(priv->phylink);
-}
-
/* Initialize the entire shared memory for the flow steering entries
* of this port (PF + VFs)
*/
@@ -1215,6 +938,13 @@ static struct enetc_si *enetc_psi_create(struct pci_dev *pdev)
goto out_pci_remove;
}
+ si->revision = enetc_get_ip_revision(&si->hw);
+ err = enetc_get_driver_data(si);
+ if (err) {
+ dev_err(&pdev->dev, "Could not get PF driver data\n");
+ goto out_pci_remove;
+ }
+
err = enetc_setup_cbdr(&pdev->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE,
&si->cbd_ring);
if (err)
@@ -1250,6 +980,14 @@ static void enetc_psi_destroy(struct pci_dev *pdev)
enetc_pci_remove(pdev);
}
+static const struct enetc_pf_ops enetc_pf_ops = {
+ .set_si_primary_mac = enetc_pf_set_primary_mac_addr,
+ .get_si_primary_mac = enetc_pf_get_primary_mac_addr,
+ .create_pcs = enetc_pf_create_pcs,
+ .destroy_pcs = enetc_pf_destroy_pcs,
+ .enable_psfp = enetc_psfp_enable,
+};
+
static int enetc_pf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -1276,6 +1014,8 @@ static int enetc_pf_probe(struct pci_dev *pdev,
pf = enetc_si_priv(si);
pf->si = si;
+ pf->ops = &enetc_pf_ops;
+
pf->total_vfs = pci_sriov_get_totalvfs(pdev);
if (pf->total_vfs) {
pf->vf_state = kcalloc(pf->total_vfs, sizeof(struct enetc_vf_state),
@@ -1335,7 +1075,7 @@ static int enetc_pf_probe(struct pci_dev *pdev,
if (err)
goto err_mdiobus_create;
- err = enetc_phylink_create(priv, node);
+ err = enetc_phylink_create(priv, node, &enetc_mac_phylink_ops);
if (err)
goto err_phylink_create;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
index c26bd66e4597..a26a12863855 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -28,6 +28,24 @@ struct enetc_vf_state {
enum enetc_vf_flags flags;
};
+struct enetc_port_caps {
+ u32 half_duplex:1;
+ int num_vsi;
+ int num_msix;
+ int num_rx_bdr;
+ int num_tx_bdr;
+};
+
+struct enetc_pf;
+
+struct enetc_pf_ops {
+ void (*set_si_primary_mac)(struct enetc_hw *hw, int si, const u8 *addr);
+ void (*get_si_primary_mac)(struct enetc_hw *hw, int si, u8 *addr);
+ struct phylink_pcs *(*create_pcs)(struct enetc_pf *pf, struct mii_bus *bus);
+ void (*destroy_pcs)(struct phylink_pcs *pcs);
+ int (*enable_psfp)(struct enetc_ndev_priv *priv);
+};
+
struct enetc_pf {
struct enetc_si *si;
int num_vfs; /* number of active VFs, after sriov_init */
@@ -50,6 +68,9 @@ struct enetc_pf {
phy_interface_t if_mode;
struct phylink_config phylink_config;
+
+ struct enetc_port_caps caps;
+ const struct enetc_pf_ops *ops;
};
#define phylink_to_enetc_pf(config) \
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
new file mode 100644
index 000000000000..0eecfc833164
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2024 NXP */
+
+#include <linux/fsl/enetc_mdio.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "enetc_pf_common.h"
+
+static void enetc_set_si_hw_addr(struct enetc_pf *pf, int si,
+ const u8 *mac_addr)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+
+ pf->ops->set_si_primary_mac(hw, si, mac_addr);
+}
+
+static void enetc_get_si_hw_addr(struct enetc_pf *pf, int si, u8 *mac_addr)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+
+ pf->ops->get_si_primary_mac(hw, si, mac_addr);
+}
+
+int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(ndev, saddr->sa_data);
+ enetc_set_si_hw_addr(pf, 0, saddr->sa_data);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_pf_set_mac_addr);
+
+static int enetc_setup_mac_address(struct device_node *np, struct enetc_pf *pf,
+ int si)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ int err;
+
+ /* (1) try to get the MAC address from the device tree */
+ if (np) {
+ err = of_get_mac_address(np, mac_addr);
+ if (err == -EPROBE_DEFER)
+ return err;
+ }
+
+ /* (2) bootloader supplied MAC address */
+ if (is_zero_ether_addr(mac_addr))
+ enetc_get_si_hw_addr(pf, si, mac_addr);
+
+ /* (3) choose a random one */
+ if (is_zero_ether_addr(mac_addr)) {
+ eth_random_addr(mac_addr);
+ dev_info(dev, "no MAC address specified for SI%d, using %pM\n",
+ si, mac_addr);
+ }
+
+ enetc_set_si_hw_addr(pf, si, mac_addr);
+
+ return 0;
+}
+
+int enetc_setup_mac_addresses(struct device_node *np, struct enetc_pf *pf)
+{
+ int err, i;
+
+ /* The PF might take its MAC from the device tree */
+ err = enetc_setup_mac_address(np, pf, 0);
+ if (err)
+ return err;
+
+ for (i = 0; i < pf->total_vfs; i++) {
+ err = enetc_setup_mac_address(NULL, pf, i + 1);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_setup_mac_addresses);
+
+void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+ const struct net_device_ops *ndev_ops)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(si);
+
+ SET_NETDEV_DEV(ndev, &si->pdev->dev);
+ priv->ndev = ndev;
+ priv->si = si;
+ priv->dev = &si->pdev->dev;
+ si->ndev = ndev;
+
+ priv->msg_enable = (NETIF_MSG_WOL << 1) - 1;
+ priv->sysclk_freq = si->drvdata->sysclk_freq;
+ ndev->netdev_ops = ndev_ops;
+ enetc_set_ethtool_ops(ndev);
+ ndev->watchdog_timeo = 5 * HZ;
+ ndev->max_mtu = ENETC_MAX_MTU;
+
+ ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
+
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* TODO: currently, the i.MX95 ENETC driver does not support advanced features */
+ if (!is_enetc_rev1(si)) {
+ ndev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK);
+ goto end;
+ }
+
+ if (si->num_rss)
+ ndev->hw_features |= NETIF_F_RXHASH;
+
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+
+ if (si->hw_features & ENETC_SI_F_PSFP && pf->ops->enable_psfp &&
+ !pf->ops->enable_psfp(priv)) {
+ priv->active_offloads |= ENETC_F_QCI;
+ ndev->features |= NETIF_F_HW_TC;
+ ndev->hw_features |= NETIF_F_HW_TC;
+ }
+
+end:
+ /* pick up primary MAC address from SI */
+ enetc_load_primary_mac_addr(&si->hw, ndev);
+}
+EXPORT_SYMBOL_GPL(enetc_pf_netdev_setup);
+
+static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_mdio_priv *mdio_priv;
+ struct mii_bus *bus;
+ int err;
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale ENETC MDIO Bus";
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
+ bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = &pf->si->hw;
+ mdio_priv->mdio_base = ENETC_EMDIO_BASE;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ err = of_mdiobus_register(bus, np);
+ if (err)
+ return dev_err_probe(dev, err, "cannot register MDIO bus\n");
+
+ pf->mdio = bus;
+
+ return 0;
+}
+
+static void enetc_mdio_remove(struct enetc_pf *pf)
+{
+ if (pf->mdio)
+ mdiobus_unregister(pf->mdio);
+}
+
+static int enetc_imdio_create(struct enetc_pf *pf)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_mdio_priv *mdio_priv;
+ struct phylink_pcs *phylink_pcs;
+ struct mii_bus *bus;
+ int err;
+
+ if (!pf->ops->create_pcs) {
+ dev_err(dev, "Creating PCS is not supported\n");
+
+ return -EOPNOTSUPP;
+ }
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale ENETC internal MDIO Bus";
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
+ bus->parent = dev;
+ bus->phy_mask = ~0;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = &pf->si->hw;
+ mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
+
+ err = mdiobus_register(bus);
+ if (err) {
+ dev_err(dev, "cannot register internal MDIO bus (%d)\n", err);
+ goto free_mdio_bus;
+ }
+
+ phylink_pcs = pf->ops->create_pcs(pf, bus);
+ if (IS_ERR(phylink_pcs)) {
+ err = PTR_ERR(phylink_pcs);
+ dev_err(dev, "cannot create lynx pcs (%d)\n", err);
+ goto unregister_mdiobus;
+ }
+
+ pf->imdio = bus;
+ pf->pcs = phylink_pcs;
+
+ return 0;
+
+unregister_mdiobus:
+ mdiobus_unregister(bus);
+free_mdio_bus:
+ mdiobus_free(bus);
+ return err;
+}
+
+static void enetc_imdio_remove(struct enetc_pf *pf)
+{
+ if (pf->pcs && pf->ops->destroy_pcs)
+ pf->ops->destroy_pcs(pf->pcs);
+
+ if (pf->imdio) {
+ mdiobus_unregister(pf->imdio);
+ mdiobus_free(pf->imdio);
+ }
+}
+
+static bool enetc_port_has_pcs(struct enetc_pf *pf)
+{
+ return (pf->if_mode == PHY_INTERFACE_MODE_SGMII ||
+ pf->if_mode == PHY_INTERFACE_MODE_1000BASEX ||
+ pf->if_mode == PHY_INTERFACE_MODE_2500BASEX ||
+ pf->if_mode == PHY_INTERFACE_MODE_USXGMII);
+}
+
+int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node)
+{
+ struct device_node *mdio_np;
+ int err;
+
+ mdio_np = of_get_child_by_name(node, "mdio");
+ if (mdio_np) {
+ err = enetc_mdio_probe(pf, mdio_np);
+
+ of_node_put(mdio_np);
+ if (err)
+ return err;
+ }
+
+ if (enetc_port_has_pcs(pf)) {
+ err = enetc_imdio_create(pf);
+ if (err) {
+ enetc_mdio_remove(pf);
+ return err;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_mdiobus_create);
+
+void enetc_mdiobus_destroy(struct enetc_pf *pf)
+{
+ enetc_mdio_remove(pf);
+ enetc_imdio_remove(pf);
+}
+EXPORT_SYMBOL_GPL(enetc_mdiobus_destroy);
+
+int enetc_phylink_create(struct enetc_ndev_priv *priv, struct device_node *node,
+ const struct phylink_mac_ops *ops)
+{
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct phylink *phylink;
+ int err;
+
+ pf->phylink_config.dev = &priv->ndev->dev;
+ pf->phylink_config.type = PHYLINK_NETDEV;
+ pf->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_USXGMII,
+ pf->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(pf->phylink_config.supported_interfaces);
+
+ phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node),
+ pf->if_mode, ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ return err;
+ }
+
+ priv->phylink = phylink;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_phylink_create);
+
+void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
+{
+ phylink_destroy(priv->phylink);
+}
+EXPORT_SYMBOL_GPL(enetc_phylink_destroy);
+
+MODULE_DESCRIPTION("NXP ENETC PF common functionality driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h
new file mode 100644
index 000000000000..48f55ee743ad
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2024 NXP */
+
+#include "enetc_pf.h"
+
+int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr);
+int enetc_setup_mac_addresses(struct device_node *np, struct enetc_pf *pf);
+void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+ const struct net_device_ops *ndev_ops);
+int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node);
+void enetc_mdiobus_destroy(struct enetc_pf *pf);
+int enetc_phylink_create(struct enetc_ndev_priv *priv, struct device_node *node,
+ const struct phylink_mac_ops *ops);
+void enetc_phylink_destroy(struct enetc_ndev_priv *priv);
+
+static inline u16 enetc_get_ip_revision(struct enetc_hw *hw)
+{
+ return enetc_global_rd(hw, ENETC_G_EIPBRR0) & EIPBRR0_REVISION;
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index b65da49dd926..ccf86651455c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -336,7 +336,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
*
* (enetClockFrequency / portTransmitRate) * 100
*/
- hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
+ hi_credit_reg = (u32)div_u64((priv->sysclk_freq * 100ULL) * hi_credit_bit,
port_transmit_rate * 1000000ULL);
enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index b15db70769e5..a5f8ce576b6e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -135,6 +135,7 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
si->ndev = ndev;
priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
+ priv->sysclk_freq = si->drvdata->sysclk_freq;
ndev->netdev_ops = ndev_ops;
enetc_set_ethtool_ops(ndev);
ndev->watchdog_timeo = 5 * HZ;
@@ -171,6 +172,13 @@ static int enetc_vf_probe(struct pci_dev *pdev,
return dev_err_probe(&pdev->dev, err, "PCI probing failed\n");
si = pci_get_drvdata(pdev);
+ si->revision = ENETC_REV_1_0;
+ err = enetc_get_driver_data(si);
+ if (err) {
+ dev_err_probe(&pdev->dev, err,
+ "Could not get VF driver data\n");
+ goto err_alloc_netdev;
+ }
enetc_get_si_caps(si);
diff --git a/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
new file mode 100644
index 000000000000..bcb8eefeb93c
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * NXP NETC Blocks Control Driver
+ *
+ * Copyright 2024 NXP
+ *
+ * This driver handles the pre-initialization of NETC, such as the PCS and
+ * MII protocols, LDID, warm reset, etc. Therefore, all NETC device drivers
+ * can only be probed after the netc-blk-ctrl driver has completed its
+ * initialization. In addition, when the system enters suspend mode, IERB,
+ * PRB, and NETCMIX are powered off (except in wake-on-LAN setups), so these
+ * blocks must be reinitialized on resume.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/fsl/netc_global.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+
+/* NETCMIX registers */
+#define IMX95_CFG_LINK_IO_VAR 0x0
+#define IO_VAR_16FF_16G_SERDES 0x1
+#define IO_VAR(port, var) (((var) & 0xf) << ((port) << 2))
+
+#define IMX95_CFG_LINK_MII_PROT 0x4
+#define CFG_LINK_MII_PORT_0 GENMASK(3, 0)
+#define CFG_LINK_MII_PORT_1 GENMASK(7, 4)
+#define MII_PROT_MII 0x0
+#define MII_PROT_RMII 0x1
+#define MII_PROT_RGMII 0x2
+#define MII_PROT_SERIAL 0x3
+#define MII_PROT(port, prot) (((prot) & 0xf) << ((port) << 2))
+
+#define IMX95_CFG_LINK_PCS_PROT(a) (0x8 + (a) * 4)
+#define PCS_PROT_1G_SGMII BIT(0)
+#define PCS_PROT_2500M_SGMII BIT(1)
+#define PCS_PROT_XFI BIT(3)
+#define PCS_PROT_SFI BIT(4)
+#define PCS_PROT_10G_SXGMII BIT(6)
+
+/* NETC privileged register block register */
+#define PRB_NETCRR 0x100
+#define NETCRR_SR BIT(0)
+#define NETCRR_LOCK BIT(1)
+
+#define PRB_NETCSR 0x104
+#define NETCSR_ERROR BIT(0)
+#define NETCSR_STATE BIT(1)
+
+/* NETC integrated endpoint register block register */
+#define IERB_EMDIOFAUXR 0x344
+#define IERB_T0FAUXR 0x444
+#define IERB_EFAUXR(a) (0x3044 + 0x100 * (a))
+#define IERB_VFAUXR(a) (0x4004 + 0x40 * (a))
+#define FAUXR_LDID GENMASK(3, 0)
+
+/* Platform information */
+#define IMX95_ENETC0_BUS_DEVFN 0x0
+#define IMX95_ENETC1_BUS_DEVFN 0x40
+#define IMX95_ENETC2_BUS_DEVFN 0x80
+
+/* Flags for different platforms */
+#define NETC_HAS_NETCMIX BIT(0)
+
+struct netc_devinfo {
+ u32 flags;
+ int (*netcmix_init)(struct platform_device *pdev);
+ int (*ierb_init)(struct platform_device *pdev);
+};
+
+struct netc_blk_ctrl {
+ void __iomem *prb;
+ void __iomem *ierb;
+ void __iomem *netcmix;
+
+ const struct netc_devinfo *devinfo;
+ struct platform_device *pdev;
+ struct dentry *debugfs_root;
+};
+
+static void netc_reg_write(void __iomem *base, u32 offset, u32 val)
+{
+ netc_write(base + offset, val);
+}
+
+static u32 netc_reg_read(void __iomem *base, u32 offset)
+{
+ return netc_read(base + offset);
+}
+
+static int netc_of_pci_get_bus_devfn(struct device_node *np)
+{
+ u32 reg[5];
+ int error;
+
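+ /* reg[0] is the standard PCI phys.hi cell: bits 23:16 hold the bus
+ * number and bits 15:8 the device/function, hence the shift below.
+ */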
+ error = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
+ if (error)
+ return error;
+
+ return (reg[0] >> 8) & 0xffff;
+}
+
+static int netc_get_link_mii_protocol(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_MII:
+ return MII_PROT_MII;
+ case PHY_INTERFACE_MODE_RMII:
+ return MII_PROT_RMII;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return MII_PROT_RGMII;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ return MII_PROT_SERIAL;
+ default:
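+/* The ERR050089 workaround is only needed for the Freescale-vendor ENETC
+ * MDIO device, not the newly added NETC EMDIO. The static key is
+ * reference counted, so it stays enabled while at least one affected
+ * device is bound.
+ */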
+ return -EINVAL;
+ }
+}
+
+static int imx95_netcmix_init(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ phy_interface_t interface;
+ int bus_devfn, mii_proto;
+ u32 val;
+ int err;
+
+ /* Default setting of MII protocol */
+ val = MII_PROT(0, MII_PROT_RGMII) | MII_PROT(1, MII_PROT_RGMII) |
+ MII_PROT(2, MII_PROT_SERIAL);
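+ /* ENETC2's link protocol stays fixed to the serial default set above;
+ * only ENETC0 and ENETC1 are updated from the device tree.
+ */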
+
+ /* Update the link MII protocol through parsing phy-mode */
+ for_each_available_child_of_node_scoped(np, child) {
+ for_each_available_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,e101"))
+ continue;
+
+ bus_devfn = netc_of_pci_get_bus_devfn(gchild);
+ if (bus_devfn < 0)
+ return -EINVAL;
+
+ if (bus_devfn == IMX95_ENETC2_BUS_DEVFN)
+ continue;
+
+ err = of_get_phy_mode(gchild, &interface);
+ if (err)
+ continue;
+
+ mii_proto = netc_get_link_mii_protocol(interface);
+ if (mii_proto < 0)
+ return -EINVAL;
+
+ switch (bus_devfn) {
+ case IMX95_ENETC0_BUS_DEVFN:
+ val = u32_replace_bits(val, mii_proto,
+ CFG_LINK_MII_PORT_0);
+ break;
+ case IMX95_ENETC1_BUS_DEVFN:
+ val = u32_replace_bits(val, mii_proto,
+ CFG_LINK_MII_PORT_1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Configure Link I/O variant */
+ netc_reg_write(priv->netcmix, IMX95_CFG_LINK_IO_VAR,
+ IO_VAR(2, IO_VAR_16FF_16G_SERDES));
+ /* Configure Link 2 PCS protocol */
+ netc_reg_write(priv->netcmix, IMX95_CFG_LINK_PCS_PROT(2),
+ PCS_PROT_10G_SXGMII);
+ netc_reg_write(priv->netcmix, IMX95_CFG_LINK_MII_PROT, val);
+
+ return 0;
+}
+
+static bool netc_ierb_is_locked(struct netc_blk_ctrl *priv)
+{
+ return !!(netc_reg_read(priv->prb, PRB_NETCRR) & NETCRR_LOCK);
+}
+
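+/* Setting NETCRR_LOCK freezes the IERB configuration; the NETCSR STATE
+ * bit appears to act as a busy flag, so poll until it clears before
+ * treating the lock as complete.
+ */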
+static int netc_lock_ierb(struct netc_blk_ctrl *priv)
+{
+ u32 val;
+
+ netc_reg_write(priv->prb, PRB_NETCRR, NETCRR_LOCK);
+
+ return read_poll_timeout(netc_reg_read, val, !(val & NETCSR_STATE),
+ 100, 2000, false, priv->prb, PRB_NETCSR);
+}
+
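+/* Clearing NETCRR both releases the lock and triggers a warm reset of
+ * the NETC block; the reset is treated as complete once the LOCK bit
+ * reads back as zero.
+ */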
+static int netc_unlock_ierb_with_warm_reset(struct netc_blk_ctrl *priv)
+{
+ u32 val;
+
+ netc_reg_write(priv->prb, PRB_NETCRR, 0);
+
+ return read_poll_timeout(netc_reg_read, val, !(val & NETCRR_LOCK),
+ 1000, 100000, true, priv->prb, PRB_NETCRR);
+}
+
+static int imx95_ierb_init(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+
+ /* EMDIO: no MSI-X interrupt */
+ netc_reg_write(priv->ierb, IERB_EMDIOFAUXR, 0);
+ /* ENETC0 PF */
+ netc_reg_write(priv->ierb, IERB_EFAUXR(0), 0);
+ /* ENETC0 VF0 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(0), 1);
+ /* ENETC0 VF1 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(1), 2);
+ /* ENETC1 PF */
+ netc_reg_write(priv->ierb, IERB_EFAUXR(1), 3);
+ /* ENETC1 VF0 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(2), 5);
+ /* ENETC1 VF1 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(3), 6);
+ /* ENETC2 PF */
+ netc_reg_write(priv->ierb, IERB_EFAUXR(2), 4);
+ /* ENETC2 VF0 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(4), 5);
+ /* ENETC2 VF1 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(5), 6);
+ /* NETC TIMER */
+ netc_reg_write(priv->ierb, IERB_T0FAUXR, 7);
+
+ return 0;
+}
+
+static int netc_ierb_init(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+ const struct netc_devinfo *devinfo = priv->devinfo;
+ int err;
+
+ if (netc_ierb_is_locked(priv)) {
+ err = netc_unlock_ierb_with_warm_reset(priv);
+ if (err) {
+ dev_err(&pdev->dev, "Unlock IERB failed.\n");
+ return err;
+ }
+ }
+
+ if (devinfo->ierb_init) {
+ err = devinfo->ierb_init(pdev);
+ if (err)
+ return err;
+ }
+
+ err = netc_lock_ierb(priv);
+ if (err) {
+ dev_err(&pdev->dev, "Lock IERB failed.\n");
+ return err;
+ }
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static int netc_prb_show(struct seq_file *s, void *data)
+{
+ struct netc_blk_ctrl *priv = s->private;
+ u32 val;
+
+ val = netc_reg_read(priv->prb, PRB_NETCRR);
+ seq_printf(s, "[PRB NETCRR] Lock:%d SR:%d\n",
+ (val & NETCRR_LOCK) ? 1 : 0,
+ (val & NETCRR_SR) ? 1 : 0);
+
+ val = netc_reg_read(priv->prb, PRB_NETCSR);
+ seq_printf(s, "[PRB NETCSR] State:%d Error:%d\n",
+ (val & NETCSR_STATE) ? 1 : 0,
+ (val & NETCSR_ERROR) ? 1 : 0);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(netc_prb);
+
+static void netc_blk_ctrl_create_debugfs(struct netc_blk_ctrl *priv)
+{
+ struct dentry *root;
+
+ root = debugfs_create_dir("netc_blk_ctrl", NULL);
+ if (IS_ERR(root))
+ return;
+
+ priv->debugfs_root = root;
+
+ debugfs_create_file("prb", 0444, root, priv, &netc_prb_fops);
+}
+
+static void netc_blk_ctrl_remove_debugfs(struct netc_blk_ctrl *priv)
+{
+ debugfs_remove_recursive(priv->debugfs_root);
+ priv->debugfs_root = NULL;
+}
+
+#else
+
+static void netc_blk_ctrl_create_debugfs(struct netc_blk_ctrl *priv)
+{
+}
+
+static void netc_blk_ctrl_remove_debugfs(struct netc_blk_ctrl *priv)
+{
+}
+#endif
+
+static int netc_prb_check_error(struct netc_blk_ctrl *priv)
+{
+ if (netc_reg_read(priv->prb, PRB_NETCSR) & NETCSR_ERROR)
+ return -1;
+
+ return 0;
+}
+
+static const struct netc_devinfo imx95_devinfo = {
+ .flags = NETC_HAS_NETCMIX,
+ .netcmix_init = imx95_netcmix_init,
+ .ierb_init = imx95_ierb_init,
+};
+
+static const struct of_device_id netc_blk_ctrl_match[] = {
+ { .compatible = "nxp,imx95-netc-blk-ctrl", .data = &imx95_devinfo },
+ {},
+};
+MODULE_DEVICE_TABLE(of, netc_blk_ctrl_match);
+
+static int netc_blk_ctrl_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ const struct netc_devinfo *devinfo;
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *id;
+ struct netc_blk_ctrl *priv;
+ struct clk *ipg_clk;
+ void __iomem *regs;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdev = pdev;
+ ipg_clk = devm_clk_get_optional_enabled(dev, "ipg");
+ if (IS_ERR(ipg_clk))
+ return dev_err_probe(dev, PTR_ERR(ipg_clk),
+ "Set ipg clock failed\n");
+
+ id = of_match_device(netc_blk_ctrl_match, dev);
+ if (!id)
+ return dev_err_probe(dev, -EINVAL, "Cannot match device\n");
+
+ devinfo = (struct netc_devinfo *)id->data;
+ if (!devinfo)
+ return dev_err_probe(dev, -EINVAL, "No device information\n");
+
+ priv->devinfo = devinfo;
+ regs = devm_platform_ioremap_resource_byname(pdev, "ierb");
+ if (IS_ERR(regs))
+ return dev_err_probe(dev, PTR_ERR(regs),
+ "Missing IERB resource\n");
+
+ priv->ierb = regs;
+ regs = devm_platform_ioremap_resource_byname(pdev, "prb");
+ if (IS_ERR(regs))
+ return dev_err_probe(dev, PTR_ERR(regs),
+ "Missing PRB resource\n");
+
+ priv->prb = regs;
+ if (devinfo->flags & NETC_HAS_NETCMIX) {
+ regs = devm_platform_ioremap_resource_byname(pdev, "netcmix");
+ if (IS_ERR(regs))
+ return dev_err_probe(dev, PTR_ERR(regs),
+ "Missing NETCMIX resource\n");
+ priv->netcmix = regs;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ if (devinfo->netcmix_init) {
+ err = devinfo->netcmix_init(pdev);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Initializing NETCMIX failed\n");
+ }
+
+ err = netc_ierb_init(pdev);
+ if (err)
+ return dev_err_probe(dev, err, "Initializing IERB failed\n");
+
+ if (netc_prb_check_error(priv) < 0)
+ dev_warn(dev, "The current IERB configuration is invalid\n");
+
+ netc_blk_ctrl_create_debugfs(priv);
+
+ err = of_platform_populate(node, NULL, NULL, dev);
+ if (err) {
+ netc_blk_ctrl_remove_debugfs(priv);
+ return dev_err_probe(dev, err, "of_platform_populate failed\n");
+ }
+
+ return 0;
+}
+
+static void netc_blk_ctrl_remove(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(&pdev->dev);
+ netc_blk_ctrl_remove_debugfs(priv);
+}
+
+static struct platform_driver netc_blk_ctrl_driver = {
+ .driver = {
+ .name = "nxp-netc-blk-ctrl",
+ .of_match_table = netc_blk_ctrl_match,
+ },
+ .probe = netc_blk_ctrl_probe,
+ .remove = netc_blk_ctrl_remove,
+};
+
+module_platform_driver(netc_blk_ctrl_driver);
+
+MODULE_DESCRIPTION("NXP NETC Blocks Control Driver");
+MODULE_LICENSE("Dual BSD/GPL");
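For reference, the IERB unlock path above leans on read_poll_timeout() from <linux/iopoll.h>, which re-evaluates an accessor until a condition holds or the timeout elapses (the final "true" argument sleeps once before the first read). A minimal sketch of the same pattern, using readl() plus illustrative MY_REG/MY_LOCK stand-ins rather than this driver's symbols:

#include <linux/bits.h>
#include <linux/iopoll.h>

#define MY_REG	0x100		/* illustrative register offset, not a driver symbol */
#define MY_LOCK	BIT(0)		/* illustrative lock bit, not a driver symbol */

/* Returns 0 once the lock bit clears, -ETIMEDOUT after 100 ms;
 * the register is sampled every 1 ms.
 */
static int my_wait_unlocked(void __iomem *base)
{
	u32 val;

	return read_poll_timeout(readl, val, !(val & MY_LOCK),
				 1000, 100000, true, base + MY_REG);
}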
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 9d9fcec41488..1b55047c0237 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -4766,7 +4766,7 @@ static struct platform_driver fec_driver = {
},
.id_table = fec_devtype,
.probe = fec_probe,
- .remove_new = fec_drv_remove,
+ .remove = fec_drv_remove,
};
module_platform_driver(fec_driver);
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index ebae71ec26c6..2bfaf14f65c8 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -1040,7 +1040,7 @@ static struct platform_driver mpc52xx_fec_driver = {
.of_match_table = mpc52xx_fec_match,
},
.probe = mpc52xx_fec_probe,
- .remove_new = mpc52xx_fec_remove,
+ .remove = mpc52xx_fec_remove,
#ifdef CONFIG_PM
.suspend = mpc52xx_fec_of_suspend,
.resume = mpc52xx_fec_of_resume,
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
index 39689826cc8f..3d073f0fae63 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -94,7 +94,7 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
goto out_free;
}
- snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res.start);
bus->priv = priv;
bus->parent = dev;
@@ -144,7 +144,7 @@ struct platform_driver mpc52xx_fec_mdio_driver = {
.of_match_table = mpc52xx_fec_mdio_match,
},
.probe = mpc52xx_fec_mdio_probe,
- .remove_new = mpc52xx_fec_mdio_remove,
+ .remove = mpc52xx_fec_mdio_remove,
};
/* let fec driver call it, since this has to be registered before it */
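The "%pa" conversions in this file and in mii-bitbang.c further below fix the same pitfall: res.start is a resource_size_t whose width depends on the kernel configuration, so a plain "%x" is wrong on 64-bit-resource builds. The "%pa" printk extension instead takes a pointer to the value, which is why the call sites pass &res.start. A small sketch:

#include <linux/ioport.h>
#include <linux/printk.h>

static void show_start(const struct resource *res)
{
	/* "%pa" dereferences a phys_addr_t/resource_size_t pointer,
	 * so it prints correctly whether resources are 32 or 64 bit.
	 */
	pr_info("resource starts at %pa\n", &res->start);
}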
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index a4eb6edb850a..7f6b57432071 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -84,8 +84,7 @@
#define FEC_CC_MULT (1 << 31)
#define FEC_COUNTER_PERIOD (1 << 31)
#define PPS_OUPUT_RELOAD_PERIOD NSEC_PER_SEC
-#define FEC_CHANNLE_0 0
-#define DEFAULT_PPS_CHANNEL FEC_CHANNLE_0
+#define DEFAULT_PPS_CHANNEL 0
#define FEC_PTP_MAX_NSEC_PERIOD 4000000000ULL
#define FEC_PTP_MAX_NSEC_COUNTER 0x80000000ULL
@@ -525,7 +524,6 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
int ret = 0;
if (rq->type == PTP_CLK_REQ_PPS) {
- fep->pps_channel = DEFAULT_PPS_CHANNEL;
fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
ret = fec_ptp_enable_pps(fep, on);
@@ -536,10 +534,9 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
if (rq->perout.flags)
return -EOPNOTSUPP;
- if (rq->perout.index != DEFAULT_PPS_CHANNEL)
+ if (rq->perout.index != fep->pps_channel)
return -EOPNOTSUPP;
- fep->pps_channel = DEFAULT_PPS_CHANNEL;
period.tv_sec = rq->perout.period.sec;
period.tv_nsec = rq->perout.period.nsec;
period_ns = timespec64_to_ns(&period);
@@ -707,12 +704,16 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ struct device_node *np = fep->pdev->dev.of_node;
int irq;
int ret;
fep->ptp_caps.owner = THIS_MODULE;
strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
+ fep->pps_channel = DEFAULT_PPS_CHANNEL;
+ of_property_read_u32(np, "fsl,pps-channel", &fep->pps_channel);
+
fep->ptp_caps.max_adj = 250000000;
fep->ptp_caps.n_alarm = 0;
fep->ptp_caps.n_ext_ts = 0;
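The hunk above makes the PPS channel a device-tree choice: pps_channel is preset to DEFAULT_PPS_CHANNEL and then optionally overridden, which works because of_property_read_u32() leaves the output variable untouched on any failure. A minimal sketch of that optional-property idiom, under an assumed "vendor,example-prop" name:

#include <linux/of.h>

static u32 read_with_default(struct device_node *np)
{
	u32 value = 42;		/* default used when the property is absent */

	/* No error check needed: on failure the call returns an errno
	 * and leaves "value" unmodified.
	 */
	of_property_read_u32(np, "vendor,example-prop", &value);

	return value;
}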
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index d96028f01770..fb416d60dcd7 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -24,7 +24,6 @@
/* General defines */
#define FMAN_LIODN_TBL 64 /* size of LIODN table */
-#define MAX_NUM_OF_MACS 10
#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
#define BASE_RX_PORTID 0x08
#define BASE_TX_PORTID 0x28
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index 2ea575a46675..74eb62eba0d7 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -74,6 +74,9 @@
#define BM_MAX_NUM_OF_POOLS 64 /* Buffers pools */
#define FMAN_PORT_MAX_EXT_POOLS_NUM 8 /* External BM pools per Rx port */
+/* General defines */
+#define MAX_NUM_OF_MACS 10
+
struct fman; /* FMan data */
/* Enum for defining port types */
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 3088da7adf0f..85617bb94959 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1415,7 +1415,6 @@ int dtsec_initialization(struct mac_device *mac_dev,
mac_dev->set_exception = dtsec_set_exception;
mac_dev->set_allmulti = dtsec_set_allmulti;
mac_dev->set_tstamp = dtsec_set_tstamp;
- mac_dev->set_multi = fman_set_multi;
mac_dev->enable = dtsec_enable;
mac_dev->disable = dtsec_disable;
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 796e6f4e583d..3925441143fa 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1087,7 +1087,6 @@ int memac_initialization(struct mac_device *mac_dev,
mac_dev->set_exception = memac_set_exception;
mac_dev->set_allmulti = memac_set_allmulti;
mac_dev->set_tstamp = memac_set_tstamp;
- mac_dev->set_multi = fman_set_multi;
mac_dev->enable = memac_enable;
mac_dev->disable = memac_disable;
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index f17a4e511510..e977389f7088 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -987,7 +987,7 @@ static int init_low_level_driver(struct fman_port *port)
return -ENODEV;
}
- /* The code bellow is a trick so the FM will not release the buffer
+ /* The code below is a trick so the FM will not release the buffer
* to BM nor will try to enqueue the frame to QM
*/
if (port->port_type == FMAN_PORT_TYPE_TX) {
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index c2261d26db5b..fecfca6eba03 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -771,7 +771,6 @@ int tgec_initialization(struct mac_device *mac_dev,
mac_dev->set_exception = tgec_set_exception;
mac_dev->set_allmulti = tgec_set_allmulti;
mac_dev->set_tstamp = tgec_set_tstamp;
- mac_dev->set_multi = fman_set_multi;
mac_dev->enable = tgec_enable;
mac_dev->disable = tgec_disable;
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 11da139082e1..a39fcea6a77a 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -32,8 +32,6 @@ MODULE_DESCRIPTION("FSL FMan MAC API based driver");
struct mac_priv_s {
u8 cell_index;
struct fman *fman;
- /* List of multicast addresses */
- struct list_head mc_addr_list;
struct platform_device *eth_dev;
u16 speed;
};
@@ -57,44 +55,6 @@ static void mac_exception(struct mac_device *mac_dev,
__func__, ex);
}
-int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
-{
- struct mac_priv_s *priv;
- struct mac_address *old_addr, *tmp;
- struct netdev_hw_addr *ha;
- int err;
- enet_addr_t *addr;
-
- priv = mac_dev->priv;
-
- /* Clear previous address list */
- list_for_each_entry_safe(old_addr, tmp, &priv->mc_addr_list, list) {
- addr = (enet_addr_t *)old_addr->addr;
- err = mac_dev->remove_hash_mac_addr(mac_dev->fman_mac, addr);
- if (err < 0)
- return err;
-
- list_del(&old_addr->list);
- kfree(old_addr);
- }
-
- /* Add all the addresses from the new list */
- netdev_for_each_mc_addr(ha, net_dev) {
- addr = (enet_addr_t *)ha->addr;
- err = mac_dev->add_hash_mac_addr(mac_dev->fman_mac, addr);
- if (err < 0)
- return err;
-
- tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
- if (!tmp)
- return -ENOMEM;
-
- ether_addr_copy(tmp->addr, ha->addr);
- list_add(&tmp->list, &priv->mc_addr_list);
- }
- return 0;
-}
-
static DEFINE_MUTEX(eth_lock);
static struct platform_device *dpaa_eth_add_device(int fman_id,
@@ -181,8 +141,6 @@ static int mac_probe(struct platform_device *_of_dev)
mac_dev->priv = priv;
mac_dev->dev = dev;
- INIT_LIST_HEAD(&priv->mc_addr_list);
-
/* Get the FM node */
dev_node = of_get_parent(mac_node);
if (!dev_node) {
@@ -259,6 +217,11 @@ static int mac_probe(struct platform_device *_of_dev)
err = -EINVAL;
goto _return_dev_put;
}
+ if (val >= MAX_NUM_OF_MACS) {
+ dev_err(dev, "cell-index value is too big for %pOF\n", mac_node);
+ err = -EINVAL;
+ goto _return_dev_put;
+ }
priv->cell_index = (u8)val;
/* Get the MAC address */
@@ -379,7 +342,7 @@ static struct platform_driver mac_driver = {
.of_match_table = mac_match,
},
.probe = mac_probe,
- .remove_new = mac_remove,
+ .remove = mac_remove,
};
builtin_platform_driver(mac_driver);
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index 8b5b43d50f8e..955ace338965 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -40,8 +40,6 @@ struct mac_device {
int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr);
int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
int (*set_tstamp)(struct fman_mac *mac_dev, bool enable);
- int (*set_multi)(struct net_device *net_dev,
- struct mac_device *mac_dev);
int (*set_exception)(struct fman_mac *mac_dev,
enum fman_mac_exceptions exception, bool enable);
int (*add_hash_mac_addr)(struct fman_mac *mac_dev,
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 3425c4a6abcb..f563692a4a00 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1052,7 +1052,7 @@ static struct platform_driver fs_enet_driver = {
.of_match_table = fs_enet_match,
},
.probe = fs_enet_probe,
- .remove_new = fs_enet_remove,
+ .remove = fs_enet_remove,
};
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 2e210a003558..66038e2a4ae3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -123,7 +123,7 @@ static int fs_mii_bitbang_init(struct mii_bus *bus, struct device_node *np)
* we get is an int, and the odds of multiple bitbang mdio buses
* is low enough that it's not worth going too crazy.
*/
- snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res.start);
data = of_get_property(np, "fsl,mdio-pin", &len);
if (!data || len != 4)
@@ -214,7 +214,7 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
.of_match_table = fs_enet_mdio_bb_match,
},
.probe = fs_enet_mdio_probe,
- .remove_new = fs_enet_mdio_remove,
+ .remove = fs_enet_mdio_remove,
};
module_platform_driver(fs_enet_bb_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 93d91e8ad0de..dec31b638941 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -212,7 +212,7 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
.of_match_table = fs_enet_mdio_fec_match,
},
.probe = fs_enet_mdio_probe,
- .remove_new = fs_enet_mdio_remove,
+ .remove = fs_enet_mdio_remove,
};
module_platform_driver(fs_enet_fec_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 026f7270a54d..56d2f79fb7e3 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -526,7 +526,7 @@ static struct platform_driver fsl_pq_mdio_driver = {
.of_match_table = fsl_pq_mdio_match,
},
.probe = fsl_pq_mdio_probe,
- .remove_new = fsl_pq_mdio_remove,
+ .remove = fsl_pq_mdio_remove,
};
module_platform_driver(fsl_pq_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ecb1703ea150..435138f4699d 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2207,8 +2207,9 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
if (unlikely(do_tstamp)) {
struct skb_shared_hwtstamps shhwtstamps;
- u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
- ~0x7UL);
+ __be64 *ns;
+
+ ns = (__be64 *)(((uintptr_t)skb->data + 0x10) & ~0x7UL);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
@@ -2471,7 +2472,7 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
/* Get receive timestamp from the skb */
if (priv->hwts_rx_en) {
struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
- u64 *ns = (u64 *) skb->data;
+ __be64 *ns = (__be64 *)skb->data;
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
@@ -3642,7 +3643,7 @@ static struct platform_driver gfar_driver = {
.of_match_table = gfar_match,
},
.probe = gfar_probe,
- .remove_new = gfar_remove,
+ .remove = gfar_remove,
};
module_platform_driver(gfar_driver);
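The __be64 casts above make the byte order visible to sparse; the address math itself is unchanged: skip 0x10 bytes past skb->data, then mask with ~0x7UL to round down to an 8-byte boundary, where the hardware deposits the timestamp. Worked through with an assumed skb->data of 0x1003: 0x1003 + 0x10 = 0x1013, and 0x1013 & ~0x7 = 0x1010, the first aligned slot at or below that point. The same computation as a tiny helper:

/* Round (data + 0x10) down to an 8-byte boundary; illustrative only. */
static inline __be64 *ts_slot(void *data)
{
	return (__be64 *)(((uintptr_t)data + 0x10) & ~0x7UL);
}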
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index a99b95c4bcfb..781d92e703cb 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -115,12 +115,14 @@ static const char stat_gstrings[][ETH_GSTRING_LEN] = {
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
{
struct gfar_private *priv = netdev_priv(dev);
+ int i;
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
- memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
+ for (i = 0; i < GFAR_STATS_LEN; i++)
+ ethtool_puts(&buf, stat_gstrings[i]);
else
- memcpy(buf, stat_gstrings,
- GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
+ for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
+ ethtool_puts(&buf, stat_gstrings[i]);
}
/* Fill in an array of 64-bit statistics from various sources.
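Both string-table conversions in this series (here and in ucc_geth_ethtool.c below) swap bulk memcpy() arithmetic for ethtool_puts(), which copies one name and advances the caller's cursor by ETH_GSTRING_LEN per call. A minimal sketch of the idiom, with illustrative names:

#include <linux/ethtool.h>
#include <linux/kernel.h>

static const char demo_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
};

static void demo_get_strings(u8 *buf)
{
	int i;

	/* ethtool_puts() bumps "buf" itself, so no manual
	 * "buf += ETH_GSTRING_LEN" bookkeeping is needed.
	 */
	for (i = 0; i < ARRAY_SIZE(demo_strings); i++)
		ethtool_puts(&buf, demo_strings[i]);
}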
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index ab421243a419..6663c1768089 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3591,22 +3591,23 @@ static int ucc_geth_probe(struct platform_device* ofdev)
if ((ucc_num < 0) || (ucc_num > 7))
return -ENODEV;
- ug_info = kmemdup(&ugeth_primary_info, sizeof(*ug_info), GFP_KERNEL);
- if (ug_info == NULL)
+ ug_info = devm_kmemdup(&ofdev->dev, &ugeth_primary_info,
+ sizeof(*ug_info), GFP_KERNEL);
+ if (!ug_info)
return -ENOMEM;
ug_info->uf_info.ucc_num = ucc_num;
err = ucc_geth_parse_clock(np, "rx", &ug_info->uf_info.rx_clock);
if (err)
- goto err_free_info;
+ return err;
err = ucc_geth_parse_clock(np, "tx", &ug_info->uf_info.tx_clock);
if (err)
- goto err_free_info;
+ return err;
err = of_address_to_resource(np, 0, &res);
if (err)
- goto err_free_info;
+ return err;
ug_info->uf_info.regs = res.start;
ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
@@ -3619,7 +3620,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
*/
err = of_phy_register_fixed_link(np);
if (err)
- goto err_free_info;
+ return err;
ug_info->phy_node = of_node_get(np);
}
@@ -3687,9 +3688,8 @@ static int ucc_geth_probe(struct platform_device* ofdev)
ug_info->uf_info.irq);
/* Create an ethernet device instance */
- dev = alloc_etherdev(sizeof(*ugeth));
-
- if (dev == NULL) {
+ dev = devm_alloc_etherdev(&ofdev->dev, sizeof(*ugeth));
+ if (!dev) {
err = -ENOMEM;
goto err_deregister_fixed_link;
}
@@ -3724,15 +3724,17 @@ static int ucc_geth_probe(struct platform_device* ofdev)
/* Carrier starts down, phylib will bring it up */
netif_carrier_off(dev);
- err = register_netdev(dev);
+ err = devm_register_netdev(&ofdev->dev, dev);
if (err) {
if (netif_msg_probe(ugeth))
pr_err("%s: Cannot register net device, aborting\n",
dev->name);
- goto err_free_netdev;
+ goto err_deregister_fixed_link;
}
- of_get_ethdev_address(np, dev);
+ err = of_get_ethdev_address(np, dev);
+ if (err == -EPROBE_DEFER)
+ goto err_deregister_fixed_link;
ugeth->ug_info = ug_info;
ugeth->dev = device;
@@ -3741,16 +3743,11 @@ static int ucc_geth_probe(struct platform_device* ofdev)
return 0;
-err_free_netdev:
- free_netdev(dev);
err_deregister_fixed_link:
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(ug_info->tbi_node);
of_node_put(ug_info->phy_node);
-err_free_info:
- kfree(ug_info);
-
return err;
}
@@ -3760,14 +3757,11 @@ static void ucc_geth_remove(struct platform_device* ofdev)
struct ucc_geth_private *ugeth = netdev_priv(dev);
struct device_node *np = ofdev->dev.of_node;
- unregister_netdev(dev);
ucc_geth_memclean(ugeth);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(ugeth->ug_info->tbi_node);
of_node_put(ugeth->ug_info->phy_node);
- kfree(ugeth->ug_info);
- free_netdev(dev);
}
static const struct of_device_id ucc_geth_match[] = {
@@ -3786,7 +3780,7 @@ static struct platform_driver ucc_geth_driver = {
.of_match_table = ucc_geth_match,
},
.probe = ucc_geth_probe,
- .remove_new = ucc_geth_remove,
+ .remove = ucc_geth_remove,
.suspend = ucc_geth_suspend,
.resume = ucc_geth_resume,
};
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 601beb93d3b3..699f346faf5c 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -287,20 +287,17 @@ static void uec_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
u32 stats_mode = ugeth->ug_info->statisticsMode;
+ int i;
- if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) {
- memcpy(buf, hw_stat_gstrings, UEC_HW_STATS_LEN *
- ETH_GSTRING_LEN);
- buf += UEC_HW_STATS_LEN * ETH_GSTRING_LEN;
- }
- if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
- memcpy(buf, tx_fw_stat_gstrings, UEC_TX_FW_STATS_LEN *
- ETH_GSTRING_LEN);
- buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN;
- }
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE)
+ for (i = 0; i < UEC_HW_STATS_LEN; i++)
+ ethtool_puts(&buf, hw_stat_gstrings[i]);
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX)
+ for (i = 0; i < UEC_TX_FW_STATS_LEN; i++)
+ ethtool_puts(&buf, tx_fw_stat_gstrings[i]);
if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX)
- memcpy(buf, rx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN *
- ETH_GSTRING_LEN);
+ for (i = 0; i < UEC_RX_FW_STATS_LEN; i++)
+ ethtool_puts(&buf, rx_fw_stat_gstrings[i]);
}
static void uec_get_ethtool_stats(struct net_device *netdev,
diff --git a/drivers/net/ethernet/fungible/funcore/fun_queue.c b/drivers/net/ethernet/fungible/funcore/fun_queue.c
index 8ab9f68434f5..d07ee3e4f52a 100644
--- a/drivers/net/ethernet/fungible/funcore/fun_queue.c
+++ b/drivers/net/ethernet/fungible/funcore/fun_queue.c
@@ -482,43 +482,6 @@ free_funq:
return NULL;
}
-/* Create a funq's CQ on the device. */
-static int fun_create_cq(struct fun_queue *funq)
-{
- struct fun_dev *fdev = funq->fdev;
- unsigned int rqid;
- int rc;
-
- rqid = funq->cq_flags & FUN_ADMIN_EPCQ_CREATE_FLAG_RQ ?
- funq->rqid : FUN_HCI_ID_INVALID;
- rc = fun_cq_create(fdev, funq->cq_flags, funq->cqid, rqid,
- funq->cqe_size_log2, funq->cq_depth,
- funq->cq_dma_addr, 0, 0, funq->cq_intcoal_nentries,
- funq->cq_intcoal_usec, funq->cq_vector, 0, 0,
- &funq->cqid, &funq->cq_db);
- if (!rc)
- dev_dbg(fdev->dev, "created CQ %u\n", funq->cqid);
-
- return rc;
-}
-
-/* Create a funq's SQ on the device. */
-static int fun_create_sq(struct fun_queue *funq)
-{
- struct fun_dev *fdev = funq->fdev;
- int rc;
-
- rc = fun_sq_create(fdev, funq->sq_flags, funq->sqid, funq->cqid,
- funq->sqe_size_log2, funq->sq_depth,
- funq->sq_dma_addr, funq->sq_intcoal_nentries,
- funq->sq_intcoal_usec, funq->cq_vector, 0, 0,
- 0, &funq->sqid, &funq->sq_db);
- if (!rc)
- dev_dbg(fdev->dev, "created SQ %u\n", funq->sqid);
-
- return rc;
-}
-
/* Create a funq's RQ on the device. */
int fun_create_rq(struct fun_queue *funq)
{
@@ -561,34 +524,6 @@ int fun_request_irq(struct fun_queue *funq, const char *devname,
return rc;
}
-/* Create all component queues of a funq on the device. */
-int fun_create_queue(struct fun_queue *funq)
-{
- int rc;
-
- rc = fun_create_cq(funq);
- if (rc)
- return rc;
-
- if (funq->rq_depth) {
- rc = fun_create_rq(funq);
- if (rc)
- goto release_cq;
- }
-
- rc = fun_create_sq(funq);
- if (rc)
- goto release_rq;
-
- return 0;
-
-release_rq:
- fun_destroy_sq(funq->fdev, funq->rqid);
-release_cq:
- fun_destroy_cq(funq->fdev, funq->cqid);
- return rc;
-}
-
void fun_free_irq(struct fun_queue *funq)
{
if (funq->irq_handler) {
diff --git a/drivers/net/ethernet/fungible/funcore/fun_queue.h b/drivers/net/ethernet/fungible/funcore/fun_queue.h
index 7fb53d0ae8b0..2d966afb187a 100644
--- a/drivers/net/ethernet/fungible/funcore/fun_queue.h
+++ b/drivers/net/ethernet/fungible/funcore/fun_queue.h
@@ -163,7 +163,6 @@ static inline void fun_set_cq_callback(struct fun_queue *funq, cq_callback_t cb,
}
int fun_create_rq(struct fun_queue *funq);
-int fun_create_queue(struct fun_queue *funq);
void fun_free_irq(struct fun_queue *funq);
int fun_request_irq(struct fun_queue *funq, const char *devname,
diff --git a/drivers/net/ethernet/google/Kconfig b/drivers/net/ethernet/google/Kconfig
index 8641a00f8e63..564862a57124 100644
--- a/drivers/net/ethernet/google/Kconfig
+++ b/drivers/net/ethernet/google/Kconfig
@@ -18,6 +18,7 @@ if NET_VENDOR_GOOGLE
config GVE
tristate "Google Virtual NIC (gVNIC) support"
depends on (PCI_MSI && (X86 || CPU_LITTLE_ENDIAN))
+ select PAGE_POOL
help
 This driver supports Google Virtual NIC (gVNIC)
diff --git a/drivers/net/ethernet/google/gve/Makefile b/drivers/net/ethernet/google/gve/Makefile
index 9ed07080b38a..4520f1c07a63 100644
--- a/drivers/net/ethernet/google/gve/Makefile
+++ b/drivers/net/ethernet/google/gve/Makefile
@@ -1,4 +1,5 @@
# Makefile for the Google virtual Ethernet (gve) driver
obj-$(CONFIG_GVE) += gve.o
-gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o
+gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o \
+ gve_buffer_mgmt_dqo.o
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 301fa1ea4f51..dd92949bb214 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -13,6 +13,7 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
+#include <net/page_pool/helpers.h>
#include <net/xdp.h>
#include "gve_desc.h"
@@ -60,6 +61,8 @@
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
+#define GVE_PAGE_POOL_SIZE_MULTIPLIER 4
+
#define GVE_FLOW_RULES_CACHE_SIZE \
(GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule))
#define GVE_FLOW_RULE_IDS_CACHE_SIZE \
@@ -102,6 +105,7 @@ struct gve_rx_slot_page_info {
struct page *page;
void *page_address;
u32 page_offset; /* offset to write to in page */
+ unsigned int buf_size;
int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
u16 pad; /* adjustment for rx padding */
u8 can_flip; /* tracks if the networking stack is using the page */
@@ -273,6 +277,8 @@ struct gve_rx_ring {
/* Address info of the buffers for header-split */
struct gve_header_buf hdr_bufs;
+
+ struct page_pool *page_pool;
} dqo;
};
@@ -1162,6 +1168,36 @@ void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
bool gve_header_split_supported(const struct gve_priv *priv);
int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
+/* rx buffer handling */
+int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs);
+void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
+ bool free_page);
+struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx);
+bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state);
+void gve_free_buf_state(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state);
+struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
+ struct gve_index_list *list);
+void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
+ struct gve_rx_buf_state_dqo *buf_state);
+struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx);
+void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state);
+void gve_free_to_page_pool(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state,
+ bool allow_direct);
+int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state);
+void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state);
+void gve_reuse_buffer(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state);
+void gve_free_buffer(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state);
+int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
+struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
+ struct gve_rx_ring *rx);
+
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index e44e8b139633..060e0e674938 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -1248,10 +1248,10 @@ gve_adminq_configure_flow_rule(struct gve_priv *priv,
sizeof(struct gve_adminq_configure_flow_rule),
flow_rule_cmd);
- if (err) {
+ if (err == -ETIME) {
 dev_err(&priv->pdev->dev, "Timed out configuring the flow rule; triggering reset");
gve_reset(priv, true);
- } else {
+ } else if (!err) {
priv->flow_rules_cache.rules_cache_synced = false;
}
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
new file mode 100644
index 000000000000..403f0f335ba6
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2024 Google, Inc.
+ */
+
+#include "gve.h"
+#include "gve_utils.h"
+
+int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
+{
+ return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias;
+}
+
+struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
+{
+ struct gve_rx_buf_state_dqo *buf_state;
+ s16 buffer_id;
+
+ buffer_id = rx->dqo.free_buf_states;
+ if (unlikely(buffer_id == -1))
+ return NULL;
+
+ buf_state = &rx->dqo.buf_states[buffer_id];
+
+ /* Remove buf_state from free list */
+ rx->dqo.free_buf_states = buf_state->next;
+
+ /* Point buf_state to itself to mark it as allocated */
+ buf_state->next = buffer_id;
+
+ return buf_state;
+}
+
+bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ s16 buffer_id = buf_state - rx->dqo.buf_states;
+
+ return buf_state->next == buffer_id;
+}
+
+void gve_free_buf_state(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ s16 buffer_id = buf_state - rx->dqo.buf_states;
+
+ buf_state->next = rx->dqo.free_buf_states;
+ rx->dqo.free_buf_states = buffer_id;
+}
+
+struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
+ struct gve_index_list *list)
+{
+ struct gve_rx_buf_state_dqo *buf_state;
+ s16 buffer_id;
+
+ buffer_id = list->head;
+ if (unlikely(buffer_id == -1))
+ return NULL;
+
+ buf_state = &rx->dqo.buf_states[buffer_id];
+
+ /* Remove buf_state from list */
+ list->head = buf_state->next;
+ if (buf_state->next == -1)
+ list->tail = -1;
+
+ /* Point buf_state to itself to mark it as allocated */
+ buf_state->next = buffer_id;
+
+ return buf_state;
+}
+
+void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ s16 buffer_id = buf_state - rx->dqo.buf_states;
+
+ buf_state->next = -1;
+
+ if (list->head == -1) {
+ list->head = buffer_id;
+ list->tail = buffer_id;
+ } else {
+ int tail = list->tail;
+
+ rx->dqo.buf_states[tail].next = buffer_id;
+ list->tail = buffer_id;
+ }
+}
+
+struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx)
+{
+ struct gve_rx_buf_state_dqo *buf_state;
+ int i;
+
+ /* Recycled buf states are immediately usable. */
+ buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states);
+ if (likely(buf_state))
+ return buf_state;
+
+ if (unlikely(rx->dqo.used_buf_states.head == -1))
+ return NULL;
+
+ /* Used buf states are only usable when ref count reaches 0, which means
+ * no SKBs refer to them.
+ *
+ * Search a limited number before giving up.
+ */
+ for (i = 0; i < 5; i++) {
+ buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
+ if (gve_buf_ref_cnt(buf_state) == 0) {
+ rx->dqo.used_buf_states_cnt--;
+ return buf_state;
+ }
+
+ gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
+ }
+
+ return NULL;
+}
+
+int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ struct gve_priv *priv = rx->gve;
+ u32 idx;
+
+ idx = rx->dqo.next_qpl_page_idx;
+ if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
+ net_err_ratelimited("%s: Out of QPL pages\n",
+ priv->dev->name);
+ return -ENOMEM;
+ }
+ buf_state->page_info.page = rx->dqo.qpl->pages[idx];
+ buf_state->addr = rx->dqo.qpl->page_buses[idx];
+ rx->dqo.next_qpl_page_idx++;
+ buf_state->page_info.page_offset = 0;
+ buf_state->page_info.page_address =
+ page_address(buf_state->page_info.page);
+ buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
+ buf_state->last_single_ref_offset = 0;
+
+ /* The page already has 1 ref. */
+ page_ref_add(buf_state->page_info.page, INT_MAX - 1);
+ buf_state->page_info.pagecnt_bias = INT_MAX;
+
+ return 0;
+}
+
+void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state)
+{
+ if (!buf_state->page_info.page)
+ return;
+
+ page_ref_sub(buf_state->page_info.page,
+ buf_state->page_info.pagecnt_bias - 1);
+ buf_state->page_info.page = NULL;
+}
+
+void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ const u16 data_buffer_size = priv->data_buffer_size_dqo;
+ int pagecount;
+
+ /* Can't reuse if we only fit one buffer per page */
+ if (data_buffer_size * 2 > PAGE_SIZE)
+ goto mark_used;
+
+ pagecount = gve_buf_ref_cnt(buf_state);
+
+ /* Record the offset when we have a single remaining reference.
+ *
+ * When this happens, we know all of the other offsets of the page are
+ * usable.
+ */
+ if (pagecount == 1) {
+ buf_state->last_single_ref_offset =
+ buf_state->page_info.page_offset;
+ }
+
+ /* Use the next buffer sized chunk in the page. */
+ buf_state->page_info.page_offset += data_buffer_size;
+ buf_state->page_info.page_offset &= (PAGE_SIZE - 1);
+
+ /* If we wrap around to the same offset without ever dropping to 1
+ * reference, then we don't know if this offset was ever freed.
+ */
+ if (buf_state->page_info.page_offset ==
+ buf_state->last_single_ref_offset) {
+ goto mark_used;
+ }
+
+ gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
+ return;
+
+mark_used:
+ gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
+ rx->dqo.used_buf_states_cnt++;
+}
+
+void gve_free_to_page_pool(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state,
+ bool allow_direct)
+{
+ struct page *page = buf_state->page_info.page;
+
+ if (!page)
+ return;
+
+ page_pool_put_full_page(page->pp, page, allow_direct);
+ buf_state->page_info.page = NULL;
+}
+
+static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ struct gve_priv *priv = rx->gve;
+ struct page *page;
+
+ buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
+ page = page_pool_alloc(rx->dqo.page_pool,
+ &buf_state->page_info.page_offset,
+ &buf_state->page_info.buf_size, GFP_ATOMIC);
+
+ if (!page)
+ return -ENOMEM;
+
+ buf_state->page_info.page = page;
+ buf_state->page_info.page_address = page_address(page);
+ buf_state->addr = page_pool_get_dma_addr(page);
+
+ return 0;
+}
+
+struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
+ struct gve_rx_ring *rx)
+{
+ u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num);
+ struct page_pool_params pp = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = 0,
+ .pool_size = GVE_PAGE_POOL_SIZE_MULTIPLIER * priv->rx_desc_cnt,
+ .dev = &priv->pdev->dev,
+ .netdev = priv->dev,
+ .napi = &priv->ntfy_blocks[ntfy_id].napi,
+ .max_len = PAGE_SIZE,
+ .dma_dir = DMA_FROM_DEVICE,
+ };
+
+ return page_pool_create(&pp);
+}
+
+void gve_free_buffer(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ if (rx->dqo.page_pool) {
+ gve_free_to_page_pool(rx, buf_state, true);
+ gve_free_buf_state(rx, buf_state);
+ } else {
+ gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
+ buf_state);
+ }
+}
+
+void gve_reuse_buffer(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ if (rx->dqo.page_pool) {
+ buf_state->page_info.page = NULL;
+ gve_free_buf_state(rx, buf_state);
+ } else {
+ gve_dec_pagecnt_bias(&buf_state->page_info);
+ gve_try_recycle_buf(rx->gve, rx, buf_state);
+ }
+}
+
+int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
+{
+ struct gve_rx_buf_state_dqo *buf_state;
+
+ if (rx->dqo.page_pool) {
+ buf_state = gve_alloc_buf_state(rx);
+ if (WARN_ON_ONCE(!buf_state))
+ return -ENOMEM;
+
+ if (gve_alloc_from_page_pool(rx, buf_state))
+ goto free_buf_state;
+ } else {
+ buf_state = gve_get_recycled_buf_state(rx);
+ if (unlikely(!buf_state)) {
+ buf_state = gve_alloc_buf_state(rx);
+ if (unlikely(!buf_state))
+ return -ENOMEM;
+
+ if (unlikely(gve_alloc_qpl_page_dqo(rx, buf_state)))
+ goto free_buf_state;
+ }
+ }
+ desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
+ desc->buf_addr = cpu_to_le64(buf_state->addr +
+ buf_state->page_info.page_offset);
+
+ return 0;
+
+free_buf_state:
+ gve_free_buf_state(rx, buf_state);
+ return -ENOMEM;
+}
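The buf_state helpers gathered into this new file track buffers by s16 index instead of by pointer: free entries form a singly linked list threaded through ->next, and an allocated entry marks itself by pointing ->next at its own index, which is exactly what gve_buf_state_is_allocated() tests. A condensed, self-contained sketch of that scheme with illustrative names:

#include <linux/types.h>

struct slot {
	s16 next;			/* free-list link, or own index */
};

struct slot_pool {
	struct slot slots[64];
	s16 free_head;			/* -1 when exhausted */
};

static s16 slot_alloc(struct slot_pool *p)
{
	s16 id = p->free_head;

	if (id == -1)
		return -1;
	p->free_head = p->slots[id].next;	/* unlink from free list */
	p->slots[id].next = id;			/* self-link marks "allocated" */
	return id;
}

static void slot_free(struct slot_pool *p, s16 id)
{
	p->slots[id].next = p->free_head;	/* push back onto free list */
	p->free_head = id;
}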
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 661566db68c8..e171ca248f9a 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1875,6 +1875,11 @@ static void gve_turndown(struct gve_priv *priv)
if (!gve_tx_was_added_to_block(priv, idx))
continue;
+
+ if (idx < priv->tx_cfg.num_queues)
+ netif_queue_set_napi(priv->dev, idx,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+
napi_disable(&block->napi);
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
@@ -1883,6 +1888,9 @@ static void gve_turndown(struct gve_priv *priv)
if (!gve_rx_was_added_to_block(priv, idx))
continue;
+
+ netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
+ NULL);
napi_disable(&block->napi);
}
@@ -1909,6 +1917,12 @@ static void gve_turnup(struct gve_priv *priv)
continue;
napi_enable(&block->napi);
+
+ if (idx < priv->tx_cfg.num_queues)
+ netif_queue_set_napi(priv->dev, idx,
+ NETDEV_QUEUE_TYPE_TX,
+ &block->napi);
+
if (gve_is_gqi(priv)) {
iowrite32be(0, gve_irq_doorbell(priv, block));
} else {
@@ -1931,6 +1945,9 @@ static void gve_turnup(struct gve_priv *priv)
continue;
napi_enable(&block->napi);
+ netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
+ &block->napi);
+
if (gve_is_gqi(priv)) {
iowrite32be(0, gve_irq_doorbell(priv, block));
} else {
@@ -2544,6 +2561,54 @@ static const struct netdev_queue_mgmt_ops gve_queue_mgmt_ops = {
.ndo_queue_stop = gve_rx_queue_stop,
};
+static void gve_get_rx_queue_stats(struct net_device *dev, int idx,
+ struct netdev_queue_stats_rx *rx_stats)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_ring *rx = &priv->rx[idx];
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&rx->statss);
+ rx_stats->packets = rx->rpackets;
+ rx_stats->bytes = rx->rbytes;
+ rx_stats->alloc_fail = rx->rx_skb_alloc_fail +
+ rx->rx_buf_alloc_fail;
+ } while (u64_stats_fetch_retry(&rx->statss, start));
+}
+
+static void gve_get_tx_queue_stats(struct net_device *dev, int idx,
+ struct netdev_queue_stats_tx *tx_stats)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_tx_ring *tx = &priv->tx[idx];
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&tx->statss);
+ tx_stats->packets = tx->pkt_done;
+ tx_stats->bytes = tx->bytes_done;
+ } while (u64_stats_fetch_retry(&tx->statss, start));
+}
+
+static void gve_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ rx->packets = 0;
+ rx->bytes = 0;
+ rx->alloc_fail = 0;
+
+ tx->packets = 0;
+ tx->bytes = 0;
+}
+
+static const struct netdev_stat_ops gve_stat_ops = {
+ .get_queue_stats_rx = gve_get_rx_queue_stats,
+ .get_queue_stats_tx = gve_get_tx_queue_stats,
+ .get_base_stats = gve_get_base_stats,
+};
+
static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int max_tx_queues, max_rx_queues;
@@ -2599,6 +2664,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &gve_ethtool_ops;
dev->netdev_ops = &gve_netdev_ops;
dev->queue_mgmt_ops = &gve_queue_mgmt_ops;
+ dev->stat_ops = &gve_stat_ops;
/* Set default and supported features.
*
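The per-queue getters registered above read 64-bit counters under the u64_stats seqcount protocol: on 32-bit kernels a reader could otherwise observe a torn value, so it snapshots between fetch_begin() and fetch_retry() and loops if a writer raced. A minimal sketch of both sides (syncp is assumed to have been set up with u64_stats_init()):

#include <linux/u64_stats_sync.h>

struct demo_stats {
	u64 packets;
	struct u64_stats_sync syncp;	/* init with u64_stats_init() */
};

static void demo_write(struct demo_stats *s)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	u64_stats_update_end(&s->syncp);
}

static u64 demo_read(struct demo_stats *s)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		packets = s->packets;	/* consistent snapshot */
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return packets;
}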
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 1154c1d8f66f..8ac0047f1ada 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -16,189 +16,6 @@
#include <net/ipv6.h>
#include <net/tcp.h>
-static int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
-{
- return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias;
-}
-
-static void gve_free_page_dqo(struct gve_priv *priv,
- struct gve_rx_buf_state_dqo *bs,
- bool free_page)
-{
- page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1);
- if (free_page)
- gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr,
- DMA_FROM_DEVICE);
- bs->page_info.page = NULL;
-}
-
-static struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
-{
- struct gve_rx_buf_state_dqo *buf_state;
- s16 buffer_id;
-
- buffer_id = rx->dqo.free_buf_states;
- if (unlikely(buffer_id == -1))
- return NULL;
-
- buf_state = &rx->dqo.buf_states[buffer_id];
-
- /* Remove buf_state from free list */
- rx->dqo.free_buf_states = buf_state->next;
-
- /* Point buf_state to itself to mark it as allocated */
- buf_state->next = buffer_id;
-
- return buf_state;
-}
-
-static bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
- struct gve_rx_buf_state_dqo *buf_state)
-{
- s16 buffer_id = buf_state - rx->dqo.buf_states;
-
- return buf_state->next == buffer_id;
-}
-
-static void gve_free_buf_state(struct gve_rx_ring *rx,
- struct gve_rx_buf_state_dqo *buf_state)
-{
- s16 buffer_id = buf_state - rx->dqo.buf_states;
-
- buf_state->next = rx->dqo.free_buf_states;
- rx->dqo.free_buf_states = buffer_id;
-}
-
-static struct gve_rx_buf_state_dqo *
-gve_dequeue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list)
-{
- struct gve_rx_buf_state_dqo *buf_state;
- s16 buffer_id;
-
- buffer_id = list->head;
- if (unlikely(buffer_id == -1))
- return NULL;
-
- buf_state = &rx->dqo.buf_states[buffer_id];
-
- /* Remove buf_state from list */
- list->head = buf_state->next;
- if (buf_state->next == -1)
- list->tail = -1;
-
- /* Point buf_state to itself to mark it as allocated */
- buf_state->next = buffer_id;
-
- return buf_state;
-}
-
-static void gve_enqueue_buf_state(struct gve_rx_ring *rx,
- struct gve_index_list *list,
- struct gve_rx_buf_state_dqo *buf_state)
-{
- s16 buffer_id = buf_state - rx->dqo.buf_states;
-
- buf_state->next = -1;
-
- if (list->head == -1) {
- list->head = buffer_id;
- list->tail = buffer_id;
- } else {
- int tail = list->tail;
-
- rx->dqo.buf_states[tail].next = buffer_id;
- list->tail = buffer_id;
- }
-}
-
-static struct gve_rx_buf_state_dqo *
-gve_get_recycled_buf_state(struct gve_rx_ring *rx)
-{
- struct gve_rx_buf_state_dqo *buf_state;
- int i;
-
- /* Recycled buf states are immediately usable. */
- buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states);
- if (likely(buf_state))
- return buf_state;
-
- if (unlikely(rx->dqo.used_buf_states.head == -1))
- return NULL;
-
- /* Used buf states are only usable when ref count reaches 0, which means
- * no SKBs refer to them.
- *
- * Search a limited number before giving up.
- */
- for (i = 0; i < 5; i++) {
- buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
- if (gve_buf_ref_cnt(buf_state) == 0) {
- rx->dqo.used_buf_states_cnt--;
- return buf_state;
- }
-
- gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
- }
-
- /* For QPL, we cannot allocate any new buffers and must
- * wait for the existing ones to be available.
- */
- if (rx->dqo.qpl)
- return NULL;
-
- /* If there are no free buf states discard an entry from
- * `used_buf_states` so it can be used.
- */
- if (unlikely(rx->dqo.free_buf_states == -1)) {
- buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
- if (gve_buf_ref_cnt(buf_state) == 0)
- return buf_state;
-
- gve_free_page_dqo(rx->gve, buf_state, true);
- gve_free_buf_state(rx, buf_state);
- }
-
- return NULL;
-}
-
-static int gve_alloc_page_dqo(struct gve_rx_ring *rx,
- struct gve_rx_buf_state_dqo *buf_state)
-{
- struct gve_priv *priv = rx->gve;
- u32 idx;
-
- if (!rx->dqo.qpl) {
- int err;
-
- err = gve_alloc_page(priv, &priv->pdev->dev,
- &buf_state->page_info.page,
- &buf_state->addr,
- DMA_FROM_DEVICE, GFP_ATOMIC);
- if (err)
- return err;
- } else {
- idx = rx->dqo.next_qpl_page_idx;
- if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
- net_err_ratelimited("%s: Out of QPL pages\n",
- priv->dev->name);
- return -ENOMEM;
- }
- buf_state->page_info.page = rx->dqo.qpl->pages[idx];
- buf_state->addr = rx->dqo.qpl->page_buses[idx];
- rx->dqo.next_qpl_page_idx++;
- }
- buf_state->page_info.page_offset = 0;
- buf_state->page_info.page_address =
- page_address(buf_state->page_info.page);
- buf_state->last_single_ref_offset = 0;
-
- /* The page already has 1 ref. */
- page_ref_add(buf_state->page_info.page, INT_MAX - 1);
- buf_state->page_info.pagecnt_bias = INT_MAX;
-
- return 0;
-}
-
static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
{
struct device *hdev = &priv->pdev->dev;
@@ -278,8 +95,10 @@ static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx)
for (i = 0; i < rx->dqo.num_buf_states; i++) {
struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];
- if (bs->page_info.page)
- gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
+ if (rx->dqo.page_pool)
+ gve_free_to_page_pool(rx, bs, false);
+ else
+ gve_free_qpl_page_dqo(bs);
}
}
@@ -321,9 +140,11 @@ void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
for (i = 0; i < rx->dqo.num_buf_states; i++) {
struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];
- /* Only free page for RDA. QPL pages are freed in gve_main. */
- if (bs->page_info.page)
- gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
+
+ if (rx->dqo.page_pool)
+ gve_free_to_page_pool(rx, bs, false);
+ else
+ gve_free_qpl_page_dqo(bs);
}
if (rx->dqo.qpl) {
@@ -350,6 +171,11 @@ void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
kvfree(rx->dqo.buf_states);
rx->dqo.buf_states = NULL;
+ if (rx->dqo.page_pool) {
+ page_pool_destroy(rx->dqo.page_pool);
+ rx->dqo.page_pool = NULL;
+ }
+
gve_rx_free_hdr_bufs(priv, rx);
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
@@ -382,6 +208,7 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
int idx)
{
struct device *hdev = &priv->pdev->dev;
+ struct page_pool *pool;
int qpl_page_cnt;
size_t size;
u32 qpl_id;
@@ -395,8 +222,7 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
rx->gve = priv;
rx->q_num = idx;
- rx->dqo.num_buf_states = cfg->raw_addressing ?
- min_t(s16, S16_MAX, buffer_queue_slots * 4) :
+ rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
sizeof(rx->dqo.buf_states[0]),
@@ -424,7 +250,13 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
if (!rx->dqo.bufq.desc_ring)
goto err;
- if (!cfg->raw_addressing) {
+ if (cfg->raw_addressing) {
+ pool = gve_rx_create_page_pool(priv, rx);
+ if (IS_ERR(pool))
+ goto err;
+
+ rx->dqo.page_pool = pool;
+ } else {
qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
qpl_page_cnt = gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
@@ -521,26 +353,14 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
num_avail_slots = min_t(u32, num_avail_slots, complq->num_free_slots);
while (num_posted < num_avail_slots) {
struct gve_rx_desc_dqo *desc = &bufq->desc_ring[bufq->tail];
- struct gve_rx_buf_state_dqo *buf_state;
-
- buf_state = gve_get_recycled_buf_state(rx);
- if (unlikely(!buf_state)) {
- buf_state = gve_alloc_buf_state(rx);
- if (unlikely(!buf_state))
- break;
-
- if (unlikely(gve_alloc_page_dqo(rx, buf_state))) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_buf_alloc_fail++;
- u64_stats_update_end(&rx->statss);
- gve_free_buf_state(rx, buf_state);
- break;
- }
+
+ if (unlikely(gve_alloc_buffer(rx, desc))) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_buf_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
+ break;
}
- desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
- desc->buf_addr = cpu_to_le64(buf_state->addr +
- buf_state->page_info.page_offset);
if (rx->dqo.hdr_bufs.data)
desc->header_buf_addr =
cpu_to_le64(rx->dqo.hdr_bufs.addr +
@@ -557,48 +377,6 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
rx->fill_cnt += num_posted;
}
-static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
- struct gve_rx_buf_state_dqo *buf_state)
-{
- const u16 data_buffer_size = priv->data_buffer_size_dqo;
- int pagecount;
-
- /* Can't reuse if we only fit one buffer per page */
- if (data_buffer_size * 2 > PAGE_SIZE)
- goto mark_used;
-
- pagecount = gve_buf_ref_cnt(buf_state);
-
- /* Record the offset when we have a single remaining reference.
- *
- * When this happens, we know all of the other offsets of the page are
- * usable.
- */
- if (pagecount == 1) {
- buf_state->last_single_ref_offset =
- buf_state->page_info.page_offset;
- }
-
- /* Use the next buffer sized chunk in the page. */
- buf_state->page_info.page_offset += data_buffer_size;
- buf_state->page_info.page_offset &= (PAGE_SIZE - 1);
-
- /* If we wrap around to the same offset without ever dropping to 1
- * reference, then we don't know if this offset was ever freed.
- */
- if (buf_state->page_info.page_offset ==
- buf_state->last_single_ref_offset) {
- goto mark_used;
- }
-
- gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
- return;
-
-mark_used:
- gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
- rx->dqo.used_buf_states_cnt++;
-}
-
static void gve_rx_skb_csum(struct sk_buff *skb,
const struct gve_rx_compl_desc_dqo *desc,
struct gve_ptype ptype)
@@ -713,6 +491,9 @@ static int gve_rx_append_frags(struct napi_struct *napi,
if (!skb)
return -1;
+ if (rx->dqo.page_pool)
+ skb_mark_for_recycle(skb);
+
if (rx->ctx.skb_tail == rx->ctx.skb_head)
skb_shinfo(rx->ctx.skb_head)->frag_list = skb;
else
@@ -723,7 +504,7 @@ static int gve_rx_append_frags(struct napi_struct *napi,
if (rx->ctx.skb_tail != rx->ctx.skb_head) {
rx->ctx.skb_head->len += buf_len;
rx->ctx.skb_head->data_len += buf_len;
- rx->ctx.skb_head->truesize += priv->data_buffer_size_dqo;
+ rx->ctx.skb_head->truesize += buf_state->page_info.buf_size;
}
/* Trigger ondemand page allocation if we are running low on buffers */
@@ -733,13 +514,8 @@ static int gve_rx_append_frags(struct napi_struct *napi,
skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
buf_state->page_info.page,
buf_state->page_info.page_offset,
- buf_len, priv->data_buffer_size_dqo);
- gve_dec_pagecnt_bias(&buf_state->page_info);
-
- /* Advances buffer page-offset if page is partially used.
- * Marks buffer as used if page is full.
- */
- gve_try_recycle_buf(priv, rx, buf_state);
+ buf_len, buf_state->page_info.buf_size);
+ gve_reuse_buffer(rx, buf_state);
return 0;
}
@@ -773,8 +549,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
}
if (unlikely(compl_desc->rx_error)) {
- gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
- buf_state);
+ gve_free_buffer(rx, buf_state);
return -EINVAL;
}
@@ -798,6 +573,9 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
if (unlikely(!rx->ctx.skb_head))
goto error;
rx->ctx.skb_tail = rx->ctx.skb_head;
+
+ if (rx->dqo.page_pool)
+ skb_mark_for_recycle(rx->ctx.skb_head);
} else {
unsplit = 1;
}
@@ -834,8 +612,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
rx->rx_copybreak_pkt++;
u64_stats_update_end(&rx->statss);
- gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
- buf_state);
+ gve_free_buffer(rx, buf_state);
return 0;
}
@@ -850,16 +627,17 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
+ if (rx->dqo.page_pool)
+ skb_mark_for_recycle(rx->ctx.skb_head);
+
skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
buf_state->page_info.page_offset, buf_len,
- priv->data_buffer_size_dqo);
- gve_dec_pagecnt_bias(&buf_state->page_info);
-
- gve_try_recycle_buf(priv, rx, buf_state);
+ buf_state->page_info.buf_size);
+ gve_reuse_buffer(rx, buf_state);
return 0;
error:
- gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
+ gve_free_buffer(rx, buf_state);
return -ENOMEM;
}
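gve_try_recycle_buf(), deleted here after its move to gve_buffer_mgmt_dqo.c, walks a page in buffer-sized strides and wraps with a power-of-two mask:

	/* advance one buffer and wrap within the page */
	offset = (offset + data_buffer_size) & (PAGE_SIZE - 1);

Worked through with PAGE_SIZE = 4096 and 2048-byte buffers, the offset alternates 0 -> 2048 -> 0. If the page's reference count was observed at 1 while at offset 2048, last_single_ref_offset records 2048; returning to that offset without another single-reference observation means some chunk may still be in flight, so the buffer is parked on the used list rather than recycled.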
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 2349750075a5..30fef100257e 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -111,6 +111,7 @@ void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
netif_napi_add(priv->dev, &block->napi, gve_poll);
+ netif_napi_set_irq(&block->napi, block->irq);
}
void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
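gve_add_napi() above now records the vector's IRQ on the NAPI instance; combined with the netif_queue_set_napi() calls added to gve_turnup()/gve_turndown() earlier in this series, the core can report per-queue NAPI/IRQ associations over netlink. A sketch of the pairing, with illustrative names (demo_poll is an assumed poll handler, not a gve symbol):

#include <linux/netdevice.h>

static int demo_poll(struct napi_struct *napi, int budget)
{
	napi_complete_done(napi, 0);	/* stub: no work processed */
	return 0;
}

/* Illustrative bring-up: tie one RX queue to its NAPI context. */
static void demo_rx_queue_up(struct net_device *dev, int idx,
			     struct napi_struct *napi, int irq)
{
	netif_napi_add(dev, napi, demo_poll);
	netif_napi_set_irq(napi, irq);
	netif_queue_set_napi(dev, idx, NETDEV_QUEUE_TYPE_RX, napi);
}

/* Teardown clears the association before disabling NAPI, as gve does. */
static void demo_rx_queue_down(struct net_device *dev, int idx,
			       struct napi_struct *napi)
{
	netif_queue_set_napi(dev, idx, NETDEV_QUEUE_TYPE_RX, NULL);
	napi_disable(napi);
}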
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 3312e1d93c3b..65302c41bfb1 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -7,7 +7,6 @@ config NET_VENDOR_HISILICON
bool "Hisilicon devices"
default y
depends on OF || ACPI
- depends on ARM || ARM64 || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -18,6 +17,8 @@ config NET_VENDOR_HISILICON
if NET_VENDOR_HISILICON
+if ARM || ARM64 || COMPILE_TEST
+
config HIX5HD2_GMAC
tristate "Hisilicon HIX5HD2 Family Network Device Support"
select PHYLIB
@@ -141,4 +142,19 @@ config HNS3_ENET
endif #HNS3
+endif # ARM || ARM64 || COMPILE_TEST
+
+config HIBMCGE
+ tristate "Hisilicon BMC Gigabit Ethernet Device Support"
+ depends on PCI && PCI_MSI
+ select PHYLIB
+ select MOTORCOMM_PHY
+ select REALTEK_PHY
+ help
+ If you wish to compile a kernel for a BMC with an HIBMC-xx_gmac,
+ say Y here. This driver is suitable for use on boards such as
+ the HIBMC-210.
+
+ If you are unsure, say N.
+
endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
index 7f76d412047a..0e2cadfea8ff 100644
--- a/drivers/net/ethernet/hisilicon/Makefile
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_HNS_MDIO) += hns_mdio.o
obj-$(CONFIG_HNS) += hns/
obj-$(CONFIG_HNS3) += hns3/
obj-$(CONFIG_HISI_FEMAC) += hisi_femac.o
+obj-$(CONFIG_HIBMCGE) += hibmcge/
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/Makefile b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
new file mode 100644
index 000000000000..ae58ac38c206
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Makefile for the HISILICON BMC GE network device drivers.
+#
+
+obj-$(CONFIG_HIBMCGE) += hibmcge.o
+
+hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
new file mode 100644
index 000000000000..96daf058d387
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_COMMON_H
+#define __HBG_COMMON_H
+
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include "hbg_reg.h"
+
+#define HBG_STATUS_DISABLE 0x0
+#define HBG_STATUS_ENABLE 0x1
+#define HBG_RX_SKIP1 0x00
+#define HBG_RX_SKIP2 0x01
+#define HBG_VECTOR_NUM 4
+#define HBG_PCU_CACHE_LINE_SIZE 32
+#define HBG_TX_TIMEOUT_BUF_LEN 1024
+#define HBG_RX_DESCR 0x01
+
+#define HBG_PACKET_HEAD_SIZE ((HBG_RX_SKIP1 + HBG_RX_SKIP2 + \
+ HBG_RX_DESCR) * HBG_PCU_CACHE_LINE_SIZE)
+
+enum hbg_dir {
+ HBG_DIR_TX = 1 << 0,
+ HBG_DIR_RX = 1 << 1,
+ HBG_DIR_TX_RX = HBG_DIR_TX | HBG_DIR_RX,
+};
+
+enum hbg_tx_state {
+ HBG_TX_STATE_COMPLETE = 0, /* cleared state, must stay 0 */
+ HBG_TX_STATE_START,
+};
+
+enum hbg_nic_state {
+ HBG_NIC_STATE_EVENT_HANDLING = 0,
+};
+
+struct hbg_buffer {
+ u32 state;
+ dma_addr_t state_dma;
+
+ struct sk_buff *skb;
+ dma_addr_t skb_dma;
+ u32 skb_len;
+
+ enum hbg_dir dir;
+ struct hbg_ring *ring;
+ struct hbg_priv *priv;
+};
+
+struct hbg_ring {
+ struct hbg_buffer *queue;
+ dma_addr_t queue_dma;
+
+ union {
+ u32 head;
+ u32 ntc;
+ };
+ union {
+ u32 tail;
+ u32 ntu;
+ };
+ u32 len;
+
+ enum hbg_dir dir;
+ struct hbg_priv *priv;
+ struct napi_struct napi;
+ char *tout_log_buf; /* tx timeout log buffer */
+};
+
+enum hbg_hw_event_type {
+ HBG_HW_EVENT_NONE = 0,
+ HBG_HW_EVENT_INIT, /* driver is loading */
+ HBG_HW_EVENT_RESET,
+};
+
+struct hbg_dev_specs {
+ u32 mac_id;
+ struct sockaddr mac_addr;
+ u32 phy_addr;
+ u32 mdio_frequency;
+ u32 rx_fifo_num;
+ u32 tx_fifo_num;
+ u32 vlan_layers;
+ u32 max_mtu;
+ u32 min_mtu;
+
+ u32 max_frame_len;
+ u32 rx_buf_size;
+};
+
+struct hbg_irq_info {
+ const char *name;
+ u32 mask;
+ bool re_enable;
+ bool need_print;
+ u64 count;
+
+ void (*irq_handle)(struct hbg_priv *priv, struct hbg_irq_info *info);
+};
+
+struct hbg_vector {
+ char name[HBG_VECTOR_NUM][32];
+ struct hbg_irq_info *info_array;
+ u32 info_array_len;
+};
+
+struct hbg_mac {
+ struct mii_bus *mdio_bus;
+ struct phy_device *phydev;
+ u8 phy_addr;
+
+ u32 speed;
+ u32 duplex;
+ u32 autoneg;
+ u32 link_status;
+};
+
+struct hbg_priv {
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ u8 __iomem *io_base;
+ struct hbg_dev_specs dev_specs;
+ unsigned long state;
+ struct hbg_mac mac;
+ struct hbg_vector vectors;
+ struct hbg_ring tx_ring;
+ struct hbg_ring rx_ring;
+};
+
+#endif
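
The head-room constants above fix the per-buffer prefix the hardware writes before the payload. A minimal sketch of the resulting arithmetic (`specs` stands for a struct hbg_dev_specs instance; illustrative only):

    /* (HBG_RX_SKIP1 + HBG_RX_SKIP2 + HBG_RX_DESCR) * HBG_PCU_CACHE_LINE_SIZE
     *   = (0x00 + 0x01 + 0x01) * 32 = 64 bytes of head per rx buffer
     */
    u32 head = HBG_PACKET_HEAD_SIZE;              /* 64 */
    u32 buf_size = head + specs->max_frame_len;   /* == specs->rx_buf_size */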
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
new file mode 100644
index 000000000000..c3370114aef3
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include "hbg_ethtool.h"
+
+static const struct ethtool_ops hbg_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+void hbg_ethtool_set_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &hbg_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
new file mode 100644
index 000000000000..628707ec2686
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_ETHTOOL_H
+#define __HBG_ETHTOOL_H
+
+#include <linux/netdevice.h>
+
+void hbg_ethtool_set_ops(struct net_device *netdev);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
new file mode 100644
index 000000000000..05295c2ad439
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/iopoll.h>
+#include <linux/minmax.h>
+#include "hbg_common.h"
+#include "hbg_hw.h"
+#include "hbg_reg.h"
+
+#define HBG_HW_EVENT_WAIT_TIMEOUT_US (2 * 1000 * 1000)
+#define HBG_HW_EVENT_WAIT_INTERVAL_US (10 * 1000)
+/* Little endian or big endian.
+ * "ctrl" means the packet descriptor, "data" means the skb packet data.
+ */
+#define HBG_ENDIAN_CTRL_LE_DATA_BE 0x0
+#define HBG_PCU_FRAME_LEN_PLUS 4
+
+static bool hbg_hw_spec_is_valid(struct hbg_priv *priv)
+{
+ return hbg_reg_read(priv, HBG_REG_SPEC_VALID_ADDR) &&
+ !hbg_reg_read(priv, HBG_REG_EVENT_REQ_ADDR);
+}
+
+int hbg_hw_event_notify(struct hbg_priv *priv,
+ enum hbg_hw_event_type event_type)
+{
+ bool is_valid;
+ int ret;
+
+ if (test_and_set_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state))
+ return -EBUSY;
+
+ /* notify */
+ hbg_reg_write(priv, HBG_REG_EVENT_REQ_ADDR, event_type);
+
+ ret = read_poll_timeout(hbg_hw_spec_is_valid, is_valid, is_valid,
+ HBG_HW_EVENT_WAIT_INTERVAL_US,
+ HBG_HW_EVENT_WAIT_TIMEOUT_US,
+ HBG_HW_EVENT_WAIT_INTERVAL_US, priv);
+
+ clear_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state);
+
+ if (ret)
+ dev_err(&priv->pdev->dev,
+ "event %d wait timeout\n", event_type);
+
+ return ret;
+}
+
+static int hbg_hw_dev_specs_init(struct hbg_priv *priv)
+{
+ struct hbg_dev_specs *specs = &priv->dev_specs;
+ u64 mac_addr;
+
+ if (!hbg_hw_spec_is_valid(priv)) {
+ dev_err(&priv->pdev->dev, "dev_specs not init\n");
+ return -EINVAL;
+ }
+
+ specs->mac_id = hbg_reg_read(priv, HBG_REG_MAC_ID_ADDR);
+ specs->phy_addr = hbg_reg_read(priv, HBG_REG_PHY_ID_ADDR);
+ specs->mdio_frequency = hbg_reg_read(priv, HBG_REG_MDIO_FREQ_ADDR);
+ specs->max_mtu = hbg_reg_read(priv, HBG_REG_MAX_MTU_ADDR);
+ specs->min_mtu = hbg_reg_read(priv, HBG_REG_MIN_MTU_ADDR);
+ specs->vlan_layers = hbg_reg_read(priv, HBG_REG_VLAN_LAYERS_ADDR);
+ specs->rx_fifo_num = hbg_reg_read(priv, HBG_REG_RX_FIFO_NUM_ADDR);
+ specs->tx_fifo_num = hbg_reg_read(priv, HBG_REG_TX_FIFO_NUM_ADDR);
+ mac_addr = hbg_reg_read64(priv, HBG_REG_MAC_ADDR_ADDR);
+ u64_to_ether_addr(mac_addr, (u8 *)specs->mac_addr.sa_data);
+
+ if (!is_valid_ether_addr((u8 *)specs->mac_addr.sa_data))
+ return -EADDRNOTAVAIL;
+
+ specs->max_frame_len = HBG_PCU_CACHE_LINE_SIZE + specs->max_mtu;
+ specs->rx_buf_size = HBG_PACKET_HEAD_SIZE + specs->max_frame_len;
+ return 0;
+}
+
+u32 hbg_hw_get_irq_status(struct hbg_priv *priv)
+{
+ u32 status;
+
+ status = hbg_reg_read(priv, HBG_REG_CF_INTRPT_STAT_ADDR);
+
+ hbg_field_modify(status, HBG_INT_MSK_TX_B,
+ hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_STAT_ADDR));
+ hbg_field_modify(status, HBG_INT_MSK_RX_B,
+ hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_STAT_ADDR));
+
+ return status;
+}
+
+void hbg_hw_irq_clear(struct hbg_priv *priv, u32 mask)
+{
+ if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
+ return hbg_reg_write(priv, HBG_REG_CF_IND_TXINT_CLR_ADDR, 0x1);
+
+ if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
+ return hbg_reg_write(priv, HBG_REG_CF_IND_RXINT_CLR_ADDR, 0x1);
+
+ return hbg_reg_write(priv, HBG_REG_CF_INTRPT_CLR_ADDR, mask);
+}
+
+bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask)
+{
+ if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
+ return hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_MSK_ADDR);
+
+ if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
+ return hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_MSK_ADDR);
+
+ return hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR) & mask;
+}
+
+void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable)
+{
+ u32 value;
+
+ if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
+ return hbg_reg_write(priv,
+ HBG_REG_CF_IND_TXINT_MSK_ADDR, enable);
+
+ if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
+ return hbg_reg_write(priv,
+ HBG_REG_CF_IND_RXINT_MSK_ADDR, enable);
+
+ value = hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR);
+ if (enable)
+ value |= mask;
+ else
+ value &= ~mask;
+
+ hbg_reg_write(priv, HBG_REG_CF_INTRPT_MSK_ADDR, value);
+}
+
+void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr)
+{
+ hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_2_ADDR, mac_addr);
+}
+
+static void hbg_hw_set_pcu_max_frame_len(struct hbg_priv *priv,
+ u16 max_frame_len)
+{
+ max_frame_len = max_t(u32, max_frame_len, ETH_DATA_LEN);
+
+ /* lower two bits of value must be set to 0 */
+ max_frame_len = round_up(max_frame_len, HBG_PCU_FRAME_LEN_PLUS);
+
+ hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_LEN_ADDR,
+ HBG_REG_MAX_FRAME_LEN_M, max_frame_len);
+}
+
+static void hbg_hw_set_mac_max_frame_len(struct hbg_priv *priv,
+ u16 max_frame_size)
+{
+ hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_SIZE_ADDR,
+ HBG_REG_MAX_FRAME_LEN_M, max_frame_size);
+}
+
+void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu)
+{
+ hbg_hw_set_pcu_max_frame_len(priv, mtu);
+ hbg_hw_set_mac_max_frame_len(priv, mtu);
+}
+
+void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable)
+{
+ hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR,
+ HBG_REG_PORT_ENABLE_TX_B, enable);
+ hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR,
+ HBG_REG_PORT_ENABLE_RX_B, enable);
+}
+
+u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir)
+{
+ if (dir & HBG_DIR_TX)
+ return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
+ HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M);
+
+ if (dir & HBG_DIR_RX)
+ return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
+ HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M);
+
+ return 0;
+}
+
+void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc)
+{
+ hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_0_ADDR, tx_desc->word0);
+ hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_1_ADDR, tx_desc->word1);
+ hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_2_ADDR, tx_desc->word2);
+ hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_3_ADDR, tx_desc->word3);
+}
+
+void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
+{
+ hbg_reg_write(priv, HBG_REG_RX_CFF_ADDR_ADDR, buffer_dma_addr);
+}
+
+void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
+{
+ hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
+ HBG_REG_PORT_MODE_M, speed);
+ hbg_reg_write_field(priv, HBG_REG_DUPLEX_TYPE_ADDR,
+ HBG_REG_DUPLEX_B, duplex);
+}
+
+static void hbg_hw_init_transmit_ctrl(struct hbg_priv *priv)
+{
+ u32 ctrl = 0;
+
+ ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_AN_EN_B, HBG_STATUS_ENABLE);
+ ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_CRC_ADD_B, HBG_STATUS_ENABLE);
+ ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_PAD_EN_B, HBG_STATUS_ENABLE);
+
+ hbg_reg_write(priv, HBG_REG_TRANSMIT_CTRL_ADDR, ctrl);
+}
+
+static void hbg_hw_init_rx_ctrl(struct hbg_priv *priv)
+{
+ u32 ctrl = 0;
+
+ ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_GET_ADDR_MODE_B,
+ HBG_STATUS_ENABLE);
+ ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_TIME_INF_EN_B, HBG_STATUS_DISABLE);
+ ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE_M, HBG_RX_SKIP1);
+ ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M,
+ HBG_RX_SKIP2);
+ ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_ALIGN_NUM_M, NET_IP_ALIGN);
+ ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_PORT_NUM, priv->dev_specs.mac_id);
+
+ hbg_reg_write(priv, HBG_REG_RX_CTRL_ADDR, ctrl);
+}
+
+static void hbg_hw_init_rx_control(struct hbg_priv *priv)
+{
+ hbg_hw_init_rx_ctrl(priv);
+
+ /* parse from L2 layer */
+ hbg_reg_write_field(priv, HBG_REG_RX_PKT_MODE_ADDR,
+ HBG_REG_RX_PKT_MODE_PARSE_MODE_M, 0x1);
+
+ hbg_reg_write_field(priv, HBG_REG_RECV_CTRL_ADDR,
+ HBG_REG_RECV_CTRL_STRIP_PAD_EN_B,
+ HBG_STATUS_ENABLE);
+ hbg_reg_write_field(priv, HBG_REG_RX_BUF_SIZE_ADDR,
+ HBG_REG_RX_BUF_SIZE_M, priv->dev_specs.rx_buf_size);
+ hbg_reg_write_field(priv, HBG_REG_CF_CRC_STRIP_ADDR,
+ HBG_REG_CF_CRC_STRIP_B, HBG_STATUS_DISABLE);
+}
+
+int hbg_hw_init(struct hbg_priv *priv)
+{
+ int ret;
+
+ ret = hbg_hw_dev_specs_init(priv);
+ if (ret)
+ return ret;
+
+ hbg_reg_write_field(priv, HBG_REG_BUS_CTRL_ADDR,
+ HBG_REG_BUS_CTRL_ENDIAN_M,
+ HBG_ENDIAN_CTRL_LE_DATA_BE);
+ hbg_reg_write_field(priv, HBG_REG_MODE_CHANGE_EN_ADDR,
+ HBG_REG_MODE_CHANGE_EN_B, HBG_STATUS_ENABLE);
+
+ hbg_hw_init_rx_control(priv);
+ hbg_hw_init_transmit_ctrl(priv);
+ return 0;
+}
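
hbg_hw_set_pcu_max_frame_len() clamps and then aligns the frame length before programming it. A worked example of the two steps, assuming an input of 1526 bytes (value illustrative):

    len = max_t(u32, 1526, ETH_DATA_LEN);          /* max(1526, 1500) = 1526 */
    len = round_up(len, HBG_PCU_FRAME_LEN_PLUS);   /* round_up(1526, 4) = 1528 */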
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
new file mode 100644
index 000000000000..14fb39241c93
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_HW_H
+#define __HBG_HW_H
+
+#include <linux/bitfield.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+static inline u32 hbg_reg_read(struct hbg_priv *priv, u32 addr)
+{
+ return readl(priv->io_base + addr);
+}
+
+static inline void hbg_reg_write(struct hbg_priv *priv, u32 addr, u32 value)
+{
+ writel(value, priv->io_base + addr);
+}
+
+static inline u64 hbg_reg_read64(struct hbg_priv *priv, u32 addr)
+{
+ return lo_hi_readq(priv->io_base + addr);
+}
+
+static inline void hbg_reg_write64(struct hbg_priv *priv, u32 addr, u64 value)
+{
+ lo_hi_writeq(value, priv->io_base + addr);
+}
+
+#define hbg_reg_read_field(priv, addr, mask) \
+ FIELD_GET(mask, hbg_reg_read(priv, addr))
+
+#define hbg_field_modify(reg_value, mask, value) ({ \
+ (reg_value) &= ~(mask); \
+ (reg_value) |= FIELD_PREP(mask, value); })
+
+#define hbg_reg_write_field(priv, addr, mask, val) ({ \
+ typeof(priv) _priv = (priv); \
+ typeof(addr) _addr = (addr); \
+ u32 _value = hbg_reg_read(_priv, _addr); \
+ hbg_field_modify(_value, mask, val); \
+ hbg_reg_write(_priv, _addr, _value); })
+
+int hbg_hw_event_notify(struct hbg_priv *priv,
+ enum hbg_hw_event_type event_type);
+int hbg_hw_init(struct hbg_priv *priv);
+void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex);
+u32 hbg_hw_get_irq_status(struct hbg_priv *priv);
+void hbg_hw_irq_clear(struct hbg_priv *priv, u32 mask);
+bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask);
+void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable);
+void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu);
+void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable);
+void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr);
+u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir);
+void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc);
+void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr);
+
+#endif
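
hbg_reg_write_field() is a read-modify-write on a single register field. A minimal usage sketch against the masks defined in hbg_reg.h:

    /* Set the 4-bit port mode field without disturbing the other bits:
     * read the register, clear GENMASK(3, 0), OR in FIELD_PREP(mask, 0x8),
     * then write the result back.
     */
    hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
                        HBG_REG_PORT_MODE_M, HBG_PORT_MODE_SGMII_1000M);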
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
new file mode 100644
index 000000000000..25dd25f096fe
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/interrupt.h>
+#include "hbg_irq.h"
+#include "hbg_hw.h"
+
+static void hbg_irq_handle_err(struct hbg_priv *priv,
+ struct hbg_irq_info *irq_info)
+{
+ if (irq_info->need_print)
+ dev_err(&priv->pdev->dev,
+ "receive error interrupt: %s\n", irq_info->name);
+}
+
+static void hbg_irq_handle_tx(struct hbg_priv *priv,
+ struct hbg_irq_info *irq_info)
+{
+ napi_schedule(&priv->tx_ring.napi);
+}
+
+static void hbg_irq_handle_rx(struct hbg_priv *priv,
+ struct hbg_irq_info *irq_info)
+{
+ napi_schedule(&priv->rx_ring.napi);
+}
+
+#define HBG_TXRX_IRQ_I(name, handle) \
+ {#name, HBG_INT_MSK_##name##_B, false, false, 0, handle}
+#define HBG_ERR_IRQ_I(name, need_print) \
+ {#name, HBG_INT_MSK_##name##_B, true, need_print, 0, hbg_irq_handle_err}
+
+static struct hbg_irq_info hbg_irqs[] = {
+ HBG_TXRX_IRQ_I(RX, hbg_irq_handle_rx),
+ HBG_TXRX_IRQ_I(TX, hbg_irq_handle_tx),
+ HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true),
+ HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true),
+ HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true),
+ HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true),
+ HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true),
+ HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true),
+ HBG_ERR_IRQ_I(TX_AHB_ERR, true),
+ HBG_ERR_IRQ_I(RX_BUF_AVL, false),
+ HBG_ERR_IRQ_I(REL_BUF_ERR, true),
+ HBG_ERR_IRQ_I(TXCFG_AVL, false),
+ HBG_ERR_IRQ_I(TX_DROP, false),
+ HBG_ERR_IRQ_I(RX_DROP, false),
+ HBG_ERR_IRQ_I(RX_AHB_ERR, true),
+ HBG_ERR_IRQ_I(MAC_FIFO_ERR, false),
+ HBG_ERR_IRQ_I(RBREQ_ERR, false),
+ HBG_ERR_IRQ_I(WE_ERR, false),
+};
+
+static irqreturn_t hbg_irq_handle(int irq_num, void *p)
+{
+ struct hbg_irq_info *info;
+ struct hbg_priv *priv = p;
+ u32 status;
+ u32 i;
+
+ status = hbg_hw_get_irq_status(priv);
+ for (i = 0; i < priv->vectors.info_array_len; i++) {
+ info = &priv->vectors.info_array[i];
+ if (status & info->mask) {
+ if (!hbg_hw_irq_is_enabled(priv, info->mask))
+ continue;
+
+ hbg_hw_irq_enable(priv, info->mask, false);
+ hbg_hw_irq_clear(priv, info->mask);
+
+ info->count++;
+ if (info->irq_handle)
+ info->irq_handle(priv, info);
+
+ if (info->re_enable)
+ hbg_hw_irq_enable(priv, info->mask, true);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const char *irq_names_map[HBG_VECTOR_NUM] = { "tx", "rx",
+ "err", "mdio" };
+
+int hbg_irq_init(struct hbg_priv *priv)
+{
+ struct hbg_vector *vectors = &priv->vectors;
+ struct device *dev = &priv->pdev->dev;
+ int ret, id;
+ u32 i;
+
+	/* pcim_enable_device() was used, so the vectors are device managed */
+ ret = pci_alloc_irq_vectors(priv->pdev, HBG_VECTOR_NUM, HBG_VECTOR_NUM,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to allocate vectors\n");
+
+ if (ret != HBG_VECTOR_NUM)
+ return dev_err_probe(dev, -EINVAL,
+ "requested %u MSI, but allocated %d MSI\n",
+ HBG_VECTOR_NUM, ret);
+
+	/* The mdio irq is not requested, so the number of requested
+	 * interrupts is HBG_VECTOR_NUM - 1.
+	 */
+ for (i = 0; i < HBG_VECTOR_NUM - 1; i++) {
+ id = pci_irq_vector(priv->pdev, i);
+ if (id < 0)
+ return dev_err_probe(dev, id, "failed to get irq id\n");
+
+ snprintf(vectors->name[i], sizeof(vectors->name[i]), "%s-%s-%s",
+ dev_driver_string(dev), pci_name(priv->pdev),
+ irq_names_map[i]);
+
+ ret = devm_request_irq(dev, id, hbg_irq_handle, 0,
+ vectors->name[i], priv);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to request irq: %s\n",
+ irq_names_map[i]);
+ }
+
+ vectors->info_array = hbg_irqs;
+ vectors->info_array_len = ARRAY_SIZE(hbg_irqs);
+ return 0;
+}
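
The two initializer macros keep the interrupt table compact; expanded field for field (name, mask, re_enable, need_print, count, irq_handle), one entry reads:

    /* HBG_ERR_IRQ_I(RX_DROP, false) expands to: */
    { "RX_DROP", HBG_INT_MSK_RX_DROP_B, true, false, 0, hbg_irq_handle_err }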
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.h
new file mode 100644
index 000000000000..5c5323cfc751
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_IRQ_H
+#define __HBG_IRQ_H
+
+#include "hbg_common.h"
+
+int hbg_irq_init(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
new file mode 100644
index 000000000000..75505fb5cc4a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include "hbg_common.h"
+#include "hbg_ethtool.h"
+#include "hbg_hw.h"
+#include "hbg_irq.h"
+#include "hbg_mdio.h"
+#include "hbg_txrx.h"
+
+static void hbg_change_mtu(struct hbg_priv *priv, int new_mtu);
+
+static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled)
+{
+ struct hbg_irq_info *info;
+ u32 i;
+
+ for (i = 0; i < priv->vectors.info_array_len; i++) {
+ info = &priv->vectors.info_array[i];
+ hbg_hw_irq_enable(priv, info->mask, enabled);
+ }
+}
+
+static int hbg_net_open(struct net_device *netdev)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ ret = hbg_txrx_init(priv);
+ if (ret)
+ return ret;
+
+ hbg_all_irq_enable(priv, true);
+ hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
+ netif_start_queue(netdev);
+ hbg_phy_start(priv);
+
+ return 0;
+}
+
+/* This function can only be called after hbg_txrx_uninit() */
+static int hbg_hw_txrx_clear(struct hbg_priv *priv)
+{
+ int ret;
+
+	/* After the ring buffers have been released,
+	 * do a reset to release the hw fifo rx ring buffers.
+	 */
+ ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET);
+ if (ret)
+ return ret;
+
+ /* After reset, regs need to be reconfigured */
+ hbg_hw_init(priv);
+ hbg_hw_set_uc_addr(priv, ether_addr_to_u64(priv->netdev->dev_addr));
+ hbg_change_mtu(priv, priv->netdev->mtu);
+
+ return 0;
+}
+
+static int hbg_net_stop(struct net_device *netdev)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_phy_stop(priv);
+ netif_stop_queue(netdev);
+ hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);
+ hbg_all_irq_enable(priv, false);
+ hbg_txrx_uninit(priv);
+ return hbg_hw_txrx_clear(priv);
+}
+
+static int hbg_net_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ u8 *mac_addr;
+
+ mac_addr = ((struct sockaddr *)addr)->sa_data;
+
+ if (!is_valid_ether_addr(mac_addr))
+ return -EADDRNOTAVAIL;
+
+ hbg_hw_set_uc_addr(priv, ether_addr_to_u64(mac_addr));
+ dev_addr_set(netdev, mac_addr);
+
+ return 0;
+}
+
+static void hbg_change_mtu(struct hbg_priv *priv, int new_mtu)
+{
+ u32 frame_len;
+
+ frame_len = new_mtu + VLAN_HLEN * priv->dev_specs.vlan_layers +
+ ETH_HLEN + ETH_FCS_LEN;
+ hbg_hw_set_mtu(priv, frame_len);
+}
+
+static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+	dev_dbg(&priv->pdev->dev,
+		"change mtu from %u to %u\n", netdev->mtu, new_mtu);
+
+	hbg_change_mtu(priv, new_mtu);
+	WRITE_ONCE(netdev->mtu, new_mtu);
+
+ return 0;
+}
+
+static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_ring *ring = &priv->tx_ring;
+ char *buf = ring->tout_log_buf;
+ u32 pos = 0;
+
+ pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
+ "ring used num: %u, fifo used num: %u\n",
+ hbg_get_queue_used_num(ring),
+ hbg_hw_get_fifo_used_num(priv, HBG_DIR_TX));
+ pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
+ "ntc: %u, ntu: %u, irq enabled: %u\n",
+ ring->ntc, ring->ntu,
+ hbg_hw_irq_is_enabled(priv, HBG_INT_MSK_TX_B));
+
+ netdev_info(netdev, "%s", buf);
+}
+
+static const struct net_device_ops hbg_netdev_ops = {
+ .ndo_open = hbg_net_open,
+ .ndo_stop = hbg_net_stop,
+ .ndo_start_xmit = hbg_net_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = hbg_net_set_mac_address,
+ .ndo_change_mtu = hbg_net_change_mtu,
+ .ndo_tx_timeout = hbg_net_tx_timeout,
+};
+
+static int hbg_init(struct hbg_priv *priv)
+{
+ int ret;
+
+ ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_INIT);
+ if (ret)
+ return ret;
+
+ ret = hbg_hw_init(priv);
+ if (ret)
+ return ret;
+
+ ret = hbg_irq_init(priv);
+ if (ret)
+ return ret;
+
+ return hbg_mdio_init(priv);
+}
+
+static int hbg_pci_init(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable PCI device\n");
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to set PCI DMA mask\n");
+
+ ret = pcim_iomap_regions(pdev, BIT(0), dev_driver_string(dev));
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to map PCI bar space\n");
+
+ priv->io_base = pcim_iomap_table(pdev)[0];
+ if (!priv->io_base)
+ return dev_err_probe(dev, -ENOMEM, "failed to get io base\n");
+
+ pci_set_master(pdev);
+ return 0;
+}
+
+static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *netdev;
+ struct hbg_priv *priv;
+ int ret;
+
+ netdev = devm_alloc_etherdev(dev, sizeof(struct hbg_priv));
+ if (!netdev)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, netdev);
+ SET_NETDEV_DEV(netdev, dev);
+
+ priv = netdev_priv(netdev);
+ priv->netdev = netdev;
+ priv->pdev = pdev;
+
+ ret = hbg_pci_init(pdev);
+ if (ret)
+ return ret;
+
+ ret = hbg_init(priv);
+ if (ret)
+ return ret;
+
+ netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ netdev->max_mtu = priv->dev_specs.max_mtu;
+ netdev->min_mtu = priv->dev_specs.min_mtu;
+ netdev->netdev_ops = &hbg_netdev_ops;
+ netdev->watchdog_timeo = 5 * HZ;
+
+ hbg_change_mtu(priv, ETH_DATA_LEN);
+ hbg_net_set_mac_address(priv->netdev, &priv->dev_specs.mac_addr);
+ hbg_ethtool_set_ops(netdev);
+
+ ret = devm_register_netdev(dev, netdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register netdev\n");
+
+ netif_carrier_off(netdev);
+ return 0;
+}
+
+static const struct pci_device_id hbg_pci_tbl[] = {
+ {PCI_VDEVICE(HUAWEI, 0x3730), 0},
+ { }
+};
+MODULE_DEVICE_TABLE(pci, hbg_pci_tbl);
+
+static struct pci_driver hbg_driver = {
+ .name = "hibmcge",
+ .id_table = hbg_pci_tbl,
+ .probe = hbg_probe,
+};
+module_pci_driver(hbg_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("hibmcge driver");
+MODULE_VERSION("1.0");
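
hbg_change_mtu() converts an MTU into the on-wire frame length before programming the hardware. A worked example, assuming a device-reported vlan_layers of 2 (the real value comes from HBG_REG_VLAN_LAYERS_ADDR, so this number is illustrative):

    frame_len = 1500 + VLAN_HLEN * 2 + ETH_HLEN + ETH_FCS_LEN;
    /*        = 1500 + 4 * 2 + 14 + 4 = 1526 bytes */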
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
new file mode 100644
index 000000000000..a3479fba8501
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/phy.h>
+#include "hbg_common.h"
+#include "hbg_hw.h"
+#include "hbg_mdio.h"
+#include "hbg_reg.h"
+
+#define HBG_MAC_GET_PRIV(mac) ((struct hbg_priv *)(mac)->mdio_bus->priv)
+#define HBG_MII_BUS_GET_MAC(bus) (&((struct hbg_priv *)(bus)->priv)->mac)
+
+#define HBG_MDIO_C22_MODE 0x1
+#define HBG_MDIO_C22_REG_WRITE 0x1
+#define HBG_MDIO_C22_REG_READ 0x2
+
+#define HBG_MDIO_OP_TIMEOUT_US (1 * 1000 * 1000)
+#define HBG_MDIO_OP_INTERVAL_US (5 * 1000)
+
+static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd)
+{
+ hbg_reg_write(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_COMMAND_ADDR, cmd);
+}
+
+static void hbg_mdio_get_command(struct hbg_mac *mac, u32 *cmd)
+{
+ *cmd = hbg_reg_read(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_COMMAND_ADDR);
+}
+
+static void hbg_mdio_set_wdata_reg(struct hbg_mac *mac, u16 wdata_value)
+{
+ hbg_reg_write_field(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_WDATA_ADDR,
+ HBG_REG_MDIO_WDATA_M, wdata_value);
+}
+
+static u32 hbg_mdio_get_rdata_reg(struct hbg_mac *mac)
+{
+ return hbg_reg_read_field(HBG_MAC_GET_PRIV(mac),
+ HBG_REG_MDIO_RDATA_ADDR,
+ HBG_REG_MDIO_WDATA_M);
+}
+
+static int hbg_mdio_wait_ready(struct hbg_mac *mac)
+{
+ struct hbg_priv *priv = HBG_MAC_GET_PRIV(mac);
+ u32 cmd = 0;
+ int ret;
+
+ ret = readl_poll_timeout(priv->io_base + HBG_REG_MDIO_COMMAND_ADDR, cmd,
+ !FIELD_GET(HBG_REG_MDIO_COMMAND_START_B, cmd),
+ HBG_MDIO_OP_INTERVAL_US,
+ HBG_MDIO_OP_TIMEOUT_US);
+
+ return ret ? -ETIMEDOUT : 0;
+}
+
+static int hbg_mdio_cmd_send(struct hbg_mac *mac, u32 prt_addr, u32 dev_addr,
+ u32 type, u32 op_code)
+{
+ u32 cmd = 0;
+
+ hbg_mdio_get_command(mac, &cmd);
+ hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_ST_M, type);
+ hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_OP_M, op_code);
+ hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_PRTAD_M, prt_addr);
+ hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_DEVAD_M, dev_addr);
+
+	/* if auto scan is enabled, this value must be fixed to 0 */
+ hbg_field_modify(cmd, HBG_REG_MDIO_COMMAND_START_B, 0x1);
+
+ hbg_mdio_set_command(mac, cmd);
+
+ /* wait operation complete and check the result */
+ return hbg_mdio_wait_ready(mac);
+}
+
+static int hbg_mdio_read22(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct hbg_mac *mac = HBG_MII_BUS_GET_MAC(bus);
+ int ret;
+
+ ret = hbg_mdio_cmd_send(mac, phy_addr, regnum, HBG_MDIO_C22_MODE,
+ HBG_MDIO_C22_REG_READ);
+ if (ret)
+ return ret;
+
+ return hbg_mdio_get_rdata_reg(mac);
+}
+
+static int hbg_mdio_write22(struct mii_bus *bus, int phy_addr, int regnum,
+ u16 val)
+{
+ struct hbg_mac *mac = HBG_MII_BUS_GET_MAC(bus);
+
+ hbg_mdio_set_wdata_reg(mac, val);
+ return hbg_mdio_cmd_send(mac, phy_addr, regnum, HBG_MDIO_C22_MODE,
+ HBG_MDIO_C22_REG_WRITE);
+}
+
+static void hbg_mdio_init_hw(struct hbg_priv *priv)
+{
+ u32 freq = priv->dev_specs.mdio_frequency;
+ struct hbg_mac *mac = &priv->mac;
+ u32 cmd = 0;
+
+ cmd |= FIELD_PREP(HBG_REG_MDIO_COMMAND_ST_M, HBG_MDIO_C22_MODE);
+ cmd |= FIELD_PREP(HBG_REG_MDIO_COMMAND_AUTO_SCAN_B, HBG_STATUS_DISABLE);
+
+	/* freq uses two bits, which are stored in clk_sel and clk_sel_exp */
+ cmd |= FIELD_PREP(HBG_REG_MDIO_COMMAND_CLK_SEL_B, freq & 0x1);
+ cmd |= FIELD_PREP(HBG_REG_MDIO_COMMAND_CLK_SEL_EXP_B,
+ (freq >> 1) & 0x1);
+
+ hbg_mdio_set_command(mac, cmd);
+}
+
+static void hbg_phy_adjust_link(struct net_device *netdev)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ u32 speed;
+
+ if (phydev->link != priv->mac.link_status) {
+ if (phydev->link) {
+ switch (phydev->speed) {
+ case SPEED_10:
+ speed = HBG_PORT_MODE_SGMII_10M;
+ break;
+ case SPEED_100:
+ speed = HBG_PORT_MODE_SGMII_100M;
+ break;
+ case SPEED_1000:
+ speed = HBG_PORT_MODE_SGMII_1000M;
+ break;
+ default:
+ return;
+ }
+
+ priv->mac.speed = speed;
+ priv->mac.duplex = phydev->duplex;
+ priv->mac.autoneg = phydev->autoneg;
+ hbg_hw_adjust_link(priv, speed, phydev->duplex);
+ }
+
+ priv->mac.link_status = phydev->link;
+ phy_print_status(phydev);
+ }
+}
+
+static void hbg_phy_disconnect(void *data)
+{
+ phy_disconnect((struct phy_device *)data);
+}
+
+static int hbg_phy_connect(struct hbg_priv *priv)
+{
+ struct phy_device *phydev = priv->mac.phydev;
+ struct device *dev = &priv->pdev->dev;
+ int ret;
+
+ ret = phy_connect_direct(priv->netdev, phydev, hbg_phy_adjust_link,
+ PHY_INTERFACE_MODE_SGMII);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to connect phy\n");
+
+ ret = devm_add_action_or_reset(dev, hbg_phy_disconnect, phydev);
+ if (ret)
+ return ret;
+
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
+void hbg_phy_start(struct hbg_priv *priv)
+{
+ phy_start(priv->mac.phydev);
+}
+
+void hbg_phy_stop(struct hbg_priv *priv)
+{
+ phy_stop(priv->mac.phydev);
+}
+
+int hbg_mdio_init(struct hbg_priv *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+ struct hbg_mac *mac = &priv->mac;
+ struct phy_device *phydev;
+ struct mii_bus *mdio_bus;
+ int ret;
+
+ mac->phy_addr = priv->dev_specs.phy_addr;
+ mdio_bus = devm_mdiobus_alloc(dev);
+ if (!mdio_bus)
+ return dev_err_probe(dev, -ENOMEM,
+ "failed to alloc MDIO bus\n");
+
+ mdio_bus->parent = dev;
+ mdio_bus->priv = priv;
+ mdio_bus->phy_mask = ~(1 << mac->phy_addr);
+ mdio_bus->name = "hibmcge mii bus";
+ mac->mdio_bus = mdio_bus;
+
+ mdio_bus->read = hbg_mdio_read22;
+ mdio_bus->write = hbg_mdio_write22;
+ snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "mii", dev_name(dev));
+
+ ret = devm_mdiobus_register(dev, mdio_bus);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register MDIO bus\n");
+
+ phydev = mdiobus_get_phy(mdio_bus, mac->phy_addr);
+ if (!phydev)
+ return dev_err_probe(dev, -ENODEV,
+ "failed to get phy device\n");
+
+ mac->phydev = phydev;
+ hbg_mdio_init_hw(priv);
+ return hbg_phy_connect(priv);
+}
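
hbg_mdio_cmd_send() assembles the command word field by field on top of the previously programmed value. Ignoring the clock-select bits set by hbg_mdio_init_hw(), a C22 read of MII_BMSR on PHY address 1 amounts to (sketch only):

    u32 cmd = FIELD_PREP(HBG_REG_MDIO_COMMAND_ST_M, HBG_MDIO_C22_MODE) |
              FIELD_PREP(HBG_REG_MDIO_COMMAND_OP_M, HBG_MDIO_C22_REG_READ) |
              FIELD_PREP(HBG_REG_MDIO_COMMAND_PRTAD_M, 1) |
              FIELD_PREP(HBG_REG_MDIO_COMMAND_DEVAD_M, MII_BMSR) |
              FIELD_PREP(HBG_REG_MDIO_COMMAND_START_B, 1);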
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
new file mode 100644
index 000000000000..febd02a309c7
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_MDIO_H
+#define __HBG_MDIO_H
+
+#include "hbg_common.h"
+
+int hbg_mdio_init(struct hbg_priv *priv);
+void hbg_phy_start(struct hbg_priv *priv);
+void hbg_phy_stop(struct hbg_priv *priv);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
new file mode 100644
index 000000000000..57d81c6d7633
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_REG_H
+#define __HBG_REG_H
+
+/* DEV SPEC */
+#define HBG_REG_SPEC_VALID_ADDR 0x0000
+#define HBG_REG_EVENT_REQ_ADDR 0x0004
+#define HBG_REG_MAC_ID_ADDR 0x0008
+#define HBG_REG_PHY_ID_ADDR 0x000C
+#define HBG_REG_MAC_ADDR_ADDR 0x0010
+#define HBG_REG_MDIO_FREQ_ADDR 0x0024
+#define HBG_REG_MAX_MTU_ADDR 0x0028
+#define HBG_REG_MIN_MTU_ADDR 0x002C
+#define HBG_REG_TX_FIFO_NUM_ADDR 0x0030
+#define HBG_REG_RX_FIFO_NUM_ADDR 0x0034
+#define HBG_REG_VLAN_LAYERS_ADDR 0x0038
+
+/* MDIO */
+#define HBG_REG_MDIO_BASE 0x8000
+#define HBG_REG_MDIO_COMMAND_ADDR (HBG_REG_MDIO_BASE + 0x0000)
+#define HBG_REG_MDIO_COMMAND_CLK_SEL_EXP_B BIT(17)
+#define HBG_REG_MDIO_COMMAND_AUTO_SCAN_B BIT(16)
+#define HBG_REG_MDIO_COMMAND_CLK_SEL_B BIT(15)
+#define HBG_REG_MDIO_COMMAND_START_B BIT(14)
+#define HBG_REG_MDIO_COMMAND_ST_M GENMASK(13, 12)
+#define HBG_REG_MDIO_COMMAND_OP_M GENMASK(11, 10)
+#define HBG_REG_MDIO_COMMAND_PRTAD_M GENMASK(9, 5)
+#define HBG_REG_MDIO_COMMAND_DEVAD_M GENMASK(4, 0)
+#define HBG_REG_MDIO_WDATA_ADDR (HBG_REG_MDIO_BASE + 0x0008)
+#define HBG_REG_MDIO_WDATA_M GENMASK(15, 0)
+#define HBG_REG_MDIO_RDATA_ADDR (HBG_REG_MDIO_BASE + 0x000C)
+#define HBG_REG_MDIO_STA_ADDR (HBG_REG_MDIO_BASE + 0x0010)
+
+/* GMAC */
+#define HBG_REG_SGMII_BASE 0x10000
+#define HBG_REG_DUPLEX_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x0008)
+#define HBG_REG_DUPLEX_B BIT(0)
+#define HBG_REG_MAX_FRAME_SIZE_ADDR (HBG_REG_SGMII_BASE + 0x003C)
+#define HBG_REG_PORT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x0040)
+#define HBG_REG_PORT_MODE_M GENMASK(3, 0)
+#define HBG_REG_PORT_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0044)
+#define HBG_REG_PORT_ENABLE_RX_B BIT(1)
+#define HBG_REG_PORT_ENABLE_TX_B BIT(2)
+#define HBG_REG_TRANSMIT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0060)
+#define HBG_REG_TRANSMIT_CTRL_PAD_EN_B BIT(7)
+#define HBG_REG_TRANSMIT_CTRL_CRC_ADD_B BIT(6)
+#define HBG_REG_TRANSMIT_CTRL_AN_EN_B BIT(5)
+#define HBG_REG_CF_CRC_STRIP_ADDR (HBG_REG_SGMII_BASE + 0x01B0)
+#define HBG_REG_CF_CRC_STRIP_B BIT(0)
+#define HBG_REG_MODE_CHANGE_EN_ADDR (HBG_REG_SGMII_BASE + 0x01B4)
+#define HBG_REG_MODE_CHANGE_EN_B BIT(0)
+#define HBG_REG_RECV_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x01E0)
+#define HBG_REG_RECV_CTRL_STRIP_PAD_EN_B BIT(3)
+#define HBG_REG_STATION_ADDR_LOW_2_ADDR (HBG_REG_SGMII_BASE + 0x0210)
+#define HBG_REG_STATION_ADDR_HIGH_2_ADDR (HBG_REG_SGMII_BASE + 0x0214)
+
+/* PCU */
+#define HBG_REG_CF_INTRPT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x042C)
+#define HBG_INT_MSK_WE_ERR_B BIT(31)
+#define HBG_INT_MSK_RBREQ_ERR_B BIT(30)
+#define HBG_INT_MSK_MAC_FIFO_ERR_B BIT(29)
+#define HBG_INT_MSK_RX_AHB_ERR_B BIT(28)
+#define HBG_INT_MSK_RX_DROP_B BIT(26)
+#define HBG_INT_MSK_TX_DROP_B BIT(25)
+#define HBG_INT_MSK_TXCFG_AVL_B BIT(24)
+#define HBG_INT_MSK_REL_BUF_ERR_B BIT(23)
+#define HBG_INT_MSK_RX_BUF_AVL_B BIT(22)
+#define HBG_INT_MSK_TX_AHB_ERR_B BIT(21)
+#define HBG_INT_MSK_SRAM_PARITY_ERR_B BIT(20)
+#define HBG_INT_MSK_MAC_APP_TX_FIFO_ERR_B BIT(19)
+#define HBG_INT_MSK_MAC_APP_RX_FIFO_ERR_B BIT(18)
+#define HBG_INT_MSK_MAC_PCS_TX_FIFO_ERR_B BIT(17)
+#define HBG_INT_MSK_MAC_PCS_RX_FIFO_ERR_B BIT(16)
+#define HBG_INT_MSK_MAC_MII_FIFO_ERR_B BIT(15)
+#define HBG_INT_MSK_TX_B BIT(1) /* just used in driver */
+#define HBG_INT_MSK_RX_B BIT(0) /* just used in driver */
+#define HBG_REG_CF_INTRPT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0434)
+#define HBG_REG_CF_INTRPT_CLR_ADDR (HBG_REG_SGMII_BASE + 0x0438)
+#define HBG_REG_MAX_FRAME_LEN_ADDR (HBG_REG_SGMII_BASE + 0x0444)
+#define HBG_REG_MAX_FRAME_LEN_M GENMASK(15, 0)
+#define HBG_REG_CF_CFF_DATA_NUM_ADDR (HBG_REG_SGMII_BASE + 0x045C)
+#define HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M GENMASK(8, 0)
+#define HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M GENMASK(24, 16)
+#define HBG_REG_TX_CFF_ADDR_0_ADDR (HBG_REG_SGMII_BASE + 0x0488)
+#define HBG_REG_TX_CFF_ADDR_1_ADDR (HBG_REG_SGMII_BASE + 0x048C)
+#define HBG_REG_TX_CFF_ADDR_2_ADDR (HBG_REG_SGMII_BASE + 0x0490)
+#define HBG_REG_TX_CFF_ADDR_3_ADDR (HBG_REG_SGMII_BASE + 0x0494)
+#define HBG_REG_RX_CFF_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x04A0)
+#define HBG_REG_RX_BUF_SIZE_ADDR (HBG_REG_SGMII_BASE + 0x04E4)
+#define HBG_REG_RX_BUF_SIZE_M GENMASK(15, 0)
+#define HBG_REG_BUS_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x04E8)
+#define HBG_REG_BUS_CTRL_ENDIAN_M GENMASK(2, 1)
+#define HBG_REG_RX_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x04F0)
+#define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE_M GENMASK(31, 28)
+#define HBG_REG_RX_CTRL_TIME_INF_EN_B BIT(23)
+#define HBG_REG_RX_CTRL_RX_ALIGN_NUM_M GENMASK(18, 17)
+#define HBG_REG_RX_CTRL_PORT_NUM GENMASK(16, 13)
+#define HBG_REG_RX_CTRL_RX_GET_ADDR_MODE_B BIT(12)
+#define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M GENMASK(3, 0)
+#define HBG_REG_RX_PKT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x04F4)
+#define HBG_REG_RX_PKT_MODE_PARSE_MODE_M GENMASK(22, 21)
+#define HBG_REG_CF_IND_TXINT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x0694)
+#define HBG_REG_IND_INTR_MASK_B BIT(0)
+#define HBG_REG_CF_IND_TXINT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0698)
+#define HBG_REG_CF_IND_TXINT_CLR_ADDR (HBG_REG_SGMII_BASE + 0x069C)
+#define HBG_REG_CF_IND_RXINT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x06a0)
+#define HBG_REG_CF_IND_RXINT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x06a4)
+#define HBG_REG_CF_IND_RXINT_CLR_ADDR (HBG_REG_SGMII_BASE + 0x06a8)
+
+enum hbg_port_mode {
+ /* 0x0 ~ 0x5 are reserved */
+ HBG_PORT_MODE_SGMII_10M = 0x6,
+ HBG_PORT_MODE_SGMII_100M = 0x7,
+ HBG_PORT_MODE_SGMII_1000M = 0x8,
+};
+
+struct hbg_tx_desc {
+ u32 word0;
+ u32 word1;
+ u32 word2; /* pkt_addr */
+ u32 word3; /* clear_addr */
+};
+
+#define HBG_TX_DESC_W0_IP_OFF_M GENMASK(30, 26)
+#define HBG_TX_DESC_W0_l3_CS_B BIT(2)
+#define HBG_TX_DESC_W0_WB_B BIT(1)
+#define HBG_TX_DESC_W0_l4_CS_B BIT(0)
+#define HBG_TX_DESC_W1_SEND_LEN_M GENMASK(19, 4)
+
+struct hbg_rx_desc {
+ u32 word0;
+ u32 word1; /* tag */
+ u32 word2;
+ u32 word3;
+ u32 word4;
+ u32 word5;
+};
+
+#define HBG_RX_DESC_W2_PKT_LEN_M GENMASK(31, 16)
+
+#endif
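
The descriptor field masks above are consumed with FIELD_PREP(). A minimal sketch of the tx descriptor words for a 64-byte frame with the IP header at offset 14 and checksum offload requested (mirrors hbg_init_tx_desc() in hbg_txrx.c):

    u32 word0 = FIELD_PREP(HBG_TX_DESC_W0_WB_B, 1) |
                FIELD_PREP(HBG_TX_DESC_W0_IP_OFF_M, 14) |
                FIELD_PREP(HBG_TX_DESC_W0_l3_CS_B, 1) |
                FIELD_PREP(HBG_TX_DESC_W0_l4_CS_B, 1);
    u32 word1 = FIELD_PREP(HBG_TX_DESC_W1_SEND_LEN_M, 64);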
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
new file mode 100644
index 000000000000..f4f256a0dfea
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <net/netdev_queues.h>
+#include "hbg_common.h"
+#include "hbg_irq.h"
+#include "hbg_reg.h"
+#include "hbg_txrx.h"
+
+#define netdev_get_tx_ring(netdev) \
+ (&(((struct hbg_priv *)netdev_priv(netdev))->tx_ring))
+
+#define buffer_to_dma_dir(buffer) (((buffer)->dir == HBG_DIR_RX) ? \
+ DMA_FROM_DEVICE : DMA_TO_DEVICE)
+
+#define hbg_queue_used_num(head, tail, ring) ({ \
+ typeof(ring) _ring = (ring); \
+ ((tail) + _ring->len - (head)) % _ring->len; })
+#define hbg_queue_left_num(head, tail, ring) ({ \
+ typeof(ring) _r = (ring); \
+ _r->len - hbg_queue_used_num((head), (tail), _r) - 1; })
+#define hbg_queue_is_empty(head, tail, ring) \
+ (hbg_queue_used_num((head), (tail), (ring)) == 0)
+#define hbg_queue_is_full(head, tail, ring) \
+ (hbg_queue_left_num((head), (tail), (ring)) == 0)
+#define hbg_queue_next_prt(p, ring) (((p) + 1) % (ring)->len)
+#define hbg_queue_move_next(p, ring) ({ \
+ typeof(ring) _ring = (ring); \
+ _ring->p = hbg_queue_next_prt(_ring->p, _ring); })
+
+#define HBG_TX_STOP_THRS 2
+#define HBG_TX_START_THRS (2 * HBG_TX_STOP_THRS)
+
+static int hbg_dma_map(struct hbg_buffer *buffer)
+{
+ struct hbg_priv *priv = buffer->priv;
+
+ buffer->skb_dma = dma_map_single(&priv->pdev->dev,
+ buffer->skb->data, buffer->skb_len,
+ buffer_to_dma_dir(buffer));
+ if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma)))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void hbg_dma_unmap(struct hbg_buffer *buffer)
+{
+ struct hbg_priv *priv = buffer->priv;
+
+ if (unlikely(!buffer->skb_dma))
+ return;
+
+ dma_unmap_single(&priv->pdev->dev, buffer->skb_dma, buffer->skb_len,
+ buffer_to_dma_dir(buffer));
+ buffer->skb_dma = 0;
+}
+
+static void hbg_init_tx_desc(struct hbg_buffer *buffer,
+ struct hbg_tx_desc *tx_desc)
+{
+ u32 ip_offset = buffer->skb->network_header - buffer->skb->mac_header;
+ u32 word0 = 0;
+
+ word0 |= FIELD_PREP(HBG_TX_DESC_W0_WB_B, HBG_STATUS_ENABLE);
+ word0 |= FIELD_PREP(HBG_TX_DESC_W0_IP_OFF_M, ip_offset);
+ if (likely(buffer->skb->ip_summed == CHECKSUM_PARTIAL)) {
+ word0 |= FIELD_PREP(HBG_TX_DESC_W0_l3_CS_B, HBG_STATUS_ENABLE);
+ word0 |= FIELD_PREP(HBG_TX_DESC_W0_l4_CS_B, HBG_STATUS_ENABLE);
+ }
+
+ tx_desc->word0 = word0;
+ tx_desc->word1 = FIELD_PREP(HBG_TX_DESC_W1_SEND_LEN_M,
+ buffer->skb->len);
+ tx_desc->word2 = buffer->skb_dma;
+ tx_desc->word3 = buffer->state_dma;
+}
+
+netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct hbg_ring *ring = netdev_get_tx_ring(netdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ /* This smp_load_acquire() pairs with smp_store_release() in
+ * hbg_napi_tx_recycle() called in tx interrupt handle process.
+ */
+ u32 ntc = smp_load_acquire(&ring->ntc);
+ struct hbg_buffer *buffer;
+ struct hbg_tx_desc tx_desc;
+ u32 ntu = ring->ntu;
+
+ if (unlikely(!skb->len ||
+ skb->len > hbg_spec_max_frame_len(priv, HBG_DIR_TX))) {
+ dev_kfree_skb_any(skb);
+ netdev->stats.tx_errors++;
+ return NETDEV_TX_OK;
+ }
+
+ if (!netif_subqueue_maybe_stop(netdev, 0,
+ hbg_queue_left_num(ntc, ntu, ring),
+ HBG_TX_STOP_THRS, HBG_TX_START_THRS))
+ return NETDEV_TX_BUSY;
+
+ buffer = &ring->queue[ntu];
+ buffer->skb = skb;
+ buffer->skb_len = skb->len;
+ if (unlikely(hbg_dma_map(buffer))) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ buffer->state = HBG_TX_STATE_START;
+ hbg_init_tx_desc(buffer, &tx_desc);
+ hbg_hw_set_tx_desc(priv, &tx_desc);
+
+ /* This smp_store_release() pairs with smp_load_acquire() in
+ * hbg_napi_tx_recycle() called in tx interrupt handle process.
+ */
+ smp_store_release(&ring->ntu, hbg_queue_next_prt(ntu, ring));
+ dev_sw_netstats_tx_add(netdev, 1, skb->len);
+ return NETDEV_TX_OK;
+}
+
+static void hbg_buffer_free_skb(struct hbg_buffer *buffer)
+{
+ if (unlikely(!buffer->skb))
+ return;
+
+ dev_kfree_skb_any(buffer->skb);
+ buffer->skb = NULL;
+}
+
+static int hbg_buffer_alloc_skb(struct hbg_buffer *buffer)
+{
+ u32 len = hbg_spec_max_frame_len(buffer->priv, buffer->dir);
+ struct hbg_priv *priv = buffer->priv;
+
+ buffer->skb = netdev_alloc_skb(priv->netdev, len);
+ if (unlikely(!buffer->skb))
+ return -ENOMEM;
+
+ buffer->skb_len = len;
+ memset(buffer->skb->data, 0, HBG_PACKET_HEAD_SIZE);
+ return 0;
+}
+
+static void hbg_buffer_free(struct hbg_buffer *buffer)
+{
+ hbg_dma_unmap(buffer);
+ hbg_buffer_free_skb(buffer);
+}
+
+static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
+{
+ struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
+ /* This smp_load_acquire() pairs with smp_store_release() in
+ * hbg_net_start_xmit() called in xmit process.
+ */
+ u32 ntu = smp_load_acquire(&ring->ntu);
+ struct hbg_priv *priv = ring->priv;
+ struct hbg_buffer *buffer;
+ u32 ntc = ring->ntc;
+ int packet_done = 0;
+
+	/* We need to do cleanup even if budget is 0.
+	 * Per the NAPI documentation, budget is for Rx,
+	 * so we hardcode the amount of work Tx NAPI does to 128.
+	 */
+ budget = 128;
+ while (packet_done < budget) {
+ if (unlikely(hbg_queue_is_empty(ntc, ntu, ring)))
+ break;
+
+		/* make sure the HW descriptor write has completed */
+ dma_rmb();
+
+ buffer = &ring->queue[ntc];
+ if (buffer->state != HBG_TX_STATE_COMPLETE)
+ break;
+
+ hbg_buffer_free(buffer);
+ ntc = hbg_queue_next_prt(ntc, ring);
+ packet_done++;
+ }
+
+ /* This smp_store_release() pairs with smp_load_acquire() in
+ * hbg_net_start_xmit() called in xmit process.
+ */
+ smp_store_release(&ring->ntc, ntc);
+ netif_wake_queue(priv->netdev);
+
+ if (likely(packet_done < budget &&
+ napi_complete_done(napi, packet_done)))
+ hbg_hw_irq_enable(priv, HBG_INT_MSK_TX_B, true);
+
+ return packet_done;
+}
+
+static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
+{
+ struct hbg_ring *ring = &priv->rx_ring;
+ struct hbg_buffer *buffer;
+ int ret;
+
+ if (hbg_queue_is_full(ring->ntc, ring->ntu, ring))
+ return 0;
+
+ buffer = &ring->queue[ring->ntu];
+ ret = hbg_buffer_alloc_skb(buffer);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hbg_dma_map(buffer);
+ if (unlikely(ret)) {
+ hbg_buffer_free_skb(buffer);
+ return ret;
+ }
+
+ hbg_hw_fill_buffer(priv, buffer->skb_dma);
+ hbg_queue_move_next(ntu, ring);
+ return 0;
+}
+
+static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
+ struct hbg_buffer *buffer)
+{
+ struct hbg_rx_desc *rx_desc;
+
+	/* make sure the HW descriptor write has completed */
+ dma_rmb();
+
+ dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma,
+ buffer->skb_len, DMA_FROM_DEVICE);
+
+ rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
+ return FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2) != 0;
+}
+
+static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
+ struct hbg_priv *priv = ring->priv;
+ struct hbg_rx_desc *rx_desc;
+ struct hbg_buffer *buffer;
+ u32 packet_done = 0;
+ u32 pkt_len;
+
+ while (packet_done < budget) {
+ if (unlikely(hbg_queue_is_empty(ring->ntc, ring->ntu, ring)))
+ break;
+
+ buffer = &ring->queue[ring->ntc];
+ if (unlikely(!buffer->skb))
+ goto next_buffer;
+
+ if (unlikely(!hbg_sync_data_from_hw(priv, buffer)))
+ break;
+ rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
+ pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);
+
+ hbg_dma_unmap(buffer);
+
+ skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
+ skb_put(buffer->skb, pkt_len);
+ buffer->skb->protocol = eth_type_trans(buffer->skb,
+ priv->netdev);
+
+ dev_sw_netstats_rx_add(priv->netdev, pkt_len);
+ napi_gro_receive(napi, buffer->skb);
+ buffer->skb = NULL;
+
+next_buffer:
+ hbg_rx_fill_one_buffer(priv);
+ hbg_queue_move_next(ntc, ring);
+ packet_done++;
+ }
+
+ if (likely(packet_done < budget &&
+ napi_complete_done(napi, packet_done)))
+ hbg_hw_irq_enable(priv, HBG_INT_MSK_RX_B, true);
+
+ return packet_done;
+}
+
+static void hbg_ring_uninit(struct hbg_ring *ring)
+{
+ struct hbg_buffer *buffer;
+ u32 i;
+
+ if (!ring->queue)
+ return;
+
+ napi_disable(&ring->napi);
+ netif_napi_del(&ring->napi);
+
+ for (i = 0; i < ring->len; i++) {
+ buffer = &ring->queue[i];
+ hbg_buffer_free(buffer);
+ buffer->ring = NULL;
+ buffer->priv = NULL;
+ }
+
+ dma_free_coherent(&ring->priv->pdev->dev,
+ ring->len * sizeof(*ring->queue),
+ ring->queue, ring->queue_dma);
+ ring->queue = NULL;
+ ring->queue_dma = 0;
+ ring->len = 0;
+ ring->priv = NULL;
+}
+
+static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring,
+ int (*napi_poll)(struct napi_struct *, int),
+ enum hbg_dir dir)
+{
+ struct hbg_buffer *buffer;
+ u32 i, len;
+
+ len = hbg_get_spec_fifo_max_num(priv, dir) + 1;
+ ring->queue = dma_alloc_coherent(&priv->pdev->dev,
+ len * sizeof(*ring->queue),
+ &ring->queue_dma, GFP_KERNEL);
+ if (!ring->queue)
+ return -ENOMEM;
+
+ for (i = 0; i < len; i++) {
+ buffer = &ring->queue[i];
+ buffer->skb_len = 0;
+ buffer->dir = dir;
+ buffer->ring = ring;
+ buffer->priv = priv;
+ buffer->state_dma = ring->queue_dma + (i * sizeof(*buffer));
+ }
+
+ ring->dir = dir;
+ ring->priv = priv;
+ ring->ntc = 0;
+ ring->ntu = 0;
+ ring->len = len;
+
+ if (dir == HBG_DIR_TX)
+ netif_napi_add_tx(priv->netdev, &ring->napi, napi_poll);
+ else
+ netif_napi_add(priv->netdev, &ring->napi, napi_poll);
+
+ napi_enable(&ring->napi);
+ return 0;
+}
+
+static int hbg_tx_ring_init(struct hbg_priv *priv)
+{
+ struct hbg_ring *tx_ring = &priv->tx_ring;
+
+ if (!tx_ring->tout_log_buf)
+ tx_ring->tout_log_buf = devm_kmalloc(&priv->pdev->dev,
+ HBG_TX_TIMEOUT_BUF_LEN,
+ GFP_KERNEL);
+
+ if (!tx_ring->tout_log_buf)
+ return -ENOMEM;
+
+ return hbg_ring_init(priv, tx_ring, hbg_napi_tx_recycle, HBG_DIR_TX);
+}
+
+static int hbg_rx_ring_init(struct hbg_priv *priv)
+{
+ int ret;
+ u32 i;
+
+ ret = hbg_ring_init(priv, &priv->rx_ring, hbg_napi_rx_poll, HBG_DIR_RX);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < priv->rx_ring.len - 1; i++) {
+ ret = hbg_rx_fill_one_buffer(priv);
+ if (ret) {
+ hbg_ring_uninit(&priv->rx_ring);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int hbg_txrx_init(struct hbg_priv *priv)
+{
+ int ret;
+
+ ret = hbg_tx_ring_init(priv);
+ if (ret) {
+ dev_err(&priv->pdev->dev,
+ "failed to init tx ring, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = hbg_rx_ring_init(priv);
+ if (ret) {
+ dev_err(&priv->pdev->dev,
+ "failed to init rx ring, ret = %d\n", ret);
+ hbg_ring_uninit(&priv->tx_ring);
+ }
+
+ return ret;
+}
+
+void hbg_txrx_uninit(struct hbg_priv *priv)
+{
+ hbg_ring_uninit(&priv->tx_ring);
+ hbg_ring_uninit(&priv->rx_ring);
+}
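
The ring macros at the top of this file implement classic head/tail accounting with one slot sacrificed. A worked example with assumed indices:

    /* len = 8, ntc (head) = 6, ntu (tail) = 2 */
    u32 used = (2 + 8 - 6) % 8;   /* hbg_queue_used_num() -> 4 in flight */
    u32 left = 8 - used - 1;      /* hbg_queue_left_num() -> 3 free; one slot
                                   * stays empty so a full ring is not
                                   * mistaken for an empty one */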
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
new file mode 100644
index 000000000000..2883a5899ae2
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_TXRX_H
+#define __HBG_TXRX_H
+
+#include <linux/etherdevice.h>
+#include "hbg_hw.h"
+
+static inline u32 hbg_spec_max_frame_len(struct hbg_priv *priv,
+ enum hbg_dir dir)
+{
+ return (dir == HBG_DIR_TX) ? priv->dev_specs.max_frame_len :
+ priv->dev_specs.rx_buf_size;
+}
+
+static inline u32 hbg_get_spec_fifo_max_num(struct hbg_priv *priv,
+ enum hbg_dir dir)
+{
+ return (dir == HBG_DIR_TX) ? priv->dev_specs.tx_fifo_num :
+ priv->dev_specs.rx_fifo_num;
+}
+
+static inline bool hbg_fifo_is_full(struct hbg_priv *priv, enum hbg_dir dir)
+{
+ return hbg_hw_get_fifo_used_num(priv, dir) >=
+ hbg_get_spec_fifo_max_num(priv, dir);
+}
+
+static inline u32 hbg_get_queue_used_num(struct hbg_ring *ring)
+{
+ return (ring->ntu + ring->len - ring->ntc) % ring->len;
+}
+
+netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+int hbg_txrx_init(struct hbg_priv *priv);
+void hbg_txrx_uninit(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index beb815e5289b..a376d4bdf281 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -1047,7 +1047,7 @@ MODULE_DEVICE_TABLE(of, hip04_mac_match);
static struct platform_driver hip04_mac_driver = {
.probe = hip04_mac_probe,
- .remove_new = hip04_remove,
+ .remove = hip04_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = hip04_mac_match,
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 2406263c9dd3..d244a40df430 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -959,7 +959,7 @@ static struct platform_driver hisi_femac_driver = {
.of_match_table = hisi_femac_match,
},
.probe = hisi_femac_drv_probe,
- .remove_new = hisi_femac_drv_remove,
+ .remove = hisi_femac_drv_remove,
#ifdef CONFIG_PM
.suspend = hisi_femac_drv_suspend,
.resume = hisi_femac_drv_resume,
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 1a972b093a42..e3e7f2270560 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1312,7 +1312,7 @@ static struct platform_driver hix5hd2_dev_driver = {
.of_match_table = hix5hd2_of_match,
},
.probe = hix5hd2_dev_probe,
- .remove_new = hix5hd2_dev_remove,
+ .remove = hix5hd2_dev_remove,
};
module_platform_driver(hix5hd2_dev_driver);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index d72657444ef3..2ae34d01fd36 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -512,7 +512,7 @@ struct hnae_ae_ops {
struct net_device_stats *net_stats);
void (*get_stats)(struct hnae_handle *handle, u64 *data);
void (*get_strings)(struct hnae_handle *handle,
- u32 stringset, u8 *data);
+ u32 stringset, u8 **data);
int (*get_sset_count)(struct hnae_handle *handle, int stringset);
void (*update_led_status)(struct hnae_handle *handle);
int (*set_led_id)(struct hnae_handle *handle,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index bc3e406f0139..8ce910f8d0cc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -730,15 +730,14 @@ static void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index);
}
-static void hns_ae_get_strings(struct hnae_handle *handle,
- u32 stringset, u8 *data)
+static void hns_ae_get_strings(struct hnae_handle *handle, u32 stringset,
+ u8 **data)
{
int port;
int idx;
struct hns_mac_cb *mac_cb;
struct hns_ppe_cb *ppe_cb;
struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
- u8 *p = data;
struct hnae_vf_cb *vf_cb;
assert(handle);
@@ -748,19 +747,14 @@ static void hns_ae_get_strings(struct hnae_handle *handle,
mac_cb = hns_get_mac_cb(handle);
ppe_cb = hns_get_ppe_cb(handle);
- for (idx = 0; idx < handle->q_num; idx++) {
- hns_rcb_get_strings(stringset, p, idx);
- p += ETH_GSTRING_LEN * hns_rcb_get_ring_sset_count(stringset);
- }
-
- hns_ppe_get_strings(ppe_cb, stringset, p);
- p += ETH_GSTRING_LEN * hns_ppe_get_sset_count(stringset);
+ for (idx = 0; idx < handle->q_num; idx++)
+ hns_rcb_get_strings(stringset, data, idx);
- hns_mac_get_strings(mac_cb, stringset, p);
- p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset);
+ hns_ppe_get_strings(ppe_cb, stringset, data);
+ hns_mac_get_strings(mac_cb, stringset, data);
if (mac_cb->mac_type == HNAE_PORT_SERVICE)
- hns_dsaf_get_strings(stringset, p, port, dsaf_dev);
+ hns_dsaf_get_strings(stringset, data, port, dsaf_dev);
}
static int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index bdb7afaabdd0..400933ca1a29 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -669,16 +669,15 @@ static void hns_gmac_get_stats(void *mac_drv, u64 *data)
}
}
-static void hns_gmac_get_strings(u32 stringset, u8 *data)
+static void hns_gmac_get_strings(u32 stringset, u8 **data)
{
- u8 *buff = data;
u32 i;
if (stringset != ETH_SS_STATS)
return;
for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++)
- ethtool_puts(&buff, g_gmac_stats_string[i].desc);
+ ethtool_puts(data, g_gmac_stats_string[i].desc);
}
static int hns_gmac_get_sset_count(int stringset)
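
With the u8 **data signature, each helper advances the shared string cursor via ethtool_puts()/ethtool_sprintf(), so callers no longer do the ETH_GSTRING_LEN pointer math by hand. A minimal sketch (function name hypothetical):

    static void example_get_strings(u8 **data)
    {
            ethtool_puts(data, "rx_packets");      /* writes one string slot */
            ethtool_sprintf(data, "q%d_bytes", 0); /* each call advances *data
                                                    * by ETH_GSTRING_LEN */
    }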
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 58baac7103b3..bc6b269be299 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -1090,28 +1090,24 @@ int hns_mac_init(struct dsaf_device *dsaf_dev)
u32 port_id;
int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
struct hns_mac_cb *mac_cb;
- struct fwnode_handle *child;
- device_for_each_child_node(dsaf_dev->dev, child) {
+ device_for_each_child_node_scoped(dsaf_dev->dev, child) {
ret = fwnode_property_read_u32(child, "reg", &port_id);
if (ret) {
- fwnode_handle_put(child);
dev_err(dsaf_dev->dev,
"get reg fail, ret=%d!\n", ret);
return ret;
}
if (port_id >= max_port_num) {
- fwnode_handle_put(child);
dev_err(dsaf_dev->dev,
"reg(%u) out of range!\n", port_id);
return -EINVAL;
}
mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
GFP_KERNEL);
- if (!mac_cb) {
- fwnode_handle_put(child);
+ if (!mac_cb)
return -ENOMEM;
- }
+
mac_cb->fw_port = child;
mac_cb->mac_id = (u8)port_id;
dsaf_dev->mac_cb[port_id] = mac_cb;
@@ -1194,8 +1190,7 @@ void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data)
mac_ctrl_drv->get_ethtool_stats(mac_ctrl_drv, data);
}
-void hns_mac_get_strings(struct hns_mac_cb *mac_cb,
- int stringset, u8 *data)
+void hns_mac_get_strings(struct hns_mac_cb *mac_cb, int stringset, u8 **data)
{
struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index edf0bcf76ac9..630f01cf7a71 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -378,7 +378,7 @@ struct mac_driver {
void (*get_regs)(void *mac_drv, void *data);
int (*get_regs_count)(void);
/* get strings name for ethtool statistic */
- void (*get_strings)(u32 stringset, u8 *data);
+ void (*get_strings)(u32 stringset, u8 **data);
/* get the number of strings*/
int (*get_sset_count)(int stringset);
@@ -445,7 +445,7 @@ int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
enum hnae_loop loop, int en);
void hns_mac_update_stats(struct hns_mac_cb *mac_cb);
void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data);
-void hns_mac_get_strings(struct hns_mac_cb *mac_cb, int stringset, u8 *data);
+void hns_mac_get_strings(struct hns_mac_cb *mac_cb, int stringset, u8 **data);
int hns_mac_get_sset_count(struct hns_mac_cb *mac_cb, int stringset);
void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data);
int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 1b67da1f6fa8..851490346261 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -2590,55 +2590,34 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
p[i] = 0xdddddddd;
}
-static char *hns_dsaf_get_node_stats_strings(char *data, int node,
- struct dsaf_device *dsaf_dev)
+static void hns_dsaf_get_node_stats_strings(u8 **data, int node,
+ struct dsaf_device *dsaf_dev)
{
- char *buff = data;
- int i;
bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+ int i;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_manage_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkt_id", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pause_frame", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_release_buf_num", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_sbm_drop_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_crc_false_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_bp_drop_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_lookup_rslt_drop_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_local_rslt_fail_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_vlan_drop_pkts", node);
- buff += ETH_GSTRING_LEN;
- snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node);
- buff += ETH_GSTRING_LEN;
+ ethtool_sprintf(data, "innod%d_pad_drop_pkts", node);
+ ethtool_sprintf(data, "innod%d_manage_pkts", node);
+ ethtool_sprintf(data, "innod%d_rx_pkts", node);
+ ethtool_sprintf(data, "innod%d_rx_pkt_id", node);
+ ethtool_sprintf(data, "innod%d_rx_pause_frame", node);
+ ethtool_sprintf(data, "innod%d_release_buf_num", node);
+ ethtool_sprintf(data, "innod%d_sbm_drop_pkts", node);
+ ethtool_sprintf(data, "innod%d_crc_false_pkts", node);
+ ethtool_sprintf(data, "innod%d_bp_drop_pkts", node);
+ ethtool_sprintf(data, "innod%d_lookup_rslt_drop_pkts", node);
+ ethtool_sprintf(data, "innod%d_local_rslt_fail_pkts", node);
+ ethtool_sprintf(data, "innod%d_vlan_drop_pkts", node);
+ ethtool_sprintf(data, "innod%d_stp_drop_pkts", node);
if (node < DSAF_SERVICE_NW_NUM && !is_ver1) {
for (i = 0; i < DSAF_PRIO_NR; i++) {
- snprintf(buff + 0 * ETH_GSTRING_LEN * DSAF_PRIO_NR,
- ETH_GSTRING_LEN, "inod%d_pfc_prio%d_pkts",
- node, i);
- snprintf(buff + 1 * ETH_GSTRING_LEN * DSAF_PRIO_NR,
- ETH_GSTRING_LEN, "onod%d_pfc_prio%d_pkts",
- node, i);
- buff += ETH_GSTRING_LEN;
+ ethtool_sprintf(data, "inod%d_pfc_prio%d_pkts", node,
+ i);
+ ethtool_sprintf(data, "onod%d_pfc_prio%d_pkts", node,
+ i);
}
- buff += 1 * DSAF_PRIO_NR * ETH_GSTRING_LEN;
}
- snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node);
- buff += ETH_GSTRING_LEN;
-
- return buff;
+ ethtool_sprintf(data, "onnod%d_tx_pkts", node);
}
static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data,
@@ -2720,21 +2699,20 @@ int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset)
*@port:port index
*@dsaf_dev: dsaf device
*/
-void hns_dsaf_get_strings(int stringset, u8 *data, int port,
+void hns_dsaf_get_strings(int stringset, u8 **data, int port,
struct dsaf_device *dsaf_dev)
{
- char *buff = (char *)data;
int node = port;
if (stringset != ETH_SS_STATS)
return;
/* for ge/xge node info */
- buff = hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev);
+ hns_dsaf_get_node_stats_strings(data, node, dsaf_dev);
/* for ppe node info */
node = port + DSAF_PPE_INODE_BASE;
- (void)hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev);
+ hns_dsaf_get_node_stats_strings(data, node, dsaf_dev);
}
/**
@@ -3031,7 +3009,7 @@ MODULE_DEVICE_TABLE(of, g_dsaf_match);
static struct platform_driver g_dsaf_driver = {
.probe = hns_dsaf_probe,
- .remove_new = hns_dsaf_remove,
+ .remove = hns_dsaf_remove,
.driver = {
.name = DSAF_DRV_NAME,
.of_match_table = g_dsaf_match,
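ethtool_sprintf() formats into one ETH_GSTRING_LEN slot and advances the cursor itself, which is what removes all of the `buff += ETH_GSTRING_LEN` bookkeeping above, along with the fragile prio-loop offset arithmetic. A minimal sketch of the helper's contract, with made-up stat names:

    #include <linux/ethtool.h>

    static void fill_node_strings(u8 **data, int node)
    {
            /* each call consumes exactly one ETH_GSTRING_LEN slot
             * and leaves *data pointing at the next one
             */
            ethtool_sprintf(data, "innod%d_rx_pkts", node);
            ethtool_sprintf(data, "onnod%d_tx_pkts", node);
    }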
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 5526a10caac5..0eb03dff1a8b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -442,7 +442,7 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 inode_num);
int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset);
void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port);
-void hns_dsaf_get_strings(int stringset, u8 *data, int port,
+void hns_dsaf_get_strings(int stringset, u8 **data, int port,
struct dsaf_device *dsaf_dev);
void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index a08d1f0a5a16..5013beb4d282 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -457,24 +457,23 @@ int hns_ppe_get_regs_count(void)
* @stringset: string set type
* @data: output string
*/
-void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data)
+void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 **data)
{
int index = ppe_cb->index;
- u8 *buff = data;
-
- ethtool_sprintf(&buff, "ppe%d_rx_sw_pkt", index);
- ethtool_sprintf(&buff, "ppe%d_rx_pkt_ok", index);
- ethtool_sprintf(&buff, "ppe%d_rx_drop_pkt_no_bd", index);
- ethtool_sprintf(&buff, "ppe%d_rx_alloc_buf_fail", index);
- ethtool_sprintf(&buff, "ppe%d_rx_alloc_buf_wait", index);
- ethtool_sprintf(&buff, "ppe%d_rx_pkt_drop_no_buf", index);
- ethtool_sprintf(&buff, "ppe%d_rx_pkt_err_fifo_full", index);
-
- ethtool_sprintf(&buff, "ppe%d_tx_bd", index);
- ethtool_sprintf(&buff, "ppe%d_tx_pkt", index);
- ethtool_sprintf(&buff, "ppe%d_tx_pkt_ok", index);
- ethtool_sprintf(&buff, "ppe%d_tx_pkt_err_fifo_empty", index);
- ethtool_sprintf(&buff, "ppe%d_tx_pkt_err_csum_fail", index);
+
+ ethtool_sprintf(data, "ppe%d_rx_sw_pkt", index);
+ ethtool_sprintf(data, "ppe%d_rx_pkt_ok", index);
+ ethtool_sprintf(data, "ppe%d_rx_drop_pkt_no_bd", index);
+ ethtool_sprintf(data, "ppe%d_rx_alloc_buf_fail", index);
+ ethtool_sprintf(data, "ppe%d_rx_alloc_buf_wait", index);
+ ethtool_sprintf(data, "ppe%d_rx_pkt_drop_no_buf", index);
+ ethtool_sprintf(data, "ppe%d_rx_pkt_err_fifo_full", index);
+
+ ethtool_sprintf(data, "ppe%d_tx_bd", index);
+ ethtool_sprintf(data, "ppe%d_tx_pkt", index);
+ ethtool_sprintf(data, "ppe%d_tx_pkt_ok", index);
+ ethtool_sprintf(data, "ppe%d_tx_pkt_err_fifo_empty", index);
+ ethtool_sprintf(data, "ppe%d_tx_pkt_err_csum_fail", index);
}
void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index 7e00231c1acf..602c8e971fe4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -109,7 +109,7 @@ int hns_ppe_get_sset_count(int stringset);
int hns_ppe_get_regs_count(void);
void hns_ppe_get_regs(struct hns_ppe_cb *ppe_cb, void *data);
-void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data);
+void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 **data);
void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data);
void hns_ppe_set_tso_enable(struct hns_ppe_cb *ppe_cb, u32 value);
void hns_ppe_set_rss_key(struct hns_ppe_cb *ppe_cb,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 93344563a259..46af467aa596 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -923,44 +923,42 @@ int hns_rcb_get_ring_regs_count(void)
*@data:strings name value
*@index:queue index
*/
-void hns_rcb_get_strings(int stringset, u8 *data, int index)
+void hns_rcb_get_strings(int stringset, u8 **data, int index)
{
- u8 *buff = data;
-
if (stringset != ETH_SS_STATS)
return;
- ethtool_sprintf(&buff, "tx_ring%d_rcb_pkt_num", index);
- ethtool_sprintf(&buff, "tx_ring%d_ppe_tx_pkt_num", index);
- ethtool_sprintf(&buff, "tx_ring%d_ppe_drop_pkt_num", index);
- ethtool_sprintf(&buff, "tx_ring%d_fbd_num", index);
-
- ethtool_sprintf(&buff, "tx_ring%d_pkt_num", index);
- ethtool_sprintf(&buff, "tx_ring%d_bytes", index);
- ethtool_sprintf(&buff, "tx_ring%d_err_cnt", index);
- ethtool_sprintf(&buff, "tx_ring%d_io_err", index);
- ethtool_sprintf(&buff, "tx_ring%d_sw_err", index);
- ethtool_sprintf(&buff, "tx_ring%d_seg_pkt", index);
- ethtool_sprintf(&buff, "tx_ring%d_restart_queue", index);
- ethtool_sprintf(&buff, "tx_ring%d_tx_busy", index);
-
- ethtool_sprintf(&buff, "rx_ring%d_rcb_pkt_num", index);
- ethtool_sprintf(&buff, "rx_ring%d_ppe_pkt_num", index);
- ethtool_sprintf(&buff, "rx_ring%d_ppe_drop_pkt_num", index);
- ethtool_sprintf(&buff, "rx_ring%d_fbd_num", index);
-
- ethtool_sprintf(&buff, "rx_ring%d_pkt_num", index);
- ethtool_sprintf(&buff, "rx_ring%d_bytes", index);
- ethtool_sprintf(&buff, "rx_ring%d_err_cnt", index);
- ethtool_sprintf(&buff, "rx_ring%d_io_err", index);
- ethtool_sprintf(&buff, "rx_ring%d_sw_err", index);
- ethtool_sprintf(&buff, "rx_ring%d_seg_pkt", index);
- ethtool_sprintf(&buff, "rx_ring%d_reuse_pg", index);
- ethtool_sprintf(&buff, "rx_ring%d_len_err", index);
- ethtool_sprintf(&buff, "rx_ring%d_non_vld_desc_err", index);
- ethtool_sprintf(&buff, "rx_ring%d_bd_num_err", index);
- ethtool_sprintf(&buff, "rx_ring%d_l2_err", index);
- ethtool_sprintf(&buff, "rx_ring%d_l3l4csum_err", index);
+ ethtool_sprintf(data, "tx_ring%d_rcb_pkt_num", index);
+ ethtool_sprintf(data, "tx_ring%d_ppe_tx_pkt_num", index);
+ ethtool_sprintf(data, "tx_ring%d_ppe_drop_pkt_num", index);
+ ethtool_sprintf(data, "tx_ring%d_fbd_num", index);
+
+ ethtool_sprintf(data, "tx_ring%d_pkt_num", index);
+ ethtool_sprintf(data, "tx_ring%d_bytes", index);
+ ethtool_sprintf(data, "tx_ring%d_err_cnt", index);
+ ethtool_sprintf(data, "tx_ring%d_io_err", index);
+ ethtool_sprintf(data, "tx_ring%d_sw_err", index);
+ ethtool_sprintf(data, "tx_ring%d_seg_pkt", index);
+ ethtool_sprintf(data, "tx_ring%d_restart_queue", index);
+ ethtool_sprintf(data, "tx_ring%d_tx_busy", index);
+
+ ethtool_sprintf(data, "rx_ring%d_rcb_pkt_num", index);
+ ethtool_sprintf(data, "rx_ring%d_ppe_pkt_num", index);
+ ethtool_sprintf(data, "rx_ring%d_ppe_drop_pkt_num", index);
+ ethtool_sprintf(data, "rx_ring%d_fbd_num", index);
+
+ ethtool_sprintf(data, "rx_ring%d_pkt_num", index);
+ ethtool_sprintf(data, "rx_ring%d_bytes", index);
+ ethtool_sprintf(data, "rx_ring%d_err_cnt", index);
+ ethtool_sprintf(data, "rx_ring%d_io_err", index);
+ ethtool_sprintf(data, "rx_ring%d_sw_err", index);
+ ethtool_sprintf(data, "rx_ring%d_seg_pkt", index);
+ ethtool_sprintf(data, "rx_ring%d_reuse_pg", index);
+ ethtool_sprintf(data, "rx_ring%d_len_err", index);
+ ethtool_sprintf(data, "rx_ring%d_non_vld_desc_err", index);
+ ethtool_sprintf(data, "rx_ring%d_bd_num_err", index);
+ ethtool_sprintf(data, "rx_ring%d_l2_err", index);
+ ethtool_sprintf(data, "rx_ring%d_l3l4csum_err", index);
}
void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index c1e9b6997853..0f4cc184ef39 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -157,7 +157,7 @@ int hns_rcb_get_ring_regs_count(void);
void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data);
-void hns_rcb_get_strings(int stringset, u8 *data, int index);
+void hns_rcb_get_strings(int stringset, u8 **data, int index);
void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size);
void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index c58833eb4830..dbc44c2c26c2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -743,16 +743,15 @@ static void hns_xgmac_get_stats(void *mac_drv, u64 *data)
*@stringset: type of values in data
*@data:data for value of string name
*/
-static void hns_xgmac_get_strings(u32 stringset, u8 *data)
+static void hns_xgmac_get_strings(u32 stringset, u8 **data)
{
- u8 *buff = data;
u32 i;
if (stringset != ETH_SS_STATS)
return;
for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++)
- ethtool_puts(&buff, g_xgmac_stats_string[i].desc);
+ ethtool_puts(data, g_xgmac_stats_string[i].desc);
}
/**
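Where the name needs no formatting, ethtool_puts() is used instead of ethtool_sprintf(); besides being cheaper, it avoids passing a plain string as a format argument. A sketch with an assumed string table:

    #include <linux/ethtool.h>
    #include <linux/kernel.h>

    static const char * const demo_stats[] = { "rx_pkts", "tx_pkts" };

    static void fill_fixed_strings(u8 **data)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(demo_stats); i++)
                    ethtool_puts(data, demo_stats[i]);
    }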
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index fd32e15cadcb..42bb341fd80b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2439,7 +2439,7 @@ static struct platform_driver hns_nic_dev_driver = {
.acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
},
.probe = hns_nic_dev_probe,
- .remove_new = hns_nic_dev_remove,
+ .remove = hns_nic_dev_remove,
};
module_platform_driver(hns_nic_dev_driver);
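.remove_new was only ever a transitional alias: struct platform_driver's .remove now carries the void-returning prototype itself, so these hunks are a mechanical rename repeated across the drivers in this series. Sketch of the resulting shape (driver name hypothetical):

    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static void demo_remove(struct platform_device *pdev)
    {
            /* void return: remove callbacks cannot fail */
    }

    static struct platform_driver demo_driver = {
            .probe  = demo_probe,
            .remove = demo_remove,  /* formerly .remove_new */
            .driver = { .name = "demo" },
    };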
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index a5bb306b2cf1..6c458f037262 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -903,7 +903,6 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
- u8 *buff = data;
if (!h->dev->ops->get_strings) {
netdev_err(netdev, "h->dev->ops->get_strings is null!\n");
@@ -912,43 +911,43 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
if (stringset == ETH_SS_TEST) {
if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
- ethtool_puts(&buff,
+ ethtool_puts(&data,
hns_nic_test_strs[MAC_INTERNALLOOP_MAC]);
- ethtool_puts(&buff, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES]);
+ ethtool_puts(&data, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES]);
if ((netdev->phydev) && (!netdev->phydev->is_c45))
- ethtool_puts(&buff,
+ ethtool_puts(&data,
hns_nic_test_strs[MAC_INTERNALLOOP_PHY]);
} else {
- ethtool_puts(&buff, "rx_packets");
- ethtool_puts(&buff, "tx_packets");
- ethtool_puts(&buff, "rx_bytes");
- ethtool_puts(&buff, "tx_bytes");
- ethtool_puts(&buff, "rx_errors");
- ethtool_puts(&buff, "tx_errors");
- ethtool_puts(&buff, "rx_dropped");
- ethtool_puts(&buff, "tx_dropped");
- ethtool_puts(&buff, "multicast");
- ethtool_puts(&buff, "collisions");
- ethtool_puts(&buff, "rx_over_errors");
- ethtool_puts(&buff, "rx_crc_errors");
- ethtool_puts(&buff, "rx_frame_errors");
- ethtool_puts(&buff, "rx_fifo_errors");
- ethtool_puts(&buff, "rx_missed_errors");
- ethtool_puts(&buff, "tx_aborted_errors");
- ethtool_puts(&buff, "tx_carrier_errors");
- ethtool_puts(&buff, "tx_fifo_errors");
- ethtool_puts(&buff, "tx_heartbeat_errors");
- ethtool_puts(&buff, "rx_length_errors");
- ethtool_puts(&buff, "tx_window_errors");
- ethtool_puts(&buff, "rx_compressed");
- ethtool_puts(&buff, "tx_compressed");
- ethtool_puts(&buff, "netdev_rx_dropped");
- ethtool_puts(&buff, "netdev_tx_dropped");
-
- ethtool_puts(&buff, "netdev_tx_timeout");
-
- h->dev->ops->get_strings(h, stringset, buff);
+ ethtool_puts(&data, "rx_packets");
+ ethtool_puts(&data, "tx_packets");
+ ethtool_puts(&data, "rx_bytes");
+ ethtool_puts(&data, "tx_bytes");
+ ethtool_puts(&data, "rx_errors");
+ ethtool_puts(&data, "tx_errors");
+ ethtool_puts(&data, "rx_dropped");
+ ethtool_puts(&data, "tx_dropped");
+ ethtool_puts(&data, "multicast");
+ ethtool_puts(&data, "collisions");
+ ethtool_puts(&data, "rx_over_errors");
+ ethtool_puts(&data, "rx_crc_errors");
+ ethtool_puts(&data, "rx_frame_errors");
+ ethtool_puts(&data, "rx_fifo_errors");
+ ethtool_puts(&data, "rx_missed_errors");
+ ethtool_puts(&data, "tx_aborted_errors");
+ ethtool_puts(&data, "tx_carrier_errors");
+ ethtool_puts(&data, "tx_fifo_errors");
+ ethtool_puts(&data, "tx_heartbeat_errors");
+ ethtool_puts(&data, "rx_length_errors");
+ ethtool_puts(&data, "tx_window_errors");
+ ethtool_puts(&data, "rx_compressed");
+ ethtool_puts(&data, "tx_compressed");
+ ethtool_puts(&data, "netdev_rx_dropped");
+ ethtool_puts(&data, "netdev_tx_dropped");
+
+ ethtool_puts(&data, "netdev_tx_timeout");
+
+ h->dev->ops->get_strings(h, stringset, &data);
}
}
@@ -970,7 +969,7 @@ static int hns_get_sset_count(struct net_device *netdev, int stringset)
return -EOPNOTSUPP;
}
if (stringset == ETH_SS_TEST) {
- u32 cnt = (sizeof(hns_nic_test_strs) / ETH_GSTRING_LEN);
+ u32 cnt = ARRAY_SIZE(hns_nic_test_strs);
if (priv->ae_handle->phy_if == PHY_INTERFACE_MODE_XGMII)
cnt--;
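ARRAY_SIZE() states the intent directly; the sizeof division above only worked because hns_nic_test_strs happens to be a `char [][ETH_GSTRING_LEN]` table. Sketch:

    #include <linux/ethtool.h>   /* ETH_GSTRING_LEN */
    #include <linux/kernel.h>    /* ARRAY_SIZE */

    static const char demo_test_strs[][ETH_GSTRING_LEN] = {
            "loopback_mac", "loopback_serdes", "loopback_phy",
    };

    /* ARRAY_SIZE(demo_test_strs) == 3 regardless of the row width */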
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 27dbe367f3d3..710a8f9f2248 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -677,7 +677,7 @@ struct hnae3_ae_ops {
void (*get_mac_stats)(struct hnae3_handle *handle,
struct hns3_mac_stats *mac_stats);
void (*get_strings)(struct hnae3_handle *handle,
- u32 stringset, u8 *data);
+ u32 stringset, u8 **data);
int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
void (*get_regs)(struct hnae3_handle *handle, u32 *version,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
index 2b31188ff555..f9a3d6fc4416 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
@@ -36,27 +36,22 @@ int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle)
}
EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_sset_count);
-u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+void hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 **data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- u8 *buff = data;
u16 i;
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_comm_tqp *tqp =
container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd", tqp->index);
- buff += ETH_GSTRING_LEN;
+ ethtool_sprintf(data, "txq%u_pktnum_rcd", tqp->index);
}
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_comm_tqp *tqp =
container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd", tqp->index);
- buff += ETH_GSTRING_LEN;
+ ethtool_sprintf(data, "rxq%u_pktnum_rcd", tqp->index);
}
-
- return buff;
}
EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_strings);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
index a46350162ee8..b9ff424c0bc2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
@@ -32,7 +32,7 @@ struct hclge_comm_tqp {
u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data);
int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle);
-u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data);
+void hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 **data);
void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle);
int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
struct hclge_comm_hw *hw);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 4cbc4d069a1f..43377a7b2426 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -4448,7 +4448,7 @@ static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets,
rx_group->total_bytes, &sample);
- net_dim(&rx_group->dim, sample);
+ net_dim(&rx_group->dim, &sample);
}
static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
@@ -4461,7 +4461,7 @@ static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets,
tx_group->total_bytes, &sample);
- net_dim(&tx_group->dim, sample);
+ net_dim(&tx_group->dim, &sample);
}
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
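net_dim() now takes the sample as a const pointer, so the struct dim_sample filled by dim_update_sample() is no longer copied by value on every poll. A hedged sketch of the call-site pattern:

    #include <linux/dim.h>

    static void demo_update_dim(struct dim *dim, u16 events, u64 pkts,
                                u64 bytes)
    {
            struct dim_sample sample = {};

            dim_update_sample(events, pkts, bytes, &sample);
            net_dim(dim, &sample);  /* by pointer after the API change */
    }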
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index b1e988347347..b771a2daba43 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -509,54 +509,37 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset)
}
}
-static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
- u32 stat_count, u32 num_tqps, const char *prefix)
+static void hns3_update_strings(u8 **data, const struct hns3_stats *stats,
+ u32 stat_count, u32 num_tqps,
+ const char *prefix)
{
-#define MAX_PREFIX_SIZE (6 + 4)
- u32 size_left;
u32 i, j;
- u32 n1;
- for (i = 0; i < num_tqps; i++) {
- for (j = 0; j < stat_count; j++) {
- data[ETH_GSTRING_LEN - 1] = '\0';
-
- /* first, prepend the prefix string */
- n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%u_",
- prefix, i);
- size_left = (ETH_GSTRING_LEN - 1) - n1;
-
- /* now, concatenate the stats string to it */
- strncat(data, stats[j].stats_string, size_left);
- data += ETH_GSTRING_LEN;
- }
- }
-
- return data;
+ for (i = 0; i < num_tqps; i++)
+ for (j = 0; j < stat_count; j++)
+ ethtool_sprintf(data, "%s%u_%s", prefix, i,
+ stats[j].stats_string);
}
-static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
+static void hns3_get_strings_tqps(struct hnae3_handle *handle, u8 **data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
const char tx_prefix[] = "txq";
const char rx_prefix[] = "rxq";
/* get strings for Tx */
- data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
- kinfo->num_tqps, tx_prefix);
+ hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
+ kinfo->num_tqps, tx_prefix);
/* get strings for Rx */
- data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
- kinfo->num_tqps, rx_prefix);
-
- return data;
+ hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
+ kinfo->num_tqps, rx_prefix);
}
static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops = h->ae_algo->ops;
- char *buff = (char *)data;
int i;
if (!ops->get_strings)
@@ -564,18 +547,15 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
- buff = hns3_get_strings_tqps(h, buff);
- ops->get_strings(h, stringset, (u8 *)buff);
+ hns3_get_strings_tqps(h, &data);
+ ops->get_strings(h, stringset, &data);
break;
case ETH_SS_TEST:
- ops->get_strings(h, stringset, data);
+ ops->get_strings(h, stringset, &data);
break;
case ETH_SS_PRIV_FLAGS:
- for (i = 0; i < HNS3_PRIV_FLAGS_LEN; i++) {
- snprintf(buff, ETH_GSTRING_LEN, "%s",
- hns3_priv_flags[i].name);
- buff += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < HNS3_PRIV_FLAGS_LEN; i++)
+ ethtool_puts(&data, hns3_priv_flags[i].name);
break;
default:
break;
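With every layer now taking `u8 **data`, the top-level .get_strings simply threads one cursor through the queue, AE and priv-flags sections; that single convention is what made the MAX_PREFIX_SIZE/strncat logic above deletable. Sketch of the composition, helpers hypothetical:

    #include <linux/ethtool.h>

    static void fill_queue_strings(u8 **data, u32 num_tqps)
    {
            u32 i;

            for (i = 0; i < num_tqps; i++)
                    ethtool_sprintf(data, "txq%u_pkts", i);
    }

    static void demo_get_strings(u8 *data, u32 num_tqps)
    {
            fill_queue_strings(&data, num_tqps);    /* cursor advances */
            ethtool_puts(&data, "example_priv_flag");
    }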
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index bd86efd92a5a..05942fa78b11 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -594,25 +594,21 @@ static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
return buf;
}
-static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
- const struct hclge_comm_stats_str strs[],
- int size, u8 *data)
+static void hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
+ const struct hclge_comm_stats_str strs[],
+ int size, u8 **data)
{
- char *buff = (char *)data;
u32 i;
if (stringset != ETH_SS_STATS)
- return buff;
+ return;
for (i = 0; i < size; i++) {
if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
continue;
- snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
- buff = buff + ETH_GSTRING_LEN;
+ ethtool_puts(data, strs[i].desc);
}
-
- return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
@@ -717,44 +713,38 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
- u8 *data)
+ u8 **data)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- u8 *p = (char *)data;
+ const char *str;
int size;
if (stringset == ETH_SS_STATS) {
size = ARRAY_SIZE(g_mac_stats_string);
- p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
- size, p);
- p = hclge_comm_tqps_get_strings(handle, p);
+ hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
+ size, data);
+ hclge_comm_tqps_get_strings(handle, data);
} else if (stringset == ETH_SS_TEST) {
if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
- memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL];
+ ethtool_puts(data, str);
}
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
- memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = hns3_nic_test_strs[HNAE3_LOOP_APP];
+ ethtool_puts(data, str);
}
if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
- memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES];
+ ethtool_puts(data, str);
}
if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
- memcpy(p,
- hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES];
+ ethtool_puts(data, str);
}
if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
- memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = hns3_nic_test_strs[HNAE3_LOOP_PHY];
+ ethtool_puts(data, str);
}
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 094a7c7b5592..2f6ffb88e700 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -130,12 +130,10 @@ static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
}
static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
- u8 *data)
+ u8 **data)
{
- u8 *p = (char *)data;
-
if (strset == ETH_SS_STATS)
- p = hclge_comm_tqps_get_strings(handle, p);
+ hclge_comm_tqps_get_strings(handle, data);
}
static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 8a047145f0c5..a1aa6c1f966e 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -636,7 +636,7 @@ MODULE_DEVICE_TABLE(acpi, hns_mdio_acpi_match);
static struct platform_driver hns_mdio_driver = {
.probe = hns_mdio_probe,
- .remove_new = hns_mdio_remove,
+ .remove = hns_mdio_remove,
.driver = {
.name = MDIO_DRV_NAME,
.of_match_table = hns_mdio_match,
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 813403c2628f..baa598988f47 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -168,7 +168,7 @@ static void sni_82596_driver_remove(struct platform_device *pdev)
static struct platform_driver sni_82596_driver = {
.probe = sni_82596_probe,
- .remove_new = sni_82596_driver_remove,
+ .remove = sni_82596_driver_remove,
.driver = {
.name = sni_82596_string,
},
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index c41c3f1cc506..9b006bc353a1 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -121,7 +121,7 @@ static struct platform_driver ehea_driver = {
.of_match_table = ehea_device_table,
},
.probe = ehea_probe_adapter,
- .remove_new = ehea_remove,
+ .remove = ehea_remove,
};
void ehea_dump(void *adr, int len, char *msg)
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index dac570f3c110..25b8a3556004 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1727,6 +1727,7 @@ static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
/* NAPI poll context */
static int emac_poll_rx(void *param, int budget)
{
+ LIST_HEAD(rx_list);
struct emac_instance *dev = param;
int slot = dev->rx_slot, received = 0;
@@ -1783,8 +1784,7 @@ static int emac_poll_rx(void *param, int budget)
skb->protocol = eth_type_trans(skb, dev->ndev);
emac_rx_csum(dev, skb, ctrl);
- if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
- ++dev->estats.rx_dropped_stack;
+ list_add_tail(&skb->list, &rx_list);
next:
++dev->stats.rx_packets;
skip:
@@ -1828,6 +1828,8 @@ static int emac_poll_rx(void *param, int budget)
goto next;
}
+ netif_receive_skb_list(&rx_list);
+
if (received) {
DBG2(dev, "rx %d BDs" NL, received);
dev->rx_slot = slot;
@@ -2935,9 +2937,12 @@ static int emac_init_config(struct emac_instance *dev)
/* Read MAC-address */
err = of_get_ethdev_address(np, dev->ndev);
- if (err)
- return dev_err_probe(&dev->ofdev->dev, err,
- "Can't get valid [local-]mac-address from OF !\n");
+ if (err == -EPROBE_DEFER)
+ return err;
+ if (err) {
+ dev_warn(&dev->ofdev->dev, "Can't get valid mac-address. Generating random.\n");
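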
+ eth_hw_addr_random(dev->ndev);
+ }
/* IAHT and GAHT filter parameterization */
if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
@@ -3019,8 +3024,14 @@ static int emac_probe(struct platform_device *ofdev)
SET_NETDEV_DEV(ndev, &ofdev->dev);
/* Initialize some embedded data structures */
- mutex_init(&dev->mdio_lock);
- mutex_init(&dev->link_lock);
+ err = devm_mutex_init(&ofdev->dev, &dev->mdio_lock);
+ if (err)
+ goto err_gone;
+
+ err = devm_mutex_init(&ofdev->dev, &dev->link_lock);
+ if (err)
+ goto err_gone;
+
spin_lock_init(&dev->lock);
INIT_WORK(&dev->reset_work, emac_reset_work);
@@ -3029,15 +3040,8 @@ static int emac_probe(struct platform_device *ofdev)
if (err)
goto err_gone;
- /* Get interrupts. EMAC irq is mandatory */
- dev->emac_irq = irq_of_parse_and_map(np, 0);
- if (!dev->emac_irq) {
- printk(KERN_ERR "%pOF: Can't map main interrupt\n", np);
- err = -ENODEV;
- goto err_gone;
- }
-
/* Setup error IRQ handler */
+ dev->emac_irq = platform_get_irq(ofdev, 0);
err = devm_request_irq(&ofdev->dev, dev->emac_irq, emac_irq, 0, "EMAC",
dev);
if (err) {
@@ -3048,12 +3052,10 @@ static int emac_probe(struct platform_device *ofdev)
ndev->irq = dev->emac_irq;
- /* Map EMAC regs */
- // TODO : platform_get_resource() and devm_ioremap_resource()
- dev->emacp = devm_of_iomap(&ofdev->dev, np, 0, NULL);
- if (!dev->emacp) {
+ dev->emacp = devm_platform_ioremap_resource(ofdev, 0);
+ if (IS_ERR(dev->emacp)) {
dev_err(&ofdev->dev, "can't map device registers");
- err = -ENOMEM;
+ err = PTR_ERR(dev->emacp);
goto err_gone;
}
@@ -3241,7 +3243,7 @@ static struct platform_driver emac_driver = {
.of_match_table = emac_match,
},
.probe = emac_probe,
- .remove_new = emac_remove,
+ .remove = emac_remove,
};
static void __init emac_make_bootlist(void)
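Collecting completed skbs on a local list and flushing once with netif_receive_skb_list() amortizes the per-packet cost of entering the stack; the old rx_dropped_stack counter goes away with it because the list API returns no per-skb verdict. A minimal sketch, with the descriptor handling elided behind a callback:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static int demo_poll_rx(struct sk_buff *(*next_skb)(void), int budget)
    {
            LIST_HEAD(rx_list);
            int received = 0;

            while (received < budget) {
                    struct sk_buff *skb = next_skb();

                    if (!skb)
                            break;
                    list_add_tail(&skb->list, &rx_list);
                    received++;
            }

            netif_receive_skb_list(&rx_list);       /* one batched hand-off */
            return received;
    }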
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 99d5f83f7c60..7d70056e9008 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -524,7 +524,8 @@ static int mal_probe(struct platform_device *ofdev)
unsigned long irqflags;
irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;
- mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
+ mal = devm_kzalloc(&ofdev->dev, sizeof(struct mal_instance),
+ GFP_KERNEL);
if (!mal)
return -ENOMEM;
@@ -539,8 +540,7 @@ static int mal_probe(struct platform_device *ofdev)
printk(KERN_ERR
"mal%d: can't find MAL num-tx-chans property!\n",
index);
- err = -ENODEV;
- goto fail;
+ return -ENODEV;
}
mal->num_tx_chans = prop[0];
@@ -549,8 +549,7 @@ static int mal_probe(struct platform_device *ofdev)
printk(KERN_ERR
"mal%d: can't find MAL num-rx-chans property!\n",
index);
- err = -ENODEV;
- goto fail;
+ return -ENODEV;
}
mal->num_rx_chans = prop[0];
@@ -558,15 +557,13 @@ static int mal_probe(struct platform_device *ofdev)
if (dcr_base == 0) {
printk(KERN_ERR
"mal%d: can't find DCR resource!\n", index);
- err = -ENODEV;
- goto fail;
+ return -ENODEV;
}
mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
if (!DCR_MAP_OK(mal->dcr_host)) {
printk(KERN_ERR
"mal%d: failed to map DCRs !\n", index);
- err = -ENODEV;
- goto fail;
+ return -ENODEV;
}
if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
@@ -582,25 +579,6 @@ static int mal_probe(struct platform_device *ofdev)
#endif
}
- mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
- mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);
-
- if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
- mal->txde_irq = mal->rxde_irq = mal->serr_irq;
- } else {
- mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
- mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
- }
-
- if (!mal->txeob_irq || !mal->rxeob_irq || !mal->serr_irq ||
- !mal->txde_irq || !mal->rxde_irq) {
- printk(KERN_ERR
- "mal%d: failed to map interrupts !\n", index);
- err = -ENODEV;
- goto fail_unmap;
- }
-
INIT_LIST_HEAD(&mal->poll_list);
INIT_LIST_HEAD(&mal->list);
spin_lock_init(&mal->lock);
@@ -654,31 +632,43 @@ static int mal_probe(struct platform_device *ofdev)
sizeof(struct mal_descriptor) *
mal_rx_bd_offset(mal, i));
+ mal->txeob_irq = platform_get_irq(ofdev, 0);
+ mal->rxeob_irq = platform_get_irq(ofdev, 1);
+ mal->serr_irq = platform_get_irq(ofdev, 2);
+
if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
+ mal->txde_irq = mal->rxde_irq = mal->serr_irq;
irqflags = IRQF_SHARED;
hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
} else {
+ mal->txde_irq = platform_get_irq(ofdev, 3);
+ mal->rxde_irq = platform_get_irq(ofdev, 4);
irqflags = 0;
hdlr_serr = mal_serr;
hdlr_txde = mal_txde;
hdlr_rxde = mal_rxde;
}
- err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
+ err = devm_request_irq(&ofdev->dev, mal->serr_irq, hdlr_serr, irqflags,
+ "MAL SERR", mal);
if (err)
goto fail2;
- err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
+ err = devm_request_irq(&ofdev->dev, mal->txde_irq, hdlr_txde, irqflags,
+ "MAL TX DE", mal);
if (err)
- goto fail3;
- err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
+ goto fail2;
+ err = devm_request_irq(&ofdev->dev, mal->txeob_irq, mal_txeob, 0,
+ "MAL TX EOB", mal);
if (err)
- goto fail4;
- err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
+ goto fail2;
+ err = devm_request_irq(&ofdev->dev, mal->rxde_irq, hdlr_rxde, irqflags,
+ "MAL RX DE", mal);
if (err)
- goto fail5;
- err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
+ goto fail2;
+ err = devm_request_irq(&ofdev->dev, mal->rxeob_irq, mal_rxeob, 0,
+ "MAL RX EOB", mal);
if (err)
- goto fail6;
+ goto fail2;
/* Enable all MAL SERR interrupt sources */
set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);
@@ -697,23 +687,12 @@ static int mal_probe(struct platform_device *ofdev)
return 0;
- fail6:
- free_irq(mal->rxde_irq, mal);
- fail5:
- free_irq(mal->txeob_irq, mal);
- fail4:
- free_irq(mal->txde_irq, mal);
- fail3:
- free_irq(mal->serr_irq, mal);
fail2:
dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
fail_dummy:
free_netdev(mal->dummy_dev);
fail_unmap:
dcr_unmap(mal->dcr_host, 0x100);
- fail:
- kfree(mal);
-
return err;
}
@@ -732,12 +711,6 @@ static void mal_remove(struct platform_device *ofdev)
"mal%d: commac list is not empty on remove!\n",
mal->index);
- free_irq(mal->serr_irq, mal);
- free_irq(mal->txde_irq, mal);
- free_irq(mal->txeob_irq, mal);
- free_irq(mal->rxde_irq, mal);
- free_irq(mal->rxeob_irq, mal);
-
mal_reset(mal);
free_netdev(mal->dummy_dev);
@@ -746,10 +719,9 @@ static void mal_remove(struct platform_device *ofdev)
dma_free_coherent(&ofdev->dev,
sizeof(struct mal_descriptor) *
- (NUM_TX_BUFF * mal->num_tx_chans +
- NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
- mal->bd_dma);
- kfree(mal);
+ (NUM_TX_BUFF * mal->num_tx_chans +
+ NUM_RX_BUFF * mal->num_rx_chans),
+ mal->bd_virt, mal->bd_dma);
}
static const struct of_device_id mal_platform_match[] =
@@ -778,7 +750,7 @@ static struct platform_driver mal_of_driver = {
.of_match_table = mal_platform_match,
},
.probe = mal_probe,
- .remove_new = mal_remove,
+ .remove = mal_remove,
};
int __init mal_init(void)
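Moving the five request_irq() calls to devm_request_irq() is what lets the fail3..fail6 unwind labels above collapse into fail2: devres releases the IRQs (and the devm_kzalloc'd instance) in reverse order on probe failure and at unbind, which is also why mal_remove() loses its free_irq() calls. Sketch of one leg of the pattern:

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static int demo_setup_irq(struct platform_device *pdev, void *ctx,
                              irq_handler_t handler)
    {
            int irq = platform_get_irq(pdev, 0);

            if (irq < 0)
                    return irq;

            /* released automatically when the device is unbound */
            return devm_request_irq(&pdev->dev, irq, handler, 0,
                                    "demo", ctx);
    }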
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index e1712fdc3c31..b544dd8633b7 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -216,31 +216,24 @@ void *rgmii_dump_regs(struct platform_device *ofdev, void *buf)
static int rgmii_probe(struct platform_device *ofdev)
{
- struct device_node *np = ofdev->dev.of_node;
struct rgmii_instance *dev;
- struct resource regs;
- int rc;
+ int err;
- rc = -ENOMEM;
- dev = kzalloc(sizeof(struct rgmii_instance), GFP_KERNEL);
- if (dev == NULL)
- goto err_gone;
+ dev = devm_kzalloc(&ofdev->dev, sizeof(struct rgmii_instance),
+ GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
- mutex_init(&dev->lock);
- dev->ofdev = ofdev;
+ err = devm_mutex_init(&ofdev->dev, &dev->lock);
+ if (err)
+ return err;
- rc = -ENXIO;
- if (of_address_to_resource(np, 0, &regs)) {
- printk(KERN_ERR "%pOF: Can't get registers address\n", np);
- goto err_free;
- }
+ dev->ofdev = ofdev;
- rc = -ENOMEM;
- dev->base = (struct rgmii_regs __iomem *)ioremap(regs.start,
- sizeof(struct rgmii_regs));
- if (dev->base == NULL) {
- printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
- goto err_free;
+ dev->base = devm_platform_ioremap_resource(ofdev, 0);
+ if (IS_ERR(dev->base)) {
+ dev_err(&ofdev->dev, "can't map device registers");
+ return PTR_ERR(dev->base);
}
/* Check for RGMII flags */
@@ -266,21 +259,6 @@ static int rgmii_probe(struct platform_device *ofdev)
platform_set_drvdata(ofdev, dev);
return 0;
-
- err_free:
- kfree(dev);
- err_gone:
- return rc;
-}
-
-static void rgmii_remove(struct platform_device *ofdev)
-{
- struct rgmii_instance *dev = platform_get_drvdata(ofdev);
-
- WARN_ON(dev->users != 0);
-
- iounmap(dev->base);
- kfree(dev);
}
static const struct of_device_id rgmii_match[] =
@@ -300,7 +278,6 @@ static struct platform_driver rgmii_driver = {
.of_match_table = rgmii_match,
},
.probe = rgmii_probe,
- .remove_new = rgmii_remove,
};
int __init rgmii_init(void)
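rgmii, tah and zmii all get the same treatment: devm_kzalloc() + devm_mutex_init() + devm_platform_ioremap_resource() leave nothing for a remove callback to undo, so the WARN_ON/iounmap/kfree remove functions are deleted outright. Note the error convention changes with the helper: it returns an ERR_PTR, not NULL. Sketch:

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct demo_instance {
            void __iomem *base;
    };

    static int demo_probe(struct platform_device *pdev)
    {
            struct demo_instance *inst;

            inst = devm_kzalloc(&pdev->dev, sizeof(*inst), GFP_KERNEL);
            if (!inst)
                    return -ENOMEM;

            inst->base = devm_platform_ioremap_resource(pdev, 0);
            if (IS_ERR(inst->base))
                    return PTR_ERR(inst->base);     /* ERR_PTR, not NULL */

            platform_set_drvdata(pdev, inst);
            return 0;       /* no .remove needed for these resources */
    }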
diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c
index fa3488258ca2..09f6373ed2f9 100644
--- a/drivers/net/ethernet/ibm/emac/tah.c
+++ b/drivers/net/ethernet/ibm/emac/tah.c
@@ -87,31 +87,24 @@ void *tah_dump_regs(struct platform_device *ofdev, void *buf)
static int tah_probe(struct platform_device *ofdev)
{
- struct device_node *np = ofdev->dev.of_node;
struct tah_instance *dev;
- struct resource regs;
- int rc;
+ int err;
- rc = -ENOMEM;
- dev = kzalloc(sizeof(struct tah_instance), GFP_KERNEL);
- if (dev == NULL)
- goto err_gone;
+ dev = devm_kzalloc(&ofdev->dev, sizeof(struct tah_instance),
+ GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
- mutex_init(&dev->lock);
- dev->ofdev = ofdev;
+ err = devm_mutex_init(&ofdev->dev, &dev->lock);
+ if (err)
+ return err;
- rc = -ENXIO;
- if (of_address_to_resource(np, 0, &regs)) {
- printk(KERN_ERR "%pOF: Can't get registers address\n", np);
- goto err_free;
- }
+ dev->ofdev = ofdev;
- rc = -ENOMEM;
- dev->base = (struct tah_regs __iomem *)ioremap(regs.start,
- sizeof(struct tah_regs));
- if (dev->base == NULL) {
- printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
- goto err_free;
+ dev->base = devm_platform_ioremap_resource(ofdev, 0);
+ if (IS_ERR(dev->base)) {
+ dev_err(&ofdev->dev, "can't map device registers");
+ return PTR_ERR(dev->base);
}
platform_set_drvdata(ofdev, dev);
@@ -123,21 +116,6 @@ static int tah_probe(struct platform_device *ofdev)
wmb();
return 0;
-
- err_free:
- kfree(dev);
- err_gone:
- return rc;
-}
-
-static void tah_remove(struct platform_device *ofdev)
-{
- struct tah_instance *dev = platform_get_drvdata(ofdev);
-
- WARN_ON(dev->users != 0);
-
- iounmap(dev->base);
- kfree(dev);
}
static const struct of_device_id tah_match[] =
@@ -158,7 +136,6 @@ static struct platform_driver tah_driver = {
.of_match_table = tah_match,
},
.probe = tah_probe,
- .remove_new = tah_remove,
};
int __init tah_init(void)
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 26e86cdee2f6..69ca6065de1c 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -232,32 +232,25 @@ void *zmii_dump_regs(struct platform_device *ofdev, void *buf)
static int zmii_probe(struct platform_device *ofdev)
{
- struct device_node *np = ofdev->dev.of_node;
struct zmii_instance *dev;
- struct resource regs;
- int rc;
+ int err;
- rc = -ENOMEM;
- dev = kzalloc(sizeof(struct zmii_instance), GFP_KERNEL);
- if (dev == NULL)
- goto err_gone;
+ dev = devm_kzalloc(&ofdev->dev, sizeof(struct zmii_instance),
+ GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ err = devm_mutex_init(&ofdev->dev, &dev->lock);
+ if (err)
+ return err;
- mutex_init(&dev->lock);
dev->ofdev = ofdev;
dev->mode = PHY_INTERFACE_MODE_NA;
- rc = -ENXIO;
- if (of_address_to_resource(np, 0, &regs)) {
- printk(KERN_ERR "%pOF: Can't get registers address\n", np);
- goto err_free;
- }
-
- rc = -ENOMEM;
- dev->base = (struct zmii_regs __iomem *)ioremap(regs.start,
- sizeof(struct zmii_regs));
- if (dev->base == NULL) {
- printk(KERN_ERR "%pOF: Can't map device registers!\n", np);
- goto err_free;
+ dev->base = devm_platform_ioremap_resource(ofdev, 0);
+ if (IS_ERR(dev->base)) {
+ dev_err(&ofdev->dev, "can't map device registers");
+ return PTR_ERR(dev->base);
}
/* We may need FER value for autodetection later */
@@ -271,21 +264,6 @@ static int zmii_probe(struct platform_device *ofdev)
platform_set_drvdata(ofdev, dev);
return 0;
-
- err_free:
- kfree(dev);
- err_gone:
- return rc;
-}
-
-static void zmii_remove(struct platform_device *ofdev)
-{
- struct zmii_instance *dev = platform_get_drvdata(ofdev);
-
- WARN_ON(dev->users != 0);
-
- iounmap(dev->base);
- kfree(dev);
}
static const struct of_device_id zmii_match[] =
@@ -306,7 +284,6 @@ static struct platform_driver zmii_driver = {
.of_match_table = zmii_match,
},
.probe = zmii_probe,
- .remove_new = zmii_remove,
};
int __init zmii_init(void)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 97425c06e1ed..e95ae0d39948 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2310,7 +2310,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
tx_buff = &tx_pool->tx_buff[index];
adapter->netdev->stats.tx_packets--;
adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
- adapter->tx_stats_buffers[queue_num].packets--;
+ adapter->tx_stats_buffers[queue_num].batched_packets--;
adapter->tx_stats_buffers[queue_num].bytes -=
tx_buff->skb->len;
dev_kfree_skb_any(tx_buff->skb);
@@ -2402,7 +2402,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned int tx_map_failed = 0;
union sub_crq indir_arr[16];
unsigned int tx_dropped = 0;
- unsigned int tx_packets = 0;
+ unsigned int tx_dpackets = 0;
+ unsigned int tx_bpackets = 0;
unsigned int tx_bytes = 0;
dma_addr_t data_dma_addr;
struct netdev_queue *txq;
@@ -2575,6 +2576,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
if (lpar_rc != H_SUCCESS)
goto tx_err;
+ tx_dpackets++;
goto early_exit;
}
@@ -2603,6 +2605,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
goto tx_err;
}
+ tx_bpackets++;
+
early_exit:
if (atomic_add_return(num_entries, &tx_scrq->used)
>= adapter->req_tx_entries_per_subcrq) {
@@ -2610,7 +2614,6 @@ early_exit:
netif_stop_subqueue(netdev, queue_num);
}
- tx_packets++;
tx_bytes += skb->len;
txq_trans_cond_update(txq);
ret = NETDEV_TX_OK;
@@ -2640,10 +2643,11 @@ out:
rcu_read_unlock();
netdev->stats.tx_dropped += tx_dropped;
netdev->stats.tx_bytes += tx_bytes;
- netdev->stats.tx_packets += tx_packets;
+ netdev->stats.tx_packets += tx_bpackets + tx_dpackets;
adapter->tx_send_failed += tx_send_failed;
adapter->tx_map_failed += tx_map_failed;
- adapter->tx_stats_buffers[queue_num].packets += tx_packets;
+ adapter->tx_stats_buffers[queue_num].batched_packets += tx_bpackets;
+ adapter->tx_stats_buffers[queue_num].direct_packets += tx_dpackets;
adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
@@ -3804,29 +3808,20 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
if (stringset != ETH_SS_STATS)
return;
- for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
- memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
+ for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
+ ethtool_puts(&data, ibmvnic_stats[i].name);
for (i = 0; i < adapter->req_tx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
- data += ETH_GSTRING_LEN;
-
- snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
- data += ETH_GSTRING_LEN;
-
- snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
- data += ETH_GSTRING_LEN;
+ ethtool_sprintf(&data, "tx%d_batched_packets", i);
+ ethtool_sprintf(&data, "tx%d_direct_packets", i);
+ ethtool_sprintf(&data, "tx%d_bytes", i);
+ ethtool_sprintf(&data, "tx%d_dropped_packets", i);
}
for (i = 0; i < adapter->req_rx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
- data += ETH_GSTRING_LEN;
-
- snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
- data += ETH_GSTRING_LEN;
-
- snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
- data += ETH_GSTRING_LEN;
+ ethtool_sprintf(&data, "rx%d_packets", i);
+ ethtool_sprintf(&data, "rx%d_bytes", i);
+ ethtool_sprintf(&data, "rx%d_interrupts", i);
}
}
@@ -3873,7 +3868,9 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
(adapter, ibmvnic_stats[i].offset));
for (j = 0; j < adapter->req_tx_queues; j++) {
- data[i] = adapter->tx_stats_buffers[j].packets;
+ data[i] = adapter->tx_stats_buffers[j].batched_packets;
+ i++;
+ data[i] = adapter->tx_stats_buffers[j].direct_packets;
i++;
data[i] = adapter->tx_stats_buffers[j].bytes;
i++;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 94ac36b1408b..a189038d88df 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -213,7 +213,8 @@ struct ibmvnic_statistics {
-#define NUM_TX_STATS 3
+#define NUM_TX_STATS 4
struct ibmvnic_tx_queue_stats {
- u64 packets;
+ u64 batched_packets;
+ u64 direct_packets;
u64 bytes;
u64 dropped_packets;
};
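Splitting the counter means the struct, the per-queue strings and the stats fill loop must all move in lockstep, and any stat-count literal (NUM_TX_STATS in the hunk above) has to cover the extra field or ethtool's count and strings go out of sync. A sizeof-derived count avoids the literal going stale again; a sketch of that alternative (an assumption, not necessarily what the driver settled on):

    #include <linux/types.h>

    struct demo_tx_queue_stats {
            u64 batched_packets;
            u64 direct_packets;
            u64 bytes;
            u64 dropped_packets;
    };

    /* stays correct when fields are added or removed */
    #define DEMO_NUM_TX_STATS \
            (sizeof(struct demo_tx_queue_stats) / sizeof(u64))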
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 0375c7448a57..20bc40eec487 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -258,6 +258,7 @@ config I40E_DCB
config IAVF
tristate
select LIBIE
+ select NET_SHAPER
config I40EVF
tristate "Intel(R) Ethernet Adaptive Virtual Function support"
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index ab7ae418d294..3f089c3d47b2 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -513,6 +513,8 @@ void e1000_down(struct e1000_adapter *adapter)
*/
netif_carrier_off(netdev);
+ netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
+ netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
napi_disable(&adapter->napi);
e1000_irq_disable(adapter);
@@ -1392,7 +1394,10 @@ int e1000_open(struct net_device *netdev)
/* From here on the code is the same as e1000_up() */
clear_bit(__E1000_DOWN, &adapter->flags);
+ netif_napi_set_irq(&adapter->napi, adapter->pdev->irq);
napi_enable(&adapter->napi);
+ netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, &adapter->napi);
+ netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, &adapter->napi);
e1000_irq_enable(adapter);
@@ -3504,7 +3509,9 @@ static void e1000_reset_task(struct work_struct *work)
container_of(work, struct e1000_adapter, reset_task);
e_err(drv, "Reset adapter\n");
+ rtnl_lock();
e1000_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
@@ -5069,7 +5076,9 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
usleep_range(10000, 20000);
WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+ rtnl_lock();
e1000_down(adapter);
+ rtnl_unlock();
}
status = er32(STATUS);
@@ -5230,16 +5239,20 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ rtnl_lock();
netif_device_detach(netdev);
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
+ }
if (netif_running(netdev))
e1000_down(adapter);
if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
pci_disable_device(pdev);
+ rtnl_unlock();
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
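The new netif_queue_set_napi() calls publish the queue-to-NAPI mapping to the netdev genl API; they must be cleared before napi_disable() and re-set after napi_enable(), and the added rtnl_lock()/rtnl_unlock() pairs exist because the helper asserts RTNL. Hedged sketch of the pairing for a single-queue device:

    #include <linux/netdevice.h>

    /* caller holds RTNL in both helpers */
    static void demo_link_napi(struct net_device *ndev,
                               struct napi_struct *napi, int irq)
    {
            netif_napi_set_irq(napi, irq);
            napi_enable(napi);
            netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, napi);
            netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, napi);
    }

    static void demo_unlink_napi(struct net_device *ndev,
                                 struct napi_struct *napi)
    {
            netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
            netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
            napi_disable(napi);
    }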
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 07e903346358..286155efcedf 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2928,11 +2928,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
- writel(0, tx_ring->head);
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_tdt_wa(tx_ring, 0);
- else
- writel(0, tx_ring->tail);
/* Set the Tx Interrupt Delay register */
ew32(TIDV, adapter->tx_int_delay);
@@ -3253,11 +3250,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
- writel(0, rx_ring->head);
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_rdt_wa(rx_ring, 0);
- else
- writel(0, rx_ring->tail);
/* Enable Receive Checksum Offload for TCP and UDP */
rxcsum = er32(RXCSUM);
@@ -4613,6 +4607,7 @@ int e1000e_open(struct net_device *netdev)
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
int err;
+ int irq;
/* disallow open during test */
if (test_bit(__E1000_TESTING, &adapter->state))
@@ -4676,7 +4671,15 @@ int e1000e_open(struct net_device *netdev)
/* From here on the code is the same as e1000e_up() */
clear_bit(__E1000_DOWN, &adapter->state);
+ if (adapter->int_mode == E1000E_INT_MODE_MSIX)
+ irq = adapter->msix_entries[0].vector;
+ else
+ irq = adapter->pdev->irq;
+
+ netif_napi_set_irq(&adapter->napi, irq);
napi_enable(&adapter->napi);
+ netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, &adapter->napi);
+ netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, &adapter->napi);
e1000_irq_enable(adapter);
@@ -4735,6 +4738,8 @@ int e1000e_close(struct net_device *netdev)
netdev_info(netdev, "NIC Link is Down\n");
}
+ netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
+ netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
napi_disable(&adapter->napi);
e1000e_free_tx_resources(adapter->tx_ring);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index f2506511bbff..bce5b76f1e7a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -5299,7 +5299,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
}
flags_complete:
- bitmap_xor(changed_flags, pf->flags, orig_flags, I40E_PF_FLAGS_NBITS);
+ bitmap_xor(changed_flags, new_flags, orig_flags, I40E_PF_FLAGS_NBITS);
if (test_bit(I40E_FLAG_FW_LLDP_DIS, changed_flags))
reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG;
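This one-liner is a functional fix, not a cleanup: pf->flags still equals orig_flags at this point, so XOR-ing them always produced an empty changed_flags and the reset/LLDP checks keyed off it never fired; new_flags is the bitmap that actually carries the user's request. The intended semantics:

    #include <linux/bitmap.h>

    /* changed = requested XOR original: exactly the bits being flipped */
    static void demo_changed_flags(unsigned long *changed,
                                   const unsigned long *requested,
                                   const unsigned long *original,
                                   unsigned int nbits)
    {
            bitmap_xor(changed, requested, original, nbits);
    }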
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 55fb362eb508..ab5febf83ec3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -13095,12 +13095,13 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
* @addr: the MAC address entry being added
* @vid: VLAN ID
* @flags: instructions from stack about fdb operation
+ * @notified: whether notification was emitted
* @extack: netlink extended ack, unused currently
*/
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags,
+ u16 flags, bool *notified,
struct netlink_ext_ack *extack)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 48cd1d06761c..532a0a595fe8 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -34,6 +34,7 @@
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_skbedit.h>
+#include <net/net_shaper.h>
#include "iavf_type.h"
#include <linux/avf/virtchnl.h>
@@ -250,6 +251,9 @@ struct iavf_cloud_filter {
#define IAVF_RESET_WAIT_DETECTED_COUNT 500
#define IAVF_RESET_WAIT_COMPLETE_COUNT 2000
+#define IAVF_MAX_QOS_TC_NUM 8
+#define IAVF_DEFAULT_QUANTA_SIZE 1024
+
/* board specific private data structure */
struct iavf_adapter {
struct workqueue_struct *wq;
@@ -336,6 +340,9 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION BIT_ULL(36)
#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION BIT_ULL(37)
#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION BIT_ULL(38)
+#define IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW BIT_ULL(39)
+#define IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE BIT_ULL(40)
+#define IAVF_FLAG_AQ_GET_QOS_CAPS BIT_ULL(41)
/* flags for processing extended capability messages during
* __IAVF_INIT_EXTENDED_CAPS. Each capability exchange requires
@@ -408,6 +415,8 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_FDIR_PF)
#define ADV_RSS_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
+#define QOS_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_QOS)
struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
struct virtchnl_version_info pf_version;
@@ -416,6 +425,7 @@ struct iavf_adapter {
struct virtchnl_vlan_caps vlan_v2_caps;
u16 msg_enable;
struct iavf_eth_stats current_stats;
+ struct virtchnl_qos_cap_list *qos_caps;
struct iavf_vsi vsi;
u32 aq_wait_count;
/* RSS stuff */
@@ -529,22 +539,16 @@ static inline void iavf_change_state(struct iavf_adapter *adapter,
iavf_state_str(adapter->state));
}
-int iavf_up(struct iavf_adapter *adapter);
void iavf_down(struct iavf_adapter *adapter);
int iavf_process_config(struct iavf_adapter *adapter);
int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter);
void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags);
void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags);
void iavf_schedule_finish_config(struct iavf_adapter *adapter);
-void iavf_reset(struct iavf_adapter *adapter);
void iavf_set_ethtool_ops(struct net_device *netdev);
-void iavf_update_stats(struct iavf_adapter *adapter);
void iavf_free_all_tx_resources(struct iavf_adapter *adapter);
void iavf_free_all_rx_resources(struct iavf_adapter *adapter);
-void iavf_napi_add_all(struct iavf_adapter *adapter);
-void iavf_napi_del_all(struct iavf_adapter *adapter);
-
int iavf_send_api_ver(struct iavf_adapter *adapter);
int iavf_verify_api_ver(struct iavf_adapter *adapter);
int iavf_send_vf_config_msg(struct iavf_adapter *adapter);
@@ -555,11 +559,9 @@ void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter);
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter);
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush);
void iavf_configure_queues(struct iavf_adapter *adapter);
-void iavf_deconfigure_queues(struct iavf_adapter *adapter);
void iavf_enable_queues(struct iavf_adapter *adapter);
void iavf_disable_queues(struct iavf_adapter *adapter);
void iavf_map_queues(struct iavf_adapter *adapter);
-int iavf_request_queues(struct iavf_adapter *adapter, int num);
void iavf_add_ether_addrs(struct iavf_adapter *adapter);
void iavf_del_ether_addrs(struct iavf_adapter *adapter);
void iavf_add_vlans(struct iavf_adapter *adapter);
@@ -579,8 +581,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
enum virtchnl_ops v_opcode,
enum iavf_status v_retval, u8 *msg, u16 msglen);
int iavf_config_rss(struct iavf_adapter *adapter);
-int iavf_lan_add_device(struct iavf_adapter *adapter);
-int iavf_lan_del_device(struct iavf_adapter *adapter);
+void iavf_cfg_queues_bw(struct iavf_adapter *adapter);
+void iavf_cfg_queues_quanta_size(struct iavf_adapter *adapter);
+void iavf_get_qos_caps(struct iavf_adapter *adapter);
void iavf_enable_channels(struct iavf_adapter *adapter);
void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index f782402cd789..12ef160425aa 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1972,8 +1972,11 @@ static void iavf_finish_config(struct work_struct *work)
adapter = container_of(work, struct iavf_adapter, finish_config);
- /* Always take RTNL first to prevent circular lock dependency */
+ /* Always take RTNL first to prevent circular lock dependency;
+ * the dev->lock is needed to update the queue number.
+ */
rtnl_lock();
+ mutex_lock(&adapter->netdev->lock);
mutex_lock(&adapter->crit_lock);
if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
@@ -2017,6 +2020,7 @@ static void iavf_finish_config(struct work_struct *work)
out:
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&adapter->netdev->lock);
rtnl_unlock();
}
@@ -2085,6 +2089,21 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
return 0;
}
+ if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW) {
+ iavf_cfg_queues_bw(adapter);
+ return 0;
+ }
+
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_QOS_CAPS) {
+ iavf_get_qos_caps(adapter);
+ return 0;
+ }
+
+ if (adapter->aq_required & IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE) {
+ iavf_cfg_queues_quanta_size(adapter);
+ return 0;
+ }
+
if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
iavf_configure_queues(adapter);
return 0;
@@ -2670,6 +2689,9 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
/* request initial VLAN offload settings */
iavf_set_vlan_offload_features(adapter, 0, netdev->features);
+ if (QOS_ALLOWED(adapter))
+ adapter->aq_required |= IAVF_FLAG_AQ_GET_QOS_CAPS;
+
iavf_schedule_finish_config(adapter);
return;
@@ -2919,6 +2941,30 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
}
/**
+ * iavf_reconfig_qs_bw - replay queue bandwidth configuration after reset
+ * @adapter: board private structure
+ *
+ * After a hardware reset the shaper parameters of the queues must be
+ * replayed. Since the net_shaper objects inside the Tx rings persist across
+ * the reset, set the update flag on every configured queue so that a single
+ * virtchnl message re-programs them all.
+ **/
+static void iavf_reconfig_qs_bw(struct iavf_adapter *adapter)
+{
+ int i, num = 0;
+
+ for (i = 0; i < adapter->num_active_queues; i++)
+ if (adapter->tx_rings[i].q_shaper.bw_min ||
+ adapter->tx_rings[i].q_shaper.bw_max) {
+ adapter->tx_rings[i].q_shaper_update = true;
+ num++;
+ }
+
+ if (num)
+ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
+}
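The replay itself is deferred: the function only marks state and sets an aq_required bit, and the watchdog path later issues one pending virtchnl operation at a time. A minimal sketch of that request pattern (illustrative only, not driver code; the watchdog_task member is assumed from the surrounding driver):

    /* Sketch: queue a bandwidth reconfiguration request; the actual
     * virtchnl message is sent later by iavf_process_aq_command().
     */
    static void request_queues_bw_update(struct iavf_adapter *adapter)
    {
            adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
            mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
    }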
+
+/**
* iavf_reset_task - Call-back task to handle hardware reset
* @work: pointer to work_struct
*
@@ -2944,10 +2990,12 @@ static void iavf_reset_task(struct work_struct *work)
/* When device is being removed it doesn't make sense to run the reset
* task, just return in such a case.
*/
+ mutex_lock(&netdev->lock);
if (!mutex_trylock(&adapter->crit_lock)) {
if (adapter->state != __IAVF_REMOVE)
queue_work(adapter->wq, &adapter->reset_task);
+ mutex_unlock(&netdev->lock);
return;
}
@@ -2995,6 +3043,7 @@ static void iavf_reset_task(struct work_struct *work)
reg_val);
iavf_disable_vf(adapter);
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -3124,6 +3173,8 @@ continue_reset:
iavf_up_complete(adapter);
iavf_irq_enable(adapter, true);
+
+ iavf_reconfig_qs_bw(adapter);
} else {
iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
@@ -3133,6 +3184,7 @@ continue_reset:
wake_up(&adapter->reset_waitqueue);
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
return;
reset_err:
@@ -3143,6 +3195,7 @@ reset_err:
iavf_disable_vf(adapter);
mutex_unlock(&adapter->crit_lock);
+ mutex_unlock(&netdev->lock);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
}
@@ -3614,8 +3667,10 @@ exit:
if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
return 0;
+ mutex_lock(&netdev->lock);
netif_set_real_num_rx_queues(netdev, total_qps);
netif_set_real_num_tx_queues(netdev, total_qps);
+ mutex_unlock(&netdev->lock);
return ret;
}
@@ -4893,6 +4948,98 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
return iavf_fix_strip_features(adapter, features);
}
+static int
+iavf_verify_shaper(struct net_shaper_binding *binding,
+ const struct net_shaper *shaper,
+ struct netlink_ext_ack *extack)
+{
+ struct iavf_adapter *adapter = netdev_priv(binding->netdev);
+ u64 vf_max;
+
+ if (shaper->handle.scope == NET_SHAPER_SCOPE_QUEUE) {
+ vf_max = adapter->qos_caps->cap[0].shaper.peak;
+ if (vf_max && shaper->bw_max > vf_max) {
+ NL_SET_ERR_MSG_FMT(extack, "Max rate (%llu) of queue %d can't exceed max TX rate of VF (%llu kbps)",
+ shaper->bw_max, shaper->handle.id,
+ vf_max);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int
+iavf_shaper_set(struct net_shaper_binding *binding,
+ const struct net_shaper *shaper,
+ struct netlink_ext_ack *extack)
+{
+ struct iavf_adapter *adapter = netdev_priv(binding->netdev);
+ const struct net_shaper_handle *handle = &shaper->handle;
+ struct iavf_ring *tx_ring;
+ int ret = 0;
+
+ mutex_lock(&adapter->crit_lock);
+ if (handle->id >= adapter->num_active_queues)
+ goto unlock;
+
+ ret = iavf_verify_shaper(binding, shaper, extack);
+ if (ret)
+ goto unlock;
+
+ tx_ring = &adapter->tx_rings[handle->id];
+
+ tx_ring->q_shaper.bw_min = div_u64(shaper->bw_min, 1000);
+ tx_ring->q_shaper.bw_max = div_u64(shaper->bw_max, 1000);
+ tx_ring->q_shaper_update = true;
+
+ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
+
+unlock:
+ mutex_unlock(&adapter->crit_lock);
+ return ret;
+}
+
+static int iavf_shaper_del(struct net_shaper_binding *binding,
+ const struct net_shaper_handle *handle,
+ struct netlink_ext_ack *extack)
+{
+ struct iavf_adapter *adapter = netdev_priv(binding->netdev);
+ struct iavf_ring *tx_ring;
+
+ mutex_lock(&adapter->crit_lock);
+ if (handle->id >= adapter->num_active_queues)
+ goto unlock;
+
+ tx_ring = &adapter->tx_rings[handle->id];
+ tx_ring->q_shaper.bw_min = 0;
+ tx_ring->q_shaper.bw_max = 0;
+ tx_ring->q_shaper_update = true;
+
+ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
+
+unlock:
+ mutex_unlock(&adapter->crit_lock);
+ return 0;
+}
+
+static void iavf_shaper_cap(struct net_shaper_binding *binding,
+ enum net_shaper_scope scope,
+ unsigned long *flags)
+{
+ if (scope != NET_SHAPER_SCOPE_QUEUE)
+ return;
+
+ *flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MIN) |
+ BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) |
+ BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS);
+}
+
+static const struct net_shaper_ops iavf_shaper_ops = {
+ .set = iavf_shaper_set,
+ .delete = iavf_shaper_del,
+ .capabilities = iavf_shaper_cap,
+};
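With these ops plugged into iavf_netdev_ops below, per-queue shapers become reachable through the generic netlink net-shaper family. Purely as an illustration (spec path, attribute names, and bandwidth units should be verified against the target tree), capping Tx queue 0 might look like:

    ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/net_shaper.yaml \
        --do set --json '{"ifindex": 4,
                          "handle": {"scope": "queue", "id": 0},
                          "bw-max": 100000000}'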
+
static const struct net_device_ops iavf_netdev_ops = {
.ndo_open = iavf_open,
.ndo_stop = iavf_close,
@@ -4908,6 +5055,7 @@ static const struct net_device_ops iavf_netdev_ops = {
.ndo_fix_features = iavf_fix_features,
.ndo_set_features = iavf_set_features,
.ndo_setup_tc = iavf_setup_tc,
+ .net_shaper_ops = &iavf_shaper_ops,
};
/**
@@ -5054,7 +5202,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev;
struct iavf_adapter *adapter = NULL;
struct iavf_hw *hw = NULL;
- int err;
+ int err, len;
err = pci_enable_device(pdev);
if (err)
@@ -5122,6 +5270,13 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->bus.func = PCI_FUNC(pdev->devfn);
hw->bus.bus_id = pdev->bus->number;
+ len = struct_size(adapter->qos_caps, cap, IAVF_MAX_QOS_TC_NUM);
+ adapter->qos_caps = kzalloc(len, GFP_KERNEL);
+ if (!adapter->qos_caps) {
+ err = -ENOMEM;
+ goto err_alloc_qos_cap;
+ }
+
/* set up the locks for the AQ, do this only once in probe
* and destroy them only once in remove
*/
@@ -5160,6 +5315,8 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Initialization goes on in the work. Do not add more of it below. */
return 0;
+err_alloc_qos_cap:
+ iounmap(hw->hw_addr);
err_ioremap:
destroy_workqueue(adapter->wq);
err_alloc_wq:
diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
index 48c3901381b4..cac9d1a35a52 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_prototype.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
@@ -18,7 +18,6 @@
/* adminq functions */
enum iavf_status iavf_init_adminq(struct iavf_hw *hw);
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw);
-void iavf_adminq_init_ring_data(struct iavf_hw *hw);
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
struct iavf_arq_event_info *e,
u16 *events_pending);
@@ -33,8 +32,6 @@ bool iavf_asq_done(struct iavf_hw *hw);
void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask,
void *desc, void *buffer, u16 buf_len);
-void iavf_idle_aq(struct iavf_hw *hw);
-void iavf_resume_aq(struct iavf_hw *hw);
bool iavf_check_asq_alive(struct iavf_hw *hw);
enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading);
const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index d7b5587aeb8e..f97c702c0802 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -296,6 +296,8 @@ struct iavf_ring {
*/
u32 rx_buf_len;
+ struct net_shaper q_shaper;
+ bool q_shaper_update;
} ____cacheline_internodealigned_in_smp;
#define IAVF_ITR_ADAPTIVE_MIN_INC 0x0002
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 7e810b65380c..15d388b431c5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -151,7 +151,8 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_USO |
VIRTCHNL_VF_OFFLOAD_FDIR_PF |
VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
- VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
+ VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
+ VIRTCHNL_VF_OFFLOAD_QOS;
adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG;
@@ -1508,6 +1509,130 @@ iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
}
/**
+ * iavf_get_qos_caps - get QoS capabilities
+ * @adapter: iavf adapter struct instance
+ *
+ * Request the supported QoS capabilities from the PF.
+ */
+void iavf_get_qos_caps(struct iavf_adapter *adapter)
+{
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev,
+ "Cannot get qos caps, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ adapter->current_op = VIRTCHNL_OP_GET_QOS_CAPS;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_QOS_CAPS;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_QOS_CAPS, NULL, 0);
+}
+
+/**
+ * iavf_set_quanta_size - set quanta size of queue chunk
+ * @adapter: iavf adapter struct instance
+ * @quanta_size: quanta size in bytes
+ * @queue_index: starting index of queue chunk
+ * @num_queues: number of queues in the queue chunk
+ *
+ * Request that the PF set the quanta size of the queue chunk starting at
+ * @queue_index.
+ */
+static void
+iavf_set_quanta_size(struct iavf_adapter *adapter, u16 quanta_size,
+ u16 queue_index, u16 num_queues)
+{
+ struct virtchnl_quanta_cfg quanta_cfg;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev,
+ "Cannot set queue quanta size, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ adapter->current_op = VIRTCHNL_OP_CONFIG_QUANTA;
+ quanta_cfg.quanta_size = quanta_size;
+ quanta_cfg.queue_select.type = VIRTCHNL_QUEUE_TYPE_TX;
+ quanta_cfg.queue_select.start_queue_id = queue_index;
+ quanta_cfg.queue_select.num_queues = num_queues;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_QUANTA,
+ (u8 *)&quanta_cfg, sizeof(quanta_cfg));
+}
+
+/**
+ * iavf_cfg_queues_quanta_size - configure quanta size of queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF configure quanta size of allocated queues.
+ **/
+void iavf_cfg_queues_quanta_size(struct iavf_adapter *adapter)
+{
+ int quanta_size = IAVF_DEFAULT_QUANTA_SIZE;
+
+ /* Set Queue Quanta Size to default */
+ iavf_set_quanta_size(adapter, quanta_size, 0,
+ adapter->num_active_queues);
+}
+
+/**
+ * iavf_cfg_queues_bw - configure bandwidth of allocated queues
+ * @adapter: iavf adapter structure instance
+ *
+ * Request that the PF configure the bandwidth of the allocated queues.
+ */
+void iavf_cfg_queues_bw(struct iavf_adapter *adapter)
+{
+ struct virtchnl_queues_bw_cfg *qs_bw_cfg;
+ struct net_shaper *q_shaper;
+ int qs_to_update = 0;
+ int i, inx = 0;
+ size_t len;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev,
+ "Cannot set tc queue bw, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ for (i = 0; i < adapter->num_active_queues; i++) {
+ if (adapter->tx_rings[i].q_shaper_update)
+ qs_to_update++;
+ }
+ len = struct_size(qs_bw_cfg, cfg, qs_to_update);
+ qs_bw_cfg = kzalloc(len, GFP_KERNEL);
+ if (!qs_bw_cfg)
+ return;
+
+ qs_bw_cfg->vsi_id = adapter->vsi.id;
+ qs_bw_cfg->num_queues = qs_to_update;
+
+ for (i = 0; i < adapter->num_active_queues; i++) {
+ struct iavf_ring *tx_ring = &adapter->tx_rings[i];
+
+ q_shaper = &tx_ring->q_shaper;
+ if (tx_ring->q_shaper_update) {
+ qs_bw_cfg->cfg[inx].queue_id = i;
+ qs_bw_cfg->cfg[inx].shaper.peak = q_shaper->bw_max;
+ qs_bw_cfg->cfg[inx].shaper.committed = q_shaper->bw_min;
+ qs_bw_cfg->cfg[inx].tc = 0;
+ inx++;
+ }
+ }
+
+ adapter->current_op = VIRTCHNL_OP_CONFIG_QUEUE_BW;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_QUEUE_BW,
+ (u8 *)qs_bw_cfg, len);
+ kfree(qs_bw_cfg);
+}
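The message sizing above follows the kernel's flexible-array idiom. A self-contained sketch of the same pattern (hypothetical message struct, not the virtchnl one):

    struct bw_msg {
            u32 vsi_id;
            u32 num_queues;
            struct { u32 queue_id; u64 peak; u64 committed; } cfg[];
    };

    static struct bw_msg *alloc_bw_msg(u32 n)
    {
            struct bw_msg *msg;

            /* struct_size() sizes the header plus n trailing elements with
             * integer-overflow checking (linux/overflow.h).
             */
            msg = kzalloc(struct_size(msg, cfg, n), GFP_KERNEL);
            if (msg)
                    msg->num_queues = n;
            return msg;
    }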
+
+/**
* iavf_enable_channels
* @adapter: adapter structure
*
@@ -2227,6 +2352,18 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
break;
+ case VIRTCHNL_OP_GET_QOS_CAPS:
+ dev_warn(&adapter->pdev->dev, "Failed to Get Qos CAPs, error %s\n",
+ iavf_stat_str(&adapter->hw, v_retval));
+ break;
+ case VIRTCHNL_OP_CONFIG_QUANTA:
+ dev_warn(&adapter->pdev->dev, "Failed to Config Quanta, error %s\n",
+ iavf_stat_str(&adapter->hw, v_retval));
+ break;
+ case VIRTCHNL_OP_CONFIG_QUEUE_BW:
+ dev_warn(&adapter->pdev->dev, "Failed to Config Queue BW, error %s\n",
+ iavf_stat_str(&adapter->hw, v_retval));
+ break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
v_retval, iavf_stat_str(&adapter->hw, v_retval),
@@ -2569,6 +2706,24 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (!v_retval)
iavf_netdev_features_vlan_strip_set(netdev, false);
break;
+ case VIRTCHNL_OP_GET_QOS_CAPS: {
+ u16 len = struct_size(adapter->qos_caps, cap,
+ IAVF_MAX_QOS_TC_NUM);
+
+ memcpy(adapter->qos_caps, msg, min(msglen, len));
+
+ adapter->aq_required |= IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_QUANTA:
+ break;
+ case VIRTCHNL_OP_CONFIG_QUEUE_BW: {
+ int i;
+ /* shaper configuration was successful for all queues */
+ for (i = 0; i < adapter->num_active_queues; i++)
+ adapter->tx_rings[i].q_shaper_update = false;
+ }
+ break;
default:
if (adapter->current_op && (v_opcode != adapter->current_op))
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index d6f80da30dec..2f5d6f974185 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -181,11 +181,9 @@
#define ice_for_each_chnl_tc(i) \
for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)
-#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_UCAST_RX)
+#define ICE_UCAST_PROMISC_BITS ICE_PROMISC_UCAST_RX
-#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \
- ICE_PROMISC_UCAST_RX | \
- ICE_PROMISC_VLAN_TX | \
+#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_RX | \
ICE_PROMISC_VLAN_RX)
#define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)
@@ -207,6 +205,7 @@ enum ice_feature {
ICE_F_GNSS,
ICE_F_ROCE_LAG,
ICE_F_SRIOV_LAG,
+ ICE_F_MBX_LIMIT,
ICE_F_MAX
};
@@ -371,9 +370,6 @@ struct ice_vsi {
spinlock_t arfs_lock; /* protects aRFS hash table and filter state */
atomic_t *arfs_last_fltr_id;
- u16 max_frame;
- u16 rx_buf_len;
-
struct ice_aqc_vsi_props info; /* VSI properties */
struct ice_vsi_vlan_info vlan_info; /* vlan config to be restored */
@@ -669,6 +665,8 @@ struct ice_pf {
struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
struct ice_dplls dplls;
struct device *hwmon_dev;
+
+ u8 num_quanta_prof_used;
};
extern struct workqueue_struct *ice_lag_wq;
@@ -1047,5 +1045,10 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
+static inline enum ice_phy_model ice_get_phy_model(const struct ice_hw *hw)
+{
+ return hw->ptp.phy_model;
+}
+
extern const struct xdp_metadata_ops ice_xdp_md_ops;
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
index ad84d8ad49a6..01a08cfd0090 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
@@ -9,12 +9,14 @@
#include <linux/spinlock.h>
#include <linux/xarray.h>
#include "ice_adapter.h"
+#include "ice.h"
static DEFINE_XARRAY(ice_adapters);
static DEFINE_MUTEX(ice_adapters_mutex);
/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */
#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13)
+#define INDEX_FIELD_DEV GENMASK(31, 16)
#define INDEX_FIELD_BUS GENMASK(12, 5)
#define INDEX_FIELD_SLOT GENMASK(4, 0)
@@ -24,9 +26,17 @@ static unsigned long ice_adapter_index(const struct pci_dev *pdev)
WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN));
- return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) |
- FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) |
- FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn));
+ switch (pdev->device) {
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ return FIELD_PREP(INDEX_FIELD_DEV, pdev->device);
+ default:
+ return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) |
+ FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) |
+ FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn));
+ }
}
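The effect of the switch is that every E825C port in a system maps to the same ice_adapter, keyed by device ID alone (presumably because these parts share device-wide resources such as the clock), while all other parts keep a per-slot adapter. For the per-slot case, the packing works out as in this illustrative computation:

    /* Example values only: domain 0, bus 0x3b, slot 0 */
    unsigned long index = FIELD_PREP(INDEX_FIELD_DOMAIN, 0) |
                          FIELD_PREP(INDEX_FIELD_BUS, 0x3b) |
                          FIELD_PREP(INDEX_FIELD_SLOT, 0);
    /* INDEX_FIELD_BUS is GENMASK(12, 5), so index == 0x3b << 5 == 0x760 */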
static struct ice_adapter *ice_adapter_new(void)
@@ -40,11 +50,17 @@ static struct ice_adapter *ice_adapter_new(void)
spin_lock_init(&adapter->ptp_gltsyn_time_lock);
refcount_set(&adapter->refcount, 1);
+ mutex_init(&adapter->ports.lock);
+ INIT_LIST_HEAD(&adapter->ports.ports);
+
return adapter;
}
static void ice_adapter_free(struct ice_adapter *adapter)
{
+ WARN_ON(!list_empty(&adapter->ports.ports));
+ mutex_destroy(&adapter->ports.lock);
+
kfree(adapter);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h
index 9d11014ec02f..e233225848b3 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.h
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.h
@@ -4,22 +4,42 @@
#ifndef _ICE_ADAPTER_H_
#define _ICE_ADAPTER_H_
+#include <linux/types.h>
#include <linux/spinlock_types.h>
#include <linux/refcount_types.h>
struct pci_dev;
+struct ice_pf;
+
+/**
+ * struct ice_port_list - data used to store the list of adapter ports
+ *
+ * This structure contains data used to maintain a list of adapter ports
+ *
+ * @ports: list of ports
+ * @lock: protect access to the ports list
+ */
+struct ice_port_list {
+ struct list_head ports;
+ /* To synchronize the ports list operations */
+ struct mutex lock;
+};
/**
* struct ice_adapter - PCI adapter resources shared across PFs
* @ptp_gltsyn_time_lock: Spinlock protecting access to the GLTSYN_TIME
* register of the PTP clock.
* @refcount: Reference count. struct ice_pf objects hold the references.
+ * @ctrl_pf: Control PF of the adapter
+ * @ports: Ports list
*/
struct ice_adapter {
+ refcount_t refcount;
/* For access to the GLTSYN_TIME register */
spinlock_t ptp_gltsyn_time_lock;
- refcount_t refcount;
+ struct ice_pf *ctrl_pf;
+ struct ice_port_list ports;
};
struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 0be1a98d7cc1..1489a8ceec51 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1492,6 +1492,23 @@ struct ice_aqc_dnl_equa_param {
#define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_DRATE (0x15 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_CTLE_GAINHF (0x20 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_CTLE_GAINLF (0x21 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_CTLE_GAINDC (0x22 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_CTLE_BW (0x23 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_GAIN (0x30 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_GAIN2 (0x31 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_2 (0x32 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_3 (0x33 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_4 (0x34 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_5 (0x35 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_6 (0x36 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_7 (0x37 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_8 (0x38 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_9 (0x39 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_10 (0x3A << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_11 (0x3B << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_12 (0x3C << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_TX_EQU_PRE1 0x0
#define ICE_AQC_TX_EQU_PRE3 0x3
#define ICE_AQC_TX_EQU_ATTEN 0x4
@@ -1742,6 +1759,15 @@ struct ice_aqc_nvm {
};
#define ICE_AQC_NVM_START_POINT 0
+#define ICE_AQC_NVM_SECTOR_UNIT 4096
+#define ICE_AQC_NVM_SDP_AC_PTR_OFFSET 0xD8
+#define ICE_AQC_NVM_SDP_AC_PTR_M GENMASK(14, 0)
+#define ICE_AQC_NVM_SDP_AC_PTR_INVAL 0x7FFF
+#define ICE_AQC_NVM_SDP_AC_PTR_TYPE_M BIT(15)
+#define ICE_AQC_NVM_SDP_AC_SDP_NUM_M GENMASK(2, 0)
+#define ICE_AQC_NVM_SDP_AC_DIR_M BIT(3)
+#define ICE_AQC_NVM_SDP_AC_PIN_M GENMASK(15, 6)
+#define ICE_AQC_NVM_SDP_AC_MAX_SIZE 7
#define ICE_AQC_NVM_TX_TOPO_MOD_ID 0x14B
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 4a9a6899fc45..82a9cd4ec7ae 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -156,7 +156,8 @@ skip_alloc:
* handler here (i.e. resume, reset/rebuild, etc.)
*/
if (vsi->netdev)
- netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll);
+ netif_napi_add_config(vsi->netdev, &q_vector->napi,
+ ice_napi_poll, v_idx);
out:
/* tie q_vector and VSI together */
@@ -347,6 +348,8 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
break;
}
+ tlan_ctx->quanta_prof_idx = ring->quanta_prof_id;
+
tlan_ctx->tso_ena = ICE_TX_LEGACY;
tlan_ctx->tso_qnum = pf_q;
@@ -445,7 +448,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
/* Max packet size for this queue - must not be set to a larger value
* than 5 x DBUF
*/
- rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
+ rlan_ctx.rxmax = min_t(u32, ring->max_frame,
ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);
/* Rx queue threshold in units of 64 */
@@ -541,8 +544,6 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
int err;
- ring->rx_buf_len = ring->vsi->rx_buf_len;
-
if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
@@ -641,21 +642,25 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
/**
* ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
* @vsi: VSI
+ * @ring: Rx ring to configure
+ *
+ * Determine the maximum frame size and Rx buffer length to use for a PF VSI.
+ * Set these in the associated Rx ring structure.
*/
-static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi, struct ice_rx_ring *ring)
{
if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
- vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
- vsi->rx_buf_len = ICE_RXBUF_1664;
+ ring->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+ ring->rx_buf_len = ICE_RXBUF_1664;
#if (PAGE_SIZE < 8192)
} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
(vsi->netdev->mtu <= ETH_DATA_LEN)) {
- vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
- vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+ ring->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
+ ring->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
#endif
} else {
- vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
- vsi->rx_buf_len = ICE_RXBUF_3072;
+ ring->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+ ring->rx_buf_len = ICE_RXBUF_3072;
}
}
@@ -670,15 +675,15 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
{
u16 i;
- if (vsi->type == ICE_VSI_VF)
- goto setup_rings;
-
- ice_vsi_cfg_frame_size(vsi);
-setup_rings:
/* set up individual rings */
ice_for_each_rxq(vsi, i) {
- int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
+ struct ice_rx_ring *ring = vsi->rx_rings[i];
+ int err;
+
+ if (vsi->type != ICE_VSI_VF)
+ ice_vsi_cfg_frame_size(vsi, ring);
+ err = ice_vsi_cfg_rxq(ring);
if (err)
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 009716a12a26..b22e71dc59d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -2437,6 +2437,25 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
}
/**
+ * ice_func_id_to_logical_id - map a function ID to a logical PF ID
+ * @active_function_bitmap: active function bitmap
+ * @pf_id: function number of device
+ *
+ * Return: logical PF ID.
+ */
+static int ice_func_id_to_logical_id(u32 active_function_bitmap, u8 pf_id)
+{
+ u8 logical_id = 0;
+ u8 i;
+
+ for (i = 0; i < pf_id; i++)
+ if (active_function_bitmap & BIT(i))
+ logical_id++;
+
+ return logical_id;
+}
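A quick worked example of the mapping:

    /* active_function_bitmap = 0xD (0b1101): functions 0, 2 and 3 active.
     * For pf_id = 3 the loop counts the active functions below it
     * (bits 0 and 2), so:
     */
    ice_func_id_to_logical_id(0xD, 3);   /* returns 2 */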
+
+/**
* ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
@@ -2453,6 +2472,8 @@ ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
dev_p->num_funcs = hweight32(number);
ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
dev_p->num_funcs);
+
+ hw->logical_pf_id = ice_func_id_to_logical_id(number, hw->pf_id);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 272fd823a825..03988be03729 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1211,6 +1211,131 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
}
/**
+ * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
+ * @buf: pointer to buffer header
+ *
+ * Return: true if the given @buf is a metadata buffer, false otherwise.
+ */
+static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf)
+{
+ return le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF;
+}
+
+/**
+ * struct ice_ddp_send_ctx - sending context of current DDP segment
+ * @hw: pointer to the hardware struct
+ *
+ * Keeps current sending state (header, error) for the purpose of proper "last"
+ * bit setting in ice_aq_download_pkg(). Use via calls to ice_ddp_send_hunk().
+ */
+struct ice_ddp_send_ctx {
+ struct ice_hw *hw;
+/* private: only for ice_ddp_send_hunk() */
+ struct ice_buf_hdr *hdr;
+ int err;
+};
+
+static void ice_ddp_send_ctx_set_err(struct ice_ddp_send_ctx *ctx, int err)
+{
+ ctx->err = err;
+}
+
+/**
+ * ice_ddp_send_hunk - send one hunk of data to FW
+ * @ctx: current segment sending context
+ * @hunk: next hunk to send, size is always ICE_PKG_BUF_SIZE
+ *
+ * Send the next hunk of data to FW, retrying if needed.
+ *
+ * Note: must be called once more with a NULL @hunk to finish up; that final
+ * call sets the "last" bit of the AQ request. Afterwards @ctx->hdr is
+ * cleared while @hw remains valid.
+ *
+ * Return: %ICE_DDP_PKG_SUCCESS if there were no problems; the sticky
+ * @ctx->err otherwise.
+ */
+static enum ice_ddp_state ice_ddp_send_hunk(struct ice_ddp_send_ctx *ctx,
+ struct ice_buf_hdr *hunk)
+{
+ struct ice_buf_hdr *prev_hunk = ctx->hdr;
+ struct ice_hw *hw = ctx->hw;
+ bool prev_was_last = !hunk;
+ enum ice_aq_err aq_err;
+ u32 offset, info;
+ int attempt, err;
+
+ if (ctx->err)
+ return ctx->err;
+
+ ctx->hdr = hunk;
+ if (!prev_hunk)
+ return ICE_DDP_PKG_SUCCESS; /* no problem so far */
+
+ for (attempt = 0; attempt < 5; attempt++) {
+ if (attempt)
+ msleep(20);
+
+ err = ice_aq_download_pkg(hw, prev_hunk, ICE_PKG_BUF_SIZE,
+ prev_was_last, &offset, &info, NULL);
+
+ aq_err = hw->adminq.sq_last_status;
+ if (aq_err != ICE_AQ_RC_ENOSEC && aq_err != ICE_AQ_RC_EBADSIG)
+ break;
+ }
+
+ if (err) {
+ ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
+ err, offset, info);
+ ctx->err = ice_map_aq_err_to_ddp_state(aq_err);
+ } else if (attempt) {
+ dev_dbg(ice_hw_to_dev(hw),
+ "ice_aq_download_pkg number of retries: %d\n", attempt);
+ }
+
+ return ctx->err;
+}
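The helper deliberately lags one buffer behind the caller so that the terminating NULL call can flag the final real hunk. Schematically:

    /* Illustrative call sequence (not driver code): */
    ice_ddp_send_hunk(&ctx, h1);    /* nothing sent yet; h1 buffered    */
    ice_ddp_send_hunk(&ctx, h2);    /* h1 sent with last_buf = false    */
    ice_ddp_send_hunk(&ctx, NULL);  /* h2 sent with last_buf = true     */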
+
+/**
+ * ice_dwnld_cfg_bufs_no_lock - send configuration buffers to FW
+ * @ctx: context of the current buffers section to send
+ * @bufs: pointer to an array of buffers
+ * @start: buffer index of first buffer to download
+ * @count: the number of buffers to download
+ *
+ * Downloads package configuration buffers to the firmware. Metadata buffers
+ * are skipped, and the first metadata buffer found indicates that the rest
+ * of the buffers are all metadata buffers.
+ */
+static enum ice_ddp_state
+ice_dwnld_cfg_bufs_no_lock(struct ice_ddp_send_ctx *ctx, struct ice_buf *bufs,
+ u32 start, u32 count)
+{
+ struct ice_buf_hdr *bh;
+ enum ice_ddp_state err;
+
+ if (!bufs || !count) {
+ ice_ddp_send_ctx_set_err(ctx, ICE_DDP_PKG_ERR);
+ return ICE_DDP_PKG_ERR;
+ }
+
+ bufs += start;
+
+ for (int i = 0; i < count; i++, bufs++) {
+ bh = (struct ice_buf_hdr *)bufs;
+ /* Metadata buffers should not be sent to FW,
+ * their presence means "we are done here".
+ */
+ if (ice_is_buffer_metadata(bh))
+ break;
+
+ err = ice_ddp_send_hunk(ctx, bh);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
* ice_get_pkg_seg_by_idx
* @pkg_hdr: pointer to the package header to be searched
* @idx: index of segment
@@ -1270,136 +1395,20 @@ ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx,
}
/**
- * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
- * @buf: pointer to buffer header
- */
-static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf)
-{
- if (le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF)
- return true;
-
- return false;
-}
-
-/**
- * ice_is_last_download_buffer
- * @buf: pointer to current buffer header
- * @idx: index of the buffer in the current sequence
- * @count: the buffer count in the current sequence
- *
- * Note: this routine should only be called if the buffer is not the last buffer
- */
-static bool
-ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count)
-{
- struct ice_buf *next_buf;
-
- if ((idx + 1) == count)
- return true;
-
- /* A set metadata flag in the next buffer will signal that the current
- * buffer will be the last buffer downloaded
- */
- next_buf = ((struct ice_buf *)buf) + 1;
-
- return ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf);
-}
-
-/**
- * ice_dwnld_cfg_bufs_no_lock
- * @hw: pointer to the hardware structure
- * @bufs: pointer to an array of buffers
- * @start: buffer index of first buffer to download
- * @count: the number of buffers to download
- * @indicate_last: if true, then set last buffer flag on last buffer download
- *
- * Downloads package configuration buffers to the firmware. Metadata buffers
- * are skipped, and the first metadata buffer found indicates that the rest
- * of the buffers are all metadata buffers.
- */
-static enum ice_ddp_state
-ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
- u32 count, bool indicate_last)
-{
- enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
- struct ice_buf_hdr *bh;
- enum ice_aq_err err;
- u32 offset, info, i;
-
- if (!bufs || !count)
- return ICE_DDP_PKG_ERR;
-
- /* If the first buffer's first section has its metadata bit set
- * then there are no buffers to be downloaded, and the operation is
- * considered a success.
- */
- bh = (struct ice_buf_hdr *)(bufs + start);
- if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
- return ICE_DDP_PKG_SUCCESS;
-
- for (i = 0; i < count; i++) {
- bool last = false;
- int try_cnt = 0;
- int status;
-
- bh = (struct ice_buf_hdr *)(bufs + start + i);
-
- if (indicate_last)
- last = ice_is_last_download_buffer(bh, i, count);
-
- while (1) {
- status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE,
- last, &offset, &info,
- NULL);
- if (hw->adminq.sq_last_status != ICE_AQ_RC_ENOSEC &&
- hw->adminq.sq_last_status != ICE_AQ_RC_EBADSIG)
- break;
-
- try_cnt++;
-
- if (try_cnt == 5)
- break;
-
- msleep(20);
- }
-
- if (try_cnt)
- dev_dbg(ice_hw_to_dev(hw),
- "ice_aq_download_pkg number of retries: %d\n",
- try_cnt);
-
- /* Save AQ status from download package */
- if (status) {
- ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
- status, offset, info);
- err = hw->adminq.sq_last_status;
- state = ice_map_aq_err_to_ddp_state(err);
- break;
- }
-
- if (last)
- break;
- }
-
- return state;
-}
-
-/**
* ice_download_pkg_sig_seg - download a signature segment
- * @hw: pointer to the hardware structure
+ * @ctx: context of the current buffers section to send
* @seg: pointer to signature segment
*/
static enum ice_ddp_state
-ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg)
+ice_download_pkg_sig_seg(struct ice_ddp_send_ctx *ctx, struct ice_sign_seg *seg)
{
- return ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0,
- le32_to_cpu(seg->buf_tbl.buf_count),
- false);
+ return ice_dwnld_cfg_bufs_no_lock(ctx, seg->buf_tbl.buf_array, 0,
+ le32_to_cpu(seg->buf_tbl.buf_count));
}
/**
* ice_download_pkg_config_seg - download a config segment
- * @hw: pointer to the hardware structure
+ * @ctx: context of the current buffers section to send
* @pkg_hdr: pointer to package header
* @idx: segment index
* @start: starting buffer
@@ -1408,8 +1417,9 @@ ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg)
* Note: idx must reference a ICE segment
*/
static enum ice_ddp_state
-ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
- u32 idx, u32 start, u32 count)
+ice_download_pkg_config_seg(struct ice_ddp_send_ctx *ctx,
+ struct ice_pkg_hdr *pkg_hdr, u32 idx, u32 start,
+ u32 count)
{
struct ice_buf_table *bufs;
struct ice_seg *seg;
@@ -1425,46 +1435,56 @@ ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
if (start >= buf_count || start + count > buf_count)
return ICE_DDP_PKG_ERR;
- return ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count,
- true);
+ return ice_dwnld_cfg_bufs_no_lock(ctx, bufs->buf_array, start, count);
+}
+
+static bool ice_is_last_sign_seg(u32 flags)
+{
+ return !(flags & ICE_SIGN_SEG_FLAGS_VALID) || /* packages predating the flags word */
+ (flags & ICE_SIGN_SEG_FLAGS_LAST);
}
/**
* ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment
- * @hw: pointer to the hardware structure
+ * @ctx: context of the current buffers section to send
* @pkg_hdr: pointer to package header
* @idx: segment index (must be a signature segment)
*
* Note: idx must reference a signature segment
*/
static enum ice_ddp_state
-ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
- u32 idx)
+ice_dwnld_sign_and_cfg_segs(struct ice_ddp_send_ctx *ctx,
+ struct ice_pkg_hdr *pkg_hdr, u32 idx)
{
+ u32 conf_idx, start, count, flags;
enum ice_ddp_state state;
struct ice_sign_seg *seg;
- u32 conf_idx;
- u32 start;
- u32 count;
seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
if (!seg) {
state = ICE_DDP_PKG_ERR;
- goto exit;
+ ice_ddp_send_ctx_set_err(ctx, state);
+ return state;
}
count = le32_to_cpu(seg->signed_buf_count);
- state = ice_download_pkg_sig_seg(hw, seg);
+ state = ice_download_pkg_sig_seg(ctx, seg);
if (state || !count)
- goto exit;
+ return state;
conf_idx = le32_to_cpu(seg->signed_seg_idx);
start = le32_to_cpu(seg->signed_buf_start);
- state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
+ state = ice_download_pkg_config_seg(ctx, pkg_hdr, conf_idx, start,
count);
-exit:
+ /* finish up by sending last hunk with "last" flag set if requested by
+ * DDP content
+ */
+ flags = le32_to_cpu(seg->flags);
+ if (ice_is_last_sign_seg(flags))
+ state = ice_ddp_send_hunk(ctx, NULL);
+
return state;
}
@@ -1519,6 +1539,7 @@ ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
enum ice_aq_err aq_err = hw->adminq.sq_last_status;
enum ice_ddp_state state = ICE_DDP_PKG_ERR;
+ struct ice_ddp_send_ctx ctx = { .hw = hw };
int status;
u32 i;
@@ -1539,7 +1560,7 @@ ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
hw->pkg_sign_type))
continue;
- state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i);
+ state = ice_dwnld_sign_and_cfg_segs(&ctx, pkg_hdr, i);
if (state)
break;
}
@@ -1564,6 +1585,7 @@ ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
static enum ice_ddp_state
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
+ struct ice_ddp_send_ctx ctx = { .hw = hw };
enum ice_ddp_state state;
struct ice_buf_hdr *bh;
int status;
@@ -1576,7 +1598,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
* considered a success.
*/
bh = (struct ice_buf_hdr *)bufs;
- if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
+ if (ice_is_buffer_metadata(bh))
return ICE_DDP_PKG_SUCCESS;
status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
@@ -1586,7 +1608,9 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
}
- state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true);
+ ice_dwnld_cfg_bufs_no_lock(&ctx, bufs, 0, count);
+ /* finish up by sending last hunk with "last" flag set */
+ state = ice_ddp_send_hunk(&ctx, NULL);
if (!state)
state = ice_post_dwnld_pkg_actions(hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h
index 79551da2a4b0..8a2d57fc5dae 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.h
@@ -181,7 +181,10 @@ struct ice_sign_seg {
__le32 signed_seg_idx;
__le32 signed_buf_start;
__le32 signed_buf_count;
-#define ICE_SIGN_SEG_RESERVED_COUNT 44
+#define ICE_SIGN_SEG_FLAGS_VALID 0x80000000
+#define ICE_SIGN_SEG_FLAGS_LAST 0x00000001
+ __le32 flags;
+#define ICE_SIGN_SEG_RESERVED_COUNT 40
u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT];
struct ice_buf_table buf_tbl;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index 20ce32dda69c..ac7db100e2cd 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -60,11 +60,6 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
static inline void
ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi) { }
-static inline int ice_eswitch_configure(struct ice_pf *pf)
-{
- return 0;
-}
-
static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
return DEVLINK_ESWITCH_MODE_LEGACY;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index d5cc934d1359..3072634bf049 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -693,75 +693,53 @@ static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num,
struct ice_serdes_equalization_to_ethtool *ptr)
{
+ static const int tx = ICE_AQC_OP_CODE_TX_EQU;
+ static const int rx = ICE_AQC_OP_CODE_RX_EQU;
+ struct {
+ int data_in;
+ int opcode;
+ int *out;
+ } aq_params[] = {
+ { ICE_AQC_TX_EQU_PRE1, tx, &ptr->tx_equ_pre1 },
+ { ICE_AQC_TX_EQU_PRE3, tx, &ptr->tx_equ_pre3 },
+ { ICE_AQC_TX_EQU_ATTEN, tx, &ptr->tx_equ_atten },
+ { ICE_AQC_TX_EQU_POST1, tx, &ptr->tx_equ_post1 },
+ { ICE_AQC_TX_EQU_PRE2, tx, &ptr->tx_equ_pre2 },
+ { ICE_AQC_RX_EQU_PRE2, rx, &ptr->rx_equ_pre2 },
+ { ICE_AQC_RX_EQU_PRE1, rx, &ptr->rx_equ_pre1 },
+ { ICE_AQC_RX_EQU_POST1, rx, &ptr->rx_equ_post1 },
+ { ICE_AQC_RX_EQU_BFLF, rx, &ptr->rx_equ_bflf },
+ { ICE_AQC_RX_EQU_BFHF, rx, &ptr->rx_equ_bfhf },
+ { ICE_AQC_RX_EQU_DRATE, rx, &ptr->rx_equ_drate },
+ { ICE_AQC_RX_EQU_CTLE_GAINHF, rx, &ptr->rx_equ_ctle_gainhf },
+ { ICE_AQC_RX_EQU_CTLE_GAINLF, rx, &ptr->rx_equ_ctle_gainlf },
+ { ICE_AQC_RX_EQU_CTLE_GAINDC, rx, &ptr->rx_equ_ctle_gaindc },
+ { ICE_AQC_RX_EQU_CTLE_BW, rx, &ptr->rx_equ_ctle_bw },
+ { ICE_AQC_RX_EQU_DFE_GAIN, rx, &ptr->rx_equ_dfe_gain },
+ { ICE_AQC_RX_EQU_DFE_GAIN2, rx, &ptr->rx_equ_dfe_gain_2 },
+ { ICE_AQC_RX_EQU_DFE_2, rx, &ptr->rx_equ_dfe_2 },
+ { ICE_AQC_RX_EQU_DFE_3, rx, &ptr->rx_equ_dfe_3 },
+ { ICE_AQC_RX_EQU_DFE_4, rx, &ptr->rx_equ_dfe_4 },
+ { ICE_AQC_RX_EQU_DFE_5, rx, &ptr->rx_equ_dfe_5 },
+ { ICE_AQC_RX_EQU_DFE_6, rx, &ptr->rx_equ_dfe_6 },
+ { ICE_AQC_RX_EQU_DFE_7, rx, &ptr->rx_equ_dfe_7 },
+ { ICE_AQC_RX_EQU_DFE_8, rx, &ptr->rx_equ_dfe_8 },
+ { ICE_AQC_RX_EQU_DFE_9, rx, &ptr->rx_equ_dfe_9 },
+ { ICE_AQC_RX_EQU_DFE_10, rx, &ptr->rx_equ_dfe_10 },
+ { ICE_AQC_RX_EQU_DFE_11, rx, &ptr->rx_equ_dfe_11 },
+ { ICE_AQC_RX_EQU_DFE_12, rx, &ptr->rx_equ_dfe_12 },
+ };
int err;
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE1,
- ICE_AQC_OP_CODE_TX_EQU, serdes_num,
- &ptr->tx_equalization_pre1);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE3,
- ICE_AQC_OP_CODE_TX_EQU, serdes_num,
- &ptr->tx_equalization_pre3);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_ATTEN,
- ICE_AQC_OP_CODE_TX_EQU, serdes_num,
- &ptr->tx_equalization_atten);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_POST1,
- ICE_AQC_OP_CODE_TX_EQU, serdes_num,
- &ptr->tx_equalization_post1);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE2,
- ICE_AQC_OP_CODE_TX_EQU, serdes_num,
- &ptr->tx_equalization_pre2);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE2,
- ICE_AQC_OP_CODE_RX_EQU, serdes_num,
- &ptr->rx_equalization_pre2);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE1,
- ICE_AQC_OP_CODE_RX_EQU, serdes_num,
- &ptr->rx_equalization_pre1);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_POST1,
- ICE_AQC_OP_CODE_RX_EQU, serdes_num,
- &ptr->rx_equalization_post1);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_BFLF,
- ICE_AQC_OP_CODE_RX_EQU, serdes_num,
- &ptr->rx_equalization_bflf);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_BFHF,
- ICE_AQC_OP_CODE_RX_EQU, serdes_num,
- &ptr->rx_equalization_bfhf);
- if (err)
- return err;
-
- err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_DRATE,
- ICE_AQC_OP_CODE_RX_EQU, serdes_num,
- &ptr->rx_equalization_drate);
- if (err)
- return err;
+ for (int i = 0; i < ARRAY_SIZE(aq_params); i++) {
+ err = ice_aq_get_phy_equalization(hw, aq_params[i].data_in,
+ aq_params[i].opcode,
+ serdes_num, aq_params[i].out);
+ if (err)
+ break;
+ }
- return 0;
+ return err;
}
/**
@@ -4716,6 +4694,81 @@ static void ice_get_fec_stats(struct net_device *netdev,
pi->lport, err);
}
+#define ICE_ETHTOOL_PFR (ETH_RESET_IRQ | ETH_RESET_DMA | \
+ ETH_RESET_FILTER | ETH_RESET_OFFLOAD)
+
+#define ICE_ETHTOOL_CORER ((ICE_ETHTOOL_PFR | ETH_RESET_RAM) << \
+ ETH_RESET_SHARED_SHIFT)
+
+#define ICE_ETHTOOL_GLOBR (ICE_ETHTOOL_CORER | \
+ (ETH_RESET_MAC << ETH_RESET_SHARED_SHIFT) | \
+ (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))
+
+#define ICE_ETHTOOL_VFR ICE_ETHTOOL_PFR
+
+/**
+ * ice_ethtool_reset - triggers a given type of reset
+ * @dev: network interface device structure
+ * @flags: set of reset flags
+ *
+ * Return: 0 on success, -EOPNOTSUPP when an unsupported set of flags is used.
+ */
+static int ice_ethtool_reset(struct net_device *dev, u32 *flags)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ struct ice_pf *pf = np->vsi->back;
+ enum ice_reset_req reset;
+
+ switch (*flags) {
+ case ICE_ETHTOOL_CORER:
+ reset = ICE_RESET_CORER;
+ break;
+ case ICE_ETHTOOL_GLOBR:
+ reset = ICE_RESET_GLOBR;
+ break;
+ case ICE_ETHTOOL_PFR:
+ reset = ICE_RESET_PFR;
+ break;
+ default:
+ netdev_info(dev, "Unsupported set of ethtool flags");
+ return -EOPNOTSUPP;
+ }
+
+ ice_schedule_reset(pf, reset);
+
+ *flags = 0;
+
+ return 0;
+}
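Assuming the standard ethtool_reset_flags bit values from include/uapi/linux/ethtool.h (IRQ 0x2, DMA 0x4, FILTER 0x8, OFFLOAD 0x10, MAC 0x20, PHY 0x40, RAM 0x80, shared components shifted left by 16), the accepted masks evaluate to:

    /* Worked values under the assumed UAPI bit layout: */
    ICE_ETHTOOL_PFR    /* == 0x0000001e: irq | dma | filter | offload */
    ICE_ETHTOOL_CORER  /* == 0x009e0000: (PFR | ram) shared-shifted   */
    ICE_ETHTOOL_GLOBR  /* == 0x00be0000: CORER | shared mac | phy     */

A PF reset could then be requested with something like `ethtool --reset <ifname> flags 0x1e` (command form per ethtool(8); verify against the installed ethtool version).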
+
+/**
+ * ice_repr_ethtool_reset - triggers a VF reset
+ * @dev: network interface device structure
+ * @flags: set of reset flags
+ *
+ * Return: 0 on success,
+ * -EOPNOTSUPP when an unsupported set of flags is used,
+ * -EBUSY when the VF is not ready for reset.
+ */
+static int ice_repr_ethtool_reset(struct net_device *dev, u32 *flags)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(dev);
+ struct ice_vf *vf;
+
+ if (repr->type != ICE_REPR_TYPE_VF ||
+ *flags != ICE_ETHTOOL_VFR)
+ return -EOPNOTSUPP;
+
+ vf = repr->vf;
+
+ if (ice_check_vf_ready_for_cfg(vf))
+ return -EBUSY;
+
+ *flags = 0;
+
+ return ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
+}
+
static const struct ethtool_ops ice_ethtool_ops = {
.cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
@@ -4752,6 +4805,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.nway_reset = ice_nway_reset,
.get_pauseparam = ice_get_pauseparam,
.set_pauseparam = ice_set_pauseparam,
+ .reset = ice_ethtool_reset,
.get_rxfh_key_size = ice_get_rxfh_key_size,
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
.get_rxfh = ice_get_rxfh,
@@ -4804,6 +4858,7 @@ static const struct ethtool_ops ice_ethtool_repr_ops = {
.get_strings = ice_repr_get_strings,
.get_ethtool_stats = ice_repr_get_ethtool_stats,
.get_sset_count = ice_repr_get_sset_count,
+ .reset = ice_repr_ethtool_reset,
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h
index 9acccae38625..8f2ad1c172c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.h
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h
@@ -10,17 +10,34 @@ struct ice_phy_type_to_ethtool {
};
struct ice_serdes_equalization_to_ethtool {
- int rx_equalization_pre2;
- int rx_equalization_pre1;
- int rx_equalization_post1;
- int rx_equalization_bflf;
- int rx_equalization_bfhf;
- int rx_equalization_drate;
- int tx_equalization_pre1;
- int tx_equalization_pre3;
- int tx_equalization_atten;
- int tx_equalization_post1;
- int tx_equalization_pre2;
+ int rx_equ_pre2;
+ int rx_equ_pre1;
+ int rx_equ_post1;
+ int rx_equ_bflf;
+ int rx_equ_bfhf;
+ int rx_equ_drate;
+ int rx_equ_ctle_gainhf;
+ int rx_equ_ctle_gainlf;
+ int rx_equ_ctle_gaindc;
+ int rx_equ_ctle_bw;
+ int rx_equ_dfe_gain;
+ int rx_equ_dfe_gain_2;
+ int rx_equ_dfe_2;
+ int rx_equ_dfe_3;
+ int rx_equ_dfe_4;
+ int rx_equ_dfe_5;
+ int rx_equ_dfe_6;
+ int rx_equ_dfe_7;
+ int rx_equ_dfe_8;
+ int rx_equ_dfe_9;
+ int rx_equ_dfe_10;
+ int rx_equ_dfe_11;
+ int rx_equ_dfe_12;
+ int tx_equ_pre1;
+ int tx_equ_pre3;
+ int tx_equ_atten;
+ int tx_equ_post1;
+ int tx_equ_pre2;
};
struct ice_regdump_to_ethtool {
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 90b9b0993122..28b0897adf32 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -23,9 +23,6 @@ int
ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
unsigned long *bm, struct list_head *fv_list);
int
-ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
-u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
-int
ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd);
bool
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index c8ea1af51ad3..f02e8ca55375 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -397,8 +397,8 @@ bool ice_gnss_is_gps_present(struct ice_hw *hw)
int err;
u8 data;
- err = ice_read_pca9575_reg_e810t(hw, ICE_PCA9575_P0_IN, &data);
- if (err || !!(data & ICE_E810T_P0_GNSS_PRSNT_N))
+ err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data);
+ if (err || !!(data & ICE_P0_GNSS_PRSNT_N))
return false;
} else {
return false;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 91cbae1eec89..dc88aea9f473 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -6,6 +6,14 @@
#ifndef _ICE_HW_AUTOGEN_H_
#define _ICE_HW_AUTOGEN_H_
+#define GLCOMM_QUANTA_PROF(_i) (0x002D2D68 + ((_i) * 4))
+#define GLCOMM_QUANTA_PROF_MAX_INDEX 15
+#define GLCOMM_QUANTA_PROF_QUANTA_SIZE_S 0
+#define GLCOMM_QUANTA_PROF_QUANTA_SIZE_M ICE_M(0x3FFF, 0)
+#define GLCOMM_QUANTA_PROF_MAX_CMD_S 16
+#define GLCOMM_QUANTA_PROF_MAX_CMD_M ICE_M(0xFF, 16)
+#define GLCOMM_QUANTA_PROF_MAX_DESC_S 24
+#define GLCOMM_QUANTA_PROF_MAX_DESC_M ICE_M(0x3F, 24)
#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD_HEAD_S 0
@@ -539,5 +547,8 @@
#define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
+#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000
+#define E830_MBX_VF_DEC_TRIG(_VF) (0x00233800 + (_VF) * 4)
+#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(_VF) (0x00233000 + (_VF) * 4)
#endif /* _ICE_HW_AUTOGEN_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 06e712cdc3d9..a7d45a8ce7ac 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2777,8 +2777,10 @@ void ice_napi_add(struct ice_vsi *vsi)
return;
ice_for_each_q_vector(vsi, v_idx)
- netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
- ice_napi_poll);
+ netif_napi_add_config(vsi->netdev,
+ &vsi->q_vectors[v_idx]->napi,
+ ice_napi_poll,
+ v_idx);
}
/**
@@ -3880,6 +3882,9 @@ void ice_init_feature_support(struct ice_pf *pf)
default:
break;
}
+
+ if (pf->hw.mac_type == ICE_MAC_E830)
+ ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 1a6cfc8693ce..10d6fc479a32 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -88,8 +88,6 @@ void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
void ice_write_itr(struct ice_ring_container *rc, u16 itr);
void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
-int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
-
bool ice_is_safe_mode(struct ice_pf *pf);
bool ice_is_rdma_ena(struct ice_pf *pf);
bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index b1e7727b8677..1eaa4428fd24 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1546,12 +1546,20 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
ice_vf_lan_overflow_event(pf, &event);
break;
case ice_mbx_opc_send_msg_to_pf:
- data.num_msg_proc = i;
- data.num_pending_arq = pending;
- data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
- data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
+ if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) {
+ ice_vc_process_vf_msg(pf, &event, NULL);
+ ice_mbx_vf_dec_trig_e830(hw, &event);
+ } else {
+ u16 val = hw->mailboxq.num_rq_entries;
+
+ data.max_num_msgs_mbx = val;
+ val = ICE_MBX_OVERFLOW_WATERMARK;
+ data.async_watermark_val = val;
+ data.num_msg_proc = i;
+ data.num_pending_arq = pending;
- ice_vc_process_vf_msg(pf, &event, &data);
+ ice_vc_process_vf_msg(pf, &event, &data);
+ }
break;
case ice_aqc_opc_fw_logs_event:
ice_get_fwlog_data(pf, &event);
@@ -4082,7 +4090,11 @@ static int ice_init_pf(struct ice_pf *pf)
mutex_init(&pf->vfs.table_lock);
hash_init(pf->vfs.table);
- ice_mbx_init_snapshot(&pf->hw);
+ if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+ wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH,
+ ICE_MBX_OVERFLOW_WATERMARK);
+ else
+ ice_mbx_init_snapshot(&pf->hw);
xa_init(&pf->dyn_ports);
xa_init(&pf->sf_nums);
@@ -4543,6 +4555,34 @@ ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
}
/**
+ * ice_init_supported_rxdids - Initialize supported Rx descriptor IDs
+ * @hw: pointer to the hardware structure
+ * @pf: pointer to pf structure
+ *
+ * The pf->supported_rxdids bitmap is used to indicate to VFs which descriptor
+ * formats the PF hardware supports. The exact list of supported RXDIDs
+ * depends on the loaded DDP package. The IDs can be determined by reading the
+ * GLFLXP_RXDID_FLAGS register after the DDP package is loaded.
+ *
+ * Note that the legacy 32-byte RXDID 0 is always supported but is not listed
+ * in the DDP package. The 16-byte legacy descriptor is never supported by
+ * VFs.
+ */
+static void ice_init_supported_rxdids(struct ice_hw *hw, struct ice_pf *pf)
+{
+ pf->supported_rxdids = BIT(ICE_RXDID_LEGACY_1);
+
+ for (int i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
+ u32 regval;
+
+ regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
+ if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
+ & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
+ pf->supported_rxdids |= BIT(i);
+ }
+}
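Consumers of the bitmap then need only a bit test; for instance, validating a VF-requested descriptor ID might look like this sketch (requested_rxdid is a hypothetical variable):

    /* Sketch: reject a descriptor format the loaded DDP did not enable */
    if (requested_rxdid >= ICE_FLEX_DESC_RXDID_MAX_NUM ||
        !(pf->supported_rxdids & BIT(requested_rxdid)))
            return -EINVAL;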
+
+/**
* ice_init_ddp_config - DDP related configuration
* @hw: pointer to the hardware structure
* @pf: pointer to pf structure
@@ -4576,6 +4616,9 @@ static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf)
ice_load_pkg(firmware, pf);
release_firmware(firmware);
+ /* Initialize the supported Rx descriptor IDs after loading DDP */
+ ice_init_supported_rxdids(hw, pf);
+
return 0;
}
@@ -5888,7 +5931,7 @@ static int __init ice_module_init(void)
ice_adv_lnk_speed_maps_init();
- ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
+ ice_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
return status;
@@ -6113,12 +6156,14 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
* @addr: the MAC address entry being added
* @vid: VLAN ID
* @flags: instructions from stack about fdb operation
+ * @notified: whether notification was emitted
* @extack: netlink extended ack
*/
static int
ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
struct net_device *dev, const unsigned char *addr, u16 vid,
- u16 flags, struct netlink_ext_ack __always_unused *extack)
+ u16 flags, bool *notified,
+ struct netlink_ext_ack __always_unused *extack)
{
int err;
@@ -6152,12 +6197,14 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
* @dev: the net device pointer
* @addr: the MAC address entry being added
* @vid: VLAN ID
+ * @notified: whether notification was emitted
* @extack: netlink extended ack
*/
static int
ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr,
- __always_unused u16 vid, struct netlink_ext_ack *extack)
+ __always_unused u16 vid, bool *notified,
+ struct netlink_ext_ack *extack)
{
int err;
@@ -6512,8 +6559,7 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
if (changed & NETIF_F_HW_TC) {
bool ena = !!(features & NETIF_F_HW_TC);
- ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
- clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ assign_bit(ICE_FLAG_CLS_FLOWER, pf->flags, ena);
}
if (changed & NETIF_F_LOOPBACK)
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index ef2e858f49bb..a999fface272 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -4,253 +4,187 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_trace.h"
+#include "ice_cgu_regs.h"
+
+static const char ice_pin_names[][64] = {
+ "SDP0",
+ "SDP1",
+ "SDP2",
+ "SDP3",
+ "TIME_SYNC",
+ "1PPS"
+};
-#define E810_OUT_PROP_DELAY_NS 1
+static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
+ /* name, gpio */
+ { TIME_SYNC, { 4, -1 }},
+ { ONE_PPS, { -1, 5 }},
+};
-static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
- /* name idx func chan */
- { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } },
- { "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } },
- { "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
- { "SMA2", SMA2, PTP_PF_NONE, 2, { 0, } },
- { "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
+static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
+ /* name, gpio */
+ { SDP0, { 0, 0 }},
+ { SDP1, { 1, 1 }},
+ { SDP2, { 2, 2 }},
+ { SDP3, { 3, 3 }},
+ { TIME_SYNC, { 4, -1 }},
+ { ONE_PPS, { -1, 5 }},
};
-/**
- * ice_get_sma_config_e810t
- * @hw: pointer to the hw struct
- * @ptp_pins: pointer to the ptp_pin_desc struture
- *
- * Read the configuration of the SMA control logic and put it into the
- * ptp_pin_desc structure
- */
-static int
-ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
-{
- u8 data, i;
- int status;
+static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
+ /* name, gpio */
+ { SDP0, { 0, 0 }},
+ { SDP1, { 1, 1 }},
+ { SDP2, { 2, 2 }},
+ { SDP3, { 3, 3 }},
+ { ONE_PPS, { -1, 5 }},
+};
- /* Read initial pin state */
- status = ice_read_sma_ctrl_e810t(hw, &data);
- if (status)
- return status;
+static const char ice_pin_names_nvm[][64] = {
+ "GNSS",
+ "SMA1",
+ "U.FL1",
+ "SMA2",
+ "U.FL2",
+};
- /* initialize with defaults */
- for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
- strscpy(ptp_pins[i].name, ice_pin_desc_e810t[i].name,
- sizeof(ptp_pins[i].name));
- ptp_pins[i].index = ice_pin_desc_e810t[i].index;
- ptp_pins[i].func = ice_pin_desc_e810t[i].func;
- ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
- }
+static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = {
+ /* name, gpio */
+ { GNSS, { 1, -1 }},
+ { SMA1, { 1, 0 }},
+ { UFL1, { -1, 0 }},
+ { SMA2, { 3, 2 }},
+ { UFL2, { 3, -1 }},
+};
- /* Parse SMA1/UFL1 */
- switch (data & ICE_SMA1_MASK_E810T) {
- case ICE_SMA1_MASK_E810T:
- default:
- ptp_pins[SMA1].func = PTP_PF_NONE;
- ptp_pins[UFL1].func = PTP_PF_NONE;
- break;
- case ICE_SMA1_DIR_EN_E810T:
- ptp_pins[SMA1].func = PTP_PF_PEROUT;
- ptp_pins[UFL1].func = PTP_PF_NONE;
- break;
- case ICE_SMA1_TX_EN_E810T:
- ptp_pins[SMA1].func = PTP_PF_EXTTS;
- ptp_pins[UFL1].func = PTP_PF_NONE;
- break;
- case 0:
- ptp_pins[SMA1].func = PTP_PF_EXTTS;
- ptp_pins[UFL1].func = PTP_PF_PEROUT;
- break;
- }
+static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
+{
+ return !pf->adapter ? NULL : pf->adapter->ctrl_pf;
+}
- /* Parse SMA2/UFL2 */
- switch (data & ICE_SMA2_MASK_E810T) {
- case ICE_SMA2_MASK_E810T:
- default:
- ptp_pins[SMA2].func = PTP_PF_NONE;
- ptp_pins[UFL2].func = PTP_PF_NONE;
- break;
- case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
- ptp_pins[SMA2].func = PTP_PF_EXTTS;
- ptp_pins[UFL2].func = PTP_PF_NONE;
- break;
- case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
- ptp_pins[SMA2].func = PTP_PF_PEROUT;
- ptp_pins[UFL2].func = PTP_PF_NONE;
- break;
- case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
- ptp_pins[SMA2].func = PTP_PF_NONE;
- ptp_pins[UFL2].func = PTP_PF_EXTTS;
- break;
- case ICE_SMA2_DIR_EN_E810T:
- ptp_pins[SMA2].func = PTP_PF_PEROUT;
- ptp_pins[UFL2].func = PTP_PF_EXTTS;
- break;
- }
+static struct ice_ptp *ice_get_ctrl_ptp(struct ice_pf *pf)
+{
+ struct ice_pf *ctrl_pf = ice_get_ctrl_pf(pf);
- return 0;
+ return !ctrl_pf ? NULL : &ctrl_pf->ptp;
}
/**
- * ice_ptp_set_sma_config_e810t
- * @hw: pointer to the hw struct
- * @ptp_pins: pointer to the ptp_pin_desc struture
+ * ice_ptp_find_pin_idx - Find pin index in ptp_pin_desc
+ * @pf: Board private structure
+ * @func: Pin function
+ * @chan: GPIO channel
*
- * Set the configuration of the SMA control logic based on the configuration in
- * num_pins parameter
+ * Return: index of the matching pin if present, -1 otherwise
*/
-static int
-ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
- const struct ptp_pin_desc *ptp_pins)
+static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func,
+ unsigned int chan)
{
- int status;
- u8 data;
+ const struct ptp_clock_info *info = &pf->ptp.info;
+ int i;
- /* SMA1 and UFL1 cannot be set to TX at the same time */
- if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
- ptp_pins[UFL1].func == PTP_PF_PEROUT)
- return -EINVAL;
+ for (i = 0; i < info->n_pins; i++) {
+ if (info->pin_config[i].func == func &&
+ info->pin_config[i].chan == chan)
+ return i;
+ }
- /* SMA2 and UFL2 cannot be set to RX at the same time */
- if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
- ptp_pins[UFL2].func == PTP_PF_EXTTS)
- return -EINVAL;
+ return -1;
+}
- /* Read initial pin state value */
- status = ice_read_sma_ctrl_e810t(hw, &data);
- if (status)
- return status;
-
- /* Set the right sate based on the desired configuration */
- data &= ~ICE_SMA1_MASK_E810T;
- if (ptp_pins[SMA1].func == PTP_PF_NONE &&
- ptp_pins[UFL1].func == PTP_PF_NONE) {
- dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
- data |= ICE_SMA1_MASK_E810T;
- } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
- ptp_pins[UFL1].func == PTP_PF_NONE) {
- dev_info(ice_hw_to_dev(hw), "SMA1 RX");
- data |= ICE_SMA1_TX_EN_E810T;
- } else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
- ptp_pins[UFL1].func == PTP_PF_PEROUT) {
- /* U.FL 1 TX will always enable SMA 1 RX */
- dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
- } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
- ptp_pins[UFL1].func == PTP_PF_PEROUT) {
- dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
- } else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
- ptp_pins[UFL1].func == PTP_PF_NONE) {
- dev_info(ice_hw_to_dev(hw), "SMA1 TX");
- data |= ICE_SMA1_DIR_EN_E810T;
- }
-
- data &= ~ICE_SMA2_MASK_E810T;
- if (ptp_pins[SMA2].func == PTP_PF_NONE &&
- ptp_pins[UFL2].func == PTP_PF_NONE) {
- dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
- data |= ICE_SMA2_MASK_E810T;
- } else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
- ptp_pins[UFL2].func == PTP_PF_NONE) {
- dev_info(ice_hw_to_dev(hw), "SMA2 RX");
- data |= (ICE_SMA2_TX_EN_E810T |
- ICE_SMA2_UFL2_RX_DIS_E810T);
- } else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
- ptp_pins[UFL2].func == PTP_PF_EXTTS) {
- dev_info(ice_hw_to_dev(hw), "UFL2 RX");
- data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
- } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
- ptp_pins[UFL2].func == PTP_PF_NONE) {
- dev_info(ice_hw_to_dev(hw), "SMA2 TX");
- data |= (ICE_SMA2_DIR_EN_E810T |
- ICE_SMA2_UFL2_RX_DIS_E810T);
- } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
- ptp_pins[UFL2].func == PTP_PF_EXTTS) {
- dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
- data |= ICE_SMA2_DIR_EN_E810T;
- }
-
- return ice_write_sma_ctrl_e810t(hw, data);
-}
-
-/**
- * ice_ptp_set_sma_e810t
- * @info: the driver's PTP info structure
- * @pin: pin index in kernel structure
- * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
- *
- * Set the configuration of a single SMA pin
+/**
+ * ice_ptp_update_sma_data - update SMA pins data according to pins setup
+ * @pf: Board private structure
+ * @sma_pins: parsed SMA pins status
+ * @data: SMA data to update
*/
-static int
-ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
- enum ptp_pin_function func)
+static void ice_ptp_update_sma_data(struct ice_pf *pf, unsigned int sma_pins[],
+ u8 *data)
{
- struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
- struct ice_pf *pf = ptp_info_to_pf(info);
- struct ice_hw *hw = &pf->hw;
- int err;
+ const char *state1, *state2;
- if (pin < SMA1 || func > PTP_PF_PEROUT)
- return -EOPNOTSUPP;
-
- err = ice_get_sma_config_e810t(hw, ptp_pins);
- if (err)
- return err;
-
- /* Disable the same function on the other pin sharing the channel */
- if (pin == SMA1 && ptp_pins[UFL1].func == func)
- ptp_pins[UFL1].func = PTP_PF_NONE;
- if (pin == UFL1 && ptp_pins[SMA1].func == func)
- ptp_pins[SMA1].func = PTP_PF_NONE;
-
- if (pin == SMA2 && ptp_pins[UFL2].func == func)
- ptp_pins[UFL2].func = PTP_PF_NONE;
- if (pin == UFL2 && ptp_pins[SMA2].func == func)
- ptp_pins[SMA2].func = PTP_PF_NONE;
+ /* Set the right state based on the desired configuration.
+ * When bit is set, functionality is disabled.
+ */
+ *data &= ~ICE_ALL_SMA_MASK;
+ if (!sma_pins[UFL1 - 1]) {
+ if (sma_pins[SMA1 - 1] == PTP_PF_EXTTS) {
+ state1 = "SMA1 Rx, U.FL1 disabled";
+ *data |= ICE_SMA1_TX_EN;
+ } else if (sma_pins[SMA1 - 1] == PTP_PF_PEROUT) {
+ state1 = "SMA1 Tx U.FL1 disabled";
+ *data |= ICE_SMA1_DIR_EN;
+ } else {
+ state1 = "SMA1 disabled, U.FL1 disabled";
+ *data |= ICE_SMA1_MASK;
+ }
+ } else {
+ /* U.FL1 Tx will always enable SMA1 Rx */
+ state1 = "SMA1 Rx, U.FL1 Tx";
+ }
- /* Set up new pin function in the temp table */
- ptp_pins[pin].func = func;
+ if (!sma_pins[UFL2 - 1]) {
+ if (sma_pins[SMA2 - 1] == PTP_PF_EXTTS) {
+ state2 = "SMA2 Rx, U.FL2 disabled";
+ *data |= ICE_SMA2_TX_EN | ICE_SMA2_UFL2_RX_DIS;
+ } else if (sma_pins[SMA2 - 1] == PTP_PF_PEROUT) {
+ state2 = "SMA2 Tx, U.FL2 disabled";
+ *data |= ICE_SMA2_DIR_EN | ICE_SMA2_UFL2_RX_DIS;
+ } else {
+ state2 = "SMA2 disabled, U.FL2 disabled";
+ *data |= ICE_SMA2_MASK;
+ }
+ } else {
+ if (!sma_pins[SMA2 - 1]) {
+ state2 = "SMA2 disabled, U.FL2 Rx";
+ *data |= ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN;
+ } else {
+ state2 = "SMA2 Tx, U.FL2 Rx";
+ *data |= ICE_SMA2_DIR_EN;
+ }
+ }
- return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
+ dev_dbg(ice_pf_to_dev(pf), "%s, %s\n", state1, state2);
}
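Since the register bits disable functionality when set, reading the branch ladder above as a truth table helps. A compact sketch for the SMA1/U.FL1 half, with illustrative mask values rather than the real SMA control layout:

#include <stdio.h>

enum { PF_NONE, PF_EXTTS, PF_PEROUT };	/* stand-ins for ptp_pin_function */

#define SMA1_DIR_EN	0x1	/* illustrative ICE_SMA1_DIR_EN */
#define SMA1_TX_EN	0x2	/* illustrative ICE_SMA1_TX_EN */
#define SMA1_MASK	(SMA1_DIR_EN | SMA1_TX_EN)

int main(void)
{
	/* U.FL1 idle: SMA1's function alone selects the data bits */
	for (int sma1 = PF_NONE; sma1 <= PF_PEROUT; sma1++) {
		unsigned int data = 0;

		if (sma1 == PF_EXTTS)
			data |= SMA1_TX_EN;	/* "SMA1 Rx" */
		else if (sma1 == PF_PEROUT)
			data |= SMA1_DIR_EN;	/* "SMA1 Tx" */
		else
			data |= SMA1_MASK;	/* both disabled */
		printf("SMA1 func %d -> data 0x%x\n", sma1, data);
	}
	return 0;
}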
/**
- * ice_verify_pin_e810t
- * @info: the driver's PTP info structure
- * @pin: Pin index
- * @func: Assigned function
- * @chan: Assigned channel
+ * ice_ptp_set_sma_cfg - set the configuration of the SMA control logic
+ * @pf: Board private structure
*
- * Verify if pin supports requested pin function. If the Check pins consistency.
- * Reconfigure the SMA logic attached to the given pin to enable its
- * desired functionality
+ * Return: 0 on success, negative error code otherwise
*/
-static int
-ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
- enum ptp_pin_function func, unsigned int chan)
+static int ice_ptp_set_sma_cfg(struct ice_pf *pf)
{
- /* Don't allow channel reassignment */
- if (chan != ice_pin_desc_e810t[pin].chan)
- return -EOPNOTSUPP;
+ const struct ice_ptp_pin_desc *ice_pins = pf->ptp.ice_pin_desc;
+ struct ptp_pin_desc *pins = pf->ptp.pin_desc;
+ unsigned int sma_pins[ICE_SMA_PINS_NUM] = {};
+ int err;
+ u8 data;
- /* Check if functions are properly assigned */
- switch (func) {
- case PTP_PF_NONE:
- break;
- case PTP_PF_EXTTS:
- if (pin == UFL1)
- return -EOPNOTSUPP;
- break;
- case PTP_PF_PEROUT:
- if (pin == UFL2 || pin == GNSS)
- return -EOPNOTSUPP;
- break;
- case PTP_PF_PHYSYNC:
- return -EOPNOTSUPP;
- }
+ /* Read initial pin state value */
+ err = ice_read_sma_ctrl(&pf->hw, &data);
+ if (err)
+ return err;
- return ice_ptp_set_sma_e810t(info, pin, func);
+ /* Get SMA/U.FL pins states */
+ for (int i = 0; i < pf->ptp.info.n_pins; i++)
+ if (pins[i].func) {
+ int name_idx = ice_pins[i].name_idx;
+
+ switch (name_idx) {
+ case SMA1:
+ case UFL1:
+ case SMA2:
+ case UFL2:
+ sma_pins[name_idx - 1] = pins[i].func;
+ break;
+ default:
+ continue;
+ }
+ }
+
+ ice_ptp_update_sma_data(pf, sma_pins, &data);
+ return ice_write_sma_ctrl(&pf->hw, data);
}
/**
@@ -800,8 +734,8 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
struct ice_ptp_port *port;
unsigned int i;
- mutex_lock(&pf->ptp.ports_owner.lock);
- list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) {
+ mutex_lock(&pf->adapter->ports.lock);
+ list_for_each_entry(port, &pf->adapter->ports.ports, list_node) {
struct ice_ptp_tx *tx = &port->tx;
if (!tx || !tx->init)
@@ -809,7 +743,7 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
ice_ptp_process_tx_tstamp(tx);
}
- mutex_unlock(&pf->ptp.ports_owner.lock);
+ mutex_unlock(&pf->adapter->ports.lock);
for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) {
u64 tstamp_ready;
@@ -974,7 +908,7 @@ ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
{
struct ice_ptp_port *port;
- list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member)
+ list_for_each_entry(port, &pf->adapter->ports.ports, list_node)
ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
}
@@ -1363,7 +1297,7 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
mutex_lock(&ptp_port->ps_lock);
- switch (hw->ptp.phy_model) {
+ switch (ice_get_phy_model(hw)) {
case ICE_PHY_ETH56G:
err = ice_stop_phy_timer_eth56g(hw, port, true);
break;
@@ -1409,7 +1343,7 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
mutex_lock(&ptp_port->ps_lock);
- switch (hw->ptp.phy_model) {
+ switch (ice_get_phy_model(hw)) {
case ICE_PHY_ETH56G:
err = ice_start_phy_timer_eth56g(hw, port);
break;
@@ -1480,8 +1414,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
/* Skip HW writes if reset is in progress */
if (pf->hw.reset_ongoing)
return;
-
- switch (hw->ptp.phy_model) {
+ switch (ice_get_phy_model(hw)) {
case ICE_PHY_E810:
/* Do not reconfigure E810 PHY */
return;
@@ -1514,7 +1447,7 @@ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
ice_ptp_reset_ts_memory(hw);
- switch (hw->ptp.phy_model) {
+ switch (ice_get_phy_model(hw)) {
case ICE_PHY_ETH56G: {
int port;
@@ -1553,7 +1486,7 @@ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
case ICE_PHY_UNSUP:
default:
dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
- hw->ptp.phy_model);
+ ice_get_phy_model(hw));
return -EOPNOTSUPP;
}
}
@@ -1575,10 +1508,10 @@ static void ice_ptp_restart_all_phy(struct ice_pf *pf)
{
struct list_head *entry;
- list_for_each(entry, &pf->ptp.ports_owner.ports) {
+ list_for_each(entry, &pf->adapter->ports.ports) {
struct ice_ptp_port *port = list_entry(entry,
struct ice_ptp_port,
- list_member);
+ list_node);
if (port->link_up)
ice_ptp_port_phy_restart(port);
@@ -1651,33 +1584,41 @@ void ice_ptp_extts_event(struct ice_pf *pf)
/**
* ice_ptp_cfg_extts - Configure EXTTS pin and channel
* @pf: Board private structure
- * @chan: GPIO channel (0-3)
- * @config: desired EXTTS configuration.
- * @store: If set to true, the values will be stored
+ * @rq: External timestamp request
+ * @on: Enable/disable flag
*
* Configure an external timestamp event on the requested channel.
*
- * Return: 0 on success, -EOPNOTUSPP on unsupported flags
+ * Return: 0 on success, negative error code otherwise
*/
-static int ice_ptp_cfg_extts(struct ice_pf *pf, unsigned int chan,
- struct ice_extts_channel *config, bool store)
+static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq,
+ int on)
{
- u32 func, aux_reg, gpio_reg, irq_reg;
+ u32 aux_reg, gpio_reg, irq_reg;
struct ice_hw *hw = &pf->hw;
+ unsigned int chan, gpio_pin;
+ int pin_desc_idx;
u8 tmr_idx;
/* Reject requests with unsupported flags */
- if (config->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
+ if (rq->flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
return -EOPNOTSUPP;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ chan = rq->index;
+ pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
+ if (pin_desc_idx < 0)
+ return -EIO;
+
+ gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[0];
irq_reg = rd32(hw, PFINT_OICR_ENA);
- if (config->ena) {
+ if (on) {
/* Enable the interrupt */
irq_reg |= PFINT_OICR_TSYN_EVNT_M;
aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
@@ -1686,33 +1627,38 @@ static int ice_ptp_cfg_extts(struct ice_pf *pf, unsigned int chan,
#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1)
/* set event level to requested edge */
- if (config->flags & PTP_FALLING_EDGE)
+ if (rq->flags & PTP_FALLING_EDGE)
aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
- if (config->flags & PTP_RISING_EDGE)
+ if (rq->flags & PTP_RISING_EDGE)
aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
/* Write GPIO CTL reg.
* 0x1 is input sampled by EVENT register(channel)
* + num_in_channels * tmr_idx
*/
- func = 1 + chan + (tmr_idx * 3);
- gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
- pf->ptp.ext_ts_chan |= (1 << chan);
+ gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
+ 1 + chan + (tmr_idx * 3));
} else {
+ bool last_enabled = true;
+
/* clear the values we set to reset defaults */
aux_reg = 0;
gpio_reg = 0;
- pf->ptp.ext_ts_chan &= ~(1 << chan);
- if (!pf->ptp.ext_ts_chan)
+
+ for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
+ if ((pf->ptp.extts_rqs[i].flags &
+ PTP_ENABLE_FEATURE) &&
+ i != chan) {
+ last_enabled = false;
+ }
+
+ if (last_enabled)
irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
}
wr32(hw, PFINT_OICR_ENA, irq_reg);
wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
- wr32(hw, GLGEN_GPIO_CTL(config->gpio_pin), gpio_reg);
-
- if (store)
- memcpy(&pf->ptp.extts_channels[chan], config, sizeof(*config));
+ wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);
return 0;
}
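The PIN_FUNC encoding 1 + chan + (tmr_idx * 3) maps each event channel of the owned timer to a distinct input-select value. A quick sketch of the values it produces, assuming three EXTTS channels per timer (as GLTSYN_EVNT_H_IDX_MAX suggests):

#include <stdio.h>

int main(void)
{
	/* EXTTS input-select encoding from ice_ptp_cfg_extts() */
	for (int tmr_idx = 0; tmr_idx < 2; tmr_idx++)
		for (int chan = 0; chan < 3; chan++)
			printf("tmr %d chan %d -> PIN_FUNC %d\n",
			       tmr_idx, chan, 1 + chan + tmr_idx * 3);
	return 0;
}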
@@ -1723,16 +1669,10 @@ static int ice_ptp_cfg_extts(struct ice_pf *pf, unsigned int chan,
*/
static void ice_ptp_disable_all_extts(struct ice_pf *pf)
{
- struct ice_extts_channel extts_cfg = {};
- int i;
-
- for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
- if (pf->ptp.extts_channels[i].ena) {
- extts_cfg.gpio_pin = pf->ptp.extts_channels[i].gpio_pin;
- extts_cfg.ena = false;
- ice_ptp_cfg_extts(pf, i, &extts_cfg, false);
- }
- }
+ for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
+ if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
+ ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
+ false);
synchronize_irq(pf->oicr_irq.virq);
}
@@ -1745,273 +1685,322 @@ static void ice_ptp_disable_all_extts(struct ice_pf *pf)
*/
static void ice_ptp_enable_all_extts(struct ice_pf *pf)
{
- int i;
-
- for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
- if (pf->ptp.extts_channels[i].ena)
- ice_ptp_cfg_extts(pf, i, &pf->ptp.extts_channels[i],
- false);
- }
+ for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++)
+ if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE)
+ ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i],
+ true);
}
/**
- * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
- * @pf: Board private structure
- * @chan: GPIO channel (0-3)
- * @config: desired periodic clk configuration. NULL will disable channel
- * @store: If set to true the values will be stored
+ * ice_ptp_write_perout - Write periodic wave parameters to HW
+ * @hw: pointer to the HW struct
+ * @chan: target channel
+ * @gpio_pin: target GPIO pin
+ * @start: target time to start periodic output
+ * @period: target period
*
- * Configure the internal clock generator modules to generate the clock wave of
- * specified period.
+ * Return: 0 on success, negative error code otherwise
*/
-static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
- struct ice_perout_channel *config, bool store)
+static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
+ unsigned int gpio_pin, u64 start, u64 period)
{
- u64 current_time, period, start_time, phase;
- struct ice_hw *hw = &pf->hw;
- u32 func, val, gpio_pin;
- u8 tmr_idx;
- if (config && config->flags & ~PTP_PEROUT_PHASE)
- return -EOPNOTSUPP;
-
- tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ u32 val = 0;
/* 0. Reset mode & out_en in AUX_OUT */
wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
- /* If we're disabling the output, clear out CLKO and TGT and keep
- * output level low
- */
- if (!config || !config->ena) {
- wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
- wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
- wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);
-
- val = GLGEN_GPIO_CTL_PIN_DIR_M;
- gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
- wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
-
- /* Store the value if requested */
- if (store)
- memset(&pf->ptp.perout_channels[chan], 0,
- sizeof(struct ice_perout_channel));
-
- return 0;
- }
- period = config->period;
- start_time = config->start_time;
- div64_u64_rem(start_time, period, &phase);
- gpio_pin = config->gpio_pin;
+ if (ice_is_e825c(hw)) {
+ int err;
- /* 1. Write clkout with half of required period value */
- if (period & 0x1) {
- dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
- goto err;
+ /* Enable/disable CGU 1PPS output for E825C */
+ err = ice_cgu_cfg_pps_out(hw, !!period);
+ if (err)
+ return err;
}
+ /* 1. Write perout with half of required period value.
+ * HW toggles output when source clock hits the TGT and then adds
+ * GLTSYN_CLKO value to the target, so it ends up with 50% duty cycle.
+ */
period >>= 1;
- /* For proper operation, the GLTSYN_CLKO must be larger than clock tick
+ /* For proper operation, GLTSYN_CLKO must be larger than clock tick and
+ * period has to fit in 32 bit register.
*/
#define MIN_PULSE 3
- if (period <= MIN_PULSE || period > U32_MAX) {
- dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33",
- MIN_PULSE * 2);
- goto err;
+ if (!!period && (period <= MIN_PULSE || period > U32_MAX)) {
+ dev_err(ice_hw_to_dev(hw), "CLK period ticks must be > %d && < 2^32",
+ MIN_PULSE);
+ return -EIO;
}
wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));
- /* Allow time for programming before start_time is hit */
- current_time = ice_ptp_read_src_clk_reg(pf, NULL);
-
- /* if start time is in the past start the timer at the nearest second
- * maintaining phase
- */
- if (start_time < current_time)
- start_time = roundup_u64(current_time, NSEC_PER_SEC) + phase;
-
- if (ice_is_e810(hw))
- start_time -= E810_OUT_PROP_DELAY_NS;
- else
- start_time -= ice_e82x_pps_delay(ice_e82x_time_ref(hw));
-
/* 2. Write TARGET time */
- wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
- wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));
+ wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start));
+ wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start));
/* 3. Write AUX_OUT register */
- val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
+ if (!!period)
+ val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);
/* 4. write GPIO CTL reg */
- func = 8 + chan + (tmr_idx * 4);
- val = GLGEN_GPIO_CTL_PIN_DIR_M |
- FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
+ val = GLGEN_GPIO_CTL_PIN_DIR_M;
+ if (!!period)
+ val |= FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M,
+ 8 + chan + (tmr_idx * 4));
+
wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
- /* Store the value if requested */
- if (store) {
- memcpy(&pf->ptp.perout_channels[chan], config,
- sizeof(struct ice_perout_channel));
- pf->ptp.perout_channels[chan].start_time = phase;
+ return 0;
+}
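The halved CLKO value is what yields the 50% duty cycle described in step 1: each time the source clock crosses TGT the pin toggles and TGT advances by CLKO, so two toggles span one full period. A worked sketch, assuming a 1 Hz output and an arbitrary 5 s start target:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t period = NSEC_PER_SEC;		/* requested 1 Hz output */
	uint64_t clko = period >> 1;		/* value written to GLTSYN_CLKO */
	uint64_t tgt = 5 * NSEC_PER_SEC;	/* assumed first target time */

	/* Toggles land at 5.0, 5.5, 6.0, 6.5 s: one full cycle per second */
	for (int i = 0; i < 4; i++) {
		printf("toggle %d at %llu ns\n", i, (unsigned long long)tgt);
		tgt += clko;
	}
	return 0;
}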
+
+/**
+ * ice_ptp_cfg_perout - Configure clock to generate periodic wave
+ * @pf: Board private structure
+ * @rq: Periodic output request
+ * @on: Enable/disable flag
+ *
+ * Configure the internal clock generator modules to generate the clock wave of
+ * specified period.
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
+ int on)
+{
+ u64 clk, period, start, phase;
+ struct ice_hw *hw = &pf->hw;
+ unsigned int gpio_pin;
+ int pin_desc_idx;
+
+ if (rq->flags & ~PTP_PEROUT_PHASE)
+ return -EOPNOTSUPP;
+
+ pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index);
+ if (pin_desc_idx < 0)
+ return -EIO;
+
+ gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
+ period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;
+
+ /* If we're disabling the output or period is 0, clear out CLKO and TGT
+ * and keep output level low.
+ */
+ if (!on || !period)
+ return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);
+
+ if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
+ period != NSEC_PER_SEC && hw->ptp.phy_model == ICE_PHY_E82X) {
+ dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
+ return -EOPNOTSUPP;
}
- return 0;
-err:
- dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
- return -EFAULT;
+ if (period & 0x1) {
+ dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
+ return -EIO;
+ }
+
+ start = rq->start.sec * NSEC_PER_SEC + rq->start.nsec;
+
+ /* If PTP_PEROUT_PHASE is set, rq has phase instead of start time */
+ if (rq->flags & PTP_PEROUT_PHASE)
+ phase = start;
+ else
+ div64_u64_rem(start, period, &phase);
+
+ /* If we have only phase or start time is in the past, start the timer
+ * at the next multiple of period, maintaining phase.
+ */
+ clk = ice_ptp_read_src_clk_reg(pf, NULL);
+ if (rq->flags & PTP_PEROUT_PHASE || start <= clk - ice_prop_delay(hw))
+ start = div64_u64(clk + period - 1, period) * period + phase;
+
+ /* Compensate for propagation delay from the generator to the pin. */
+ start -= ice_prop_delay(hw);
+
+ return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
}
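The start-time rounding deserves a worked example: div64_u64(clk + period - 1, period) * period (div64_u64 being the kernel's 64-bit division helper) is a ceiling to the next period boundary, and adding phase preserves the requested alignment. A sketch with illustrative values, propagation-delay compensation omitted:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t period = 1000;	/* illustrative period, ns */
	uint64_t phase = 250;	/* requested offset within each period */
	uint64_t clk = 12345;	/* current PHC time; start lies in the past */

	/* Ceiling to the next period boundary, then re-apply the phase */
	uint64_t start = (clk + period - 1) / period * period + phase;

	printf("clk %llu -> start %llu\n",
	       (unsigned long long)clk, (unsigned long long)start);
	return 0;
}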
/**
- * ice_ptp_disable_all_clkout - Disable all currently configured outputs
- * @pf: pointer to the PF structure
+ * ice_ptp_disable_all_perout - Disable all currently configured outputs
+ * @pf: Board private structure
*
* Disable all currently configured clock outputs. This is necessary before
- * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to
+ * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_perout to
* re-enable the clocks again.
*/
-static void ice_ptp_disable_all_clkout(struct ice_pf *pf)
+static void ice_ptp_disable_all_perout(struct ice_pf *pf)
{
- uint i;
-
- for (i = 0; i < pf->ptp.info.n_per_out; i++)
- if (pf->ptp.perout_channels[i].ena)
- ice_ptp_cfg_clkout(pf, i, NULL, false);
+ for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
+ if (pf->ptp.perout_rqs[i].period.sec ||
+ pf->ptp.perout_rqs[i].period.nsec)
+ ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
+ false);
}
/**
- * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs
- * @pf: pointer to the PF structure
+ * ice_ptp_enable_all_perout - Enable all configured periodic clock outputs
+ * @pf: Board private structure
*
* Enable all currently configured clock outputs. Use this after
- * ice_ptp_disable_all_clkout to reconfigure the output signals according to
+ * ice_ptp_disable_all_perout to reconfigure the output signals according to
* their configuration.
*/
-static void ice_ptp_enable_all_clkout(struct ice_pf *pf)
+static void ice_ptp_enable_all_perout(struct ice_pf *pf)
{
- uint i;
-
- for (i = 0; i < pf->ptp.info.n_per_out; i++)
- if (pf->ptp.perout_channels[i].ena)
- ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i],
- false);
+ for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++)
+ if (pf->ptp.perout_rqs[i].period.sec ||
+ pf->ptp.perout_rqs[i].period.nsec)
+ ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i],
+ true);
}
/**
- * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
- * @info: the driver's PTP info structure
- * @rq: The requested feature to change
- * @on: Enable/disable flag
+ * ice_ptp_disable_shared_pin - Disable enabled pin that shares GPIO
+ * @pf: Board private structure
+ * @pin: Pin index
+ * @func: Assigned function
+ *
+ * Return: 0 on success, negative error code otherwise
*/
-static int
-ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
- struct ptp_clock_request *rq, int on)
+static int ice_ptp_disable_shared_pin(struct ice_pf *pf, unsigned int pin,
+ enum ptp_pin_function func)
{
- struct ice_pf *pf = ptp_info_to_pf(info);
- bool sma_pres = false;
- unsigned int chan;
- u32 gpio_pin;
+ unsigned int gpio_pin;
- if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
- sma_pres = true;
+ switch (func) {
+ case PTP_PF_PEROUT:
+ gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[1];
+ break;
+ case PTP_PF_EXTTS:
+ gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[0];
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
- switch (rq->type) {
- case PTP_CLK_REQ_PEROUT:
- {
- struct ice_perout_channel clk_cfg = {};
-
- chan = rq->perout.index;
- if (sma_pres) {
- if (chan == ice_pin_desc_e810t[SMA1].chan)
- clk_cfg.gpio_pin = GPIO_20;
- else if (chan == ice_pin_desc_e810t[SMA2].chan)
- clk_cfg.gpio_pin = GPIO_22;
- else
- return -1;
- } else if (ice_is_e810t(&pf->hw)) {
- if (chan == 0)
- clk_cfg.gpio_pin = GPIO_20;
- else
- clk_cfg.gpio_pin = GPIO_22;
- } else if (chan == PPS_CLK_GEN_CHAN) {
- clk_cfg.gpio_pin = PPS_PIN_INDEX;
- } else {
- clk_cfg.gpio_pin = chan;
- }
+ for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
+ struct ptp_pin_desc *pin_desc = &pf->ptp.pin_desc[i];
+ unsigned int chan = pin_desc->chan;
- clk_cfg.flags = rq->perout.flags;
- clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
- rq->perout.period.nsec);
- clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
- rq->perout.start.nsec);
- clk_cfg.ena = !!on;
+ /* Skip pin idx from the request */
+ if (i == pin)
+ continue;
- return ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
- }
- case PTP_CLK_REQ_EXTTS:
- {
- struct ice_extts_channel extts_cfg = {};
-
- chan = rq->extts.index;
- if (sma_pres) {
- if (chan < ice_pin_desc_e810t[SMA2].chan)
- gpio_pin = GPIO_21;
- else
- gpio_pin = GPIO_23;
- } else if (ice_is_e810t(&pf->hw)) {
- if (chan == 0)
- gpio_pin = GPIO_21;
- else
- gpio_pin = GPIO_23;
- } else {
- gpio_pin = chan;
+ if (pin_desc->func == PTP_PF_PEROUT &&
+ pf->ptp.ice_pin_desc[i].gpio[1] == gpio_pin) {
+ pf->ptp.perout_rqs[chan].period.sec = 0;
+ pf->ptp.perout_rqs[chan].period.nsec = 0;
+ pin_desc->func = PTP_PF_NONE;
+ pin_desc->chan = 0;
+ dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared output GPIO pin %u\n",
+ i, gpio_pin);
+ return ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[chan],
+ false);
+ } else if (pin_desc->func == PTP_PF_EXTTS &&
+ pf->ptp.ice_pin_desc[i].gpio[0] == gpio_pin) {
+ pf->ptp.extts_rqs[chan].flags &= ~PTP_ENABLE_FEATURE;
+ pin_desc->func = PTP_PF_NONE;
+ pin_desc->chan = 0;
+ dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared input GPIO pin %u\n",
+ i, gpio_pin);
+ return ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[chan],
+ false);
}
+ }
- extts_cfg.flags = rq->extts.flags;
- extts_cfg.gpio_pin = gpio_pin;
- extts_cfg.ena = !!on;
+ return 0;
+}
- return ice_ptp_cfg_extts(pf, chan, &extts_cfg, true);
- }
+/**
+ * ice_verify_pin - verify if pin supports requested pin function
+ * @info: the driver's PTP info structure
+ * @pin: Pin index
+ * @func: Assigned function
+ * @chan: Assigned channel
+ *
+ * Return: 0 on success, -EOPNOTSUPP when function is not supported.
+ */
+static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ const struct ice_ptp_pin_desc *pin_desc;
+
+ pin_desc = &pf->ptp.ice_pin_desc[pin];
+
+ /* Is assigned function allowed? */
+ switch (func) {
+ case PTP_PF_EXTTS:
+ if (pin_desc->gpio[0] < 0)
+ return -EOPNOTSUPP;
+ break;
+ case PTP_PF_PEROUT:
+ if (pin_desc->gpio[1] < 0)
+ return -EOPNOTSUPP;
+ break;
+ case PTP_PF_NONE:
+ break;
+ case PTP_PF_PHYSYNC:
default:
return -EOPNOTSUPP;
}
+
+ /* On adapters with SMA_CTRL disable other pins that share same GPIO */
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
+ ice_ptp_disable_shared_pin(pf, pin, func);
+ pf->ptp.pin_desc[pin].func = func;
+ pf->ptp.pin_desc[pin].chan = chan;
+ return ice_ptp_set_sma_cfg(pf);
+ }
+
+ return 0;
}
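Verification hinges entirely on the per-pin gpio[] pair: gpio[0] advertises input (EXTTS) routing and gpio[1] output (PEROUT) routing, with -1 meaning that direction is not wired. A sketch using the E82X table from this patch:

#include <stdio.h>

struct pin_caps {	/* mirrors the gpio[] idea of ice_ptp_pin_desc */
	const char *name;
	int gpio[2];	/* [0] = input GPIO, [1] = output GPIO, -1 = none */
};

int main(void)
{
	/* Entries taken from ice_pin_desc_e82x above */
	static const struct pin_caps pins[] = {
		{ "TIME_SYNC", {  4, -1 } },
		{ "1PPS",      { -1,  5 } },
	};

	for (unsigned int i = 0; i < sizeof(pins) / sizeof(pins[0]); i++)
		printf("%s: EXTTS %s, PEROUT %s\n", pins[i].name,
		       pins[i].gpio[0] >= 0 ? "ok" : "unsupported",
		       pins[i].gpio[1] >= 0 ? "ok" : "unsupported");
	return 0;
}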
/**
- * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC
- * @info: the driver's PTP info structure
+ * ice_ptp_gpio_enable - Enable/disable ancillary features of PHC
+ * @info: The driver's PTP info structure
* @rq: The requested feature to change
* @on: Enable/disable flag
+ *
+ * Return: 0 on success, negative error code otherwise
*/
-static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info,
- struct ptp_clock_request *rq, int on)
+static int ice_ptp_gpio_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *rq, int on)
{
struct ice_pf *pf = ptp_info_to_pf(info);
+ int err;
switch (rq->type) {
- case PTP_CLK_REQ_PPS:
+ case PTP_CLK_REQ_PEROUT:
{
- struct ice_perout_channel clk_cfg = {};
+ struct ptp_perout_request *cached =
+ &pf->ptp.perout_rqs[rq->perout.index];
- clk_cfg.flags = rq->perout.flags;
- clk_cfg.gpio_pin = PPS_PIN_INDEX;
- clk_cfg.period = NSEC_PER_SEC;
- clk_cfg.ena = !!on;
-
- return ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
+ err = ice_ptp_cfg_perout(pf, &rq->perout, on);
+ if (!err) {
+ *cached = rq->perout;
+ } else {
+ cached->period.sec = 0;
+ cached->period.nsec = 0;
+ }
+ return err;
}
case PTP_CLK_REQ_EXTTS:
{
- struct ice_extts_channel extts_cfg = {};
-
- extts_cfg.flags = rq->extts.flags;
- extts_cfg.gpio_pin = TIME_SYNC_PIN_INDEX;
- extts_cfg.ena = !!on;
+ struct ptp_extts_request *cached =
+ &pf->ptp.extts_rqs[rq->extts.index];
- return ice_ptp_cfg_extts(pf, rq->extts.index, &extts_cfg, true);
+ err = ice_ptp_cfg_extts(pf, &rq->extts, on);
+ if (!err)
+ *cached = rq->extts;
+ else
+ cached->flags &= ~PTP_ENABLE_FEATURE;
+ return err;
}
default:
return -EOPNOTSUPP;
@@ -2059,7 +2048,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
/* For Vernier mode on E82X, we need to recalibrate after new settime.
* Start with marking timestamps as invalid.
*/
- if (hw->ptp.phy_model == ICE_PHY_E82X) {
+ if (ice_get_phy_model(hw) == ICE_PHY_E82X) {
err = ice_ptp_clear_phy_offset_ready_e82x(hw);
if (err)
dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
@@ -2071,7 +2060,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
}
/* Disable periodic outputs */
- ice_ptp_disable_all_clkout(pf);
+ ice_ptp_disable_all_perout(pf);
err = ice_ptp_write_init(pf, &ts64);
ice_ptp_unlock(hw);
@@ -2080,10 +2069,10 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_reset_cached_phctime(pf);
/* Reenable periodic outputs */
- ice_ptp_enable_all_clkout(pf);
+ ice_ptp_enable_all_perout(pf);
/* Recalibrate and re-enable timestamp blocks for E822/E823 */
- if (hw->ptp.phy_model == ICE_PHY_E82X)
+ if (ice_get_phy_model(hw) == ICE_PHY_E82X)
ice_ptp_restart_all_phy(pf);
exit:
if (err) {
@@ -2142,12 +2131,12 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
}
/* Disable periodic outputs */
- ice_ptp_disable_all_clkout(pf);
+ ice_ptp_disable_all_perout(pf);
err = ice_ptp_write_adj(pf, delta);
/* Reenable periodic outputs */
- ice_ptp_enable_all_clkout(pf);
+ ice_ptp_enable_all_perout(pf);
ice_ptp_unlock(hw);
@@ -2405,20 +2394,41 @@ u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
}
/**
- * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
+ * ice_ptp_setup_pin_cfg - setup PTP pin_config structure
+ * @pf: Board private structure
+ */
+static void ice_ptp_setup_pin_cfg(struct ice_pf *pf)
+{
+ for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
+ const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i];
+ struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i];
+ const char *name = NULL;
+
+ if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
+ name = ice_pin_names[desc->name_idx];
+ else if (desc->name_idx != GPIO_NA)
+ name = ice_pin_names_nvm[desc->name_idx];
+ if (name)
+ strscpy(pin->name, name, sizeof(pin->name));
+
+ pin->index = i;
+ }
+
+ pf->ptp.info.pin_config = pf->ptp.pin_desc;
+}
+
+/**
+ * ice_ptp_disable_pins - Disable PTP pins
* @pf: pointer to the PF structure
- * @info: PTP clock info structure
*
* Disable the OS access to the SMA pins. Called to clear out the OS
- * indications of pin support when we fail to setup the E810-T SMA control
- * register.
+ * indications of pin support when we fail to setup the SMA control register.
*/
-static void
-ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+static void ice_ptp_disable_pins(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
+ struct ptp_clock_info *info = &pf->ptp.info;
- dev_warn(dev, "Failed to configure E810-T SMA pin control\n");
+ dev_warn(ice_pf_to_dev(pf), "Failed to configure PTP pin control\n");
info->enable = NULL;
info->verify = NULL;
@@ -2428,126 +2438,158 @@ ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
}
/**
- * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
+ * ice_ptp_parse_sdp_entries - update ice_ptp_pin_desc structure from NVM
* @pf: pointer to the PF structure
- * @info: PTP clock info structure
+ * @entries: SDP connection section from NVM
+ * @num_entries: number of valid entries in @entries
+ * @pins: PTP pins array to update
*
- * Finish setting up the SMA pins by allocating pin_config, and setting it up
- * according to the current status of the SMA. On failure, disable all of the
- * extended SMA pin support.
+ * Return: 0 on success, negative error code otherwise.
*/
-static void
-ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
+ unsigned int num_entries,
+ struct ice_ptp_pin_desc *pins)
{
- struct device *dev = ice_pf_to_dev(pf);
- int err;
+ unsigned int n_pins = 0;
+ unsigned int i;
- /* Allocate memory for kernel pins interface */
- info->pin_config = devm_kcalloc(dev, info->n_pins,
- sizeof(*info->pin_config), GFP_KERNEL);
- if (!info->pin_config) {
- ice_ptp_disable_sma_pins_e810t(pf, info);
- return;
- }
+ /* Setup ice_pin_desc array */
+ for (i = 0; i < ICE_N_PINS_MAX; i++) {
+ pins[i].name_idx = -1;
+ pins[i].gpio[0] = -1;
+ pins[i].gpio[1] = -1;
+ }
+
+ for (i = 0; i < num_entries; i++) {
+ u16 entry = le16_to_cpu(entries[i]);
+ DECLARE_BITMAP(bitmap, GPIO_NA);
+ unsigned int bitmap_idx;
+ bool dir;
+ u16 gpio;
+
+ *bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry);
+ dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry);
+ gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry);
+ for_each_set_bit(bitmap_idx, bitmap, GPIO_NA + 1) {
+ unsigned int idx;
+
+ /* Check if entry's pin bit is valid */
+ if (bitmap_idx >= NUM_PTP_PINS_NVM &&
+ bitmap_idx != GPIO_NA)
+ continue;
- /* Read current SMA status */
- err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
- if (err)
- ice_ptp_disable_sma_pins_e810t(pf, info);
-}
+ /* Check if pin already exists */
+ for (idx = 0; idx < ICE_N_PINS_MAX; idx++)
+ if (pins[idx].name_idx == bitmap_idx)
+ break;
+
+ if (idx == ICE_N_PINS_MAX) {
+ /* Pin not found, setup its entry and name */
+ idx = n_pins++;
+ pins[idx].name_idx = bitmap_idx;
+ if (bitmap_idx == GPIO_NA)
+ strscpy(pf->ptp.pin_desc[idx].name,
+ ice_pin_names[gpio],
+ sizeof(pf->ptp.pin_desc[idx].name));
+ }
-/**
- * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
- * @pf: pointer to the PF instance
- * @info: PTP clock capabilities
- */
-static void
-ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
-{
- if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
- info->n_ext_ts = N_EXT_TS_E810;
- info->n_per_out = N_PER_OUT_E810T;
- info->n_pins = NUM_PTP_PINS_E810T;
- info->verify = ice_verify_pin_e810t;
-
- /* Complete setup of the SMA pins */
- ice_ptp_setup_sma_pins_e810t(pf, info);
- } else if (ice_is_e810t(&pf->hw)) {
- info->n_ext_ts = N_EXT_TS_NO_SMA_E810T;
- info->n_per_out = N_PER_OUT_NO_SMA_E810T;
- } else {
- info->n_per_out = N_PER_OUT_E810;
- info->n_ext_ts = N_EXT_TS_E810;
+ /* Setup in/out GPIO number */
+ pins[idx].gpio[dir] = gpio;
+ }
}
-}
-/**
- * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs
- * @pf: pointer to the PF instance
- * @info: PTP clock capabilities
- */
-static void
-ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info)
-{
- info->pps = 1;
- info->n_per_out = 0;
- info->n_ext_ts = 1;
+ for (i = 0; i < n_pins; i++) {
+ dev_dbg(ice_pf_to_dev(pf),
+ "NVM pin entry[%d] : name_idx %d gpio_out %d gpio_in %d\n",
+ i, pins[i].name_idx, pins[i].gpio[1], pins[i].gpio[0]);
+ }
+
+ pf->ptp.info.n_pins = n_pins;
+ return 0;
}
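Each 16-bit SDP entry packs a bitmap of named pins, a direction flag, and an SDP/GPIO number. A decoding sketch with an invented entry value and illustrative field widths; the real widths come from the ICE_AQC_NVM_SDP_AC_* masks in the driver:

#include <stdio.h>
#include <stdint.h>

#define SDP_AC_PIN_M	0x00ffu		/* illustrative: pin bitmap */
#define SDP_AC_DIR_M	0x0100u		/* illustrative: 0 = in, 1 = out */
#define SDP_AC_NUM_S	9
#define SDP_AC_NUM_M	0x0fu		/* illustrative: SDP/GPIO number */

int main(void)
{
	/* Invented entry: pin bit 1, output direction, GPIO 3 */
	uint16_t entry = (3u << SDP_AC_NUM_S) | SDP_AC_DIR_M | 0x0002;
	unsigned int pin_bitmap = entry & SDP_AC_PIN_M;
	int dir = !!(entry & SDP_AC_DIR_M);
	unsigned int gpio = (entry >> SDP_AC_NUM_S) & SDP_AC_NUM_M;

	/* dir selects which half of gpio[] the number lands in */
	printf("pin bitmap 0x%x, dir %s, gpio[%d] = %u\n",
	       pin_bitmap, dir ? "out" : "in", dir, gpio);
	return 0;
}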
/**
- * ice_ptp_set_funcs_e82x - Set specialized functions for E82x support
+ * ice_ptp_set_funcs_e82x - Set specialized functions for E82X support
* @pf: Board private structure
- * @info: PTP info to fill
*
- * Assign functions to the PTP capabiltiies structure for E82x devices.
+ * Assign functions to the PTP capabilities structure for E82X devices.
* Functions which operate across all device families should be set directly
- * in ice_ptp_set_caps. Only add functions here which are distinct for E82x
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E82X
* devices.
*/
-static void
-ice_ptp_set_funcs_e82x(struct ice_pf *pf, struct ptp_clock_info *info)
+static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
{
#ifdef CONFIG_ICE_HWTS
if (boot_cpu_has(X86_FEATURE_ART) &&
boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
- info->getcrosststamp = ice_ptp_getcrosststamp_e82x;
+ pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp_e82x;
+
#endif /* CONFIG_ICE_HWTS */
+ if (ice_is_e825c(&pf->hw)) {
+ pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
+ pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c);
+ } else {
+ pf->ptp.ice_pin_desc = ice_pin_desc_e82x;
+ pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e82x);
+ }
+ ice_ptp_setup_pin_cfg(pf);
}
/**
* ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
* @pf: Board private structure
- * @info: PTP info to fill
*
* Assign functions to the PTP capabilities structure for E810 devices.
* Functions which operate across all device families should be set directly
- * in ice_ptp_set_caps. Only add functions here which are distinct for e810
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E810
* devices.
*/
-static void
-ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
+static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
{
- info->enable = ice_ptp_gpio_enable_e810;
- ice_ptp_setup_pins_e810(pf, info);
-}
+ __le16 entries[ICE_AQC_NVM_SDP_AC_MAX_SIZE];
+ struct ice_ptp_pin_desc *desc = NULL;
+ struct ice_ptp *ptp = &pf->ptp;
+ unsigned int num_entries;
+ int err;
-/**
- * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support
- * @pf: Board private structure
- * @info: PTP info to fill
- *
- * Assign functions to the PTP capabiltiies structure for E823 devices.
- * Functions which operate across all device families should be set directly
- * in ice_ptp_set_caps. Only add functions here which are distinct for e823
- * devices.
- */
-static void
-ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info)
-{
- ice_ptp_set_funcs_e82x(pf, info);
+ err = ice_ptp_read_sdp_ac(&pf->hw, entries, &num_entries);
+ if (err) {
+ /* SDP section does not exist in NVM or is corrupted */
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
+ ptp->ice_pin_desc = ice_pin_desc_e810_sma;
+ ptp->info.n_pins =
+ ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810_sma);
+ } else {
+ pf->ptp.ice_pin_desc = ice_pin_desc_e810;
+ pf->ptp.info.n_pins =
+ ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
+ err = 0;
+ }
+ } else {
+ desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX,
+ sizeof(struct ice_ptp_pin_desc),
+ GFP_KERNEL);
+ if (!desc)
+ goto err;
- info->enable = ice_ptp_gpio_enable_e823;
- ice_ptp_setup_pins_e823(pf, info);
+ err = ice_ptp_parse_sdp_entries(pf, entries, num_entries, desc);
+ if (err)
+ goto err;
+
+ ptp->ice_pin_desc = (const struct ice_ptp_pin_desc *)desc;
+ }
+
+ ptp->info.pin_config = ptp->pin_desc;
+ ice_ptp_setup_pin_cfg(pf);
+
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
+ err = ice_ptp_set_sma_cfg(pf);
+err:
+ if (err) {
+ devm_kfree(ice_pf_to_dev(pf), desc);
+ ice_ptp_disable_pins(pf);
+ }
}
/**
@@ -2567,13 +2609,15 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
info->adjfine = ice_ptp_adjfine;
info->gettimex64 = ice_ptp_gettimex64;
info->settime64 = ice_ptp_settime64;
+ info->n_per_out = GLTSYN_TGT_H_IDX_MAX;
+ info->n_ext_ts = GLTSYN_EVNT_H_IDX_MAX;
+ info->enable = ice_ptp_gpio_enable;
+ info->verify = ice_verify_pin;
if (ice_is_e810(&pf->hw))
- ice_ptp_set_funcs_e810(pf, info);
- else if (ice_is_e823(&pf->hw))
- ice_ptp_set_funcs_e823(pf, info);
+ ice_ptp_set_funcs_e810(pf);
else
- ice_ptp_set_funcs_e82x(pf, info);
+ ice_ptp_set_funcs_e82x(pf);
}
/**
@@ -2775,7 +2819,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
/* Disable periodic outputs */
- ice_ptp_disable_all_clkout(pf);
+ ice_ptp_disable_all_perout(pf);
src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
@@ -2813,10 +2857,8 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
/* Write the increment time value to PHY and LAN */
err = ice_ptp_write_incval(hw, ice_base_incval(pf));
- if (err) {
- ice_ptp_unlock(hw);
- return err;
- }
+ if (err)
+ goto err_unlock;
/* Write the initial Time value to PHY and LAN using the cached PHC
* time before the reset and time difference between stopping and
@@ -2829,10 +2871,8 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
ts = ktime_to_timespec64(ktime_get_real());
}
err = ice_ptp_write_init(pf, &ts);
- if (err) {
- ice_ptp_unlock(hw);
- return err;
- }
+ if (err)
+ goto err_unlock;
/* Release the global hardware lock */
ice_ptp_unlock(hw);
@@ -2852,10 +2892,14 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
}
/* Re-enable all periodic outputs and external timestamp events */
- ice_ptp_enable_all_clkout(pf);
+ ice_ptp_enable_all_perout(pf);
ice_ptp_enable_all_extts(pf);
return 0;
+
+err_unlock:
+ ice_ptp_unlock(hw);
+ return err;
}
/**
@@ -2895,187 +2939,49 @@ err:
dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}
-/**
- * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device
- * @aux_dev: auxiliary device to get the auxiliary PF for
- */
-static struct ice_pf *
-ice_ptp_aux_dev_to_aux_pf(struct auxiliary_device *aux_dev)
-{
- struct ice_ptp_port *aux_port;
- struct ice_ptp *aux_ptp;
-
- aux_port = container_of(aux_dev, struct ice_ptp_port, aux_dev);
- aux_ptp = container_of(aux_port, struct ice_ptp, port);
-
- return container_of(aux_ptp, struct ice_pf, ptp);
-}
-
-/**
- * ice_ptp_aux_dev_to_owner_pf - Get PF handle for the auxiliary device
- * @aux_dev: auxiliary device to get the PF for
- */
-static struct ice_pf *
-ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev)
+static bool ice_is_primary(struct ice_hw *hw)
{
- struct ice_ptp_port_owner *ports_owner;
- const struct auxiliary_driver *aux_drv;
- struct ice_ptp *owner_ptp;
-
- if (!aux_dev->dev.driver)
- return NULL;
-
- aux_drv = to_auxiliary_drv(aux_dev->dev.driver);
- ports_owner = container_of(aux_drv, struct ice_ptp_port_owner,
- aux_driver);
- owner_ptp = container_of(ports_owner, struct ice_ptp, ports_owner);
- return container_of(owner_ptp, struct ice_pf, ptp);
+ return ice_is_e825c(hw) && ice_is_dual(hw) ?
+ !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : true;
}
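Only a dual-complex E825C can be non-primary; every other part is treated as primary unconditionally. A sketch of the predicate, with a stand-in for ICE_NAC_TOPO_PRIMARY_M:

#include <stdio.h>
#include <stdbool.h>

#define NAC_TOPO_PRIMARY_M 0x1	/* stand-in for ICE_NAC_TOPO_PRIMARY_M */

static bool is_primary(bool e825c, bool dual, unsigned int mode)
{
	return e825c && dual ? !!(mode & NAC_TOPO_PRIMARY_M) : true;
}

int main(void)
{
	printf("E825C single-complex:   %d\n", is_primary(true, false, 0));
	printf("E825C dual, secondary:  %d\n", is_primary(true, true, 0));
	printf("E825C dual, primary:    %d\n", is_primary(true, true, 1));
	printf("non-E825C:              %d\n", is_primary(false, true, 0));
	return 0;
}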
-/**
- * ice_ptp_auxbus_probe - Probe auxiliary devices
- * @aux_dev: PF's auxiliary device
- * @id: Auxiliary device ID
- */
-static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev,
- const struct auxiliary_device_id *id)
+static int ice_ptp_setup_adapter(struct ice_pf *pf)
{
- struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
- struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
-
- if (WARN_ON(!owner_pf))
- return -ENODEV;
+ if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw))
+ return -EPERM;
- INIT_LIST_HEAD(&aux_pf->ptp.port.list_member);
- mutex_lock(&owner_pf->ptp.ports_owner.lock);
- list_add(&aux_pf->ptp.port.list_member,
- &owner_pf->ptp.ports_owner.ports);
- mutex_unlock(&owner_pf->ptp.ports_owner.lock);
+ pf->adapter->ctrl_pf = pf;
return 0;
}
-/**
- * ice_ptp_auxbus_remove - Remove auxiliary devices from the bus
- * @aux_dev: PF's auxiliary device
- */
-static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev)
+static int ice_ptp_setup_pf(struct ice_pf *pf)
{
- struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
- struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
+ struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
+ struct ice_ptp *ptp = &pf->ptp;
- mutex_lock(&owner_pf->ptp.ports_owner.lock);
- list_del(&aux_pf->ptp.port.list_member);
- mutex_unlock(&owner_pf->ptp.ports_owner.lock);
-}
+ if (WARN_ON(!ctrl_ptp) || ice_get_phy_model(&pf->hw) == ICE_PHY_UNSUP)
+ return -ENODEV;
-/**
- * ice_ptp_auxbus_shutdown
- * @aux_dev: PF's auxiliary device
- */
-static void ice_ptp_auxbus_shutdown(struct auxiliary_device *aux_dev)
-{
- /* Doing nothing here, but handle to auxbus driver must be satisfied */
-}
+ INIT_LIST_HEAD(&ptp->port.list_node);
+ mutex_lock(&pf->adapter->ports.lock);
-/**
- * ice_ptp_auxbus_suspend
- * @aux_dev: PF's auxiliary device
- * @state: power management state indicator
- */
-static int
-ice_ptp_auxbus_suspend(struct auxiliary_device *aux_dev, pm_message_t state)
-{
- /* Doing nothing here, but handle to auxbus driver must be satisfied */
- return 0;
-}
+ list_add(&ptp->port.list_node,
+ &pf->adapter->ports.ports);
+ mutex_unlock(&pf->adapter->ports.lock);
-/**
- * ice_ptp_auxbus_resume
- * @aux_dev: PF's auxiliary device
- */
-static int ice_ptp_auxbus_resume(struct auxiliary_device *aux_dev)
-{
- /* Doing nothing here, but handle to auxbus driver must be satisfied */
return 0;
}
-/**
- * ice_ptp_auxbus_create_id_table - Create auxiliary device ID table
- * @pf: Board private structure
- * @name: auxiliary bus driver name
- */
-static struct auxiliary_device_id *
-ice_ptp_auxbus_create_id_table(struct ice_pf *pf, const char *name)
-{
- struct auxiliary_device_id *ids;
-
- /* Second id left empty to terminate the array */
- ids = devm_kcalloc(ice_pf_to_dev(pf), 2,
- sizeof(struct auxiliary_device_id), GFP_KERNEL);
- if (!ids)
- return NULL;
-
- snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s", name);
-
- return ids;
-}
-
-/**
- * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver
- * @pf: Board private structure
- */
-static int ice_ptp_register_auxbus_driver(struct ice_pf *pf)
+static void ice_ptp_cleanup_pf(struct ice_pf *pf)
{
- struct auxiliary_driver *aux_driver;
- struct ice_ptp *ptp;
- struct device *dev;
- char *name;
- int err;
-
- ptp = &pf->ptp;
- dev = ice_pf_to_dev(pf);
- aux_driver = &ptp->ports_owner.aux_driver;
- INIT_LIST_HEAD(&ptp->ports_owner.ports);
- mutex_init(&ptp->ports_owner.lock);
- name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
- pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
- ice_get_ptp_src_clock_index(&pf->hw));
- if (!name)
- return -ENOMEM;
-
- aux_driver->name = name;
- aux_driver->shutdown = ice_ptp_auxbus_shutdown;
- aux_driver->suspend = ice_ptp_auxbus_suspend;
- aux_driver->remove = ice_ptp_auxbus_remove;
- aux_driver->resume = ice_ptp_auxbus_resume;
- aux_driver->probe = ice_ptp_auxbus_probe;
- aux_driver->id_table = ice_ptp_auxbus_create_id_table(pf, name);
- if (!aux_driver->id_table)
- return -ENOMEM;
+ struct ice_ptp *ptp = &pf->ptp;
- err = auxiliary_driver_register(aux_driver);
- if (err) {
- devm_kfree(dev, aux_driver->id_table);
- dev_err(dev, "Failed registering aux_driver, name <%s>\n",
- name);
+ if (ice_get_phy_model(&pf->hw) != ICE_PHY_UNSUP) {
+ mutex_lock(&pf->adapter->ports.lock);
+ list_del(&ptp->port.list_node);
+ mutex_unlock(&pf->adapter->ports.lock);
}
-
- return err;
-}
-
-/**
- * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver
- * @pf: Board private structure
- */
-static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf)
-{
- struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver;
-
- auxiliary_driver_unregister(aux_driver);
- devm_kfree(ice_pf_to_dev(pf), aux_driver->id_table);
-
- mutex_destroy(&pf->ptp.ports_owner.lock);
}
/**
@@ -3087,15 +2993,12 @@ static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf)
*/
int ice_ptp_clock_index(struct ice_pf *pf)
{
- struct auxiliary_device *aux_dev;
- struct ice_pf *owner_pf;
+ struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
struct ptp_clock *clock;
- aux_dev = &pf->ptp.port.aux_dev;
- owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
- if (!owner_pf)
+ if (!ctrl_ptp)
return -1;
- clock = owner_pf->ptp.clock;
+ clock = ctrl_ptp->clock;
return clock ? ptp_clock_index(clock) : -1;
}
@@ -3129,18 +3032,14 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
/* Write the increment time value to PHY and LAN */
err = ice_ptp_write_incval(hw, ice_base_incval(pf));
- if (err) {
- ice_ptp_unlock(hw);
- goto err_exit;
- }
+ if (err)
+ goto err_unlock;
ts = ktime_to_timespec64(ktime_get_real());
/* Write the initial Time value to PHY and LAN */
err = ice_ptp_write_init(pf, &ts);
- if (err) {
- ice_ptp_unlock(hw);
- goto err_exit;
- }
+ if (err)
+ goto err_unlock;
/* Release the global hardware lock */
ice_ptp_unlock(hw);
@@ -3155,19 +3054,15 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
if (err)
goto err_clk;
- err = ice_ptp_register_auxbus_driver(pf);
- if (err) {
- dev_err(ice_pf_to_dev(pf), "Failed to register PTP auxbus driver");
- goto err_aux;
- }
-
return 0;
-err_aux:
- ptp_clock_unregister(pf->ptp.clock);
err_clk:
pf->ptp.clock = NULL;
err_exit:
return err;
+
+err_unlock:
+ ice_ptp_unlock(hw);
+ return err;
}
/**
@@ -3209,7 +3104,7 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
mutex_init(&ptp_port->ps_lock);
- switch (hw->ptp.phy_model) {
+ switch (ice_get_phy_model(hw)) {
case ICE_PHY_ETH56G:
return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx,
ptp_port->port_num);
@@ -3227,76 +3122,6 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
}
/**
- * ice_ptp_release_auxbus_device
- * @dev: device that utilizes the auxbus
- */
-static void ice_ptp_release_auxbus_device(struct device *dev)
-{
- /* Doing nothing here, but handle to auxbux device must be satisfied */
-}
-
-/**
- * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device
- * @pf: Board private structure
- */
-static int ice_ptp_create_auxbus_device(struct ice_pf *pf)
-{
- struct auxiliary_device *aux_dev;
- struct ice_ptp *ptp;
- struct device *dev;
- char *name;
- int err;
- u32 id;
-
- ptp = &pf->ptp;
- id = ptp->port.port_num;
- dev = ice_pf_to_dev(pf);
-
- aux_dev = &ptp->port.aux_dev;
-
- name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
- pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
- ice_get_ptp_src_clock_index(&pf->hw));
- if (!name)
- return -ENOMEM;
-
- aux_dev->name = name;
- aux_dev->id = id;
- aux_dev->dev.release = ice_ptp_release_auxbus_device;
- aux_dev->dev.parent = dev;
-
- err = auxiliary_device_init(aux_dev);
- if (err)
- goto aux_err;
-
- err = auxiliary_device_add(aux_dev);
- if (err) {
- auxiliary_device_uninit(aux_dev);
- goto aux_err;
- }
-
- return 0;
-aux_err:
- dev_err(dev, "Failed to create PTP auxiliary bus device <%s>\n", name);
- devm_kfree(dev, name);
- return err;
-}
-
-/**
- * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device
- * @pf: Board private structure
- */
-static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
-{
- struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev;
-
- auxiliary_device_delete(aux_dev);
- auxiliary_device_uninit(aux_dev);
-
- memset(aux_dev, 0, sizeof(*aux_dev));
-}
-
-/**
* ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
* @pf: Board private structure
*
@@ -3307,7 +3132,7 @@ static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
*/
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
- switch (pf->hw.ptp.phy_model) {
+ switch (ice_get_phy_model(&pf->hw)) {
case ICE_PHY_E82X:
/* E822 based PHY has the clock owner process the interrupt
* for all ports.
@@ -3350,19 +3175,26 @@ void ice_ptp_init(struct ice_pf *pf)
/* If this function owns the clock hardware, it must allocate and
* configure the PTP clock device to represent it.
*/
- if (ice_pf_src_tmr_owned(pf)) {
+ if (ice_pf_src_tmr_owned(pf) && ice_is_primary(hw)) {
+ err = ice_ptp_setup_adapter(pf);
+ if (err)
+ goto err_exit;
err = ice_ptp_init_owner(pf);
if (err)
- goto err;
+ goto err_exit;
}
+ err = ice_ptp_setup_pf(pf);
+ if (err)
+ goto err_exit;
+
ptp->port.port_num = hw->pf_id;
if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
ptp->port.port_num = hw->pf_id * 2;
err = ice_ptp_init_port(pf, &ptp->port);
if (err)
- goto err;
+ goto err_exit;
/* Start the PHY timestamping block */
ice_ptp_reset_phy_timestamping(pf);
@@ -3370,20 +3202,16 @@ void ice_ptp_init(struct ice_pf *pf)
/* Configure initial Tx interrupt settings */
ice_ptp_cfg_tx_interrupt(pf);
- err = ice_ptp_create_auxbus_device(pf);
- if (err)
- goto err;
-
ptp->state = ICE_PTP_READY;
err = ice_ptp_init_work(pf, ptp);
if (err)
- goto err;
+ goto err_exit;
dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
return;
-err:
+err_exit:
/* If we registered a PTP clock, release it */
if (pf->ptp.clock) {
ptp_clock_unregister(ptp->clock);
@@ -3410,7 +3238,7 @@ void ice_ptp_release(struct ice_pf *pf)
/* Disable timestamping for both Tx and Rx */
ice_ptp_disable_timestamp_mode(pf);
- ice_ptp_remove_auxbus_device(pf);
+ ice_ptp_cleanup_pf(pf);
ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
@@ -3425,14 +3253,11 @@ void ice_ptp_release(struct ice_pf *pf)
pf->ptp.kworker = NULL;
}
- if (ice_pf_src_tmr_owned(pf))
- ice_ptp_unregister_auxbus_driver(pf);
-
if (!pf->ptp.clock)
return;
/* Disable periodic outputs */
- ice_ptp_disable_all_clkout(pf);
+ ice_ptp_disable_all_perout(pf);
ptp_clock_unregister(pf->ptp.clock);
pf->ptp.clock = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 2db2257a0fb2..824e73b677a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -9,37 +9,6 @@
#include "ice_ptp_hw.h"
-enum ice_ptp_pin_e810 {
- GPIO_20 = 0,
- GPIO_21,
- GPIO_22,
- GPIO_23,
- NUM_PTP_PIN_E810
-};
-
-enum ice_ptp_pin_e810t {
- GNSS = 0,
- SMA1,
- UFL1,
- SMA2,
- UFL2,
- NUM_PTP_PINS_E810T
-};
-
-struct ice_perout_channel {
- bool ena;
- u32 gpio_pin;
- u32 flags;
- u64 period;
- u64 start_time;
-};
-
-struct ice_extts_channel {
- bool ena;
- u32 gpio_pin;
- u32 flags;
-};
-
/* The ice hardware captures Tx hardware timestamps in the PHY. The timestamp
* is stored in a buffer of registers. Depending on the specific hardware,
* this buffer might be shared across multiple PHY ports.
@@ -169,9 +138,8 @@ struct ice_ptp_tx {
* ready for PTP functionality. It is used to track the port initialization
* and determine when the port's PHY offset is valid.
*
- * @list_member: list member structure of auxiliary device
+ * @list_node: list member structure
* @tx: Tx timestamp tracking for this port
- * @aux_dev: auxiliary device associated with this port
* @ov_work: delayed work task for tracking when PHY offset is valid
* @ps_lock: mutex used to protect the overall PTP PHY start procedure
* @link_up: indicates whether the link is up
@@ -179,9 +147,8 @@ struct ice_ptp_tx {
* @port_num: the port number this structure represents
*/
struct ice_ptp_port {
- struct list_head list_member;
+ struct list_head list_node;
struct ice_ptp_tx tx;
- struct auxiliary_device aux_dev;
struct kthread_delayed_work ov_work;
struct mutex ps_lock; /* protects overall PTP PHY start procedure */
bool link_up;
@@ -195,22 +162,6 @@ enum ice_ptp_tx_interrupt {
ICE_PTP_TX_INTERRUPT_ALL,
};
-/**
- * struct ice_ptp_port_owner - data used to handle the PTP clock owner info
- *
- * This structure contains data necessary for the PTP clock owner to correctly
- * handle the timestamping feature for all attached ports.
- *
- * @aux_driver: the structure carring the auxiliary driver information
- * @ports: list of porst handled by this port owner
- * @lock: protect access to ports list
- */
-struct ice_ptp_port_owner {
- struct auxiliary_driver aux_driver;
- struct list_head ports;
- struct mutex lock;
-};
-
#define GLTSYN_TGT_H_IDX_MAX 4
enum ice_ptp_state {
@@ -221,20 +172,69 @@ enum ice_ptp_state {
ICE_PTP_ERROR,
};
+enum ice_ptp_pin {
+ SDP0 = 0,
+ SDP1,
+ SDP2,
+ SDP3,
+ TIME_SYNC,
+ ONE_PPS
+};
+
+enum ice_ptp_pin_nvm {
+ GNSS = 0,
+ SMA1,
+ UFL1,
+ SMA2,
+ UFL2,
+ NUM_PTP_PINS_NVM,
+ GPIO_NA = 9
+};
+
+/* Per-channel register definitions */
+#define GLTSYN_AUX_OUT(_chan, _idx) (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8))
+#define GLTSYN_AUX_IN(_chan, _idx) (GLTSYN_AUX_IN_0(_idx) + ((_chan) * 8))
+#define GLTSYN_CLKO(_chan, _idx) (GLTSYN_CLKO_0(_idx) + ((_chan) * 8))
+#define GLTSYN_TGT_L(_chan, _idx) (GLTSYN_TGT_L_0(_idx) + ((_chan) * 16))
+#define GLTSYN_TGT_H(_chan, _idx) (GLTSYN_TGT_H_0(_idx) + ((_chan) * 16))
+#define GLTSYN_EVNT_L(_chan, _idx) (GLTSYN_EVNT_L_0(_idx) + ((_chan) * 16))
+#define GLTSYN_EVNT_H(_chan, _idx) (GLTSYN_EVNT_H_0(_idx) + ((_chan) * 16))
+#define GLTSYN_EVNT_H_IDX_MAX 3
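
As a worked illustration of the strides encoded above (the GLTSYN_*_0 base
macros are defined elsewhere in the driver): the AUX_OUT, AUX_IN and CLKO
registers use an 8-byte per-channel stride, while the TGT and EVNT register
pairs use 16 bytes, so for channel 2:

	GLTSYN_AUX_OUT(2, idx) == GLTSYN_AUX_OUT_0(idx) + 16	/* 2 * 8 */
	GLTSYN_TGT_L(2, idx)   == GLTSYN_TGT_L_0(idx) + 32	/* 2 * 16 */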
+
+/* Pin definitions for PTP */
+#define ICE_N_PINS_MAX 6
+#define ICE_SMA_PINS_NUM 4
+#define ICE_PIN_DESC_ARR_LEN(_arr) (sizeof(_arr) / \
+ sizeof(struct ice_ptp_pin_desc))
+
+/**
+ * struct ice_ptp_pin_desc - hardware pin description data
+ * @name_idx: index of the name of pin in ice_pin_names
+ * @gpio: the associated GPIO input and output pins
+ *
+ * Structure describing a PTP-capable GPIO pin that extends the ptp_pin_desc
+ * array for the device. Each device family has its own set of available pins
+ * with varying restrictions.
+ */
+struct ice_ptp_pin_desc {
+ int name_idx;
+ int gpio[2];
+};
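
For illustration only, a table built on this layout might pair the generic
pin names above with GPIO numbers. The entries below are hypothetical (and
assume the name indices line up with enum ice_ptp_pin); they are not taken
from the driver:

	static const struct ice_ptp_pin_desc sample_pins[] = {
		/* name_idx, { input GPIO, output GPIO } */
		{ SDP0,    { 0, 0 } },		/* bidirectional SDP on GPIO 0 */
		{ ONE_PPS, { GPIO_NA, 5 } },	/* output-only 1PPS on GPIO 5 */
	};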
+
/**
* struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK
* @state: current state of PTP state machine
* @tx_interrupt_mode: the TX interrupt mode for the PTP clock
* @port: data for the PHY port initialization procedure
- * @ports_owner: data for the auxiliary driver owner
* @work: delayed work function for periodic tasks
* @cached_phc_time: a cached copy of the PHC time for timestamp extension
* @cached_phc_jiffies: jiffies when cached_phc_time was last updated
- * @ext_ts_chan: the external timestamp channel in use
- * @ext_ts_irq: the external timestamp IRQ in use
* @kworker: kwork thread for handling periodic work
- * @perout_channels: periodic output data
- * @extts_channels: channels for external timestamps
+ * @ext_ts_irq: the external timestamp IRQ in use
+ * @pin_desc: structure defining pins
+ * @ice_pin_desc: internal structure describing pin relations
+ * @perout_rqs: cached periodic output requests
+ * @extts_rqs: cached external timestamp requests
* @info: structure defining PTP hardware capabilities
* @clock: pointer to registered PTP clock device
* @tstamp_config: hardware timestamping configuration
@@ -250,15 +250,15 @@ struct ice_ptp {
enum ice_ptp_state state;
enum ice_ptp_tx_interrupt tx_interrupt_mode;
struct ice_ptp_port port;
- struct ice_ptp_port_owner ports_owner;
struct kthread_delayed_work work;
u64 cached_phc_time;
unsigned long cached_phc_jiffies;
- u8 ext_ts_chan;
- u8 ext_ts_irq;
struct kthread_worker *kworker;
- struct ice_perout_channel perout_channels[GLTSYN_TGT_H_IDX_MAX];
- struct ice_extts_channel extts_channels[GLTSYN_TGT_H_IDX_MAX];
+ u8 ext_ts_irq;
+ struct ptp_pin_desc pin_desc[ICE_N_PINS_MAX];
+ const struct ice_ptp_pin_desc *ice_pin_desc;
+ struct ptp_perout_request perout_rqs[GLTSYN_TGT_H_IDX_MAX];
+ struct ptp_extts_request extts_rqs[GLTSYN_EVNT_H_IDX_MAX];
struct ptp_clock_info info;
struct ptp_clock *clock;
struct hwtstamp_config tstamp_config;
@@ -289,27 +289,6 @@ struct ice_ptp {
#define FIFO_EMPTY BIT(2)
#define FIFO_OK 0xFF
#define ICE_PTP_FIFO_NUM_CHECKS 5
-/* Per-channel register definitions */
-#define GLTSYN_AUX_OUT(_chan, _idx) (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8))
-#define GLTSYN_AUX_IN(_chan, _idx) (GLTSYN_AUX_IN_0(_idx) + ((_chan) * 8))
-#define GLTSYN_CLKO(_chan, _idx) (GLTSYN_CLKO_0(_idx) + ((_chan) * 8))
-#define GLTSYN_TGT_L(_chan, _idx) (GLTSYN_TGT_L_0(_idx) + ((_chan) * 16))
-#define GLTSYN_TGT_H(_chan, _idx) (GLTSYN_TGT_H_0(_idx) + ((_chan) * 16))
-#define GLTSYN_EVNT_L(_chan, _idx) (GLTSYN_EVNT_L_0(_idx) + ((_chan) * 16))
-#define GLTSYN_EVNT_H(_chan, _idx) (GLTSYN_EVNT_H_0(_idx) + ((_chan) * 16))
-#define GLTSYN_EVNT_H_IDX_MAX 3
-
-/* Pin definitions for PTP PPS out */
-#define PPS_CLK_GEN_CHAN 3
-#define PPS_CLK_SRC_CHAN 2
-#define PPS_PIN_INDEX 5
-#define TIME_SYNC_PIN_INDEX 4
-#define N_EXT_TS_E810 3
-#define N_PER_OUT_E810 4
-#define N_PER_OUT_E810T 3
-#define N_PER_OUT_NO_SMA_E810T 2
-#define N_EXT_TS_NO_SMA_E810T 2
-#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
int ice_ptp_clock_index(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index e6980b94a6c1..585ce200c60f 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -334,7 +334,7 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
* reference. See the struct ice_time_ref_info_e82x for information about the
* meaning of each constant.
*/
-const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
+const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
/* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
{
/* pll_freq */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index ec8db830ac73..dfd49732bd5b 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -659,6 +659,29 @@ static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw,
return 0;
}
+#define ICE_ONE_PPS_OUT_AMP_MAX 3
+
+/**
+ * ice_cgu_cfg_pps_out - Configure 1PPS output from CGU
+ * @hw: pointer to the HW struct
+ * @enable: true to enable 1PPS output, false to disable it
+ *
+ * Return: 0 on success, or a negative error code if the CGU read/write fails
+ */
+int ice_cgu_cfg_pps_out(struct ice_hw *hw, bool enable)
+{
+ union nac_cgu_dword9 dw9;
+ int err;
+
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
+ if (err)
+ return err;
+
+ dw9.one_pps_out_en = enable;
+ dw9.one_pps_out_amp = enable * ICE_ONE_PPS_OUT_AMP_MAX;
+ return ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
+}
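
A minimal caller sketch (the call site is hypothetical and error handling is
abbreviated): a clock owner would enable the CGU 1PPS output when arming a
periodic output and quiesce it on teardown:

	err = ice_cgu_cfg_pps_out(hw, true);	/* enable, max amplitude */
	if (err)
		return err;
	/* ... periodic output active ... */
	ice_cgu_cfg_pps_out(hw, false);		/* disable, zero amplitude */

Note that the helper couples amplitude to the enable flag: enabling always
programs ICE_ONE_PPS_OUT_AMP_MAX, while disabling zeroes the amplitude field.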
+
/**
* ice_cfg_cgu_pll_dis_sticky_bits_e82x - disable TS PLL sticky bits
* @hw: pointer to the HW struct
@@ -804,7 +827,7 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw,
/* Certain hardware families share the same register values for the
* port register and source timer register.
*/
- switch (hw->ptp.phy_model) {
+ switch (ice_get_phy_model(hw)) {
case ICE_PHY_E810:
return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810;
default:
@@ -5148,9 +5171,9 @@ ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
return 0;
}
-/* E810T SMA functions
+/* E810 SMA functions
*
- * The following functions operate specifically on E810T hardware and are used
+ * The following functions operate specifically on E810 hardware and are used
* to access the extended GPIOs available.
*/
@@ -5217,14 +5240,14 @@ ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
}
/**
- * ice_read_sma_ctrl_e810t
+ * ice_read_sma_ctrl
* @hw: pointer to the hw struct
* @data: pointer to data to be read from the GPIO controller
*
* Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
* PCA9575 expander, so only bits 3-7 in data are valid.
*/
-int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
+int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data)
{
int status;
u16 handle;
@@ -5236,7 +5259,7 @@ int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
*data = 0;
- for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
+ for (i = ICE_SMA_MIN_BIT; i <= ICE_SMA_MAX_BIT; i++) {
bool pin;
status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
@@ -5250,14 +5273,14 @@ int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
}
/**
- * ice_write_sma_ctrl_e810t
+ * ice_write_sma_ctrl
* @hw: pointer to the hw struct
* @data: data to be written to the GPIO controller
*
* Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
* of the PCA9575 expander, so only bits 3-7 in data are valid.
*/
-int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
+int ice_write_sma_ctrl(struct ice_hw *hw, u8 data)
{
int status;
u16 handle;
@@ -5267,7 +5290,7 @@ int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
if (status)
return status;
- for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
+ for (i = ICE_SMA_MIN_BIT; i <= ICE_SMA_MAX_BIT; i++) {
bool pin;
pin = !(data & (1 << i));
@@ -5281,14 +5304,14 @@ int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
}
/**
- * ice_read_pca9575_reg_e810t
+ * ice_read_pca9575_reg
* @hw: pointer to the hw struct
* @offset: GPIO controller register offset
* @data: pointer to data to be read from the GPIO controller
*
* Read the register from the GPIO controller
*/
-int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
+int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
{
struct ice_aqc_link_topo_addr link_topo;
__le16 addr;
@@ -5312,6 +5335,66 @@ int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
}
/**
+ * ice_ptp_read_sdp_ac - read SDP available connections section from NVM
+ * @hw: pointer to the HW struct
+ * @entries: returns the SDP available connections section from NVM
+ * @num_entries: returns the number of valid entries
+ *
+ * Return: 0 on success, or a negative error code if the NVM read fails or the
+ * section does not exist or is corrupted
+ */
+int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries)
+{
+ __le16 data;
+ u32 offset;
+ int err;
+
+ err = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (err)
+ goto exit;
+
+ /* Read the offset of SDP_AC */
+ offset = ICE_AQC_NVM_SDP_AC_PTR_OFFSET;
+ err = ice_aq_read_nvm(hw, 0, offset, sizeof(data), &data, false, true,
+ NULL);
+ if (err)
+ goto exit;
+
+ /* Check if the section exists */
+ offset = FIELD_GET(ICE_AQC_NVM_SDP_AC_PTR_M, le16_to_cpu(data));
+ if (offset == ICE_AQC_NVM_SDP_AC_PTR_INVAL) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (offset & ICE_AQC_NVM_SDP_AC_PTR_TYPE_M) {
+ offset &= ICE_AQC_NVM_SDP_AC_PTR_M;
+ offset *= ICE_AQC_NVM_SECTOR_UNIT;
+ } else {
+ offset *= sizeof(data);
+ }
+
+ /* Skip reading section length and read the number of valid entries */
+ offset += sizeof(data);
+ err = ice_aq_read_nvm(hw, 0, offset, sizeof(data), &data, false, true,
+ NULL);
+ if (err)
+ goto exit;
+ *num_entries = le16_to_cpu(data);
+
+ /* Read SDP configuration section */
+ offset += sizeof(data);
+ err = ice_aq_read_nvm(hw, 0, offset, *num_entries * sizeof(data),
+ entries, false, true, NULL);
+
+exit:
+ if (err)
+ dev_dbg(ice_hw_to_dev(hw), "Failed to read SDP connection section\n");
+ ice_release_nvm(hw);
+ return err;
+}
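
To make the pointer decoding above concrete (a worked example; the sector
unit is assumed to be 4096 bytes here, the actual ICE_AQC_NVM_SECTOR_UNIT
definition lives elsewhere in the driver):

 - a pointer word with the type bit set and a masked value of 2 places the
   section at 2 * 4096 = 0x2000 bytes into the NVM;
 - a pointer word with the type bit clear and value 0x100 places it at
   0x100 * sizeof(__le16) = 0x200 bytes.

In both cases the first word of the section (its length) is skipped and the
second word supplies the number of valid entries that follow.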
+
+/**
* ice_ptp_init_phy_e810 - initialize PHY parameters
* @ptp: pointer to the PTP HW struct
*/
@@ -5417,7 +5500,7 @@ void ice_ptp_init_hw(struct ice_hw *hw)
static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port,
enum ice_ptp_tmr_cmd cmd)
{
- switch (hw->ptp.phy_model) {
+ switch (ice_get_phy_model(hw)) {
case ICE_PHY_ETH56G:
return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
case ICE_PHY_E82X:
@@ -5482,7 +5565,7 @@ static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
u32 port;
/* PHY models which can program all ports simultaneously */
- switch (hw->ptp.phy_model) {
+ switch (ice_get_phy_model(hw)) {
case ICE_PHY_E810:
return ice_ptp_port_cmd_e810(hw, cmd);
default:
@@ -5561,7 +5644,7 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
/* PHY timers */
/* Fill Rx and Tx ports and send msg to PHY */
- switch (hw->ptp.phy_model) {
+ switch (ice_get_phy_model(hw)) {
case ICE_PHY_ETH56G:
err = ice_ptp_prep_phy_time_eth56g(hw,
(u32)(time & 0xFFFFFFFF));
@@ -5607,7 +5690,7 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
- switch (hw->ptp.phy_model) {
+ switch (ice_get_phy_model(hw)) {
case ICE_PHY_ETH56G:
err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
break;
@@ -5676,7 +5759,7 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- switch (hw->ptp.phy_model) {
+ switch (ice_get_phy_model(hw)) {
case ICE_PHY_ETH56G:
err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
break;
@@ -5709,7 +5792,7 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
*/
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- switch (hw->ptp.phy_model) {
+ switch (ice_get_phy_model(hw)) {
case ICE_PHY_ETH56G:
return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
case ICE_PHY_E810:
@@ -5739,7 +5822,7 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
*/
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- switch (hw->ptp.phy_model) {
+ switch (ice_get_phy_model(hw)) {
case ICE_PHY_ETH56G:
return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
case ICE_PHY_E810:
@@ -5802,7 +5885,7 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
*/
void ice_ptp_reset_ts_memory(struct ice_hw *hw)
{
- switch (hw->ptp.phy_model) {
+ switch (ice_get_phy_model(hw)) {
case ICE_PHY_ETH56G:
ice_ptp_reset_ts_memory_eth56g(hw);
break;
@@ -5831,7 +5914,7 @@ int ice_ptp_init_phc(struct ice_hw *hw)
/* Clear event err indications for auxiliary pins */
(void)rd32(hw, GLTSYN_STAT(src_idx));
- switch (hw->ptp.phy_model) {
+ switch (ice_get_phy_model(hw)) {
case ICE_PHY_ETH56G:
return ice_ptp_init_phc_eth56g(hw);
case ICE_PHY_E810:
@@ -5856,7 +5939,7 @@ int ice_ptp_init_phc(struct ice_hw *hw)
*/
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
{
- switch (hw->ptp.phy_model) {
+ switch (ice_get_phy_model(hw)) {
case ICE_PHY_ETH56G:
return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
tstamp_ready);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 6cedc1a906af..47af7c5c79b8 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -316,7 +316,7 @@ ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ];
extern const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES];
/* Table of constants related to possible TIME_REF sources */
-extern const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ];
+extern const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ];
/* Table of constants for Vernier calibration on E822 */
extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
@@ -326,10 +326,12 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
*/
#define ICE_E810_PLL_FREQ 812500000
#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
-#define E810_OUT_PROP_DELAY_NS 1
+#define ICE_E810_OUT_PROP_DELAY_NS 1
+#define ICE_E825C_OUT_PROP_DELAY_NS 11
/* Device agnostic functions */
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
+int ice_cgu_cfg_pps_out(struct ice_hw *hw, bool enable);
bool ice_ptp_lock(struct ice_hw *hw);
void ice_ptp_unlock(struct ice_hw *hw);
void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd);
@@ -358,7 +360,7 @@ void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad);
*
* Returns the current TIME_REF from the capabilities structure.
*/
-static inline enum ice_time_ref_freq ice_e82x_time_ref(struct ice_hw *hw)
+static inline enum ice_time_ref_freq ice_e82x_time_ref(const struct ice_hw *hw)
{
return hw->func_caps.ts_func_info.time_ref;
}
@@ -379,17 +381,17 @@ ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref)
static inline u64 ice_e82x_pll_freq(enum ice_time_ref_freq time_ref)
{
- return e822_time_ref[time_ref].pll_freq;
+ return e82x_time_ref[time_ref].pll_freq;
}
static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref)
{
- return e822_time_ref[time_ref].nominal_incval;
+ return e82x_time_ref[time_ref].nominal_incval;
}
static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref)
{
- return e822_time_ref[time_ref].pps_delay;
+ return e82x_time_ref[time_ref].pps_delay;
}
/* E822 Vernier calibration functions */
@@ -400,10 +402,10 @@ int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port);
int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold);
/* E810 family functions */
-int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
-int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
-int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data);
-bool ice_is_pca9575_present(struct ice_hw *hw);
+int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data);
+int ice_write_sma_ctrl(struct ice_hw *hw, u8 data);
+int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
+int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries);
int ice_cgu_get_num_pins(struct ice_hw *hw, bool input);
enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input);
struct dpll_pin_frequency *
@@ -421,8 +423,6 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status);
int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset);
int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port);
-int ice_phy_cfg_tx_offset_eth56g(struct ice_hw *hw, u8 port);
-int ice_phy_cfg_rx_offset_eth56g(struct ice_hw *hw, u8 port);
int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold);
int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port);
@@ -432,6 +432,20 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port);
#define ICE_ETH56G_NOMINAL_THRESH4 0x7777
#define ICE_ETH56G_NOMINAL_TX_THRESH 0x6
+static inline u64 ice_prop_delay(const struct ice_hw *hw)
+{
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ICE_E825C_OUT_PROP_DELAY_NS;
+ case ICE_PHY_E810:
+ return ICE_E810_OUT_PROP_DELAY_NS;
+ case ICE_PHY_E82X:
+ return ice_e82x_pps_delay(ice_e82x_time_ref(hw));
+ default:
+ return 0;
+ }
+}
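
A sketch of how this helper might be consumed (the call site below is an
assumption, not quoted from the driver): when programming a periodic output,
the target time can be advanced by the PHY's output propagation delay so the
edge appears on the pin at the requested instant:

	/* hypothetical perout programming path */
	start_time -= ice_prop_delay(hw);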
+
/**
* ice_get_base_incval - Get base clock increment value
* @hw: pointer to the HW struct
@@ -452,6 +466,11 @@ static inline u64 ice_get_base_incval(struct ice_hw *hw)
}
}
+static inline bool ice_is_dual(struct ice_hw *hw)
+{
+ return !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M);
+}
+
#define PFTSYN_SEM_BYTES 4
#define ICE_PTP_CLOCK_INDEX_0 0x00
@@ -689,30 +708,27 @@ static inline u64 ice_get_base_incval(struct ice_hw *hw)
#define LOW_TX_MEMORY_BANK_START 0x03090000
#define HIGH_TX_MEMORY_BANK_START 0x03090004
-/* E810T SMA controller pin control */
-#define ICE_SMA1_DIR_EN_E810T BIT(4)
-#define ICE_SMA1_TX_EN_E810T BIT(5)
-#define ICE_SMA2_UFL2_RX_DIS_E810T BIT(3)
-#define ICE_SMA2_DIR_EN_E810T BIT(6)
-#define ICE_SMA2_TX_EN_E810T BIT(7)
-
-#define ICE_SMA1_MASK_E810T (ICE_SMA1_DIR_EN_E810T | \
- ICE_SMA1_TX_EN_E810T)
-#define ICE_SMA2_MASK_E810T (ICE_SMA2_UFL2_RX_DIS_E810T | \
- ICE_SMA2_DIR_EN_E810T | \
- ICE_SMA2_TX_EN_E810T)
-#define ICE_ALL_SMA_MASK_E810T (ICE_SMA1_MASK_E810T | \
- ICE_SMA2_MASK_E810T)
-
-#define ICE_SMA_MIN_BIT_E810T 3
-#define ICE_SMA_MAX_BIT_E810T 7
+/* SMA controller pin control */
+#define ICE_SMA1_DIR_EN BIT(4)
+#define ICE_SMA1_TX_EN BIT(5)
+#define ICE_SMA2_UFL2_RX_DIS BIT(3)
+#define ICE_SMA2_DIR_EN BIT(6)
+#define ICE_SMA2_TX_EN BIT(7)
+
+#define ICE_SMA1_MASK (ICE_SMA1_DIR_EN | ICE_SMA1_TX_EN)
+#define ICE_SMA2_MASK (ICE_SMA2_UFL2_RX_DIS | ICE_SMA2_DIR_EN | \
+ ICE_SMA2_TX_EN)
+#define ICE_ALL_SMA_MASK (ICE_SMA1_MASK | ICE_SMA2_MASK)
+
+#define ICE_SMA_MIN_BIT 3
+#define ICE_SMA_MAX_BIT 7
#define ICE_PCA9575_P1_OFFSET 8
-/* E810T PCA9575 IO controller registers */
+/* PCA9575 IO controller registers */
#define ICE_PCA9575_P0_IN 0x0
-/* E810T PCA9575 IO controller pin control */
-#define ICE_E810T_P0_GNSS_PRSNT_N BIT(4)
+/* PCA9575 IO controller pin control */
+#define ICE_P0_GNSS_PRSNT_N BIT(4)
/* ETH56G PHY register addresses */
/* Timestamp PHY incval registers */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 91cb393f616f..b83f99c01d91 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -194,7 +194,8 @@ void ice_free_vfs(struct ice_pf *pf)
}
/* clear malicious info since the VF is getting released */
- list_del(&vf->mbx_info.list_entry);
+ if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+ list_del(&vf->mbx_info.list_entry);
mutex_unlock(&vf->cfg_lock);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 8208055d6e7f..5d2d7736fd5f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1352,14 +1352,14 @@ static void ice_net_dim(struct ice_q_vector *q_vector)
struct dim_sample dim_sample;
__ice_update_sample(q_vector, tx, &dim_sample, true);
- net_dim(&tx->dim, dim_sample);
+ net_dim(&tx->dim, &dim_sample);
}
if (ITR_IS_DYNAMIC(rx)) {
struct dim_sample dim_sample;
__ice_update_sample(q_vector, rx, &dim_sample, false);
- net_dim(&rx->dim, dim_sample);
+ net_dim(&rx->dim, &dim_sample);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index feba314a3fe4..cb347c852ba9 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -359,8 +359,9 @@ struct ice_rx_ring {
struct ice_rx_ring *next; /* pointer to next ring in q_vector */
struct xsk_buff_pool *xsk_pool;
u32 nr_frags;
- dma_addr_t dma; /* physical address of ring */
+ u16 max_frame;
u16 rx_buf_len;
+ dma_addr_t dma; /* physical address of ring */
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_rx;
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
@@ -406,6 +407,7 @@ struct ice_tx_ring {
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
u8 flags;
u8 dcb_tc; /* Traffic class of ring */
+ u16 quanta_prof_id;
} ____cacheline_internodealigned_in_smp;
static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index afcead4baef4..79f960c6680d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -154,7 +154,6 @@ static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring)
}
void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx);
-int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
bool frame);
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 45768796691f..adb168860711 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -905,6 +905,7 @@ struct ice_hw {
u8 revision_id;
u8 pf_id; /* device profile info */
+ u8 logical_pf_id;
u16 max_burst_size; /* driver sets this value */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 8c434689e3f7..c7c0c2f50c26 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -717,6 +717,23 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
}
/**
+ * ice_reset_vf_mbx_cnt - reset VF mailbox message count
+ * @vf: pointer to the VF structure
+ *
+ * This function clears the VF mailbox message count, and should be called on
+ * VF reset.
+ */
+static void ice_reset_vf_mbx_cnt(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+ ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
+ else
+ ice_mbx_clear_malvf(&vf->mbx_info);
+}
+
+/**
* ice_reset_all_vfs - reset all allocated VFs in one go
* @pf: pointer to the PF structure
*
@@ -742,7 +759,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
/* clear all malicious info if the VFs are getting reset */
ice_for_each_vf(pf, bkt, vf)
- ice_mbx_clear_malvf(&vf->mbx_info);
+ ice_reset_vf_mbx_cnt(vf);
/* If VFs have been disabled, there is no need to reset */
if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
@@ -958,7 +975,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_eswitch_update_repr(&vf->repr_id, vsi);
/* if the VF has been reset allow it to come up again */
- ice_mbx_clear_malvf(&vf->mbx_info);
+ ice_reset_vf_mbx_cnt(vf);
out_unlock:
if (lag && lag->bonded && lag->primary &&
@@ -1011,7 +1028,10 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
ice_vf_fdir_init(vf);
/* Initialize mailbox info for this VF */
- ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
+ if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+ ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);
+ else
+ ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
mutex_init(&vf->cfg_lock);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index be4266899690..4261fe1c2bcd 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -59,6 +59,13 @@ struct ice_fdir_prof_info {
u64 fdir_active_cnt;
};
+struct ice_vf_qs_bw {
+ u32 committed;
+ u32 peak;
+ u16 queue_id;
+ u8 tc;
+};
+
/* VF operations */
struct ice_vf_ops {
enum ice_disq_rst_src reset_type;
@@ -140,6 +147,7 @@ struct ice_vf {
struct devlink_port devlink_port;
u16 num_msix; /* num of MSI-X configured on this VF */
+ struct ice_vf_qs_bw qs_bw[ICE_MAX_RSS_QS_PER_VF];
};
/* Flags for controlling behavior of ice_reset_vf */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
index 40cb4ba0789c..75c8113e58ee 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
@@ -211,6 +211,38 @@ ice_mbx_detect_malvf(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info,
}
/**
+ * ice_mbx_vf_dec_trig_e830 - Decrements the VF mailbox queue counter
+ * @hw: pointer to the HW struct
+ * @event: pointer to the control queue receive event
+ *
+ * This function triggers to decrement the counter
+ * MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT when the driver replenishes
+ * the buffers at the PF mailbox queue.
+ */
+void ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw,
+ const struct ice_rq_event_info *event)
+{
+ u16 vfid = le16_to_cpu(event->desc.retval);
+
+ wr32(hw, E830_MBX_VF_DEC_TRIG(vfid), 1);
+}
+
+/**
+ * ice_mbx_vf_clear_cnt_e830 - Clear the VF mailbox queue count
+ * @hw: pointer to the HW struct
+ * @vf_id: VF ID in the PF space
+ *
+ * This function clears the counter MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT, and should
+ * be called when a VF is created and on VF reset.
+ */
+void ice_mbx_vf_clear_cnt_e830(const struct ice_hw *hw, u16 vf_id)
+{
+ u32 reg = rd32(hw, E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(vf_id));
+
+ wr32(hw, E830_MBX_VF_DEC_TRIG(vf_id), reg);
+}
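
A sketch of how the two helpers pair up (the call sites are assumptions, not
quoted from the driver): the counter is zeroed when a VF is created or reset,
and decremented each time the PF consumes a VF message and returns the buffer
to its mailbox ARQ:

	/* on VF creation or reset */
	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
		ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id);

	/* after handling one mailbox event and replenishing its buffer */
	if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
		ice_mbx_vf_dec_trig_e830(&pf->hw, event);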
+
+/**
* ice_mbx_vf_state_handler - Handle states of the overflow algorithm
* @hw: pointer to the HW struct
* @mbx_data: pointer to structure containing mailbox data
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
index 44bc030d17e0..684de89e5c5e 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
@@ -19,6 +19,9 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd);
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
+void ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw,
+ const struct ice_rq_event_info *event);
+void ice_mbx_vf_clear_cnt_e830(const struct ice_hw *hw, u16 vf_id);
int
ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
struct ice_mbx_vf_info *vf_info, bool *report_malvf);
@@ -47,5 +50,11 @@ static inline void ice_mbx_init_snapshot(struct ice_hw *hw)
{
}
+static inline void
+ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw,
+ const struct ice_rq_event_info *event)
+{
+}
+
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VF_MBX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 59f62306b9cb..f445e33b2028 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -495,6 +495,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_QOS)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_QOS;
+
vfres->num_vsis = 1;
/* Tx and Rx queue are equal for VF */
vfres->num_queue_pairs = vsi->num_txq;
@@ -1035,6 +1038,191 @@ error_param:
}
/**
+ * ice_vc_get_qos_caps - Get current QoS caps from PF
+ * @vf: pointer to the VF info
+ *
+ * Get VF's QoS capabilities, such as TC number, arbiter and
+ * bandwidth from PF.
+ *
+ * Return: 0 on success or negative error value.
+ */
+static int ice_vc_get_qos_caps(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_qos_cap_list *cap_list = NULL;
+ u8 tc_prio[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct virtchnl_qos_cap_elem *cfg = NULL;
+ struct ice_vsi_ctx *vsi_ctx;
+ struct ice_pf *pf = vf->pf;
+ struct ice_port_info *pi;
+ struct ice_vsi *vsi;
+ u8 numtc, tc;
+ u16 len = 0;
+ int ret, i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ pi = pf->hw.port_info;
+ numtc = vsi->tc_cfg.numtc;
+
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vf->lan_vsi_idx);
+ if (!vsi_ctx) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ len = struct_size(cap_list, cap, numtc);
+ cap_list = kzalloc(len, GFP_KERNEL);
+ if (!cap_list) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+
+ cap_list->vsi_id = vsi->vsi_num;
+ cap_list->num_elem = numtc;
+
+ /* Store the UP2TC configuration from DCB to a user priority bitmap
+ * of each TC. Each element of tc_prio represents one TC. Each
+ * bitmap indicates which user priorities belong to this TC.
+ */
+ for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
+ tc = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[i];
+ tc_prio[tc] |= BIT(i);
+ }
+
+ for (i = 0; i < numtc; i++) {
+ cfg = &cap_list->cap[i];
+ cfg->tc_num = i;
+ cfg->tc_prio = tc_prio[i];
+ cfg->arbiter = pi->qos_cfg.local_dcbx_cfg.etscfg.tsatable[i];
+ cfg->weight = VIRTCHNL_STRICT_WEIGHT;
+ cfg->type = VIRTCHNL_BW_SHAPER;
+ cfg->shaper.committed = vsi_ctx->sched.bw_t_info[i].cir_bw.bw;
+ cfg->shaper.peak = vsi_ctx->sched.bw_t_info[i].eir_bw.bw;
+ }
+
+err:
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_QOS_CAPS, v_ret,
+ (u8 *)cap_list, len);
+ kfree(cap_list);
+ return ret;
+}
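
A worked example of the UP2TC folding above: with a DCB priority table that
maps user priorities 0-3 to TC 0 and 4-7 to TC 1, the loop yields
tc_prio[0] = 0x0F and tc_prio[1] = 0xF0, i.e. each TC's byte is a bitmap of
the user priorities it serves.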
+
+/**
+ * ice_vf_cfg_qs_bw - Configure per queue bandwidth
+ * @vf: pointer to the VF info
+ * @num_queues: number of queues to be configured
+ *
+ * Configure per queue bandwidth.
+ *
+ * Return: 0 on success or negative error value.
+ */
+static int ice_vf_cfg_qs_bw(struct ice_vf *vf, u16 num_queues)
+{
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_vsi *vsi;
+ int ret;
+ u16 i;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -EINVAL;
+
+ for (i = 0; i < num_queues; i++) {
+ u32 p_rate, min_rate;
+ u8 tc;
+
+ p_rate = vf->qs_bw[i].peak;
+ min_rate = vf->qs_bw[i].committed;
+ tc = vf->qs_bw[i].tc;
+ if (p_rate)
+ ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MAX_BW, p_rate);
+ else
+ ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MAX_BW);
+ if (ret)
+ return ret;
+
+ if (min_rate)
+ ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MIN_BW, min_rate);
+ else
+ ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MIN_BW);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_cfg_q_quanta_profile - Configure quanta profile
+ * @vf: pointer to the VF info
+ * @quanta_prof_idx: pointer to the quanta profile index
+ * @quanta_size: quanta size to be set
+ *
+ * This function chooses an available quanta profile and configures the
+ * register. The quanta profiles are divided evenly among the device functions
+ * and are then available to the specific PF and its VFs. The first profile for
+ * each PF is a reserved default profile; only the quanta size of the remaining
+ * unused profiles can be modified.
+ *
+ * Return: 0 on success or negative error value.
+ */
+static int ice_vf_cfg_q_quanta_profile(struct ice_vf *vf, u16 quanta_size,
+ u16 *quanta_prof_idx)
+{
+ const u16 n_desc = calc_quanta_desc(quanta_size);
+ struct ice_hw *hw = &vf->pf->hw;
+ const u16 n_cmd = 2 * n_desc;
+ struct ice_pf *pf = vf->pf;
+ u16 per_pf, begin_id;
+ u8 n_used;
+ u32 reg;
+
+ begin_id = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) / hw->dev_caps.num_funcs *
+ hw->logical_pf_id;
+
+ if (quanta_size == ICE_DFLT_QUANTA) {
+ *quanta_prof_idx = begin_id;
+ } else {
+ per_pf = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) /
+ hw->dev_caps.num_funcs;
+ n_used = pf->num_quanta_prof_used;
+ if (n_used < per_pf) {
+ *quanta_prof_idx = begin_id + 1 + n_used;
+ pf->num_quanta_prof_used++;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ reg = FIELD_PREP(GLCOMM_QUANTA_PROF_QUANTA_SIZE_M, quanta_size) |
+ FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_CMD_M, n_cmd) |
+ FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_DESC_M, n_desc);
+ wr32(hw, GLCOMM_QUANTA_PROF(*quanta_prof_idx), reg);
+
+ return 0;
+}
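
A worked example of the partitioning (the profile count is illustrative;
GLCOMM_QUANTA_PROF_MAX_INDEX is defined elsewhere): with 16 profiles
(MAX_INDEX 15) and num_funcs = 4, per_pf = 4 and logical PF 1 starts at
begin_id = 4. A request for ICE_DFLT_QUANTA resolves to reserved profile 4;
the first custom quanta size is written to profile 5, the next to 6, and so
on until per_pf custom profiles are in use, after which -EINVAL is returned.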
+
+/**
* ice_vc_cfg_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1636,6 +1824,141 @@ error_param:
}
/**
+ * ice_vc_cfg_q_bw - Configure per queue bandwidth
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer which holds the command descriptor
+ *
+ * Configure VF queues bandwidth.
+ *
+ * Return: 0 on success or negative error value.
+ */
+static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queues_bw_cfg *qbw =
+ (struct virtchnl_queues_bw_cfg *)msg;
+ struct ice_vsi *vsi;
+ u16 i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !ice_vc_isvalid_vsi_id(vf, qbw->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (qbw->num_queues > ICE_MAX_RSS_QS_PER_VF ||
+ qbw->num_queues > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+ dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ for (i = 0; i < qbw->num_queues; i++) {
+ if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 &&
+ qbw->cfg[i].shaper.peak > vf->max_tx_rate)
+ dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n",
+ qbw->cfg[i].queue_id, vf->vf_id,
+ vf->max_tx_rate);
+ if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 &&
+ qbw->cfg[i].shaper.committed < vf->min_tx_rate)
+ dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n",
+ qbw->cfg[i].queue_id, vf->vf_id,
+ vf->min_tx_rate);
+ }
+
+ for (i = 0; i < qbw->num_queues; i++) {
+ vf->qs_bw[i].queue_id = qbw->cfg[i].queue_id;
+ vf->qs_bw[i].peak = qbw->cfg[i].shaper.peak;
+ vf->qs_bw[i].committed = qbw->cfg[i].shaper.committed;
+ vf->qs_bw[i].tc = qbw->cfg[i].tc;
+ }
+
+ if (ice_vf_cfg_qs_bw(vf, qbw->num_queues))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+err:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUEUE_BW,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_q_quanta - Configure per queue quanta
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer which holds the command descriptor
+ *
+ * Configure VF queues quanta.
+ *
+ * Return: 0 on success or negative error value.
+ */
+static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u16 quanta_prof_id, quanta_size, start_qid, end_qid, i;
+ struct virtchnl_quanta_cfg *qquanta =
+ (struct virtchnl_quanta_cfg *)msg;
+ struct ice_vsi *vsi;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ end_qid = qquanta->queue_select.start_queue_id +
+ qquanta->queue_select.num_queues;
+ if (end_qid > ICE_MAX_RSS_QS_PER_VF ||
+ end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+ dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ quanta_size = qquanta->quanta_size;
+ if (quanta_size > ICE_MAX_QUANTA_SIZE ||
+ quanta_size < ICE_MIN_QUANTA_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (quanta_size % 64) {
+ dev_err(ice_pf_to_dev(vf->pf), "quanta size should be the product of 64\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ ret = ice_vf_cfg_q_quanta_profile(vf, quanta_size,
+ &quanta_prof_id);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto err;
+ }
+
+ start_qid = qquanta->queue_select.start_queue_id;
+ for (i = start_qid; i < end_qid; i++)
+ vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id;
+
+err:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUANTA,
+ v_ret, NULL, 0);
+}
+
+/**
* ice_vc_cfg_qs_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1715,8 +2038,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
/* copy Tx queue info from VF into VSI */
if (qpi->txq.ring_len > 0) {
- vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
- vsi->tx_rings[i]->count = qpi->txq.ring_len;
+ vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr;
+ vsi->tx_rings[q_idx]->count = qpi->txq.ring_len;
/* Disable any existing queue first */
if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
@@ -1725,7 +2048,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
/* Configure a queue with the requested settings */
if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
- vf->vf_id, i);
+ vf->vf_id, q_idx);
goto error_param;
}
}
@@ -1733,39 +2056,37 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
/* copy Rx queue info from VF into VSI */
if (qpi->rxq.ring_len > 0) {
u16 max_frame_size = ice_vc_get_max_frame_size(vf);
+ struct ice_rx_ring *ring = vsi->rx_rings[q_idx];
u32 rxdid;
- vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
- vsi->rx_rings[i]->count = qpi->rxq.ring_len;
+ ring->dma = qpi->rxq.dma_ring_addr;
+ ring->count = qpi->rxq.ring_len;
if (qpi->rxq.crc_disable)
- vsi->rx_rings[q_idx]->flags |=
- ICE_RX_FLAGS_CRC_STRIP_DIS;
+ ring->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
else
- vsi->rx_rings[q_idx]->flags &=
- ~ICE_RX_FLAGS_CRC_STRIP_DIS;
+ ring->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
if (qpi->rxq.databuffer_size != 0 &&
(qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
qpi->rxq.databuffer_size < 1024))
goto error_param;
- vsi->rx_buf_len = qpi->rxq.databuffer_size;
- vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
+ ring->rx_buf_len = qpi->rxq.databuffer_size;
if (qpi->rxq.max_pkt_size > max_frame_size ||
qpi->rxq.max_pkt_size < 64)
goto error_param;
- vsi->max_frame = qpi->rxq.max_pkt_size;
+ ring->max_frame = qpi->rxq.max_pkt_size;
/* add space for the port VLAN since the VF driver is
* not expected to account for it in the MTU
* calculation
*/
if (ice_vf_is_port_vlan_ena(vf))
- vsi->max_frame += VLAN_HLEN;
+ ring->max_frame += VLAN_HLEN;
if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
- vf->vf_id, i);
+ vf->vf_id, q_idx);
goto error_param;
}
@@ -2233,17 +2554,27 @@ static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
/**
* ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
+ * @vf: VF to enable VLAN promisc on
* @vsi: VF's VSI used to enable VLAN promiscuous mode
* @vlan: VLAN used to enable VLAN promiscuous
*
* This function should only be called if VLAN promiscuous mode is allowed,
* which can be determined via ice_is_vlan_promisc_allowed().
*/
-static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
+static int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct ice_vlan *vlan)
{
- u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
+ u8 promisc_m = 0;
int status;
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
+ promisc_m |= ICE_UCAST_VLAN_PROMISC_BITS;
+ if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ promisc_m |= ICE_MCAST_VLAN_PROMISC_BITS;
+
+ if (!promisc_m)
+ return 0;
+
status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
vlan->vid);
if (status && status != -EEXIST)
@@ -2262,7 +2593,7 @@ static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
*/
static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
- u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
+ u8 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS | ICE_MCAST_VLAN_PROMISC_BITS;
int status;
status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
@@ -2417,7 +2748,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
} else if (vlan_promisc) {
- status = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ status = ice_vf_ena_vlan_promisc(vf, vsi, &vlan);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
@@ -2700,12 +3031,8 @@ err:
static int ice_vc_query_rxdid(struct ice_vf *vf)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_supported_rxdids *rxdid = NULL;
- struct ice_hw *hw = &vf->pf->hw;
+ struct virtchnl_supported_rxdids rxdid = {};
struct ice_pf *pf = vf->pf;
- int len = 0;
- int ret, i;
- u32 regval;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2717,35 +3044,11 @@ static int ice_vc_query_rxdid(struct ice_vf *vf)
goto err;
}
- len = sizeof(struct virtchnl_supported_rxdids);
- rxdid = kzalloc(len, GFP_KERNEL);
- if (!rxdid) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
- len = 0;
- goto err;
- }
-
- /* RXDIDs supported by DDP package can be read from the register
- * to get the supported RXDID bitmap. But the legacy 32byte RXDID
- * is not listed in DDP package, add it in the bitmap manually.
- * Legacy 16byte descriptor is not supported.
- */
- rxdid->supported_rxdids |= BIT(ICE_RXDID_LEGACY_1);
-
- for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
- regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
- if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
- & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
- rxdid->supported_rxdids |= BIT(i);
- }
-
- pf->supported_rxdids = rxdid->supported_rxdids;
+ rxdid.supported_rxdids = pf->supported_rxdids;
err:
- ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
- v_ret, (u8 *)rxdid, len);
- kfree(rxdid);
- return ret;
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
+ v_ret, (u8 *)&rxdid, sizeof(rxdid));
}
/**
@@ -3254,7 +3557,7 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
return err;
if (vlan_promisc) {
- err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ err = ice_vf_ena_vlan_promisc(vf, vsi, &vlan);
if (err)
return err;
}
@@ -3282,7 +3585,8 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
*/
if (!ice_is_dvm_ena(&vsi->back->hw)) {
if (vlan_promisc) {
- err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ err = ice_vf_ena_vlan_promisc(vf, vsi,
+ &vlan);
if (err)
return err;
}
@@ -3821,6 +4125,9 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
+ .get_qos_caps = ice_vc_get_qos_caps,
+ .cfg_q_bw = ice_vc_cfg_q_bw,
+ .cfg_q_quanta = ice_vc_cfg_q_quanta,
};
/**
@@ -4009,8 +4316,10 @@ ice_is_malicious_vf(struct ice_vf *vf, struct ice_mbx_data *mbxdata)
* @event: pointer to the AQ event
* @mbxdata: information used to detect VF attempting mailbox overflow
*
- * called from the common asq/arq handler to
- * process request from VF
+ * Called from the common asq/arq handler to process a request from a VF. When
+ * this flow is used for devices with hardware VF-to-PF message queue overflow
+ * support (ICE_F_MBX_LIMIT), mbxdata is set to NULL and the
+ * ice_is_malicious_vf() check is skipped.
*/
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
struct ice_mbx_data *mbxdata)
@@ -4036,7 +4345,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
mutex_lock(&vf->cfg_lock);
/* Check if the VF is trying to overflow the mailbox */
- if (ice_is_malicious_vf(vf, mbxdata))
+ if (mbxdata && ice_is_malicious_vf(vf, mbxdata))
goto finish;
/* Check if VF is disabled. */
@@ -4177,6 +4486,15 @@ error_handler:
case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
err = ops->dis_vlan_insertion_v2_msg(vf, msg);
break;
+ case VIRTCHNL_OP_GET_QOS_CAPS:
+ err = ops->get_qos_caps(vf);
+ break;
+ case VIRTCHNL_OP_CONFIG_QUEUE_BW:
+ err = ops->cfg_q_bw(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_QUANTA:
+ err = ops->cfg_q_quanta(vf, msg);
+ break;
case VIRTCHNL_OP_UNKNOWN:
default:
dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index 3a4115869153..0c629aef9baf 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -13,6 +13,13 @@
/* Restrict number of MAC Addr and VLAN that non-trusted VF can program */
#define ICE_MAX_VLAN_PER_VF 8
+#define ICE_DFLT_QUANTA 1024
+#define ICE_MAX_QUANTA_SIZE 4096
+#define ICE_MIN_QUANTA_SIZE 256
+
+#define calc_quanta_desc(x) \
+ max_t(u16, 12, min_t(u16, 63, (((x) + 66) / 132) * 2 + 4))
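
Worked example of the descriptor budget formula: for the default quanta size
of 1024, calc_quanta_desc(1024) = ((1024 + 66) / 132) * 2 + 4 = 8 * 2 + 4 = 20
descriptors, and the command budget derived from it in
ice_vf_cfg_q_quanta_profile() is 2 * 20 = 40. The clamp keeps results in the
12-63 range: ICE_MIN_QUANTA_SIZE (256) raises the raw value 8 up to 12, and
ICE_MAX_QUANTA_SIZE (4096) lowers the raw value 66 down to 63.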
+
/* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for
* broadcast, and 16 for additional unicast/multicast filters
*/
@@ -61,6 +68,10 @@ struct ice_virtchnl_ops {
int (*dis_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg);
int (*ena_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg);
int (*dis_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_qos_caps)(struct ice_vf *vf);
+ int (*cfg_q_tc_map)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_q_bw)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_q_quanta)(struct ice_vf *vf, u8 *msg);
};
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index d796dbd2a440..c105a82ee136 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -84,6 +84,11 @@ static const u32 fdir_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_ADD_FDIR_FILTER, VIRTCHNL_OP_DEL_FDIR_FILTER,
};
+static const u32 tc_allowlist_opcodes[] = {
+ VIRTCHNL_OP_GET_QOS_CAPS, VIRTCHNL_OP_CONFIG_QUEUE_BW,
+ VIRTCHNL_OP_CONFIG_QUANTA,
+};
+
struct allowlist_opcode_info {
const u32 *opcodes;
size_t size;
@@ -104,6 +109,7 @@ static const struct allowlist_opcode_info allowlist_opcodes[] = {
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF, adv_rss_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN_V2, vlan_v2_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_QOS, tc_allowlist_opcodes),
};
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index d4e6f0e10487..da2a5becf62f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3679,7 +3679,7 @@ static void idpf_net_dim(struct idpf_q_vector *q_vector)
idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim,
packets, bytes);
- net_dim(&q_vector->tx_dim, dim_sample);
+ net_dim(&q_vector->tx_dim, &dim_sample);
check_rx_itr:
if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode))
@@ -3698,7 +3698,7 @@ check_rx_itr:
idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim,
packets, bytes);
- net_dim(&q_vector->rx_dim, dim_sample);
+ net_dim(&q_vector->rx_dim, &dim_sample);
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index f0537826f840..9c1fe84108ed 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -438,7 +438,8 @@ struct idpf_q_vector {
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_q_vector, 112,
- 424 + 2 * sizeof(struct dim),
+ 24 + sizeof(struct napi_struct) +
+ 2 * sizeof(struct dim),
8 + sizeof(cpumask_var_t));
struct idpf_rx_queue_stats {
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 6e110f28f922..529b7d18b662 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -63,6 +63,5 @@ enum e1000_mng_mode {
#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
-void e1000_init_function_pointers_82575(struct e1000_hw *hw);
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 091cddf4ada8..4f652ab713b3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -7,7 +7,6 @@
s32 igb_acquire_nvm(struct e1000_hw *hw);
void igb_release_nvm(struct e1000_hw *hw);
s32 igb_read_mac_addr(struct e1000_hw *hw);
-s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
u32 part_num_size);
s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index f1d088168723..08578980b651 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1204,7 +1204,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
/* initialize pointer to rings */
ring = q_vector->ring;
- /* intialize ITR */
+ /* initialize ITR */
if (rxr_count) {
/* rx or rx/tx vector */
if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
@@ -2486,7 +2486,7 @@ static int igb_set_features(struct net_device *netdev,
static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags,
+ u16 flags, bool *notified,
struct netlink_ext_ack *extack)
{
/* guarantee we can provide a unique filter for the unicast address */
@@ -3906,7 +3906,7 @@ static void igb_remove(struct pci_dev *pdev)
*
* This function initializes the vf specific data storage and then attempts to
* allocate the VFs. The reason for ordering it this way is because it is much
- * mor expensive time wise to disable SR-IOV than it is to allocate and free
+ * more expensive time wise to disable SR-IOV than it is to allocate and free
* the memory for the VFs.
**/
static void igb_probe_vfs(struct igb_adapter *adapter)
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 6ad35a00a287..ca6e44245a7b 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -169,8 +169,6 @@ struct igbvf_adapter {
u16 link_speed;
u16 link_duplex;
- spinlock_t tx_queue_lock; /* prevent concurrent tail updates */
-
/* track device up/down/testing state */
unsigned long state;
@@ -220,7 +218,6 @@ struct igbvf_adapter {
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
- spinlock_t stats_lock; /* prevent concurrent stats updates */
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 925d7286a8ee..02044aa2181b 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1656,12 +1656,9 @@ static int igbvf_sw_init(struct igbvf_adapter *adapter)
if (igbvf_alloc_queues(adapter))
return -ENOMEM;
- spin_lock_init(&adapter->tx_queue_lock);
-
/* Explicitly disable IRQ since the NIC can be in any state. */
igbvf_irq_disable(adapter);
- spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->hw.mbx_lock);
set_bit(__IGBVF_DOWN, &adapter->state);
diff --git a/drivers/net/ethernet/intel/igc/igc_diag.c b/drivers/net/ethernet/intel/igc/igc_diag.c
index cc621970c0cd..a43d7244ee70 100644
--- a/drivers/net/ethernet/intel/igc/igc_diag.c
+++ b/drivers/net/ethernet/intel/igc/igc_diag.c
@@ -173,8 +173,7 @@ bool igc_link_test(struct igc_adapter *adapter, u64 *data)
*data = 0;
/* add delay to give enough time for autonegotiation to finish */
- if (adapter->hw.mac.autoneg)
- ssleep(5);
+ ssleep(5);
link_up = igc_has_link(adapter);
if (!link_up) {
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 5b0c6f433767..817838677817 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1821,11 +1821,8 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
/* set autoneg settings */
- if (hw->mac.autoneg == 1) {
- ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- Autoneg);
- }
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
/* Set pause flow control settings */
ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
@@ -1878,10 +1875,7 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
cmd->base.duplex = DUPLEX_UNKNOWN;
}
cmd->base.speed = speed;
- if (hw->mac.autoneg)
- cmd->base.autoneg = AUTONEG_ENABLE;
- else
- cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
/* MDI-X => 2; MDI =>1; Invalid =>0 */
if (hw->phy.media_type == igc_media_type_copper)
@@ -1955,7 +1949,6 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
advertised |= ADVERTISE_10_HALF;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
- hw->mac.autoneg = 1;
hw->phy.autoneg_advertised = advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = igc_fc_default;
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index e1c572e0d4ef..d9d1a1a11daf 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -92,7 +92,6 @@ struct igc_mac_info {
bool asf_firmware_present;
bool arc_subsystem_valid;
- bool autoneg;
bool autoneg_failed;
bool get_link_status;
};
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index a5c4b19d71a2..d344e0a1cd5e 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -386,14 +386,6 @@ s32 igc_check_for_copper_link(struct igc_hw *hw)
*/
igc_check_downshift(hw);
- /* If we are forcing speed/duplex, then we simply return since
- * we have already determined whether we have link or not.
- */
- if (!mac->autoneg) {
- ret_val = -IGC_ERR_CONFIG;
- goto out;
- }
-
/* Auto-Neg is enabled. Auto Speed Detection takes care
* of MAC speed/duplex configuration. So we only need to
* configure Collision Distance in the MAC.
@@ -468,173 +460,171 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw)
goto out;
}
- /* Check for the case where we have copper media and auto-neg is
- * enabled. In this case, we need to check and see if Auto-Neg
- * has completed, and if so, how the PHY and link partner has
- * flow control configured.
+ /* In auto-neg, we need to check and see if Auto-Neg has completed,
+ * and if so, how the PHY and link partner have flow control
+ * configured.
*/
- if (mac->autoneg) {
- /* Read the MII Status Register and check to see if AutoNeg
- * has completed. We read this twice because this reg has
- * some "sticky" (latched) bits.
- */
- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
- &mii_status_reg);
- if (ret_val)
- goto out;
- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
- &mii_status_reg);
- if (ret_val)
- goto out;
- if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
- hw_dbg("Copper PHY and Auto Neg has not completed.\n");
- goto out;
- }
+ /* Read the MII Status Register and check to see if AutoNeg
+ * has completed. We read this twice because this reg has
+ * some "sticky" (latched) bits.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+ &mii_status_reg);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+ &mii_status_reg);
+ if (ret_val)
+ goto out;
- /* The AutoNeg process has completed, so we now need to
- * read both the Auto Negotiation Advertisement
- * Register (Address 4) and the Auto_Negotiation Base
- * Page Ability Register (Address 5) to determine how
- * flow control was negotiated.
- */
- ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
- &mii_nway_adv_reg);
- if (ret_val)
- goto out;
- ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
- &mii_nway_lp_ability_reg);
- if (ret_val)
- goto out;
- /* Two bits in the Auto Negotiation Advertisement Register
- * (Address 4) and two bits in the Auto Negotiation Base
- * Page Ability Register (Address 5) determine flow control
- * for both the PHY and the link partner. The following
- * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
- * 1999, describes these PAUSE resolution bits and how flow
- * control is determined based upon these settings.
- * NOTE: DC = Don't Care
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
- *-------|---------|-------|---------|--------------------
- * 0 | 0 | DC | DC | igc_fc_none
- * 0 | 1 | 0 | DC | igc_fc_none
- * 0 | 1 | 1 | 0 | igc_fc_none
- * 0 | 1 | 1 | 1 | igc_fc_tx_pause
- * 1 | 0 | 0 | DC | igc_fc_none
- * 1 | DC | 1 | DC | igc_fc_full
- * 1 | 1 | 0 | 0 | igc_fc_none
- * 1 | 1 | 0 | 1 | igc_fc_rx_pause
- *
- * Are both PAUSE bits set to 1? If so, this implies
- * Symmetric Flow Control is enabled at both ends. The
- * ASM_DIR bits are irrelevant per the spec.
- *
- * For Symmetric Flow Control:
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
- * 1 | DC | 1 | DC | IGC_fc_full
- *
- */
- if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
- /* Now we need to check if the user selected RX ONLY
- * of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
- */
- if (hw->fc.requested_mode == igc_fc_full) {
- hw->fc.current_mode = igc_fc_full;
- hw_dbg("Flow Control = FULL.\n");
- } else {
- hw->fc.current_mode = igc_fc_rx_pause;
- hw_dbg("Flow Control = RX PAUSE frames only.\n");
- }
- }
+ if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+ hw_dbg("Copper PHY and Auto Neg has not completed.\n");
+ goto out;
+ }
- /* For receiving PAUSE frames ONLY.
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
- * 0 | 1 | 1 | 1 | igc_fc_tx_pause
- */
- else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
- hw->fc.current_mode = igc_fc_tx_pause;
- hw_dbg("Flow Control = TX PAUSE frames only.\n");
- }
- /* For transmitting PAUSE frames ONLY.
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
- * 1 | 1 | 0 | 1 | igc_fc_rx_pause
- */
- else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
- !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
- hw->fc.current_mode = igc_fc_rx_pause;
- hw_dbg("Flow Control = RX PAUSE frames only.\n");
- }
- /* Per the IEEE spec, at this point flow control should be
- * disabled. However, we want to consider that we could
- * be connected to a legacy switch that doesn't advertise
- * desired flow control, but can be forced on the link
- * partner. So if we advertised no flow control, that is
- * what we will resolve to. If we advertised some kind of
- * receive capability (Rx Pause Only or Full Flow Control)
- * and the link partner advertised none, we will configure
- * ourselves to enable Rx Flow Control only. We can do
- * this safely for two reasons: If the link partner really
- * didn't want flow control enabled, and we enable Rx, no
- * harm done since we won't be receiving any PAUSE frames
- * anyway. If the intent on the link partner was to have
- * flow control enabled, then by us enabling RX only, we
- * can at least receive pause frames and process them.
- * This is a good idea because in most cases, since we are
- * predominantly a server NIC, more times than not we will
- * be asked to delay transmission of packets than asking
- * our link partner to pause transmission of frames.
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (Address 4) and the Auto_Negotiation Base
+ * Page Ability Register (Address 5) to determine how
+ * flow control was negotiated.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
+ &mii_nway_adv_reg);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
+ &mii_nway_lp_ability_reg);
+ if (ret_val)
+ goto out;
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (Address 4) and two bits in the Auto Negotiation Base
+ * Page Ability Register (Address 5) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | igc_fc_none
+ * 0 | 1 | 0 | DC | igc_fc_none
+ * 0 | 1 | 1 | 0 | igc_fc_none
+ * 0 | 1 | 1 | 1 | igc_fc_tx_pause
+ * 1 | 0 | 0 | DC | igc_fc_none
+ * 1 | DC | 1 | DC | igc_fc_full
+ * 1 | 1 | 0 | 0 | igc_fc_none
+ * 1 | 1 | 0 | 1 | igc_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | IGC_fc_full
+ *
+ */
+ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+ /* Now we need to check if the user selected RX ONLY
+ * for pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
*/
- else if ((hw->fc.requested_mode == igc_fc_none) ||
- (hw->fc.requested_mode == igc_fc_tx_pause) ||
- (hw->fc.strict_ieee)) {
- hw->fc.current_mode = igc_fc_none;
- hw_dbg("Flow Control = NONE.\n");
+ if (hw->fc.requested_mode == igc_fc_full) {
+ hw->fc.current_mode = igc_fc_full;
+ hw_dbg("Flow Control = FULL.\n");
} else {
hw->fc.current_mode = igc_fc_rx_pause;
hw_dbg("Flow Control = RX PAUSE frames only.\n");
}
+ }
- /* Now we need to do one last check... If we auto-
- * negotiated to HALF DUPLEX, flow control should not be
- * enabled per IEEE 802.3 spec.
- */
- ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
- if (ret_val) {
- hw_dbg("Error getting link speed and duplex\n");
- goto out;
- }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | igc_fc_tx_pause
+ */
+ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = igc_fc_tx_pause;
+ hw_dbg("Flow Control = TX PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | igc_fc_rx_pause
+ */
+ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = igc_fc_rx_pause;
+ hw_dbg("Flow Control = RX PAUSE frames only.\n");
+ }
+ /* Per the IEEE spec, at this point flow control should be
+ * disabled. However, we want to consider that we could
+ * be connected to a legacy switch that doesn't advertise
+ * desired flow control, but can be forced on the link
+ * partner. So if we advertised no flow control, that is
+ * what we will resolve to. If we advertised some kind of
+ * receive capability (Rx Pause Only or Full Flow Control)
+ * and the link partner advertised none, we will configure
+ * ourselves to enable Rx Flow Control only. We can do
+ * this safely for two reasons: If the link partner really
+ * didn't want flow control enabled, and we enable Rx, no
+ * harm done since we won't be receiving any PAUSE frames
+ * anyway. If the intent on the link partner was to have
+ * flow control enabled, then by us enabling RX only, we
+ * can at least receive pause frames and process them.
+ * This is a good idea because in most cases, since we are
+ * predominantly a server NIC, more often than not we will
+ * be asked to delay transmission of packets rather than ask
+ * our link partner to pause transmission of frames.
+ */
+ else if ((hw->fc.requested_mode == igc_fc_none) ||
+ (hw->fc.requested_mode == igc_fc_tx_pause) ||
+ (hw->fc.strict_ieee)) {
+ hw->fc.current_mode = igc_fc_none;
+ hw_dbg("Flow Control = NONE.\n");
+ } else {
+ hw->fc.current_mode = igc_fc_rx_pause;
+ hw_dbg("Flow Control = RX PAUSE frames only.\n");
+ }
- if (duplex == HALF_DUPLEX)
- hw->fc.current_mode = igc_fc_none;
+ /* Now we need to do one last check... If we auto-
+ * negotiated to HALF DUPLEX, flow control should not be
+ * enabled per IEEE 802.3 spec.
+ */
+ ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ hw_dbg("Error getting link speed and duplex\n");
+ goto out;
+ }
- /* Now we call a subroutine to actually force the MAC
- * controller to use the correct flow control settings.
- */
- ret_val = igc_force_mac_fc(hw);
- if (ret_val) {
- hw_dbg("Error forcing flow control settings\n");
- goto out;
- }
+ if (duplex == HALF_DUPLEX)
+ hw->fc.current_mode = igc_fc_none;
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ ret_val = igc_force_mac_fc(hw);
+ if (ret_val) {
+ hw_dbg("Error forcing flow control settings\n");
+ goto out;
}
out:
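The de-indented flow-control resolution above still implements the PAUSE/ASM_DIR tables verbatim; the whole decision reduces to four comparisons. A standalone sketch with illustrative names (not driver API):

    enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    static enum fc_mode resolve_fc(bool loc_pause, bool loc_asm,
                                   bool lp_pause, bool lp_asm)
    {
            if (loc_pause && lp_pause)
                    return FC_FULL;      /* both PAUSE bits set: symmetric */
            if (!loc_pause && loc_asm && lp_pause && lp_asm)
                    return FC_TX_PAUSE;  /* we send PAUSE frames only */
            if (loc_pause && loc_asm && !lp_pause && lp_asm)
                    return FC_RX_PAUSE;  /* we receive PAUSE frames only */
            return FC_NONE;              /* otherwise disabled per IEEE 802.3 */
    }

On top of this, the driver demotes the symmetric result to rx_pause when the user requested Rx-only pause, as the first branch of the code above shows.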
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 6e70bca15db1..27872bdea9bd 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -7108,7 +7108,6 @@ static int igc_probe(struct pci_dev *pdev,
/* Initialize link properties that are user-changeable */
adapter->fc_autoneg = true;
- hw->mac.autoneg = true;
hw->phy.autoneg_advertised = 0xaf;
hw->fc.requested_mode = igc_fc_default;
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 2801e5f24df9..6c4d204aecfa 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -494,24 +494,12 @@ s32 igc_setup_copper_link(struct igc_hw *hw)
s32 ret_val = 0;
bool link;
- if (hw->mac.autoneg) {
- /* Setup autoneg and flow control advertisement and perform
- * autonegotiation.
- */
- ret_val = igc_copper_link_autoneg(hw);
- if (ret_val)
- goto out;
- } else {
- /* PHY will be set to 10H, 10F, 100H or 100F
- * depending on user settings.
- */
- hw_dbg("Forcing Speed and Duplex\n");
- ret_val = hw->phy.ops.force_speed_duplex(hw);
- if (ret_val) {
- hw_dbg("Error Forcing Speed and Duplex\n");
- goto out;
- }
- }
+ /* Setup autoneg and flow control advertisement and perform
+ * autonegotiation.
+ */
+ ret_val = igc_copper_link_autoneg(hw);
+ if (ret_val)
+ goto out;
/* Check link status. Wait up to 100 microseconds for link to become
* valid.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 283a23150a4d..4aaaea3b5f8f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -6,6 +6,7 @@
#include <linux/sched.h>
#include "ixgbe.h"
+#include "ixgbe_mbx.h"
#include "ixgbe_phy.h"
#define IXGBE_82598_MAX_TX_QUEUES 32
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 8b8404d8c946..2e38e8f6fac1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -43,6 +43,7 @@
#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
+#include "ixgbe_mbx.h"
#include "ixgbe_phy.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"
@@ -9954,7 +9955,7 @@ static int ixgbe_set_features(struct net_device *netdev,
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- u16 flags,
+ u16 flags, bool *notified,
struct netlink_ext_ack *extack)
{
/* guarantee we can provide a unique filter for the unicast address */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index bd205306934b..bf65e82b4c61 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -4,7 +4,7 @@
#ifndef _IXGBE_MBX_H_
#define _IXGBE_MBX_H_
-#include "ixgbe_type.h"
+#include <linux/types.h>
#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
@@ -96,6 +96,8 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+struct ixgbe_hw;
+
int ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
int ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
int ixgbe_check_for_msg(struct ixgbe_hw *, u16);
@@ -105,6 +107,18 @@ int ixgbe_check_for_rst(struct ixgbe_hw *, u16);
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
#endif /* CONFIG_PCI_IOV */
+struct ixgbe_mbx_operations {
+ int (*init_params)(struct ixgbe_hw *hw);
+ int (*read)(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 vf_number);
+ int (*write)(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 vf_number);
+ int (*read_posted)(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+ int (*write_posted)(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id);
+ int (*check_for_msg)(struct ixgbe_hw *hw, u16 vf_number);
+ int (*check_for_ack)(struct ixgbe_hw *hw, u16 vf_number);
+ int (*check_for_rst)(struct ixgbe_hw *hw, u16 vf_number);
+};
+
extern const struct ixgbe_mbx_operations mbx_ops_generic;
#endif /* _IXGBE_MBX_H_ */
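Replacing the ixgbe_type.h include with <linux/types.h> plus a forward declaration of struct ixgbe_hw is what breaks the circular dependency between the two headers: the mailbox header only ever handles pointers, so the full definition is not needed. The pattern in miniature, with illustrative names:

    /* foo_mbx.h: only pointers to struct foo_hw appear, so a forward
     * declaration suffices; no heavyweight include is required.
     */
    struct foo_hw;

    struct foo_mbx_operations {
            int (*read)(struct foo_hw *hw, u32 *msg, u16 size, u16 id);
    };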
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index e71715f5da22..9631559a5aea 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -18,6 +18,7 @@
#include "ixgbe.h"
#include "ixgbe_type.h"
+#include "ixgbe_mbx.h"
#include "ixgbe_sriov.h"
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 346e3d9114a8..9baccacd02a1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -3601,19 +3601,6 @@ struct ixgbe_phy_info {
u32 nw_mng_if_sel;
};
-#include "ixgbe_mbx.h"
-
-struct ixgbe_mbx_operations {
- int (*init_params)(struct ixgbe_hw *hw);
- int (*read)(struct ixgbe_hw *, u32 *, u16, u16);
- int (*write)(struct ixgbe_hw *, u32 *, u16, u16);
- int (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- int (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- int (*check_for_msg)(struct ixgbe_hw *, u16);
- int (*check_for_ack)(struct ixgbe_hw *, u16);
- int (*check_for_rst)(struct ixgbe_hw *, u16);
-};
-
struct ixgbe_mbx_stats {
u32 msgs_tx;
u32 msgs_rx;
@@ -3623,6 +3610,8 @@ struct ixgbe_mbx_stats {
u32 rsts;
};
+struct ixgbe_mbx_operations;
+
struct ixgbe_mbx_info {
const struct ixgbe_mbx_operations *ops;
struct ixgbe_mbx_stats stats;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index f1ffa398f6df..81e1df83f136 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -6,6 +6,7 @@
#include <linux/sched.h>
#include "ixgbe.h"
+#include "ixgbe_mbx.h"
#include "ixgbe_phy.h"
#include "ixgbe_x540.h"
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index a5f644934445..d9a8cf018d3b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -4,6 +4,7 @@
#include "ixgbe_x540.h"
#include "ixgbe_type.h"
#include "ixgbe_common.h"
+#include "ixgbe_mbx.h"
#include "ixgbe_phy.h"
static int ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 81cf3361a1e5..87c7e6251a4f 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1403,7 +1403,7 @@ static struct platform_driver korina_driver = {
.of_match_table = of_match_ptr(korina_match),
},
.probe = korina_probe,
- .remove_new = korina_remove,
+ .remove = korina_remove,
};
module_platform_driver(korina_driver);
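The .remove_new to .remove renames in this and the following drivers track the platform core: .remove_new was the transitional field while the remove callback migrated from returning int to returning void, and with the migration complete, .remove itself now takes the void-returning signature. A minimal sketch, assuming devm-managed teardown:

    static void foo_remove(struct platform_device *pdev)
    {
            /* teardown is devm-managed; there is nothing useful to return */
    }

    static struct platform_driver foo_driver = {
            .probe  = foo_probe,
            .remove = foo_remove,   /* void-returning since the conversion */
            .driver = { .name = "foo" },
    };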
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 7179271f63b6..660dff5426e7 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -734,7 +734,7 @@ static void ltq_etop_remove(struct platform_device *pdev)
}
static struct platform_driver ltq_mii_driver = {
- .remove_new = ltq_etop_remove,
+ .remove = ltq_etop_remove,
.driver = {
.name = "ltq_etop",
},
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 07904a528f21..b8766fb7a844 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -669,7 +669,7 @@ MODULE_DEVICE_TABLE(of, xrx200_match);
static struct platform_driver xrx200_driver = {
.probe = xrx200_probe,
- .remove_new = xrx200_remove,
+ .remove = xrx200_remove,
.driver = {
.name = "lantiq,xrx200-net",
.of_match_table = xrx200_match,
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c
index ff54fbe41bcc..829a4b828f8e 100644
--- a/drivers/net/ethernet/litex/litex_liteeth.c
+++ b/drivers/net/ethernet/litex/litex_liteeth.c
@@ -309,7 +309,7 @@ MODULE_DEVICE_TABLE(of, liteeth_of_match);
static struct platform_driver liteeth_driver = {
.probe = liteeth_probe,
- .remove_new = liteeth_remove,
+ .remove = liteeth_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = liteeth_of_match,
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 9e80899546d9..a06048719e84 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1698,13 +1698,9 @@ static void mv643xx_eth_get_strings(struct net_device *dev,
{
int i;
- if (stringset == ETH_SS_STATS) {
- for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- mv643xx_eth_stats[i].stat_string,
- ETH_GSTRING_LEN);
- }
- }
+ if (stringset == ETH_SS_STATS)
+ for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++)
+ ethtool_puts(&data, mv643xx_eth_stats[i].stat_string);
}
static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
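ethtool_puts() and ethtool_sprintf() copy one ETH_GSTRING_LEN-sized string into the stringset buffer and advance the caller's cursor, which is why the manual data + i * ETH_GSTRING_LEN arithmetic disappears in these conversions. A sketch of the cursor behaviour, assuming buf points at the stringset area:

    u8 *data = buf;

    ethtool_puts(&data, "rx_packets");          /* fixed name */
    ethtool_sprintf(&data, "txq_%u_bytes", 3);  /* formatted name */
    /* data now points 2 * ETH_GSTRING_LEN bytes past buf */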
@@ -2843,29 +2839,24 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
struct mv643xx_eth_shared_platform_data *pd;
struct mv643xx_eth_shared_private *msp;
const struct mbus_dram_target_info *dram;
- struct resource *res;
int ret;
if (!mv643xx_eth_version_printed++)
pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
mv643xx_eth_driver_version);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL)
- return -EINVAL;
-
msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
if (msp == NULL)
return -ENOMEM;
platform_set_drvdata(pdev, msp);
- msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (msp->base == NULL)
- return -ENOMEM;
+ msp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(msp->base))
+ return PTR_ERR(msp->base);
- msp->clk = devm_clk_get(&pdev->dev, NULL);
- if (!IS_ERR(msp->clk))
- clk_prepare_enable(msp->clk);
+ msp->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+ if (IS_ERR(msp->clk))
+ return PTR_ERR(msp->clk);
/*
* (Re-)program MBUS remapping windows if we are asked to.
@@ -2876,7 +2867,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
ret = mv643xx_eth_shared_of_probe(pdev);
if (ret)
- goto err_put_clk;
+ return ret;
pd = dev_get_platdata(&pdev->dev);
msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
@@ -2884,25 +2875,16 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
infer_hw_params(msp);
return 0;
-
-err_put_clk:
- if (!IS_ERR(msp->clk))
- clk_disable_unprepare(msp->clk);
- return ret;
}
static void mv643xx_eth_shared_remove(struct platform_device *pdev)
{
- struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
-
mv643xx_eth_shared_of_remove();
- if (!IS_ERR(msp->clk))
- clk_disable_unprepare(msp->clk);
}
static struct platform_driver mv643xx_eth_shared_driver = {
.probe = mv643xx_eth_shared_probe,
- .remove_new = mv643xx_eth_shared_remove,
+ .remove = mv643xx_eth_shared_remove,
.driver = {
.name = MV643XX_ETH_SHARED_NAME,
.of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
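The probe rework above leans on managed resources: devm_platform_ioremap_resource() folds the platform_get_resource()/devm_ioremap() pair into one call, and devm_clk_get_optional_enabled() acquires and enables the clock with automatic disable on failure or unbind. That is what lets the err_put_clk label and the remove-time clk_disable_unprepare() vanish; error paths become plain returns:

    base = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(base))
            return PTR_ERR(base);

    clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
    if (IS_ERR(clk))
            return PTR_ERR(clk);  /* a missing clock yields NULL, not an error */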
@@ -3307,7 +3289,7 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev)
static struct platform_driver mv643xx_eth_driver = {
.probe = mv643xx_eth_probe,
- .remove_new = mv643xx_eth_remove,
+ .remove = mv643xx_eth_remove,
.shutdown = mv643xx_eth_shutdown,
.driver = {
.name = MV643XX_ETH_NAME,
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index e1d003fdbc2e..3f4447e68888 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -348,13 +348,12 @@ static int orion_mdio_probe(struct platform_device *pdev)
if (type == BUS_TYPE_XSMI)
orion_mdio_xsmi_set_mdc_freq(bus);
} else {
- dev->clk[0] = clk_get(&pdev->dev, NULL);
- if (PTR_ERR(dev->clk[0]) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
+ dev->clk[0] = clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk[0])) {
+ ret = PTR_ERR(dev->clk[0]);
goto out_clk;
}
- if (!IS_ERR(dev->clk[0]))
- clk_prepare_enable(dev->clk[0]);
+ clk_prepare_enable(dev->clk[0]);
}
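clk_get_optional() returns NULL rather than an error when the device has no clock, and every clk_*() call is a no-op on a NULL clk. Both halves of the new code rely on that: the IS_ERR() check now catches only genuine failures (including -EPROBE_DEFER), and clk_prepare_enable() can run unconditionally. The shape of the idiom:

    clk = clk_get_optional(&pdev->dev, NULL);  /* NULL if no clock described */
    if (IS_ERR(clk))
            return PTR_ERR(clk);               /* genuine errors only */
    clk_prepare_enable(clk);                   /* no-op for a NULL clk */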
@@ -422,8 +421,6 @@ static void orion_mdio_remove(struct platform_device *pdev)
mdiobus_unregister(bus);
for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
- if (IS_ERR(dev->clk[i]))
- break;
clk_disable_unprepare(dev->clk[i]);
clk_put(dev->clk[i]);
}
@@ -447,7 +444,7 @@ MODULE_DEVICE_TABLE(acpi, orion_mdio_acpi_match);
static struct platform_driver orion_mdio_driver = {
.probe = orion_mdio_probe,
- .remove_new = orion_mdio_remove,
+ .remove = orion_mdio_remove,
.driver = {
.name = "orion-mdio",
.of_match_table = orion_mdio_match,
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d72b2d5f96db..1fb285fa0bdb 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4795,11 +4795,9 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
int i;
for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- mvneta_statistics[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, mvneta_statistics[i].name);
if (!pp->bm_priv) {
- data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
page_pool_ethtool_stats_get_strings(data);
}
}
@@ -5883,7 +5881,7 @@ MODULE_DEVICE_TABLE(of, mvneta_match);
static struct platform_driver mvneta_driver = {
.probe = mvneta_probe,
- .remove_new = mvneta_remove,
+ .remove = mvneta_remove,
.driver = {
.name = MVNETA_DRIVER_NAME,
.of_match_table = mvneta_match,
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index 3f46a0fed048..6bb380494919 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -485,7 +485,7 @@ MODULE_DEVICE_TABLE(of, mvneta_bm_match);
static struct platform_driver mvneta_bm_driver = {
.probe = mvneta_bm_probe,
- .remove_new = mvneta_bm_remove,
+ .remove = mvneta_bm_remove,
.driver = {
.name = MVNETA_BM_DRIVER_NAME,
.of_match_table = mvneta_bm_match,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 3880dcc0418b..571631a30320 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1985,45 +1985,32 @@ static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
u8 *data)
{
struct mvpp2_port *port = netdev_priv(netdev);
+ const char *str;
int i, q;
if (sset != ETH_SS_STATS)
return;
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
- strscpy(data, mvpp2_ethtool_mib_regs[i].string,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
+ ethtool_puts(&data, mvpp2_ethtool_mib_regs[i].string);
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
- strscpy(data, mvpp2_ethtool_port_regs[i].string,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
+ ethtool_puts(&data, mvpp2_ethtool_port_regs[i].string);
- for (q = 0; q < port->ntxqs; q++) {
+ for (q = 0; q < port->ntxqs; q++)
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
- snprintf(data, ETH_GSTRING_LEN,
- mvpp2_ethtool_txq_regs[i].string, q);
- data += ETH_GSTRING_LEN;
+ str = mvpp2_ethtool_txq_regs[i].string;
+ ethtool_sprintf(&data, str, q);
}
- }
- for (q = 0; q < port->nrxqs; q++) {
+ for (q = 0; q < port->nrxqs; q++)
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
- snprintf(data, ETH_GSTRING_LEN,
- mvpp2_ethtool_rxq_regs[i].string,
- q);
- data += ETH_GSTRING_LEN;
+ str = mvpp2_ethtool_rxq_regs[i].string;
+ ethtool_sprintf(&data, str, q);
}
- }
- for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) {
- strscpy(data, mvpp2_ethtool_xdp[i].string,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++)
+ ethtool_puts(&data, mvpp2_ethtool_xdp[i].string);
}
static void
@@ -7774,7 +7761,7 @@ MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
static struct platform_driver mvpp2_driver = {
.probe = mvpp2_probe,
- .remove_new = mvpp2_remove,
+ .remove = mvpp2_remove,
.driver = {
.name = MVPP2_DRIVER_NAME,
.of_match_table = mvpp2_match,
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
index 7d0124b283da..4f4d58189118 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
@@ -47,7 +47,7 @@ static const char octep_gstrings_global_stats[][ETH_GSTRING_LEN] = {
"rx_err_pkts",
};
-#define OCTEP_GLOBAL_STATS_CNT (sizeof(octep_gstrings_global_stats) / ETH_GSTRING_LEN)
+#define OCTEP_GLOBAL_STATS_CNT ARRAY_SIZE(octep_gstrings_global_stats)
static const char octep_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
"tx_packets_posted[Q-%u]",
@@ -56,7 +56,7 @@ static const char octep_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
"tx_busy[Q-%u]",
};
-#define OCTEP_TX_Q_STATS_CNT (sizeof(octep_gstrings_tx_q_stats) / ETH_GSTRING_LEN)
+#define OCTEP_TX_Q_STATS_CNT ARRAY_SIZE(octep_gstrings_tx_q_stats)
static const char octep_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
"rx_packets[Q-%u]",
@@ -64,7 +64,7 @@ static const char octep_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
"rx_alloc_errors[Q-%u]",
};
-#define OCTEP_RX_Q_STATS_CNT (sizeof(octep_gstrings_rx_q_stats) / ETH_GSTRING_LEN)
+#define OCTEP_RX_Q_STATS_CNT ARRAY_SIZE(octep_gstrings_rx_q_stats)
static void octep_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
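For a char table[][ETH_GSTRING_LEN], both expressions evaluate to the element count, but ARRAY_SIZE() states the intent directly and refuses to compile if its operand ever decays to a pointer:

    static const char names[][ETH_GSTRING_LEN] = { "a", "b" };

    /* both sides evaluate to 2 for this table */
    static_assert(ARRAY_SIZE(names) == sizeof(names) / ETH_GSTRING_LEN);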
@@ -80,32 +80,25 @@ static void octep_get_strings(struct net_device *netdev,
{
struct octep_device *oct = netdev_priv(netdev);
u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
- char *strings = (char *)data;
+ const char *str;
int i, j;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < OCTEP_GLOBAL_STATS_CNT; i++) {
- snprintf(strings, ETH_GSTRING_LEN,
- octep_gstrings_global_stats[i]);
- strings += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < OCTEP_GLOBAL_STATS_CNT; i++)
+ ethtool_puts(&data, octep_gstrings_global_stats[i]);
- for (i = 0; i < num_queues; i++) {
+ for (i = 0; i < num_queues; i++)
for (j = 0; j < OCTEP_TX_Q_STATS_CNT; j++) {
- snprintf(strings, ETH_GSTRING_LEN,
- octep_gstrings_tx_q_stats[j], i);
- strings += ETH_GSTRING_LEN;
+ str = octep_gstrings_tx_q_stats[j];
+ ethtool_sprintf(&data, str, i);
}
- }
- for (i = 0; i < num_queues; i++) {
+ for (i = 0; i < num_queues; i++)
for (j = 0; j < OCTEP_RX_Q_STATS_CNT; j++) {
- snprintf(strings, ETH_GSTRING_LEN,
- octep_gstrings_rx_q_stats[j], i);
- strings += ETH_GSTRING_LEN;
+ str = octep_gstrings_rx_q_stats[j];
+ ethtool_sprintf(&data, str, i);
}
- }
break;
default:
break;
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
index a1979b45e355..7b21439a315f 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
@@ -25,7 +25,7 @@ static const char octep_vf_gstrings_global_stats[][ETH_GSTRING_LEN] = {
"rx_dropped_bytes_fifo_full",
};
-#define OCTEP_VF_GLOBAL_STATS_CNT (sizeof(octep_vf_gstrings_global_stats) / ETH_GSTRING_LEN)
+#define OCTEP_VF_GLOBAL_STATS_CNT ARRAY_SIZE(octep_vf_gstrings_global_stats)
static const char octep_vf_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
"tx_packets_posted[Q-%u]",
@@ -34,7 +34,7 @@ static const char octep_vf_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
"tx_busy[Q-%u]",
};
-#define OCTEP_VF_TX_Q_STATS_CNT (sizeof(octep_vf_gstrings_tx_q_stats) / ETH_GSTRING_LEN)
+#define OCTEP_VF_TX_Q_STATS_CNT ARRAY_SIZE(octep_vf_gstrings_tx_q_stats)
static const char octep_vf_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
"rx_packets[Q-%u]",
@@ -42,7 +42,7 @@ static const char octep_vf_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
"rx_alloc_errors[Q-%u]",
};
-#define OCTEP_VF_RX_Q_STATS_CNT (sizeof(octep_vf_gstrings_rx_q_stats) / ETH_GSTRING_LEN)
+#define OCTEP_VF_RX_Q_STATS_CNT ARRAY_SIZE(octep_vf_gstrings_rx_q_stats)
static void octep_vf_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
@@ -58,32 +58,25 @@ static void octep_vf_get_strings(struct net_device *netdev,
{
struct octep_vf_device *oct = netdev_priv(netdev);
u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
- char *strings = (char *)data;
+ const char *str;
int i, j;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < OCTEP_VF_GLOBAL_STATS_CNT; i++) {
- snprintf(strings, ETH_GSTRING_LEN,
- octep_vf_gstrings_global_stats[i]);
- strings += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < OCTEP_VF_GLOBAL_STATS_CNT; i++)
+ ethtool_puts(&data, octep_vf_gstrings_global_stats[i]);
- for (i = 0; i < num_queues; i++) {
+ for (i = 0; i < num_queues; i++)
for (j = 0; j < OCTEP_VF_TX_Q_STATS_CNT; j++) {
- snprintf(strings, ETH_GSTRING_LEN,
- octep_vf_gstrings_tx_q_stats[j], i);
- strings += ETH_GSTRING_LEN;
+ str = octep_vf_gstrings_tx_q_stats[j];
+ ethtool_sprintf(&data, str, i);
}
- }
- for (i = 0; i < num_queues; i++) {
+ for (i = 0; i < num_queues; i++)
for (j = 0; j < OCTEP_VF_RX_Q_STATS_CNT; j++) {
- snprintf(strings, ETH_GSTRING_LEN,
- octep_vf_gstrings_rx_q_stats[j], i);
- strings += ETH_GSTRING_LEN;
+ str = octep_vf_gstrings_rx_q_stats[j];
+ ethtool_sprintf(&data, str, i);
}
- }
break;
default:
break;
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index a32d85d6f599..35c4f5f64f58 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -46,3 +46,11 @@ config OCTEONTX2_VF
depends on OCTEONTX2_PF
help
This driver supports Marvell's OcteonTX2 NIC virtual function.
+
+config RVU_ESWITCH
+ tristate "Marvell RVU E-Switch support"
+ depends on OCTEONTX2_PF
+ default m
+ help
+ This driver supports Marvell's RVU E-Switch that
+ provides internal SRIOV packet steering and switching.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 3cf4c8285c90..ccea37847df8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -11,4 +11,5 @@ rvu_mbox-y := mbox.o rvu_trace.o
rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
- rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o
+ rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o \
+ rvu_rep.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index 2436c1ff9ba4..5d84386ed22d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -156,6 +156,7 @@ enum nix_scheduler {
#define NIC_HW_MIN_FRS 40
#define NIC_HW_MAX_FRS 9212
#define SDP_HW_MAX_FRS 65535
+#define SDP_HW_MIN_FRS 16
#define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */
#define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 6ea2f3071fe8..62c07407eb94 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -144,6 +144,9 @@ M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \
msg_rsp) \
M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
M(PTP_GET_CAP, 0x00c, ptp_get_cap, msg_req, ptp_get_cap_rsp) \
+M(GET_REP_CNT, 0x00d, get_rep_cnt, msg_req, get_rep_cnt_rsp) \
+M(ESW_CFG, 0x00e, esw_cfg, esw_cfg_req, msg_rsp) \
+M(REP_EVENT_NOTIFY, 0x00f, rep_event_notify, rep_event, msg_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
@@ -319,6 +322,7 @@ M(NIX_MCAST_GRP_DESTROY, 0x802c, nix_mcast_grp_destroy, nix_mcast_grp_destroy_re
M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update, \
nix_mcast_grp_update_req, \
nix_mcast_grp_update_rsp) \
+M(NIX_LF_STATS, 0x802e, nix_lf_stats, nix_stats_req, nix_stats_rsp) \
/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
mcs_alloc_rsrc_rsp) \
@@ -380,12 +384,16 @@ M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp)
#define MBOX_UP_MCS_MESSAGES \
M(MCS_INTR_NOTIFY, 0xE00, mcs_intr_notify, mcs_intr_info, msg_rsp)
+#define MBOX_UP_REP_MESSAGES \
+M(REP_EVENT_UP_NOTIFY, 0xEF0, rep_event_up_notify, rep_event, msg_rsp) \
+
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
MBOX_UP_CPT_MESSAGES
MBOX_UP_MCS_MESSAGES
+MBOX_UP_REP_MESSAGES
#undef M
};
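The mbox tables are X-macros: each entry expands once per local definition of M(), so adding a message such as GET_REP_CNT in a single list generates its enum value, handler prototype, and dispatch case together. The mechanism in miniature, with illustrative names:

    #define FOO_MESSAGES \
    M(READY,  0x001)     \
    M(ATTACH, 0x002)

    enum {
    #define M(_name, _id) FOO_MSG_ ## _name = _id,
    FOO_MESSAGES
    #undef M
    };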
@@ -1364,6 +1372,37 @@ struct nix_bandprof_get_hwinfo_rsp {
u32 policer_timeunit;
};
+struct nix_stats_req {
+ struct mbox_msghdr hdr;
+ u8 reset;
+ u16 pcifunc;
+ u64 rsvd;
+};
+
+struct nix_stats_rsp {
+ struct mbox_msghdr hdr;
+ u16 pcifunc;
+ struct {
+ u64 octs;
+ u64 ucast;
+ u64 bcast;
+ u64 mcast;
+ u64 drop;
+ u64 drop_octs;
+ u64 drop_mcast;
+ u64 drop_bcast;
+ u64 err;
+ u64 rsvd[5];
+ } rx;
+ struct {
+ u64 ucast;
+ u64 bcast;
+ u64 mcast;
+ u64 drop;
+ u64 octs;
+ } tx;
+};
+
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
@@ -1525,6 +1564,41 @@ struct ptp_get_cap_rsp {
u64 cap;
};
+struct get_rep_cnt_rsp {
+ struct mbox_msghdr hdr;
+ u16 rep_cnt;
+ u16 rep_pf_map[64];
+ u64 rsvd;
+};
+
+struct esw_cfg_req {
+ struct mbox_msghdr hdr;
+ u8 ena;
+ u64 rsvd;
+};
+
+struct rep_evt_data {
+ u8 port_state;
+ u8 vf_state;
+ u16 rx_mode;
+ u16 rx_flags;
+ u16 mtu;
+ u8 mac[ETH_ALEN];
+ u64 rsvd[5];
+};
+
+struct rep_event {
+ struct mbox_msghdr hdr;
+ u16 pcifunc;
+#define RVU_EVENT_PORT_STATE BIT_ULL(0)
+#define RVU_EVENT_PFVF_STATE BIT_ULL(1)
+#define RVU_EVENT_MTU_CHANGE BIT_ULL(2)
+#define RVU_EVENT_RX_MODE_CHANGE BIT_ULL(3)
+#define RVU_EVENT_MAC_ADDR_CHANGE BIT_ULL(4)
+ u16 event;
+ struct rep_evt_data evt_data;
+};
+
struct flow_msg {
unsigned char dmac[6];
unsigned char smac[6];
@@ -1563,6 +1637,7 @@ struct flow_msg {
u8 icmp_type;
u8 icmp_code;
__be16 tcp_flags;
+ u16 sq_id;
};
struct npc_install_flow_req {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 5016ba82e142..b897845e25fd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -513,6 +513,11 @@ struct rvu_switch {
u16 start_entry;
};
+struct rep_evtq_ent {
+ struct list_head node;
+ struct rep_event event;
+};
+
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -525,6 +530,7 @@ struct rvu {
struct mutex alias_lock; /* Serialize bar2 alias access */
int vfs; /* Number of VFs attached to RVU */
u16 vf_devid; /* VF devices id */
+ bool def_rule_cntr_en;
int nix_blkaddr[MAX_NIX_BLKS];
/* Mbox */
@@ -594,6 +600,15 @@ struct rvu {
spinlock_t cpt_intr_lock;
struct mutex mbox_lock; /* Serialize mbox up and down msgs */
+ u16 rep_pcifunc;
+ int rep_cnt;
+ u16 *rep2pfvf_map;
+ u8 rep_mode;
+ struct work_struct rep_evt_work;
+ struct workqueue_struct *rep_evt_wq;
+ struct list_head rep_evtq_head;
+ /* Representor event lock */
+ spinlock_t rep_evtq_lock;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -852,6 +867,14 @@ bool is_sdp_pfvf(u16 pcifunc);
bool is_sdp_pf(u16 pcifunc);
bool is_sdp_vf(struct rvu *rvu, u16 pcifunc);
+static inline bool is_rep_dev(struct rvu *rvu, u16 pcifunc)
+{
+ if (rvu->rep_pcifunc && rvu->rep_pcifunc == pcifunc)
+ return true;
+
+ return false;
+}
+
/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
{
@@ -960,7 +983,11 @@ void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index);
-
+void __rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule);
+void __rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule,
+ struct npc_install_flow_rsp *rsp);
void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt);
@@ -985,6 +1012,7 @@ void npc_set_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, struct mcam_entry *entry,
u8 *intf, u8 *ena);
+int npc_config_cntr_default_entries(struct rvu *rvu, bool enable);
bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc);
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
@@ -1045,7 +1073,8 @@ int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr);
/* RVU Switch */
void rvu_switch_enable(struct rvu *rvu);
void rvu_switch_disable(struct rvu *rvu);
-void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc, bool ena);
+void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool ena);
int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
u64 pkind, u8 var_len_off, u8 var_len_off_mask,
@@ -1058,4 +1087,9 @@ int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena);
void rvu_mcs_exit(struct rvu *rvu);
+/* Representor APIs */
+int rvu_rep_pf_init(struct rvu *rvu);
+int rvu_rep_install_mcam_rules(struct rvu *rvu);
+void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena);
+int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable);
#endif /* RVU_H */
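The new rvu fields form a producer/consumer pair: event sources append a rep_evtq_ent to rep_evtq_head under rep_evtq_lock and queue rep_evt_work; the worker drains the list and forwards each event to the representor driver over the mbox. A hedged sketch of such a drain loop (the actual handler lives in the new rvu_rep.c, which is not shown here):

    static void rvu_rep_wq_handler(struct work_struct *work)
    {
            struct rvu *rvu = container_of(work, struct rvu, rep_evt_work);
            struct rep_evtq_ent *ent;

            spin_lock(&rvu->rep_evtq_lock);
            while ((ent = list_first_entry_or_null(&rvu->rep_evtq_head,
                                                   struct rep_evtq_ent, node))) {
                    list_del(&ent->node);
                    spin_unlock(&rvu->rep_evtq_lock);
                    /* ...forward ent->event over the mbox... */
                    kfree(ent);
                    spin_lock(&rvu->rep_evtq_lock);
            }
            spin_unlock(&rvu->rep_evtq_lock);
    }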
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 87ba77e5026a..148144f5b61d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -45,33 +45,6 @@ enum {
CGX_STAT18,
};
-/* NIX TX stats */
-enum nix_stat_lf_tx {
- TX_UCAST = 0x0,
- TX_BCAST = 0x1,
- TX_MCAST = 0x2,
- TX_DROP = 0x3,
- TX_OCTS = 0x4,
- TX_STATS_ENUM_LAST,
-};
-
-/* NIX RX stats */
-enum nix_stat_lf_rx {
- RX_OCTS = 0x0,
- RX_UCAST = 0x1,
- RX_BCAST = 0x2,
- RX_MCAST = 0x3,
- RX_DROP = 0x4,
- RX_DROP_OCTS = 0x5,
- RX_FCS = 0x6,
- RX_ERR = 0x7,
- RX_DRP_BCAST = 0x8,
- RX_DRP_MCAST = 0x9,
- RX_DRP_L3BCAST = 0xa,
- RX_DRP_L3MCAST = 0xb,
- RX_STATS_ENUM_LAST,
-};
-
static char *cgx_rx_stats_fields[] = {
[CGX_STAT0] = "Received packets",
[CGX_STAT1] = "Octets of received packets",
@@ -663,16 +636,16 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
-static void get_lf_str_list(struct rvu_block block, int pcifunc,
+static void get_lf_str_list(const struct rvu_block *block, int pcifunc,
char *lfs)
{
- int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
+ int lf = 0, seq = 0, len = 0, prev_lf = block->lf.max;
- for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
- if (lf >= block.lf.max)
+ for_each_set_bit(lf, block->lf.bmap, block->lf.max) {
+ if (lf >= block->lf.max)
break;
- if (block.fn_map[lf] != pcifunc)
+ if (block->fn_map[lf] != pcifunc)
continue;
if (lf == prev_lf + 1) {
@@ -719,7 +692,7 @@ static int get_max_column_width(struct rvu *rvu)
if (!strlen(block.name))
continue;
- get_lf_str_list(block, pcifunc, buf);
+ get_lf_str_list(&block, pcifunc, buf);
if (lf_str_size <= strlen(buf))
lf_str_size = strlen(buf) + 1;
}
@@ -803,7 +776,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
continue;
len = 0;
lfs[len] = '\0';
- get_lf_str_list(block, pcifunc, lfs);
+ get_lf_str_list(&block, pcifunc, lfs);
if (strlen(lfs))
flag = 1;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 7498ab429963..dab4deca893f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1238,6 +1238,7 @@ enum rvu_af_dl_param_id {
RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
+ RVU_AF_DEVLINK_PARAM_ID_NPC_DEF_RULE_CNTR_ENABLE,
RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
};
@@ -1358,6 +1359,32 @@ static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink
return 0;
}
+static int rvu_af_dl_npc_def_rule_cntr_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+
+ ctx->val.vbool = rvu->def_rule_cntr_en;
+
+ return 0;
+}
+
+static int rvu_af_dl_npc_def_rule_cntr_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ int err;
+
+ err = npc_config_cntr_default_entries(rvu, ctx->val.vbool);
+ if (!err)
+ rvu->def_rule_cntr_en = ctx->val.vbool;
+
+ return err;
+}
+
static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
@@ -1444,6 +1471,11 @@ static const struct devlink_param rvu_af_dl_params[] = {
rvu_af_dl_npc_mcam_high_zone_percent_get,
rvu_af_dl_npc_mcam_high_zone_percent_set,
rvu_af_dl_npc_mcam_high_zone_percent_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_DEF_RULE_CNTR_ENABLE,
+ "npc_def_rule_cntr", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_npc_def_rule_cntr_get,
+ rvu_af_dl_npc_def_rule_cntr_set, NULL),
DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
"nix_maxlf", DEVLINK_PARAM_TYPE_U16,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
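Registering the parameter with DEVLINK_PARAM_CMODE_RUNTIME means it can be toggled on a live system without reloading the driver; a usage sketch with an illustrative device address:

    devlink dev param set pci/0002:01:00.0 name npc_def_rule_cntr \
            value true cmode runtime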
@@ -1468,6 +1500,9 @@ static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
struct rvu *rvu = rvu_dl->rvu;
struct rvu_switch *rswitch;
+ if (rvu->rep_mode)
+ return -EOPNOTSUPP;
+
rswitch = &rvu->rswitch;
*mode = rswitch->mode;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index da69350c6f76..5d5a01dbbca1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -31,6 +31,7 @@ static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
u32 leaf_prof);
static const char *nix_get_ctx_name(int ctype);
+static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc);
enum mc_tbl_sz {
MC_TBL_SZ_256,
@@ -312,7 +313,9 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
/* TLs aggregating traffic are shared across PF and VFs */
if (lvl >= hw->cap.nix_tx_aggr_lvl) {
- if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
+ if ((nix_get_tx_link(rvu, map_func) !=
+ nix_get_tx_link(rvu, pcifunc)) &&
+ (rvu_get_pf(map_func) != rvu_get_pf(pcifunc)))
return false;
else
return true;
@@ -360,7 +363,6 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
rvu_npc_set_pkind(rvu, pkind, pfvf);
-
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
@@ -584,6 +586,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
return 0;
+ if (is_sdp_pfvf(pcifunc))
+ type = NIX_INTF_TYPE_SDP;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
if (err)
@@ -1614,6 +1619,12 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
cfg = NPC_TX_DEF_PKIND;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
+ if (is_rep_dev(rvu, pcifunc)) {
+ pfvf->tx_chan_base = RVU_SWITCH_LBK_CHAN;
+ pfvf->tx_chan_cnt = 1;
+ goto exit;
+ }
+
intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (is_sdp_pfvf(pcifunc))
intf = NIX_INTF_TYPE_SDP;
@@ -1684,6 +1695,9 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
+ if (is_rep_dev(rvu, pcifunc))
+ goto free_lf;
+
if (req->flags & NIX_LF_DISABLE_FLOWS)
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
else
@@ -1695,6 +1709,7 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
nix_interface_deinit(rvu, pcifunc, nixlf);
+free_lf:
/* Reset this NIX LF */
err = rvu_lf_reset(rvu, block, nixlf);
if (err) {
@@ -2007,7 +2022,8 @@ static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
struct rvu_hwinfo *hw = rvu->hw;
int pf = rvu_get_pf(pcifunc);
- if (is_lbk_vf(rvu, pcifunc)) { /* LBK links */
+ /* LBK links */
+ if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) {
*start = hw->cap.nix_txsch_per_cgx_lmac * link;
*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
@@ -2760,7 +2776,7 @@ void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
int schq;
u64 cfg;
- if (!is_pf_cgxmapped(rvu, pf))
+ if (!is_pf_cgxmapped(rvu, pf) && !is_rep_dev(rvu, pcifunc))
return;
cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
@@ -4393,8 +4409,6 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
ether_addr_copy(pfvf->default_mac, req->mac_addr);
- rvu_switch_update_rules(rvu, pcifunc);
-
return 0;
}
@@ -4555,7 +4569,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
- if (is_lbk_vf(rvu, pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc))
rvu_get_lbk_link_max_frs(rvu, &max_mtu);
else
rvu_get_lmac_link_max_frs(rvu, &max_mtu);
@@ -4583,6 +4597,8 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
/* For VFs of PF0 ingress is LBK port, so config LBK link */
pfvf = rvu_get_pfvf(rvu, pcifunc);
link = hw->cgx_links + pfvf->lbkid;
+ } else if (is_rep_dev(rvu, pcifunc)) {
+ link = hw->cgx_links + 0;
}
if (link < 0)
@@ -4674,7 +4690,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
if (hw->sdp_links) {
link = hw->cgx_links + hw->lbk_links;
rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
- SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+ SDP_HW_MAX_FRS << 16 | SDP_HW_MIN_FRS);
}
/* Get MCS external bypass status for CN10K-B */
@@ -5166,7 +5182,7 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
{
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
- int nixlf, err;
+ int nixlf, err, pf;
err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
@@ -5182,7 +5198,11 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
pfvf = rvu_get_pfvf(rvu, pcifunc);
set_bit(NIXLF_INITIALIZED, &pfvf->flags);
- rvu_switch_update_rules(rvu, pcifunc);
+ rvu_switch_update_rules(rvu, pcifunc, true);
+
+ pf = rvu_get_pf(pcifunc);
+ if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
+ rvu_rep_notify_pfvf_state(rvu, pcifunc, true);
return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
@@ -5192,7 +5212,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
{
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
- int nixlf, err;
+ int nixlf, err, pf;
err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
@@ -5210,8 +5230,12 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
if (err)
return err;
+ rvu_switch_update_rules(rvu, pcifunc, false);
rvu_cgx_tx_enable(rvu, pcifunc, true);
+ pf = rvu_get_pf(pcifunc);
+ if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
+ rvu_rep_notify_pfvf_state(rvu, pcifunc, false);
return 0;
}
@@ -5239,6 +5263,9 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
+ if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
+ rvu_rep_notify_pfvf_state(rvu, pcifunc, false);
+
rvu_cgx_start_stop_io(rvu, pcifunc, false);
if (pfvf->sq_ctx) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 97722ce8c4cb..821fe242f821 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -2691,6 +2691,49 @@ void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx)
npc_mcam_set_bit(mcam, entry_idx);
}
+int npc_config_cntr_default_entries(struct rvu *rvu, bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_install_flow_rsp rsp;
+ struct rvu_npc_mcam_rule *rule;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return -EINVAL;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (!is_mcam_entry_enabled(rvu, mcam, blkaddr, rule->entry))
+ continue;
+ if (!rule->default_rule)
+ continue;
+ if (enable && !rule->has_cntr) { /* Alloc and map new counter */
+ __rvu_mcam_add_counter_to_rule(rvu, rule->owner,
+ rule, &rsp);
+ if (rsp.counter < 0) {
+ dev_err(rvu->dev,
+ "%s: Failed to allocate cntr for default rule (err=%d)\n",
+ __func__, rsp.counter);
+ break;
+ }
+ npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ rule->entry, rsp.counter);
+ /* Reset counter before use */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MATCH_STATX(rule->cntr), 0x0);
+ }
+
+ /* Free and unmap counter */
+ if (!enable && rule->has_cntr)
+ __rvu_mcam_remove_counter_from_rule(rvu, rule->owner,
+ rule);
+ }
+ mutex_unlock(&mcam->lock);
+
+ return 0;
+}
+
int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
struct npc_mcam_alloc_entry_req *req,
struct npc_mcam_alloc_entry_rsp *rsp)
@@ -2975,9 +3018,9 @@ int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
return rc;
}
-int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
- struct npc_mcam_alloc_counter_req *req,
- struct npc_mcam_alloc_counter_rsp *rsp)
+static int __npc_mcam_alloc_counter(struct rvu *rvu,
+ struct npc_mcam_alloc_counter_req *req,
+ struct npc_mcam_alloc_counter_rsp *rsp)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
u16 pcifunc = req->hdr.pcifunc;
@@ -2998,11 +3041,9 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS)
return NPC_MCAM_INVALID_REQ;
- mutex_lock(&mcam->lock);
/* Check if unused counters are available or not */
if (!rvu_rsrc_free_count(&mcam->counters)) {
- mutex_unlock(&mcam->lock);
return NPC_MCAM_ALLOC_FAILED;
}
@@ -3035,12 +3076,27 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
}
}
- mutex_unlock(&mcam->lock);
return 0;
}
-int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
- struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
+ struct npc_mcam_alloc_counter_req *req,
+ struct npc_mcam_alloc_counter_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int err;
+
+ mutex_lock(&mcam->lock);
+
+ err = __npc_mcam_alloc_counter(rvu, req, rsp);
+
+ mutex_unlock(&mcam->lock);
+ return err;
+}
+
+static int __npc_mcam_free_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req,
+ struct msg_rsp *rsp)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
u16 index, entry = 0;
@@ -3050,10 +3106,8 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
if (blkaddr < 0)
return NPC_MCAM_INVALID_REQ;
- mutex_lock(&mcam->lock);
err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
if (err) {
- mutex_unlock(&mcam->lock);
return err;
}
@@ -3077,10 +3131,66 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
index, req->cntr);
}
- mutex_unlock(&mcam->lock);
return 0;
}
+int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int err;
+
+ mutex_lock(&mcam->lock);
+
+ err = __npc_mcam_free_counter(rvu, req, rsp);
+
+ mutex_unlock(&mcam->lock);
+
+ return err;
+}
+
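+/* Lockless helpers: callers of the two functions below must hold
+ * mcam->lock, as the wrappers in rvu_npc_fs.c do.
+ */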
+void __rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule)
+{
+ struct npc_mcam_oper_counter_req free_req = { 0 };
+ struct msg_rsp free_rsp;
+
+ if (!rule->has_cntr)
+ return;
+
+ free_req.hdr.pcifunc = pcifunc;
+ free_req.cntr = rule->cntr;
+
+ __npc_mcam_free_counter(rvu, &free_req, &free_rsp);
+ rule->has_cntr = false;
+}
+
+void __rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule,
+ struct npc_install_flow_rsp *rsp)
+{
+ struct npc_mcam_alloc_counter_req cntr_req = { 0 };
+ struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
+ int err;
+
+ cntr_req.hdr.pcifunc = pcifunc;
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+
+	/* Try to allocate a counter to track this rule's stats.
+	 * If no counter is available, proceed without one, since
+	 * counters are scarcer than MCAM entries.
+	 */
+ err = __npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
+ if (!err && cntr_rsp.count) {
+ rule->cntr = cntr_rsp.cntr;
+ rule->has_cntr = true;
+ rsp->counter = rule->cntr;
+ } else {
+ rsp->counter = err;
+ }
+}
+
int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 150635de2bd5..da69e454662a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -1081,44 +1081,26 @@ static void rvu_mcam_add_rule(struct npc_mcam *mcam,
static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
struct rvu_npc_mcam_rule *rule)
{
- struct npc_mcam_oper_counter_req free_req = { 0 };
- struct msg_rsp free_rsp;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
- if (!rule->has_cntr)
- return;
+ mutex_lock(&mcam->lock);
- free_req.hdr.pcifunc = pcifunc;
- free_req.cntr = rule->cntr;
+ __rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule);
- rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp);
- rule->has_cntr = false;
+ mutex_unlock(&mcam->lock);
}
static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
struct rvu_npc_mcam_rule *rule,
struct npc_install_flow_rsp *rsp)
{
- struct npc_mcam_alloc_counter_req cntr_req = { 0 };
- struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
- int err;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
- cntr_req.hdr.pcifunc = pcifunc;
- cntr_req.contig = true;
- cntr_req.count = 1;
+ mutex_lock(&mcam->lock);
- /* we try to allocate a counter to track the stats of this
- * rule. If counter could not be allocated then proceed
- * without counter because counters are limited than entries.
- */
- err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req,
- &cntr_rsp);
- if (!err && cntr_rsp.count) {
- rule->cntr = cntr_rsp.cntr;
- rule->has_cntr = true;
- rsp->counter = rule->cntr;
- } else {
- rsp->counter = err;
- }
+ __rvu_mcam_add_counter_to_rule(rvu, pcifunc, rule, rsp);
+
+ mutex_unlock(&mcam->lock);
}
static int npc_mcast_update_action_index(struct rvu *rvu, struct npc_install_flow_req *req,
@@ -1416,6 +1398,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
struct npc_install_flow_rsp *rsp)
{
bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
+ bool from_rep_dev = !!is_rep_dev(rvu, req->hdr.pcifunc);
struct rvu_switch *rswitch = &rvu->rswitch;
int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
@@ -1472,14 +1455,19 @@ process_flow:
/* AF installing for a PF/VF */
if (!req->hdr.pcifunc)
target = req->vf;
+
/* PF installing for its VF */
- else if (!from_vf && req->vf) {
+	else if (!from_vf && req->vf && !from_rep_dev) {
target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
pf_set_vfs_mac = req->default_rule &&
(req->features & BIT_ULL(NPC_DMAC));
}
- /* msg received from PF/VF */
+
+ /* Representor device installing for a representee */
+	else if (from_rep_dev && req->vf)
+ target = req->vf;
else
+ /* msg received from PF/VF */
target = req->hdr.pcifunc;
/* ignore chan_mask in case pf func is not AF, revisit later */
@@ -1492,8 +1480,10 @@ process_flow:
pfvf = rvu_get_pfvf(rvu, target);
+ if (from_rep_dev)
+ req->channel = pfvf->rx_chan_base;
/* PF installing for its VF */
- if (req->hdr.pcifunc && !from_vf && req->vf)
+ if (req->hdr.pcifunc && !from_vf && req->vf && !from_rep_dev)
set_bit(PF_SET_VF_CFG, &pfvf->flags);
/* update req destination mac addr */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 2b299fa85159..62cdc714ba57 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -445,6 +445,7 @@
#define NIX_CONST_MAX_BPIDS GENMASK_ULL(23, 12)
#define NIX_CONST_SDP_CHANS GENMASK_ULL(11, 0)
+#define NIX_VLAN_ETYPE_MASK GENMASK_ULL(63, 48)
#define NIX_AF_MDQ_PARENT_MASK GENMASK_ULL(24, 16)
#define NIX_AF_TL4_PARENT_MASK GENMASK_ULL(23, 16)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
new file mode 100644
index 000000000000..052ae5923e3a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu.h"
+#include "rvu_reg.h"
+
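+/* The M() expansion below generates one otx2_mbox_alloc_msg_<name>()
+ * helper per entry in MBOX_UP_REP_MESSAGES. Each helper reserves a
+ * request/response slot on the AF->PF "up" mailbox for the given PF
+ * and stamps the request header with the mailbox signature and id.
+ */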
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ return req; \
+}
+
+MBOX_UP_REP_MESSAGES
+#undef M
+
+static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, event->pcifunc);
+ struct rep_event *msg;
+ int pf;
+
+ pf = rvu_get_pf(event->pcifunc);
+
+ if (event->event & RVU_EVENT_MAC_ADDR_CHANGE)
+ ether_addr_copy(pfvf->mac_addr, event->evt_data.mac);
+
+ mutex_lock(&rvu->mbox_lock);
+ msg = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf);
+ if (!msg) {
+ mutex_unlock(&rvu->mbox_lock);
+ return -ENOMEM;
+ }
+
+ msg->hdr.pcifunc = event->pcifunc;
+ msg->event = event->event;
+
+ memcpy(&msg->evt_data, &event->evt_data, sizeof(struct rep_evt_data));
+
+ otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
+
+ otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+
+ mutex_unlock(&rvu->mbox_lock);
+ return 0;
+}
+
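+/* Worker to drain the representor event queue: entries are dequeued
+ * one at a time under rep_evtq_lock and forwarded to the representor
+ * PF over the up mailbox, which may sleep, hence the workqueue.
+ */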
+static void rvu_rep_wq_handler(struct work_struct *work)
+{
+ struct rvu *rvu = container_of(work, struct rvu, rep_evt_work);
+ struct rep_evtq_ent *qentry;
+ struct rep_event *event;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&rvu->rep_evtq_lock, flags);
+ qentry = list_first_entry_or_null(&rvu->rep_evtq_head,
+ struct rep_evtq_ent,
+ node);
+ if (qentry)
+ list_del(&qentry->node);
+
+ spin_unlock_irqrestore(&rvu->rep_evtq_lock, flags);
+ if (!qentry)
+ break; /* nothing more to process */
+
+ event = &qentry->event;
+
+ rvu_rep_up_notify(rvu, event);
+ kfree(qentry);
+ } while (1);
+}
+
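+/* Mbox handler for events reported by a PF/VF: queue the event under
+ * rep_evtq_lock and kick rep_evt_wq, which delivers it from sleepable
+ * context.
+ */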
+int rvu_mbox_handler_rep_event_notify(struct rvu *rvu, struct rep_event *req,
+ struct msg_rsp *rsp)
+{
+ struct rep_evtq_ent *qentry;
+
+ qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+ if (!qentry)
+ return -ENOMEM;
+
+ qentry->event = *req;
+ spin_lock(&rvu->rep_evtq_lock);
+ list_add_tail(&qentry->node, &rvu->rep_evtq_head);
+ spin_unlock(&rvu->rep_evtq_lock);
+ queue_work(rvu->rep_evt_wq, &rvu->rep_evt_work);
+ return 0;
+}
+
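+/* Send an RVU_EVENT_PFVF_STATE up-notification to the representor PF
+ * whenever a CGX-mapped PF/VF is enabled or disabled.
+ */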
+int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ struct rep_event *req;
+ int pf;
+
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ return 0;
+
+ pf = rvu_get_pf(rvu->rep_pcifunc);
+
+ mutex_lock(&rvu->mbox_lock);
+ req = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf);
+ if (!req) {
+ mutex_unlock(&rvu->mbox_lock);
+ return -ENOMEM;
+ }
+
+ req->hdr.pcifunc = rvu->rep_pcifunc;
+ req->event |= RVU_EVENT_PFVF_STATE;
+ req->pcifunc = pcifunc;
+ req->evt_data.vf_state = enable;
+
+ otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
+ otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+
+ mutex_unlock(&rvu->mbox_lock);
+ return 0;
+}
+
+#define RVU_LF_RX_STATS(reg) \
+ rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, reg))
+
+#define RVU_LF_TX_STATS(reg) \
+ rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, reg))
+
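+/* Return (or, when @req->reset is set, clear) the per-LF NIX RX/TX
+ * hardware counters of the queried pcifunc.
+ */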
+int rvu_mbox_handler_nix_lf_stats(struct rvu *rvu,
+ struct nix_stats_req *req,
+ struct nix_stats_rsp *rsp)
+{
+ u16 pcifunc = req->pcifunc;
+ int nixlf, blkaddr, err;
+ struct msg_req rst_req;
+ struct msg_rsp rst_rsp;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return 0;
+
+ if (req->reset) {
+ rst_req.hdr.pcifunc = pcifunc;
+ return rvu_mbox_handler_nix_stats_rst(rvu, &rst_req, &rst_rsp);
+ }
+ rsp->rx.octs = RVU_LF_RX_STATS(RX_OCTS);
+ rsp->rx.ucast = RVU_LF_RX_STATS(RX_UCAST);
+ rsp->rx.bcast = RVU_LF_RX_STATS(RX_BCAST);
+ rsp->rx.mcast = RVU_LF_RX_STATS(RX_MCAST);
+ rsp->rx.drop = RVU_LF_RX_STATS(RX_DROP);
+ rsp->rx.err = RVU_LF_RX_STATS(RX_ERR);
+ rsp->rx.drop_octs = RVU_LF_RX_STATS(RX_DROP_OCTS);
+ rsp->rx.drop_mcast = RVU_LF_RX_STATS(RX_DRP_MCAST);
+ rsp->rx.drop_bcast = RVU_LF_RX_STATS(RX_DRP_BCAST);
+
+ rsp->tx.octs = RVU_LF_TX_STATS(TX_OCTS);
+ rsp->tx.ucast = RVU_LF_TX_STATS(TX_UCAST);
+ rsp->tx.bcast = RVU_LF_TX_STATS(TX_BCAST);
+ rsp->tx.mcast = RVU_LF_TX_STATS(TX_MCAST);
+ rsp->tx.drop = RVU_LF_TX_STATS(TX_DROP);
+
+ rsp->pcifunc = req->pcifunc;
+ return 0;
+}
+
+static u16 rvu_rep_get_vlan_id(struct rvu *rvu, u16 pcifunc)
+{
+ int id;
+
+ for (id = 0; id < rvu->rep_cnt; id++)
+ if (rvu->rep2pfvf_map[id] == pcifunc)
+ return id;
+ return 0;
+}
+
+static int rvu_rep_tx_vlan_cfg(struct rvu *rvu, u16 pcifunc,
+ u16 vlan_tci, int *vidx)
+{
+ struct nix_vtag_config_rsp rsp = {};
+ struct nix_vtag_config req = {};
+ u64 etype = ETH_P_8021Q;
+ int err;
+
+ /* Insert vlan tag */
+ req.hdr.pcifunc = pcifunc;
+ req.vtag_size = VTAGSIZE_T4;
+ req.cfg_type = 0; /* tx vlan cfg */
+ req.tx.cfg_vtag0 = true;
+ req.tx.vtag0 = FIELD_PREP(NIX_VLAN_ETYPE_MASK, etype) | vlan_tci;
+
+ err = rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp);
+ if (err) {
+ dev_err(rvu->dev, "Tx vlan config failed\n");
+ return err;
+ }
+ *vidx = rsp.vtag0_idx;
+ return 0;
+}
+
+static int rvu_rep_rx_vlan_cfg(struct rvu *rvu, u16 pcifunc)
+{
+ struct nix_vtag_config req = {};
+ struct nix_vtag_config_rsp rsp;
+
+ /* config strip, capture and size */
+ req.hdr.pcifunc = pcifunc;
+ req.vtag_size = VTAGSIZE_T4;
+ req.cfg_type = 1; /* rx vlan cfg */
+ req.rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+ req.rx.strip_vtag = true;
+ req.rx.capture_vtag = false;
+
+ return rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp);
+}
+
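+/* Install an RX rule matching the CTAG VLAN TCI that encodes the
+ * representor id; bit 8 of the TCI distinguishes direction. With @rte
+ * set, representee traffic is steered to the representor; otherwise
+ * it takes the representee's default RX path. The tag is stripped on
+ * receive.
+ */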
+static int rvu_rep_install_rx_rule(struct rvu *rvu, u16 pcifunc,
+ u16 entry, bool rte)
+{
+ struct npc_install_flow_req req = {};
+ struct npc_install_flow_rsp rsp = {};
+ struct rvu_pfvf *pfvf;
+ u16 vlan_tci, rep_id;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+	/* Steer traffic from the representee to its representor */
+ rep_id = rvu_rep_get_vlan_id(rvu, pcifunc);
+ if (rte) {
+ vlan_tci = rep_id | BIT_ULL(8);
+ req.vf = rvu->rep_pcifunc;
+ req.op = NIX_RX_ACTIONOP_UCAST;
+ req.index = rep_id;
+ } else {
+ vlan_tci = rep_id;
+ req.vf = pcifunc;
+ req.op = NIX_RX_ACTION_DEFAULT;
+ }
+
+ rvu_rep_rx_vlan_cfg(rvu, req.vf);
+ req.entry = entry;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_VLAN_ETYPE_CTAG);
+ req.vtag0_valid = true;
+ req.vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+ req.packet.vlan_etype = cpu_to_be16(ETH_P_8021Q);
+ req.mask.vlan_etype = cpu_to_be16(ETH_P_8021Q);
+ req.packet.vlan_tci = cpu_to_be16(vlan_tci);
+ req.mask.vlan_tci = cpu_to_be16(0xffff);
+
+ req.channel = RVU_SWITCH_LBK_CHAN;
+ req.chan_mask = 0xffff;
+ req.intf = pfvf->nix_rx_intf;
+
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+static int rvu_rep_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry,
+ bool rte)
+{
+ struct npc_install_flow_req req = {};
+ struct npc_install_flow_rsp rsp = {};
+ struct rvu_pfvf *pfvf;
+ int vidx, err;
+ u16 vlan_tci;
+ u8 lbkid;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ vlan_tci = rvu_rep_get_vlan_id(rvu, pcifunc);
+ if (rte)
+ vlan_tci |= BIT_ULL(8);
+
+ err = rvu_rep_tx_vlan_cfg(rvu, pcifunc, vlan_tci, &vidx);
+ if (err)
+ return err;
+
+ lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ if (rte) {
+ req.vf = pcifunc;
+ } else {
+ req.vf = rvu->rep_pcifunc;
+ req.packet.sq_id = vlan_tci;
+ req.mask.sq_id = 0xffff;
+ }
+
+ req.entry = entry;
+ req.intf = pfvf->nix_tx_intf;
+ req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
+ req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
+ req.set_cntr = 1;
+ req.vtag0_def = vidx;
+ req.vtag0_op = 1;
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
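+/* For every CGX-mapped PF, and each of its VFs with a NIX LF
+ * attached, install an RX/TX rule pair per direction (four MCAM
+ * entries per function), recording the owner in entry2pcifunc. Also
+ * sets up the workqueue used to deliver representor events.
+ */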
+int rvu_rep_install_mcam_rules(struct rvu *rvu)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ u16 start = rswitch->start_entry;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc, entry = 0;
+ int pf, vf, numvfs;
+ int err, nixlf, i;
+ u8 rep;
+
+ for (pf = 1; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+ rep = true;
+ for (i = 0; i < 2; i++) {
+ err = rvu_rep_install_rx_rule(rvu, pcifunc,
+ start + entry, rep);
+ if (err)
+ return err;
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+
+ err = rvu_rep_install_tx_rule(rvu, pcifunc,
+ start + entry, rep);
+ if (err)
+ return err;
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+ rep = false;
+ }
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+ for (vf = 0; vf < numvfs; vf++) {
+ pcifunc = pf << RVU_PFVF_PF_SHIFT |
+ ((vf + 1) & RVU_PFVF_FUNC_MASK);
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+
+			/* Skip installing rules if nixlf is not attached */
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
+ if (err)
+ continue;
+ rep = true;
+ for (i = 0; i < 2; i++) {
+ err = rvu_rep_install_rx_rule(rvu, pcifunc,
+ start + entry,
+ rep);
+ if (err)
+ return err;
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+
+ err = rvu_rep_install_tx_rule(rvu, pcifunc,
+ start + entry,
+ rep);
+ if (err)
+ return err;
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+ rep = false;
+ }
+ }
+ }
+
+ /* Initialize the wq for handling REP events */
+ spin_lock_init(&rvu->rep_evtq_lock);
+ INIT_LIST_HEAD(&rvu->rep_evtq_head);
+ INIT_WORK(&rvu->rep_evt_work, rvu_rep_wq_handler);
+ rvu->rep_evt_wq = alloc_workqueue("rep_evt_wq", 0, 0);
+ if (!rvu->rep_evt_wq) {
+ dev_err(rvu->dev, "REP workqueue allocation failed\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
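+/* Enable or disable all MCAM entries owned by @pcifunc along with its
+ * LBK link; the representor-mode counterpart of
+ * rvu_switch_update_rules().
+ */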
+void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u32 max = rswitch->used_entries;
+ int blkaddr;
+ u16 entry;
+
+ if (!rswitch->used_entries)
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ if (blkaddr < 0)
+ return;
+
+ rvu_switch_enable_lbk_link(rvu, pcifunc, ena);
+ mutex_lock(&mcam->lock);
+ for (entry = 0; entry < max; entry++) {
+ if (rswitch->entry2pcifunc[entry] == pcifunc)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, entry, ena);
+ }
+ mutex_unlock(&mcam->lock);
+}
+
+int rvu_rep_pf_init(struct rvu *rvu)
+{
+ u16 pcifunc = rvu->rep_pcifunc;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ set_bit(NIXLF_INITIALIZED, &pfvf->flags);
+ rvu_switch_enable_lbk_link(rvu, pcifunc, true);
+ rvu_rep_rx_vlan_cfg(rvu, pcifunc);
+ return 0;
+}
+
+int rvu_mbox_handler_esw_cfg(struct rvu *rvu, struct esw_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ if (req->hdr.pcifunc != rvu->rep_pcifunc)
+ return 0;
+
+ rvu->rep_mode = req->ena;
+
+ if (!rvu->rep_mode)
+ rvu_npc_free_mcam_entries(rvu, req->hdr.pcifunc, -1);
+
+ return 0;
+}
+
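+/* Handler for the representor PF's get_rep_cnt request: record its
+ * pcifunc, count the CGX-mapped PFs/VFs, and build rep2pfvf_map so a
+ * representor id can be translated to the pcifunc it represents (and
+ * back, see rvu_rep_get_vlan_id()).
+ */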
+int rvu_mbox_handler_get_rep_cnt(struct rvu *rvu, struct msg_req *req,
+ struct get_rep_cnt_rsp *rsp)
+{
+ int pf, vf, numvfs, hwvf, rep = 0;
+ u16 pcifunc;
+
+ rvu->rep_pcifunc = req->hdr.pcifunc;
+ rsp->rep_cnt = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
+ rvu->rep_cnt = rsp->rep_cnt;
+
+ rvu->rep2pfvf_map = devm_kzalloc(rvu->dev, rvu->rep_cnt *
+ sizeof(u16), GFP_KERNEL);
+ if (!rvu->rep2pfvf_map)
+ return -ENOMEM;
+
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+ pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ rvu->rep2pfvf_map[rep] = pcifunc;
+ rsp->rep_pf_map[rep] = pcifunc;
+ rep++;
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+ for (vf = 0; vf < numvfs; vf++) {
+ rvu->rep2pfvf_map[rep] = pcifunc |
+ ((vf + 1) & RVU_PFVF_FUNC_MASK);
+ rsp->rep_pf_map[rep] = rvu->rep2pfvf_map[rep];
+ rep++;
+ }
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index fc8da2090657..77ac94cb2ec4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -823,4 +823,30 @@ enum nix_tx_vtag_op {
#define VTAG_STRIP BIT_ULL(4)
#define VTAG_CAPTURE BIT_ULL(5)
+/* NIX TX stats */
+enum nix_stat_lf_tx {
+ TX_UCAST = 0x0,
+ TX_BCAST = 0x1,
+ TX_MCAST = 0x2,
+ TX_DROP = 0x3,
+ TX_OCTS = 0x4,
+ TX_STATS_ENUM_LAST,
+};
+
+/* NIX RX stats */
+enum nix_stat_lf_rx {
+ RX_OCTS = 0x0,
+ RX_UCAST = 0x1,
+ RX_BCAST = 0x2,
+ RX_MCAST = 0x3,
+ RX_DROP = 0x4,
+ RX_DROP_OCTS = 0x5,
+ RX_FCS = 0x6,
+ RX_ERR = 0x7,
+ RX_DRP_BCAST = 0x8,
+ RX_DRP_MCAST = 0x9,
+ RX_DRP_L3BCAST = 0xa,
+ RX_DRP_L3MCAST = 0xb,
+ RX_STATS_ENUM_LAST,
+};
#endif /* RVU_STRUCT_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
index 854045ed3b06..268efb7c1c15 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
@@ -8,7 +8,7 @@
#include <linux/bitfield.h>
#include "rvu.h"
-static void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool enable)
+void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool enable)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct nix_hw *nix_hw;
@@ -166,6 +166,8 @@ void rvu_switch_enable(struct rvu *rvu)
alloc_req.contig = true;
alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
+ if (rvu->rep_mode)
+ alloc_req.count = alloc_req.count * 4;
ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
&alloc_rsp);
if (ret) {
@@ -189,7 +191,12 @@ void rvu_switch_enable(struct rvu *rvu)
rswitch->used_entries = alloc_rsp.count;
rswitch->start_entry = alloc_rsp.entry;
- ret = rvu_switch_install_rules(rvu);
+ if (rvu->rep_mode) {
+ rvu_rep_pf_init(rvu);
+ ret = rvu_rep_install_mcam_rules(rvu);
+ } else {
+ ret = rvu_switch_install_rules(rvu);
+ }
if (ret)
goto uninstall_rules;
@@ -222,6 +229,9 @@ void rvu_switch_disable(struct rvu *rvu)
if (!rswitch->used_entries)
return;
+ if (rvu->rep_mode)
+ goto free_ents;
+
for (pf = 1; pf < hw->total_pfs; pf++) {
if (!is_pf_cgxmapped(rvu, pf))
continue;
@@ -249,6 +259,7 @@ void rvu_switch_disable(struct rvu *rvu)
}
}
+free_ents:
uninstall_req.start = rswitch->start_entry;
uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
free_req.all = 1;
@@ -258,12 +269,15 @@ void rvu_switch_disable(struct rvu *rvu)
kfree(rswitch->entry2pcifunc);
}
-void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc)
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc, bool ena)
{
struct rvu_switch *rswitch = &rvu->rswitch;
u32 max = rswitch->used_entries;
u16 entry;
+ if (rvu->rep_mode)
+ return rvu_rep_update_rules(rvu, pcifunc, ena);
+
if (!rswitch->used_entries)
return;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 64a97a0a10ed..dbc971266865 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -5,11 +5,13 @@
obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o otx2_ptp.o
obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
+obj-$(CONFIG_RVU_ESWITCH) += rvu_rep.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
otx2_devlink.o qos_sq.o qos.o
rvu_nicvf-y := otx2_vf.o
+rvu_rep-y := rep.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index c1c99d7054f8..a15cc86635d6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -72,7 +72,7 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf)
}
EXPORT_SYMBOL(cn10k_lmtst_init);
-int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
+int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura)
{
struct nix_cn10k_aq_enq_req *aq;
struct otx2_nic *pfvf = dev;
@@ -88,7 +88,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
- aq->sq.default_chan = pfvf->hw.tx_chan_base;
+ aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
aq->sq.sq_int_ena = NIX_SQINT_BITS;
@@ -203,6 +203,11 @@ int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf)
rsp = (struct nix_bandprof_alloc_rsp *)
otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ rc = PTR_ERR(rsp);
+ goto out;
+ }
+
if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) {
rc = -EIO;
goto out;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
index c1861f7de254..e3f0bce9908f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -26,7 +26,7 @@ static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu)
int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
-int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
int cn10k_lmtst_init(struct otx2_nic *pfvf);
int cn10k_free_all_ipolicers(struct otx2_nic *pfvf);
int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 87d5776e3b88..523ecb798a7a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -83,6 +83,7 @@ int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx);
return 1;
}
+EXPORT_SYMBOL(otx2_update_rq_stats);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)
{
@@ -99,6 +100,7 @@ int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)
otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx);
return 1;
}
+EXPORT_SYMBOL(otx2_update_sq_stats);
void otx2_get_dev_stats(struct otx2_nic *pfvf)
{
@@ -227,7 +229,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
u16 maxlen;
int err;
- maxlen = otx2_get_max_mtu(pfvf) + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+ maxlen = pfvf->hw.max_mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
@@ -236,7 +238,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
return -ENOMEM;
}
- req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+ req->maxlen = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
/* Use max receive length supported by hardware for loopback devices */
if (is_otx2_lbkvf(pfvf->pdev))
@@ -246,13 +248,14 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+EXPORT_SYMBOL(otx2_hw_set_mtu);
int otx2_config_pause_frm(struct otx2_nic *pfvf)
{
struct cgx_pause_frm_cfg *req;
int err;
- if (is_otx2_lbkvf(pfvf->pdev))
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return 0;
mutex_lock(&pfvf->mbox.lock);
@@ -646,12 +649,22 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
req->regval[2] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL4) {
+ int sdp_chan = hw->tx_chan_base + prio;
+
+ if (is_otx2_sdp_rep(pfvf->pdev))
+ prio = 0;
parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
+ if (is_otx2_sdp_rep(pfvf->pdev)) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
+ req->regval[2] = BIT_ULL(12) | BIT_ULL(13) |
+ (sdp_chan & 0xff);
+ }
} else if (lvl == NIX_TXSCH_LVL_TL3) {
parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
@@ -659,7 +672,8 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
req->num_regs++;
req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
- if (lvl == hw->txschq_link_cfg_lvl) {
+ if (lvl == hw->txschq_link_cfg_lvl &&
+ !is_otx2_sdp_rep(pfvf->pdev)) {
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
/* Enable this queue and backpressure
@@ -676,7 +690,8 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
req->regval[1] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 | dwrr_val;
- if (lvl == hw->txschq_link_cfg_lvl) {
+ if (lvl == hw->txschq_link_cfg_lvl &&
+ !is_otx2_sdp_rep(pfvf->pdev)) {
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
/* Enable this queue and backpressure
@@ -735,6 +750,7 @@ EXPORT_SYMBOL(otx2_smq_flush);
int otx2_txsch_alloc(struct otx2_nic *pfvf)
{
+ int chan_cnt = pfvf->hw.tx_chan_cnt;
struct nix_txsch_alloc_req *req;
struct nix_txsch_alloc_rsp *rsp;
int lvl, schq, rc;
@@ -747,6 +763,12 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf)
/* Request one schq per level */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
req->schq[lvl] = 1;
+
+ if (is_otx2_sdp_rep(pfvf->pdev) && chan_cnt > 1) {
+ req->schq[NIX_TXSCH_LVL_SMQ] = chan_cnt;
+ req->schq[NIX_TXSCH_LVL_TL4] = chan_cnt;
+ }
+
rc = otx2_sync_mbox_msg(&pfvf->mbox);
if (rc)
return rc;
@@ -757,10 +779,12 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf)
return PTR_ERR(rsp);
/* Setup transmit scheduler list */
- for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ pfvf->hw.txschq_cnt[lvl] = rsp->schq[lvl];
for (schq = 0; schq < rsp->schq[lvl]; schq++)
pfvf->hw.txschq_list[lvl][schq] =
rsp->schq_list[lvl][schq];
+ }
pfvf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;
@@ -798,12 +822,15 @@ EXPORT_SYMBOL(otx2_txschq_free_one);
void otx2_txschq_stop(struct otx2_nic *pfvf)
{
- int lvl, schq;
+ int lvl, schq, idx;
/* free non QOS TLx nodes */
- for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
- otx2_txschq_free_one(pfvf, lvl,
- pfvf->hw.txschq_list[lvl][0]);
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ for (idx = 0; idx < pfvf->hw.txschq_cnt[lvl]; idx++) {
+ otx2_txschq_free_one(pfvf, lvl,
+ pfvf->hw.txschq_list[lvl][idx]);
+ }
+ }
/* Clear the txschq list */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
@@ -883,7 +910,7 @@ static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
return otx2_sync_mbox_msg(&pfvf->mbox);
}
-int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
+int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura)
{
struct otx2_nic *pfvf = dev;
struct otx2_snd_queue *sq;
@@ -902,7 +929,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
- aq->sq.default_chan = pfvf->hw.tx_chan_base;
+ aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
aq->sq.sq_int_ena = NIX_SQINT_BITS;
@@ -925,6 +952,7 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
struct otx2_qset *qset = &pfvf->qset;
struct otx2_snd_queue *sq;
struct otx2_pool *pool;
+ u8 chan_offset;
int err;
pool = &pfvf->qset.pool[sqb_aura];
@@ -971,7 +999,8 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
sq->stats.bytes = 0;
sq->stats.pkts = 0;
- err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
+ chan_offset = qidx % pfvf->hw.tx_chan_cnt;
+ err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, chan_offset, sqb_aura);
if (err) {
kfree(sq->sg);
sq->sg = NULL;
@@ -1738,6 +1767,8 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
pfvf->hw.sqb_size = rsp->sqb_size;
pfvf->hw.rx_chan_base = rsp->rx_chan_base;
pfvf->hw.tx_chan_base = rsp->tx_chan_base;
+ pfvf->hw.rx_chan_cnt = rsp->rx_chan_cnt;
+ pfvf->hw.tx_chan_cnt = rsp->tx_chan_cnt;
pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
pfvf->hw.cgx_links = rsp->cgx_links;
@@ -1782,6 +1813,7 @@ void otx2_free_cints(struct otx2_nic *pfvf, int n)
free_irq(vector, &qset->napi[qidx]);
}
}
+EXPORT_SYMBOL(otx2_free_cints);
void otx2_set_cints_affinity(struct otx2_nic *pfvf)
{
@@ -1837,6 +1869,10 @@ u16 otx2_get_max_mtu(struct otx2_nic *pfvf)
if (!rc) {
rsp = (struct nix_hw_info *)
otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ rc = PTR_ERR(rsp);
+ goto out;
+ }
/* HW counts VLAN insertion bytes (8 for double tag)
* irrespective of whether SQE is requesting to insert VLAN
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index f27a3456ae64..566848663fea 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -29,6 +29,7 @@
#include "otx2_devlink.h"
#include <rvu_trace.h>
#include "qos.h"
+#include "rep.h"
/* IPv4 flag more fragment bit */
#define IPV4_FLAG_MORE 0x20
@@ -41,6 +42,8 @@
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
+#define PCI_DEVID_OCTEONTX2_SDP_REP 0xA0F7
+
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 2
#define PCI_MBOX_BAR_NUM 4
@@ -120,33 +123,6 @@ enum otx2_errcodes_re {
ERRCODE_IL4_CSUM = 0x22,
};
-/* NIX TX stats */
-enum nix_stat_lf_tx {
- TX_UCAST = 0x0,
- TX_BCAST = 0x1,
- TX_MCAST = 0x2,
- TX_DROP = 0x3,
- TX_OCTS = 0x4,
- TX_STATS_ENUM_LAST,
-};
-
-/* NIX RX stats */
-enum nix_stat_lf_rx {
- RX_OCTS = 0x0,
- RX_UCAST = 0x1,
- RX_BCAST = 0x2,
- RX_MCAST = 0x3,
- RX_DROP = 0x4,
- RX_DROP_OCTS = 0x5,
- RX_FCS = 0x6,
- RX_ERR = 0x7,
- RX_DRP_BCAST = 0x8,
- RX_DRP_MCAST = 0x9,
- RX_DRP_L3BCAST = 0xa,
- RX_DRP_L3MCAST = 0xb,
- RX_STATS_ENUM_LAST,
-};
-
struct otx2_dev_stats {
u64 rx_bytes;
u64 rx_frames;
@@ -224,15 +200,19 @@ struct otx2_hw {
/* NIX */
u8 txschq_link_cfg_lvl;
+ u8 txschq_cnt[NIX_TXSCH_LVL_CNT];
u8 txschq_aggr_lvl_rr_prio;
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 matchall_ipolicer;
u32 dwrr_mtu;
+ u32 max_mtu;
u8 smq_link_type;
/* HW settings, coalescing etc */
u16 rx_chan_base;
u16 tx_chan_base;
+ u8 rx_chan_cnt;
+ u8 tx_chan_cnt;
u16 cq_qcount_wait;
u16 cq_ecount_wait;
u16 rq_skid;
@@ -367,7 +347,8 @@ struct otx2_flow_config {
};
struct dev_hw_ops {
- int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
+ int (*sq_aq_init)(void *dev, u16 qidx, u8 chan_offset,
+ u16 sqb_aura);
void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
@@ -465,6 +446,8 @@ struct otx2_nic {
#define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
#define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17)
+#define OTX2_FLAG_REP_MODE_ENABLED BIT_ULL(18)
+#define OTX2_FLAG_PORT_UP BIT_ULL(19)
u64 flags;
u64 *cq_op_addr;
@@ -532,11 +515,19 @@ struct otx2_nic {
#if IS_ENABLED(CONFIG_MACSEC)
struct cn10k_mcs_cfg *macsec_cfg;
#endif
+
+#if IS_ENABLED(CONFIG_RVU_ESWITCH)
+ struct rep_dev **reps;
+ int rep_cnt;
+ u16 rep_pf_map[RVU_MAX_REP];
+ u16 esw_mode;
+#endif
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
- return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
+ return (pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF) ||
+ (pdev->device == PCI_DEVID_RVU_REP);
}
static inline bool is_96xx_A0(struct pci_dev *pdev)
@@ -551,6 +542,11 @@ static inline bool is_96xx_B0(struct pci_dev *pdev)
(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}
+static inline bool is_otx2_sdp_rep(struct pci_dev *pdev)
+{
+ return pdev->device == PCI_DEVID_OCTEONTX2_SDP_REP;
+}
+
/* REVID for PCIe devices.
* Bits 0..1: minor pass, bit 3..2: major pass
* bits 7..4: midr id
@@ -913,15 +909,19 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
{
u16 smq;
+ int idx;
+
#ifdef CONFIG_DCB
if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
#endif
/* check if qidx falls under QOS queues */
- if (qidx >= pfvf->hw.non_qos_queues)
+ if (qidx >= pfvf->hw.non_qos_queues) {
smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];
- else
- smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ } else {
+ idx = qidx % pfvf->hw.txschq_cnt[NIX_TXSCH_LVL_SMQ];
+ smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][idx];
+ }
return smq;
}
@@ -988,14 +988,28 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
-int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
-int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
+int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
dma_addr_t *dma);
int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
int stack_pages, int numptrs, int buf_size, int type);
int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
int pool_id, int numptrs);
+int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf);
+void otx2_free_queue_mem(struct otx2_qset *qset);
+int otx2_alloc_queue_mem(struct otx2_nic *pf);
+int otx2_init_hw_resources(struct otx2_nic *pfvf);
+void otx2_free_hw_resources(struct otx2_nic *pf);
+int otx2_wq_init(struct otx2_nic *pf);
+int otx2_check_pf_usable(struct otx2_nic *pf);
+int otx2_pfaf_mbox_init(struct otx2_nic *pf);
+int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af);
+int otx2_realloc_msix_vectors(struct otx2_nic *pf);
+void otx2_pfaf_mbox_destroy(struct otx2_nic *pf);
+void otx2_disable_mbox_intr(struct otx2_nic *pf);
+void otx2_disable_napi(struct otx2_nic *pf);
+irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
/* RSS configuration APIs*/
int otx2_rss_init(struct otx2_nic *pfvf);
@@ -1127,4 +1141,12 @@ u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid);
void otx2_qos_config_txschq(struct otx2_nic *pfvf);
void otx2_clean_qos_queues(struct otx2_nic *pfvf);
+int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info);
+int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
+ struct flow_cls_offload *cls_flower);
+
+static inline int mcam_entry_cmp(const void *a, const void *b)
+{
+ return *(u16 *)a - *(u16 *)b;
+}
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index aa01110f04a3..294fba58b670 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -315,6 +315,11 @@ int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
rsp = (struct cgx_pfc_rsp *)
otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ err = PTR_ERR(rsp);
+ goto unlock;
+ }
+
if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) {
dev_warn(pfvf->dev,
"Failed to config PFC\n");
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 53f14aa944bd..33ec9a7f7c03 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -141,7 +141,56 @@ static const struct devlink_param otx2_dl_params[] = {
otx2_dl_ucast_flt_cnt_validate),
};
+#ifdef CONFIG_RVU_ESWITCH
+static int otx2_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ if (!otx2_rep_dev(pfvf->pdev))
+ return -EOPNOTSUPP;
+
+ *mode = pfvf->esw_mode;
+
+ return 0;
+}
+
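+/* Switching to 'switchdev' creates the representor netdevs via
+ * rvu_rep_create(); switching back to 'legacy' tears them down. Only
+ * the representor PCI function (otx2_rep_dev()) honours these ops.
+ */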
+static int otx2_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+ int ret = 0;
+
+ if (!otx2_rep_dev(pfvf->pdev))
+ return -EOPNOTSUPP;
+
+ if (pfvf->esw_mode == mode)
+ return 0;
+
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ rvu_rep_destroy(pfvf);
+ break;
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ ret = rvu_rep_create(pfvf, extack);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!ret)
+ pfvf->esw_mode = mode;
+
+ return ret;
+}
+#endif
+
static const struct devlink_ops otx2_devlink_ops = {
+#ifdef CONFIG_RVU_ESWITCH
+ .eswitch_mode_get = otx2_devlink_eswitch_mode_get,
+ .eswitch_mode_set = otx2_devlink_eswitch_mode_set,
+#endif
};
int otx2_register_dl(struct otx2_nic *pfvf)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
index 80d853b343f9..2046dd0da00d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
@@ -28,6 +28,11 @@ static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
if (!err) {
rsp = (struct cgx_mac_addr_add_rsp *)
otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ mutex_unlock(&pf->mbox.lock);
+ return PTR_ERR(rsp);
+ }
+
*dmac_index = rsp->index;
}
@@ -200,6 +205,10 @@ int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos)
rsp = (struct cgx_mac_addr_update_rsp *)
otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ rc = PTR_ERR(rsp);
+ goto out;
+ }
pf->flow_cfg->bmap_to_dmacindex[bit_pos] = rsp->index;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 32468c663605..2d53dc77ef1e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -85,26 +85,22 @@ static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
int start_qidx = qset * pfvf->hw.rx_queues;
int qidx, stats;
- for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
- for (stats = 0; stats < otx2_n_queue_stats; stats++) {
- sprintf(*data, "rxq%d: %s", qidx + start_qidx,
- otx2_queue_stats[stats].name);
- *data += ETH_GSTRING_LEN;
- }
- }
+ for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++)
+ for (stats = 0; stats < otx2_n_queue_stats; stats++)
+ ethtool_sprintf(data, "rxq%d: %s", qidx + start_qidx,
+ otx2_queue_stats[stats].name);
- for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
- for (stats = 0; stats < otx2_n_queue_stats; stats++) {
+ for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++)
+ for (stats = 0; stats < otx2_n_queue_stats; stats++)
if (qidx >= pfvf->hw.non_qos_queues)
- sprintf(*data, "txq_qos%d: %s",
- qidx + start_qidx - pfvf->hw.non_qos_queues,
- otx2_queue_stats[stats].name);
+ ethtool_sprintf(data, "txq_qos%d: %s",
+ qidx + start_qidx -
+ pfvf->hw.non_qos_queues,
+ otx2_queue_stats[stats].name);
else
- sprintf(*data, "txq%d: %s", qidx + start_qidx,
- otx2_queue_stats[stats].name);
- *data += ETH_GSTRING_LEN;
- }
- }
+ ethtool_sprintf(data, "txq%d: %s",
+ qidx + start_qidx,
+ otx2_queue_stats[stats].name);
}
static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
@@ -115,36 +111,25 @@ static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
if (sset != ETH_SS_STATS)
return;
- for (stats = 0; stats < otx2_n_dev_stats; stats++) {
- memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (stats = 0; stats < otx2_n_dev_stats; stats++)
+ ethtool_puts(&data, otx2_dev_stats[stats].name);
- for (stats = 0; stats < otx2_n_drv_stats; stats++) {
- memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (stats = 0; stats < otx2_n_drv_stats; stats++)
+ ethtool_puts(&data, otx2_drv_stats[stats].name);
otx2_get_qset_strings(pfvf, &data, 0);
if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
- for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
- sprintf(data, "cgx_rxstat%d: ", stats);
- data += ETH_GSTRING_LEN;
- }
+ for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++)
+ ethtool_sprintf(&data, "cgx_rxstat%d: ", stats);
- for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
- sprintf(data, "cgx_txstat%d: ", stats);
- data += ETH_GSTRING_LEN;
- }
+ for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++)
+ ethtool_sprintf(&data, "cgx_txstat%d: ", stats);
}
- strcpy(data, "reset_count");
- data += ETH_GSTRING_LEN;
- sprintf(data, "Fec Corrected Errors: ");
- data += ETH_GSTRING_LEN;
- sprintf(data, "Fec Uncorrected Errors: ");
- data += ETH_GSTRING_LEN;
+ ethtool_puts(&data, "reset_count");
+ ethtool_puts(&data, "Fec Corrected Errors: ");
+ ethtool_puts(&data, "Fec Uncorrected Errors: ");
}
static void otx2_get_qset_stats(struct otx2_nic *pfvf,
@@ -343,6 +328,11 @@ static void otx2_get_pauseparam(struct net_device *netdev,
if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
rsp = (struct cgx_pause_frm_cfg *)
otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return;
+ }
+
pause->rx_pause = rsp->rx_pause;
pause->tx_pause = rsp->tx_pause;
}
@@ -1072,6 +1062,11 @@ static int otx2_set_fecparam(struct net_device *netdev,
rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ err = PTR_ERR(rsp);
+ goto end;
+ }
+
if (rsp->fec >= 0)
pfvf->linfo.fec = rsp->fec;
else
@@ -1365,20 +1360,15 @@ static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
if (sset != ETH_SS_STATS)
return;
- for (stats = 0; stats < otx2_n_dev_stats; stats++) {
- memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (stats = 0; stats < otx2_n_dev_stats; stats++)
+ ethtool_puts(&data, otx2_dev_stats[stats].name);
- for (stats = 0; stats < otx2_n_drv_stats; stats++) {
- memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (stats = 0; stats < otx2_n_drv_stats; stats++)
+ ethtool_puts(&data, otx2_drv_stats[stats].name);
otx2_get_qset_strings(vf, &data, 0);
- strcpy(data, "reset_count");
- data += ETH_GSTRING_LEN;
+ ethtool_puts(&data, "reset_count");
}
static void otx2vf_get_ethtool_stats(struct net_device *netdev,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 98c31a16c70b..47bfd1fb37d4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -64,11 +64,6 @@ static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
return 0;
}
-static int mcam_entry_cmp(const void *a, const void *b)
-{
- return *(u16 *)a - *(u16 *)b;
-}
-
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
@@ -119,6 +114,8 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp))
+ goto exit;
for (ent = 0; ent < rsp->count; ent++)
flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
@@ -197,6 +194,10 @@ int otx2_mcam_entry_init(struct otx2_nic *pfvf)
rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return PTR_ERR(rsp);
+ }
if (rsp->count != req->count) {
netdev_info(pfvf->netdev,
@@ -232,6 +233,10 @@ int otx2_mcam_entry_init(struct otx2_nic *pfvf)
frsp = (struct npc_get_field_status_rsp *)otx2_mbox_get_rsp
(&pfvf->mbox.mbox, 0, &freq->hdr);
+ if (IS_ERR(frsp)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return PTR_ERR(frsp);
+ }
if (frsp->enable) {
pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 5492dea547a1..e310f99b1736 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -519,6 +519,7 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
switch (msg->id) {
case MBOX_MSG_CGX_LINK_EVENT:
+ case MBOX_MSG_REP_EVENT_UP_NOTIFY:
break;
default:
if (msg->rc)
@@ -832,6 +833,9 @@ static void otx2_handle_link_event(struct otx2_nic *pf)
struct cgx_link_user_info *linfo = &pf->linfo;
struct net_device *netdev = pf->netdev;
+ if (pf->flags & OTX2_FLAG_PORT_UP)
+ return;
+
pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
linfo->link_up ? "UP" : "DOWN", linfo->speed,
linfo->full_duplex ? "Full" : "Half");
@@ -844,6 +848,35 @@ static void otx2_handle_link_event(struct otx2_nic *pf)
}
}
+static int otx2_mbox_up_handler_rep_event_up_notify(struct otx2_nic *pf,
+ struct rep_event *info,
+ struct msg_rsp *rsp)
+{
+ struct net_device *netdev = pf->netdev;
+
+ if (info->event == RVU_EVENT_MTU_CHANGE) {
+ netdev->mtu = info->evt_data.mtu;
+ return 0;
+ }
+
+ if (info->event == RVU_EVENT_PORT_STATE) {
+ if (info->evt_data.port_state) {
+ pf->flags |= OTX2_FLAG_PORT_UP;
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+ } else {
+ pf->flags &= ~OTX2_FLAG_PORT_UP;
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
+ return 0;
+ }
+#ifdef CONFIG_RVU_ESWITCH
+ rvu_event_up_notify(pf, info);
+#endif
+ return 0;
+}
+
int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf,
struct mcs_intr_info *event,
struct msg_rsp *rsp)
@@ -913,6 +946,7 @@ static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
}
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
+MBOX_UP_REP_MESSAGES
#undef M
break;
default:
@@ -1008,7 +1042,7 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
return IRQ_HANDLED;
}
-static void otx2_disable_mbox_intr(struct otx2_nic *pf)
+void otx2_disable_mbox_intr(struct otx2_nic *pf)
{
int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
@@ -1016,8 +1050,9 @@ static void otx2_disable_mbox_intr(struct otx2_nic *pf)
otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
free_irq(vector, pf);
}
+EXPORT_SYMBOL(otx2_disable_mbox_intr);
-static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
+int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
{
struct otx2_hw *hw = &pf->hw;
struct msg_req *req;
@@ -1061,7 +1096,7 @@ static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
return 0;
}
-static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
+void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
{
struct mbox *mbox = &pf->mbox;
@@ -1076,8 +1111,9 @@ static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
otx2_mbox_destroy(&mbox->mbox);
otx2_mbox_destroy(&mbox->mbox_up);
}
+EXPORT_SYMBOL(otx2_pfaf_mbox_destroy);
-static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
+int otx2_pfaf_mbox_init(struct otx2_nic *pf)
{
struct mbox *mbox = &pf->mbox;
void __iomem *hwbase;
@@ -1379,7 +1415,7 @@ done:
return IRQ_HANDLED;
}
-static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
+irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
{
struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
@@ -1398,20 +1434,25 @@ static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
return IRQ_HANDLED;
}
+EXPORT_SYMBOL(otx2_cq_intr_handler);
-static void otx2_disable_napi(struct otx2_nic *pf)
+void otx2_disable_napi(struct otx2_nic *pf)
{
struct otx2_qset *qset = &pf->qset;
struct otx2_cq_poll *cq_poll;
+ struct work_struct *work;
int qidx;
for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
cq_poll = &qset->napi[qidx];
- cancel_work_sync(&cq_poll->dim.work);
+ work = &cq_poll->dim.work;
+ if (work->func)
+ cancel_work_sync(work);
napi_disable(&cq_poll->napi);
netif_napi_del(&cq_poll->napi);
}
}
+EXPORT_SYMBOL(otx2_disable_napi);
static void otx2_free_cq_res(struct otx2_nic *pf)
{
@@ -1477,7 +1518,7 @@ static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
return ALIGN(rbuf_size, 2048);
}
-static int otx2_init_hw_resources(struct otx2_nic *pf)
+int otx2_init_hw_resources(struct otx2_nic *pf)
{
struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
@@ -1493,10 +1534,11 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
hw->sqpool_cnt = otx2_get_total_tx_queues(pf);
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
- /* Maximum hardware supported transmit length */
- pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
-
- pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
+ if (!otx2_rep_dev(pf->pdev)) {
+ /* Maximum hardware supported transmit length */
+ pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
+ pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
+ }
mutex_lock(&mbox->lock);
/* NPA init */
@@ -1549,10 +1591,15 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
}
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
- err = otx2_txschq_config(pf, lvl, 0, false);
- if (err) {
- mutex_unlock(&mbox->lock);
- goto err_free_nix_queues;
+ int idx;
+
+ for (idx = 0; idx < pf->hw.txschq_cnt[lvl]; idx++) {
+ err = otx2_txschq_config(pf, lvl, idx, false);
+ if (err) {
+ dev_err(pf->dev, "Failed to config TXSCH\n");
+ mutex_unlock(&mbox->lock);
+ goto err_free_nix_queues;
+ }
}
}
@@ -1601,8 +1648,9 @@ exit:
mutex_unlock(&mbox->lock);
return err;
}
+EXPORT_SYMBOL(otx2_init_hw_resources);
-static void otx2_free_hw_resources(struct otx2_nic *pf)
+void otx2_free_hw_resources(struct otx2_nic *pf)
{
struct otx2_qset *qset = &pf->qset;
struct nix_lf_free_req *free_req;
@@ -1624,11 +1672,12 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
otx2_pfc_txschq_stop(pf);
#endif
- otx2_clean_qos_queues(pf);
+ if (!otx2_rep_dev(pf->pdev))
+ otx2_clean_qos_queues(pf);
mutex_lock(&mbox->lock);
/* Disable backpressure */
- if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
+ if (!is_otx2_lbkvf(pf->pdev))
otx2_nix_config_bp(pf, false);
mutex_unlock(&mbox->lock);
@@ -1660,7 +1709,8 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
otx2_free_cq_res(pf);
/* Free all ingress bandwidth profiles allocated */
- cn10k_free_all_ipolicers(pf);
+ if (!otx2_rep_dev(pf->pdev))
+ cn10k_free_all_ipolicers(pf);
mutex_lock(&mbox->lock);
/* Reset NIX LF */
@@ -1688,6 +1738,7 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
}
mutex_unlock(&mbox->lock);
}
+EXPORT_SYMBOL(otx2_free_hw_resources);
static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf)
{
@@ -1770,15 +1821,24 @@ static void otx2_dim_work(struct work_struct *w)
dim->state = DIM_START_MEASURE;
}
-int otx2_open(struct net_device *netdev)
+void otx2_free_queue_mem(struct otx2_qset *qset)
+{
+ kfree(qset->sq);
+ qset->sq = NULL;
+ kfree(qset->cq);
+ qset->cq = NULL;
+ kfree(qset->rq);
+ qset->rq = NULL;
+ kfree(qset->napi);
+ qset->napi = NULL;
+}
+EXPORT_SYMBOL(otx2_free_queue_mem);
+
+int otx2_alloc_queue_mem(struct otx2_nic *pf)
{
- struct otx2_nic *pf = netdev_priv(netdev);
- struct otx2_cq_poll *cq_poll = NULL;
struct otx2_qset *qset = &pf->qset;
- int err = 0, qidx, vec;
- char *irq_name;
+ struct otx2_cq_poll *cq_poll;
- netif_carrier_off(netdev);
/* RQ and SQs are mapped to different CQs,
* so find out max CQ IRQs (i.e CINTs) needed.
@@ -1798,7 +1858,6 @@ int otx2_open(struct net_device *netdev)
/* CQ size of SQ */
qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);
- err = -ENOMEM;
qset->cq = kcalloc(pf->qset.cq_cnt,
sizeof(struct otx2_cq_queue), GFP_KERNEL);
if (!qset->cq)
@@ -1814,6 +1873,28 @@ int otx2_open(struct net_device *netdev)
if (!qset->rq)
goto err_free_mem;
+ return 0;
+
+err_free_mem:
+ otx2_free_queue_mem(qset);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(otx2_alloc_queue_mem);
+
+int otx2_open(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_qset *qset = &pf->qset;
+ int err = 0, qidx, vec;
+ char *irq_name;
+
+ netif_carrier_off(netdev);
+
+ err = otx2_alloc_queue_mem(pf);
+ if (err)
+ return err;
+
err = otx2_init_hw_resources(pf);
if (err)
goto err_free_mem;
@@ -1932,6 +2013,7 @@ int otx2_open(struct net_device *netdev)
}
pf->flags &= ~OTX2_FLAG_INTF_DOWN;
+ pf->flags &= ~OTX2_FLAG_PORT_UP;
/* 'intf_down' may be checked on any cpu */
smp_wmb();
@@ -1979,10 +2061,7 @@ err_disable_napi:
otx2_disable_napi(pf);
otx2_free_hw_resources(pf);
err_free_mem:
- kfree(qset->sq);
- kfree(qset->cq);
- kfree(qset->rq);
- kfree(qset->napi);
+ otx2_free_queue_mem(qset);
return err;
}
EXPORT_SYMBOL(otx2_open);
@@ -2047,11 +2126,7 @@ int otx2_stop(struct net_device *netdev)
for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
-
- kfree(qset->sq);
- kfree(qset->cq);
- kfree(qset->rq);
- kfree(qset->napi);
+ otx2_free_queue_mem(qset);
/* Do not clear RQ/SQ ringsize settings */
memset_startat(qset, 0, sqe_cnt);
return 0;
@@ -2081,7 +2156,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
sq = &pf->qset.sq[sq_idx];
txq = netdev_get_tx_queue(netdev, qidx);
- if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+ if (!otx2_sq_append_skb(pf, txq, sq, skb, qidx)) {
netif_tx_stop_queue(txq);
/* Check again, in case SQBs got freed up */
@@ -2786,7 +2861,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_vf_trust = otx2_ndo_set_vf_trust,
};
-static int otx2_wq_init(struct otx2_nic *pf)
+int otx2_wq_init(struct otx2_nic *pf)
{
pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
if (!pf->otx2_wq)
@@ -2797,7 +2872,7 @@ static int otx2_wq_init(struct otx2_nic *pf)
return 0;
}
-static int otx2_check_pf_usable(struct otx2_nic *nic)
+int otx2_check_pf_usable(struct otx2_nic *nic)
{
u64 rev;
@@ -2815,7 +2890,7 @@ static int otx2_check_pf_usable(struct otx2_nic *nic)
return 0;
}
-static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
+int otx2_realloc_msix_vectors(struct otx2_nic *pf)
{
struct otx2_hw *hw = &pf->hw;
int num_vec, err;
@@ -2837,6 +2912,7 @@ static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
return otx2_register_mbox_intr(pf, false);
}
+EXPORT_SYMBOL(otx2_realloc_msix_vectors);
static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
{
@@ -2872,6 +2948,88 @@ static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
}
}
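+/* Bring up the resources shared by the PF and representor drivers:
+ * IRQ name/affinity bookkeeping, CSR mapping, MSI-X vectors, the
+ * PF <=> AF mailbox and NPA/NIX LF attach. Factored out of
+ * otx2_probe() so rvu_rep can reuse it.
+ */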
+int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf)
+{
+ struct device *dev = &pdev->dev;
+ struct otx2_hw *hw = &pf->hw;
+ int num_vec, err;
+
+ num_vec = pci_msix_vec_count(pdev);
+ hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
+ GFP_KERNEL);
+ if (!hw->irq_name)
+ return -ENOMEM;
+
+ hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
+ sizeof(cpumask_var_t), GFP_KERNEL);
+ if (!hw->affinity_mask)
+ return -ENOMEM;
+
+ /* Map CSRs */
+ pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!pf->reg_base) {
+ dev_err(dev, "Unable to map physical function CSRs, aborting\n");
+ return -ENOMEM;
+ }
+
+ err = otx2_check_pf_usable(pf);
+ if (err)
+ return err;
+
+ err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
+ RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
+ __func__, num_vec);
+ return err;
+ }
+
+ otx2_setup_dev_hw_settings(pf);
+
+ /* Init PF <=> AF mailbox stuff */
+ err = otx2_pfaf_mbox_init(pf);
+ if (err)
+ goto err_free_irq_vectors;
+
+ /* Register mailbox interrupt */
+ err = otx2_register_mbox_intr(pf, true);
+ if (err)
+ goto err_mbox_destroy;
+
+ /* Request AF to attach NPA and NIX LFs to this PF.
+ * NIX and NPA LFs are needed for this PF to function as a NIC.
+ */
+ err = otx2_attach_npa_nix(pf);
+ if (err)
+ goto err_disable_mbox_intr;
+
+ err = otx2_realloc_msix_vectors(pf);
+ if (err)
+ goto err_detach_rsrc;
+
+ err = cn10k_lmtst_init(pf);
+ if (err)
+ goto err_detach_rsrc;
+
+ return 0;
+
+err_detach_rsrc:
+ if (pf->hw.lmt_info)
+ free_percpu(pf->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+ qmem_free(pf->dev, pf->dync_lmt);
+ otx2_detach_resources(&pf->mbox);
+err_disable_mbox_intr:
+ otx2_disable_mbox_intr(pf);
+err_mbox_destroy:
+ otx2_pfaf_mbox_destroy(pf);
+err_free_irq_vectors:
+ pci_free_irq_vectors(hw->pdev);
+
+ return err;
+}
+EXPORT_SYMBOL(otx2_init_rsrc);
+
static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -2879,7 +3037,6 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct net_device *netdev;
struct otx2_nic *pf;
struct otx2_hw *hw;
- int num_vec;
err = pcim_enable_device(pdev);
if (err) {
@@ -2930,72 +3087,14 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Use CQE of 128 byte descriptor size by default */
hw->xqe_size = 128;
- num_vec = pci_msix_vec_count(pdev);
- hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
- GFP_KERNEL);
- if (!hw->irq_name) {
- err = -ENOMEM;
- goto err_free_netdev;
- }
-
- hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
- sizeof(cpumask_var_t), GFP_KERNEL);
- if (!hw->affinity_mask) {
- err = -ENOMEM;
- goto err_free_netdev;
- }
-
- /* Map CSRs */
- pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
- if (!pf->reg_base) {
- dev_err(dev, "Unable to map physical function CSRs, aborting\n");
- err = -ENOMEM;
- goto err_free_netdev;
- }
-
- err = otx2_check_pf_usable(pf);
+ err = otx2_init_rsrc(pdev, pf);
if (err)
goto err_free_netdev;
- err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
- RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
- if (err < 0) {
- dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
- __func__, num_vec);
- goto err_free_netdev;
- }
-
- otx2_setup_dev_hw_settings(pf);
-
- /* Init PF <=> AF mailbox stuff */
- err = otx2_pfaf_mbox_init(pf);
- if (err)
- goto err_free_irq_vectors;
-
- /* Register mailbox interrupt */
- err = otx2_register_mbox_intr(pf, true);
- if (err)
- goto err_mbox_destroy;
-
- /* Request AF to attach NPA and NIX LFs to this PF.
- * NIX and NPA LFs are needed for this PF to function as a NIC.
- */
- err = otx2_attach_npa_nix(pf);
- if (err)
- goto err_disable_mbox_intr;
-
- err = otx2_realloc_msix_vectors(pf);
- if (err)
- goto err_detach_rsrc;
-
err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
if (err)
goto err_detach_rsrc;
- err = cn10k_lmtst_init(pf);
- if (err)
- goto err_detach_rsrc;
-
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
@@ -3058,6 +3157,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(pf);
+ hw->max_mtu = netdev->max_mtu;
/* reset CGX/RPM MAC stats */
otx2_reset_mac_stats(pf);
@@ -3118,11 +3218,8 @@ err_detach_rsrc:
if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
qmem_free(pf->dev, pf->dync_lmt);
otx2_detach_resources(&pf->mbox);
-err_disable_mbox_intr:
otx2_disable_mbox_intr(pf);
-err_mbox_destroy:
otx2_pfaf_mbox_destroy(pf);
-err_free_irq_vectors:
pci_free_irq_vectors(hw->pdev);
err_free_netdev:
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index e63cc1eb6d89..9a226ca74425 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -443,6 +443,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
struct flow_action_entry *act;
struct net_device *target;
struct otx2_nic *priv;
+ struct rep_dev *rdev;
u32 burst, mark = 0;
u8 nr_police = 0;
u8 num_intf = 1;
@@ -464,14 +465,18 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
return 0;
case FLOW_ACTION_REDIRECT_INGRESS:
target = act->dev;
- priv = netdev_priv(target);
- /* npc_install_flow_req doesn't support passing a target pcifunc */
- if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't redirect to other pf/vf");
- return -EOPNOTSUPP;
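+ /* Representor netdevs are created without a parent device, so a
+ * NULL dev.parent identifies the redirect target as a rep port
+ * whose pcifunc lives in the rep_dev private data.
+ */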
+ if (target->dev.parent) {
+ priv = netdev_priv(target);
+ if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't redirect to other pf/vf");
+ return -EOPNOTSUPP;
+ }
+ req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
+ } else {
+ rdev = netdev_priv(target);
+ req->vf = rdev->pcifunc & RVU_PFVF_FUNC_MASK;
}
- req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
/* if op is already set; avoid overwriting the same */
if (!req->op)
@@ -1300,6 +1305,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
req->channel = nic->hw.rx_chan_base;
req->entry = flow_cfg->flow_ent[mcam_idx];
req->intf = NIX_INTF_RX;
+ req->vf = nic->pcifunc;
req->set_cntr = 1;
new_node->entry = req->entry;
@@ -1400,8 +1406,8 @@ static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
return 0;
}
-static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
- struct flow_cls_offload *cls_flower)
+int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
+ struct flow_cls_offload *cls_flower)
{
switch (cls_flower->command) {
case FLOW_CLS_REPLACE:
@@ -1414,6 +1420,7 @@ static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
return -EOPNOTSUPP;
}
}
+EXPORT_SYMBOL(otx2_setup_tc_cls_flower);
static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
struct tc_cls_matchall_offload *cls)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 933e18ba2fb2..04bc06a80e23 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -376,9 +376,11 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
}
otx2_set_rxhash(pfvf, cqe, skb);
- skb_record_rx_queue(skb, cq->cq_idx);
- if (pfvf->netdev->features & NETIF_F_RXCSUM)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
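+ /* In representor mode the skb is delivered via the rep netdev,
+ * which exposes a single queue, so skip RX queue recording and
+ * csum status here.
+ */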
+ if (!(pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)) {
+ skb_record_rx_queue(skb, cq->cq_idx);
+ if (pfvf->netdev->features & NETIF_F_RXCSUM)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
if (pfvf->flags & OTX2_FLAG_TC_MARK_ENABLED)
skb->mark = parse->match_id;
@@ -453,6 +455,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
int tx_pkts = 0, tx_bytes = 0, qidx;
struct otx2_snd_queue *sq;
struct nix_cqe_tx_s *cqe;
+ struct net_device *ndev;
int processed_cqe = 0;
if (cq->pend_cqe >= budget)
@@ -493,6 +496,13 @@ process_cqe:
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
((u64)cq->cq_idx << 32) | processed_cqe);
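+ /* In representor mode each rep owns one SQ, so qidx indexes the
+ * rep array and TX completions are credited to that rep's netdev.
+ */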
+#if IS_ENABLED(CONFIG_RVU_ESWITCH)
+ if (pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)
+ ndev = pfvf->reps[qidx]->netdev;
+ else
+#endif
+ ndev = pfvf->netdev;
+
if (likely(tx_pkts)) {
struct netdev_queue *txq;
@@ -500,12 +510,14 @@ process_cqe:
if (qidx >= pfvf->hw.tx_queues)
qidx -= pfvf->hw.xdp_queues;
- txq = netdev_get_tx_queue(pfvf->netdev, qidx);
+ if (pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)
+ qidx = 0;
+ txq = netdev_get_tx_queue(ndev, qidx);
netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
/* Check if queue was stopped earlier due to ring full */
smp_mb();
if (netif_tx_queue_stopped(txq) &&
- netif_carrier_ok(pfvf->netdev))
+ netif_carrier_ok(ndev))
netif_tx_wake_queue(txq);
}
return 0;
@@ -527,7 +539,7 @@ static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_p
rx_frames + tx_frames,
rx_bytes + tx_bytes,
&dim_sample);
- net_dim(&cq_poll->dim, dim_sample);
+ net_dim(&cq_poll->dim, &dim_sample);
}
int otx2_napi_handler(struct napi_struct *napi, int budget)
@@ -594,6 +606,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
}
return workdone;
}
+EXPORT_SYMBOL(otx2_napi_handler);
void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
int size, int qidx)
@@ -1141,13 +1154,13 @@ static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
}
}
-bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx)
{
- struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
- struct otx2_nic *pfvf = netdev_priv(netdev);
int offset, num_segs, free_desc;
struct nix_sqe_hdr_s *sqe_hdr;
+ struct otx2_nic *pfvf = dev;
/* Check if there is enough room between producer
* and consumer index.
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 3f1d2655ff77..e1db5f961877 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -167,7 +167,8 @@ static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr)
}
int otx2_napi_handler(struct napi_struct *napi, int budget);
-bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
+bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 99fcc5661674..839fc77c11b2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -21,6 +21,7 @@
static const struct pci_device_id otx2_vf_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_SDP_REP) },
{ }
};
@@ -371,7 +372,7 @@ static int otx2vf_open(struct net_device *netdev)
/* LBKs do not receive link events so tell everyone we are up here */
vf = netdev_priv(netdev);
- if (is_otx2_lbkvf(vf->pdev)) {
+ if (is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev)) {
pr_info("%s NIC Link is UP\n", netdev->name);
netif_carrier_on(netdev);
netif_tx_start_all_queues(netdev);
@@ -395,7 +396,7 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
sq = &vf->qset.sq[qidx];
txq = netdev_get_tx_queue(netdev, qidx);
- if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+ if (!otx2_sq_append_skb(vf, txq, sq, skb, qidx)) {
netif_tx_stop_queue(txq);
/* Check again, in case SQBs got freed up */
@@ -500,7 +501,7 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_setup_tc = otx2_setup_tc,
};
-static int otx2_wq_init(struct otx2_nic *vf)
+static int otx2_vf_wq_init(struct otx2_nic *vf)
{
vf->otx2_wq = create_singlethread_workqueue("otx2vf_wq");
if (!vf->otx2_wq)
@@ -671,6 +672,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(vf);
+ hw->max_mtu = netdev->max_mtu;
/* To distinguish, for LBK VFs set netdev name explicitly */
if (is_otx2_lbkvf(vf->pdev)) {
@@ -682,13 +684,22 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
}
+ if (is_otx2_sdp_rep(vf->pdev)) {
+ int n;
+
+ n = vf->pcifunc & RVU_PFVF_FUNC_MASK;
+ n -= 1;
+ snprintf(netdev->name, sizeof(netdev->name), "sdp%d-%d",
+ pdev->bus->number, n);
+ }
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
goto err_ptp_destroy;
}
- err = otx2_wq_init(vf);
+ err = otx2_vf_wq_init(vf);
if (err)
goto err_unreg_netdev;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
new file mode 100644
index 000000000000..232b10740c13
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
@@ -0,0 +1,864 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU representor driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/net_tstamp.h>
+#include <linux/sort.h>
+
+#include "otx2_common.h"
+#include "cn10k.h"
+#include "otx2_reg.h"
+#include "rep.h"
+
+#define DRV_NAME "rvu_rep"
+#define DRV_STRING "Marvell RVU Representor Driver"
+
+static const struct pci_device_id rvu_rep_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_RVU_REP) },
+ { }
+};
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, rvu_rep_id_table);
+
+static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event,
+ struct rep_event *data);
+
+static int rvu_rep_mcam_flow_init(struct rep_dev *rep)
+{
+ struct npc_mcam_alloc_entry_req *req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ struct otx2_nic *priv = rep->mdev;
+ int ent, allocated = 0;
+ int count;
+
+ rep->flow_cfg = kcalloc(1, sizeof(struct otx2_flow_config), GFP_KERNEL);
+
+ if (!rep->flow_cfg)
+ return -ENOMEM;
+
+ count = OTX2_DEFAULT_FLOWCOUNT;
+
+ rep->flow_cfg->flow_ent = kcalloc(count, sizeof(u16), GFP_KERNEL);
+ if (!rep->flow_cfg->flow_ent)
+ return -ENOMEM;
+
+ mutex_lock(&priv->mbox.lock);
+ while (allocated < count) {
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&priv->mbox);
+ if (!req)
+ goto exit;
+
+ req->hdr.pcifunc = rep->pcifunc;
+ req->contig = false;
+ req->ref_entry = 0;
+ req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
+ NPC_MAX_NONCONTIG_ENTRIES : count - allocated;
+
+ if (otx2_sync_mbox_msg(&priv->mbox))
+ goto exit;
+
+ rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+ (&priv->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp))
+ goto exit;
+
+ for (ent = 0; ent < rsp->count; ent++)
+ rep->flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
+
+ allocated += rsp->count;
+
+ if (rsp->count != req->count)
+ break;
+ }
+exit:
+ /* Multiple MCAM entry alloc requests could result in non-sequential
+ * MCAM entries in the flow_ent[] array. Sort them in an ascending
+ * order, otherwise user installed ntuple filter index and MCAM entry
+ * index will not be in sync.
+ */
+ if (allocated)
+ sort(&rep->flow_cfg->flow_ent[0], allocated,
+ sizeof(rep->flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);
+
+ mutex_unlock(&priv->mbox.lock);
+
+ rep->flow_cfg->max_flows = allocated;
+
+ if (allocated) {
+ rep->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ rep->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
+ rep->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
+ }
+
+ INIT_LIST_HEAD(&rep->flow_cfg->flow_list);
+ INIT_LIST_HEAD(&rep->flow_cfg->flow_list_tc);
+ return 0;
+}
+
+static int rvu_rep_setup_tc_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct rep_dev *rep = cb_priv;
+ struct otx2_nic *priv = rep->mdev;
+
+ if (!(rep->flags & RVU_REP_VF_INITIALIZED))
+ return -EINVAL;
+
+ if (!(rep->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
+ rvu_rep_mcam_flow_init(rep);
+
+ priv->netdev = rep->netdev;
+ priv->flags = rep->flags;
+ priv->pcifunc = rep->pcifunc;
+ priv->flow_cfg = rep->flow_cfg;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return otx2_setup_tc_cls_flower(priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(rvu_rep_block_cb_list);
+static int rvu_rep_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct rep_dev *rep = netdev_priv(netdev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return flow_block_cb_setup_simple(type_data,
+ &rvu_rep_block_cb_list,
+ rvu_rep_setup_tc_cb,
+ rep, rep, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+rvu_rep_sp_stats64(const struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct rep_dev *rep = netdev_priv(dev);
+ struct otx2_nic *priv = rep->mdev;
+ struct otx2_rcv_queue *rq;
+ struct otx2_snd_queue *sq;
+ u16 qidx = rep->rep_id;
+
+ otx2_update_rq_stats(priv, qidx);
+ rq = &priv->qset.rq[qidx];
+
+ otx2_update_sq_stats(priv, qidx);
+ sq = &priv->qset.sq[qidx];
+
+ stats->tx_bytes = sq->stats.bytes;
+ stats->tx_packets = sq->stats.pkts;
+ stats->rx_bytes = rq->stats.bytes;
+ stats->rx_packets = rq->stats.pkts;
+ return 0;
+}
+
+static bool
+rvu_rep_has_offload_stats(const struct net_device *dev, int attr_id)
+{
+ return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
+}
+
+static int
+rvu_rep_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
+{
+ if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
+ return rvu_rep_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);
+
+ return -EINVAL;
+}
+
+static int rvu_rep_dl_port_fn_hw_addr_get(struct devlink_port *port,
+ u8 *hw_addr, int *hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct rep_dev *rep = container_of(port, struct rep_dev, dl_port);
+
+ ether_addr_copy(hw_addr, rep->mac);
+ *hw_addr_len = ETH_ALEN;
+ return 0;
+}
+
+static int rvu_rep_dl_port_fn_hw_addr_set(struct devlink_port *port,
+ const u8 *hw_addr, int hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct rep_dev *rep = container_of(port, struct rep_dev, dl_port);
+ struct otx2_nic *priv = rep->mdev;
+ struct rep_event evt = {0};
+
+ eth_hw_addr_set(rep->netdev, hw_addr);
+ ether_addr_copy(rep->mac, hw_addr);
+
+ ether_addr_copy(evt.evt_data.mac, hw_addr);
+ evt.pcifunc = rep->pcifunc;
+ rvu_rep_notify_pfvf(priv, RVU_EVENT_MAC_ADDR_CHANGE, &evt);
+ return 0;
+}
+
+static const struct devlink_port_ops rvu_rep_dl_port_ops = {
+ .port_fn_hw_addr_get = rvu_rep_dl_port_fn_hw_addr_get,
+ .port_fn_hw_addr_set = rvu_rep_dl_port_fn_hw_addr_set,
+};
+
+static void
+rvu_rep_devlink_set_switch_id(struct otx2_nic *priv,
+ struct netdev_phys_item_id *ppid)
+{
+ struct pci_dev *pdev = priv->pdev;
+ u64 id;
+
+ id = pci_get_dsn(pdev);
+
+ ppid->id_len = sizeof(id);
+ put_unaligned_be64(id, &ppid->id);
+}
+
+static void rvu_rep_devlink_port_unregister(struct rep_dev *rep)
+{
+ devlink_port_unregister(&rep->dl_port);
+}
+
+static int rvu_rep_devlink_port_register(struct rep_dev *rep)
+{
+ struct devlink_port_attrs attrs = {};
+ struct otx2_nic *priv = rep->mdev;
+ struct devlink *dl = priv->dl->dl;
+ int err;
+
+ if (!(rep->pcifunc & RVU_PFVF_FUNC_MASK)) {
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = rvu_get_pf(rep->pcifunc);
+ } else {
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
+ attrs.pci_vf.pf = rvu_get_pf(rep->pcifunc);
+ attrs.pci_vf.vf = rep->pcifunc & RVU_PFVF_FUNC_MASK;
+ }
+
+ rvu_rep_devlink_set_switch_id(priv, &attrs.switch_id);
+ devlink_port_attrs_set(&rep->dl_port, &attrs);
+
+ err = devl_port_register_with_ops(dl, &rep->dl_port, rep->rep_id,
+ &rvu_rep_dl_port_ops);
+ if (err) {
+ dev_err(rep->mdev->dev, "devlink_port_register failed: %d\n",
+ err);
+ return err;
+ }
+ return 0;
+}
+
+static int rvu_rep_get_repid(struct otx2_nic *priv, u16 pcifunc)
+{
+ int rep_id;
+
+ for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++)
+ if (priv->rep_pf_map[rep_id] == pcifunc)
+ return rep_id;
+ return -EINVAL;
+}
+
+static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event,
+ struct rep_event *data)
+{
+ struct rep_event *req;
+
+ mutex_lock(&priv->mbox.lock);
+ req = otx2_mbox_alloc_msg_rep_event_notify(&priv->mbox);
+ if (!req) {
+ mutex_unlock(&priv->mbox.lock);
+ return -ENOMEM;
+ }
+ req->event = event;
+ req->pcifunc = data->pcifunc;
+
+ memcpy(&req->evt_data, &data->evt_data, sizeof(struct rep_evt_data));
+ otx2_sync_mbox_msg(&priv->mbox);
+ mutex_unlock(&priv->mbox.lock);
+ return 0;
+}
+
+static void rvu_rep_state_evt_handler(struct otx2_nic *priv,
+ struct rep_event *info)
+{
+ struct rep_dev *rep;
+ int rep_id;
+
+ rep_id = rvu_rep_get_repid(priv, info->pcifunc);
+ if (rep_id < 0)
+ return;
+ rep = priv->reps[rep_id];
+ if (info->evt_data.vf_state)
+ rep->flags |= RVU_REP_VF_INITIALIZED;
+ else
+ rep->flags &= ~RVU_REP_VF_INITIALIZED;
+}
+
+int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info)
+{
+ if (info->event & RVU_EVENT_PFVF_STATE)
+ rvu_rep_state_evt_handler(pf, info);
+ return 0;
+}
+
+static int rvu_rep_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct rep_dev *rep = netdev_priv(dev);
+ struct otx2_nic *priv = rep->mdev;
+ struct rep_event evt = {0};
+
+ netdev_info(dev, "Changing MTU from %d to %d\n",
+ dev->mtu, new_mtu);
+ WRITE_ONCE(dev->mtu, new_mtu);
+
+ evt.evt_data.mtu = new_mtu;
+ evt.pcifunc = rep->pcifunc;
+ rvu_rep_notify_pfvf(priv, RVU_EVENT_MTU_CHANGE, &evt);
+ return 0;
+}
+
+static void rvu_rep_get_stats(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct nix_stats_req *req;
+ struct nix_stats_rsp *rsp;
+ struct rep_stats *stats;
+ struct otx2_nic *priv;
+ struct rep_dev *rep;
+ int err;
+
+ rep = container_of(del_work, struct rep_dev, stats_wrk);
+ priv = rep->mdev;
+
+ mutex_lock(&priv->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_lf_stats(&priv->mbox);
+ if (!req) {
+ mutex_unlock(&priv->mbox.lock);
+ return;
+ }
+ req->pcifunc = rep->pcifunc;
+ err = otx2_sync_mbox_msg_busy_poll(&priv->mbox);
+ if (err)
+ goto exit;
+
+ rsp = (struct nix_stats_rsp *)
+ otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr);
+
+ if (IS_ERR(rsp)) {
+ err = PTR_ERR(rsp);
+ goto exit;
+ }
+
+ stats = &rep->stats;
+ stats->rx_bytes = rsp->rx.octs;
+ stats->rx_frames = rsp->rx.ucast + rsp->rx.bcast +
+ rsp->rx.mcast;
+ stats->rx_drops = rsp->rx.drop;
+ stats->rx_mcast_frames = rsp->rx.mcast;
+ stats->tx_bytes = rsp->tx.octs;
+ stats->tx_frames = rsp->tx.ucast + rsp->tx.bcast + rsp->tx.mcast;
+ stats->tx_drops = rsp->tx.drop;
+exit:
+ mutex_unlock(&priv->mbox.lock);
+}
+
+static void rvu_rep_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct rep_dev *rep = netdev_priv(dev);
+
+ if (!(rep->flags & RVU_REP_VF_INITIALIZED))
+ return;
+
+ stats->rx_packets = rep->stats.rx_frames;
+ stats->rx_bytes = rep->stats.rx_bytes;
+ stats->rx_dropped = rep->stats.rx_drops;
+ stats->multicast = rep->stats.rx_mcast_frames;
+
+ stats->tx_packets = rep->stats.tx_frames;
+ stats->tx_bytes = rep->stats.tx_bytes;
+ stats->tx_dropped = rep->stats.tx_drops;
+
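+ /* ndo_get_stats64 must not sleep, but refreshing the counters
+ * needs a (sleeping) AF mailbox exchange; report the cached stats
+ * and kick the delayed work to refresh them in the background.
+ */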
+ schedule_delayed_work(&rep->stats_wrk, msecs_to_jiffies(100));
+}
+
+static int rvu_eswitch_config(struct otx2_nic *priv, u8 ena)
+{
+ struct esw_cfg_req *req;
+
+ mutex_lock(&priv->mbox.lock);
+ req = otx2_mbox_alloc_msg_esw_cfg(&priv->mbox);
+ if (!req) {
+ mutex_unlock(&priv->mbox.lock);
+ return -ENOMEM;
+ }
+ req->ena = ena;
+ otx2_sync_mbox_msg(&priv->mbox);
+ mutex_unlock(&priv->mbox.lock);
+ return 0;
+}
+
+static netdev_tx_t rvu_rep_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rep_dev *rep = netdev_priv(dev);
+ struct otx2_nic *pf = rep->mdev;
+ struct otx2_snd_queue *sq;
+ struct netdev_queue *txq;
+
+ sq = &pf->qset.sq[rep->rep_id];
+ txq = netdev_get_tx_queue(dev, 0);
+
+ if (!otx2_sq_append_skb(pf, txq, sq, skb, rep->rep_id)) {
+ netif_tx_stop_queue(txq);
+
+ /* Check again, in case SQBs got freed up */
+ smp_mb();
+ if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
+ > sq->sqe_thresh)
+ netif_tx_wake_queue(txq);
+
+ return NETDEV_TX_BUSY;
+ }
+ return NETDEV_TX_OK;
+}
+
+static int rvu_rep_open(struct net_device *dev)
+{
+ struct rep_dev *rep = netdev_priv(dev);
+ struct otx2_nic *priv = rep->mdev;
+ struct rep_event evt = {0};
+
+ if (!(rep->flags & RVU_REP_VF_INITIALIZED))
+ return 0;
+
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+
+ evt.event = RVU_EVENT_PORT_STATE;
+ evt.evt_data.port_state = 1;
+ evt.pcifunc = rep->pcifunc;
+ rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt);
+ return 0;
+}
+
+static int rvu_rep_stop(struct net_device *dev)
+{
+ struct rep_dev *rep = netdev_priv(dev);
+ struct otx2_nic *priv = rep->mdev;
+ struct rep_event evt = {0};
+
+ if (!(rep->flags & RVU_REP_VF_INITIALIZED))
+ return 0;
+
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+
+ evt.event = RVU_EVENT_PORT_STATE;
+ evt.pcifunc = rep->pcifunc;
+ rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt);
+ return 0;
+}
+
+static const struct net_device_ops rvu_rep_netdev_ops = {
+ .ndo_open = rvu_rep_open,
+ .ndo_stop = rvu_rep_stop,
+ .ndo_start_xmit = rvu_rep_xmit,
+ .ndo_get_stats64 = rvu_rep_get_stats64,
+ .ndo_change_mtu = rvu_rep_change_mtu,
+ .ndo_has_offload_stats = rvu_rep_has_offload_stats,
+ .ndo_get_offload_stats = rvu_rep_get_offload_stats,
+ .ndo_setup_tc = rvu_rep_setup_tc,
+};
+
+static int rvu_rep_napi_init(struct otx2_nic *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_qset *qset = &priv->qset;
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_hw *hw = &priv->hw;
+ int err = 0, qidx, vec;
+ char *irq_name;
+
+ qset->napi = kcalloc(hw->cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
+ if (!qset->napi)
+ return -ENOMEM;
+
+ /* Register NAPI handler */
+ for (qidx = 0; qidx < hw->cint_cnt; qidx++) {
+ cq_poll = &qset->napi[qidx];
+ cq_poll->cint_idx = qidx;
+ cq_poll->cq_ids[CQ_RX] =
+ (qidx < hw->rx_queues) ? qidx : CINT_INVALID_CQ;
+ cq_poll->cq_ids[CQ_TX] = (qidx < hw->tx_queues) ?
+ qidx + hw->rx_queues :
+ CINT_INVALID_CQ;
+ cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
+ cq_poll->cq_ids[CQ_QOS] = CINT_INVALID_CQ;
+
+ cq_poll->dev = (void *)priv;
+ netif_napi_add(priv->reps[qidx]->netdev, &cq_poll->napi,
+ otx2_napi_handler);
+ napi_enable(&cq_poll->napi);
+ }
+ /* Register CQ IRQ handlers */
+ vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
+ for (qidx = 0; qidx < hw->cint_cnt; qidx++) {
+ irq_name = &hw->irq_name[vec * NAME_SIZE];
+
+ snprintf(irq_name, NAME_SIZE, "rep%d-rxtx-%d", qidx, qidx);
+
+ err = request_irq(pci_irq_vector(priv->pdev, vec),
+ otx2_cq_intr_handler, 0, irq_name,
+ &qset->napi[qidx]);
+ if (err) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "RVU REP IRQ registration failed for CQ%d",
+ qidx);
+ goto err_free_cints;
+ }
+ vec++;
+
+ /* Enable CQ IRQ */
+ otx2_write64(priv, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
+ otx2_write64(priv, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
+ }
+ priv->flags &= ~OTX2_FLAG_INTF_DOWN;
+ return 0;
+
+err_free_cints:
+ otx2_free_cints(priv, qidx);
+ otx2_disable_napi(priv);
+ return err;
+}
+
+static void rvu_rep_free_cq_rsrc(struct otx2_nic *priv)
+{
+ struct otx2_qset *qset = &priv->qset;
+ struct otx2_cq_poll *cq_poll = NULL;
+ int qidx, vec;
+
+ /* Cleanup CQ NAPI and IRQ */
+ vec = priv->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
+ for (qidx = 0; qidx < priv->hw.cint_cnt; qidx++) {
+ /* Disable interrupt */
+ otx2_write64(priv, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
+
+ synchronize_irq(pci_irq_vector(priv->pdev, vec));
+
+ cq_poll = &qset->napi[qidx];
+ napi_synchronize(&cq_poll->napi);
+ vec++;
+ }
+ otx2_free_cints(priv, priv->hw.cint_cnt);
+ otx2_disable_napi(priv);
+}
+
+static void rvu_rep_rsrc_free(struct otx2_nic *priv)
+{
+ struct otx2_qset *qset = &priv->qset;
+ struct delayed_work *work;
+ int wrk;
+
+ for (wrk = 0; wrk < priv->qset.cq_cnt; wrk++) {
+ work = &priv->refill_wrk[wrk].pool_refill_work;
+ cancel_delayed_work_sync(work);
+ }
+ devm_kfree(priv->dev, priv->refill_wrk);
+
+ otx2_free_hw_resources(priv);
+ otx2_free_queue_mem(qset);
+}
+
+static int rvu_rep_rsrc_init(struct otx2_nic *priv)
+{
+ struct otx2_qset *qset = &priv->qset;
+ int err;
+
+ err = otx2_alloc_queue_mem(priv);
+ if (err)
+ return err;
+
+ priv->hw.max_mtu = otx2_get_max_mtu(priv);
+ priv->tx_max_pktlen = priv->hw.max_mtu + OTX2_ETH_HLEN;
+ priv->rbsize = ALIGN(priv->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;
+
+ err = otx2_init_hw_resources(priv);
+ if (err)
+ goto err_free_rsrc;
+
+ /* Set maximum frame size allowed in HW */
+ err = otx2_hw_set_mtu(priv, priv->hw.max_mtu);
+ if (err) {
+ dev_err(priv->dev, "Failed to set HW MTU\n");
+ goto err_free_rsrc;
+ }
+ return 0;
+
+err_free_rsrc:
+ otx2_free_hw_resources(priv);
+ otx2_free_queue_mem(qset);
+ return err;
+}
+
+void rvu_rep_destroy(struct otx2_nic *priv)
+{
+ struct rep_dev *rep;
+ int rep_id;
+
+ rvu_eswitch_config(priv, false);
+ priv->flags |= OTX2_FLAG_INTF_DOWN;
+ rvu_rep_free_cq_rsrc(priv);
+ for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++) {
+ rep = priv->reps[rep_id];
+ unregister_netdev(rep->netdev);
+ rvu_rep_devlink_port_unregister(rep);
+ free_netdev(rep->netdev);
+ kfree(rep->flow_cfg);
+ }
+ kfree(priv->reps);
+ rvu_rep_rsrc_free(priv);
+}
+
+int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack)
+{
+ int rep_cnt = priv->rep_cnt;
+ struct net_device *ndev;
+ struct rep_dev *rep;
+ int rep_id, err;
+ u16 pcifunc;
+
+ err = rvu_rep_rsrc_init(priv);
+ if (err)
+ return -ENOMEM;
+
+ priv->reps = kcalloc(rep_cnt, sizeof(struct rep_dev *), GFP_KERNEL);
+ if (!priv->reps)
+ return -ENOMEM;
+
+ for (rep_id = 0; rep_id < rep_cnt; rep_id++) {
+ ndev = alloc_etherdev(sizeof(*rep));
+ if (!ndev) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "PFVF representor:%d creation failed",
+ rep_id);
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ rep = netdev_priv(ndev);
+ priv->reps[rep_id] = rep;
+ rep->mdev = priv;
+ rep->netdev = ndev;
+ rep->rep_id = rep_id;
+
+ ndev->min_mtu = OTX2_MIN_MTU;
+ ndev->max_mtu = priv->hw.max_mtu;
+ ndev->netdev_ops = &rvu_rep_netdev_ops;
+ pcifunc = priv->rep_pf_map[rep_id];
+ rep->pcifunc = pcifunc;
+
+ snprintf(ndev->name, sizeof(ndev->name), "Rpf%dvf%d",
+ rvu_get_pf(pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK));
+
+ ndev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);
+
+ ndev->hw_features |= NETIF_F_HW_TC;
+ ndev->features |= ndev->hw_features;
+ eth_hw_addr_random(ndev);
+ err = rvu_rep_devlink_port_register(rep);
+ if (err)
+ goto exit;
+
+ SET_NETDEV_DEVLINK_PORT(ndev, &rep->dl_port);
+ err = register_netdev(ndev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "PFVF representor registration failed");
+ free_netdev(ndev);
+ goto exit;
+ }
+
+ INIT_DELAYED_WORK(&rep->stats_wrk, rvu_rep_get_stats);
+ }
+ err = rvu_rep_napi_init(priv, extack);
+ if (err)
+ goto exit;
+
+ rvu_eswitch_config(priv, true);
+ return 0;
+exit:
+ while (--rep_id >= 0) {
+ rep = priv->reps[rep_id];
+ unregister_netdev(rep->netdev);
+ rvu_rep_devlink_port_unregister(rep);
+ free_netdev(rep->netdev);
+ }
+ kfree(priv->reps);
+ rvu_rep_rsrc_free(priv);
+ return err;
+}
+
+static int rvu_get_rep_cnt(struct otx2_nic *priv)
+{
+ struct get_rep_cnt_rsp *rsp;
+ struct mbox_msghdr *msghdr;
+ struct msg_req *req;
+ int err, rep;
+
+ mutex_lock(&priv->mbox.lock);
+ req = otx2_mbox_alloc_msg_get_rep_cnt(&priv->mbox);
+ if (!req) {
+ mutex_unlock(&priv->mbox.lock);
+ return -ENOMEM;
+ }
+ err = otx2_sync_mbox_msg(&priv->mbox);
+ if (err)
+ goto exit;
+
+ msghdr = otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(msghdr)) {
+ err = PTR_ERR(msghdr);
+ goto exit;
+ }
+
+ rsp = (struct get_rep_cnt_rsp *)msghdr;
+ priv->hw.tx_queues = rsp->rep_cnt;
+ priv->hw.rx_queues = rsp->rep_cnt;
+ priv->rep_cnt = rsp->rep_cnt;
+ for (rep = 0; rep < priv->rep_cnt; rep++)
+ priv->rep_pf_map[rep] = rsp->rep_pf_map[rep];
+
+exit:
+ mutex_unlock(&priv->mbox.lock);
+ return err;
+}
+
+static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct otx2_nic *priv;
+ struct otx2_hw *hw;
+ int err;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "DMA mask config failed, abort\n");
+ goto err_release_regions;
+ }
+
+ pci_set_master(pdev);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ pci_set_drvdata(pdev, priv);
+ priv->pdev = pdev;
+ priv->dev = dev;
+ priv->flags |= OTX2_FLAG_INTF_DOWN;
+ priv->flags |= OTX2_FLAG_REP_MODE_ENABLED;
+
+ hw = &priv->hw;
+ hw->pdev = pdev;
+ hw->max_queues = OTX2_MAX_CQ_CNT;
+ hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+ hw->xqe_size = 128;
+
+ err = otx2_init_rsrc(pdev, priv);
+ if (err)
+ goto err_release_regions;
+
+ priv->iommu_domain = iommu_get_domain_for_dev(dev);
+
+ err = rvu_get_rep_cnt(priv);
+ if (err)
+ goto err_detach_rsrc;
+
+ err = otx2_register_dl(priv);
+ if (err)
+ goto err_detach_rsrc;
+
+ return 0;
+
+err_detach_rsrc:
+ if (priv->hw.lmt_info)
+ free_percpu(priv->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &priv->hw.cap_flag))
+ qmem_free(priv->dev, priv->dync_lmt);
+ otx2_detach_resources(&priv->mbox);
+ otx2_disable_mbox_intr(priv);
+ otx2_pfaf_mbox_destroy(priv);
+ pci_free_irq_vectors(pdev);
+err_release_regions:
+ pci_set_drvdata(pdev, NULL);
+ pci_release_regions(pdev);
+ return err;
+}
+
+static void rvu_rep_remove(struct pci_dev *pdev)
+{
+ struct otx2_nic *priv = pci_get_drvdata(pdev);
+
+ otx2_unregister_dl(priv);
+ if (!(priv->flags & OTX2_FLAG_INTF_DOWN))
+ rvu_rep_destroy(priv);
+ otx2_detach_resources(&priv->mbox);
+ if (priv->hw.lmt_info)
+ free_percpu(priv->hw.lmt_info);
+ if (test_bit(CN10K_LMTST, &priv->hw.cap_flag))
+ qmem_free(priv->dev, priv->dync_lmt);
+ otx2_disable_mbox_intr(priv);
+ otx2_pfaf_mbox_destroy(priv);
+ pci_free_irq_vectors(priv->pdev);
+ pci_set_drvdata(pdev, NULL);
+ pci_release_regions(pdev);
+}
+
+static struct pci_driver rvu_rep_driver = {
+ .name = DRV_NAME,
+ .id_table = rvu_rep_id_table,
+ .probe = rvu_rep_probe,
+ .remove = rvu_rep_remove,
+ .shutdown = rvu_rep_remove,
+};
+
+static int __init rvu_rep_init_module(void)
+{
+ return pci_register_driver(&rvu_rep_driver);
+}
+
+static void __exit rvu_rep_cleanup_module(void)
+{
+ pci_unregister_driver(&rvu_rep_driver);
+}
+
+module_init(rvu_rep_init_module);
+module_exit(rvu_rep_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.h b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h
new file mode 100644
index 000000000000..38446b3e4f13
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU REPRESENTOR driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef REP_H
+#define REP_H
+
+#include <linux/pci.h>
+
+#include "otx2_reg.h"
+#include "otx2_txrx.h"
+#include "otx2_common.h"
+
+#define PCI_DEVID_RVU_REP 0xA0E0
+
+#define RVU_MAX_REP OTX2_MAX_CQ_CNT
+
+struct rep_stats {
+ u64 rx_bytes;
+ u64 rx_frames;
+ u64 rx_drops;
+ u64 rx_mcast_frames;
+
+ u64 tx_bytes;
+ u64 tx_frames;
+ u64 tx_drops;
+};
+
+struct rep_dev {
+ struct otx2_nic *mdev;
+ struct net_device *netdev;
+ struct rep_stats stats;
+ struct delayed_work stats_wrk;
+ struct devlink_port dl_port;
+ struct otx2_flow_config *flow_cfg;
+#define RVU_REP_VF_INITIALIZED BIT_ULL(0)
+ u64 flags;
+ u16 rep_id;
+ u16 pcifunc;
+ u8 mac[ETH_ALEN];
+};
+
+static inline bool otx2_rep_dev(struct pci_dev *pdev)
+{
+ return pdev->device == PCI_DEVID_RVU_REP;
+}
+
+int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack);
+void rvu_rep_destroy(struct otx2_nic *priv);
+int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info);
+#endif /* REP_H */
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 1a59c952aa01..fe38426ec42d 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1579,7 +1579,7 @@ MODULE_DEVICE_TABLE(of, pxa168_eth_of_match);
static struct platform_driver pxa168_eth_driver = {
.probe = pxa168_eth_probe,
- .remove_new = pxa168_eth_remove,
+ .remove = pxa168_eth_remove,
.shutdown = pxa168_eth_shutdown,
.resume = pxa168_eth_resume,
.suspend = pxa168_eth_suspend,
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index fcfb34561882..25bf6ec44289 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -484,8 +484,7 @@ static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- skge_stats[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, skge_stats[i].name);
break;
}
}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index a7a16eac1891..3914cd9210d4 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -3800,8 +3800,7 @@ static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- sky2_stats[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, sky2_stats[i].name);
break;
}
}
diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
index 2c26eb185283..6c683a12d5aa 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -554,7 +554,7 @@
#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
#define REG_EGRESS_RATE_METER_CFG 0x100c
-#define EGRESS_RATE_METER_EN_MASK BIT(29)
+#define EGRESS_RATE_METER_EN_MASK BIT(31)
#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
@@ -752,11 +752,9 @@ struct airoha_tx_irq_queue {
struct airoha_qdma *qdma;
struct napi_struct napi;
- u32 *q;
int size;
- int queued;
- u16 head;
+ u32 *q;
};
struct airoha_hw_stats {
@@ -1116,17 +1114,23 @@ static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
}
+static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth)
+{
+ u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
+
+ return FIELD_GET(PSE_ALLRSV_MASK, val);
+}
+
static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
u32 port, u32 queue, u32 val)
{
- u32 orig_val, tmp, all_rsv, fq_limit;
+ u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
+ u32 tmp, all_rsv, fq_limit;
airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);
/* modify all rsv */
- orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
- tmp = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
- all_rsv = FIELD_GET(PSE_ALLRSV_MASK, tmp);
+ all_rsv = airoha_fe_get_pse_all_rsv(eth);
all_rsv += (val - orig_val);
airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
@@ -1166,11 +1170,13 @@ static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
[FE_PSE_PORT_GDM4] = 2,
[FE_PSE_PORT_CDM5] = 2,
};
+ u32 all_rsv;
int q;
+ all_rsv = airoha_fe_get_pse_all_rsv(eth);
/* hw misses PPE2 oq rsv */
- airoha_fe_set(eth, REG_FE_PSE_BUF_SET,
- PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2]);
+ all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2];
+ airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);
/* CMD1 */
for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
@@ -1363,7 +1369,8 @@ static int airoha_fe_init(struct airoha_eth *eth)
airoha_fe_set(eth, REG_GDM_MISC_CFG,
GDM2_RDM_ACK_WAIT_PREF_MASK |
GDM2_CHN_VLD_MODE_MASK);
- airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK, 15);
+ airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK,
+ FIELD_PREP(CDM2_OAM_QSEL_MASK, 15));
/* init fragment and assemble Force Port */
/* NPU Core-3, NPU Bridge Channel-3 */
@@ -1647,30 +1654,38 @@ static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
{
struct airoha_tx_irq_queue *irq_q;
+ int id, done = 0, irq_queued;
struct airoha_qdma *qdma;
struct airoha_eth *eth;
- int id, done = 0;
+ u32 status, head;
irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
qdma = irq_q->qdma;
id = irq_q - &qdma->q_tx_irq[0];
eth = qdma->eth;
- while (irq_q->queued > 0 && done < budget) {
- u32 qid, last, val = irq_q->q[irq_q->head];
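+ /* Read the completion ring head and the number of pending entries
+ * straight from the IRQ status register, instead of relying on the
+ * values cached by the IRQ handler, so the poll loop always sees
+ * the current hardware state.
+ */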
+ status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id));
+ head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
+ head = head % irq_q->size;
+ irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
+
+ while (irq_queued > 0 && done < budget) {
+ u32 qid, val = irq_q->q[head];
+ struct airoha_qdma_desc *desc;
+ struct airoha_queue_entry *e;
struct airoha_queue *q;
+ u32 index, desc_ctrl;
+ struct sk_buff *skb;
if (val == 0xff)
break;
- irq_q->q[irq_q->head] = 0xff; /* mark as done */
- irq_q->head = (irq_q->head + 1) % irq_q->size;
- irq_q->queued--;
+ irq_q->q[head] = 0xff; /* mark as done */
+ head = (head + 1) % irq_q->size;
+ irq_queued--;
done++;
- last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
-
if (qid >= ARRAY_SIZE(qdma->q_tx))
continue;
@@ -1678,44 +1693,53 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
if (!q->ndesc)
continue;
+ index = FIELD_GET(IRQ_DESC_IDX_MASK, val);
+ if (index >= q->ndesc)
+ continue;
+
spin_lock_bh(&q->lock);
- while (q->queued > 0) {
- struct airoha_qdma_desc *desc = &q->desc[q->tail];
- struct airoha_queue_entry *e = &q->entry[q->tail];
- u32 desc_ctrl = le32_to_cpu(desc->ctrl);
- struct sk_buff *skb = e->skb;
- u16 index = q->tail;
+ if (!q->queued)
+ goto unlock;
- if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
- !(desc_ctrl & QDMA_DESC_DROP_MASK))
- break;
+ desc = &q->desc[index];
+ desc_ctrl = le32_to_cpu(desc->ctrl);
- q->tail = (q->tail + 1) % q->ndesc;
- q->queued--;
+ if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
+ !(desc_ctrl & QDMA_DESC_DROP_MASK))
+ goto unlock;
- dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
- DMA_TO_DEVICE);
+ e = &q->entry[index];
+ skb = e->skb;
- WRITE_ONCE(desc->msg0, 0);
- WRITE_ONCE(desc->msg1, 0);
+ dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
+ DMA_TO_DEVICE);
+ memset(e, 0, sizeof(*e));
+ WRITE_ONCE(desc->msg0, 0);
+ WRITE_ONCE(desc->msg1, 0);
+ q->queued--;
- if (skb) {
- struct netdev_queue *txq;
+ /* The completion ring can report out-of-order indexes if hw QoS
+ * is enabled and packets with different priorities are queued
+ * to the same DMA ring. Take possible out-of-order reports into
+ * account when incrementing the DMA ring tail pointer.
+ */
+ while (q->tail != q->head && !q->entry[q->tail].dma_addr)
+ q->tail = (q->tail + 1) % q->ndesc;
- txq = netdev_get_tx_queue(skb->dev, qid);
- if (netif_tx_queue_stopped(txq) &&
- q->ndesc - q->queued >= q->free_thr)
- netif_tx_wake_queue(txq);
+ if (skb) {
+ u16 queue = skb_get_queue_mapping(skb);
+ struct netdev_queue *txq;
- dev_kfree_skb_any(skb);
- e->skb = NULL;
- }
+ txq = netdev_get_tx_queue(skb->dev, queue);
+ netdev_tx_completed_queue(txq, 1, skb->len);
+ if (netif_tx_queue_stopped(txq) &&
+ q->ndesc - q->queued >= q->free_thr)
+ netif_tx_wake_queue(txq);
- if (index == last)
- break;
+ dev_kfree_skb_any(skb);
}
-
+unlock:
spin_unlock_bh(&q->lock);
}
@@ -2015,20 +2039,11 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
if (intr[0] & INT_TX_MASK) {
for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
- struct airoha_tx_irq_queue *irq_q = &qdma->q_tx_irq[i];
- u32 status, head;
-
if (!(intr[0] & TX_DONE_INT_MASK(i)))
continue;
airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
TX_DONE_INT_MASK(i));
-
- status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(i));
- head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
- irq_q->head = head % irq_q->size;
- irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
-
napi_schedule(&qdma->q_tx_irq[i].napi);
}
}
@@ -2331,7 +2346,7 @@ static int airoha_dev_stop(struct net_device *dev)
{
struct airoha_gdm_port *port = netdev_priv(dev);
struct airoha_qdma *qdma = port->qdma;
- int err;
+ int i, err;
netif_tx_disable(dev);
err = airoha_set_gdm_ports(qdma->eth, false);
@@ -2342,6 +2357,14 @@ static int airoha_dev_stop(struct net_device *dev)
GLOBAL_CFG_TX_DMA_EN_MASK |
GLOBAL_CFG_RX_DMA_EN_MASK);
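+ /* Drain any TX descriptors still in flight and reset per-queue
+ * BQL state so a subsequent open starts from clean rings.
+ */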
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ if (!qdma->q_tx[i].ndesc)
+ continue;
+
+ airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
+ netdev_tx_reset_subqueue(dev, i);
+ }
+
return 0;
}
@@ -2479,7 +2502,9 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
q->queued += i;
skb_tx_timestamp(skb);
- if (!netdev_xmit_more())
+ netdev_tx_sent_queue(txq, skb->len);
+
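+ /* BQL may have stopped the queue inside netdev_tx_sent_queue();
+ * if so, ring the doorbell even when xmit_more is set, since no
+ * further xmit call will arrive to flush the ring.
+ */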
+ if (netif_xmit_stopped(txq) || !netdev_xmit_more())
airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
TX_RING_CPU_IDX_MASK,
FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
@@ -2780,7 +2805,7 @@ MODULE_DEVICE_TABLE(of, of_airoha_match);
static struct platform_driver airoha_driver = {
.probe = airoha_probe,
- .remove_new = airoha_remove,
+ .remove = airoha_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = of_airoha_match,
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index ed7313c10a05..53485142938c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2227,7 +2227,7 @@ rx_done:
eth->rx_bytes += bytes;
dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
&dim_sample);
- net_dim(&eth->rx_dim, dim_sample);
+ net_dim(&eth->rx_dim, &dim_sample);
if (xdp_flush)
xdp_do_flush();
@@ -2377,7 +2377,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
&dim_sample);
- net_dim(&eth->tx_dim, dim_sample);
+ net_dim(&eth->tx_dim, &dim_sample);
if (mtk_queue_stopped(eth) &&
(atomic_read(&ring->free_count) > ring->thresh))
@@ -4329,10 +4329,8 @@ static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
case ETH_SS_STATS: {
struct mtk_mac *mac = netdev_priv(dev);
- for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
- memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
+ ethtool_puts(&data, mtk_ethtool_stats[i].str);
if (mtk_page_pool_enabled(mac->hw))
page_pool_ethtool_stats_get_strings(data);
break;
@@ -5358,7 +5356,7 @@ MODULE_DEVICE_TABLE(of, of_mtk_match);
static struct platform_driver mtk_driver = {
.probe = mtk_probe,
- .remove_new = mtk_remove,
+ .remove = mtk_remove,
.driver = {
.name = "mtk_soc_eth",
.of_match_table = of_mtk_match,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 461cc2c79c71..0e92956e84cf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -156,7 +156,8 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
break;
case RX:
cq->mcq.comp = mlx4_en_rx_irq;
- netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq);
+ netif_napi_add_config(cq->dev, &cq->napi, mlx4_en_poll_rx_cq,
+ cq_idx);
netif_napi_set_irq(&cq->napi, irq);
napi_enable(&cq->napi);
netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_RX, &cq->napi);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 5912f7e614f9..be3d0876c521 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -109,35 +109,48 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
en_accel/ktls_tx.o en_accel/ktls_rx.o
-mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \
- steering/dr_matcher.o steering/dr_rule.o \
- steering/dr_icm_pool.o steering/dr_buddy.o \
- steering/dr_ste.o steering/dr_send.o \
- steering/dr_ste_v0.o steering/dr_ste_v1.o \
- steering/dr_ste_v2.o \
- steering/dr_cmd.o steering/dr_fw.o \
- steering/dr_action.o steering/fs_dr.o \
- steering/dr_definer.o steering/dr_ptrn.o \
- steering/dr_arg.o steering/dr_dbg.o lib/smfs.o
+#
+# SW Steering
+#
+mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/sws/dr_domain.o \
+ steering/sws/dr_table.o \
+ steering/sws/dr_matcher.o \
+ steering/sws/dr_rule.o \
+ steering/sws/dr_icm_pool.o \
+ steering/sws/dr_buddy.o \
+ steering/sws/dr_ste.o \
+ steering/sws/dr_send.o \
+ steering/sws/dr_ste_v0.o \
+ steering/sws/dr_ste_v1.o \
+ steering/sws/dr_ste_v2.o \
+ steering/sws/dr_cmd.o \
+ steering/sws/dr_fw.o \
+ steering/sws/dr_action.o \
+ steering/sws/dr_definer.o \
+ steering/sws/dr_ptrn.o \
+ steering/sws/dr_arg.o \
+ steering/sws/dr_dbg.o \
+ steering/sws/fs_dr.o \
+ lib/smfs.o
#
# HW Steering
#
-mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/mlx5hws_cmd.o \
- steering/hws/mlx5hws_context.o \
- steering/hws/mlx5hws_pat_arg.o \
- steering/hws/mlx5hws_buddy.o \
- steering/hws/mlx5hws_pool.o \
- steering/hws/mlx5hws_table.o \
- steering/hws/mlx5hws_action.o \
- steering/hws/mlx5hws_rule.o \
- steering/hws/mlx5hws_matcher.o \
- steering/hws/mlx5hws_send.o \
- steering/hws/mlx5hws_definer.o \
- steering/hws/mlx5hws_bwc.o \
- steering/hws/mlx5hws_debug.o \
- steering/hws/mlx5hws_vport.o \
- steering/hws/mlx5hws_bwc_complex.o
+mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/cmd.o \
+ steering/hws/context.o \
+ steering/hws/pat_arg.o \
+ steering/hws/buddy.o \
+ steering/hws/pool.o \
+ steering/hws/table.o \
+ steering/hws/action.o \
+ steering/hws/rule.o \
+ steering/hws/matcher.o \
+ steering/hws/send.o \
+ steering/hws/definer.o \
+ steering/hws/bwc.o \
+ steering/hws/debug.o \
+ steering/hws/vport.o \
+ steering/hws/bwc_complex.o
#
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 4caa1b6f40ba..1fd403713baf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -71,6 +71,7 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
{
unsigned long flags;
struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
+ bool schedule_tasklet = false;
spin_lock_irqsave(&tasklet_ctx->lock, flags);
/* When migrating CQs between EQs will be implemented, please note
@@ -80,9 +81,19 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
*/
if (list_empty_careful(&cq->tasklet_ctx.list)) {
mlx5_cq_hold(cq);
+ /* If the tasklet CQ work list isn't empty, mlx5_cq_tasklet_cb()
+ * is scheduled/running and hasn't processed the list yet, so it
+ * will see this added CQ when it runs. If the list is empty,
+ * the tasklet needs to be scheduled to pick up the CQ. The
+ * spinlock avoids any race with the tasklet accessing the list.
+ */
+ schedule_tasklet = list_empty(&tasklet_ctx->list);
list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
}
spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
+
+ if (schedule_tasklet)
+ tasklet_schedule(&tasklet_ctx->task);
}
/* Callers must verify outbox status in case of err */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
index 904e08de852e..31142f6cc372 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -166,9 +166,90 @@ static int mlx5_dpll_device_mode_get(const struct dpll_device *dpll,
return 0;
}
+enum {
+ MLX5_DPLL_SSM_CODE_PRC = 0b0010,
+ MLX5_DPLL_SSM_CODE_SSU_A = 0b0100,
+ MLX5_DPLL_SSM_CODE_SSU_B = 0b1000,
+ MLX5_DPLL_SSM_CODE_EEC1 = 0b1011,
+ MLX5_DPLL_SSM_CODE_PRTC = 0b0010,
+ MLX5_DPLL_SSM_CODE_EPRTC = 0b0010,
+ MLX5_DPLL_SSM_CODE_EEEC = 0b1011,
+ MLX5_DPLL_SSM_CODE_EPRC = 0b0010,
+};
+
+enum {
+ MLX5_DPLL_ENHANCED_SSM_CODE_PRC = 0xff,
+ MLX5_DPLL_ENHANCED_SSM_CODE_SSU_A = 0xff,
+ MLX5_DPLL_ENHANCED_SSM_CODE_SSU_B = 0xff,
+ MLX5_DPLL_ENHANCED_SSM_CODE_EEC1 = 0xff,
+ MLX5_DPLL_ENHANCED_SSM_CODE_PRTC = 0x20,
+ MLX5_DPLL_ENHANCED_SSM_CODE_EPRTC = 0x21,
+ MLX5_DPLL_ENHANCED_SSM_CODE_EEEC = 0x22,
+ MLX5_DPLL_ENHANCED_SSM_CODE_EPRC = 0x23,
+};
+
+#define __MLX5_DPLL_SSM_COMBINED_CODE(ssm_code, enhanced_ssm_code) \
+ ((ssm_code) | ((enhanced_ssm_code) << 8))
+
+#define MLX5_DPLL_SSM_COMBINED_CODE(type) \
+ __MLX5_DPLL_SSM_COMBINED_CODE(MLX5_DPLL_SSM_CODE_##type, \
+ MLX5_DPLL_ENHANCED_SSM_CODE_##type)
+
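+/* Pack the pair into one value: SSM code in bits 7:0 and enhanced
+ * SSM code in bits 15:8, so a single switch can match the combination
+ * read back from the MSECQ register.
+ */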
+static int mlx5_dpll_clock_quality_level_get(const struct dpll_device *dpll,
+ void *priv, unsigned long *qls,
+ struct netlink_ext_ack *extack)
+{
+ u8 network_option, ssm_code, enhanced_ssm_code;
+ u32 out[MLX5_ST_SZ_DW(msecq_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(msecq_reg)] = {};
+ struct mlx5_dpll *mdpll = priv;
+ int err;
+
+ err = mlx5_core_access_reg(mdpll->mdev, in, sizeof(in),
+ out, sizeof(out), MLX5_REG_MSECQ, 0, 0);
+ if (err)
+ return err;
+ network_option = MLX5_GET(msecq_reg, out, network_option);
+ if (network_option != 1)
+ goto errout;
+ ssm_code = MLX5_GET(msecq_reg, out, local_ssm_code);
+ enhanced_ssm_code = MLX5_GET(msecq_reg, out, local_enhanced_ssm_code);
+
+ switch (__MLX5_DPLL_SSM_COMBINED_CODE(ssm_code, enhanced_ssm_code)) {
+ case MLX5_DPLL_SSM_COMBINED_CODE(PRC):
+ __set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_PRC, qls);
+ return 0;
+ case MLX5_DPLL_SSM_COMBINED_CODE(SSU_A):
+ __set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_SSU_A, qls);
+ return 0;
+ case MLX5_DPLL_SSM_COMBINED_CODE(SSU_B):
+ __set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_SSU_B, qls);
+ return 0;
+ case MLX5_DPLL_SSM_COMBINED_CODE(EEC1):
+ __set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EEC1, qls);
+ return 0;
+ case MLX5_DPLL_SSM_COMBINED_CODE(PRTC):
+ __set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_PRTC, qls);
+ return 0;
+ case MLX5_DPLL_SSM_COMBINED_CODE(EPRTC):
+ __set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EPRTC, qls);
+ return 0;
+ case MLX5_DPLL_SSM_COMBINED_CODE(EEEC):
+ __set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EEEC, qls);
+ return 0;
+ case MLX5_DPLL_SSM_COMBINED_CODE(EPRC):
+ __set_bit(DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EPRC, qls);
+ return 0;
+ }
+errout:
+ NL_SET_ERR_MSG_MOD(extack, "Invalid clock quality level obtained from firmware");
+ return -EINVAL;
+}
+
static const struct dpll_device_ops mlx5_dpll_device_ops = {
.lock_status_get = mlx5_dpll_device_lock_status_get,
.mode_get = mlx5_dpll_device_mode_get,
+ .clock_quality_level_get = mlx5_dpll_clock_quality_level_get,
};
static int mlx5_dpll_pin_direction_get(const struct dpll_pin *pin,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 57b7298a0e79..979fc56205e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -83,6 +83,7 @@ struct page_pool;
#define MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE (8)
#define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9)
#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
+#define MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE (PAGE_SHIFT - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64)
#define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024)
#define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096)
@@ -624,16 +625,14 @@ struct mlx5e_dma_info {
struct mlx5e_shampo_hd {
u32 mkey;
- struct mlx5e_dma_info *info;
struct mlx5e_frag_page *pages;
- u16 curr_page_index;
u32 hd_per_wq;
u16 hd_per_wqe;
+ u16 pages_per_wq;
unsigned long *bitmap;
u16 pi;
u16 ci;
__be32 key;
- u64 last_addr;
};
struct mlx5e_hw_gro_data {
@@ -755,7 +754,7 @@ struct mlx5e_channel {
u8 lag_port;
/* XDP_REDIRECT */
- struct mlx5e_xdpsq xdpsq;
+ struct mlx5e_xdpsq *xdpsq;
/* AF_XDP zero-copy */
struct mlx5e_rq xskrq;
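The new MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE macro lets a SHAMPO header index be split into a page index and an in-page offset using only shifts and masks (see the mlx5e_shampo_hd_to_frag_page()/mlx5e_shampo_hd_offset() helpers added in en_rx.c below). A worked standalone example, assuming 4 KiB pages and the 512-byte (1 << 9) max header entry size defined above:

#include <assert.h>

#define PAGE_SHIFT_LOCAL  12	/* 4 KiB pages (assumption) */
#define LOG_HDR_SZ        9	/* 512-byte header entries */
#define HDRS_PER_PAGE     (1u << (PAGE_SHIFT_LOCAL - LOG_HDR_SZ))	/* 8 */
#define LOG_HDRS_PER_PAGE (PAGE_SHIFT_LOCAL - LOG_HDR_SZ)		/* 3 */

int main(void)
{
	unsigned int header_index = 19;
	unsigned int page = header_index >> LOG_HDRS_PER_PAGE;
	unsigned int offset = (header_index & (HDRS_PER_PAGE - 1)) << LOG_HDR_SZ;

	assert(page == 2 && offset == 1536);	/* header 19 = page 2, byte 1536 */
	return 0;
}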
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
index 1c062a2e8996..45737d039252 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
@@ -318,7 +318,7 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
}
actions[num_actions++] = smfs_rule->count_action;
- actions[num_actions++] = attr->modify_hdr->action.dr_action;
+ actions[num_actions++] = attr->modify_hdr->fs_dr_action.dr_action;
actions[num_actions++] = fs_smfs->fwd_action;
nat = (attr->ft == fs_smfs->ct_nat);
@@ -379,7 +379,7 @@ static int mlx5_ct_fs_smfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_
struct mlx5dr_rule *rule;
actions[0] = smfs_rule->count_action;
- actions[1] = attr->modify_hdr->action.dr_action;
+ actions[1] = attr->modify_hdr->fs_dr_action.dr_action;
actions[2] = fs_smfs->fwd_action;
rule = mlx5_smfs_rule_create(smfs_rule->smfs_matcher->dr_matcher, spec,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 92d5cfec3dc0..a84ebac2f011 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -1026,7 +1026,7 @@ mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
return ERR_PTR(-ENOMEM);
counter->is_shared = false;
- counter->counter = mlx5_fc_create_ex(ct_priv->dev, true);
+ counter->counter = mlx5_fc_create(ct_priv->dev, true);
if (IS_ERR(counter->counter)) {
ct_dbg("Failed to create counter for ct entry");
ret = PTR_ERR(counter->counter);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 4610621a340e..94b291662087 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -865,7 +865,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (unlikely(sq_num >= priv->channels.num))
return -ENXIO;
- sq = &priv->channels.c[sq_num]->xdpsq;
+ sq = priv->channels.c[sq_num]->xdpsq;
for (i = 0; i < n; i++) {
struct mlx5e_xmit_data_frags xdptxdf = {};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 1966736f98b4..cae39198b4db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -406,6 +406,9 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
unlock:
mutex_unlock(&priv->state_lock);
+ if (!err)
+ netdev_update_features(priv->netdev);
+
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 13a3fa8dc0cb..d0b80b520397 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -350,19 +350,15 @@ static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL,
node);
- shampo->info = kvzalloc_node(array_size(shampo->hd_per_wq,
- sizeof(*shampo->info)),
- GFP_KERNEL, node);
shampo->pages = kvzalloc_node(array_size(shampo->hd_per_wq,
sizeof(*shampo->pages)),
GFP_KERNEL, node);
- if (!shampo->bitmap || !shampo->info || !shampo->pages)
+ if (!shampo->bitmap || !shampo->pages)
goto err_nomem;
return 0;
err_nomem:
- kvfree(shampo->info);
kvfree(shampo->bitmap);
kvfree(shampo->pages);
@@ -372,7 +368,6 @@ err_nomem:
static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
{
kvfree(rq->mpwqe.shampo->bitmap);
- kvfree(rq->mpwqe.shampo->info);
kvfree(rq->mpwqe.shampo->pages);
}
@@ -767,8 +762,6 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
u32 *pool_size,
int node)
{
- void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
- int wq_size;
int err;
if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
@@ -793,9 +786,9 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
cpu_to_be32(rq->mpwqe.shampo->mkey);
rq->mpwqe.shampo->hd_per_wqe =
mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
- wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
- *pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
- MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
+ rq->mpwqe.shampo->pages_per_wq =
+ rq->mpwqe.shampo->hd_per_wq / MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
+ *pool_size += rq->mpwqe.shampo->pages_per_wq;
return 0;
err_hw_gro_data:
@@ -1126,7 +1119,7 @@ static void mlx5e_flush_rq_cq(struct mlx5e_rq *rq)
struct mlx5_cqe64 *cqe;
if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state)) {
- while ((cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq)))
+ while ((cqe = mlx5_cqwq_get_cqe_enhanced_comp(cqwq)))
mlx5_cqwq_pop(cqwq);
} else {
while ((cqe = mlx5_cqwq_get_cqe(cqwq)))
@@ -2086,6 +2079,44 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
mlx5e_free_xdpsq(sq);
}
+static struct mlx5e_xdpsq *mlx5e_open_xdpredirect_sq(struct mlx5e_channel *c,
+ struct mlx5e_params *params,
+ struct mlx5e_channel_param *cparam,
+ struct mlx5e_create_cq_param *ccp)
+{
+ struct mlx5e_xdpsq *xdpsq;
+ int err;
+
+ xdpsq = kvzalloc_node(sizeof(*xdpsq), GFP_KERNEL, c->cpu);
+ if (!xdpsq)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation,
+ &cparam->xdp_sq.cqp, ccp, &xdpsq->cq);
+ if (err)
+ goto err_free_xdpsq;
+
+ err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, xdpsq, true);
+ if (err)
+ goto err_close_xdpsq_cq;
+
+ return xdpsq;
+
+err_close_xdpsq_cq:
+ mlx5e_close_cq(&xdpsq->cq);
+err_free_xdpsq:
+ kvfree(xdpsq);
+
+ return ERR_PTR(err);
+}
+
+static void mlx5e_close_xdpredirect_sq(struct mlx5e_xdpsq *xdpsq)
+{
+ mlx5e_close_xdpsq(xdpsq);
+ mlx5e_close_cq(&xdpsq->cq);
+ kvfree(xdpsq);
+}
+
static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
struct net_device *netdev,
struct workqueue_struct *workqueue,
@@ -2476,6 +2507,7 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
{
+ const struct net_device_ops *netdev_ops = c->netdev->netdev_ops;
struct dim_cq_moder icocq_moder = {0, 0};
struct mlx5e_create_cq_param ccp;
int err;
@@ -2496,15 +2528,18 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
if (err)
goto err_close_icosq_cq;
- err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
- &c->xdpsq.cq);
- if (err)
- goto err_close_tx_cqs;
+ if (netdev_ops->ndo_xdp_xmit) {
+ c->xdpsq = mlx5e_open_xdpredirect_sq(c, params, cparam, &ccp);
+ if (IS_ERR(c->xdpsq)) {
+ err = PTR_ERR(c->xdpsq);
+ goto err_close_tx_cqs;
+ }
+ }
err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->rq.cq);
if (err)
- goto err_close_xdp_tx_cqs;
+ goto err_close_xdpredirect_sq;
err = c->xdp ? mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
&ccp, &c->rq_xdpsq.cq) : 0;
@@ -2516,7 +2551,7 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
mlx5e_async_icosq_err_cqe_work);
if (err)
- goto err_close_xdpsq_cq;
+ goto err_close_rq_xdpsq_cq;
mutex_init(&c->icosq_recovery_lock);
@@ -2540,16 +2575,8 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
goto err_close_rq;
}
- err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
- if (err)
- goto err_close_xdp_sq;
-
return 0;
-err_close_xdp_sq:
- if (c->xdp)
- mlx5e_close_xdpsq(&c->rq_xdpsq);
-
err_close_rq:
mlx5e_close_rq(&c->rq);
@@ -2562,15 +2589,16 @@ err_close_icosq:
err_close_async_icosq:
mlx5e_close_icosq(&c->async_icosq);
-err_close_xdpsq_cq:
+err_close_rq_xdpsq_cq:
if (c->xdp)
mlx5e_close_cq(&c->rq_xdpsq.cq);
err_close_rx_cq:
mlx5e_close_cq(&c->rq.cq);
-err_close_xdp_tx_cqs:
- mlx5e_close_cq(&c->xdpsq.cq);
+err_close_xdpredirect_sq:
+ if (c->xdpsq)
+ mlx5e_close_xdpredirect_sq(c->xdpsq);
err_close_tx_cqs:
mlx5e_close_tx_cqs(c);
@@ -2586,7 +2614,6 @@ err_close_async_icosq_cq:
static void mlx5e_close_queues(struct mlx5e_channel *c)
{
- mlx5e_close_xdpsq(&c->xdpsq);
if (c->xdp)
mlx5e_close_xdpsq(&c->rq_xdpsq);
/* The same ICOSQ is used for UMRs for both RQ and XSKRQ. */
@@ -2599,7 +2626,8 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
if (c->xdp)
mlx5e_close_cq(&c->rq_xdpsq.cq);
mlx5e_close_cq(&c->rq.cq);
- mlx5e_close_cq(&c->xdpsq.cq);
+ if (c->xdpsq)
+ mlx5e_close_xdpredirect_sq(c->xdpsq);
mlx5e_close_tx_cqs(c);
mlx5e_close_cq(&c->icosq.cq);
mlx5e_close_cq(&c->async_icosq.cq);
@@ -2697,7 +2725,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->aff_mask = irq_get_effective_affinity_mask(irq);
c->lag_port = mlx5e_enumerate_lag_port(mdev, ix);
- netif_napi_add(netdev, &c->napi, mlx5e_napi_poll);
+ netif_napi_add_config(netdev, &c->napi, mlx5e_napi_poll, ix);
netif_napi_set_irq(&c->napi, irq);
err = mlx5e_open_queues(c, params, cparam);
@@ -4558,6 +4586,10 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
out:
WRITE_ONCE(netdev->mtu, params->sw_mtu);
mutex_unlock(&priv->state_lock);
+
+ if (!err)
+ netdev_update_features(netdev);
+
return err;
}
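The en_main.c changes above also turn the per-channel XDP_REDIRECT SQ from an embedded struct into a pointer that is allocated only when the netdev actually implements ndo_xdp_xmit, so the SQ and its CQ are skipped entirely otherwise; every consumer then NULL-checks the pointer (see en_rep.c, en_txrx.c and xdp.c in this patch). A hedged sketch of that lazy-allocation shape, with illustrative names rather than the driver's:

#include <stdlib.h>

struct xsq { int sqn; };		/* stand-in for mlx5e_xdpsq */
struct channel { struct xsq *xdpsq; };

static int open_channel(struct channel *c, int have_xdp_xmit)
{
	if (!have_xdp_xmit)
		return 0;	/* no redirect support: allocate nothing */

	c->xdpsq = calloc(1, sizeof(*c->xdpsq));
	return c->xdpsq ? 0 : -1;	/* -ENOMEM in the driver */
}

static void close_channel(struct channel *c)
{
	free(c->xdpsq);		/* free(NULL) is a no-op, like the NULL checks */
	c->xdpsq = NULL;
}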
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 92094bf60d59..554f9cb5b53f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -600,7 +600,8 @@ mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
if (c->xdp)
sqs[num_sqs++] = c->rq_xdpsq.sqn;
- sqs[num_sqs++] = c->xdpsq.sqn;
+ if (c->xdpsq)
+ sqs[num_sqs++] = c->xdpsq->sqn;
}
}
if (ptp_sq) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 8e24ba96c779..1963bc5adb18 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -643,83 +643,82 @@ static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
+static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq, int header_index)
+{
+ BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT);
+
+ return &rq->mpwqe.shampo->pages[header_index >> MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE];
+}
+
+static u64 mlx5e_shampo_hd_offset(int header_index)
+{
+ return (header_index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
+ MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
+}
+
+static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index);
+
static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
u16 ksm_entries, u16 index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u16 entries, pi, header_offset, err, wqe_bbs, new_entries;
+ u16 pi, header_offset, err, wqe_bbs;
u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
- u16 page_index = shampo->curr_page_index;
- struct mlx5e_frag_page *frag_page;
- u64 addr = shampo->last_addr;
- struct mlx5e_dma_info *dma_info;
struct mlx5e_umr_wqe *umr_wqe;
- int headroom, i;
+ int headroom, i = 0;
headroom = rq->buff.headroom;
- new_entries = ksm_entries - (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1));
- entries = ALIGN(ksm_entries, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT);
- wqe_bbs = MLX5E_KSM_UMR_WQEBBS(entries);
+ wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries);
pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
- build_ksm_umr(sq, umr_wqe, shampo->key, index, entries);
+ build_ksm_umr(sq, umr_wqe, shampo->key, index, ksm_entries);
- frag_page = &shampo->pages[page_index];
+ WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1));
+ while (i < ksm_entries) {
+ struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
+ u64 addr;
- for (i = 0; i < entries; i++, index++) {
- dma_info = &shampo->info[index];
- if (i >= ksm_entries || (index < shampo->pi && shampo->pi - index <
- MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT))
- goto update_ksm;
- header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
- MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
- if (!(header_offset & (PAGE_SIZE - 1))) {
- page_index = (page_index + 1) & (shampo->hd_per_wq - 1);
- frag_page = &shampo->pages[page_index];
+ err = mlx5e_page_alloc_fragmented(rq, frag_page);
+ if (unlikely(err))
+ goto err_unmap;
- err = mlx5e_page_alloc_fragmented(rq, frag_page);
- if (unlikely(err))
- goto err_unmap;
- addr = page_pool_get_dma_addr(frag_page->page);
+ addr = page_pool_get_dma_addr(frag_page->page);
- dma_info->addr = addr;
- dma_info->frag_page = frag_page;
- } else {
- dma_info->addr = addr + header_offset;
- dma_info->frag_page = frag_page;
- }
+ for (int j = 0; j < MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; j++) {
+ header_offset = mlx5e_shampo_hd_offset(index++);
-update_ksm:
- umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
- .key = cpu_to_be32(lkey),
- .va = cpu_to_be64(dma_info->addr + headroom),
- };
+ umr_wqe->inline_ksms[i++] = (struct mlx5_ksm) {
+ .key = cpu_to_be32(lkey),
+ .va = cpu_to_be64(addr + header_offset + headroom),
+ };
+ }
}
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
.num_wqebbs = wqe_bbs,
- .shampo.len = new_entries,
+ .shampo.len = ksm_entries,
};
- shampo->pi = (shampo->pi + new_entries) & (shampo->hd_per_wq - 1);
- shampo->curr_page_index = page_index;
- shampo->last_addr = addr;
+ shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1);
sq->pc += wqe_bbs;
sq->doorbell_cseg = &umr_wqe->ctrl;
return 0;
err_unmap:
- while (--i >= 0) {
- dma_info = &shampo->info[--index];
- if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
- dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
- mlx5e_page_release_fragmented(rq, dma_info->frag_page);
+ while (--i >= 0) {
+ --index;
+ header_offset = mlx5e_shampo_hd_offset(index);
+ if (!header_offset) {
+ struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
+
+ mlx5e_page_release_fragmented(rq, frag_page);
}
}
+
rq->stats->buff_alloc_err++;
return err;
}
@@ -731,7 +730,8 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
struct mlx5e_icosq *sq = rq->icosq;
int i, err, max_ksm_entries, len;
- max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev);
+ max_ksm_entries = ALIGN_DOWN(MLX5E_MAX_KSM_PER_WQE(rq->mdev),
+ MLX5E_SHAMPO_WQ_HEADER_PER_PAGE);
ksm_entries = bitmap_find_window(shampo->bitmap,
shampo->hd_per_wqe,
shampo->hd_per_wq, shampo->pi);
@@ -739,8 +739,8 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
if (!ksm_entries)
return 0;
- ksm_entries += (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1));
- index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT);
+ /* pi is aligned to MLX5E_SHAMPO_WQ_HEADER_PER_PAGE */
+ index = shampo->pi;
entries_before = shampo->hd_per_wq - index;
if (unlikely(entries_before < ksm_entries))
@@ -851,13 +851,11 @@ static void
mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u64 addr = shampo->info[header_index].addr;
if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
- struct mlx5e_dma_info *dma_info = &shampo->info[header_index];
+ struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
- dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE);
- mlx5e_page_release_fragmented(rq, dma_info->frag_page);
+ mlx5e_page_release_fragmented(rq, frag_page);
}
clear_bit(header_index, shampo->bitmap);
}
@@ -1211,10 +1209,10 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
{
- struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index];
- u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom;
+ struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
+ u16 head_offset = mlx5e_shampo_hd_offset(header_index) + rq->buff.headroom;
- return page_address(last_head->frag_page->page) + head_offset;
+ return page_address(frag_page->page) + head_offset;
}
static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
@@ -2185,29 +2183,30 @@ static struct sk_buff *
mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 header_index)
{
- struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index];
- u16 head_offset = head->addr & (PAGE_SIZE - 1);
+ struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
+ dma_addr_t page_dma_addr = page_pool_get_dma_addr(frag_page->page);
+ u16 head_offset = mlx5e_shampo_hd_offset(header_index);
+ dma_addr_t dma_addr = page_dma_addr + head_offset;
u16 head_size = cqe->shampo.header_size;
u16 rx_headroom = rq->buff.headroom;
struct sk_buff *skb = NULL;
void *hdr, *data;
u32 frag_size;
- hdr = page_address(head->frag_page->page) + head_offset;
+ hdr = page_address(frag_page->page) + head_offset;
data = hdr + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size);
if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
/* build SKB around header */
- dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);
+ dma_sync_single_range_for_cpu(rq->pdev, dma_addr, 0, frag_size, rq->buff.map_dir);
net_prefetchw(hdr);
net_prefetch(data);
skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
-
if (unlikely(!skb))
return NULL;
- head->frag_page->frags++;
+ frag_page->frags++;
} else {
/* allocate SKB and copy header for large header */
rq->stats->gro_large_hds++;
@@ -2219,7 +2218,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
}
net_prefetchw(skb->data);
- mlx5e_copy_skb_header(rq, skb, head->frag_page->page, head->addr,
+ mlx5e_copy_skb_header(rq, skb, frag_page->page, dma_addr,
head_offset + rx_headroom,
rx_headroom, head_size);
/* skb linear part was allocated with headlen and aligned to long */
@@ -2436,7 +2435,7 @@ static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq,
struct mlx5e_cq_decomp *cqd = &rq->cqd;
int work_done = 0;
- cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq);
+ cqe = mlx5_cqwq_get_cqe_enhanced_comp(cqwq);
if (!cqe)
return work_done;
@@ -2466,7 +2465,7 @@ static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq,
rq, cqe);
work_done++;
} while (work_done < budget_rem &&
- (cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq)));
+ (cqe = mlx5_cqwq_get_cqe_enhanced_comp(cqwq)));
/* last cqe might be title on next poll bulk */
if (title_cqe) {
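The reworked UMR builder above fills header entries one full page at a time, and its error path walks the entry count back down, releasing a page whenever the decremented header index lands on a page boundary; that is why the unwind must also visit index 0 (while (--i >= 0)). A self-contained model of that fill/unwind shape, with a stub allocator that fails on demand (illustrative names only):

#include <stdio.h>
#include <stdlib.h>

#define HDRS_PER_PAGE 8

static int pages_alloced, fail_at = 2;	/* fail the 3rd page allocation */

static void *alloc_page_stub(void)
{
	if (pages_alloced == fail_at)
		return NULL;
	pages_alloced++;
	return malloc(1);
}

static int fill(int nentries, void **pages)
{
	int i = 0, index = 0;

	while (i < nentries) {
		void *page = alloc_page_stub();

		if (!page)
			goto err_unwind;
		pages[index / HDRS_PER_PAGE] = page;
		for (int j = 0; j < HDRS_PER_PAGE; j++) {
			/* umr_wqe->inline_ksms[i] = ... in the driver */
			i++;
			index++;
		}
	}
	return 0;

err_unwind:
	while (--i >= 0) {
		--index;
		if (!(index % HDRS_PER_PAGE)) {	/* first header of a page */
			free(pages[index / HDRS_PER_PAGE]);
			printf("released page %d\n", index / HDRS_PER_PAGE);
		}
	}
	return -1;
}

int main(void)
{
	void *pages[4] = { 0 };

	fill(32, pages);	/* fails on page 2; releases pages 1 and 0 */
	return 0;
}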
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 5873fde65c2e..76108299ea57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -55,7 +55,7 @@ static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
return;
dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
- net_dim(sq->dim, dim_sample);
+ net_dim(sq->dim, &dim_sample);
}
static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
@@ -67,7 +67,7 @@ static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
return;
dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
- net_dim(rq->dim, dim_sample);
+ net_dim(rq->dim, &dim_sample);
}
void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
@@ -165,7 +165,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
if (unlikely(!budget))
goto out;
- busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
+ if (c->xdpsq)
+ busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq->cq);
if (c->xdp)
busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);
@@ -236,7 +237,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
mlx5e_cq_arm(&rq->cq);
mlx5e_cq_arm(&c->icosq.cq);
mlx5e_cq_arm(&c->async_icosq.cq);
- mlx5e_cq_arm(&c->xdpsq.cq);
+ if (c->xdpsq)
+ mlx5e_cq_arm(&c->xdpsq->cq);
if (xsk_open) {
mlx5e_handle_rx_dim(xskrq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 68cb86b37e56..2b229b6226c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -114,14 +114,10 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
struct mlx5_eq *eq = &eq_comp->core;
struct mlx5_eqe *eqe;
int num_eqes = 0;
- u32 cqn = -1;
- eqe = next_eqe_sw(eq);
- if (!eqe)
- goto out;
-
- do {
+ while ((eqe = next_eqe_sw(eq))) {
struct mlx5_core_cq *cq;
+ u32 cqn;
/* Make sure we read EQ entry contents after we've
* checked the ownership bit.
@@ -142,14 +138,12 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
++eq->cons_index;
- } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
+ if (++num_eqes >= MLX5_EQ_POLLING_BUDGET)
+ break;
+ }
-out:
eq_update_ci(eq, 1);
- if (cqn != -1)
- tasklet_schedule(&eq_comp->tasklet_ctx.task);
-
return 0;
}
@@ -215,11 +209,7 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
recovery = action == ASYNC_EQ_RECOVER;
mlx5_eq_async_int_lock(eq_async, recovery, &flags);
- eqe = next_eqe_sw(eq);
- if (!eqe)
- goto out;
-
- do {
+ while ((eqe = next_eqe_sw(eq))) {
/*
* Make sure we read EQ entry contents after we've
* checked the ownership bit.
@@ -231,9 +221,10 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
++eq->cons_index;
- } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
+ if (++num_eqes >= MLX5_EQ_POLLING_BUDGET)
+ break;
+ }
-out:
eq_update_ci(eq, 1);
mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
@@ -810,15 +801,8 @@ EXPORT_SYMBOL(mlx5_eq_get_eqe);
void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
- __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
- u32 val;
-
eq->cons_index += cc;
- val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
-
- __raw_writel((__force u32)cpu_to_be32(val), addr);
- /* We still want ordering, just not swabbing, so add a barrier */
- wmb();
+ eq_update_ci(eq, arm);
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
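Both EQ handlers above replace a peek-then-do/while pattern with one budgeted while loop, and the completion handler drops the cqn tracking it only kept to decide whether to schedule the tasklet. The control-flow transform in isolation (generic sketch, not driver code):

#include <stdio.h>

#define BUDGET 4

static int remaining = 10;	/* pretend 10 events are pending */

static int *next_event(void)
{
	static int event;

	return remaining-- > 0 ? &event : NULL;
}

int main(void)
{
	int *eqe, polled = 0;

	while ((eqe = next_event())) {
		/* handle *eqe, bump the consumer index ... */
		if (++polled >= BUDGET)
			break;		/* respect the polling budget */
	}
	printf("polled %d\n", polled);	/* prints 4 */
	return 0;
}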
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index f8869c9b6802..982fe3714683 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -187,7 +187,7 @@ rate_err:
return err;
}
-void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_vport *vport)
{
struct mlx5_devlink_port *dl_port;
@@ -195,7 +195,7 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct
return;
dl_port = vport->dl_port;
- mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
+ mlx5_esw_qos_vport_update_parent(vport, NULL, NULL);
devl_rate_leaf_destroy(&dl_port->dl_port);
devl_port_unregister(&dl_port->dl_port);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
index 1ce332f21ebe..43550a416a6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
@@ -9,107 +9,111 @@
#include <linux/tracepoint.h>
#include "eswitch.h"
+#include "qos.h"
TRACE_EVENT(mlx5_esw_vport_qos_destroy,
- TP_PROTO(const struct mlx5_vport *vport),
- TP_ARGS(vport),
- TP_STRUCT__entry(__string(devname, dev_name(vport->dev->device))
+ TP_PROTO(const struct mlx5_core_dev *dev, const struct mlx5_vport *vport),
+ TP_ARGS(dev, vport),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
__field(unsigned short, vport_id)
- __field(unsigned int, tsar_ix)
+ __field(unsigned int, sched_elem_ix)
),
TP_fast_assign(__assign_str(devname);
__entry->vport_id = vport->vport;
- __entry->tsar_ix = vport->qos.esw_tsar_ix;
+ __entry->sched_elem_ix = mlx5_esw_qos_vport_get_sched_elem_ix(vport);
),
- TP_printk("(%s) vport=%hu tsar_ix=%u\n",
- __get_str(devname), __entry->vport_id, __entry->tsar_ix
+ TP_printk("(%s) vport=%hu sched_elem_ix=%u\n",
+ __get_str(devname), __entry->vport_id, __entry->sched_elem_ix
)
);
DECLARE_EVENT_CLASS(mlx5_esw_vport_qos_template,
- TP_PROTO(const struct mlx5_vport *vport, u32 bw_share, u32 max_rate),
- TP_ARGS(vport, bw_share, max_rate),
- TP_STRUCT__entry(__string(devname, dev_name(vport->dev->device))
+ TP_PROTO(const struct mlx5_core_dev *dev, const struct mlx5_vport *vport,
+ u32 bw_share, u32 max_rate),
+ TP_ARGS(dev, vport, bw_share, max_rate),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
__field(unsigned short, vport_id)
- __field(unsigned int, tsar_ix)
+ __field(unsigned int, sched_elem_ix)
__field(unsigned int, bw_share)
__field(unsigned int, max_rate)
- __field(void *, group)
+ __field(void *, parent)
),
TP_fast_assign(__assign_str(devname);
__entry->vport_id = vport->vport;
- __entry->tsar_ix = vport->qos.esw_tsar_ix;
+ __entry->sched_elem_ix = mlx5_esw_qos_vport_get_sched_elem_ix(vport);
__entry->bw_share = bw_share;
__entry->max_rate = max_rate;
- __entry->group = vport->qos.group;
+ __entry->parent = mlx5_esw_qos_vport_get_parent(vport);
),
- TP_printk("(%s) vport=%hu tsar_ix=%u bw_share=%u, max_rate=%u group=%p\n",
- __get_str(devname), __entry->vport_id, __entry->tsar_ix,
- __entry->bw_share, __entry->max_rate, __entry->group
+ TP_printk("(%s) vport=%hu sched_elem_ix=%u bw_share=%u, max_rate=%u parent=%p\n",
+ __get_str(devname), __entry->vport_id, __entry->sched_elem_ix,
+ __entry->bw_share, __entry->max_rate, __entry->parent
)
);
DEFINE_EVENT(mlx5_esw_vport_qos_template, mlx5_esw_vport_qos_create,
- TP_PROTO(const struct mlx5_vport *vport, u32 bw_share, u32 max_rate),
- TP_ARGS(vport, bw_share, max_rate)
+ TP_PROTO(const struct mlx5_core_dev *dev, const struct mlx5_vport *vport,
+ u32 bw_share, u32 max_rate),
+ TP_ARGS(dev, vport, bw_share, max_rate)
);
DEFINE_EVENT(mlx5_esw_vport_qos_template, mlx5_esw_vport_qos_config,
- TP_PROTO(const struct mlx5_vport *vport, u32 bw_share, u32 max_rate),
- TP_ARGS(vport, bw_share, max_rate)
+ TP_PROTO(const struct mlx5_core_dev *dev, const struct mlx5_vport *vport,
+ u32 bw_share, u32 max_rate),
+ TP_ARGS(dev, vport, bw_share, max_rate)
);
-DECLARE_EVENT_CLASS(mlx5_esw_group_qos_template,
+DECLARE_EVENT_CLASS(mlx5_esw_node_qos_template,
TP_PROTO(const struct mlx5_core_dev *dev,
- const struct mlx5_esw_rate_group *group,
+ const struct mlx5_esw_sched_node *node,
unsigned int tsar_ix),
- TP_ARGS(dev, group, tsar_ix),
+ TP_ARGS(dev, node, tsar_ix),
TP_STRUCT__entry(__string(devname, dev_name(dev->device))
- __field(const void *, group)
+ __field(const void *, node)
__field(unsigned int, tsar_ix)
),
TP_fast_assign(__assign_str(devname);
- __entry->group = group;
+ __entry->node = node;
__entry->tsar_ix = tsar_ix;
),
- TP_printk("(%s) group=%p tsar_ix=%u\n",
- __get_str(devname), __entry->group, __entry->tsar_ix
+ TP_printk("(%s) node=%p tsar_ix=%u\n",
+ __get_str(devname), __entry->node, __entry->tsar_ix
)
);
-DEFINE_EVENT(mlx5_esw_group_qos_template, mlx5_esw_group_qos_create,
+DEFINE_EVENT(mlx5_esw_node_qos_template, mlx5_esw_node_qos_create,
TP_PROTO(const struct mlx5_core_dev *dev,
- const struct mlx5_esw_rate_group *group,
+ const struct mlx5_esw_sched_node *node,
unsigned int tsar_ix),
- TP_ARGS(dev, group, tsar_ix)
+ TP_ARGS(dev, node, tsar_ix)
);
-DEFINE_EVENT(mlx5_esw_group_qos_template, mlx5_esw_group_qos_destroy,
+DEFINE_EVENT(mlx5_esw_node_qos_template, mlx5_esw_node_qos_destroy,
TP_PROTO(const struct mlx5_core_dev *dev,
- const struct mlx5_esw_rate_group *group,
+ const struct mlx5_esw_sched_node *node,
unsigned int tsar_ix),
- TP_ARGS(dev, group, tsar_ix)
+ TP_ARGS(dev, node, tsar_ix)
);
-TRACE_EVENT(mlx5_esw_group_qos_config,
+TRACE_EVENT(mlx5_esw_node_qos_config,
TP_PROTO(const struct mlx5_core_dev *dev,
- const struct mlx5_esw_rate_group *group,
+ const struct mlx5_esw_sched_node *node,
unsigned int tsar_ix, u32 bw_share, u32 max_rate),
- TP_ARGS(dev, group, tsar_ix, bw_share, max_rate),
+ TP_ARGS(dev, node, tsar_ix, bw_share, max_rate),
TP_STRUCT__entry(__string(devname, dev_name(dev->device))
- __field(const void *, group)
+ __field(const void *, node)
__field(unsigned int, tsar_ix)
__field(unsigned int, bw_share)
__field(unsigned int, max_rate)
),
TP_fast_assign(__assign_str(devname);
- __entry->group = group;
+ __entry->node = node;
__entry->tsar_ix = tsar_ix;
__entry->bw_share = bw_share;
__entry->max_rate = max_rate;
),
- TP_printk("(%s) group=%p tsar_ix=%u bw_share=%u max_rate=%u\n",
- __get_str(devname), __entry->group, __entry->tsar_ix,
+ TP_printk("(%s) node=%p tsar_ix=%u bw_share=%u max_rate=%u\n",
+ __get_str(devname), __entry->node, __entry->tsar_ix,
__entry->bw_share, __entry->max_rate
)
);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index 8587cd572da5..45183de424f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -176,20 +176,10 @@ static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
static int esw_create_legacy_table(struct mlx5_eswitch *esw)
{
- int err;
-
memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
atomic64_set(&esw->user_count, 0);
- err = esw_create_legacy_vepa_table(esw);
- if (err)
- return err;
-
- err = esw_create_legacy_fdb_table(esw);
- if (err)
- esw_destroy_legacy_vepa_table(esw);
-
- return err;
+ return esw_create_legacy_fdb_table(esw);
}
static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
@@ -259,15 +249,22 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
if (!setting) {
esw_cleanup_vepa_rules(esw);
+ esw_destroy_legacy_vepa_table(esw);
return 0;
}
if (esw->fdb_table.legacy.vepa_uplink_rule)
return 0;
+ err = esw_create_legacy_vepa_table(esw);
+ if (err)
+ return err;
+
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return -ENOMEM;
+ if (!spec) {
+ err = -ENOMEM;
+ goto out;
+ }
/* Uplink rule forward uplink traffic to FDB */
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
@@ -303,8 +300,10 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
out:
kvfree(spec);
- if (err)
+ if (err) {
esw_cleanup_vepa_rules(esw);
+ esw_destroy_legacy_vepa_table(esw);
+ }
return err;
}
@@ -513,15 +512,11 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
u32 max_rate, u32 min_rate)
{
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
- int err;
if (!mlx5_esw_allowed(esw))
return -EPERM;
if (IS_ERR(evport))
return PTR_ERR(evport);
- mutex_lock(&esw->state_lock);
- err = mlx5_esw_qos_set_vport_rate(esw, evport, max_rate, min_rate);
- mutex_unlock(&esw->state_lock);
- return err;
+ return mlx5_esw_qos_set_vport_rate(evport, max_rate, min_rate);
}
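After the legacy.c change, the VEPA flow table no longer lives for the whole legacy-mode span: it is created on first enable inside _mlx5_eswitch_set_vepa_locked() and destroyed both on disable and on any error after creation. A small lifecycle sketch under those assumptions (illustrative names, not the driver's helpers):

#include <stdbool.h>
#include <stdlib.h>

struct vepa_state { void *table; };

static int set_vepa(struct vepa_state *s, bool enable)
{
	if (!enable) {
		free(s->table);		/* cleanup rules + destroy table */
		s->table = NULL;
		return 0;
	}
	if (s->table)
		return 0;		/* already enabled, nothing to do */

	s->table = malloc(1);		/* esw_create_legacy_vepa_table() */
	if (!s->table)
		return -1;		/* -ENOMEM; later errors destroy it again */
	return 0;
}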
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 02a3563f51ad..8b7c843446e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -11,497 +11,427 @@
/* Minimum supported BW share value by the HW is 1 Mbit/sec */
#define MLX5_MIN_BW_SHARE 1
-#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
- min_t(u32, max_t(u32, DIV_ROUND_UP(rate, divider), MLX5_MIN_BW_SHARE), limit)
-
-struct mlx5_esw_rate_group {
- u32 tsar_ix;
- u32 max_rate;
- u32 min_rate;
- u32 bw_share;
- struct list_head list;
+/* Holds rate nodes associated with an E-Switch. */
+struct mlx5_qos_domain {
+ /* Serializes access to all qos changes in the qos domain. */
+ struct mutex lock;
+ /* List of all mlx5_esw_sched_nodes. */
+ struct list_head nodes;
};
-static int esw_qos_tsar_config(struct mlx5_core_dev *dev, u32 *sched_ctx,
- u32 tsar_ix, u32 max_rate, u32 bw_share)
+static void esw_qos_lock(struct mlx5_eswitch *esw)
{
- u32 bitmask = 0;
-
- if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
- return -EOPNOTSUPP;
+ mutex_lock(&esw->qos.domain->lock);
+}
- MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
- MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
- bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
- bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
+static void esw_qos_unlock(struct mlx5_eswitch *esw)
+{
+ mutex_unlock(&esw->qos.domain->lock);
+}
- return mlx5_modify_scheduling_element_cmd(dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- sched_ctx,
- tsar_ix,
- bitmask);
+static void esw_assert_qos_lock_held(struct mlx5_eswitch *esw)
+{
+ lockdep_assert_held(&esw->qos.domain->lock);
}
-static int esw_qos_group_config(struct mlx5_eswitch *esw, struct mlx5_esw_rate_group *group,
- u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack)
+static struct mlx5_qos_domain *esw_qos_domain_alloc(void)
{
- u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
- struct mlx5_core_dev *dev = esw->dev;
- int err;
+ struct mlx5_qos_domain *qos_domain;
- err = esw_qos_tsar_config(dev, sched_ctx,
- group->tsar_ix,
- max_rate, bw_share);
- if (err)
- NL_SET_ERR_MSG_MOD(extack, "E-Switch modify group TSAR element failed");
+ qos_domain = kzalloc(sizeof(*qos_domain), GFP_KERNEL);
+ if (!qos_domain)
+ return NULL;
- trace_mlx5_esw_group_qos_config(dev, group, group->tsar_ix, bw_share, max_rate);
+ mutex_init(&qos_domain->lock);
+ INIT_LIST_HEAD(&qos_domain->nodes);
- return err;
+ return qos_domain;
}
-static int esw_qos_vport_config(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- u32 max_rate, u32 bw_share,
- struct netlink_ext_ack *extack)
+static int esw_qos_domain_init(struct mlx5_eswitch *esw)
{
- u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
- struct mlx5_core_dev *dev = esw->dev;
- int err;
-
- if (!vport->qos.enabled)
- return -EIO;
-
- err = esw_qos_tsar_config(dev, sched_ctx, vport->qos.esw_tsar_ix,
- max_rate, bw_share);
- if (err) {
- esw_warn(esw->dev,
- "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
- vport->vport, err);
- NL_SET_ERR_MSG_MOD(extack, "E-Switch modify TSAR vport element failed");
- return err;
- }
+ esw->qos.domain = esw_qos_domain_alloc();
- trace_mlx5_esw_vport_qos_config(vport, bw_share, max_rate);
-
- return 0;
+ return esw->qos.domain ? 0 : -ENOMEM;
}
-static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw,
- struct mlx5_esw_rate_group *group,
- bool group_level)
+static void esw_qos_domain_release(struct mlx5_eswitch *esw)
{
- u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
- struct mlx5_vport *evport;
- u32 max_guarantee = 0;
- unsigned long i;
+ kfree(esw->qos.domain);
+ esw->qos.domain = NULL;
+}
- if (group_level) {
- struct mlx5_esw_rate_group *group;
+enum sched_node_type {
+ SCHED_NODE_TYPE_VPORTS_TSAR,
+ SCHED_NODE_TYPE_VPORT,
+};
- list_for_each_entry(group, &esw->qos.groups, list) {
- if (group->min_rate < max_guarantee)
- continue;
- max_guarantee = group->min_rate;
- }
- } else {
- mlx5_esw_for_each_vport(esw, i, evport) {
- if (!evport->enabled || !evport->qos.enabled ||
- evport->qos.group != group || evport->qos.min_rate < max_guarantee)
- continue;
- max_guarantee = evport->qos.min_rate;
- }
- }
+static const char * const sched_node_type_str[] = {
+ [SCHED_NODE_TYPE_VPORTS_TSAR] = "vports TSAR",
+ [SCHED_NODE_TYPE_VPORT] = "vport",
+};
- if (max_guarantee)
- return max_t(u32, max_guarantee / fw_max_bw_share, 1);
+struct mlx5_esw_sched_node {
+ u32 ix;
+ /* Bandwidth parameters. */
+ u32 max_rate;
+ u32 min_rate;
+ /* A computed value indicating relative min_rate between node's children. */
+ u32 bw_share;
+ /* The parent node in the rate hierarchy. */
+ struct mlx5_esw_sched_node *parent;
+ /* Entry in the parent node's children list. */
+ struct list_head entry;
+ /* The type of this node in the rate hierarchy. */
+ enum sched_node_type type;
+ /* The eswitch this node belongs to. */
+ struct mlx5_eswitch *esw;
+ /* The children nodes of this node, empty list for leaf nodes. */
+ struct list_head children;
+ /* Valid only if this node is associated with a vport. */
+ struct mlx5_vport *vport;
+};
- /* If vports min rate divider is 0 but their group has bw_share configured, then
- * need to set bw_share for vports to minimal value.
- */
- if (!group_level && !max_guarantee && group && group->bw_share)
- return 1;
- return 0;
+static void
+esw_qos_node_set_parent(struct mlx5_esw_sched_node *node, struct mlx5_esw_sched_node *parent)
+{
+ list_del_init(&node->entry);
+ node->parent = parent;
+ list_add_tail(&node->entry, &parent->children);
+ node->esw = parent->esw;
}
-static u32 esw_qos_calc_bw_share(u32 min_rate, u32 divider, u32 fw_max)
+void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport)
{
- if (divider)
- return MLX5_RATE_TO_BW_SHARE(min_rate, divider, fw_max);
-
- return 0;
+ kfree(vport->qos.sched_node);
+ memset(&vport->qos, 0, sizeof(vport->qos));
}
-static int esw_qos_normalize_vports_min_rate(struct mlx5_eswitch *esw,
- struct mlx5_esw_rate_group *group,
- struct netlink_ext_ack *extack)
+u32 mlx5_esw_qos_vport_get_sched_elem_ix(const struct mlx5_vport *vport)
{
- u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
- u32 divider = esw_qos_calculate_min_rate_divider(esw, group, false);
- struct mlx5_vport *evport;
- unsigned long i;
- u32 bw_share;
- int err;
+ if (!vport->qos.sched_node)
+ return 0;
- mlx5_esw_for_each_vport(esw, i, evport) {
- if (!evport->enabled || !evport->qos.enabled || evport->qos.group != group)
- continue;
- bw_share = esw_qos_calc_bw_share(evport->qos.min_rate, divider, fw_max_bw_share);
+ return vport->qos.sched_node->ix;
+}
- if (bw_share == evport->qos.bw_share)
- continue;
+struct mlx5_esw_sched_node *
+mlx5_esw_qos_vport_get_parent(const struct mlx5_vport *vport)
+{
+ if (!vport->qos.sched_node)
+ return NULL;
- err = esw_qos_vport_config(esw, evport, evport->qos.max_rate, bw_share, extack);
- if (err)
- return err;
+ return vport->qos.sched_node->parent;
+}
- evport->qos.bw_share = bw_share;
+static void esw_qos_sched_elem_warn(struct mlx5_esw_sched_node *node, int err, const char *op)
+{
+ if (node->vport) {
+ esw_warn(node->esw->dev,
+ "E-Switch %s %s scheduling element failed (vport=%d,err=%d)\n",
+ op, sched_node_type_str[node->type], node->vport->vport, err);
+ return;
}
- return 0;
+ esw_warn(node->esw->dev,
+ "E-Switch %s %s scheduling element failed (err=%d)\n",
+ op, sched_node_type_str[node->type], err);
}
-static int esw_qos_normalize_groups_min_rate(struct mlx5_eswitch *esw, u32 divider,
+static int esw_qos_node_create_sched_element(struct mlx5_esw_sched_node *node, void *ctx,
struct netlink_ext_ack *extack)
{
- u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
- struct mlx5_esw_rate_group *group;
- u32 bw_share;
int err;
- list_for_each_entry(group, &esw->qos.groups, list) {
- bw_share = esw_qos_calc_bw_share(group->min_rate, divider, fw_max_bw_share);
-
- if (bw_share == group->bw_share)
- continue;
-
- err = esw_qos_group_config(esw, group, group->max_rate, bw_share, extack);
- if (err)
- return err;
-
- group->bw_share = bw_share;
-
- /* All the group's vports need to be set with default bw_share
- * to enable them with QOS
- */
- err = esw_qos_normalize_vports_min_rate(esw, group, extack);
-
- if (err)
- return err;
+ err = mlx5_create_scheduling_element_cmd(node->esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, ctx,
+ &node->ix);
+ if (err) {
+ esw_qos_sched_elem_warn(node, err, "create");
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch create scheduling element failed");
}
- return 0;
+ return err;
}
-static int esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
- u32 min_rate, struct netlink_ext_ack *extack)
+static int esw_qos_node_destroy_sched_element(struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
{
- u32 fw_max_bw_share, previous_min_rate;
- bool min_rate_supported;
int err;
- lockdep_assert_held(&esw->state_lock);
- fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
- min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
- fw_max_bw_share >= MLX5_MIN_BW_SHARE;
- if (min_rate && !min_rate_supported)
- return -EOPNOTSUPP;
- if (min_rate == evport->qos.min_rate)
- return 0;
-
- previous_min_rate = evport->qos.min_rate;
- evport->qos.min_rate = min_rate;
- err = esw_qos_normalize_vports_min_rate(esw, evport->qos.group, extack);
- if (err)
- evport->qos.min_rate = previous_min_rate;
+ err = mlx5_destroy_scheduling_element_cmd(node->esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ node->ix);
+ if (err) {
+ esw_qos_sched_elem_warn(node, err, "destroy");
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch destroying scheduling element failed.");
+ }
return err;
}
-static int esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
- u32 max_rate, struct netlink_ext_ack *extack)
+static int esw_qos_sched_elem_config(struct mlx5_esw_sched_node *node, u32 max_rate, u32 bw_share,
+ struct netlink_ext_ack *extack)
{
- u32 act_max_rate = max_rate;
- bool max_rate_supported;
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ struct mlx5_core_dev *dev = node->esw->dev;
+ u32 bitmask = 0;
int err;
- lockdep_assert_held(&esw->state_lock);
- max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
+ if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
+ return -EOPNOTSUPP;
- if (max_rate && !max_rate_supported)
+ if (bw_share && (!MLX5_CAP_QOS(dev, esw_bw_share) ||
+ MLX5_CAP_QOS(dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE))
return -EOPNOTSUPP;
- if (max_rate == evport->qos.max_rate)
+
+ if (node->max_rate == max_rate && node->bw_share == bw_share)
return 0;
- /* If parent group has rate limit need to set to group
- * value when new max rate is 0.
- */
- if (evport->qos.group && !max_rate)
- act_max_rate = evport->qos.group->max_rate;
+ if (node->max_rate != max_rate) {
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
+ bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+ }
+ if (node->bw_share != bw_share) {
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
+ bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
+ }
+
+ err = mlx5_modify_scheduling_element_cmd(dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ sched_ctx,
+ node->ix,
+ bitmask);
+ if (err) {
+ esw_qos_sched_elem_warn(node, err, "modify");
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch modify scheduling element failed");
- err = esw_qos_vport_config(esw, evport, act_max_rate, evport->qos.bw_share, extack);
+ return err;
+ }
- if (!err)
- evport->qos.max_rate = max_rate;
+ node->max_rate = max_rate;
+ node->bw_share = bw_share;
+ if (node->type == SCHED_NODE_TYPE_VPORTS_TSAR)
+ trace_mlx5_esw_node_qos_config(dev, node, node->ix, bw_share, max_rate);
+ else if (node->type == SCHED_NODE_TYPE_VPORT)
+ trace_mlx5_esw_vport_qos_config(dev, node->vport, bw_share, max_rate);
- return err;
+ return 0;
}
-static int esw_qos_set_group_min_rate(struct mlx5_eswitch *esw, struct mlx5_esw_rate_group *group,
- u32 min_rate, struct netlink_ext_ack *extack)
+static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw,
+ struct mlx5_esw_sched_node *parent)
{
+ struct list_head *nodes = parent ? &parent->children : &esw->qos.domain->nodes;
u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
- struct mlx5_core_dev *dev = esw->dev;
- u32 previous_min_rate, divider;
- int err;
+ struct mlx5_esw_sched_node *node;
+ u32 max_guarantee = 0;
- if (!(MLX5_CAP_QOS(dev, esw_bw_share) && fw_max_bw_share >= MLX5_MIN_BW_SHARE))
- return -EOPNOTSUPP;
+ /* Find max min_rate across all nodes.
+ * This will correspond to fw_max_bw_share in the final bw_share calculation.
+ */
+ list_for_each_entry(node, nodes, entry) {
+ if (node->esw == esw && node->ix != esw->qos.root_tsar_ix &&
+ node->min_rate > max_guarantee)
+ max_guarantee = node->min_rate;
+ }
- if (min_rate == group->min_rate)
- return 0;
+ if (max_guarantee)
+ return max_t(u32, max_guarantee / fw_max_bw_share, 1);
- previous_min_rate = group->min_rate;
- group->min_rate = min_rate;
- divider = esw_qos_calculate_min_rate_divider(esw, group, true);
- err = esw_qos_normalize_groups_min_rate(esw, divider, extack);
- if (err) {
- group->min_rate = previous_min_rate;
- NL_SET_ERR_MSG_MOD(extack, "E-Switch group min rate setting failed");
+ /* If the nodes' min_rate divider is 0 but their parent has bw_share
+ * configured, then set bw_share for the nodes to the minimal value.
+ */
- /* Attempt restoring previous configuration */
- divider = esw_qos_calculate_min_rate_divider(esw, group, true);
- if (esw_qos_normalize_groups_min_rate(esw, divider, extack))
- NL_SET_ERR_MSG_MOD(extack, "E-Switch BW share restore failed");
- }
+ if (parent && parent->bw_share)
+ return 1;
- return err;
+ /* If no node has min_rate configured, a divider of 0 sets all
+ * nodes' bw_share to 0, effectively disabling min guarantees.
+ */
+ return 0;
}
-static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw,
- struct mlx5_esw_rate_group *group,
- u32 max_rate, struct netlink_ext_ack *extack)
+static u32 esw_qos_calc_bw_share(u32 min_rate, u32 divider, u32 fw_max)
{
- struct mlx5_vport *vport;
- unsigned long i;
- int err;
-
- if (group->max_rate == max_rate)
+ if (!divider)
return 0;
+ return min_t(u32, max_t(u32, DIV_ROUND_UP(min_rate, divider), MLX5_MIN_BW_SHARE), fw_max);
+}
- err = esw_qos_group_config(esw, group, max_rate, group->bw_share, extack);
- if (err)
- return err;
+static void esw_qos_update_sched_node_bw_share(struct mlx5_esw_sched_node *node,
+ u32 divider,
+ struct netlink_ext_ack *extack)
+{
+ u32 fw_max_bw_share = MLX5_CAP_QOS(node->esw->dev, max_tsar_bw_share);
+ u32 bw_share;
- group->max_rate = max_rate;
+ bw_share = esw_qos_calc_bw_share(node->min_rate, divider, fw_max_bw_share);
- /* Any unlimited vports in the group should be set
- * with the value of the group.
- */
- mlx5_esw_for_each_vport(esw, i, vport) {
- if (!vport->enabled || !vport->qos.enabled ||
- vport->qos.group != group || vport->qos.max_rate)
+ esw_qos_sched_elem_config(node, node->max_rate, bw_share, extack);
+}
+
+static void esw_qos_normalize_min_rate(struct mlx5_eswitch *esw,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct list_head *nodes = parent ? &parent->children : &esw->qos.domain->nodes;
+ u32 divider = esw_qos_calculate_min_rate_divider(esw, parent);
+ struct mlx5_esw_sched_node *node;
+
+ list_for_each_entry(node, nodes, entry) {
+ if (node->esw != esw || node->ix == esw->qos.root_tsar_ix)
continue;
- err = esw_qos_vport_config(esw, vport, max_rate, vport->qos.bw_share, extack);
- if (err)
- NL_SET_ERR_MSG_MOD(extack,
- "E-Switch vport implicit rate limit setting failed");
- }
+ esw_qos_update_sched_node_bw_share(node, divider, extack);
- return err;
-}
+ if (list_empty(&node->children))
+ continue;
-static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
-{
- switch (type) {
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_TSAR;
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_VPORT;
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_VPORT_TC;
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
+ esw_qos_normalize_min_rate(node->esw, node, extack);
}
- return false;
}
-static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- u32 max_rate, u32 bw_share)
+static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node,
+ u32 min_rate, struct netlink_ext_ack *extack)
{
- u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
- struct mlx5_esw_rate_group *group = vport->qos.group;
- struct mlx5_core_dev *dev = esw->dev;
- u32 parent_tsar_ix;
- void *vport_elem;
- int err;
+ struct mlx5_eswitch *esw = node->esw;
- if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT))
- return -EOPNOTSUPP;
+ if (min_rate == node->min_rate)
+ return 0;
- parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
- MLX5_SET(scheduling_context, sched_ctx, element_type,
- SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
- vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
- MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
- MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_tsar_ix);
- MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
- MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
-
- err = mlx5_create_scheduling_element_cmd(dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- sched_ctx,
- &vport->qos.esw_tsar_ix);
- if (err) {
- esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
- vport->vport, err);
- return err;
- }
+ node->min_rate = min_rate;
+ esw_qos_normalize_min_rate(esw, node->parent, extack);
return 0;
}
-static int esw_qos_update_group_scheduling_element(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- struct mlx5_esw_rate_group *curr_group,
- struct mlx5_esw_rate_group *new_group,
- struct netlink_ext_ack *extack)
+static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id,
+ u32 *tsar_ix)
{
- u32 max_rate;
- int err;
+ u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ void *attr;
+
+ if (!mlx5_qos_element_type_supported(dev,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR,
+ SCHEDULING_HIERARCHY_E_SWITCH) ||
+ !mlx5_qos_tsar_type_supported(dev,
+ TSAR_ELEMENT_TSAR_TYPE_DWRR,
+ SCHEDULING_HIERARCHY_E_SWITCH))
+ return -EOPNOTSUPP;
- err = mlx5_destroy_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- vport->qos.esw_tsar_ix);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR vport element failed");
- return err;
- }
+ MLX5_SET(scheduling_context, tsar_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+ MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
+ parent_element_id);
+ attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+ MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR);
- vport->qos.group = new_group;
- max_rate = vport->qos.max_rate ? vport->qos.max_rate : new_group->max_rate;
+ return mlx5_create_scheduling_element_cmd(dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ tsar_ctx,
+ tsar_ix);
+}
- /* If vport is unlimited, we set the group's value.
- * Therefore, if the group is limited it will apply to
- * the vport as well and if not, vport will remain unlimited.
- */
- err = esw_qos_vport_create_sched_element(esw, vport, max_rate, vport->qos.bw_share);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "E-Switch vport group set failed.");
- goto err_sched;
- }
+static int esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node,
+ struct netlink_ext_ack *extack)
+{
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ struct mlx5_core_dev *dev = vport_node->esw->dev;
+ void *attr;
- return 0;
+ if (!mlx5_qos_element_type_supported(dev,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT,
+ SCHEDULING_HIERARCHY_E_SWITCH))
+ return -EOPNOTSUPP;
-err_sched:
- vport->qos.group = curr_group;
- max_rate = vport->qos.max_rate ? vport->qos.max_rate : curr_group->max_rate;
- if (esw_qos_vport_create_sched_element(esw, vport, max_rate, vport->qos.bw_share))
- esw_warn(esw->dev, "E-Switch vport group restore failed (vport=%d)\n",
- vport->vport);
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
+ attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
+ MLX5_SET(vport_element, attr, vport_number, vport_node->vport->vport);
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id, vport_node->parent->ix);
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw, vport_node->max_rate);
- return err;
+ return esw_qos_node_create_sched_element(vport_node, sched_ctx, extack);
}
-static int esw_qos_vport_update_group(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- struct mlx5_esw_rate_group *group,
- struct netlink_ext_ack *extack)
+static struct mlx5_esw_sched_node *
+__esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type type,
+ struct mlx5_esw_sched_node *parent)
{
- struct mlx5_esw_rate_group *new_group, *curr_group;
- int err;
+ struct list_head *parent_children;
+ struct mlx5_esw_sched_node *node;
- if (!vport->enabled)
- return -EINVAL;
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return NULL;
- curr_group = vport->qos.group;
- new_group = group ?: esw->qos.group0;
- if (curr_group == new_group)
- return 0;
+ node->esw = esw;
+ node->ix = tsar_ix;
+ node->type = type;
+ node->parent = parent;
+ INIT_LIST_HEAD(&node->children);
+ parent_children = parent ? &parent->children : &esw->qos.domain->nodes;
+ list_add_tail(&node->entry, parent_children);
- err = esw_qos_update_group_scheduling_element(esw, vport, curr_group, new_group, extack);
- if (err)
- return err;
+ return node;
+}
- /* Recalculate bw share weights of old and new groups */
- if (vport->qos.bw_share || new_group->bw_share) {
- esw_qos_normalize_vports_min_rate(esw, curr_group, extack);
- esw_qos_normalize_vports_min_rate(esw, new_group, extack);
- }
+static void __esw_qos_free_node(struct mlx5_esw_sched_node *node)
+{
+ list_del(&node->entry);
+ kfree(node);
+}
- return 0;
+static void esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack)
+{
+ esw_qos_node_destroy_sched_element(node, extack);
+ __esw_qos_free_node(node);
}
-static struct mlx5_esw_rate_group *
-__esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+static struct mlx5_esw_sched_node *
+__esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
{
- u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
- struct mlx5_esw_rate_group *group;
- __be32 *attr;
- u32 divider;
+ struct mlx5_esw_sched_node *node;
+ u32 tsar_ix;
int err;
- group = kzalloc(sizeof(*group), GFP_KERNEL);
- if (!group)
- return ERR_PTR(-ENOMEM);
-
- MLX5_SET(scheduling_context, tsar_ctx, element_type,
- SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
-
- attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
- *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
-
- MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
- esw->qos.root_tsar_ix);
- err = mlx5_create_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- tsar_ctx,
- &group->tsar_ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, esw->qos.root_tsar_ix, &tsar_ix);
if (err) {
- NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for group failed");
- goto err_sched_elem;
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for node failed");
+ return ERR_PTR(err);
}
- list_add_tail(&group->list, &esw->qos.groups);
-
- divider = esw_qos_calculate_min_rate_divider(esw, group, true);
- if (divider) {
- err = esw_qos_normalize_groups_min_rate(esw, divider, extack);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "E-Switch groups normalization failed");
- goto err_min_rate;
- }
+ node = __esw_qos_alloc_node(esw, tsar_ix, SCHED_NODE_TYPE_VPORTS_TSAR, parent);
+ if (!node) {
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch alloc node failed");
+ err = -ENOMEM;
+ goto err_alloc_node;
}
- trace_mlx5_esw_group_qos_create(esw->dev, group, group->tsar_ix);
- return group;
+ esw_qos_normalize_min_rate(esw, NULL, extack);
+ trace_mlx5_esw_node_qos_create(esw->dev, node, node->ix);
+
+ return node;
-err_min_rate:
- list_del(&group->list);
+err_alloc_node:
if (mlx5_destroy_scheduling_element_cmd(esw->dev,
SCHEDULING_HIERARCHY_E_SWITCH,
- group->tsar_ix))
- NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed");
-err_sched_elem:
- kfree(group);
+ tsar_ix))
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for node failed");
return ERR_PTR(err);
}
static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack);
static void esw_qos_put(struct mlx5_eswitch *esw);
-static struct mlx5_esw_rate_group *
-esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+static struct mlx5_esw_sched_node *
+esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
- struct mlx5_esw_rate_group *group;
+ struct mlx5_esw_sched_node *node;
int err;
+ esw_assert_qos_lock_held(esw);
if (!MLX5_CAP_QOS(esw->dev, log_esw_max_sched_depth))
return ERR_PTR(-EOPNOTSUPP);
@@ -509,96 +439,58 @@ esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
if (err)
return ERR_PTR(err);
- group = __esw_qos_create_rate_group(esw, extack);
- if (IS_ERR(group))
+ node = __esw_qos_create_vports_sched_node(esw, NULL, extack);
+ if (IS_ERR(node))
esw_qos_put(esw);
- return group;
+ return node;
}
-static int __esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
- struct mlx5_esw_rate_group *group,
- struct netlink_ext_ack *extack)
+static void __esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack)
{
- u32 divider;
- int err;
-
- list_del(&group->list);
-
- divider = esw_qos_calculate_min_rate_divider(esw, NULL, true);
- err = esw_qos_normalize_groups_min_rate(esw, divider, extack);
- if (err)
- NL_SET_ERR_MSG_MOD(extack, "E-Switch groups' normalization failed");
+ struct mlx5_eswitch *esw = node->esw;
- err = mlx5_destroy_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- group->tsar_ix);
- if (err)
- NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed");
-
- trace_mlx5_esw_group_qos_destroy(esw->dev, group, group->tsar_ix);
-
- kfree(group);
-
- return err;
-}
-
-static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
- struct mlx5_esw_rate_group *group,
- struct netlink_ext_ack *extack)
-{
- int err;
-
- err = __esw_qos_destroy_rate_group(esw, group, extack);
- esw_qos_put(esw);
-
- return err;
+ trace_mlx5_esw_node_qos_destroy(esw->dev, node, node->ix);
+ esw_qos_destroy_node(node, extack);
+ esw_qos_normalize_min_rate(esw, NULL, extack);
}
static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
- u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_core_dev *dev = esw->dev;
- __be32 *attr;
int err;
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
return -EOPNOTSUPP;
- if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR) ||
- !(MLX5_CAP_QOS(dev, esw_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
- return -EOPNOTSUPP;
-
- MLX5_SET(scheduling_context, tsar_ctx, element_type,
- SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
-
- attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
- *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
-
- err = mlx5_create_scheduling_element_cmd(dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- tsar_ctx,
- &esw->qos.root_tsar_ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, 0, &esw->qos.root_tsar_ix);
if (err) {
esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err);
return err;
}
- INIT_LIST_HEAD(&esw->qos.groups);
if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) {
- esw->qos.group0 = __esw_qos_create_rate_group(esw, extack);
- if (IS_ERR(esw->qos.group0)) {
- esw_warn(dev, "E-Switch create rate group 0 failed (%ld)\n",
- PTR_ERR(esw->qos.group0));
- err = PTR_ERR(esw->qos.group0);
- goto err_group0;
- }
+ esw->qos.node0 = __esw_qos_create_vports_sched_node(esw, NULL, extack);
+ } else {
+ /* The eswitch doesn't support scheduling nodes.
+ * Create a software-only node0 using the root TSAR to attach vport QoS to.
+ */
+ if (!__esw_qos_alloc_node(esw,
+ esw->qos.root_tsar_ix,
+ SCHED_NODE_TYPE_VPORTS_TSAR,
+ NULL))
+ esw->qos.node0 = ERR_PTR(-ENOMEM);
+ }
+ if (IS_ERR(esw->qos.node0)) {
+ err = PTR_ERR(esw->qos.node0);
+ esw_warn(dev, "E-Switch create rate node 0 failed (%d)\n", err);
+ goto err_node0;
}
refcount_set(&esw->qos.refcnt, 1);
return 0;
-err_group0:
+err_node0:
if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH,
esw->qos.root_tsar_ix))
esw_warn(esw->dev, "E-Switch destroy root TSAR failed.\n");
@@ -610,8 +502,11 @@ static void esw_qos_destroy(struct mlx5_eswitch *esw)
{
int err;
- if (esw->qos.group0)
- __esw_qos_destroy_rate_group(esw, esw->qos.group0, NULL);
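+ /* A software-only node0 aliases the root TSAR and has no scheduling element of its own to destroy. */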
+ if (esw->qos.node0->ix != esw->qos.root_tsar_ix)
+ __esw_qos_destroy_node(esw->qos.node0, NULL);
+ else
+ __esw_qos_free_node(esw->qos.node0);
+ esw->qos.node0 = NULL;
err = mlx5_destroy_scheduling_element_cmd(esw->dev,
SCHEDULING_HIERARCHY_E_SWITCH,
@@ -624,8 +519,7 @@ static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
int err = 0;
- lockdep_assert_held(&esw->state_lock);
-
+ esw_assert_qos_lock_held(esw);
if (!refcount_inc_not_zero(&esw->qos.refcnt)) {
/* esw_qos_create() set refcount to 1 only on success.
* No need to decrement on failure.
@@ -638,77 +532,169 @@ static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
static void esw_qos_put(struct mlx5_eswitch *esw)
{
- lockdep_assert_held(&esw->state_lock);
+ esw_assert_qos_lock_held(esw);
if (refcount_dec_and_test(&esw->qos.refcnt))
esw_qos_destroy(esw);
}
-static int esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack)
+static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ struct mlx5_esw_sched_node *parent = vport_node->parent;
+
+ esw_qos_node_destroy_sched_element(vport_node, extack);
+
+ vport_node->bw_share = 0;
+ list_del_init(&vport_node->entry);
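+ /* Re-normalize min rates among the remaining siblings. */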
+ esw_qos_normalize_min_rate(parent->esw, parent, extack);
+
+ trace_mlx5_esw_vport_qos_destroy(vport_node->esw->dev, vport);
+}
+
+static int esw_qos_vport_enable(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
{
int err;
- lockdep_assert_held(&esw->state_lock);
- if (vport->qos.enabled)
- return 0;
+ esw_assert_qos_lock_held(vport->dev->priv.eswitch);
+
+ esw_qos_node_set_parent(vport->qos.sched_node, parent);
+ err = esw_qos_vport_create_sched_element(vport->qos.sched_node, extack);
+ if (err)
+ return err;
+
+ esw_qos_normalize_min_rate(parent->esw, parent, extack);
+
+ return 0;
+}
+
+static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_type type,
+ struct mlx5_esw_sched_node *parent, u32 max_rate,
+ u32 min_rate, struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+ struct mlx5_esw_sched_node *sched_node;
+ int err;
+ esw_assert_qos_lock_held(esw);
err = esw_qos_get(esw, extack);
if (err)
return err;
- vport->qos.group = esw->qos.group0;
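+ /* Vports with no explicit parent are attached to node0. */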
+ parent = parent ?: esw->qos.node0;
+ sched_node = __esw_qos_alloc_node(parent->esw, 0, type, parent);
+ if (!sched_node)
+ return -ENOMEM;
- err = esw_qos_vport_create_sched_element(esw, vport, max_rate, bw_share);
+ sched_node->max_rate = max_rate;
+ sched_node->min_rate = min_rate;
+ sched_node->vport = vport;
+ vport->qos.sched_node = sched_node;
+ err = esw_qos_vport_enable(vport, parent, extack);
if (err)
- goto err_out;
+ esw_qos_put(esw);
- vport->qos.enabled = true;
- trace_mlx5_esw_vport_qos_create(vport, bw_share, max_rate);
+ return err;
+}
- return 0;
+void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport)
+{
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+ struct mlx5_esw_sched_node *parent;
-err_out:
+ lockdep_assert_held(&esw->state_lock);
+ esw_qos_lock(esw);
+ if (!vport->qos.sched_node)
+ goto unlock;
+
+ parent = vport->qos.sched_node->parent;
+ WARN(parent != esw->qos.node0, "Disabling QoS on port before detaching it from node");
+
+ esw_qos_vport_disable(vport, NULL);
+ mlx5_esw_qos_vport_qos_free(vport);
esw_qos_put(esw);
+unlock:
+ esw_qos_unlock(esw);
+}
- return err;
+static int mlx5_esw_qos_set_vport_max_rate(struct mlx5_vport *vport, u32 max_rate,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+
+ esw_assert_qos_lock_held(vport->dev->priv.eswitch);
+
+ if (!vport_node)
+ return mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, NULL, max_rate, 0,
+ extack);
+ else
+ return esw_qos_sched_elem_config(vport_node, max_rate, vport_node->bw_share,
+ extack);
}
-void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+static int mlx5_esw_qos_set_vport_min_rate(struct mlx5_vport *vport, u32 min_rate,
+ struct netlink_ext_ack *extack)
{
- int err;
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
- lockdep_assert_held(&esw->state_lock);
- if (!vport->qos.enabled)
- return;
- WARN(vport->qos.group && vport->qos.group != esw->qos.group0,
- "Disabling QoS on port before detaching it from group");
+ esw_assert_qos_lock_held(vport->dev->priv.eswitch);
- err = mlx5_destroy_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- vport->qos.esw_tsar_ix);
- if (err)
- esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
- vport->vport, err);
+ if (!vport_node)
+ return mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, NULL, 0, min_rate,
+ extack);
+ else
+ return esw_qos_set_node_min_rate(vport_node, min_rate, extack);
+}
- memset(&vport->qos, 0, sizeof(vport->qos));
- trace_mlx5_esw_vport_qos_destroy(vport);
+int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *vport, u32 max_rate, u32 min_rate)
+{
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+ int err;
- esw_qos_put(esw);
+ esw_qos_lock(esw);
+ err = mlx5_esw_qos_set_vport_min_rate(vport, min_rate, NULL);
+ if (!err)
+ err = mlx5_esw_qos_set_vport_max_rate(vport, max_rate, NULL);
+ esw_qos_unlock(esw);
+ return err;
+}
+
+bool mlx5_esw_qos_get_vport_rate(struct mlx5_vport *vport, u32 *max_rate, u32 *min_rate)
+{
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+ bool enabled;
+
+ esw_qos_lock(esw);
+ enabled = !!vport->qos.sched_node;
+ if (enabled) {
+ *max_rate = vport->qos.sched_node->max_rate;
+ *min_rate = vport->qos.sched_node->min_rate;
+ }
+ esw_qos_unlock(esw);
+ return enabled;
}
-int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- u32 max_rate, u32 min_rate)
+static int esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
{
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+ struct mlx5_esw_sched_node *curr_parent;
int err;
- lockdep_assert_held(&esw->state_lock);
- err = esw_qos_vport_enable(esw, vport, 0, 0, NULL);
- if (err)
- return err;
+ esw_assert_qos_lock_held(esw);
+ curr_parent = vport->qos.sched_node->parent;
+ parent = parent ?: esw->qos.node0;
+ if (curr_parent == parent)
+ return 0;
- err = esw_qos_set_vport_min_rate(esw, vport, min_rate, NULL);
- if (!err)
- err = esw_qos_set_vport_max_rate(esw, vport, max_rate, NULL);
+ esw_qos_vport_disable(vport, extack);
+
+ err = esw_qos_vport_enable(vport, parent, extack);
+ if (err) {
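+ /* On failure, try to re-attach the vport to its previous parent. */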
+ if (esw_qos_vport_enable(vport, curr_parent, NULL))
+ esw_warn(parent->esw->dev, "vport restore QoS failed (vport=%d)\n",
+ vport->vport);
+ }
return err;
}
@@ -779,10 +765,8 @@ static int mlx5_esw_qos_link_speed_verify(struct mlx5_core_dev *mdev,
int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps)
{
- u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_vport *vport;
u32 link_speed_max;
- u32 bitmask;
int err;
vport = mlx5_eswitch_get_vport(esw, vport_num);
@@ -800,21 +784,9 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
return err;
}
- mutex_lock(&esw->state_lock);
- if (!vport->qos.enabled) {
- /* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. */
- err = esw_qos_vport_enable(esw, vport, rate_mbps, vport->qos.bw_share, NULL);
- } else {
- MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
-
- bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
- err = mlx5_modify_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- ctx,
- vport->qos.esw_tsar_ix,
- bitmask);
- }
- mutex_unlock(&esw->state_lock);
+ esw_qos_lock(esw);
+ err = mlx5_esw_qos_set_vport_max_rate(vport, rate_mbps, NULL);
+ esw_qos_unlock(esw);
return err;
}
@@ -852,6 +824,20 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
return 0;
}
+int mlx5_esw_qos_init(struct mlx5_eswitch *esw)
+{
+ if (esw->qos.domain)
+ return 0; /* Nothing to change. */
+
+ return esw_qos_domain_init(esw);
+}
+
+void mlx5_esw_qos_cleanup(struct mlx5_eswitch *esw)
+{
+ if (esw->qos.domain)
+ esw_qos_domain_release(esw);
+}
+
/* Eswitch devlink rate API */
int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
@@ -869,14 +855,9 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void
if (err)
return err;
- mutex_lock(&esw->state_lock);
- err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
- if (err)
- goto unlock;
-
- err = esw_qos_set_vport_min_rate(esw, vport, tx_share, extack);
-unlock:
- mutex_unlock(&esw->state_lock);
+ esw_qos_lock(esw);
+ err = mlx5_esw_qos_set_vport_min_rate(vport, tx_share, extack);
+ esw_qos_unlock(esw);
return err;
}
@@ -895,57 +876,50 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *
if (err)
return err;
- mutex_lock(&esw->state_lock);
- err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
- if (err)
- goto unlock;
-
- err = esw_qos_set_vport_max_rate(esw, vport, tx_max, extack);
-unlock:
- mutex_unlock(&esw->state_lock);
+ esw_qos_lock(esw);
+ err = mlx5_esw_qos_set_vport_max_rate(vport, tx_max, extack);
+ esw_qos_unlock(esw);
return err;
}
int mlx5_esw_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv,
u64 tx_share, struct netlink_ext_ack *extack)
{
- struct mlx5_core_dev *dev = devlink_priv(rate_node->devlink);
- struct mlx5_eswitch *esw = dev->priv.eswitch;
- struct mlx5_esw_rate_group *group = priv;
+ struct mlx5_esw_sched_node *node = priv;
+ struct mlx5_eswitch *esw = node->esw;
int err;
- err = esw_qos_devlink_rate_to_mbps(dev, "tx_share", &tx_share, extack);
+ err = esw_qos_devlink_rate_to_mbps(esw->dev, "tx_share", &tx_share, extack);
if (err)
return err;
- mutex_lock(&esw->state_lock);
- err = esw_qos_set_group_min_rate(esw, group, tx_share, extack);
- mutex_unlock(&esw->state_lock);
+ esw_qos_lock(esw);
+ err = esw_qos_set_node_min_rate(node, tx_share, extack);
+ esw_qos_unlock(esw);
return err;
}
int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv,
u64 tx_max, struct netlink_ext_ack *extack)
{
- struct mlx5_core_dev *dev = devlink_priv(rate_node->devlink);
- struct mlx5_eswitch *esw = dev->priv.eswitch;
- struct mlx5_esw_rate_group *group = priv;
+ struct mlx5_esw_sched_node *node = priv;
+ struct mlx5_eswitch *esw = node->esw;
int err;
- err = esw_qos_devlink_rate_to_mbps(dev, "tx_max", &tx_max, extack);
+ err = esw_qos_devlink_rate_to_mbps(esw->dev, "tx_max", &tx_max, extack);
if (err)
return err;
- mutex_lock(&esw->state_lock);
- err = esw_qos_set_group_max_rate(esw, group, tx_max, extack);
- mutex_unlock(&esw->state_lock);
+ esw_qos_lock(esw);
+ err = esw_qos_sched_elem_config(node, tx_max, node->bw_share, extack);
+ esw_qos_unlock(esw);
return err;
}
int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
struct netlink_ext_ack *extack)
{
- struct mlx5_esw_rate_group *group;
+ struct mlx5_esw_sched_node *node;
struct mlx5_eswitch *esw;
int err = 0;
@@ -953,7 +927,7 @@ int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
if (IS_ERR(esw))
return PTR_ERR(esw);
- mutex_lock(&esw->state_lock);
+ esw_qos_lock(esw);
if (esw->mode != MLX5_ESWITCH_OFFLOADS) {
NL_SET_ERR_MSG_MOD(extack,
"Rate node creation supported only in switchdev mode");
@@ -961,51 +935,48 @@ int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
goto unlock;
}
- group = esw_qos_create_rate_group(esw, extack);
- if (IS_ERR(group)) {
- err = PTR_ERR(group);
+ node = esw_qos_create_vports_sched_node(esw, extack);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
goto unlock;
}
- *priv = group;
+ *priv = node;
unlock:
- mutex_unlock(&esw->state_lock);
+ esw_qos_unlock(esw);
return err;
}
int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
struct netlink_ext_ack *extack)
{
- struct mlx5_esw_rate_group *group = priv;
- struct mlx5_eswitch *esw;
- int err;
+ struct mlx5_esw_sched_node *node = priv;
+ struct mlx5_eswitch *esw = node->esw;
- esw = mlx5_devlink_eswitch_get(rate_node->devlink);
- if (IS_ERR(esw))
- return PTR_ERR(esw);
-
- mutex_lock(&esw->state_lock);
- err = esw_qos_destroy_rate_group(esw, group, extack);
- mutex_unlock(&esw->state_lock);
- return err;
+ esw_qos_lock(esw);
+ __esw_qos_destroy_node(node, extack);
+ esw_qos_put(esw);
+ esw_qos_unlock(esw);
+ return 0;
}
-int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- struct mlx5_esw_rate_group *group,
- struct netlink_ext_ack *extack)
+int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
{
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
int err = 0;
- mutex_lock(&esw->state_lock);
- if (!vport->qos.enabled && !group)
- goto unlock;
+ if (parent && parent->esw != esw) {
+ NL_SET_ERR_MSG_MOD(extack, "Cross E-Switch scheduling is not supported");
+ return -EOPNOTSUPP;
+ }
- err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
- if (!err)
- err = esw_qos_vport_update_group(esw, vport, group, extack);
-unlock:
- mutex_unlock(&esw->state_lock);
+ esw_qos_lock(esw);
+ if (!vport->qos.sched_node && parent)
+ err = mlx5_esw_qos_vport_enable(vport, SCHED_NODE_TYPE_VPORT, parent, 0, 0, extack);
+ else if (vport->qos.sched_node)
+ err = esw_qos_vport_update_parent(vport, parent, extack);
+ esw_qos_unlock(esw);
return err;
}
@@ -1014,13 +985,12 @@ int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate,
void *priv, void *parent_priv,
struct netlink_ext_ack *extack)
{
- struct mlx5_esw_rate_group *group;
+ struct mlx5_esw_sched_node *node;
struct mlx5_vport *vport = priv;
if (!parent)
- return mlx5_esw_qos_vport_update_group(vport->dev->priv.eswitch,
- vport, NULL, extack);
+ return mlx5_esw_qos_vport_update_parent(vport, NULL, extack);
- group = parent_priv;
- return mlx5_esw_qos_vport_update_group(vport->dev->priv.eswitch, vport, group, extack);
+ node = parent_priv;
+ return mlx5_esw_qos_vport_update_parent(vport, node, extack);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
index 0141e9d52037..6eb8f6a648c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
@@ -6,9 +6,16 @@
#ifdef CONFIG_MLX5_ESWITCH
-int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
- u32 max_rate, u32 min_rate);
-void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+int mlx5_esw_qos_init(struct mlx5_eswitch *esw);
+void mlx5_esw_qos_cleanup(struct mlx5_eswitch *esw);
+
+int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *evport, u32 max_rate, u32 min_rate);
+bool mlx5_esw_qos_get_vport_rate(struct mlx5_vport *vport, u32 *max_rate, u32 *min_rate);
+void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport);
+
+void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport);
+u32 mlx5_esw_qos_vport_get_sched_elem_ix(const struct mlx5_vport *vport);
+struct mlx5_esw_sched_node *mlx5_esw_qos_vport_get_parent(const struct mlx5_vport *vport);
int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
u64 tx_share, struct netlink_ext_ack *extack);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 7aef30dbd82d..7fb8a3381f84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -894,7 +894,7 @@ static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
vport_num, 1,
MLX5_VPORT_ADMIN_STATE_DOWN);
- mlx5_esw_qos_vport_disable(esw, vport);
+ mlx5_esw_qos_vport_disable(vport);
esw_vport_cleanup_acl(esw, vport);
}
@@ -1061,7 +1061,7 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
unsigned long i;
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
- memset(&vport->qos, 0, sizeof(vport->qos));
+ mlx5_esw_qos_vport_qos_free(vport);
memset(&vport->info, 0, sizeof(vport->info));
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
}
@@ -1073,7 +1073,7 @@ static void mlx5_eswitch_clear_ec_vf_vports_info(struct mlx5_eswitch *esw)
unsigned long i;
mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
- memset(&vport->qos, 0, sizeof(vport->qos));
+ mlx5_esw_qos_vport_qos_free(vport);
memset(&vport->info, 0, sizeof(vport->info));
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
}
@@ -1481,15 +1481,18 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
mlx5_eq_notifier_register(esw->dev, &esw->nb);
+ err = mlx5_esw_qos_init(esw);
+ if (err)
+ goto err_esw_init;
+
if (esw->mode == MLX5_ESWITCH_LEGACY) {
err = esw_legacy_enable(esw);
} else {
- mlx5_rescan_drivers(esw->dev);
err = esw_offloads_enable(esw);
}
if (err)
- goto err_esw_enable;
+ goto err_esw_init;
esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
@@ -1503,7 +1506,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
return 0;
-err_esw_enable:
+err_esw_init:
mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
mlx5_esw_acls_ns_cleanup(esw);
return err;
@@ -1875,6 +1878,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
if (err)
goto reps_err;
+ esw->mode = MLX5_ESWITCH_LEGACY;
+ err = mlx5_esw_qos_init(esw);
+ if (err)
+ goto reps_err;
+
mutex_init(&esw->offloads.encap_tbl_lock);
hash_init(esw->offloads.encap_tbl);
mutex_init(&esw->offloads.decap_tbl_lock);
@@ -1888,7 +1896,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
refcount_set(&esw->qos.refcnt, 0);
esw->enabled_vports = 0;
- esw->mode = MLX5_ESWITCH_LEGACY;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
@@ -1925,6 +1932,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw_info(esw->dev, "cleanup\n");
+ mlx5_esw_qos_cleanup(esw);
destroy_workqueue(esw->work_queue);
WARN_ON(refcount_read(&esw->qos.refcnt));
mutex_destroy(&esw->state_lock);
@@ -2061,6 +2069,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
u16 vport, struct ifla_vf_info *ivi)
{
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+ u32 max_rate, min_rate;
if (IS_ERR(evport))
return PTR_ERR(evport);
@@ -2075,9 +2084,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
ivi->qos = evport->info.qos;
ivi->spoofchk = evport->info.spoofchk;
ivi->trusted = evport->info.trusted;
- if (evport->qos.enabled) {
- ivi->min_tx_rate = evport->qos.min_rate;
- ivi->max_tx_rate = evport->qos.max_rate;
+
+ if (mlx5_esw_qos_get_vport_rate(evport, &max_rate, &min_rate)) {
+ ivi->max_tx_rate = max_rate;
+ ivi->min_tx_rate = min_rate;
}
mutex_unlock(&esw->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index f44b4c7ebcfd..a83d41121db6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -212,13 +212,10 @@ struct mlx5_vport {
struct mlx5_vport_info info;
+ /* Protected by the E-Switch qos domain lock. */
struct {
- bool enabled;
- u32 esw_tsar_ix;
- u32 bw_share;
- u32 min_rate;
- u32 max_rate;
- struct mlx5_esw_rate_group *group;
+ /* Vport scheduling element node. */
+ struct mlx5_esw_sched_node *sched_node;
} qos;
u16 vport;
@@ -333,6 +330,7 @@ enum {
};
struct dentry;
+struct mlx5_qos_domain;
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
@@ -359,15 +357,17 @@ struct mlx5_eswitch {
struct rw_semaphore mode_lock;
atomic64_t user_count;
+ /* Protected by the E-Switch qos domain lock. */
struct {
- u32 root_tsar_ix;
- struct mlx5_esw_rate_group *group0;
- struct list_head groups; /* Protected by esw->state_lock */
-
- /* Protected by esw->state_lock.
- * Initially 0, meaning no QoS users and QoS is disabled.
- */
+ /* Initially 0, meaning no QoS users and QoS is disabled. */
refcount_t refcnt;
+ u32 root_tsar_ix;
+ struct mlx5_qos_domain *domain;
+ /* Contains all vports with QoS enabled but no explicit node.
+ * Cannot be NULL if QoS is enabled, but may be a fake node
+ * referencing the root TSAR if the esw doesn't support nodes.
+ */
+ struct mlx5_esw_sched_node *node0;
} qos;
struct mlx5_esw_bridge_offloads *br_offloads;
@@ -427,10 +427,8 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
u32 max_rate, u32 min_rate);
-int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- struct mlx5_esw_rate_group *group,
- struct netlink_ext_ack *extack);
+int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
@@ -806,7 +804,7 @@ int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5
void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
-void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_vport *vport);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 8cf61ae8b89d..d6ff2dc4c19e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2332,18 +2332,35 @@ out_free:
return err;
}
+static void esw_mode_change(struct mlx5_eswitch *esw, u16 mode)
+{
+ mlx5_devcom_comp_lock(esw->dev->priv.hca_devcom_comp);
+
+ if (esw->dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV) {
+ esw->mode = mode;
+ mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp);
+ return;
+ }
+
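+ /* Unload the IB auxiliary device before switching modes, then clear the
+ * flag and rescan so it is re-created for the new eswitch mode.
+ */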
+ esw->dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(esw->dev);
+ esw->mode = mode;
+ esw->dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(esw->dev);
+ mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp);
+}
+
static int esw_offloads_start(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
int err;
- esw->mode = MLX5_ESWITCH_OFFLOADS;
+ esw_mode_change(esw, MLX5_ESWITCH_OFFLOADS);
err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch to offloads");
- esw->mode = MLX5_ESWITCH_LEGACY;
- mlx5_rescan_drivers(esw->dev);
+ esw_mode_change(esw, MLX5_ESWITCH_LEGACY);
return err;
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
@@ -2620,7 +2637,7 @@ int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vpor
return err;
load_err:
- mlx5_esw_offloads_devlink_port_unregister(esw, vport);
+ mlx5_esw_offloads_devlink_port_unregister(vport);
return err;
}
@@ -2631,7 +2648,7 @@ void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *v
mlx5_esw_offloads_rep_unload(esw, vport->vport);
- mlx5_esw_offloads_devlink_port_unregister(esw, vport);
+ mlx5_esw_offloads_devlink_port_unregister(vport);
}
static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
@@ -3587,7 +3604,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
{
int err;
- esw->mode = MLX5_ESWITCH_LEGACY;
+ esw_mode_change(esw, MLX5_ESWITCH_LEGACY);
/* If changing from switchdev to legacy mode without sriov enabled,
* no need to create legacy fdb.
@@ -3773,7 +3790,6 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
err = esw_offloads_start(esw, extack);
} else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
err = esw_offloads_stop(esw, extack);
- mlx5_rescan_drivers(esw->dev);
} else {
err = -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 6e4f8aaf8d2f..2eabfcc247c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -3698,6 +3698,7 @@ void mlx5_fs_core_free(struct mlx5_core_dev *dev)
int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering;
+ char name[80];
int err = 0;
err = mlx5_init_fc_stats(dev);
@@ -3722,10 +3723,12 @@ int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
else
steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
- steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
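+ /* Prefix the cache names with the device name so each device gets its own caches. */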
+ snprintf(name, sizeof(name), "%s-mlx5_fs_fgs", dev_name(dev->device));
+ steering->fgs_cache = kmem_cache_create(name,
sizeof(struct mlx5_flow_group), 0,
0, NULL);
- steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
+ snprintf(name, sizeof(name), "%s-mlx5_fs_ftes", dev_name(dev->device));
+ steering->ftes_cache = kmem_cache_create(name, sizeof(struct fs_fte), 0,
0, NULL);
if (!steering->ftes_cache || !steering->fgs_cache) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 964937f17cf5..bad2df0715ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -37,7 +37,7 @@
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <linux/llist.h>
-#include <steering/fs_dr.h>
+#include <steering/sws/fs_dr.h>
#define FDB_TC_MAX_CHAIN 3
#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
@@ -63,7 +63,7 @@ struct mlx5_modify_hdr {
enum mlx5_flow_namespace_type ns_type;
enum mlx5_flow_resource_owner owner;
union {
- struct mlx5_fs_dr_action action;
+ struct mlx5_fs_dr_action fs_dr_action;
u32 id;
};
};
@@ -73,7 +73,7 @@ struct mlx5_pkt_reformat {
int reformat_type; /* from mlx5_ifc */
enum mlx5_flow_resource_owner owner;
union {
- struct mlx5_fs_dr_action action;
+ struct mlx5_fs_dr_action fs_dr_action;
u32 id;
};
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 0c26d707eed2..62d0c689796b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -32,13 +32,11 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
-#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
-#define MLX5_FC_BULK_QUERY_ALLOC_PERIOD msecs_to_jiffies(180 * 1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
#define MLX5_INIT_COUNTERS_BULK 8
@@ -52,21 +50,37 @@ struct mlx5_fc_cache {
};
struct mlx5_fc {
- struct list_head list;
- struct llist_node addlist;
- struct llist_node dellist;
-
- /* last{packets,bytes} members are used when calculating the delta since
- * last reading
- */
+ u32 id;
+ bool aging;
+ struct mlx5_fc_bulk *bulk;
+ struct mlx5_fc_cache cache;
+ /* last{packets,bytes} are used for calculating deltas since last reading. */
u64 lastpackets;
u64 lastbytes;
+};
- struct mlx5_fc_bulk *bulk;
- u32 id;
- bool aging;
+struct mlx5_fc_pool {
+ struct mlx5_core_dev *dev;
+ struct mutex pool_lock; /* protects pool lists */
+ struct list_head fully_used;
+ struct list_head partially_used;
+ struct list_head unused;
+ int available_fcs;
+ int used_fcs;
+ int threshold;
+};
- struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
+struct mlx5_fc_stats {
+ struct xarray counters;
+
+ struct workqueue_struct *wq;
+ struct delayed_work work;
+ unsigned long sampling_interval; /* jiffies */
+ u32 *bulk_query_out;
+ int bulk_query_len;
+ bool bulk_query_alloc_failed;
+ unsigned long next_bulk_query_alloc;
+ struct mlx5_fc_pool fc_pool;
};
static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev);
@@ -74,78 +88,6 @@ static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool);
static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool);
static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc);
-/* locking scheme:
- *
- * It is the responsibility of the user to prevent concurrent calls or bad
- * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference
- * to struct mlx5_fc.
- * e.g en_tc.c is protected by RTNL lock of its caller, and will never call a
- * dump (access to struct mlx5_fc) after a counter is destroyed.
- *
- * access to counter list:
- * - create (user context)
- * - mlx5_fc_create() only adds to an addlist to be used by
- * mlx5_fc_stats_work(). addlist is a lockless single linked list
- * that doesn't require any additional synchronization when adding single
- * node.
- * - spawn thread to do the actual destroy
- *
- * - destroy (user context)
- * - add a counter to lockless dellist
- * - spawn thread to do the actual del
- *
- * - dump (user context)
- * user should not call dump after destroy
- *
- * - query (single thread workqueue context)
- * destroy/dump - no conflict (see destroy)
- * query/dump - packets and bytes might be inconsistent (since update is not
- * atomic)
- * query/create - no conflict (see create)
- * since every create/destroy spawn the work, only after necessary time has
- * elapsed, the thread will actually query the hardware.
- */
-
-static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev,
- u32 id)
-{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- unsigned long next_id = (unsigned long)id + 1;
- struct mlx5_fc *counter;
- unsigned long tmp;
-
- rcu_read_lock();
- /* skip counters that are in idr, but not yet in counters list */
- idr_for_each_entry_continue_ul(&fc_stats->counters_idr,
- counter, tmp, next_id) {
- if (!list_empty(&counter->list))
- break;
- }
- rcu_read_unlock();
-
- return counter ? &counter->list : &fc_stats->counters;
-}
-
-static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev,
- struct mlx5_fc *counter)
-{
- struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id);
-
- list_add_tail(&counter->list, next);
-}
-
-static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
- struct mlx5_fc *counter)
-{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
-
- list_del(&counter->list);
-
- spin_lock(&fc_stats->counters_idr_lock);
- WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
- spin_unlock(&fc_stats->counters_idr_lock);
-}
-
static int get_init_bulk_query_len(struct mlx5_core_dev *dev)
{
return min_t(int, MLX5_INIT_COUNTERS_BULK,
@@ -174,47 +116,64 @@ static void update_counter_cache(int index, u32 *bulk_raw_data,
cache->lastuse = jiffies;
}
-static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
- struct mlx5_fc *first,
- u32 last_id)
+/* Synchronization notes
+ *
+ * Access to counter array:
+ * - create - mlx5_fc_create() (user context)
+ * - inserts the counter into the xarray.
+ *
+ * - destroy - mlx5_fc_destroy() (user context)
+ * - erases the counter from the xarray and releases it.
+ *
+ * - query - mlx5_fc_query(), mlx5_fc_query_cached{,_raw}() (user context)
+ * - user should not access a counter after destroy.
+ *
+ * - bulk query (single thread workqueue context)
+ * - create: query relies on 'lastuse' to avoid updating counters added
+ * around the same time as the current bulk cmd.
+ * - destroy: destroyed counters will not be accessed, even if they are
+ * destroyed during a bulk query command.
+ */
+static void mlx5_fc_stats_query_all_counters(struct mlx5_core_dev *dev)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- bool query_more_counters = (first->id <= last_id);
- int cur_bulk_len = fc_stats->bulk_query_len;
+ struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
+ u32 bulk_len = fc_stats->bulk_query_len;
+ XA_STATE(xas, &fc_stats->counters, 0);
u32 *data = fc_stats->bulk_query_out;
- struct mlx5_fc *counter = first;
+ struct mlx5_fc *counter;
+ u32 last_bulk_id = 0;
+ u64 bulk_query_time;
u32 bulk_base_id;
- int bulk_len;
int err;
- while (query_more_counters) {
- /* first id must be aligned to 4 when using bulk query */
- bulk_base_id = counter->id & ~0x3;
-
- /* number of counters to query inc. the last counter */
- bulk_len = min_t(int, cur_bulk_len,
- ALIGN(last_id - bulk_base_id + 1, 4));
-
- err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
- data);
- if (err) {
- mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
- return;
- }
- query_more_counters = false;
-
- list_for_each_entry_from(counter, &fc_stats->counters, list) {
- int counter_index = counter->id - bulk_base_id;
- struct mlx5_fc_cache *cache = &counter->cache;
-
- if (counter->id >= bulk_base_id + bulk_len) {
- query_more_counters = true;
- break;
+ xas_lock(&xas);
+ xas_for_each(&xas, counter, U32_MAX) {
+ if (xas_retry(&xas, counter))
+ continue;
+ if (unlikely(counter->id >= last_bulk_id)) {
+ /* Start new bulk query. */
+ /* First id must be aligned to 4 when using bulk query. */
+ bulk_base_id = counter->id & ~0x3;
+ last_bulk_id = bulk_base_id + bulk_len;
+ /* The lock is released while querying the hw and reacquired after. */
+ xas_unlock(&xas);
+ /* The same id needs to be processed again in the next loop iteration. */
+ xas_reset(&xas);
+ bulk_query_time = jiffies;
+ err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len, data);
+ if (err) {
+ mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
+ return;
}
-
- update_counter_cache(counter_index, data, cache);
+ xas_lock(&xas);
+ continue;
}
+ /* Do not update counters added after the bulk query was started. */
+ if (time_after64(bulk_query_time, counter->cache.lastuse))
+ update_counter_cache(counter->id - bulk_base_id, data,
+ &counter->cache);
}
+ xas_unlock(&xas);
}
static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
@@ -225,7 +184,7 @@ static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
if (counter->bulk)
mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
@@ -233,85 +192,55 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
mlx5_fc_free(dev, counter);
}
-static void mlx5_fc_stats_bulk_query_size_increase(struct mlx5_core_dev *dev)
+static void mlx5_fc_stats_bulk_query_buf_realloc(struct mlx5_core_dev *dev,
+ int bulk_query_len)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- int max_bulk_len = get_max_bulk_query_len(dev);
- unsigned long now = jiffies;
+ struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
u32 *bulk_query_out_tmp;
- int max_out_len;
-
- if (fc_stats->bulk_query_alloc_failed &&
- time_before(now, fc_stats->next_bulk_query_alloc))
- return;
+ int out_len;
- max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
- bulk_query_out_tmp = kzalloc(max_out_len, GFP_KERNEL);
+ out_len = mlx5_cmd_fc_get_bulk_query_out_len(bulk_query_len);
+ bulk_query_out_tmp = kvzalloc(out_len, GFP_KERNEL);
if (!bulk_query_out_tmp) {
mlx5_core_warn_once(dev,
- "Can't increase flow counters bulk query buffer size, insufficient memory, bulk_size(%d)\n",
- max_bulk_len);
- fc_stats->bulk_query_alloc_failed = true;
- fc_stats->next_bulk_query_alloc =
- now + MLX5_FC_BULK_QUERY_ALLOC_PERIOD;
+ "Can't increase flow counters bulk query buffer size, alloc failed, bulk_query_len(%d)\n",
+ bulk_query_len);
return;
}
- kfree(fc_stats->bulk_query_out);
+ kvfree(fc_stats->bulk_query_out);
fc_stats->bulk_query_out = bulk_query_out_tmp;
- fc_stats->bulk_query_len = max_bulk_len;
- if (fc_stats->bulk_query_alloc_failed) {
- mlx5_core_info(dev,
- "Flow counters bulk query buffer size increased, bulk_size(%d)\n",
- max_bulk_len);
- fc_stats->bulk_query_alloc_failed = false;
- }
+ fc_stats->bulk_query_len = bulk_query_len;
+ mlx5_core_info(dev,
+ "Flow counters bulk query buffer size increased, bulk_query_len(%d)\n",
+ bulk_query_len);
}
-static void mlx5_fc_stats_work(struct work_struct *work)
+static int mlx5_fc_num_counters(struct mlx5_fc_stats *fc_stats)
{
- struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
- priv.fc_stats.work.work);
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- /* Take dellist first to ensure that counters cannot be deleted before
- * they are inserted.
- */
- struct llist_node *dellist = llist_del_all(&fc_stats->dellist);
- struct llist_node *addlist = llist_del_all(&fc_stats->addlist);
- struct mlx5_fc *counter = NULL, *last = NULL, *tmp;
- unsigned long now = jiffies;
-
- if (addlist || !list_empty(&fc_stats->counters))
- queue_delayed_work(fc_stats->wq, &fc_stats->work,
- fc_stats->sampling_interval);
-
- llist_for_each_entry(counter, addlist, addlist) {
- mlx5_fc_stats_insert(dev, counter);
- fc_stats->num_counters++;
- }
-
- llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
- mlx5_fc_stats_remove(dev, counter);
+ struct mlx5_fc *counter;
+ int num_counters = 0;
+ unsigned long id;
- mlx5_fc_release(dev, counter);
- fc_stats->num_counters--;
- }
+ xa_for_each(&fc_stats->counters, id, counter)
+ num_counters++;
+ return num_counters;
+}
- if (fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
- fc_stats->num_counters > get_init_bulk_query_len(dev))
- mlx5_fc_stats_bulk_query_size_increase(dev);
+static void mlx5_fc_stats_work(struct work_struct *work)
+{
+ struct mlx5_fc_stats *fc_stats = container_of(work, struct mlx5_fc_stats,
+ work.work);
+ struct mlx5_core_dev *dev = fc_stats->fc_pool.dev;
- if (time_before(now, fc_stats->next_query) ||
- list_empty(&fc_stats->counters))
- return;
- last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list);
+ queue_delayed_work(fc_stats->wq, &fc_stats->work, fc_stats->sampling_interval);
- counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
- list);
- if (counter)
- mlx5_fc_stats_query_counter_range(dev, counter, last->id);
+ /* Grow the bulk query buffer to its maximum size once enough counters are in use. */
+ if (unlikely(fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
+ mlx5_fc_num_counters(fc_stats) > get_init_bulk_query_len(dev)))
+ mlx5_fc_stats_bulk_query_buf_realloc(dev, get_max_bulk_query_len(dev));
- fc_stats->next_query = now + fc_stats->sampling_interval;
+ mlx5_fc_stats_query_all_counters(dev);
}
static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
@@ -334,7 +263,7 @@ static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
struct mlx5_fc *counter;
if (aging && MLX5_CAP_GEN(dev, flow_counter_bulk_alloc) != 0) {
@@ -346,16 +275,15 @@ static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
return mlx5_fc_single_alloc(dev);
}
-struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging)
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging);
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
int err;
if (IS_ERR(counter))
return counter;
- INIT_LIST_HEAD(&counter->list);
counter->aging = aging;
if (aging) {
@@ -365,18 +293,9 @@ struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging)
counter->lastbytes = counter->cache.bytes;
counter->lastpackets = counter->cache.packets;
- idr_preload(GFP_KERNEL);
- spin_lock(&fc_stats->counters_idr_lock);
-
- err = idr_alloc_u32(&fc_stats->counters_idr, counter, &id, id,
- GFP_NOWAIT);
-
- spin_unlock(&fc_stats->counters_idr_lock);
- idr_preload_end();
- if (err)
+ err = xa_err(xa_store(&fc_stats->counters, id, counter, GFP_KERNEL));
+ if (err != 0)
goto err_out_alloc;
-
- llist_add(&counter->addlist, &fc_stats->addlist);
}
return counter;
@@ -385,16 +304,6 @@ err_out_alloc:
mlx5_fc_release(dev, counter);
return ERR_PTR(err);
}
-
-struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
-{
- struct mlx5_fc *counter = mlx5_fc_create_ex(dev, aging);
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
-
- if (aging)
- mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
- return counter;
-}
EXPORT_SYMBOL(mlx5_fc_create);
u32 mlx5_fc_id(struct mlx5_fc *counter)
@@ -405,39 +314,32 @@ EXPORT_SYMBOL(mlx5_fc_id);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
if (!counter)
return;
- if (counter->aging) {
- llist_add(&counter->dellist, &fc_stats->dellist);
- mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
- return;
- }
-
+ if (counter->aging)
+ xa_erase(&fc_stats->counters, counter->id);
mlx5_fc_release(dev, counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- int init_bulk_len;
- int init_out_len;
+ struct mlx5_fc_stats *fc_stats;
+
+ fc_stats = kzalloc(sizeof(*fc_stats), GFP_KERNEL);
+ if (!fc_stats)
+ return -ENOMEM;
+ dev->priv.fc_stats = fc_stats;
- spin_lock_init(&fc_stats->counters_idr_lock);
- idr_init(&fc_stats->counters_idr);
- INIT_LIST_HEAD(&fc_stats->counters);
- init_llist_head(&fc_stats->addlist);
- init_llist_head(&fc_stats->dellist);
+ xa_init(&fc_stats->counters);
- init_bulk_len = get_init_bulk_query_len(dev);
- init_out_len = mlx5_cmd_fc_get_bulk_query_out_len(init_bulk_len);
- fc_stats->bulk_query_out = kzalloc(init_out_len, GFP_KERNEL);
+ /* Allocate initial (small) bulk query buffer. */
+ mlx5_fc_stats_bulk_query_buf_realloc(dev, get_init_bulk_query_len(dev));
if (!fc_stats->bulk_query_out)
- return -ENOMEM;
- fc_stats->bulk_query_len = init_bulk_len;
+ goto err_bulk;
fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
if (!fc_stats->wq)
@@ -447,34 +349,35 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
mlx5_fc_pool_init(&fc_stats->fc_pool, dev);
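+ /* Kick off the periodic stats work; it re-arms itself each sampling interval. */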
+ queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD);
return 0;
err_wq_create:
- kfree(fc_stats->bulk_query_out);
+ kvfree(fc_stats->bulk_query_out);
+err_bulk:
+ kfree(fc_stats);
return -ENOMEM;
}
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- struct llist_node *tmplist;
+ struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
struct mlx5_fc *counter;
- struct mlx5_fc *tmp;
-
- cancel_delayed_work_sync(&dev->priv.fc_stats.work);
- destroy_workqueue(dev->priv.fc_stats.wq);
- dev->priv.fc_stats.wq = NULL;
+ unsigned long id;
- tmplist = llist_del_all(&fc_stats->addlist);
- llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
- mlx5_fc_release(dev, counter);
+ cancel_delayed_work_sync(&fc_stats->work);
+ destroy_workqueue(fc_stats->wq);
+ fc_stats->wq = NULL;
- list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list)
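+ /* Release any counters still alive at teardown. */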
+ xa_for_each(&fc_stats->counters, id, counter) {
+ xa_erase(&fc_stats->counters, id);
mlx5_fc_release(dev, counter);
+ }
+ xa_destroy(&fc_stats->counters);
mlx5_fc_pool_cleanup(&fc_stats->fc_pool);
- idr_destroy(&fc_stats->counters_idr);
- kfree(fc_stats->bulk_query_out);
+ kvfree(fc_stats->bulk_query_out);
+ kfree(fc_stats);
}
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
@@ -518,7 +421,7 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
struct delayed_work *dwork,
unsigned long delay)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
queue_delayed_work(fc_stats->wq, dwork, delay);
}
@@ -526,7 +429,7 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
unsigned long interval)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
fc_stats->sampling_interval = min_t(unsigned long, interval,
fc_stats->sampling_interval);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 4f55e55ecb55..566710d34a7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -35,6 +35,7 @@ struct mlx5_fw_reset {
enum {
MLX5_FW_RST_STATE_IDLE = 0,
MLX5_FW_RST_STATE_TOGGLE_REQ = 4,
+ MLX5_FW_RST_STATE_DROP_MODE = 5,
};
enum {
@@ -616,6 +617,7 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work)
struct mlx5_fw_reset *fw_reset;
struct mlx5_core_dev *dev;
unsigned long timeout;
+ int poll_freq = 20;
bool reset_action;
u8 rst_state;
int err;
@@ -651,7 +653,12 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work)
reset_action = true;
break;
}
- msleep(20);
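+ /* Acknowledge drop mode and slow the state polling to once per second. */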
+ if (rst_state == MLX5_FW_RST_STATE_DROP_MODE) {
+ mlx5_core_info(dev, "Sync Reset Drop mode ack\n");
+ mlx5_set_fw_rst_ack(dev);
+ poll_freq = 1000;
+ }
+ msleep(poll_freq);
} while (!time_after(jiffies, timeout));
if (!reset_action) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index b306ae79bf97..4822d01123b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -402,9 +402,7 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
- struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
- unsigned long flags;
u64 cycles, ns;
mdev = container_of(clock, struct mlx5_core_dev, clock);
@@ -413,10 +411,8 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
goto out;
}
- write_seqlock_irqsave(&clock->lock, flags);
cycles = mlx5_read_time(mdev, sts, false);
- ns = timecounter_cyc2time(&timer->tc, cycles);
- write_sequnlock_irqrestore(&clock->lock, flags);
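+ /* mlx5_timecounter_cyc2time() takes the clock seqlock read side internally. */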
+ ns = mlx5_timecounter_cyc2time(clock, cycles);
*ts = ns_to_timespec64(ns);
out:
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index 4b7f7131c560..b1edc71ffc6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -72,7 +72,7 @@ static inline void eq_update_ci(struct mlx5_eq *eq, int arm)
__raw_writel((__force u32)cpu_to_be32(val), addr);
/* We still want ordering, just not swabbing, so add a barrier */
- mb();
+ wmb();
}
int mlx5_eq_table_init(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h
index 452d0df339ac..404f3d4b6380 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h
@@ -4,8 +4,8 @@
#ifndef __MLX5_LIB_SMFS_H__
#define __MLX5_LIB_SMFS_H__
-#include "steering/mlx5dr.h"
-#include "steering/dr_types.h"
+#include "steering/sws/mlx5dr.h"
+#include "steering/sws/dr_types.h"
struct mlx5dr_matcher *
mlx5_smfs_matcher_create(struct mlx5dr_table *table, u32 priority, struct mlx5_flow_spec *spec);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 62c770b0eaa8..99de67c3aa74 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -224,6 +224,8 @@ void mlx5_sriov_disable(struct pci_dev *pdev, bool num_vf_change);
int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count);
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
+bool mlx5_qos_element_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy);
+bool mlx5_qos_tsar_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy);
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
void *context, u32 *element_id);
int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
index db2bd3ad63ba..6be9981bb6b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
@@ -28,7 +28,9 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
{
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
- if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP))
+ if (!mlx5_qos_element_type_supported(mdev,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP,
+ SCHEDULING_HIERARCHY_NIC))
return -EOPNOTSUPP;
MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
@@ -47,8 +49,12 @@ int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
void *attr;
- if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_TSAR) ||
- !(MLX5_CAP_QOS(mdev, nic_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
+ if (!mlx5_qos_element_type_supported(mdev,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR,
+ SCHEDULING_HIERARCHY_NIC) ||
+ !mlx5_qos_tsar_type_supported(mdev,
+ TSAR_ELEMENT_TSAR_TYPE_DWRR,
+ SCHEDULING_HIERARCHY_NIC))
return -EOPNOTSUPP;
MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index 9f8b4005f4bd..e393391966e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -34,6 +34,64 @@
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
+bool mlx5_qos_tsar_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy)
+{
+ int cap;
+
+ switch (hierarchy) {
+ case SCHEDULING_HIERARCHY_E_SWITCH:
+ cap = MLX5_CAP_QOS(dev, esw_tsar_type);
+ break;
+ case SCHEDULING_HIERARCHY_NIC:
+ cap = MLX5_CAP_QOS(dev, nic_tsar_type);
+ break;
+ default:
+ return false;
+ }
+
+ switch (type) {
+ case TSAR_ELEMENT_TSAR_TYPE_DWRR:
+ return cap & TSAR_TYPE_CAP_MASK_DWRR;
+ case TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN:
+ return cap & TSAR_TYPE_CAP_MASK_ROUND_ROBIN;
+ case TSAR_ELEMENT_TSAR_TYPE_ETS:
+ return cap & TSAR_TYPE_CAP_MASK_ETS;
+ }
+
+ return false;
+}
+
+bool mlx5_qos_element_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy)
+{
+ int cap;
+
+ switch (hierarchy) {
+ case SCHEDULING_HIERARCHY_E_SWITCH:
+ cap = MLX5_CAP_QOS(dev, esw_element_type);
+ break;
+ case SCHEDULING_HIERARCHY_NIC:
+ cap = MLX5_CAP_QOS(dev, nic_element_type);
+ break;
+ default:
+ return false;
+ }
+
+ switch (type) {
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
+ return cap & ELEMENT_TYPE_CAP_MASK_TSAR;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
+ return cap & ELEMENT_TYPE_CAP_MASK_VPORT;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
+ return cap & ELEMENT_TYPE_CAP_MASK_VPORT_TC;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
+ return cap & ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP:
+ return cap & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP;
+ }
+
+ return false;
+}
+
/* Scheduling element fw management */
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
void *ctx, u32 *element_id)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
index b27bb4106532..a897cdc60fdb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
#define MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET 1
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
index bf5c1b241006..e8f562c31826 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#ifndef MLX5HWS_ACTION_H_
-#define MLX5HWS_ACTION_H_
+#ifndef HWS_ACTION_H_
+#define HWS_ACTION_H_
/* Max number of STEs needed for a rule (including match) */
#define MLX5HWS_ACTION_MAX_STE 20
@@ -304,4 +304,4 @@ mlx5hws_action_apply_setter(struct mlx5hws_actions_apply_data *apply,
htonl(num_of_actions << 29);
}
-#endif /* MLX5HWS_ACTION_H_ */
+#endif /* HWS_ACTION_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.c
index e6ed66202a40..b9aef80ba094 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
-#include "mlx5hws_buddy.h"
+#include "internal.h"
+#include "buddy.h"
static int hws_buddy_init(struct mlx5hws_buddy_mem *buddy, u32 max_order)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.h
index 338c44bbedaf..ef6b223677aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#ifndef MLX5HWS_BUDDY_H_
-#define MLX5HWS_BUDDY_H_
+#ifndef HWS_BUDDY_H_
+#define HWS_BUDDY_H_
struct mlx5hws_buddy_mem {
unsigned long **bitmap;
@@ -18,4 +18,4 @@ int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order);
void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order);
-#endif /* MLX5HWS_BUDDY_H_ */
+#endif /* HWS_BUDDY_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
index 8f3a6f9d703d..baacf662c0ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
static u16 hws_bwc_gen_queue_idx(struct mlx5hws_context *ctx)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
index 4fe8c32d8fbe..0b745968e21e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#ifndef MLX5HWS_BWC_H_
-#define MLX5HWS_BWC_H_
+#ifndef HWS_BWC_H_
+#define HWS_BWC_H_
#define MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG 1
#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1
@@ -70,4 +70,4 @@ static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
return idx + mlx5hws_bwc_queues(ctx);
}
-#endif /* MLX5HWS_BWC_H_ */
+#endif /* HWS_BWC_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
index 601fad5fc54a..c00010ca86bd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
u8 match_criteria_enable,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
index 068ee8118609..340f0688e394 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#ifndef MLX5HWS_BWC_COMPLEX_H_
-#define MLX5HWS_BWC_COMPLEX_H_
+#ifndef HWS_BWC_COMPLEX_H_
+#define HWS_BWC_COMPLEX_H_
bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
u8 match_criteria_enable,
@@ -26,4 +26,4 @@ int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule);
-#endif /* MLX5HWS_BWC_COMPLEX_H_ */
+#endif /* HWS_BWC_COMPLEX_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
index 2c7b14172049..c00c138c3366 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
static enum mlx5_ifc_flow_destination_type
hws_cmd_dest_type_to_ifc_dest_type(enum mlx5_flow_destination_type type)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
index 2fbcf4ff571a..434f62b0904e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#ifndef MLX5HWS_CMD_H_
-#define MLX5HWS_CMD_H_
+#ifndef HWS_CMD_H_
+#define HWS_CMD_H_
#define WIRE_PORT 0xFFFF
@@ -358,4 +358,4 @@ int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
u16 vport_number, u16 *gvmi);
-#endif /* MLX5HWS_CMD_H_ */
+#endif /* HWS_CMD_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
index 00e4fdf4a558..fd48b05e91e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved. */
-#include "mlx5hws_internal.h"
+#include "internal.h"
bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
index 8ab548aa402b..47f5cc8de73f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#ifndef MLX5HWS_CONTEXT_H_
-#define MLX5HWS_CONTEXT_H_
+#ifndef HWS_CONTEXT_H_
+#define HWS_CONTEXT_H_
enum mlx5hws_context_flags {
MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0,
@@ -62,4 +62,4 @@ bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx);
u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx);
-#endif /* MLX5HWS_CONTEXT_H_ */
+#endif /* HWS_CONTEXT_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
index 2b8c5a4e1c4c..5b200b4bc1a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
@@ -5,7 +5,7 @@
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/version.h>
-#include "mlx5hws_internal.h"
+#include "internal.h"
static int
hws_debug_dump_matcher_template_definer(struct seq_file *f,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h
index b93a536035d9..e44e7ae28f93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#ifndef MLX5HWS_DEBUG_H_
-#define MLX5HWS_DEBUG_H_
+#ifndef HWS_DEBUG_H_
+#define HWS_DEBUG_H_
#define HWS_DEBUG_FORMAT_VERSION "1.0"
@@ -37,4 +37,4 @@ mlx5hws_debug_icm_to_idx(u64 icm_addr)
void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx);
void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx);
-#endif /* MLX5HWS_DEBUG_H_ */
+#endif /* HWS_DEBUG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
index 3f4c58bada37..8fe96eb76baf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
/* Pattern tunnel Layer bits. */
#define MLX5_FLOW_LAYER_VXLAN BIT(12)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
index 2f6a7df4021c..9432d5084def 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#ifndef MLX5HWS_DEFINER_H_
-#define MLX5HWS_DEFINER_H_
+#ifndef HWS_DEFINER_H_
+#define HWS_DEFINER_H_
/* Max available selectors */
#define DW_SELECTORS 9
@@ -831,4 +831,4 @@ mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
u32 *match_param,
int *fc_sz);
-#endif /* MLX5HWS_DEFINER_H_ */
+#endif /* HWS_DEFINER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
index 5643be1cd5bf..3c8635f286ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#ifndef MLX5HWS_INTERNAL_H_
-#define MLX5HWS_INTERNAL_H_
+#ifndef HWS_INTERNAL_H_
+#define HWS_INTERNAL_H_
#include <linux/mlx5/transobj.h>
#include <linux/mlx5/vport.h>
@@ -10,22 +10,22 @@
#include "wq.h"
#include "lib/mlx5.h"
-#include "mlx5hws_prm.h"
+#include "prm.h"
#include "mlx5hws.h"
-#include "mlx5hws_pool.h"
-#include "mlx5hws_vport.h"
-#include "mlx5hws_context.h"
-#include "mlx5hws_table.h"
-#include "mlx5hws_send.h"
-#include "mlx5hws_rule.h"
-#include "mlx5hws_cmd.h"
-#include "mlx5hws_action.h"
-#include "mlx5hws_definer.h"
-#include "mlx5hws_matcher.h"
-#include "mlx5hws_debug.h"
-#include "mlx5hws_pat_arg.h"
-#include "mlx5hws_bwc.h"
-#include "mlx5hws_bwc_complex.h"
+#include "pool.h"
+#include "vport.h"
+#include "context.h"
+#include "table.h"
+#include "send.h"
+#include "rule.h"
+#include "cmd.h"
+#include "action.h"
+#include "definer.h"
+#include "matcher.h"
+#include "debug.h"
+#include "pat_arg.h"
+#include "bwc.h"
+#include "bwc_complex.h"
#define W_SIZE 2
#define DW_SIZE 4
@@ -56,4 +56,4 @@ static inline unsigned long align(unsigned long val, unsigned long align)
return (val + align - 1) & ~(align - 1);
}
-#endif /* MLX5HWS_INTERNAL_H_ */
+#endif /* HWS_INTERNAL_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
index 61a1155d4b4f..1bb3a6f8c3cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
enum mlx5hws_matcher_rtc_type {
HWS_MATCHER_RTC_TYPE_MATCH,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
index 125391d1a114..81ff487f57be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#ifndef MLX5HWS_MATCHER_H_
-#define MLX5HWS_MATCHER_H_
+#ifndef HWS_MATCHER_H_
+#define HWS_MATCHER_H_
/* We calculated that concatenating a collision table to the main table with
* 3% of the main table rows will be enough resources for high insertion
@@ -104,4 +104,4 @@ static inline bool mlx5hws_matcher_is_insert_by_idx(struct mlx5hws_matcher *matc
return matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX;
}
-#endif /* MLX5HWS_MATCHER_H_ */
+#endif /* HWS_MATCHER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
index e084a5cbf81f..06db5e4726ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
enum mlx5hws_arg_chunk_size
mlx5hws_arg_data_size_to_arg_log_size(u16 data_size)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
index 27ca93385b08..27ca93385b08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
index a8a63e3278be..fed2d913f3b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
-#include "mlx5hws_buddy.h"
+#include "internal.h"
+#include "buddy.h"
static void hws_pool_free_one_resource(struct mlx5hws_pool_resource *resource)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
index 621298b352b2..621298b352b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
index de92cecbeb92..de92cecbeb92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
index 8a011b958b43..e20c67a04203 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
static void hws_rule_skip(struct mlx5hws_matcher *matcher,
struct mlx5hws_match_template *mt,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
index 495cdd17e9f3..495cdd17e9f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
index 6d443e6ee8d9..424797b6d802 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
#include "lib/clock.h"
enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
index b50825d6dc53..b50825d6dc53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
index 8c063a8d87d7..9576e02d00c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
index dd50420eec9e..dd50420eec9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.c
index faf42421c43f..d8e382b9fa61 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
-#include "mlx5hws_internal.h"
+#include "internal.h"
int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.h
index 0912fc166b3a..0912fc166b3a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
index 2ebb61ef3ea9..2ebb61ef3ea9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c
index 01ed6442095d..01ed6442095d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_buddy.c
index fe228d948b47..fe228d948b47 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_buddy.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c
index baefb9a3fa05..baefb9a3fa05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
index 030a5776c937..030a5776c937 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.h
index 57c6b363b870..57c6b363b870 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_definer.c
index d5ea97751945..d5ea97751945 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_definer.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
index 3d74109f8230..3d74109f8230 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_fw.c
index f05ef0cd54ba..f05ef0cd54ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_fw.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_icm_pool.c
index 0b5af9f3f605..0b5af9f3f605 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_icm_pool.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_matcher.c
index 0726848eb3ff..0726848eb3ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_matcher.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ptrn.c
index 8ca534ef5d03..8ca534ef5d03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ptrn.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_rule.c
index d1db04baa1fa..d1db04baa1fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_rule.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
index 6fa06ba2d346..6fa06ba2d346 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
index e94fbb015efa..e94fbb015efa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
index 54a6619c3ecb..54a6619c3ecb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
index e9f6c7ed7a7b..e9f6c7ed7a7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
index 1d49704b9542..1d49704b9542 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
index e2fc69867088..e2fc69867088 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
index 808b013cf48c..808b013cf48c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_table.c
index 69294a66fd7f..69294a66fd7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_table.c
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h
index 7618c6147f86..7618c6147f86 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
index 833cb68c744f..4b349d4005e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
@@ -256,6 +256,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
{
struct mlx5dr_domain *domain = ns->fs_dr_domain.dr_domain;
struct mlx5dr_action_dest *term_actions;
+ struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5dr_match_parameters params;
struct mlx5_core_dev *dev = ns->dev;
struct mlx5dr_action **fs_dr_actions;
@@ -332,18 +333,19 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
bool is_decap;
- if (fte->act_dests.action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
+ pkt_reformat = fte->act_dests.action.pkt_reformat;
+ if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
err = -EINVAL;
mlx5dr_err(domain, "FW-owned reformat can't be used in SW rule\n");
goto free_actions;
}
- is_decap = fte->act_dests.action.pkt_reformat->reformat_type ==
+ is_decap = pkt_reformat->reformat_type ==
MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
if (is_decap)
actions[num_actions++] =
- fte->act_dests.action.pkt_reformat->action.dr_action;
+ pkt_reformat->fs_dr_action.dr_action;
else
delay_encap_set = true;
}
@@ -370,9 +372,11 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- actions[num_actions++] =
- fte->act_dests.action.modify_hdr->action.dr_action;
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ struct mlx5_modify_hdr *modify_hdr = fte->act_dests.action.modify_hdr;
+
+ actions[num_actions++] = modify_hdr->fs_dr_action.dr_action;
+ }
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[0]);
@@ -395,8 +399,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
if (delay_encap_set)
- actions[num_actions++] =
- fte->act_dests.action.pkt_reformat->action.dr_action;
+ actions[num_actions++] = pkt_reformat->fs_dr_action.dr_action;
/* The order of the actions below is not important */
@@ -458,9 +461,11 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
term_actions[num_term_actions].dest = tmp_action;
if (dst->dest_attr.vport.flags &
- MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID) {
+ pkt_reformat = dst->dest_attr.vport.pkt_reformat;
term_actions[num_term_actions].reformat =
- dst->dest_attr.vport.pkt_reformat->action.dr_action;
+ pkt_reformat->fs_dr_action.dr_action;
+ }
num_term_actions++;
break;
@@ -671,7 +676,7 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns
}
pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
- pkt_reformat->action.dr_action = action;
+ pkt_reformat->fs_dr_action.dr_action = action;
return 0;
}
@@ -679,7 +684,7 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns
static void mlx5_cmd_dr_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
struct mlx5_pkt_reformat *pkt_reformat)
{
- mlx5dr_action_destroy(pkt_reformat->action.dr_action);
+ mlx5dr_action_destroy(pkt_reformat->fs_dr_action.dr_action);
}
static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
@@ -702,7 +707,7 @@ static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
}
modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
- modify_hdr->action.dr_action = action;
+ modify_hdr->fs_dr_action.dr_action = action;
return 0;
}
@@ -710,7 +715,7 @@ static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
struct mlx5_modify_hdr *modify_hdr)
{
- mlx5dr_action_destroy(modify_hdr->action.dr_action);
+ mlx5dr_action_destroy(modify_hdr->fs_dr_action.dr_action);
}
static int
@@ -836,7 +841,7 @@ int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat
case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
case MLX5_REFORMAT_TYPE_INSERT_HDR:
- return mlx5dr_action_get_pkt_reformat_id(pkt_reformat->action.dr_action);
+ return mlx5dr_action_get_pkt_reformat_id(pkt_reformat->fs_dr_action.dr_action);
}
return -EOPNOTSUPP;
}
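The fs_dr.c changes above all follow from one rename: the dr_action container inside struct mlx5_pkt_reformat and struct mlx5_modify_hdr moves from a member called action to one called fs_dr_action. A sketch of the assumed post-rename shape (the real definitions live in fs_core.h, outside this diff, so the exact field set here is illustrative):

/* Illustrative only -- see fs_core.h for the authoritative definition. */
struct mlx5_pkt_reformat {
	int reformat_type;			/* from mlx5_ifc */
	enum mlx5_flow_resource_owner owner;	/* FW- or SW-owned */
	union {
		u32 id;					/* FW-owned reformat */
		struct mlx5_fs_dr_action fs_dr_action;	/* SW steering */
	};
};

Giving the union member an owner-specific name leaves room for further per-owner action state without ambiguous action.dr_action accesses; the owner field already selects which arm is valid.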
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h
index 99a3b2eff6b8..99a3b2eff6b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
index fb078fa0f0cc..fb078fa0f0cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr_ste_v1.h
index ca3b0f1453a7..ca3b0f1453a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr_ste_v1.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
index 3ac7dc67509f..3ac7dc67509f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index e4ef1d24a3ad..6debb8fd33ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -244,7 +244,7 @@ static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
}
static inline
-struct mlx5_cqe64 *mlx5_cqwq_get_cqe_enahnced_comp(struct mlx5_cqwq *wq)
+struct mlx5_cqe64 *mlx5_cqwq_get_cqe_enhanced_comp(struct mlx5_cqwq *wq)
{
u8 sw_validity_iteration_count = mlx5_cqwq_get_wrap_cnt(wq) & 0xff;
u32 ci = mlx5_cqwq_get_ci(wq);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 385a56ac7348..fb2e5b844c15 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -520,7 +520,7 @@ MODULE_DEVICE_TABLE(acpi, mlxbf_gige_acpi_match);
static struct platform_driver mlxbf_gige_driver = {
.probe = mlxbf_gige_probe,
- .remove_new = mlxbf_gige_remove,
+ .remove = mlxbf_gige_remove,
.shutdown = mlxbf_gige_shutdown,
.driver = {
.name = KBUILD_MODNAME,
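This one-line change is part of the tree-wide return to .remove now that struct platform_driver's remove callback returns void; .remove_new was the transitional field name used while drivers were converted. Only the initializer changes; the callback keeps its signature. Condensed, the pattern is (the example_* names are placeholders):

static void example_remove(struct platform_device *pdev)
{
	/* device teardown; no return value to get wrong */
}

static struct platform_driver example_driver = {
	.probe  = example_probe,
	.remove = example_remove,	/* formerly .remove_new */
};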
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index 947500f8ed71..7aa1a462a103 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -67,7 +67,7 @@ static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk)
for (j = 0; j < block->instances_count; j++) {
const struct mlxsw_afk_element_info *elinfo;
- struct mlxsw_afk_element_inst *elinst;
+ const struct mlxsw_afk_element_inst *elinst;
elinst = &block->instances[j];
elinfo = &mlxsw_afk_element_infos[elinst->element];
@@ -154,7 +154,7 @@ static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk,
const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i];
for (j = 0; j < block->instances_count; j++) {
- struct mlxsw_afk_element_inst *elinst;
+ const struct mlxsw_afk_element_inst *elinst;
elinst = &block->instances[j];
if (elinst->element == element) {
@@ -386,7 +386,7 @@ mlxsw_afk_block_elinst_get(const struct mlxsw_afk_block *block,
int i;
for (i = 0; i < block->instances_count; i++) {
- struct mlxsw_afk_element_inst *elinst;
+ const struct mlxsw_afk_element_inst *elinst;
elinst = &block->instances[i];
if (elinst->element == element)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index 98a05598178b..5aa1afb3f2ca 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -117,7 +117,7 @@ struct mlxsw_afk_element_inst { /* element instance in actual block */
struct mlxsw_afk_block {
u16 encoding; /* block ID */
- struct mlxsw_afk_element_inst *instances;
+ const struct mlxsw_afk_element_inst *instances;
unsigned int instances_count;
bool high_entropy;
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
index eaad78605602..6fe185ea6732 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -7,7 +7,7 @@
#include "item.h"
#include "core_acl_flex_keys.h"
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x00, 2),
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x02, 4),
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
@@ -15,7 +15,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x00, 2),
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x02, 4),
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
@@ -23,27 +23,27 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x02, 2),
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 16, 16),
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 16, 16),
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2),
MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8),
@@ -51,35 +51,35 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x00, 4),
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16),
};
@@ -124,90 +124,90 @@ const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = {
.clear_block = mlxsw_sp1_afk_clear_block,
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
MLXSW_AFK_ELEMENT_INST_U32(FDB_MISS, 0x00, 3, 1),
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = {
MLXSW_AFK_ELEMENT_INST_U32(FDB_MISS, 0x00, 3, 1),
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_2[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_2[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x04, 2),
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_3[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_3[] = {
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = {
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3),
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x04, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = {
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12),
MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 8, -1, true), /* RX_ACL_SYSTEM_PORT */
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x04, 0, 6),
MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 6, 2),
MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 8, 8),
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5[] = {
MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER, 0x04, 20, 11, 0, true),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_0_3, 0x00, 0, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = {
MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_4_7, 0x00, 0, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = {
MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_3[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_3[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_4[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_4[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_5[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_5[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x04, 4),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = {
MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x04, 16, 16),
MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x04, 0, 16),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = {
MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x04, 16, 9), /* TCP_CONTROL + TCP_ECN */
MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 0, 16),
};
@@ -319,16 +319,16 @@ const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = {
.clear_block = mlxsw_sp2_afk_clear_block,
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = {
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 18, 12),
MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 9, -1, true), /* RX_ACL_SYSTEM_PORT */
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5b[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5b[] = {
MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER, 0x04, 20, 12),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = {
+static const struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = {
MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x00, 0, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
};
diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig
index 85519690b837..831921b9d4d5 100644
--- a/drivers/net/ethernet/meta/Kconfig
+++ b/drivers/net/ethernet/meta/Kconfig
@@ -23,6 +23,7 @@ config FBNIC
depends on !S390
depends on MAX_SKB_FRAGS < 22
depends on PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
select NET_DEVLINK
select PAGE_POOL
select PHYLINK
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
index ed4533a73c57..239b2258ec65 100644
--- a/drivers/net/ethernet/meta/fbnic/Makefile
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -7,10 +7,13 @@
obj-$(CONFIG_FBNIC) += fbnic.o
-fbnic-y := fbnic_devlink.o \
+fbnic-y := fbnic_csr.o \
+ fbnic_debugfs.o \
+ fbnic_devlink.o \
fbnic_ethtool.o \
fbnic_fw.o \
fbnic_hw_stats.o \
+ fbnic_hwmon.o \
fbnic_irq.o \
fbnic_mac.o \
fbnic_netdev.o \
@@ -18,4 +21,5 @@ fbnic-y := fbnic_devlink.o \
fbnic_phylink.o \
fbnic_rpc.o \
fbnic_tlv.o \
- fbnic_txrx.o
+ fbnic_txrx.o \
+ fbnic_time.o
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index 0f9e8d79461c..706ae6104c8e 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -6,6 +6,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -18,6 +19,8 @@
struct fbnic_dev {
struct device *dev;
struct net_device *netdev;
+ struct dentry *dbg_fbd;
+ struct device *hwmon;
u32 __iomem *uc_addr0;
u32 __iomem *uc_addr4;
@@ -30,6 +33,7 @@ struct fbnic_dev {
struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES];
struct fbnic_fw_cap fw_cap;
+ struct fbnic_fw_completion *cmpl_data;
/* Lock protecting Tx Mailbox queue to prevent possible races */
spinlock_t fw_tx_lock;
@@ -45,10 +49,21 @@ struct fbnic_dev {
struct fbnic_act_tcam act_tcam[FBNIC_RPC_TCAM_ACT_NUM_ENTRIES];
struct fbnic_mac_addr mac_addr[FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES];
u8 mac_addr_boundary;
+ u8 tce_tcam_last;
/* Number of TCQs/RCQs available on hardware */
u16 max_num_queues;
+ /* Lock protecting writes to @time_high, @time_offset of fbnic_netdev,
+ * and the HW time CSR machinery.
+ */
+ spinlock_t time_lock;
+ /* Externally accessible PTP clock, may be NULL */
+ struct ptp_clock *ptp;
+ struct ptp_clock_info ptp_info;
+ /* Last @time_high refresh time in jiffies (to catch stalls) */
+ unsigned long last_read;
+
/* Local copy of hardware statistics */
struct fbnic_hw_stats hw_stats;
};
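The new time fields pair with fbnic_time.c from the Makefile hunk above. A minimal sketch of how registration is presumably wired up there (the function name and error handling are assumed; fbnic_time.c itself is not shown in this section):

static int example_fbnic_ptp_register(struct fbnic_dev *fbd)
{
	spin_lock_init(&fbd->time_lock);

	/* Assumes fbd->ptp_info was populated with clock ops first.
	 * ptp_clock_register() returns NULL when the PTP core is compiled
	 * out, which is why the struct comment allows @ptp to be NULL --
	 * and why the Kconfig hunk adds the PTP_1588_CLOCK_OPTIONAL
	 * dependency.
	 */
	fbd->ptp = ptp_clock_register(&fbd->ptp_info, fbd->dev);
	if (IS_ERR(fbd->ptp))
		return PTR_ERR(fbd->ptp);

	fbd->last_read = jiffies;
	return 0;
}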
@@ -127,6 +142,9 @@ void fbnic_devlink_unregister(struct fbnic_dev *fbd);
int fbnic_fw_enable_mbx(struct fbnic_dev *fbd);
void fbnic_fw_disable_mbx(struct fbnic_dev *fbd);
+void fbnic_hwmon_register(struct fbnic_dev *fbd);
+void fbnic_hwmon_unregister(struct fbnic_dev *fbd);
+
int fbnic_pcs_irq_enable(struct fbnic_dev *fbd);
void fbnic_pcs_irq_disable(struct fbnic_dev *fbd);
@@ -139,6 +157,14 @@ int fbnic_alloc_irqs(struct fbnic_dev *fbd);
void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
const size_t str_sz);
+void fbnic_dbg_fbd_init(struct fbnic_dev *fbd);
+void fbnic_dbg_fbd_exit(struct fbnic_dev *fbd);
+void fbnic_dbg_init(void);
+void fbnic_dbg_exit(void);
+
+void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version);
+int fbnic_csr_regs_len(struct fbnic_dev *fbd);
+
enum fbnic_boards {
fbnic_board_asic
};
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
new file mode 100644
index 000000000000..2118901b25e9
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include "fbnic.h"
+
+#define FBNIC_BOUNDS(section) { \
+ .start = FBNIC_CSR_START_##section, \
+ .end = FBNIC_CSR_END_##section + 1, \
+}
+
+struct fbnic_csr_bounds {
+ u32 start;
+ u32 end;
+};
+
+static const struct fbnic_csr_bounds fbnic_csr_sects[] = {
+ FBNIC_BOUNDS(INTR),
+ FBNIC_BOUNDS(INTR_CQ),
+ FBNIC_BOUNDS(QM_TX),
+ FBNIC_BOUNDS(QM_RX),
+ FBNIC_BOUNDS(TCE),
+ FBNIC_BOUNDS(TCE_RAM),
+ FBNIC_BOUNDS(TMI),
+ FBNIC_BOUNDS(PTP),
+ FBNIC_BOUNDS(RXB),
+ FBNIC_BOUNDS(RPC),
+ FBNIC_BOUNDS(FAB),
+ FBNIC_BOUNDS(MASTER),
+ FBNIC_BOUNDS(PCS),
+ FBNIC_BOUNDS(RSFEC),
+ FBNIC_BOUNDS(MAC_MAC),
+ FBNIC_BOUNDS(SIG),
+ FBNIC_BOUNDS(PUL_USER),
+ FBNIC_BOUNDS(QUEUE),
+ FBNIC_BOUNDS(RPC_RAM),
+};
+
+#define FBNIC_RPC_TCAM_ACT_DW_PER_ENTRY 14
+#define FBNIC_RPC_TCAM_ACT_NUM_ENTRIES 64
+
+#define FBNIC_RPC_TCAM_MACDA_DW_PER_ENTRY 4
+#define FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES 32
+
+#define FBNIC_RPC_TCAM_OUTER_IPSRC_DW_PER_ENTRY 9
+#define FBNIC_RPC_TCAM_OUTER_IPSRC_NUM_ENTRIES 8
+
+#define FBNIC_RPC_TCAM_OUTER_IPDST_DW_PER_ENTRY 9
+#define FBNIC_RPC_TCAM_OUTER_IPDST_NUM_ENTRIES 8
+
+#define FBNIC_RPC_TCAM_IPSRC_DW_PER_ENTRY 9
+#define FBNIC_RPC_TCAM_IPSRC_NUM_ENTRIES 8
+
+#define FBNIC_RPC_TCAM_IPDST_DW_PER_ENTRY 9
+#define FBNIC_RPC_TCAM_IPDST_NUM_ENTRIES 8
+
+#define FBNIC_RPC_RSS_TBL_DW_PER_ENTRY 2
+#define FBNIC_RPC_RSS_TBL_NUM_ENTRIES 256
+
+static void fbnic_csr_get_regs_rpc_ram(struct fbnic_dev *fbd, u32 **data_p)
+{
+ u32 start = FBNIC_CSR_START_RPC_RAM;
+ u32 end = FBNIC_CSR_END_RPC_RAM;
+ u32 *data = *data_p;
+ u32 i, j;
+
+ *(data++) = start;
+ *(data++) = end - 1;
+
+ /* FBNIC_RPC_TCAM_ACT */
+ for (i = 0; i < FBNIC_RPC_TCAM_ACT_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_TCAM_ACT_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_TCAM_ACT(i, j));
+ }
+
+ /* FBNIC_RPC_TCAM_MACDA */
+ for (i = 0; i < FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_TCAM_MACDA_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_TCAM_MACDA(i, j));
+ }
+
+ /* FBNIC_RPC_TCAM_OUTER_IPSRC */
+ for (i = 0; i < FBNIC_RPC_TCAM_OUTER_IPSRC_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_TCAM_OUTER_IPSRC_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(i, j));
+ }
+
+ /* FBNIC_RPC_TCAM_OUTER_IPDST */
+ for (i = 0; i < FBNIC_RPC_TCAM_OUTER_IPDST_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_TCAM_OUTER_IPDST_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(i, j));
+ }
+
+ /* FBNIC_RPC_TCAM_IPSRC */
+ for (i = 0; i < FBNIC_RPC_TCAM_IPSRC_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_TCAM_IPSRC_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_TCAM_IPSRC(i, j));
+ }
+
+ /* FBNIC_RPC_TCAM_IPDST */
+ for (i = 0; i < FBNIC_RPC_TCAM_IPDST_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_TCAM_IPDST_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_TCAM_IPDST(i, j));
+ }
+
+ /* FBNIC_RPC_RSS_TBL */
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_NUM_ENTRIES; i++) {
+ for (j = 0; j < FBNIC_RPC_RSS_TBL_DW_PER_ENTRY; j++)
+ *(data++) = rd32(fbd, FBNIC_RPC_RSS_TBL(i, j));
+ }
+
+ *data_p = data;
+}
+
+void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version)
+{
+ const struct fbnic_csr_bounds *bound;
+ u32 *start = data;
+ int i, j;
+
+ *regs_version = 1u;
+
+ /* Skip RPC_RAM section which cannot be dumped linearly */
+ for (i = 0, bound = fbnic_csr_sects;
+ i < ARRAY_SIZE(fbnic_csr_sects) - 1; i++, ++bound) {
+ *(data++) = bound->start;
+ *(data++) = bound->end - 1;
+ for (j = bound->start; j < bound->end; j++)
+ *(data++) = rd32(fbd, j);
+ }
+
+ /* Dump the RPC_RAM as special case registers */
+ fbnic_csr_get_regs_rpc_ram(fbd, &data);
+
+ WARN_ON(data - start != fbnic_csr_regs_len(fbd));
+}
+
+int fbnic_csr_regs_len(struct fbnic_dev *fbd)
+{
+ int i, len = 0;
+
+ /* The dump includes start and end markers for each section, which
+ * adds two extra words per section to the total length.
+ */
+ for (i = 0; i < ARRAY_SIZE(fbnic_csr_sects); i++)
+ len += fbnic_csr_sects[i].end - fbnic_csr_sects[i].start + 2;
+
+ return len;
+}
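fbnic_csr_get_regs() and fbnic_csr_regs_len() are a matched pair: every section contributes two boundary words (start, end - 1) plus one word per register, and the WARN_ON verifies the writer produced exactly the advertised count. The expected ethtool wiring looks roughly like this (the callback names and the fbnic_net/fbd relationship are assumed from fbnic_ethtool.c, which is not shown here):

static int example_get_regs_len(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	/* ethtool wants a byte count; the helper counts 32-bit words */
	return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32);
}

static void example_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs, void *data)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	fbnic_csr_get_regs(fbn->fbd, data, &regs->version);
}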
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index 21db509acbc1..02bb81b3c506 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -397,6 +397,14 @@ enum {
#define FBNIC_TCE_DROP_CTRL_TTI_FRM_DROP_EN CSR_BIT(1)
#define FBNIC_TCE_DROP_CTRL_TTI_TBI_DROP_EN CSR_BIT(2)
+#define FBNIC_TCE_TCAM_IDX2DEST_MAP 0x0404A /* 0x10128 */
+#define FBNIC_TCE_TCAM_IDX2DEST_MAP_DEST_ID_0 CSR_GENMASK(3, 0)
+enum {
+ FBNIC_TCE_TCAM_DEST_MAC = 1,
+ FBNIC_TCE_TCAM_DEST_BMC = 2,
+ FBNIC_TCE_TCAM_DEST_FW = 4,
+};
+
#define FBNIC_TCE_TXB_TX_BMC_Q_CTRL 0x0404B /* 0x1012c */
#define FBNIC_TCE_TXB_BMC_DWRR_CTRL 0x0404C /* 0x10130 */
#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0)
@@ -407,12 +415,62 @@ enum {
#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_EXT 0x0404F /* 0x1013c */
#define FBNIC_CSR_END_TCE 0x04050 /* CSR section delimiter */
+/* TCE RAM registers */
+#define FBNIC_CSR_START_TCE_RAM 0x04200 /* CSR section delimiter */
+#define FBNIC_TCE_RAM_TCAM(m, n) \
+ (0x04200 + 0x8 * (n) + (m)) /* 0x10800 + 32*n + 4*m */
+#define FBNIC_TCE_RAM_TCAM_MASK CSR_GENMASK(15, 0)
+#define FBNIC_TCE_RAM_TCAM_VALUE CSR_GENMASK(31, 16)
+#define FBNIC_TCE_RAM_TCAM3(n) (0x04218 + (n)) /* 0x010860 + 4*n */
+#define FBNIC_TCE_RAM_TCAM3_DEST_MASK CSR_GENMASK(5, 3)
+#define FBNIC_TCE_RAM_TCAM3_MCQ_MASK CSR_BIT(7)
+#define FBNIC_TCE_RAM_TCAM3_VALIDATE CSR_BIT(31)
+#define FBNIC_CSR_END_TCE_RAM 0x0421F /* CSR section delimiter */
+
/* TMI registers */
#define FBNIC_CSR_START_TMI 0x04400 /* CSR section delimiter */
#define FBNIC_TMI_SOP_PROT_CTRL 0x04400 /* 0x11000 */
#define FBNIC_TMI_DROP_CTRL 0x04401 /* 0x11004 */
#define FBNIC_TMI_DROP_CTRL_EN CSR_BIT(0)
#define FBNIC_CSR_END_TMI 0x0443f /* CSR section delimiter */
+
+/* Precision Time Protocol Registers */
+#define FBNIC_CSR_START_PTP 0x04800 /* CSR section delimiter */
+#define FBNIC_PTP_REG_BASE 0x04800 /* 0x12000 */
+
+#define FBNIC_PTP_CTRL 0x04800 /* 0x12000 */
+#define FBNIC_PTP_CTRL_EN CSR_BIT(0)
+#define FBNIC_PTP_CTRL_MONO_EN CSR_BIT(4)
+#define FBNIC_PTP_CTRL_TQS_OUT_EN CSR_BIT(8)
+#define FBNIC_PTP_CTRL_MAC_OUT_IVAL CSR_GENMASK(16, 12)
+#define FBNIC_PTP_CTRL_TICK_IVAL CSR_GENMASK(23, 20)
+
+#define FBNIC_PTP_ADJUST 0x04801 /* 0x12004 */
+#define FBNIC_PTP_ADJUST_INIT CSR_BIT(0)
+#define FBNIC_PTP_ADJUST_SUB_NUDGE CSR_BIT(8)
+#define FBNIC_PTP_ADJUST_ADD_NUDGE CSR_BIT(16)
+#define FBNIC_PTP_ADJUST_ADDEND_SET CSR_BIT(24)
+
+#define FBNIC_PTP_INIT_HI 0x04802 /* 0x12008 */
+#define FBNIC_PTP_INIT_LO 0x04803 /* 0x1200c */
+
+#define FBNIC_PTP_NUDGE_NS 0x04804 /* 0x12010 */
+#define FBNIC_PTP_NUDGE_SUBNS 0x04805 /* 0x12014 */
+
+#define FBNIC_PTP_ADD_VAL_NS 0x04806 /* 0x12018 */
+#define FBNIC_PTP_ADD_VAL_NS_MASK CSR_GENMASK(15, 0)
+#define FBNIC_PTP_ADD_VAL_SUBNS 0x04807 /* 0x1201c */
+
+#define FBNIC_PTP_CTR_VAL_HI 0x04808 /* 0x12020 */
+#define FBNIC_PTP_CTR_VAL_LO 0x04809 /* 0x12024 */
+
+#define FBNIC_PTP_MONO_PTP_CTR_HI 0x0480a /* 0x12028 */
+#define FBNIC_PTP_MONO_PTP_CTR_LO 0x0480b /* 0x1202c */
+
+#define FBNIC_PTP_CDC_FIFO_STATUS 0x0480c /* 0x12030 */
+#define FBNIC_PTP_SPARE 0x0480d /* 0x12034 */
+#define FBNIC_CSR_END_PTP 0x0480d /* CSR section delimiter */
+
/* Rx Buffer Registers */
#define FBNIC_CSR_START_RXB 0x08000 /* CSR section delimiter */
enum {
@@ -548,6 +606,7 @@ enum {
};
#define FBNIC_RPC_ACT_TBL0_DMA_HINT CSR_GENMASK(24, 16)
+#define FBNIC_RPC_ACT_TBL0_TS_ENA CSR_BIT(28)
#define FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID CSR_BIT(30)
#define FBNIC_RPC_ACT_TBL1_DEFAULT 0x0840b /* 0x2102c */
@@ -579,6 +638,16 @@ enum {
FBNIC_RPC_RSS_KEY_DWORD_LEN * 32 - \
FBNIC_RPC_RSS_KEY_BIT_LEN)
+#define FBNIC_RPC_CNTR_TCP_OPT_ERR 0x0849e /* 0x21278 */
+#define FBNIC_RPC_CNTR_UNKN_ETYPE 0x0849f /* 0x2127c */
+#define FBNIC_RPC_CNTR_IPV4_FRAG 0x084a0 /* 0x21280 */
+#define FBNIC_RPC_CNTR_IPV6_FRAG 0x084a1 /* 0x21284 */
+#define FBNIC_RPC_CNTR_IPV4_ESP 0x084a2 /* 0x21288 */
+#define FBNIC_RPC_CNTR_IPV6_ESP 0x084a3 /* 0x2128c */
+#define FBNIC_RPC_CNTR_UNKN_EXT_HDR 0x084a4 /* 0x21290 */
+#define FBNIC_RPC_CNTR_OUT_OF_HDR_ERR 0x084a5 /* 0x21294 */
+#define FBNIC_RPC_CNTR_OVR_SIZE_ERR 0x084a6 /* 0x21298 */
+
#define FBNIC_RPC_TCAM_MACDA_VALIDATE 0x0852d /* 0x214b4 */
#define FBNIC_CSR_END_RPC 0x0856b /* CSR section delimiter */
@@ -606,6 +675,15 @@ enum {
#define FBNIC_RPC_TCAM_MACDA_VALUE CSR_GENMASK(15, 0)
#define FBNIC_RPC_TCAM_MACDA_MASK CSR_GENMASK(31, 16)
+#define FBNIC_RPC_TCAM_OUTER_IPSRC(m, n)\
+ (0x08c00 + 0x08 * (n) + (m)) /* 0x023000 + 32*n + 4*m */
+#define FBNIC_RPC_TCAM_OUTER_IPDST(m, n)\
+ (0x08c48 + 0x08 * (n) + (m)) /* 0x023120 + 32*n + 4*m */
+#define FBNIC_RPC_TCAM_IPSRC(m, n)\
+ (0x08c90 + 0x08 * (n) + (m)) /* 0x023240 + 32*n + 4*m */
+#define FBNIC_RPC_TCAM_IPDST(m, n)\
+ (0x08cd8 + 0x08 * (n) + (m)) /* 0x023360 + 32*n + 4*m */
+
#define FBNIC_RPC_RSS_TBL(n, m) \
(0x08d20 + 0x100 * (n) + (m)) /* 0x023480 + 1024*n + 4*m */
#define FBNIC_RPC_RSS_TBL_COUNT 2
@@ -624,6 +702,13 @@ enum {
#define FBNIC_MASTER_SPARE_0 0x0C41B /* 0x3106c */
#define FBNIC_CSR_END_MASTER 0x0C452 /* CSR section delimiter */
+/* MAC PCS registers */
+#define FBNIC_CSR_START_PCS 0x10000 /* CSR section delimiter */
+#define FBNIC_CSR_END_PCS 0x10668 /* CSR section delimiter */
+
+#define FBNIC_CSR_START_RSFEC 0x10800 /* CSR section delimiter */
+#define FBNIC_CSR_END_RSFEC 0x108c8 /* CSR section delimiter */
+
/* MAC MAC registers (ASIC only) */
#define FBNIC_CSR_START_MAC_MAC 0x11000 /* CSR section delimiter */
#define FBNIC_MAC_COMMAND_CONFIG 0x11002 /* 0x44008 */
@@ -843,6 +928,43 @@ enum {
#define FBNIC_MAX_QUEUES 128
#define FBNIC_CSR_END_QUEUE (0x40000 + 0x400 * FBNIC_MAX_QUEUES - 1)
+/* PUL User Registers */
+#define FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0 \
+ 0x3106e /* 0xc41b8 */
+#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0 \
+ 0x31070 /* 0xc41c0 */
+#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_63_32 \
+ 0x31071 /* 0xc41c4 */
+#define FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0 \
+ 0x31072 /* 0xc41c8 */
+#define FBNIC_PUL_USER_OB_WR_TLP_CNT_63_32 \
+ 0x31073 /* 0xc41cc */
+#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0 \
+ 0x31074 /* 0xc41d0 */
+#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_63_32 \
+ 0x31075 /* 0xc41d4 */
+#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0 \
+ 0x31076 /* 0xc41d8 */
+#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_63_32 \
+ 0x31077 /* 0xc41dc */
+#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0 \
+ 0x31078 /* 0xc41e0 */
+#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_63_32 \
+ 0x31079 /* 0xc41e4 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0 \
+ 0x3107a /* 0xc41e8 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_63_32 \
+ 0x3107b /* 0xc41ec */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0 \
+ 0x3107c /* 0xc41f0 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_63_32 \
+ 0x3107d /* 0xc41f4 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0 \
+ 0x3107e /* 0xc41f8 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_63_32 \
+ 0x3107f /* 0xc41fc */
+#define FBNIC_CSR_END_PUL_USER 0x31080 /* CSR section delimiter */
+
/* BAR 4 CSRs */
/* The IPC mailbox consists of 32 mailboxes, with each mailbox consisting
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
new file mode 100644
index 000000000000..59951b5abdb7
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/debugfs.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/seq_file.h>
+
+#include "fbnic.h"
+
+static struct dentry *fbnic_dbg_root;
+
+static int fbnic_dbg_pcie_stats_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ rtnl_lock();
+ fbnic_get_hw_stats(fbd);
+
+ seq_printf(s, "ob_rd_tlp: %llu\n", fbd->hw_stats.pcie.ob_rd_tlp.value);
+ seq_printf(s, "ob_rd_dword: %llu\n",
+ fbd->hw_stats.pcie.ob_rd_dword.value);
+ seq_printf(s, "ob_wr_tlp: %llu\n", fbd->hw_stats.pcie.ob_wr_tlp.value);
+ seq_printf(s, "ob_wr_dword: %llu\n",
+ fbd->hw_stats.pcie.ob_wr_dword.value);
+ seq_printf(s, "ob_cpl_tlp: %llu\n",
+ fbd->hw_stats.pcie.ob_cpl_tlp.value);
+ seq_printf(s, "ob_cpl_dword: %llu\n",
+ fbd->hw_stats.pcie.ob_cpl_dword.value);
+ seq_printf(s, "ob_rd_no_tag: %llu\n",
+ fbd->hw_stats.pcie.ob_rd_no_tag.value);
+ seq_printf(s, "ob_rd_no_cpl_cred: %llu\n",
+ fbd->hw_stats.pcie.ob_rd_no_cpl_cred.value);
+ seq_printf(s, "ob_rd_no_np_cred: %llu\n",
+ fbd->hw_stats.pcie.ob_rd_no_np_cred.value);
+ rtnl_unlock();
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_pcie_stats);
+
+void fbnic_dbg_fbd_init(struct fbnic_dev *fbd)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ const char *name = pci_name(pdev);
+
+ fbd->dbg_fbd = debugfs_create_dir(name, fbnic_dbg_root);
+ debugfs_create_file("pcie_stats", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_pcie_stats_fops);
+}
+
+void fbnic_dbg_fbd_exit(struct fbnic_dev *fbd)
+{
+ debugfs_remove_recursive(fbd->dbg_fbd);
+ fbd->dbg_fbd = NULL;
+}
+
+void fbnic_dbg_init(void)
+{
+ fbnic_dbg_root = debugfs_create_dir(fbnic_driver_name, NULL);
+}
+
+void fbnic_dbg_exit(void)
+{
+ debugfs_remove_recursive(fbnic_dbg_root);
+ fbnic_dbg_root = NULL;
+}
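
With debugfs mounted in the usual location, the file created above should show
up as /sys/kernel/debug/fbnic/<pci-id>/pcie_stats (assuming DRV_NAME is
"fbnic"), readable only by root (mode 0400) and printing one "name: value"
pair per line, as built by fbnic_dbg_pcie_stats_show().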
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index 5d980e178941..cc8ca94529ca 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -1,3 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
@@ -6,6 +9,66 @@
#include "fbnic_netdev.h"
#include "fbnic_tlv.h"
+struct fbnic_stat {
+ u8 string[ETH_GSTRING_LEN];
+ unsigned int size;
+ unsigned int offset;
+};
+
+#define FBNIC_STAT_FIELDS(type, name, stat) { \
+ .string = name, \
+ .size = sizeof_field(struct type, stat), \
+ .offset = offsetof(struct type, stat), \
+}
+
+/* Hardware statistics not captured in rtnl_link_stats */
+#define FBNIC_HW_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_hw_stats, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
+ /* RPC */
+ FBNIC_HW_STAT("rpc_unkn_etype", rpc.unkn_etype),
+ FBNIC_HW_STAT("rpc_unkn_ext_hdr", rpc.unkn_ext_hdr),
+ FBNIC_HW_STAT("rpc_ipv4_frag", rpc.ipv4_frag),
+ FBNIC_HW_STAT("rpc_ipv6_frag", rpc.ipv6_frag),
+ FBNIC_HW_STAT("rpc_ipv4_esp", rpc.ipv4_esp),
+ FBNIC_HW_STAT("rpc_ipv6_esp", rpc.ipv6_esp),
+ FBNIC_HW_STAT("rpc_tcp_opt_err", rpc.tcp_opt_err),
+ FBNIC_HW_STAT("rpc_out_of_hdr_err", rpc.out_of_hdr_err),
+};
+
+#define FBNIC_HW_FIXED_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_stats)
+#define FBNIC_HW_STATS_LEN FBNIC_HW_FIXED_STATS_LEN
+
+static int
+fbnic_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *tsinfo)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp);
+
+ tsinfo->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ tsinfo->tx_types =
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ tsinfo->rx_filters =
+ BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
static void
fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
@@ -22,6 +85,43 @@ static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
*stat = counter->value;
}
+static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
+{
+ int i;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < FBNIC_HW_STATS_LEN; i++)
+ ethtool_puts(&data, fbnic_gstrings_hw_stats[i].string);
+ break;
+ }
+}
+
+static int fbnic_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return FBNIC_HW_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void fbnic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ const struct fbnic_stat *stat;
+ int i;
+
+ fbnic_get_hw_stats(fbn->fbd);
+
+ for (i = 0; i < FBNIC_HW_STATS_LEN; i++) {
+ stat = &fbnic_gstrings_hw_stats[i];
+ data[i] = *(u64 *)((u8 *)&fbn->fbd->hw_stats + stat->offset);
+ }
+}
+
static void
fbnic_get_eth_mac_stats(struct net_device *netdev,
struct ethtool_eth_mac_stats *eth_mac_stats)
@@ -64,8 +164,53 @@ fbnic_get_eth_mac_stats(struct net_device *netdev,
&mac_stats->eth_mac.FrameTooLongErrors);
}
+static void fbnic_get_ts_stats(struct net_device *netdev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ u64 ts_packets, ts_lost;
+ struct fbnic_ring *ring;
+ unsigned int start;
+ int i;
+
+ ts_stats->pkts = fbn->tx_stats.ts_packets;
+ ts_stats->lost = fbn->tx_stats.ts_lost;
+ for (i = 0; i < fbn->num_tx_queues; i++) {
+ ring = fbn->tx[i];
+ do {
+ start = u64_stats_fetch_begin(&ring->stats.syncp);
+ ts_packets = ring->stats.ts_packets;
+ ts_lost = ring->stats.ts_lost;
+ } while (u64_stats_fetch_retry(&ring->stats.syncp, start));
+ ts_stats->pkts += ts_packets;
+ ts_stats->lost += ts_lost;
+ }
+}
+
+static void fbnic_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *data)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ fbnic_csr_get_regs(fbn->fbd, data, &regs->version);
+}
+
+static int fbnic_get_regs_len(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32);
+}
+
static const struct ethtool_ops fbnic_ethtool_ops = {
.get_drvinfo = fbnic_get_drvinfo,
+ .get_regs_len = fbnic_get_regs_len,
+ .get_regs = fbnic_get_regs,
+ .get_strings = fbnic_get_strings,
+ .get_ethtool_stats = fbnic_get_ethtool_stats,
+ .get_sset_count = fbnic_get_sset_count,
+ .get_ts_info = fbnic_get_ts_info,
+ .get_ts_stats = fbnic_get_ts_stats,
.get_eth_mac_stats = fbnic_get_eth_mac_stats,
};
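
Once these ops are registered, the rpc_* counters should be visible via
`ethtool -S` and the register dump via `ethtool -d` on the interface, with
`ethtool -T` reporting the timestamping capabilities advertised in
fbnic_get_ts_info() above.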
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index 221faf8c6756..7cd8841920e4 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -44,6 +44,13 @@ struct fbnic_fw_cap {
u8 link_fec;
};
+struct fbnic_fw_completion {
+ struct {
+ s32 millivolts;
+ s32 millidegrees;
+ } tsene;
+};
+
void fbnic_mbx_init(struct fbnic_dev *fbd);
void fbnic_mbx_clean(struct fbnic_dev *fbd);
void fbnic_mbx_poll(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
index a0acc7606aa1..89ac6bc8c7fc 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
@@ -1,5 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
#include "fbnic.h"
+static void fbnic_hw_stat_rst32(struct fbnic_dev *fbd, u32 reg,
+ struct fbnic_stat_counter *stat)
+{
+ /* We do not touch the "value" field here.
+ * It gets zeroed out on fbd structure allocation.
+ * After that we want it to grow continuously
+ * through device resets and power state changes.
+ */
+ stat->u.old_reg_value_32 = rd32(fbd, reg);
+}
+
+static void fbnic_hw_stat_rd32(struct fbnic_dev *fbd, u32 reg,
+ struct fbnic_stat_counter *stat)
+{
+ u32 new_reg_value;
+
+ new_reg_value = rd32(fbd, reg);
+ stat->value += new_reg_value - stat->u.old_reg_value_32;
+ stat->u.old_reg_value_32 = new_reg_value;
+}
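
The delta read above leans on unsigned 32-bit arithmetic: new - old is
evaluated modulo 2^32, so the accumulated 64-bit value stays correct across a
single counter wrap between reads (it only breaks if the hardware counter
wraps more than once between two polls). A standalone illustration of that
property, with purely illustrative values:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t value = 0;
		uint32_t old = 0xfffffff0u;	/* just before the wrap */
		uint32_t now = 0x00000010u;	/* just after the wrap */

		/* 0x10 - 0xfffffff0 == 0x20 when reduced mod 2^32 */
		value += now - old;
		assert(value == 0x20);
		return 0;
	}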
+
u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset)
{
u32 prev_upper, upper, lower, diff;
@@ -25,3 +49,172 @@ u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset)
*/
return ((u64)upper << 32);
}
+
+static void fbnic_hw_stat_rst64(struct fbnic_dev *fbd, u32 reg, s32 offset,
+ struct fbnic_stat_counter *stat)
+{
+ /* Record initial counter values and compute deltas from there to ensure
+ * stats start at 0 after reboot/reset. This avoids exposing absolute
+ * hardware counter values to userspace.
+ */
+ stat->u.old_reg_value_64 = fbnic_stat_rd64(fbd, reg, offset);
+}
+
+static void fbnic_hw_stat_rd64(struct fbnic_dev *fbd, u32 reg, s32 offset,
+ struct fbnic_stat_counter *stat)
+{
+ u64 new_reg_value;
+
+ new_reg_value = fbnic_stat_rd64(fbd, reg, offset);
+ stat->value += new_reg_value - stat->u.old_reg_value_64;
+ stat->u.old_reg_value_64 = new_reg_value;
+}
+
+static void fbnic_reset_rpc_stats(struct fbnic_dev *fbd,
+ struct fbnic_rpc_stats *rpc)
+{
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_RPC_CNTR_UNKN_ETYPE,
+ &rpc->unkn_etype);
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_RPC_CNTR_UNKN_EXT_HDR,
+ &rpc->unkn_ext_hdr);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_IPV4_FRAG, &rpc->ipv4_frag);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_IPV6_FRAG, &rpc->ipv6_frag);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_IPV4_ESP, &rpc->ipv4_esp);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_IPV6_ESP, &rpc->ipv6_esp);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RPC_CNTR_TCP_OPT_ERR, &rpc->tcp_opt_err);
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_RPC_CNTR_OUT_OF_HDR_ERR,
+ &rpc->out_of_hdr_err);
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_RPC_CNTR_OVR_SIZE_ERR,
+ &rpc->ovr_size_err);
+}
+
+static void fbnic_get_rpc_stats32(struct fbnic_dev *fbd,
+ struct fbnic_rpc_stats *rpc)
+{
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_RPC_CNTR_UNKN_ETYPE,
+ &rpc->unkn_etype);
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_RPC_CNTR_UNKN_EXT_HDR,
+ &rpc->unkn_ext_hdr);
+
+ fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_IPV4_FRAG, &rpc->ipv4_frag);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_IPV6_FRAG, &rpc->ipv6_frag);
+
+ fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_IPV4_ESP, &rpc->ipv4_esp);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_IPV6_ESP, &rpc->ipv6_esp);
+
+ fbnic_hw_stat_rd32(fbd, FBNIC_RPC_CNTR_TCP_OPT_ERR, &rpc->tcp_opt_err);
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_RPC_CNTR_OUT_OF_HDR_ERR,
+ &rpc->out_of_hdr_err);
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_RPC_CNTR_OVR_SIZE_ERR,
+ &rpc->ovr_size_err);
+}
+
+static void fbnic_reset_pcie_stats_asic(struct fbnic_dev *fbd,
+ struct fbnic_pcie_stats *pcie)
+{
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0,
+ 1,
+ &pcie->ob_rd_tlp);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0,
+ 1,
+ &pcie->ob_rd_dword);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0,
+ 1,
+ &pcie->ob_cpl_tlp);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0,
+ 1,
+ &pcie->ob_cpl_dword);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0,
+ 1,
+ &pcie->ob_wr_tlp);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0,
+ 1,
+ &pcie->ob_wr_dword);
+
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0,
+ 1,
+ &pcie->ob_rd_no_tag);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0,
+ 1,
+ &pcie->ob_rd_no_cpl_cred);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0,
+ 1,
+ &pcie->ob_rd_no_np_cred);
+}
+
+static void fbnic_get_pcie_stats_asic64(struct fbnic_dev *fbd,
+ struct fbnic_pcie_stats *pcie)
+{
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0,
+ 1,
+ &pcie->ob_rd_tlp);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0,
+ 1,
+ &pcie->ob_rd_dword);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0,
+ 1,
+ &pcie->ob_wr_tlp);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0,
+ 1,
+ &pcie->ob_wr_dword);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0,
+ 1,
+ &pcie->ob_cpl_tlp);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0,
+ 1,
+ &pcie->ob_cpl_dword);
+
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0,
+ 1,
+ &pcie->ob_rd_no_tag);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0,
+ 1,
+ &pcie->ob_rd_no_cpl_cred);
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0,
+ 1,
+ &pcie->ob_rd_no_np_cred);
+}
+
+void fbnic_reset_hw_stats(struct fbnic_dev *fbd)
+{
+ fbnic_reset_rpc_stats(fbd, &fbd->hw_stats.rpc);
+ fbnic_reset_pcie_stats_asic(fbd, &fbd->hw_stats.pcie);
+}
+
+void fbnic_get_hw_stats32(struct fbnic_dev *fbd)
+{
+ fbnic_get_rpc_stats32(fbd, &fbd->hw_stats.rpc);
+}
+
+void fbnic_get_hw_stats(struct fbnic_dev *fbd)
+{
+ fbnic_get_hw_stats32(fbd);
+
+ fbnic_get_pcie_stats_asic64(fbd, &fbd->hw_stats.pcie);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
index 30348904b510..78df56b87745 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
@@ -1,3 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_HW_STATS_H_
+#define _FBNIC_HW_STATS_H_
+
#include <linux/ethtool.h>
#include "fbnic_csr.h"
@@ -31,10 +37,32 @@ struct fbnic_mac_stats {
struct fbnic_eth_mac_stats eth_mac;
};
+struct fbnic_rpc_stats {
+ struct fbnic_stat_counter unkn_etype, unkn_ext_hdr;
+ struct fbnic_stat_counter ipv4_frag, ipv6_frag, ipv4_esp, ipv6_esp;
+ struct fbnic_stat_counter tcp_opt_err, out_of_hdr_err, ovr_size_err;
+};
+
+struct fbnic_pcie_stats {
+ struct fbnic_stat_counter ob_rd_tlp, ob_rd_dword;
+ struct fbnic_stat_counter ob_wr_tlp, ob_wr_dword;
+ struct fbnic_stat_counter ob_cpl_tlp, ob_cpl_dword;
+
+ struct fbnic_stat_counter ob_rd_no_tag;
+ struct fbnic_stat_counter ob_rd_no_cpl_cred;
+ struct fbnic_stat_counter ob_rd_no_np_cred;
+};
+
struct fbnic_hw_stats {
struct fbnic_mac_stats mac;
+ struct fbnic_rpc_stats rpc;
+ struct fbnic_pcie_stats pcie;
};
u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset);
+void fbnic_reset_hw_stats(struct fbnic_dev *fbd);
+void fbnic_get_hw_stats32(struct fbnic_dev *fbd);
void fbnic_get_hw_stats(struct fbnic_dev *fbd);
+
+#endif /* _FBNIC_HW_STATS_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
new file mode 100644
index 000000000000..bcd1086e3768
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/hwmon.h>
+
+#include "fbnic.h"
+#include "fbnic_mac.h"
+
+static int fbnic_hwmon_sensor_id(enum hwmon_sensor_types type)
+{
+ if (type == hwmon_temp)
+ return FBNIC_SENSOR_TEMP;
+ if (type == hwmon_in)
+ return FBNIC_SENSOR_VOLTAGE;
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t fbnic_hwmon_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type == hwmon_temp && attr == hwmon_temp_input)
+ return 0444;
+ if (type == hwmon_in && attr == hwmon_in_input)
+ return 0444;
+
+ return 0;
+}
+
+static int fbnic_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct fbnic_dev *fbd = dev_get_drvdata(dev);
+ const struct fbnic_mac *mac = fbd->mac;
+ int id;
+
+ id = fbnic_hwmon_sensor_id(type);
+ return id < 0 ? id : mac->get_sensor(fbd, id, val);
+}
+
+static const struct hwmon_ops fbnic_hwmon_ops = {
+ .is_visible = fbnic_hwmon_is_visible,
+ .read = fbnic_hwmon_read,
+};
+
+static const struct hwmon_channel_info *fbnic_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
+ NULL
+};
+
+static const struct hwmon_chip_info fbnic_chip_info = {
+ .ops = &fbnic_hwmon_ops,
+ .info = fbnic_hwmon_info,
+};
+
+void fbnic_hwmon_register(struct fbnic_dev *fbd)
+{
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return;
+
+ fbd->hwmon = hwmon_device_register_with_info(fbd->dev, "fbnic",
+ fbd, &fbnic_chip_info,
+ NULL);
+ if (IS_ERR(fbd->hwmon)) {
+ dev_notice(fbd->dev,
+ "Failed to register hwmon device %pe\n",
+ fbd->hwmon);
+ fbd->hwmon = NULL;
+ }
+}
+
+void fbnic_hwmon_unregister(struct fbnic_dev *fbd)
+{
+ if (!IS_REACHABLE(CONFIG_HWMON) || !fbd->hwmon)
+ return;
+
+ hwmon_device_unregister(fbd->hwmon);
+ fbd->hwmon = NULL;
+}
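
Given the channel layout above, the readings should surface through the
standard hwmon sysfs ABI as temp1_input (millidegrees Celsius) and in0_input
(millivolts) under a device named "fbnic", so generic tooling such as
lm-sensors can consume them without any driver-specific support.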
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index 7b654d0a6dac..80b82ff12c4d 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -686,6 +686,27 @@ fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
MAC_STAT_TX_BROADCAST);
}
+static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id, long *val)
+{
+ struct fbnic_fw_completion fw_cmpl;
+ s32 *sensor;
+
+ switch (id) {
+ case FBNIC_SENSOR_TEMP:
+ sensor = &fw_cmpl.tsene.millidegrees;
+ break;
+ case FBNIC_SENSOR_VOLTAGE:
+ sensor = &fw_cmpl.tsene.millivolts;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *val = *sensor;
+
+ return 0;
+}
+
static const struct fbnic_mac fbnic_mac_asic = {
.init_regs = fbnic_mac_init_regs,
.pcs_enable = fbnic_pcs_enable_asic,
@@ -695,6 +716,7 @@ static const struct fbnic_mac fbnic_mac_asic = {
.get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
.link_down = fbnic_mac_link_down_asic,
.link_up = fbnic_mac_link_up_asic,
+ .get_sensor = fbnic_mac_get_sensor_asic,
};
/**
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
index 476239a9d381..05a591653e09 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
@@ -47,6 +47,11 @@ enum {
#define FBNIC_LINK_MODE_PAM4 (FBNIC_LINK_50R1)
#define FBNIC_LINK_MODE_MASK (FBNIC_LINK_AUTO - 1)
+enum fbnic_sensor_id {
+ FBNIC_SENSOR_TEMP, /* Temp in millidegrees Centigrade */
+ FBNIC_SENSOR_VOLTAGE, /* Voltage in millivolts */
+};
+
/* This structure defines the interface hooks for the MAC. The MAC hooks
* will be configured as a const struct provided with a set of function
* pointers.
@@ -83,6 +88,8 @@ struct fbnic_mac {
void (*link_down)(struct fbnic_dev *fbd);
void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
+
+ int (*get_sensor)(struct fbnic_dev *fbd, int id, long *val);
};
int fbnic_mac_init(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index a400616a24d4..fc7d80db5fa6 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -42,18 +42,24 @@ int __fbnic_open(struct fbnic_net *fbn)
goto free_resources;
}
- err = fbnic_fw_init_heartbeat(fbd, false);
+ err = fbnic_time_start(fbn);
if (err)
goto release_ownership;
+ err = fbnic_fw_init_heartbeat(fbd, false);
+ if (err)
+ goto time_stop;
+
err = fbnic_pcs_irq_enable(fbd);
if (err)
- goto release_ownership;
+ goto time_stop;
/* Pull the BMC config and initialize the RPC */
fbnic_bmc_rpc_init(fbd);
fbnic_rss_reinit(fbd, fbn);
return 0;
+time_stop:
+ fbnic_time_stop(fbn);
release_ownership:
fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
free_resources:
@@ -82,6 +88,7 @@ static int fbnic_stop(struct net_device *netdev)
fbnic_down(fbn);
fbnic_pcs_irq_disable(fbn->fbd);
+ fbnic_time_stop(fbn);
fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
fbnic_free_resources(fbn);
@@ -266,6 +273,7 @@ void __fbnic_set_rx_mode(struct net_device *netdev)
/* Write updates to hardware */
fbnic_write_rules(fbd);
fbnic_write_macda(fbd);
+ fbnic_write_tce_tcam(fbd);
}
static void fbnic_set_rx_mode(struct net_device *netdev)
@@ -317,6 +325,84 @@ void fbnic_clear_rx_mode(struct net_device *netdev)
__dev_mc_unsync(netdev, NULL);
}
+static int fbnic_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ *config = fbn->hwtstamp_config;
+
+ return 0;
+}
+
+static int fbnic_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int old_rx_filter;
+
+ if (config->source != HWTSTAMP_SOURCE_NETDEV)
+ return -EOPNOTSUPP;
+
+ if (!kernel_hwtstamp_config_changed(config, &fbn->hwtstamp_config))
+ return 0;
+
+ /* Upscale the filters */
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ break;
+ case HWTSTAMP_FILTER_NTP_ALL:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Configure */
+ old_rx_filter = fbn->hwtstamp_config.rx_filter;
+ memcpy(&fbn->hwtstamp_config, config, sizeof(*config));
+
+ if (old_rx_filter != config->rx_filter && netif_running(fbn->netdev)) {
+ fbnic_rss_reinit(fbn->fbd, fbn);
+ fbnic_write_rules(fbn->fbd);
+ }
+
+ /* Save / report back filter configuration
+ * Note that our filter configuration is inexact. Instead of
+ * filtering for a specific UDP port or L2 Ethertype we
+ * timestamp all UDP or all non-IP packets. So if anything
+ * other than FILTER_ALL is requested we report FILTER_SOME
+ * to indicate that we will be timestamping a few additional
+ * packets.
+ */
+ if (config->rx_filter > HWTSTAMP_FILTER_ALL)
+ config->rx_filter = HWTSTAMP_FILTER_SOME;
+
+ return 0;
+}
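
The handler above services the kernel's standard hardware timestamping
configuration path. A minimal userspace sketch of driving it through the
classic SIOCSHWTSTAMP ioctl; the interface name and error handling are
illustrative only:

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	int request_ts(int fd /* an AF_INET SOCK_DGRAM socket */)
	{
		struct hwtstamp_config cfg = {
			.tx_type = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
			return -1;

		/* The kernel writes the effective config back; per the
		 * inexact-filter reporting above, fbnic returns
		 * HWTSTAMP_FILTER_SOME for this request.
		 */
		return cfg.rx_filter;
	}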
+
static void fbnic_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats64)
{
@@ -394,6 +480,8 @@ static const struct net_device_ops fbnic_netdev_ops = {
.ndo_set_mac_address = fbnic_set_mac,
.ndo_set_rx_mode = fbnic_set_rx_mode,
.ndo_get_stats64 = fbnic_get_stats64,
+ .ndo_hwtstamp_get = fbnic_hwtstamp_get,
+ .ndo_hwtstamp_set = fbnic_hwtstamp_set,
};
static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index 6c27da09a612..b8417b300778 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -33,6 +33,15 @@ struct fbnic_net {
u8 fec;
u8 link_mode;
+ /* Cached top bits of the HW time counter for 40b -> 64b conversion */
+ u32 time_high;
+ /* Protect readers of @time_offset, writers take @time_lock. */
+ struct u64_stats_sync time_seq;
+ /* Offset in ns between free running NIC PHC and time set via PTP
+ * clock callbacks
+ */
+ s64 time_offset;
+
u16 num_tx_queues;
u16 num_rx_queues;
@@ -45,6 +54,9 @@ struct fbnic_net {
struct fbnic_queue_stats rx_stats;
u64 link_down_events;
+ /* Timestamping filter config */
+ struct kernel_hwtstamp_config hwtstamp_config;
+
struct list_head napis;
};
@@ -60,6 +72,12 @@ void fbnic_reset_queues(struct fbnic_net *fbn,
unsigned int tx, unsigned int rx);
void fbnic_set_ethtool_ops(struct net_device *dev);
+int fbnic_ptp_setup(struct fbnic_dev *fbd);
+void fbnic_ptp_destroy(struct fbnic_dev *fbd);
+void fbnic_time_init(struct fbnic_net *fbn);
+int fbnic_time_start(struct fbnic_net *fbn);
+void fbnic_time_stop(struct fbnic_net *fbn);
+
void __fbnic_set_rx_mode(struct net_device *netdev);
void fbnic_clear_rx_mode(struct net_device *netdev);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index a4809fe0fc24..32702dc4a066 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -9,6 +9,7 @@
#include "fbnic.h"
#include "fbnic_drvinfo.h"
+#include "fbnic_hw_stats.h"
#include "fbnic_netdev.h"
char fbnic_driver_name[] = DRV_NAME;
@@ -198,6 +199,8 @@ static void fbnic_service_task(struct work_struct *work)
rtnl_lock();
+ fbnic_get_hw_stats32(fbd);
+
fbnic_fw_check_heartbeat(fbd);
fbnic_health_check(fbd);
@@ -288,6 +291,12 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
fbnic_devlink_register(fbd);
+ fbnic_dbg_fbd_init(fbd);
+
+ /* Capture snapshot of hardware stats so netdev can calculate delta */
+ fbnic_reset_hw_stats(fbd);
+
+ fbnic_hwmon_register(fbd);
if (!fbd->dsn) {
dev_warn(&pdev->dev, "Reading serial number failed\n");
@@ -300,14 +309,20 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_failure_mode;
}
+ err = fbnic_ptp_setup(fbd);
+ if (err)
+ goto ifm_free_netdev;
+
err = fbnic_netdev_register(netdev);
if (err) {
dev_err(&pdev->dev, "Netdev registration failed: %d\n", err);
- goto ifm_free_netdev;
+ goto ifm_destroy_ptp;
}
return 0;
+ifm_destroy_ptp:
+ fbnic_ptp_destroy(fbd);
ifm_free_netdev:
fbnic_netdev_free(fbd);
init_failure_mode:
@@ -319,7 +334,6 @@ init_failure_mode:
free_irqs:
fbnic_free_irqs(fbd);
free_fbd:
- pci_disable_device(pdev);
fbnic_devlink_free(fbd);
return err;
@@ -342,14 +356,16 @@ static void fbnic_remove(struct pci_dev *pdev)
fbnic_netdev_unregister(netdev);
cancel_delayed_work_sync(&fbd->service_task);
+ fbnic_ptp_destroy(fbd);
fbnic_netdev_free(fbd);
}
+ fbnic_hwmon_unregister(fbd);
+ fbnic_dbg_fbd_exit(fbd);
fbnic_devlink_unregister(fbd);
fbnic_fw_disable_mbx(fbd);
fbnic_free_irqs(fbd);
- pci_disable_device(pdev);
fbnic_devlink_free(fbd);
}
@@ -542,9 +558,13 @@ static int __init fbnic_init_module(void)
{
int err;
+ fbnic_dbg_init();
+
err = pci_register_driver(&fbnic_driver);
- if (err)
+ if (err) {
+ fbnic_dbg_exit();
goto out;
+ }
pr_info(DRV_SUMMARY " (%s)", fbnic_driver.name);
out:
@@ -560,5 +580,7 @@ module_init(fbnic_init_module);
static void __exit fbnic_exit_module(void)
{
pci_unregister_driver(&fbnic_driver);
+
+ fbnic_dbg_exit();
}
module_exit(fbnic_exit_module);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
index c8aa29fc052b..908c098cd59e 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
@@ -244,6 +244,12 @@ void fbnic_bmc_rpc_init(struct fbnic_dev *fbd)
((_ip) ? FBNIC_RPC_TCAM_ACT1_IP_VALID : 0) | \
((_v6) ? FBNIC_RPC_TCAM_ACT1_IP_IS_V6 : 0))
+#define FBNIC_TSTAMP_MASK(_all, _udp, _ether) \
+ (((_all) ? ((1u << FBNIC_NUM_HASH_OPT) - 1) : 0) | \
+ ((_udp) ? (1u << FBNIC_UDP6_HASH_OPT) | \
+ (1u << FBNIC_UDP4_HASH_OPT) : 0) | \
+ ((_ether) ? (1u << FBNIC_ETHER_HASH_OPT) : 0))
+
void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn)
{
static const u32 act1_value[FBNIC_NUM_HASH_OPT] = {
@@ -255,6 +261,7 @@ void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn)
FBNIC_ACT1_INIT(0, 0, 1, 0), /* IP4 */
0 /* Ether */
};
+ u32 tstamp_mask = 0;
unsigned int i;
/* To support scenarios where a BMC is present we must write the
@@ -264,6 +271,28 @@ void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn)
BUILD_BUG_ON(FBNIC_RSS_EN_NUM_UNICAST * 2 != FBNIC_RSS_EN_NUM_ENTRIES);
BUILD_BUG_ON(ARRAY_SIZE(act1_value) != FBNIC_NUM_HASH_OPT);
+ /* Set timestamp mask with 1b per flow type */
+ if (fbn->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
+ switch (fbn->hwtstamp_config.rx_filter) {
+ case HWTSTAMP_FILTER_ALL:
+ tstamp_mask = FBNIC_TSTAMP_MASK(1, 1, 1);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ tstamp_mask = FBNIC_TSTAMP_MASK(0, 1, 1);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ tstamp_mask = FBNIC_TSTAMP_MASK(0, 1, 0);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ tstamp_mask = FBNIC_TSTAMP_MASK(0, 0, 1);
+ break;
+ default:
+ netdev_warn(fbn->netdev, "Unsupported hwtstamp_rx_filter\n");
+ break;
+ }
+ }
+
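
As a concrete expansion of the mask macro: the L4-only cases above use
FBNIC_TSTAMP_MASK(0, 1, 0), which evaluates to
(1u << FBNIC_UDP6_HASH_OPT) | (1u << FBNIC_UDP4_HASH_OPT), so only the two
UDP flow types pick up the FBNIC_RPC_ACT_TBL0_TS_ENA bit further down, while
HWTSTAMP_FILTER_ALL sets one bit for every flow type via
((1u << FBNIC_NUM_HASH_OPT) - 1).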
/* Program RSS hash enable mask for host in action TCAM/table. */
for (i = fbnic_bmc_present(fbd) ? 0 : FBNIC_RSS_EN_NUM_UNICAST;
i < FBNIC_RSS_EN_NUM_ENTRIES; i++) {
@@ -287,6 +316,8 @@ void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn)
if (!dest)
dest = FBNIC_RPC_ACT_TBL0_DROP;
+ else if (tstamp_mask & (1u << flow_type))
+ dest |= FBNIC_RPC_ACT_TBL0_TS_ENA;
if (act1_value[flow_type] & FBNIC_RPC_TCAM_ACT1_L4_VALID)
dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DMA_HINT,
@@ -556,6 +587,116 @@ static void fbnic_clear_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), 0);
}
+static void fbnic_clear_tce_tcam_entry(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_TCE_TCAM_WORD_LEN; i++)
+ wr32(fbd, FBNIC_TCE_RAM_TCAM(idx, i), 0);
+}
+
+static void fbnic_write_tce_tcam_dest(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_mac_addr *mac_addr)
+{
+ u32 dest = FBNIC_TCE_TCAM_DEST_BMC;
+ u32 idx2dest_map;
+
+ if (is_multicast_ether_addr(mac_addr->value.addr8))
+ dest |= FBNIC_TCE_TCAM_DEST_MAC;
+
+ idx2dest_map = rd32(fbd, FBNIC_TCE_TCAM_IDX2DEST_MAP);
+ idx2dest_map &= ~(FBNIC_TCE_TCAM_IDX2DEST_MAP_DEST_ID_0 << (4 * idx));
+ idx2dest_map |= dest << (4 * idx);
+
+ wr32(fbd, FBNIC_TCE_TCAM_IDX2DEST_MAP, idx2dest_map);
+}
+
+static void fbnic_write_tce_tcam_entry(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_mac_addr *mac_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &mac_addr->mask.addr16[FBNIC_TCE_TCAM_WORD_LEN - 1];
+ value = &mac_addr->value.addr16[FBNIC_TCE_TCAM_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_TCE_TCAM_WORD_LEN; i++)
+ wr32(fbd, FBNIC_TCE_RAM_TCAM(idx, i),
+ FIELD_PREP(FBNIC_TCE_RAM_TCAM_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_TCE_RAM_TCAM_VALUE, ntohs(*value--)));
+
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_TCE_RAM_TCAM3(idx), FBNIC_TCE_RAM_TCAM3_MCQ_MASK |
+ FBNIC_TCE_RAM_TCAM3_DEST_MASK |
+ FBNIC_TCE_RAM_TCAM3_VALIDATE);
+}
+
+static void __fbnic_write_tce_tcam_rev(struct fbnic_dev *fbd)
+{
+ int tcam_idx = FBNIC_TCE_TCAM_NUM_ENTRIES;
+ int mac_idx;
+
+ for (mac_idx = ARRAY_SIZE(fbd->mac_addr); mac_idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[mac_idx];
+
+ /* Verify BMC bit is set */
+ if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam))
+ continue;
+
+ if (!tcam_idx) {
+ dev_err(fbd->dev, "TCE TCAM overflow\n");
+ return;
+ }
+
+ tcam_idx--;
+ fbnic_write_tce_tcam_dest(fbd, tcam_idx, mac_addr);
+ fbnic_write_tce_tcam_entry(fbd, tcam_idx, mac_addr);
+ }
+
+ while (tcam_idx)
+ fbnic_clear_tce_tcam_entry(fbd, --tcam_idx);
+
+ fbd->tce_tcam_last = tcam_idx;
+}
+
+static void __fbnic_write_tce_tcam(struct fbnic_dev *fbd)
+{
+ int tcam_idx = 0;
+ int mac_idx;
+
+ for (mac_idx = 0; mac_idx < ARRAY_SIZE(fbd->mac_addr); mac_idx++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[mac_idx];
+
+ /* Verify BMC bit is set */
+ if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam))
+ continue;
+
+ if (tcam_idx == FBNIC_TCE_TCAM_NUM_ENTRIES) {
+ dev_err(fbd->dev, "TCE TCAM overflow\n");
+ return;
+ }
+
+ fbnic_write_tce_tcam_dest(fbd, tcam_idx, mac_addr);
+ fbnic_write_tce_tcam_entry(fbd, tcam_idx, mac_addr);
+ tcam_idx++;
+ }
+
+ while (tcam_idx < FBNIC_TCE_TCAM_NUM_ENTRIES)
+ fbnic_clear_tce_tcam_entry(fbd, tcam_idx++);
+
+ fbd->tce_tcam_last = tcam_idx;
+}
+
+void fbnic_write_tce_tcam(struct fbnic_dev *fbd)
+{
+ if (fbd->tce_tcam_last)
+ __fbnic_write_tce_tcam_rev(fbd);
+ else
+ __fbnic_write_tce_tcam(fbd);
+}
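
A note on the alternation in fbnic_write_tce_tcam(): based on where the
previous pass finished (fbd->tce_tcam_last), the next rewrite fills the table
from the opposite end. The apparent intent, though this is an inference from
the code rather than anything stated here, is that successive updates land in
previously cleared entries before the old ones are overwritten or cleared, so
the BMC never sees a moment with no valid entries.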
+
void fbnic_clear_rules(struct fbnic_dev *fbd)
{
u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
index d62935f722a2..0d8285fa5b45 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
@@ -35,6 +35,9 @@ enum {
#define FBNIC_RPC_TCAM_ACT_WORD_LEN 11
#define FBNIC_RPC_TCAM_ACT_NUM_ENTRIES 64
+#define FBNIC_TCE_TCAM_WORD_LEN 3
+#define FBNIC_TCE_TCAM_NUM_ENTRIES 8
+
struct fbnic_mac_addr {
union {
unsigned char addr8[ETH_ALEN];
@@ -186,4 +189,5 @@ static inline int __fbnic_mc_unsync(struct fbnic_mac_addr *mac_addr)
void fbnic_clear_rules(struct fbnic_dev *fbd);
void fbnic_write_rules(struct fbnic_dev *fbd);
+void fbnic_write_tce_tcam(struct fbnic_dev *fbd);
#endif /* _FBNIC_RPC_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_time.c b/drivers/net/ethernet/meta/fbnic/fbnic_time.c
new file mode 100644
index 000000000000..39d99677b71e
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_time.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bitfield.h>
+#include <linux/jiffies.h>
+#include <linux/limits.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timer.h>
+
+#include "fbnic.h"
+#include "fbnic_csr.h"
+#include "fbnic_netdev.h"
+
+/* FBNIC timing & PTP implementation
+ * The datapath uses truncated 40b timestamps for scheduling and event
+ * reporting. We need to promote those to a full 64b, hence we periodically
+ * cache the top 32 bits of the HW time counter. Since this makes our time
+ * reporting non-atomic we leave the HW clock free running and adjust time
+ * offsets in SW as needed.
+ * The time offset is 64 bit - we need a seq counter for 32 bit machines.
+ * The time offset and the cache of top bits are independent, so we don't
+ * need a coherent snapshot of both - READ_ONCE()/WRITE_ONCE() plus a
+ * writer-side lock are enough.
+ */
+
+/* Refresh period for the top bits of the timestamp; give ourselves an 8x
+ * margin. This should translate to once a minute.
+ * The use of nsecs_to_jiffies() should be safe for a <=40b nsec value.
+ */
+#define FBNIC_TS_HIGH_REFRESH_JIF nsecs_to_jiffies((1ULL << 40) / 16)
+
+static struct fbnic_dev *fbnic_from_ptp_info(struct ptp_clock_info *ptp)
+{
+ return container_of(ptp, struct fbnic_dev, ptp_info);
+}
+
+/* This function is "slow" because we could guess which high part is
+ * correct based on @lo instead of re-reading, and skip reading @hi
+ * twice altogether when @lo is far enough from 0.
+ */
+static u64 __fbnic_time_get_slow(struct fbnic_dev *fbd)
+{
+ u32 hi, lo;
+
+ lockdep_assert_held(&fbd->time_lock);
+
+ do {
+ hi = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI);
+ lo = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_LO);
+ } while (hi != fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI));
+
+ return (u64)hi << 32 | lo;
+}
+
+static void __fbnic_time_set_addend(struct fbnic_dev *fbd, u64 addend)
+{
+ lockdep_assert_held(&fbd->time_lock);
+
+ fbnic_wr32(fbd, FBNIC_PTP_ADD_VAL_NS,
+ FIELD_PREP(FBNIC_PTP_ADD_VAL_NS_MASK, addend >> 32));
+ fbnic_wr32(fbd, FBNIC_PTP_ADD_VAL_SUBNS, (u32)addend);
+}
+
+static void fbnic_ptp_fresh_check(struct fbnic_dev *fbd)
+{
+ if (time_is_after_jiffies(fbd->last_read +
+ FBNIC_TS_HIGH_REFRESH_JIF * 3 / 2))
+ return;
+
+ dev_warn(fbd->dev, "NIC timestamp refresh stall, delayed by %lu sec\n",
+ (jiffies - fbd->last_read - FBNIC_TS_HIGH_REFRESH_JIF) / HZ);
+}
+
+static void fbnic_ptp_refresh_time(struct fbnic_dev *fbd, struct fbnic_net *fbn)
+{
+ unsigned long flags;
+ u32 hi;
+
+ spin_lock_irqsave(&fbd->time_lock, flags);
+ hi = fbnic_rd32(fbn->fbd, FBNIC_PTP_CTR_VAL_HI);
+ if (!fbnic_present(fbd))
+ goto out; /* Don't bother handling, reset is pending */
+ /* Keep the cached high value a bit lower to avoid racing with
+ * incoming timestamps. The logic in fbnic_ts40_to_ns() will
+ * take care of overflow in this case. This makes the cached
+ * time ~1 minute lower, so an incoming timestamp will always
+ * be later than the cached time.
+ */
+ WRITE_ONCE(fbn->time_high, hi - 16);
+ fbd->last_read = jiffies;
+ out:
+ spin_unlock_irqrestore(&fbd->time_lock, flags);
+}
+
+static long fbnic_ptp_do_aux_work(struct ptp_clock_info *ptp)
+{
+ struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
+ struct fbnic_net *fbn;
+
+ fbn = netdev_priv(fbd->netdev);
+
+ fbnic_ptp_fresh_check(fbd);
+ fbnic_ptp_refresh_time(fbd, fbn);
+
+ return FBNIC_TS_HIGH_REFRESH_JIF;
+}
+
+static int fbnic_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
+ u64 addend, dclk_period;
+ unsigned long flags;
+
+ /* d_clock is 600 MHz; which in Q16.32 fixed point ns is: */
+ dclk_period = (((u64)1000000000) << 32) / FBNIC_CLOCK_FREQ;
+ addend = adjust_by_scaled_ppm(dclk_period, scaled_ppm);
+
+ spin_lock_irqsave(&fbd->time_lock, flags);
+ __fbnic_time_set_addend(fbd, addend);
+ fbnic_wr32(fbd, FBNIC_PTP_ADJUST, FBNIC_PTP_ADJUST_ADDEND_SET);
+
+ /* Flush, make sure FBNIC_PTP_ADD_VAL_* is stable for at least 4 clks */
+ fbnic_rd32(fbd, FBNIC_PTP_SPARE);
+ spin_unlock_irqrestore(&fbd->time_lock, flags);
+
+ return fbnic_present(fbd) ? 0 : -EIO;
+}
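
To put numbers on the fixed-point math: assuming FBNIC_CLOCK_FREQ is
600 * 1000 * 1000 as the 600 MHz comment implies, then
dclk_period = (10^9 << 32) / 600000000 = 7158278826 = 0x1AAAAAAAA, i.e. a
nominal increment of ~1.6667 ns per d_clock tick with a 32-bit fractional
part (0xAAAAAAAA ~= 2/3). adjust_by_scaled_ppm() then scales that nominal
value by scaled_ppm / 2^16 parts per million, and __fbnic_time_set_addend()
splits the result into its whole-nanosecond upper 32 bits
(FBNIC_PTP_ADD_VAL_NS) and fractional lower 32 bits
(FBNIC_PTP_ADD_VAL_SUBNS).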
+
+static int fbnic_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
+ struct fbnic_net *fbn;
+ unsigned long flags;
+
+ fbn = netdev_priv(fbd->netdev);
+
+ spin_lock_irqsave(&fbd->time_lock, flags);
+ u64_stats_update_begin(&fbn->time_seq);
+ WRITE_ONCE(fbn->time_offset, READ_ONCE(fbn->time_offset) + delta);
+ u64_stats_update_end(&fbn->time_seq);
+ spin_unlock_irqrestore(&fbd->time_lock, flags);
+
+ return 0;
+}
+
+static int
+fbnic_ptp_gettimex64(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
+ struct fbnic_net *fbn;
+ unsigned long flags;
+ u64 time_ns;
+ u32 hi, lo;
+
+ fbn = netdev_priv(fbd->netdev);
+
+ spin_lock_irqsave(&fbd->time_lock, flags);
+
+ do {
+ hi = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI);
+ ptp_read_system_prets(sts);
+ lo = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_LO);
+ ptp_read_system_postts(sts);
+ /* Similarly to comment above __fbnic_time_get_slow()
+ * - this can be optimized if needed.
+ */
+ } while (hi != fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI));
+
+ time_ns = ((u64)hi << 32 | lo) + fbn->time_offset;
+ spin_unlock_irqrestore(&fbd->time_lock, flags);
+
+ if (!fbnic_present(fbd))
+ return -EIO;
+
+ *ts = ns_to_timespec64(time_ns);
+
+ return 0;
+}
+
+static int
+fbnic_ptp_settime64(struct ptp_clock_info *ptp, const struct timespec64 *ts)
+{
+ struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
+ struct fbnic_net *fbn;
+ unsigned long flags;
+ u64 dev_ns, host_ns;
+ int ret;
+
+ fbn = netdev_priv(fbd->netdev);
+
+ host_ns = timespec64_to_ns(ts);
+
+ spin_lock_irqsave(&fbd->time_lock, flags);
+
+ dev_ns = __fbnic_time_get_slow(fbd);
+
+ if (fbnic_present(fbd)) {
+ u64_stats_update_begin(&fbn->time_seq);
+ WRITE_ONCE(fbn->time_offset, host_ns - dev_ns);
+ u64_stats_update_end(&fbn->time_seq);
+ ret = 0;
+ } else {
+ ret = -EIO;
+ }
+ spin_unlock_irqrestore(&fbd->time_lock, flags);
+
+ return ret;
+}
+
+static const struct ptp_clock_info fbnic_ptp_info = {
+ .owner = THIS_MODULE,
+ /* 1,000,000,000 - 1 PPB to ensure increment is positive
+ * after max negative adjustment.
+ */
+ .max_adj = 999999999,
+ .do_aux_work = fbnic_ptp_do_aux_work,
+ .adjfine = fbnic_ptp_adjfine,
+ .adjtime = fbnic_ptp_adjtime,
+ .gettimex64 = fbnic_ptp_gettimex64,
+ .settime64 = fbnic_ptp_settime64,
+};
+
+static void fbnic_ptp_reset(struct fbnic_dev *fbd)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ u64 dclk_period;
+
+ fbnic_wr32(fbd, FBNIC_PTP_CTRL,
+ FBNIC_PTP_CTRL_EN |
+ FIELD_PREP(FBNIC_PTP_CTRL_TICK_IVAL, 1));
+
+ /* d_clock is 600 MHz; which in Q16.32 fixed point ns is: */
+ dclk_period = (((u64)1000000000) << 32) / FBNIC_CLOCK_FREQ;
+
+ __fbnic_time_set_addend(fbd, dclk_period);
+
+ fbnic_wr32(fbd, FBNIC_PTP_INIT_HI, 0);
+ fbnic_wr32(fbd, FBNIC_PTP_INIT_LO, 0);
+
+ fbnic_wr32(fbd, FBNIC_PTP_ADJUST, FBNIC_PTP_ADJUST_INIT);
+
+ fbnic_wr32(fbd, FBNIC_PTP_CTRL,
+ FBNIC_PTP_CTRL_EN |
+ FBNIC_PTP_CTRL_TQS_OUT_EN |
+ FIELD_PREP(FBNIC_PTP_CTRL_MAC_OUT_IVAL, 3) |
+ FIELD_PREP(FBNIC_PTP_CTRL_TICK_IVAL, 1));
+
+ fbnic_rd32(fbd, FBNIC_PTP_SPARE);
+
+ fbn->time_offset = 0;
+ fbn->time_high = 0;
+}
+
+void fbnic_time_init(struct fbnic_net *fbn)
+{
+ /* This is not really a statistic, but the locking primitive fits
+ * our use case perfectly: we need atomic 8-byte READ_ONCE() /
+ * WRITE_ONCE() behavior.
+ */
+ u64_stats_init(&fbn->time_seq);
+}
+
+int fbnic_time_start(struct fbnic_net *fbn)
+{
+ fbnic_ptp_refresh_time(fbn->fbd, fbn);
+ /* Assume that fbnic_ptp_do_aux_work() will never be called if not
+ * scheduled here
+ */
+ return ptp_schedule_worker(fbn->fbd->ptp, FBNIC_TS_HIGH_REFRESH_JIF);
+}
+
+void fbnic_time_stop(struct fbnic_net *fbn)
+{
+ ptp_cancel_worker_sync(fbn->fbd->ptp);
+ fbnic_ptp_fresh_check(fbn->fbd);
+}
+
+int fbnic_ptp_setup(struct fbnic_dev *fbd)
+{
+ struct device *dev = fbd->dev;
+ unsigned long flags;
+
+ spin_lock_init(&fbd->time_lock);
+
+ spin_lock_irqsave(&fbd->time_lock, flags); /* Appease lockdep */
+ fbnic_ptp_reset(fbd);
+ spin_unlock_irqrestore(&fbd->time_lock, flags);
+
+ memcpy(&fbd->ptp_info, &fbnic_ptp_info, sizeof(fbnic_ptp_info));
+
+ fbd->ptp = ptp_clock_register(&fbd->ptp_info, dev);
+ if (IS_ERR(fbd->ptp))
+ dev_err(dev, "Failed to register PTP: %pe\n", fbd->ptp);
+
+ return PTR_ERR_OR_ZERO(fbd->ptp);
+}
+
+void fbnic_ptp_destroy(struct fbnic_dev *fbd)
+{
+ if (!fbd->ptp)
+ return;
+ ptp_clock_unregister(fbd->ptp);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index 6a6d7e22f1a7..b5050fabe8fe 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -12,9 +12,14 @@
#include "fbnic_netdev.h"
#include "fbnic_txrx.h"
+enum {
+ FBNIC_XMIT_CB_TS = 0x01,
+};
+
struct fbnic_xmit_cb {
u32 bytecount;
u8 desc_count;
+ u8 flags;
int hw_head;
};
@@ -43,6 +48,46 @@ static void fbnic_ring_wr32(struct fbnic_ring *ring, unsigned int csr, u32 val)
writel(val, csr_base + csr);
}
+/**
+ * fbnic_ts40_to_ns() - convert descriptor timestamp to PHC time
+ * @fbn: netdev priv of the FB NIC
+ * @ts40: timestamp read from a descriptor
+ *
+ * Convert a truncated 40-bit device timestamp, as read from a descriptor,
+ * to the full PHC time in nanoseconds.
+ *
+ * Return: u64 value of PHC time in nanoseconds
+ */
+static __maybe_unused u64 fbnic_ts40_to_ns(struct fbnic_net *fbn, u64 ts40)
+{
+ unsigned int s;
+ u64 time_ns;
+ s64 offset;
+ u8 ts_top;
+ u32 high;
+
+ do {
+ s = u64_stats_fetch_begin(&fbn->time_seq);
+ offset = READ_ONCE(fbn->time_offset);
+ } while (u64_stats_fetch_retry(&fbn->time_seq, s));
+
+ high = READ_ONCE(fbn->time_high);
+
+ /* Bits 63..40 from periodic clock reads, 39..0 from ts40 */
+ time_ns = (u64)(high >> 8) << 40 | ts40;
+
+ /* Compare bits 32-39 between the periodic reads and ts40 to
+ * see if the HW clock may have wrapped since the last read. We
+ * are sure that periodic reads are always at least ~1 minute
+ * behind, so this logic works perfectly fine.
+ */
+ ts_top = ts40 >> 32;
+ if (ts_top < (u8)high && (u8)high - ts_top > U8_MAX / 2)
+ time_ns += 1ULL << 40;
+
+ return time_ns + offset;
+}
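
A worked case for the wrap check: say the cached time_high has 0xff in its
low byte while a descriptor arrives with ts_top == 0x01. Then ts_top < 0xff
and 0xff - 0x01 = 0xfe, which exceeds U8_MAX / 2, so the clock is assumed to
have carried into bit 40 since the cache was refreshed and 1ULL << 40 is
added back. Small differences the other way (ts_top just below the cached
byte) stay under the threshold and are left alone.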
+
static unsigned int fbnic_desc_unused(struct fbnic_ring *ring)
{
return (ring->head - ring->tail - 1) & ring->size_mask;
@@ -110,11 +155,32 @@ static void fbnic_unmap_page_twd(struct device *dev, __le64 *twd)
#define FBNIC_TWD_TYPE(_type) \
cpu_to_le64(FIELD_PREP(FBNIC_TWD_TYPE_MASK, FBNIC_TWD_TYPE_##_type))
+static bool fbnic_tx_tstamp(struct sk_buff *skb)
+{
+ struct fbnic_net *fbn;
+
+ if (!unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ return false;
+
+ fbn = netdev_priv(skb->dev);
+ if (fbn->hwtstamp_config.tx_type == HWTSTAMP_TX_OFF)
+ return false;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ FBNIC_XMIT_CB(skb)->flags |= FBNIC_XMIT_CB_TS;
+ FBNIC_XMIT_CB(skb)->hw_head = -1;
+
+ return true;
+}
+
static bool
fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
{
unsigned int l2len, i3len;
+ if (fbnic_tx_tstamp(skb))
+ *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_TS);
+
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
return false;
@@ -205,6 +271,9 @@ fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
ring->tail = tail;
+ /* Record SW timestamp */
+ skb_tx_timestamp(skb);
+
/* Verify there is room for another packet */
fbnic_maybe_stop_tx(skb->dev, ring, FBNIC_MAX_SKB_DESC);
@@ -316,7 +385,7 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
struct fbnic_ring *ring, bool discard,
unsigned int hw_head)
{
- u64 total_bytes = 0, total_packets = 0;
+ u64 total_bytes = 0, total_packets = 0, ts_lost = 0;
unsigned int head = ring->head;
struct netdev_queue *txq;
unsigned int clean_desc;
@@ -331,6 +400,13 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
if (desc_cnt > clean_desc)
break;
+ if (unlikely(FBNIC_XMIT_CB(skb)->flags & FBNIC_XMIT_CB_TS)) {
+ FBNIC_XMIT_CB(skb)->hw_head = hw_head;
+ if (likely(!discard))
+ break;
+ ts_lost++;
+ }
+
ring->tx_buf[head] = NULL;
clean_desc -= desc_cnt;
@@ -368,6 +444,7 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
if (unlikely(discard)) {
u64_stats_update_begin(&ring->stats.syncp);
ring->stats.dropped += total_packets;
+ ring->stats.ts_lost += ts_lost;
u64_stats_update_end(&ring->stats.syncp);
netdev_tx_completed_queue(txq, total_packets, total_bytes);
@@ -384,6 +461,56 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
FBNIC_TX_DESC_WAKEUP);
}
+static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
+ struct fbnic_ring *ring,
+ u64 tcd, int *ts_head, int *head0)
+{
+ struct skb_shared_hwtstamps hwtstamp;
+ struct fbnic_net *fbn;
+ struct sk_buff *skb;
+ int head;
+ u64 ns;
+
+ head = (*ts_head < 0) ? ring->head : *ts_head;
+
+ do {
+ unsigned int desc_cnt;
+
+ if (head == ring->tail) {
+ if (unlikely(net_ratelimit()))
+ netdev_err(nv->napi.dev,
+ "Tx timestamp without matching packet\n");
+ return;
+ }
+
+ skb = ring->tx_buf[head];
+ desc_cnt = FBNIC_XMIT_CB(skb)->desc_count;
+
+ head += desc_cnt;
+ head &= ring->size_mask;
+ } while (!(FBNIC_XMIT_CB(skb)->flags & FBNIC_XMIT_CB_TS));
+
+ fbn = netdev_priv(nv->napi.dev);
+ ns = fbnic_ts40_to_ns(fbn, FIELD_GET(FBNIC_TCD_TYPE1_TS_MASK, tcd));
+
+ memset(&hwtstamp, 0, sizeof(hwtstamp));
+ hwtstamp.hwtstamp = ns_to_ktime(ns);
+
+ *ts_head = head;
+
+ FBNIC_XMIT_CB(skb)->flags &= ~FBNIC_XMIT_CB_TS;
+ if (*head0 < 0) {
+ head = FBNIC_XMIT_CB(skb)->hw_head;
+ if (head >= 0)
+ *head0 = head;
+ }
+
+ skb_tstamp_tx(skb, &hwtstamp);
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.ts_packets++;
+ u64_stats_update_end(&ring->stats.syncp);
+}
+
static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx,
struct page *page)
{
@@ -417,10 +544,12 @@ static void fbnic_page_pool_drain(struct fbnic_ring *ring, unsigned int idx,
}
static void fbnic_clean_twq(struct fbnic_napi_vector *nv, int napi_budget,
- struct fbnic_q_triad *qt, s32 head0)
+ struct fbnic_q_triad *qt, s32 ts_head, s32 head0)
{
if (head0 >= 0)
fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, head0);
+ else if (ts_head >= 0)
+ fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, ts_head);
}
static void
@@ -428,9 +557,9 @@ fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt,
int napi_budget)
{
struct fbnic_ring *cmpl = &qt->cmpl;
+ s32 head0 = -1, ts_head = -1;
__le64 *raw_tcd, done;
u32 head = cmpl->head;
- s32 head0 = -1;
done = (head & (cmpl->size_mask + 1)) ? 0 : cpu_to_le64(FBNIC_TCD_DONE);
raw_tcd = &cmpl->desc[head & cmpl->size_mask];
@@ -453,6 +582,12 @@ fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt,
* they are skipped for now.
*/
break;
+ case FBNIC_TCD_TYPE_1:
+ if (WARN_ON_ONCE(tcd & FBNIC_TCD_TWQ1))
+ break;
+
+ fbnic_clean_tsq(nv, &qt->sub0, tcd, &ts_head, &head0);
+ break;
default:
break;
}
@@ -472,7 +607,7 @@ fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt,
}
/* Unmap and free processed buffers */
- fbnic_clean_twq(nv, napi_budget, qt, head0);
+ fbnic_clean_twq(nv, napi_budget, qt, ts_head, head0);
}
static void fbnic_clean_bdq(struct fbnic_napi_vector *nv, int napi_budget,
@@ -707,6 +842,10 @@ static struct sk_buff *fbnic_build_skb(struct fbnic_napi_vector *nv,
/* Set MAC header specific fields */
skb->protocol = eth_type_trans(skb, nv->napi.dev);
+ /* Add timestamp if present */
+ if (pkt->hwtstamp)
+ skb_hwtstamps(skb)->hwtstamp = pkt->hwtstamp;
+
return skb;
}
@@ -717,6 +856,23 @@ static enum pkt_hash_types fbnic_skb_hash_type(u64 rcd)
PKT_HASH_TYPE_L2;
}
+static void fbnic_rx_tstamp(struct fbnic_napi_vector *nv, u64 rcd,
+ struct fbnic_pkt_buff *pkt)
+{
+ struct fbnic_net *fbn;
+ u64 ns, ts;
+
+ if (!FIELD_GET(FBNIC_RCD_OPT_META_TS, rcd))
+ return;
+
+ fbn = netdev_priv(nv->napi.dev);
+ ts = FIELD_GET(FBNIC_RCD_OPT_META_TS_MASK, rcd);
+ ns = fbnic_ts40_to_ns(fbn, ts);
+
+ /* Add timestamp to shared info */
+ pkt->hwtstamp = ns_to_ktime(ns);
+}
+
static void fbnic_populate_skb_fields(struct fbnic_napi_vector *nv,
u64 rcd, struct sk_buff *skb,
struct fbnic_q_triad *qt)
@@ -781,6 +937,8 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
if (FIELD_GET(FBNIC_RCD_OPT_META_TYPE_MASK, rcd))
break;
+ fbnic_rx_tstamp(nv, rcd, pkt);
+
/* We currently ignore the action table index */
break;
case FBNIC_RCD_TYPE_META:
@@ -907,6 +1065,8 @@ static void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
fbn->tx_stats.bytes += stats->bytes;
fbn->tx_stats.packets += stats->packets;
fbn->tx_stats.dropped += stats->dropped;
+ fbn->tx_stats.ts_lost += stats->ts_lost;
+ fbn->tx_stats.ts_packets += stats->ts_packets;
}
static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index 2f91f68d11d5..8d626287c3f4 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -47,6 +47,7 @@ struct fbnic_net;
struct fbnic_pkt_buff {
struct xdp_buff buff;
+ ktime_t hwtstamp;
u32 data_truesize;
u16 data_len;
u16 nr_frags;
@@ -56,6 +57,8 @@ struct fbnic_queue_stats {
u64 packets;
u64 bytes;
u64 dropped;
+ u64 ts_packets;
+ u64 ts_lost;
struct u64_stats_sync syncp;
};
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index ddd87ef71caf..c7b0b09c2b09 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1247,7 +1247,7 @@ static struct platform_driver ks8842_platform_driver = {
.name = DRV_NAME,
},
.probe = ks8842_probe,
- .remove_new = ks8842_remove,
+ .remove = ks8842_remove,
};
module_platform_driver(ks8842_platform_driver);
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 7fa1820db9cc..bb5138806c3f 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -216,22 +216,6 @@ static void ks8851_init_mac(struct ks8851_net *ks, struct device_node *np)
}
/**
- * ks8851_dbg_dumpkkt - dump initial packet contents to debug
- * @ks: The device state
- * @rxpkt: The data for the received packet
- *
- * Dump the initial data from the packet to dev_dbg().
- */
-static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
-{
- netdev_dbg(ks->netdev,
- "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
- rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7],
- rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11],
- rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
-}
-
-/**
* ks8851_rx_pkts - receive packets from the host
* @ks: The device information.
* @rxq: Queue of packets received in this function.
@@ -296,8 +280,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks, struct sk_buff_head *rxq)
ks->rdfifo(ks, rxpkt, rxalign + 8);
- if (netif_msg_pktdata(ks))
- ks8851_dbg_dumpkkt(ks, rxpkt);
+ netif_dbg(ks, pktdata, ks->netdev,
+ "pkt %12ph\n", &rxpkt[4]);
skb->protocol = eth_type_trans(skb, ks->netdev);
__skb_queue_tail(rxq, skb);
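
The removed ks8851_dbg_dumpkkt() dumped twelve bytes starting at rxpkt[4];
the replacement prints the same bytes through the %ph printk extension,
whose field width selects the byte count, while netif_dbg() folds in the
netif_msg_pktdata() test that previously guarded the call. For reference,
the documented %ph family also offers separator variants, e.g. %12ph
(space-separated hex), %12phC (colon-separated), and %12phN (no
separator); only the layout of the output changes, not the information.
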
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 381b9cd285eb..78695be2570b 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -334,7 +334,7 @@ static struct platform_driver ks8851_driver = {
.pm = &ks8851_pm_ops,
},
.probe = ks8851_probe_par,
- .remove_new = ks8851_remove_par,
+ .remove = ks8851_remove_par,
};
module_platform_driver(ks8851_driver);
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index ee046468652c..73832fb2bc32 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -59,6 +59,7 @@ config LAN743X
source "drivers/net/ethernet/microchip/lan865x/Kconfig"
source "drivers/net/ethernet/microchip/lan966x/Kconfig"
+source "drivers/net/ethernet/microchip/lan969x/Kconfig"
source "drivers/net/ethernet/microchip/sparx5/Kconfig"
source "drivers/net/ethernet/microchip/vcap/Kconfig"
source "drivers/net/ethernet/microchip/fdma/Kconfig"
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
index 3c65baed9fd8..7770df82200f 100644
--- a/drivers/net/ethernet/microchip/Makefile
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -11,6 +11,7 @@ lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o
obj-$(CONFIG_LAN865X) += lan865x/
obj-$(CONFIG_LAN966X_SWITCH) += lan966x/
+obj-$(CONFIG_LAN969X_SWITCH) += lan969x/
obj-$(CONFIG_SPARX5_SWITCH) += sparx5/
obj-$(CONFIG_VCAP) += vcap/
obj-$(CONFIG_FDMA) += fdma/
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 534d4716d5f7..3234a960fcc3 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -1285,7 +1285,7 @@ static void lan966x_remove(struct platform_device *pdev)
static struct platform_driver lan966x_driver = {
.probe = lan966x_probe,
- .remove_new = lan966x_remove,
+ .remove = lan966x_remove,
.driver = {
.name = "lan966x-switch",
.of_match_table = lan966x_match,
diff --git a/drivers/net/ethernet/microchip/lan969x/Kconfig b/drivers/net/ethernet/microchip/lan969x/Kconfig
new file mode 100644
index 000000000000..c5c6122ae2ec
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan969x/Kconfig
@@ -0,0 +1,5 @@
+config LAN969X_SWITCH
+ bool "Lan969x switch driver"
+ depends on SPARX5_SWITCH
+ help
+ This driver supports the lan969x family of network switch devices.
diff --git a/drivers/net/ethernet/microchip/lan969x/Makefile b/drivers/net/ethernet/microchip/lan969x/Makefile
new file mode 100644
index 000000000000..316405cbbc71
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan969x/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip lan969x network device drivers.
+#
+
+obj-$(CONFIG_SPARX5_SWITCH) += lan969x-switch.o
+
+lan969x-switch-y := lan969x_regs.o lan969x.o lan969x_calendar.o \
+ lan969x_vcap_ag_api.o lan969x_vcap_impl.o
+
+# Provide include files
+ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/fdma
+ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
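
Since LAN969X_SWITCH is a bool that depends on SPARX5_SWITCH, lan969x
support is compiled in alongside the sparx5 core rather than built as a
standalone module. Note the two-level gating: the parent Makefile enters
this directory under CONFIG_LAN969X_SWITCH, while the objects here are
listed under CONFIG_SPARX5_SWITCH.
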
diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x.c b/drivers/net/ethernet/microchip/lan969x/lan969x.c
new file mode 100644
index 000000000000..ac37d0f74ee3
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan969x/lan969x.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "lan969x.h"
+
+#define LAN969X_SDLB_GRP_CNT 5
+#define LAN969X_HSCH_LEAK_GRP_CNT 4
+
+static const struct sparx5_main_io_resource lan969x_main_iomap[] = {
+ { TARGET_CPU, 0xc0000, 0 }, /* 0xe00c0000 */
+ { TARGET_FDMA, 0xc0400, 0 }, /* 0xe00c0400 */
+ { TARGET_GCB, 0x2010000, 1 }, /* 0xe2010000 */
+ { TARGET_QS, 0x2030000, 1 }, /* 0xe2030000 */
+ { TARGET_PTP, 0x2040000, 1 }, /* 0xe2040000 */
+ { TARGET_ANA_ACL, 0x2050000, 1 }, /* 0xe2050000 */
+ { TARGET_LRN, 0x2060000, 1 }, /* 0xe2060000 */
+ { TARGET_VCAP_SUPER, 0x2080000, 1 }, /* 0xe2080000 */
+ { TARGET_QSYS, 0x20a0000, 1 }, /* 0xe20a0000 */
+ { TARGET_QFWD, 0x20b0000, 1 }, /* 0xe20b0000 */
+ { TARGET_XQS, 0x20c0000, 1 }, /* 0xe20c0000 */
+ { TARGET_VCAP_ES2, 0x20d0000, 1 }, /* 0xe20d0000 */
+ { TARGET_VCAP_ES0, 0x20e0000, 1 }, /* 0xe20e0000 */
+ { TARGET_ANA_AC_POL, 0x2200000, 1 }, /* 0xe2200000 */
+ { TARGET_QRES, 0x2280000, 1 }, /* 0xe2280000 */
+ { TARGET_EACL, 0x22c0000, 1 }, /* 0xe22c0000 */
+ { TARGET_ANA_CL, 0x2400000, 1 }, /* 0xe2400000 */
+ { TARGET_ANA_L3, 0x2480000, 1 }, /* 0xe2480000 */
+ { TARGET_ANA_AC_SDLB, 0x2500000, 1 }, /* 0xe2500000 */
+ { TARGET_HSCH, 0x2580000, 1 }, /* 0xe2580000 */
+ { TARGET_REW, 0x2600000, 1 }, /* 0xe2600000 */
+ { TARGET_ANA_L2, 0x2800000, 1 }, /* 0xe2800000 */
+ { TARGET_ANA_AC, 0x2900000, 1 }, /* 0xe2900000 */
+ { TARGET_VOP, 0x2a00000, 1 }, /* 0xe2a00000 */
+ { TARGET_DEV2G5, 0x3004000, 1 }, /* 0xe3004000 */
+ { TARGET_DEV10G, 0x3008000, 1 }, /* 0xe3008000 */
+ { TARGET_PCS10G_BR, 0x300c000, 1 }, /* 0xe300c000 */
+ { TARGET_DEV2G5 + 1, 0x3010000, 1 }, /* 0xe3010000 */
+ { TARGET_DEV2G5 + 2, 0x3014000, 1 }, /* 0xe3014000 */
+ { TARGET_DEV2G5 + 3, 0x3018000, 1 }, /* 0xe3018000 */
+ { TARGET_DEV2G5 + 4, 0x301c000, 1 }, /* 0xe301c000 */
+ { TARGET_DEV10G + 1, 0x3020000, 1 }, /* 0xe3020000 */
+ { TARGET_PCS10G_BR + 1, 0x3024000, 1 }, /* 0xe3024000 */
+ { TARGET_DEV2G5 + 5, 0x3028000, 1 }, /* 0xe3028000 */
+ { TARGET_DEV2G5 + 6, 0x302c000, 1 }, /* 0xe302c000 */
+ { TARGET_DEV2G5 + 7, 0x3030000, 1 }, /* 0xe3030000 */
+ { TARGET_DEV2G5 + 8, 0x3034000, 1 }, /* 0xe3034000 */
+ { TARGET_DEV10G + 2, 0x3038000, 1 }, /* 0xe3038000 */
+ { TARGET_PCS10G_BR + 2, 0x303c000, 1 }, /* 0xe303c000 */
+ { TARGET_DEV2G5 + 9, 0x3040000, 1 }, /* 0xe3040000 */
+ { TARGET_DEV5G, 0x3044000, 1 }, /* 0xe3044000 */
+ { TARGET_PCS5G_BR, 0x3048000, 1 }, /* 0xe3048000 */
+ { TARGET_DEV2G5 + 10, 0x304c000, 1 }, /* 0xe304c000 */
+ { TARGET_DEV2G5 + 11, 0x3050000, 1 }, /* 0xe3050000 */
+ { TARGET_DEV2G5 + 12, 0x3054000, 1 }, /* 0xe3054000 */
+ { TARGET_DEV10G + 3, 0x3058000, 1 }, /* 0xe3058000 */
+ { TARGET_PCS10G_BR + 3, 0x305c000, 1 }, /* 0xe305c000 */
+ { TARGET_DEV2G5 + 13, 0x3060000, 1 }, /* 0xe3060000 */
+ { TARGET_DEV5G + 1, 0x3064000, 1 }, /* 0xe3064000 */
+ { TARGET_PCS5G_BR + 1, 0x3068000, 1 }, /* 0xe3068000 */
+ { TARGET_DEV2G5 + 14, 0x306c000, 1 }, /* 0xe306c000 */
+ { TARGET_DEV2G5 + 15, 0x3070000, 1 }, /* 0xe3070000 */
+ { TARGET_DEV2G5 + 16, 0x3074000, 1 }, /* 0xe3074000 */
+ { TARGET_DEV10G + 4, 0x3078000, 1 }, /* 0xe3078000 */
+ { TARGET_PCS10G_BR + 4, 0x307c000, 1 }, /* 0xe307c000 */
+ { TARGET_DEV2G5 + 17, 0x3080000, 1 }, /* 0xe3080000 */
+ { TARGET_DEV5G + 2, 0x3084000, 1 }, /* 0xe3084000 */
+ { TARGET_PCS5G_BR + 2, 0x3088000, 1 }, /* 0xe3088000 */
+ { TARGET_DEV2G5 + 18, 0x308c000, 1 }, /* 0xe308c000 */
+ { TARGET_DEV2G5 + 19, 0x3090000, 1 }, /* 0xe3090000 */
+ { TARGET_DEV2G5 + 20, 0x3094000, 1 }, /* 0xe3094000 */
+ { TARGET_DEV10G + 5, 0x3098000, 1 }, /* 0xe3098000 */
+ { TARGET_PCS10G_BR + 5, 0x309c000, 1 }, /* 0xe309c000 */
+ { TARGET_DEV2G5 + 21, 0x30a0000, 1 }, /* 0xe30a0000 */
+ { TARGET_DEV5G + 3, 0x30a4000, 1 }, /* 0xe30a4000 */
+ { TARGET_PCS5G_BR + 3, 0x30a8000, 1 }, /* 0xe30a8000 */
+ { TARGET_DEV2G5 + 22, 0x30ac000, 1 }, /* 0xe30ac000 */
+ { TARGET_DEV2G5 + 23, 0x30b0000, 1 }, /* 0xe30b0000 */
+ { TARGET_DEV2G5 + 24, 0x30b4000, 1 }, /* 0xe30b4000 */
+ { TARGET_DEV10G + 6, 0x30b8000, 1 }, /* 0xe30b8000 */
+ { TARGET_PCS10G_BR + 6, 0x30bc000, 1 }, /* 0xe30bc000 */
+ { TARGET_DEV2G5 + 25, 0x30c0000, 1 }, /* 0xe30c0000 */
+ { TARGET_DEV10G + 7, 0x30c4000, 1 }, /* 0xe30c4000 */
+ { TARGET_PCS10G_BR + 7, 0x30c8000, 1 }, /* 0xe30c8000 */
+ { TARGET_DEV2G5 + 26, 0x30cc000, 1 }, /* 0xe30cc000 */
+ { TARGET_DEV10G + 8, 0x30d0000, 1 }, /* 0xe30d0000 */
+ { TARGET_PCS10G_BR + 8, 0x30d4000, 1 }, /* 0xe30d4000 */
+ { TARGET_DEV2G5 + 27, 0x30d8000, 1 }, /* 0xe30d8000 */
+ { TARGET_DEV10G + 9, 0x30dc000, 1 }, /* 0xe30dc000 */
+ { TARGET_PCS10G_BR + 9, 0x30e0000, 1 }, /* 0xe30e0000 */
+ { TARGET_DSM, 0x30ec000, 1 }, /* 0xe30ec000 */
+ { TARGET_PORT_CONF, 0x30f0000, 1 }, /* 0xe30f0000 */
+ { TARGET_ASM, 0x3200000, 1 }, /* 0xe3200000 */
+};
+
+static struct sparx5_sdlb_group lan969x_sdlb_groups[LAN969X_SDLB_GRP_CNT] = {
+ { 1000000000, 8192 / 2, 64 }, /* 1 G */
+ { 500000000, 8192 / 2, 64 }, /* 500 M */
+ { 100000000, 8192 / 4, 64 }, /* 100 M */
+ { 50000000, 8192 / 4, 64 }, /* 50 M */
+ { 5000000, 8192 / 8, 64 }, /* 5 M */
+};
+
+static u32 lan969x_hsch_max_group_rate[LAN969X_HSCH_LEAK_GRP_CNT] = {
+ 655355, 1048568, 6553550, 10485680
+};
+
+static struct sparx5_sdlb_group *lan969x_get_sdlb_group(int idx)
+{
+ return &lan969x_sdlb_groups[idx];
+}
+
+static u32 lan969x_get_hsch_max_group_rate(int grp)
+{
+ return lan969x_hsch_max_group_rate[grp];
+}
+
+static u32 lan969x_get_dev_mode_bit(struct sparx5 *sparx5, int port)
+{
+ if (lan969x_port_is_2g5(port) || lan969x_port_is_5g(port))
+ return port;
+
+ /* 10G */
+ switch (port) {
+ case 0:
+ return 12;
+ case 4:
+ return 13;
+ case 8:
+ return 14;
+ case 12:
+ return 0;
+ default:
+ return port;
+ }
+}
+
+static u32 lan969x_port_dev_mapping(struct sparx5 *sparx5, int port)
+{
+ if (lan969x_port_is_5g(port)) {
+ switch (port) {
+ case 9:
+ return 0;
+ case 13:
+ return 1;
+ case 17:
+ return 2;
+ case 21:
+ return 3;
+ }
+ }
+
+ if (lan969x_port_is_10g(port)) {
+ switch (port) {
+ case 0:
+ return 0;
+ case 4:
+ return 1;
+ case 8:
+ return 2;
+ case 12:
+ return 3;
+ case 16:
+ return 4;
+ case 20:
+ return 5;
+ case 24:
+ return 6;
+ case 25:
+ return 7;
+ case 26:
+ return 8;
+ case 27:
+ return 9;
+ }
+ }
+
+ /* 2g5 port */
+ return port;
+}
+
+static int lan969x_port_mux_set(struct sparx5 *sparx5, struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 portno = port->portno;
+ u32 inst;
+
+ if (port->conf.portmode == conf->portmode)
+ return 0; /* Nothing to do */
+
+ switch (conf->portmode) {
+ case PHY_INTERFACE_MODE_QSGMII: /* QSGMII: 4x2G5 devices. Mode Q' */
+ inst = (portno - portno % 4) / 4;
+ spx5_rmw(BIT(inst), BIT(inst), sparx5, PORT_CONF_QSGMII_ENA);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static irqreturn_t lan969x_ptp_irq_handler(int irq, void *args)
+{
+ int budget = SPARX5_MAX_PTP_ID;
+ struct sparx5 *sparx5 = args;
+
+ while (budget--) {
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sparx5_port *port;
+ struct timespec64 ts;
+ unsigned long flags;
+ u32 val, id, txport;
+ u32 delay;
+
+ val = spx5_rd(sparx5, PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & PTP_TWOSTEP_CTRL_PTP_VLD))
+ break;
+
+ WARN_ON(val & PTP_TWOSTEP_CTRL_PTP_OVFL);
+
+ if (!(val & PTP_TWOSTEP_CTRL_STAMP_TX))
+ continue;
+
+ /* Retrieve the ts Tx port */
+ txport = PTP_TWOSTEP_CTRL_STAMP_PORT_GET(val);
+
+ /* Retrieve its associated skb */
+ port = sparx5->ports[txport];
+
+ /* Retrieve the delay */
+ delay = spx5_rd(sparx5, PTP_TWOSTEP_STAMP_NSEC);
+ delay = PTP_TWOSTEP_STAMP_NSEC_NS_GET(delay);
+
+ /* Get the next timestamp from the FIFO; it must be the RX
+ * timestamp, which carries the ID of the frame.
+ */
+ spx5_rmw(PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
+ PTP_TWOSTEP_CTRL_PTP_NXT,
+ sparx5, PTP_TWOSTEP_CTRL);
+
+ val = spx5_rd(sparx5, PTP_TWOSTEP_CTRL);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & PTP_TWOSTEP_CTRL_PTP_VLD))
+ break;
+
+ /* Read RX timestamping to get the ID */
+ id = spx5_rd(sparx5, PTP_TWOSTEP_STAMP_NSEC);
+ id <<= 8;
+ id |= spx5_rd(sparx5, PTP_TWOSTEP_STAMP_SUBNS);
+
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (SPARX5_SKB_CB(skb)->ts_id != id)
+ continue;
+
+ __skb_unlink(skb, &port->tx_skbs);
+ skb_match = skb;
+ break;
+ }
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+
+ /* Next ts */
+ spx5_rmw(PTP_TWOSTEP_CTRL_PTP_NXT_SET(1),
+ PTP_TWOSTEP_CTRL_PTP_NXT,
+ sparx5, PTP_TWOSTEP_CTRL);
+
+ if (WARN_ON(!skb_match))
+ continue;
+
+ spin_lock(&sparx5->ptp_ts_id_lock);
+ sparx5->ptp_skbs--;
+ spin_unlock(&sparx5->ptp_ts_id_lock);
+
+ /* Get the h/w timestamp */
+ sparx5_get_hwtimestamp(sparx5, &ts, delay);
+
+ /* Set the timestamp in the skb */
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb_match, &shhwtstamps);
+
+ dev_kfree_skb_any(skb_match);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct sparx5_regs lan969x_regs = {
+ .tsize = lan969x_tsize,
+ .gaddr = lan969x_gaddr,
+ .gcnt = lan969x_gcnt,
+ .gsize = lan969x_gsize,
+ .raddr = lan969x_raddr,
+ .rcnt = lan969x_rcnt,
+ .fpos = lan969x_fpos,
+ .fsize = lan969x_fsize,
+};
+
+static const struct sparx5_consts lan969x_consts = {
+ .n_ports = 30,
+ .n_ports_all = 35,
+ .n_hsch_l1_elems = 32,
+ .n_hsch_queues = 4,
+ .n_lb_groups = 5,
+ .n_pgids = 1054, /* (1024 + n_ports) */
+ .n_sio_clks = 1,
+ .n_own_upsids = 1,
+ .n_auto_cals = 4,
+ .n_filters = 256,
+ .n_gates = 256,
+ .n_sdlbs = 496,
+ .n_dsm_cal_taxis = 5,
+ .buf_size = 1572864,
+ .qres_max_prio_idx = 315,
+ .qres_max_colour_idx = 323,
+ .tod_pin = 4,
+ .vcaps = lan969x_vcaps,
+ .vcap_stats = &lan969x_vcap_stats,
+ .vcaps_cfg = lan969x_vcap_inst_cfg,
+};
+
+static const struct sparx5_ops lan969x_ops = {
+ .is_port_2g5 = &lan969x_port_is_2g5,
+ .is_port_5g = &lan969x_port_is_5g,
+ .is_port_10g = &lan969x_port_is_10g,
+ .is_port_25g = &lan969x_port_is_25g,
+ .get_port_dev_index = &lan969x_port_dev_mapping,
+ .get_port_dev_bit = &lan969x_get_dev_mode_bit,
+ .get_hsch_max_group_rate = &lan969x_get_hsch_max_group_rate,
+ .get_sdlb_group = &lan969x_get_sdlb_group,
+ .set_port_mux = &lan969x_port_mux_set,
+ .ptp_irq_handler = &lan969x_ptp_irq_handler,
+ .dsm_calendar_calc = &lan969x_dsm_calendar_calc,
+};
+
+const struct sparx5_match_data lan969x_desc = {
+ .iomap = lan969x_main_iomap,
+ .iomap_size = ARRAY_SIZE(lan969x_main_iomap),
+ .ioranges = 2,
+ .regs = &lan969x_regs,
+ .consts = &lan969x_consts,
+ .ops = &lan969x_ops,
+};
+EXPORT_SYMBOL_GPL(lan969x_desc);
+
+MODULE_DESCRIPTION("Microchip lan969x switch driver");
+MODULE_AUTHOR("Daniel Machon <daniel.machon@microchip.com>");
+MODULE_LICENSE("Dual MIT/GPL");
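
Everything SoC-specific is funneled through this match data: the shared
sparx5 core dereferences sparx5->data at run time (as lan969x_calendar.c
below does with sparx5->data->consts) instead of hard-coding chip
constants. A minimal, hypothetical call site, using only structures this
file defines:

/* Sketch only; the helper name is illustrative, but the
 * sparx5->data->ops indirection matches the structures above.
 */
static bool example_port_is_10g(struct sparx5 *sparx5, int portno)
{
	/* ops points at lan969x_ops via lan969x_desc */
	return sparx5->data->ops->is_port_10g(portno);
}
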
diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x.h b/drivers/net/ethernet/microchip/lan969x/lan969x.h
new file mode 100644
index 000000000000..2489d0d32dfd
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan969x/lan969x.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __LAN969X_H__
+#define __LAN969X_H__
+
+#include "../sparx5/sparx5_main.h"
+#include "../sparx5/sparx5_regs.h"
+#include "../sparx5/sparx5_vcap_impl.h"
+
+/* lan969x.c */
+extern const struct sparx5_match_data lan969x_desc;
+
+/* lan969x_vcap_ag_api.c */
+extern const struct vcap_statistics lan969x_vcap_stats;
+extern const struct vcap_info lan969x_vcaps[];
+
+/* lan969x_vcap_impl.c */
+extern const struct sparx5_vcap_inst lan969x_vcap_inst_cfg[];
+
+/* lan969x_regs.c */
+extern const unsigned int lan969x_tsize[TSIZE_LAST];
+extern const unsigned int lan969x_raddr[RADDR_LAST];
+extern const unsigned int lan969x_rcnt[RCNT_LAST];
+extern const unsigned int lan969x_gaddr[GADDR_LAST];
+extern const unsigned int lan969x_gcnt[GCNT_LAST];
+extern const unsigned int lan969x_gsize[GSIZE_LAST];
+extern const unsigned int lan969x_fpos[FPOS_LAST];
+extern const unsigned int lan969x_fsize[FSIZE_LAST];
+
+static inline bool lan969x_port_is_2g5(int portno)
+{
+ return portno == 1 || portno == 2 || portno == 3 ||
+ portno == 5 || portno == 6 || portno == 7 ||
+ portno == 10 || portno == 11 || portno == 14 ||
+ portno == 15 || portno == 18 || portno == 19 ||
+ portno == 22 || portno == 23;
+}
+
+static inline bool lan969x_port_is_5g(int portno)
+{
+ return portno == 9 || portno == 13 || portno == 17 ||
+ portno == 21;
+}
+
+static inline bool lan969x_port_is_10g(int portno)
+{
+ return portno == 0 || portno == 4 || portno == 8 ||
+ portno == 12 || portno == 16 || portno == 20 ||
+ portno == 24 || portno == 25 || portno == 26 ||
+ portno == 27;
+}
+
+static inline bool lan969x_port_is_25g(int portno)
+{
+ return false;
+}
+
+/* lan969x_calendar.c */
+int lan969x_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
+ struct sparx5_calendar_data *data);
+#endif
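
As a quick cross-check of these helpers against the iomap in lan969x.c
above: port 13 satisfies lan969x_port_is_5g(), lan969x_port_dev_mapping()
maps it to device instance 1, and the iomap indeed places TARGET_DEV5G + 1
at 0x3064000 right next to TARGET_DEV2G5 + 13 at 0x3060000.
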
diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x_calendar.c b/drivers/net/ethernet/microchip/lan969x/lan969x_calendar.c
new file mode 100644
index 000000000000..e857640df185
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan969x/lan969x_calendar.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "lan969x.h"
+
+#define LAN969X_DSM_CAL_DEVS_PER_TAXI 10
+#define LAN969X_DSM_CAL_TAXIS 5
+
+enum lan969x_dsm_cal_dev {
+ DSM_CAL_DEV_2G5,
+ DSM_CAL_DEV_5G,
+ DSM_CAL_DEV_10G,
+ DSM_CAL_DEV_OTHER, /* 1G or less */
+ DSM_CAL_DEV_MAX
+};
+
+/* Each entry in the following struct defines properties for a given speed
+ * (10G, 5G, 2.5G, or 1G or less).
+ */
+struct lan969x_dsm_cal_dev_speed {
+ /* Number of devices that require this speed. */
+ u32 n_devs;
+
+ /* Array of devices that require this speed. */
+ u32 devs[LAN969X_DSM_CAL_DEVS_PER_TAXI];
+
+ /* Number of slots required for one device running this speed. */
+ u32 n_slots;
+
+ /* Gap between two slots for one device running this speed. */
+ u32 gap;
+};
+
+static u32
+lan969x_taxi_ports[LAN969X_DSM_CAL_TAXIS][LAN969X_DSM_CAL_DEVS_PER_TAXI] = {
+ { 0, 4, 1, 2, 3, 5, 6, 7, 28, 29 },
+ { 8, 12, 9, 13, 10, 11, 14, 15, 99, 99 },
+ { 16, 20, 17, 21, 18, 19, 22, 23, 99, 99 },
+ { 24, 25, 99, 99, 99, 99, 99, 99, 99, 99 },
+ { 26, 27, 99, 99, 99, 99, 99, 99, 99, 99 }
+};
+
+static int lan969x_dsm_cal_idx_get(u32 *calendar, u32 cal_len, u32 *cal_idx)
+{
+ if (*cal_idx >= cal_len)
+ return -EINVAL;
+
+ do {
+ if (calendar[*cal_idx] == SPX5_DSM_CAL_EMPTY)
+ return 0;
+
+ (*cal_idx)++;
+ } while (*cal_idx < cal_len);
+
+ return -ENOENT;
+}
+
+static enum lan969x_dsm_cal_dev lan969x_dsm_cal_get_dev(int speed)
+{
+ return (speed == 10000 ? DSM_CAL_DEV_10G :
+ speed == 5000 ? DSM_CAL_DEV_5G :
+ speed == 2500 ? DSM_CAL_DEV_2G5 :
+ DSM_CAL_DEV_OTHER);
+}
+
+static int lan969x_dsm_cal_get_speed(enum lan969x_dsm_cal_dev dev)
+{
+ return (dev == DSM_CAL_DEV_10G ? 10000 :
+ dev == DSM_CAL_DEV_5G ? 5000 :
+ dev == DSM_CAL_DEV_2G5 ? 2500 :
+ 1000);
+}
+
+int lan969x_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
+ struct sparx5_calendar_data *data)
+{
+ struct lan969x_dsm_cal_dev_speed dev_speeds[DSM_CAL_DEV_MAX] = {};
+ u32 cal_len, n_slots, taxi_bw, n_devs = 0, required_bw = 0;
+ struct lan969x_dsm_cal_dev_speed *speed;
+ int err;
+
+ /* Maximum bandwidth for this taxi */
+ taxi_bw = (128 * 1000000) / sparx5_clk_period(sparx5->coreclock);
+
+ memcpy(data->taxi_ports, &lan969x_taxi_ports[taxi],
+ LAN969X_DSM_CAL_DEVS_PER_TAXI * sizeof(u32));
+
+ for (int i = 0; i < LAN969X_DSM_CAL_DEVS_PER_TAXI; i++) {
+ u32 portno = data->taxi_ports[i];
+ enum sparx5_cal_bw bw;
+
+ bw = sparx5_get_port_cal_speed(sparx5, portno);
+
+ if (portno < sparx5->data->consts->n_ports_all)
+ data->taxi_speeds[i] = sparx5_cal_speed_to_value(bw);
+ else
+ data->taxi_speeds[i] = 0;
+ }
+
+ /* Determine the different port types (10G, 5G, 2.5G, <= 1G) in
+ * this taxi map.
+ */
+ for (int i = 0; i < LAN969X_DSM_CAL_DEVS_PER_TAXI; i++) {
+ u32 taxi_speed = data->taxi_speeds[i];
+ enum lan969x_dsm_cal_dev dev;
+
+ if (taxi_speed == 0)
+ continue;
+
+ required_bw += taxi_speed;
+
+ dev = lan969x_dsm_cal_get_dev(taxi_speed);
+ speed = &dev_speeds[dev];
+ speed->devs[speed->n_devs++] = i;
+ n_devs++;
+ }
+
+ if (required_bw > taxi_bw) {
+ pr_err("Required bandwidth: %u is higher than total taxi bandwidth: %u",
+ required_bw, taxi_bw);
+ return -EINVAL;
+ }
+
+ if (n_devs == 0) {
+ data->schedule[0] = SPX5_DSM_CAL_EMPTY;
+ return 0;
+ }
+
+ cal_len = n_devs;
+
+ /* Search for a calendar length that fits all active devices. */
+ while (cal_len < SPX5_DSM_CAL_LEN) {
+ u32 bw_per_slot = taxi_bw / cal_len;
+
+ n_slots = 0;
+
+ for (int i = 0; i < DSM_CAL_DEV_MAX; i++) {
+ speed = &dev_speeds[i];
+
+ if (speed->n_devs == 0)
+ continue;
+
+ required_bw = lan969x_dsm_cal_get_speed(i);
+ speed->n_slots = DIV_ROUND_UP(required_bw, bw_per_slot);
+
+ if (speed->n_slots)
+ speed->gap = DIV_ROUND_UP(cal_len,
+ speed->n_slots);
+ else
+ speed->gap = 0;
+
+ n_slots += speed->n_slots * speed->n_devs;
+ }
+
+ if (n_slots <= cal_len)
+ break; /* Found a suitable calendar length. */
+
+ /* Not good enough yet. */
+ cal_len = n_slots;
+ }
+
+ if (cal_len > SPX5_DSM_CAL_LEN) {
+ pr_err("Invalid length: %u for taxi: %u", cal_len, taxi);
+ return -EINVAL;
+ }
+
+ for (u32 i = 0; i < SPX5_DSM_CAL_LEN; i++)
+ data->schedule[i] = SPX5_DSM_CAL_EMPTY;
+
+ /* Place each device's slots in the calendar */
+ for (u32 i = 0; i < DSM_CAL_DEV_MAX; i++) {
+ speed = &dev_speeds[i];
+ for (u32 dev = 0; dev < speed->n_devs; dev++) {
+ u32 idx = 0;
+
+ for (n_slots = 0; n_slots < speed->n_slots; n_slots++) {
+ err = lan969x_dsm_cal_idx_get(data->schedule,
+ cal_len, &idx);
+ if (err)
+ return err;
+ data->schedule[idx] = speed->devs[dev];
+ idx += speed->gap;
+ }
+ }
+ }
+
+ return 0;
+}
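
A worked example with hypothetical numbers (only the relative units of
taxi_bw and the port speeds matter): take a taxi carrying one 10G device
and two 2.5G devices, and assume taxi_bw = 40000. Then n_devs = 3, so
cal_len starts at 3 and bw_per_slot = 40000 / 3 = 13333. The 10G class
needs DIV_ROUND_UP(10000, 13333) = 1 slot per device and the 2.5G class
DIV_ROUND_UP(2500, 13333) = 1, so n_slots = 3 <= cal_len and the first
candidate length is accepted, with gap = DIV_ROUND_UP(3, 1) = 3 for every
device. Placement walks the speed classes in enum order (2G5 before 10G)
and takes the first empty slot each time, yielding the schedule
[2G5-A, 2G5-B, 10G].
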
diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x_regs.c b/drivers/net/ethernet/microchip/lan969x/lan969x_regs.c
new file mode 100644
index 000000000000..ace4ba21eec4
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan969x/lan969x_regs.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc.
+ */
+
+/* This file is autogenerated by cml-utils 2024-09-30 11:48:29 +0200.
+ * Commit ID: 9d07b8d19363f3cd3590ddb3f7a2e2768e16524b
+ */
+
+#include "lan969x.h"
+
+const unsigned int lan969x_tsize[TSIZE_LAST] = {
+ [TC_DEV10G] = 10,
+ [TC_DEV2G5] = 28,
+ [TC_DEV5G] = 4,
+ [TC_PCS10G_BR] = 10,
+ [TC_PCS5G_BR] = 4,
+};
+
+const unsigned int lan969x_raddr[RADDR_LAST] = {
+ [RA_CPU_PROC_CTRL] = 160,
+ [RA_GCB_SOFT_RST] = 12,
+ [RA_GCB_HW_SGPIO_TO_SD_MAP_CFG] = 20,
+};
+
+const unsigned int lan969x_rcnt[RCNT_LAST] = {
+ [RC_ANA_AC_OWN_UPSID] = 1,
+ [RC_ANA_ACL_VCAP_S2_CFG] = 35,
+ [RC_ANA_ACL_OWN_UPSID] = 1,
+ [RC_ANA_CL_OWN_UPSID] = 1,
+ [RC_ANA_L2_OWN_UPSID] = 1,
+ [RC_ASM_PORT_CFG] = 32,
+ [RC_DSM_BUF_CFG] = 32,
+ [RC_DSM_DEV_TX_STOP_WM_CFG] = 32,
+ [RC_DSM_RX_PAUSE_CFG] = 32,
+ [RC_DSM_MAC_CFG] = 32,
+ [RC_DSM_MAC_ADDR_BASE_HIGH_CFG] = 30,
+ [RC_DSM_MAC_ADDR_BASE_LOW_CFG] = 30,
+ [RC_DSM_TAXI_CAL_CFG] = 6,
+ [RC_GCB_HW_SGPIO_TO_SD_MAP_CFG] = 30,
+ [RC_HSCH_PORT_MODE] = 35,
+ [RC_QFWD_SWITCH_PORT_MODE] = 35,
+ [RC_QSYS_PAUSE_CFG] = 35,
+ [RC_QSYS_ATOP] = 35,
+ [RC_QSYS_FWD_PRESSURE] = 35,
+ [RC_QSYS_CAL_AUTO] = 4,
+ [RC_REW_OWN_UPSID] = 1,
+ [RC_REW_RTAG_ETAG_CTRL] = 35,
+};
+
+const unsigned int lan969x_gaddr[GADDR_LAST] = {
+ [GA_ANA_AC_RAM_CTRL] = 202000,
+ [GA_ANA_AC_PS_COMMON] = 202880,
+ [GA_ANA_AC_MIRROR_PROBE] = 203232,
+ [GA_ANA_AC_SRC] = 201728,
+ [GA_ANA_AC_PGID] = 131072,
+ [GA_ANA_AC_TSN_SF] = 202028,
+ [GA_ANA_AC_TSN_SF_CFG] = 148480,
+ [GA_ANA_AC_TSN_SF_STATUS] = 147936,
+ [GA_ANA_AC_SG_ACCESS] = 202032,
+ [GA_ANA_AC_SG_CONFIG] = 202752,
+ [GA_ANA_AC_SG_STATUS] = 147952,
+ [GA_ANA_AC_SG_STATUS_STICKY] = 202044,
+ [GA_ANA_AC_STAT_GLOBAL_CFG_PORT] = 202048,
+ [GA_ANA_AC_STAT_CNT_CFG_PORT] = 204800,
+ [GA_ANA_AC_STAT_GLOBAL_CFG_ACL] = 202068,
+ [GA_ANA_ACL_COMMON] = 8192,
+ [GA_ANA_ACL_KEY_SEL] = 9204,
+ [GA_ANA_ACL_CNT_B] = 4096,
+ [GA_ANA_ACL_STICKY] = 10852,
+ [GA_ANA_AC_POL_POL_ALL_CFG] = 17504,
+ [GA_ANA_AC_POL_COMMON_BDLB] = 19464,
+ [GA_ANA_AC_POL_COMMON_BUM_SLB] = 19472,
+ [GA_ANA_AC_SDLB_LBGRP_TBL] = 31788,
+ [GA_ANA_CL_PORT] = 65536,
+ [GA_ANA_CL_COMMON] = 87040,
+ [GA_ANA_L2_COMMON] = 561928,
+ [GA_ANA_L3_COMMON] = 370752,
+ [GA_ANA_L3_VLAN_ARP_L3MC_STICKY] = 368580,
+ [GA_ASM_CFG] = 18304,
+ [GA_ASM_PFC_TIMER_CFG] = 15568,
+ [GA_ASM_LBK_WM_CFG] = 15596,
+ [GA_ASM_LBK_MISC_CFG] = 15608,
+ [GA_ASM_RAM_CTRL] = 15684,
+ [GA_EACL_ES2_KEY_SELECT_PROFILE] = 36864,
+ [GA_EACL_CNT_TBL] = 30720,
+ [GA_EACL_POL_CFG] = 38400,
+ [GA_EACL_ES2_STICKY] = 29072,
+ [GA_EACL_RAM_CTRL] = 29112,
+ [GA_GCB_SIO_CTRL] = 560,
+ [GA_HSCH_HSCH_DWRR] = 36480,
+ [GA_HSCH_HSCH_MISC] = 36608,
+ [GA_HSCH_HSCH_LEAK_LISTS] = 37256,
+ [GA_HSCH_SYSTEM] = 37384,
+ [GA_HSCH_MMGT] = 36260,
+ [GA_HSCH_TAS_CONFIG] = 37696,
+ [GA_PTP_PTP_CFG] = 512,
+ [GA_PTP_PTP_TOD_DOMAINS] = 528,
+ [GA_PTP_PHASE_DETECTOR_CTRL] = 628,
+ [GA_QSYS_CALCFG] = 2164,
+ [GA_QSYS_RAM_CTRL] = 2204,
+ [GA_REW_COMMON] = 98304,
+ [GA_REW_PORT] = 49152,
+ [GA_REW_VOE_PORT_LM_CNT] = 90112,
+ [GA_REW_RAM_CTRL] = 93992,
+ [GA_VOP_RAM_CTRL] = 16368,
+ [GA_XQS_SYSTEM] = 5744,
+ [GA_XQS_QLIMIT_SHR] = 6912,
+};
+
+const unsigned int lan969x_gcnt[GCNT_LAST] = {
+ [GC_ANA_AC_SRC] = 67,
+ [GC_ANA_AC_PGID] = 1054,
+ [GC_ANA_AC_TSN_SF_CFG] = 256,
+ [GC_ANA_AC_STAT_CNT_CFG_PORT] = 35,
+ [GC_ANA_ACL_KEY_SEL] = 99,
+ [GC_ANA_ACL_CNT_A] = 1024,
+ [GC_ANA_ACL_CNT_B] = 1024,
+ [GC_ANA_AC_SDLB_LBGRP_TBL] = 5,
+ [GC_ANA_AC_SDLB_LBSET_TBL] = 496,
+ [GC_ANA_CL_PORT] = 35,
+ [GC_ANA_L2_ISDX_LIMIT] = 256,
+ [GC_ANA_L2_ISDX] = 1024,
+ [GC_ANA_L3_VLAN] = 4608,
+ [GC_ASM_DEV_STATISTICS] = 30,
+ [GC_EACL_ES2_KEY_SELECT_PROFILE] = 68,
+ [GC_EACL_CNT_TBL] = 512,
+ [GC_GCB_SIO_CTRL] = 1,
+ [GC_HSCH_HSCH_CFG] = 1120,
+ [GC_HSCH_HSCH_DWRR] = 32,
+ [GC_PTP_PTP_PINS] = 8,
+ [GC_PTP_PHASE_DETECTOR_CTRL] = 8,
+ [GC_REW_PORT] = 35,
+ [GC_REW_VOE_PORT_LM_CNT] = 240,
+};
+
+const unsigned int lan969x_gsize[GSIZE_LAST] = {
+ [GW_ANA_AC_SRC] = 4,
+ [GW_ANA_L2_COMMON] = 712,
+ [GW_ASM_CFG] = 1092,
+ [GW_CPU_CPU_REGS] = 180,
+ [GW_DEV2G5_PHASE_DETECTOR_CTRL] = 12,
+ [GW_FDMA_FDMA] = 448,
+ [GW_GCB_CHIP_REGS] = 180,
+ [GW_HSCH_TAS_CONFIG] = 16,
+ [GW_PTP_PHASE_DETECTOR_CTRL] = 12,
+ [GW_QSYS_PAUSE_CFG] = 988,
+};
+
+const unsigned int lan969x_fpos[FPOS_LAST] = {
+ [FP_CPU_PROC_CTRL_AARCH64_MODE_ENA] = 7,
+ [FP_CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS] = 6,
+ [FP_CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS] = 5,
+ [FP_CPU_PROC_CTRL_BE_EXCEP_MODE] = 4,
+ [FP_CPU_PROC_CTRL_VINITHI] = 3,
+ [FP_CPU_PROC_CTRL_CFGTE] = 2,
+ [FP_CPU_PROC_CTRL_CP15S_DISABLE] = 1,
+ [FP_CPU_PROC_CTRL_PROC_CRYPTO_DISABLE] = 0,
+ [FP_CPU_PROC_CTRL_L2_FLUSH_REQ] = 8,
+ [FP_DEV2G5_PHAD_CTRL_PHAD_ENA] = 5,
+ [FP_DEV2G5_PHAD_CTRL_PHAD_FAILED] = 3,
+ [FP_FDMA_CH_CFG_CH_XTR_STATUS_MODE] = 5,
+ [FP_FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY] = 4,
+ [FP_FDMA_CH_CFG_CH_INJ_PORT] = 3,
+ [FP_PTP_PTP_PIN_CFG_PTP_PIN_ACTION] = 27,
+ [FP_PTP_PTP_PIN_CFG_PTP_PIN_SYNC] = 25,
+ [FP_PTP_PTP_PIN_CFG_PTP_PIN_INV_POL] = 24,
+ [FP_PTP_PHAD_CTRL_PHAD_ENA] = 5,
+ [FP_PTP_PHAD_CTRL_PHAD_FAILED] = 3,
+};
+
+const unsigned int lan969x_fsize[FSIZE_LAST] = {
+ [FW_ANA_AC_PROBE_PORT_CFG_PROBE_PORT_MASK] = 30,
+ [FW_ANA_AC_SRC_CFG_PORT_MASK] = 30,
+ [FW_ANA_AC_PGID_CFG_PORT_MASK] = 30,
+ [FW_ANA_AC_TSN_SF_PORT_NUM] = 7,
+ [FW_ANA_AC_TSN_SF_CFG_TSN_SGID] = 8,
+ [FW_ANA_AC_TSN_SF_STATUS_TSN_SFID] = 8,
+ [FW_ANA_AC_SG_ACCESS_CTRL_SGID] = 8,
+ [FW_ANA_AC_PORT_SGE_CFG_MASK] = 17,
+ [FW_ANA_AC_SDLB_XLB_START_LBSET_START] = 9,
+ [FW_ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT] = 3,
+ [FW_ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT] = 9,
+ [FW_ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT] = 9,
+ [FW_ANA_AC_SDLB_XLB_NEXT_LBGRP] = 3,
+ [FW_ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR] = 9,
+ [FW_ANA_L2_AUTO_LRN_CFG_AUTO_LRN_ENA] = 30,
+ [FW_ANA_L2_DLB_CFG_DLB_IDX] = 9,
+ [FW_ANA_L2_TSN_CFG_TSN_SFID] = 8,
+ [FW_ANA_L3_VLAN_MASK_CFG_VLAN_PORT_MASK] = 30,
+ [FW_FDMA_CH_CFG_CH_DCB_DB_CNT] = 2,
+ [FW_GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL] = 7,
+ [FW_HSCH_SE_CFG_SE_DWRR_CNT] = 5,
+ [FW_HSCH_SE_CONNECT_SE_LEAK_LINK] = 14,
+ [FW_HSCH_SE_DLB_SENSE_SE_DLB_DPORT] = 6,
+ [FW_HSCH_HSCH_CFG_CFG_CFG_SE_IDX] = 11,
+ [FW_HSCH_HSCH_LEAK_CFG_LEAK_FIRST] = 14,
+ [FW_HSCH_FLUSH_CTRL_FLUSH_PORT] = 6,
+ [FW_HSCH_FLUSH_CTRL_FLUSH_HIER] = 14,
+ [FW_LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW] = 13,
+ [FW_LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX] = 8,
+ [FW_LRN_AUTOAGE_CFG_2_NEXT_ROW] = 13,
+ [FW_PTP_PTP_PIN_INTR_INTR_PTP] = 8,
+ [FW_PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA] = 8,
+ [FW_PTP_PTP_INTR_IDENT_INTR_PTP_IDENT] = 8,
+ [FW_PTP_PTP_PIN_CFG_PTP_PIN_SELECT] = 3,
+ [FW_QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL] = 6,
+ [FW_QRES_RES_CFG_WM_HIGH] = 11,
+ [FW_QRES_RES_STAT_MAXUSE] = 19,
+ [FW_QRES_RES_STAT_CUR_INUSE] = 19,
+ [FW_QSYS_PAUSE_CFG_PAUSE_START] = 11,
+ [FW_QSYS_PAUSE_CFG_PAUSE_STOP] = 11,
+ [FW_QSYS_ATOP_ATOP] = 11,
+ [FW_QSYS_ATOP_TOT_CFG_ATOP_TOT] = 11,
+ [FW_REW_RTAG_ETAG_CTRL_IPE_TBL] = 6,
+ [FW_XQS_STAT_CFG_STAT_VIEW] = 10,
+ [FW_XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP] = 14,
+ [FW_XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP] = 14,
+ [FW_XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP] = 14,
+ [FW_XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM] = 14,
+};
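
These autogenerated tables are what let the shared sparx5 register
accessors resolve the lan969x layout at run time: tsize/gcnt/rcnt give
replication counts, gaddr/raddr give offsets, and fpos/fsize give bitfield
positions and widths. The accessor macros themselves are outside this
diff, but the usual scheme (an assumption here, not shown by the patch)
computes a replicated register address as target base + group address +
instance * group size + register offset; for example, GA_ANA_CL_PORT =
65536 with GC_ANA_CL_PORT = 35 describes 35 per-port register groups
starting 65536 bytes into the ANA_CL target.
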
diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x_vcap_ag_api.c b/drivers/net/ethernet/microchip/lan969x/lan969x_vcap_ag_api.c
new file mode 100644
index 000000000000..7acc5bcf337a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan969x/lan969x_vcap_ag_api.c
@@ -0,0 +1,3843 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (C) 2024 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP API
+ */
+
+/* This file is autogenerated by cml-utils 2024-10-07 11:10:56 +0200.
+ * Commit ID: b5ddc8e244eb2481a9524f1ddc630a8b41e7c391
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include "lan969x.h"
+
+/* keyfields */
+static const struct vcap_field is0_normal_7tuple_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 10,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 16,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 82,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 83,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 86,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 89,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 93,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 105,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 108,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 111,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 112,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 124,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 127,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 130,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 131,
+ .width = 12,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 144,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 192,
+ .width = 48,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 240,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 241,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 242,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 258,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 259,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 260,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 262,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 263,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 264,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 270,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 398,
+ .width = 128,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 526,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 527,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 528,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 544,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is0_normal_5tuple_ip4_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 10,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 15,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 17,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 82,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 90,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 94,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 106,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 109,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 112,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 113,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 125,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 128,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 131,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 132,
+ .width = 12,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 145,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 146,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 147,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 149,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 150,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 151,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 157,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 189,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 221,
+ .width = 8,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 229,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 230,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 231,
+ .width = 8,
+ },
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 239,
+ .width = 32,
+ },
+};
+
+static const struct vcap_field is2_mac_etype_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 56,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 57,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 67,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 80,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 89,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 137,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 185,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 186,
+ .width = 16,
+ },
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 202,
+ .width = 64,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 266,
+ .width = 16,
+ },
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 282,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 283,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field is2_arp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 56,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 57,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 67,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 80,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 85,
+ .width = 48,
+ },
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 133,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 134,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_LEN_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 135,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_TGT_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 136,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 137,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 138,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 139,
+ .width = 2,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 141,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 173,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 205,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 206,
+ .width = 16,
+ },
+};
+
+static const struct vcap_field is2_ip4_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 56,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 57,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 67,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 80,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 90,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 95,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 103,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 135,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 167,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 168,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 169,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 185,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 201,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 217,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 219,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 220,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 221,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 222,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 223,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 224,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 225,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field is2_ip4_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 56,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 57,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 67,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 80,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 90,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 93,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 95,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 103,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 135,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 167,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 168,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 176,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U112,
+ .offset = 192,
+ .width = 96,
+ },
+};
+
+static const struct vcap_field is2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 4,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 4,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 32,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 56,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 57,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 67,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 80,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 90,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 219,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 227,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 243,
+ .width = 40,
+ },
+};
+
+static const struct vcap_field is2_ip_7tuple_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_PAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 8,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 4,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 18,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 98,
+ .width = 13,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 111,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 112,
+ .width = 3,
+ },
+ [VCAP_KF_L2_FWD_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 115,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 118,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 119,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 120,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 168,
+ .width = 48,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 219,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 220,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 228,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 356,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 484,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 485,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 486,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 487,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 503,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 519,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 535,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 536,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 537,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 538,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 539,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 540,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 541,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 542,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 543,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es0_isdx_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_IF_EGR_PORT_NO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 6,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 7,
+ .width = 13,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 23,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 27,
+ .width = 1,
+ },
+ [VCAP_KF_PROT_ACTIVE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 38,
+ .width = 10,
+ },
+};
+
+static const struct vcap_field es2_mac_etype_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 76,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 77,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 96,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 144,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 192,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 193,
+ .width = 16,
+ },
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 209,
+ .width = 64,
+ },
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 273,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 274,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_arp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 76,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 77,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 95,
+ .width = 48,
+ },
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 143,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 144,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_LEN_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 145,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_TGT_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 146,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 147,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 148,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 149,
+ .width = 2,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 151,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 183,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 215,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_ip4_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 76,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 77,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 96,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 97,
+ .width = 2,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 100,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 101,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 109,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 141,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 173,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 174,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 175,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 191,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 207,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 223,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 224,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 225,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 226,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 227,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 228,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 229,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 230,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 231,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es2_ip4_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 76,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 77,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 96,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 97,
+ .width = 2,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 100,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 101,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 109,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 141,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 173,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 174,
+ .width = 8,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U112,
+ .offset = 182,
+ .width = 96,
+ },
+};
+
+static const struct vcap_field es2_ip_7tuple_keyfield[] = {
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 10,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 38,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 73,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 74,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 85,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 88,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 93,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 141,
+ .width = 48,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 191,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 192,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 193,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 201,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 329,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 457,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 458,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 459,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 460,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 476,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 492,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 508,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 509,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 510,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 511,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 512,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 513,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 514,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 515,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 516,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 10,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 76,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 77,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 96,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 97,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 225,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 226,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 234,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 250,
+ .width = 40,
+ },
+};
+
+/* keyfield_set */
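+/* Each set entry gives the type id programmed into the key (-1 when the
+ * set carries no type field), the number of subwords one key occupies
+ * (sw_per_item) and how many keys of that size fit in a row (sw_cnt).
+ */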
+static const struct vcap_set is0_keyfield_set[] = {
+ [VCAP_KFS_NORMAL_7TUPLE] = {
+ .type_id = 0,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = {
+ .type_id = 2,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+};
+
+static const struct vcap_set is2_keyfield_set[] = {
+ [VCAP_KFS_MAC_ETYPE] = {
+ .type_id = 0,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_ARP] = {
+ .type_id = 3,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_TCP_UDP] = {
+ .type_id = 4,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_OTHER] = {
+ .type_id = 5,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 6,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP_7TUPLE] = {
+ .type_id = 1,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+};
+
+static const struct vcap_set es0_keyfield_set[] = {
+ [VCAP_KFS_ISDX] = {
+ .type_id = 0,
+ .sw_per_item = 1,
+ .sw_cnt = 1,
+ },
+};
+
+static const struct vcap_set es2_keyfield_set[] = {
+ [VCAP_KFS_MAC_ETYPE] = {
+ .type_id = 0,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_ARP] = {
+ .type_id = 1,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_TCP_UDP] = {
+ .type_id = 2,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_OTHER] = {
+ .type_id = 3,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP_7TUPLE] = {
+ .type_id = -1,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 4,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+};
+
+/* keyfield_set map */
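+/* Map each keyfield set to the field table describing its layout */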
+static const struct vcap_field *is0_keyfield_set_map[] = {
+ [VCAP_KFS_NORMAL_7TUPLE] = is0_normal_7tuple_keyfield,
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = is0_normal_5tuple_ip4_keyfield,
+};
+
+static const struct vcap_field *is2_keyfield_set_map[] = {
+ [VCAP_KFS_MAC_ETYPE] = is2_mac_etype_keyfield,
+ [VCAP_KFS_ARP] = is2_arp_keyfield,
+ [VCAP_KFS_IP4_TCP_UDP] = is2_ip4_tcp_udp_keyfield,
+ [VCAP_KFS_IP4_OTHER] = is2_ip4_other_keyfield,
+ [VCAP_KFS_IP6_STD] = is2_ip6_std_keyfield,
+ [VCAP_KFS_IP_7TUPLE] = is2_ip_7tuple_keyfield,
+};
+
+static const struct vcap_field *es0_keyfield_set_map[] = {
+ [VCAP_KFS_ISDX] = es0_isdx_keyfield,
+};
+
+static const struct vcap_field *es2_keyfield_set_map[] = {
+ [VCAP_KFS_MAC_ETYPE] = es2_mac_etype_keyfield,
+ [VCAP_KFS_ARP] = es2_arp_keyfield,
+ [VCAP_KFS_IP4_TCP_UDP] = es2_ip4_tcp_udp_keyfield,
+ [VCAP_KFS_IP4_OTHER] = es2_ip4_other_keyfield,
+ [VCAP_KFS_IP_7TUPLE] = es2_ip_7tuple_keyfield,
+ [VCAP_KFS_IP6_STD] = es2_ip6_std_keyfield,
+};
+
+/* keyfield_set map sizes */
+static int is0_keyfield_set_map_size[] = {
+ [VCAP_KFS_NORMAL_7TUPLE] = ARRAY_SIZE(is0_normal_7tuple_keyfield),
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = ARRAY_SIZE(is0_normal_5tuple_ip4_keyfield),
+};
+
+static int is2_keyfield_set_map_size[] = {
+ [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(is2_mac_etype_keyfield),
+ [VCAP_KFS_ARP] = ARRAY_SIZE(is2_arp_keyfield),
+ [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(is2_ip4_tcp_udp_keyfield),
+ [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(is2_ip4_other_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(is2_ip6_std_keyfield),
+ [VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(is2_ip_7tuple_keyfield),
+};
+
+static int es0_keyfield_set_map_size[] = {
+ [VCAP_KFS_ISDX] = ARRAY_SIZE(es0_isdx_keyfield),
+};
+
+static int es2_keyfield_set_map_size[] = {
+ [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(es2_mac_etype_keyfield),
+ [VCAP_KFS_ARP] = ARRAY_SIZE(es2_arp_keyfield),
+ [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(es2_ip4_tcp_udp_keyfield),
+ [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(es2_ip4_other_keyfield),
+ [VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(es2_ip_7tuple_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(es2_ip6_std_keyfield),
+};
+
+/* actionfields */
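+/* Action layouts follow the same scheme as the keyfields above: the
+ * position and width of each actionfield within the action word.
+ */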
+static const struct vcap_field is0_classification_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 6,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_AF_DEI_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_AF_DEI_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 21,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 22,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 30,
+ .width = 7,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 43,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 66,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 67,
+ .width = 10,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 107,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 115,
+ .width = 8,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 167,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 170,
+ .width = 10,
+ },
+};
+
+static const struct vcap_field is0_full_actionfield[] = {
+ [VCAP_AF_DSCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 6,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_AF_DEI_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_AF_DEI_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 21,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 24,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 26,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 7,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 36,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 65,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 66,
+ .width = 10,
+ },
+ [VCAP_AF_MASK_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 76,
+ .width = 3,
+ },
+ [VCAP_AF_PORT_MASK] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 79,
+ .width = 37,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 174,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 182,
+ .width = 8,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 266,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 269,
+ .width = 10,
+ },
+};
+
+static const struct vcap_field is0_class_reduced_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 5,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 6,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 3,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 23,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 46,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 47,
+ .width = 10,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 89,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 92,
+ .width = 10,
+ },
+};
+
+static const struct vcap_field is2_base_type_actionfield[] = {
+ [VCAP_AF_PIPELINE_FORCE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_PIPELINE_PT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 5,
+ },
+ [VCAP_AF_HIT_ME_ONCE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 7,
+ .width = 1,
+ },
+ [VCAP_AF_INTR_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 8,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 3,
+ },
+ [VCAP_AF_LRN_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_AF_RT_DIS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 5,
+ },
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 23,
+ .width = 1,
+ },
+ [VCAP_AF_MASK_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 3,
+ },
+ [VCAP_AF_PORT_MASK] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 30,
+ .width = 37,
+ },
+ [VCAP_AF_MIRROR_PROBE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 2,
+ },
+ [VCAP_AF_MATCH_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 131,
+ .width = 16,
+ },
+ [VCAP_AF_MATCH_ID_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 147,
+ .width = 16,
+ },
+ [VCAP_AF_CNT_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 163,
+ .width = 10,
+ },
+};
+
+static const struct vcap_field es0_es0_actionfield[] = {
+ [VCAP_AF_PUSH_OUTER_TAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_AF_PUSH_INNER_TAG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_AF_TAG_A_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_A_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 6,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_A_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 8,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_A_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 11,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_B_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_B_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_B_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 19,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_B_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 22,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_C_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_C_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_C_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 31,
+ .width = 3,
+ },
+ [VCAP_AF_VID_A_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 34,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_A_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 46,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_A_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+ [VCAP_AF_VID_B_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 50,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_B_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 62,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_B_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 65,
+ .width = 1,
+ },
+ [VCAP_AF_VID_C_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 66,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_C_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_C_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_AF_POP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 2,
+ },
+ [VCAP_AF_UNTAG_VID_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_AF_PUSH_CUSTOMER_TAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 85,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_C_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 2,
+ },
+ [VCAP_AF_DSCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 127,
+ .width = 3,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 130,
+ .width = 6,
+ },
+ [VCAP_AF_ESDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 319,
+ .width = 10,
+ },
+ [VCAP_AF_FWD_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 438,
+ .width = 2,
+ },
+ [VCAP_AF_CPU_QU] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 440,
+ .width = 3,
+ },
+ [VCAP_AF_PIPELINE_PT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 443,
+ .width = 2,
+ },
+ [VCAP_AF_PIPELINE_ACT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 445,
+ .width = 1,
+ },
+ [VCAP_AF_SWAP_MACS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 454,
+ .width = 1,
+ },
+ [VCAP_AF_LOOP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 455,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_base_type_actionfield[] = {
+ [VCAP_AF_HIT_ME_ONCE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_INTR_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_FWD_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_AF_COPY_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 14,
+ },
+ [VCAP_AF_COPY_PORT_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 18,
+ .width = 6,
+ },
+ [VCAP_AF_MIRROR_PROBE_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 24,
+ .width = 2,
+ },
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 26,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 3,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 30,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_REMARK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 31,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 32,
+ .width = 5,
+ },
+ [VCAP_AF_ES2_REW_CMD] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 3,
+ },
+ [VCAP_AF_CNT_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 9,
+ },
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+};
+
+/* actionfield_set */
+static const struct vcap_set is0_actionfield_set[] = {
+ [VCAP_AFS_CLASSIFICATION] = {
+ .type_id = 1,
+ .sw_per_item = 2,
+ .sw_cnt = 6,
+ },
+ [VCAP_AFS_FULL] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+ [VCAP_AFS_CLASS_REDUCED] = {
+ .type_id = 1,
+ .sw_per_item = 1,
+ .sw_cnt = 12,
+ },
+};
+
+static const struct vcap_set is2_actionfield_set[] = {
+ [VCAP_AFS_BASE_TYPE] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+};
+
+static const struct vcap_set es0_actionfield_set[] = {
+ [VCAP_AFS_ES0] = {
+ .type_id = -1,
+ .sw_per_item = 1,
+ .sw_cnt = 1,
+ },
+};
+
+static const struct vcap_set es2_actionfield_set[] = {
+ [VCAP_AFS_BASE_TYPE] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+};
+
+/* actionfield_set map */
+static const struct vcap_field *is0_actionfield_set_map[] = {
+ [VCAP_AFS_CLASSIFICATION] = is0_classification_actionfield,
+ [VCAP_AFS_FULL] = is0_full_actionfield,
+ [VCAP_AFS_CLASS_REDUCED] = is0_class_reduced_actionfield,
+};
+
+static const struct vcap_field *is2_actionfield_set_map[] = {
+ [VCAP_AFS_BASE_TYPE] = is2_base_type_actionfield,
+};
+
+static const struct vcap_field *es0_actionfield_set_map[] = {
+ [VCAP_AFS_ES0] = es0_es0_actionfield,
+};
+
+static const struct vcap_field *es2_actionfield_set_map[] = {
+ [VCAP_AFS_BASE_TYPE] = es2_base_type_actionfield,
+};
+
+/* actionfield_set map size */
+static int is0_actionfield_set_map_size[] = {
+ [VCAP_AFS_CLASSIFICATION] = ARRAY_SIZE(is0_classification_actionfield),
+ [VCAP_AFS_FULL] = ARRAY_SIZE(is0_full_actionfield),
+ [VCAP_AFS_CLASS_REDUCED] = ARRAY_SIZE(is0_class_reduced_actionfield),
+};
+
+static int is2_actionfield_set_map_size[] = {
+ [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(is2_base_type_actionfield),
+};
+
+static int es0_actionfield_set_map_size[] = {
+ [VCAP_AFS_ES0] = ARRAY_SIZE(es0_es0_actionfield),
+};
+
+static int es2_actionfield_set_map_size[] = {
+ [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(es2_base_type_actionfield),
+};
+
+/* Type Groups */
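+/* A typegroup describes the marker bits placed at the start of each
+ * subword: 'offset' is the bit position in the entry, 'width' the number
+ * of marker bits, and 'value' the pattern identifying an entry of this
+ * size. An empty initializer terminates the list.
+ */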
+static const struct vcap_typegroup is0_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 5,
+ .value = 16,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 156,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 208,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 260,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 4,
+ .value = 0,
+ },
+ {
+ .offset = 364,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 416,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 520,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 572,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 4,
+ .value = 8,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 156,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 208,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 260,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x3_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is0_x2_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is0_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is2_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x3_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es0_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x3_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is0_keyfield_set_typegroups[] = {
+ [12] = is0_x12_keyfield_set_typegroups,
+ [6] = is0_x6_keyfield_set_typegroups,
+ [3] = is0_x3_keyfield_set_typegroups,
+ [2] = is0_x2_keyfield_set_typegroups,
+ [1] = is0_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *is2_keyfield_set_typegroups[] = {
+ [12] = is2_x12_keyfield_set_typegroups,
+ [6] = is2_x6_keyfield_set_typegroups,
+ [3] = is2_x3_keyfield_set_typegroups,
+ [1] = is2_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *es0_keyfield_set_typegroups[] = {
+ [1] = es0_x1_keyfield_set_typegroups,
+ [2] = NULL,
+};
+
+static const struct vcap_typegroup *es2_keyfield_set_typegroups[] = {
+ [12] = es2_x12_keyfield_set_typegroups,
+ [6] = es2_x6_keyfield_set_typegroups,
+ [3] = es2_x3_keyfield_set_typegroups,
+ [1] = es2_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup is0_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 103,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 206,
+ .width = 2,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x2_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 103,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x1_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 95,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 190,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is2_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es0_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 19,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 38,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is0_actionfield_set_typegroups[] = {
+ [3] = is0_x3_actionfield_set_typegroups,
+ [2] = is0_x2_actionfield_set_typegroups,
+ [1] = is0_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *is2_actionfield_set_typegroups[] = {
+ [3] = is2_x3_actionfield_set_typegroups,
+ [1] = is2_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup *es0_actionfield_set_typegroups[] = {
+ [1] = es0_x1_actionfield_set_typegroups,
+ [2] = NULL,
+};
+
+static const struct vcap_typegroup *es2_actionfield_set_typegroups[] = {
+ [3] = es2_x3_actionfield_set_typegroups,
+ [1] = es2_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
+/* Keyfieldset names */
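+/* The name tables below are used when decoding rules for debugfs output */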
+static const char * const vcap_keyfield_set_names[] = {
+ [VCAP_KFS_NO_VALUE] = "(None)",
+ [VCAP_KFS_ARP] = "VCAP_KFS_ARP",
+ [VCAP_KFS_ETAG] = "VCAP_KFS_ETAG",
+ [VCAP_KFS_IP4_OTHER] = "VCAP_KFS_IP4_OTHER",
+ [VCAP_KFS_IP4_TCP_UDP] = "VCAP_KFS_IP4_TCP_UDP",
+ [VCAP_KFS_IP4_VID] = "VCAP_KFS_IP4_VID",
+ [VCAP_KFS_IP6_OTHER] = "VCAP_KFS_IP6_OTHER",
+ [VCAP_KFS_IP6_STD] = "VCAP_KFS_IP6_STD",
+ [VCAP_KFS_IP6_TCP_UDP] = "VCAP_KFS_IP6_TCP_UDP",
+ [VCAP_KFS_IP6_VID] = "VCAP_KFS_IP6_VID",
+ [VCAP_KFS_IP_7TUPLE] = "VCAP_KFS_IP_7TUPLE",
+ [VCAP_KFS_ISDX] = "VCAP_KFS_ISDX",
+ [VCAP_KFS_LL_FULL] = "VCAP_KFS_LL_FULL",
+ [VCAP_KFS_MAC_ETYPE] = "VCAP_KFS_MAC_ETYPE",
+ [VCAP_KFS_MAC_LLC] = "VCAP_KFS_MAC_LLC",
+ [VCAP_KFS_MAC_SNAP] = "VCAP_KFS_MAC_SNAP",
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = "VCAP_KFS_NORMAL_5TUPLE_IP4",
+ [VCAP_KFS_NORMAL_7TUPLE] = "VCAP_KFS_NORMAL_7TUPLE",
+ [VCAP_KFS_OAM] = "VCAP_KFS_OAM",
+ [VCAP_KFS_PURE_5TUPLE_IP4] = "VCAP_KFS_PURE_5TUPLE_IP4",
+ [VCAP_KFS_SMAC_SIP4] = "VCAP_KFS_SMAC_SIP4",
+ [VCAP_KFS_SMAC_SIP6] = "VCAP_KFS_SMAC_SIP6",
+};
+
+/* Actionfieldset names */
+static const char * const vcap_actionfield_set_names[] = {
+ [VCAP_AFS_NO_VALUE] = "(None)",
+ [VCAP_AFS_BASE_TYPE] = "VCAP_AFS_BASE_TYPE",
+ [VCAP_AFS_CLASSIFICATION] = "VCAP_AFS_CLASSIFICATION",
+ [VCAP_AFS_CLASS_REDUCED] = "VCAP_AFS_CLASS_REDUCED",
+ [VCAP_AFS_ES0] = "VCAP_AFS_ES0",
+ [VCAP_AFS_FULL] = "VCAP_AFS_FULL",
+ [VCAP_AFS_SMAC_SIP] = "VCAP_AFS_SMAC_SIP",
+};
+
+/* Keyfield names */
+static const char * const vcap_keyfield_names[] = {
+ [VCAP_KF_NO_VALUE] = "(None)",
+ [VCAP_KF_8021BR_ECID_BASE] = "8021BR_ECID_BASE",
+ [VCAP_KF_8021BR_ECID_EXT] = "8021BR_ECID_EXT",
+ [VCAP_KF_8021BR_E_TAGGED] = "8021BR_E_TAGGED",
+ [VCAP_KF_8021BR_GRP] = "8021BR_GRP",
+ [VCAP_KF_8021BR_IGR_ECID_BASE] = "8021BR_IGR_ECID_BASE",
+ [VCAP_KF_8021BR_IGR_ECID_EXT] = "8021BR_IGR_ECID_EXT",
+ [VCAP_KF_8021Q_DEI0] = "8021Q_DEI0",
+ [VCAP_KF_8021Q_DEI1] = "8021Q_DEI1",
+ [VCAP_KF_8021Q_DEI2] = "8021Q_DEI2",
+ [VCAP_KF_8021Q_DEI_CLS] = "8021Q_DEI_CLS",
+ [VCAP_KF_8021Q_PCP0] = "8021Q_PCP0",
+ [VCAP_KF_8021Q_PCP1] = "8021Q_PCP1",
+ [VCAP_KF_8021Q_PCP2] = "8021Q_PCP2",
+ [VCAP_KF_8021Q_PCP_CLS] = "8021Q_PCP_CLS",
+ [VCAP_KF_8021Q_TPID] = "8021Q_TPID",
+ [VCAP_KF_8021Q_TPID0] = "8021Q_TPID0",
+ [VCAP_KF_8021Q_TPID1] = "8021Q_TPID1",
+ [VCAP_KF_8021Q_TPID2] = "8021Q_TPID2",
+ [VCAP_KF_8021Q_VID0] = "8021Q_VID0",
+ [VCAP_KF_8021Q_VID1] = "8021Q_VID1",
+ [VCAP_KF_8021Q_VID2] = "8021Q_VID2",
+ [VCAP_KF_8021Q_VID_CLS] = "8021Q_VID_CLS",
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = "8021Q_VLAN_TAGGED_IS",
+ [VCAP_KF_8021Q_VLAN_TAGS] = "8021Q_VLAN_TAGS",
+ [VCAP_KF_ACL_GRP_ID] = "ACL_GRP_ID",
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = "ARP_ADDR_SPACE_OK_IS",
+ [VCAP_KF_ARP_LEN_OK_IS] = "ARP_LEN_OK_IS",
+ [VCAP_KF_ARP_OPCODE] = "ARP_OPCODE",
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = "ARP_OPCODE_UNKNOWN_IS",
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = "ARP_PROTO_SPACE_OK_IS",
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = "ARP_SENDER_MATCH_IS",
+ [VCAP_KF_ARP_TGT_MATCH_IS] = "ARP_TGT_MATCH_IS",
+ [VCAP_KF_COSID_CLS] = "COSID_CLS",
+ [VCAP_KF_ES0_ISDX_KEY_ENA] = "ES0_ISDX_KEY_ENA",
+ [VCAP_KF_ETYPE] = "ETYPE",
+ [VCAP_KF_ETYPE_LEN_IS] = "ETYPE_LEN_IS",
+ [VCAP_KF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_KF_IF_EGR_PORT_MASK] = "IF_EGR_PORT_MASK",
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = "IF_EGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_EGR_PORT_NO] = "IF_EGR_PORT_NO",
+ [VCAP_KF_IF_IGR_PORT] = "IF_IGR_PORT",
+ [VCAP_KF_IF_IGR_PORT_MASK] = "IF_IGR_PORT_MASK",
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = "IF_IGR_PORT_MASK_L3",
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = "IF_IGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = "IF_IGR_PORT_MASK_SEL",
+ [VCAP_KF_IF_IGR_PORT_SEL] = "IF_IGR_PORT_SEL",
+ [VCAP_KF_IP4_IS] = "IP4_IS",
+ [VCAP_KF_IP_MC_IS] = "IP_MC_IS",
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = "IP_PAYLOAD_5TUPLE",
+ [VCAP_KF_IP_SNAP_IS] = "IP_SNAP_IS",
+ [VCAP_KF_ISDX_CLS] = "ISDX_CLS",
+ [VCAP_KF_ISDX_GT0_IS] = "ISDX_GT0_IS",
+ [VCAP_KF_L2_BC_IS] = "L2_BC_IS",
+ [VCAP_KF_L2_DMAC] = "L2_DMAC",
+ [VCAP_KF_L2_FRM_TYPE] = "L2_FRM_TYPE",
+ [VCAP_KF_L2_FWD_IS] = "L2_FWD_IS",
+ [VCAP_KF_L2_LLC] = "L2_LLC",
+ [VCAP_KF_L2_MC_IS] = "L2_MC_IS",
+ [VCAP_KF_L2_PAYLOAD0] = "L2_PAYLOAD0",
+ [VCAP_KF_L2_PAYLOAD1] = "L2_PAYLOAD1",
+ [VCAP_KF_L2_PAYLOAD2] = "L2_PAYLOAD2",
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = "L2_PAYLOAD_ETYPE",
+ [VCAP_KF_L2_SMAC] = "L2_SMAC",
+ [VCAP_KF_L2_SNAP] = "L2_SNAP",
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = "L3_DIP_EQ_SIP_IS",
+ [VCAP_KF_L3_DPL_CLS] = "L3_DPL_CLS",
+ [VCAP_KF_L3_DSCP] = "L3_DSCP",
+ [VCAP_KF_L3_DST_IS] = "L3_DST_IS",
+ [VCAP_KF_L3_FRAGMENT] = "L3_FRAGMENT",
+ [VCAP_KF_L3_FRAGMENT_TYPE] = "L3_FRAGMENT_TYPE",
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = "L3_FRAG_INVLD_L4_LEN",
+ [VCAP_KF_L3_FRAG_OFS_GT0] = "L3_FRAG_OFS_GT0",
+ [VCAP_KF_L3_IP4_DIP] = "L3_IP4_DIP",
+ [VCAP_KF_L3_IP4_SIP] = "L3_IP4_SIP",
+ [VCAP_KF_L3_IP6_DIP] = "L3_IP6_DIP",
+ [VCAP_KF_L3_IP6_SIP] = "L3_IP6_SIP",
+ [VCAP_KF_L3_IP_PROTO] = "L3_IP_PROTO",
+ [VCAP_KF_L3_OPTIONS_IS] = "L3_OPTIONS_IS",
+ [VCAP_KF_L3_PAYLOAD] = "L3_PAYLOAD",
+ [VCAP_KF_L3_RT_IS] = "L3_RT_IS",
+ [VCAP_KF_L3_TOS] = "L3_TOS",
+ [VCAP_KF_L3_TTL_GT0] = "L3_TTL_GT0",
+ [VCAP_KF_L4_1588_DOM] = "L4_1588_DOM",
+ [VCAP_KF_L4_1588_VER] = "L4_1588_VER",
+ [VCAP_KF_L4_ACK] = "L4_ACK",
+ [VCAP_KF_L4_DPORT] = "L4_DPORT",
+ [VCAP_KF_L4_FIN] = "L4_FIN",
+ [VCAP_KF_L4_PAYLOAD] = "L4_PAYLOAD",
+ [VCAP_KF_L4_PSH] = "L4_PSH",
+ [VCAP_KF_L4_RNG] = "L4_RNG",
+ [VCAP_KF_L4_RST] = "L4_RST",
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = "L4_SEQUENCE_EQ0_IS",
+ [VCAP_KF_L4_SPORT] = "L4_SPORT",
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = "L4_SPORT_EQ_DPORT_IS",
+ [VCAP_KF_L4_SYN] = "L4_SYN",
+ [VCAP_KF_L4_URG] = "L4_URG",
+ [VCAP_KF_LOOKUP_FIRST_IS] = "LOOKUP_FIRST_IS",
+ [VCAP_KF_LOOKUP_GEN_IDX] = "LOOKUP_GEN_IDX",
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = "LOOKUP_GEN_IDX_SEL",
+ [VCAP_KF_LOOKUP_PAG] = "LOOKUP_PAG",
+ [VCAP_KF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = "OAM_CCM_CNTS_EQ0",
+ [VCAP_KF_OAM_DETECTED] = "OAM_DETECTED",
+ [VCAP_KF_OAM_FLAGS] = "OAM_FLAGS",
+ [VCAP_KF_OAM_MEL_FLAGS] = "OAM_MEL_FLAGS",
+ [VCAP_KF_OAM_MEPID] = "OAM_MEPID",
+ [VCAP_KF_OAM_OPCODE] = "OAM_OPCODE",
+ [VCAP_KF_OAM_VER] = "OAM_VER",
+ [VCAP_KF_OAM_Y1731_IS] = "OAM_Y1731_IS",
+ [VCAP_KF_PROT_ACTIVE] = "PROT_ACTIVE",
+ [VCAP_KF_TCP_IS] = "TCP_IS",
+ [VCAP_KF_TCP_UDP_IS] = "TCP_UDP_IS",
+ [VCAP_KF_TYPE] = "TYPE",
+};
+
+/* Actionfield names */
+static const char * const vcap_actionfield_names[] = {
+ [VCAP_AF_NO_VALUE] = "(None)",
+ [VCAP_AF_ACL_ID] = "ACL_ID",
+ [VCAP_AF_CLS_VID_SEL] = "CLS_VID_SEL",
+ [VCAP_AF_CNT_ID] = "CNT_ID",
+ [VCAP_AF_COPY_PORT_NUM] = "COPY_PORT_NUM",
+ [VCAP_AF_COPY_QUEUE_NUM] = "COPY_QUEUE_NUM",
+ [VCAP_AF_CPU_COPY_ENA] = "CPU_COPY_ENA",
+ [VCAP_AF_CPU_QU] = "CPU_QU",
+ [VCAP_AF_CPU_QUEUE_NUM] = "CPU_QUEUE_NUM",
+ [VCAP_AF_DEI_A_VAL] = "DEI_A_VAL",
+ [VCAP_AF_DEI_B_VAL] = "DEI_B_VAL",
+ [VCAP_AF_DEI_C_VAL] = "DEI_C_VAL",
+ [VCAP_AF_DEI_ENA] = "DEI_ENA",
+ [VCAP_AF_DEI_VAL] = "DEI_VAL",
+ [VCAP_AF_DP_ENA] = "DP_ENA",
+ [VCAP_AF_DP_VAL] = "DP_VAL",
+ [VCAP_AF_DSCP_ENA] = "DSCP_ENA",
+ [VCAP_AF_DSCP_SEL] = "DSCP_SEL",
+ [VCAP_AF_DSCP_VAL] = "DSCP_VAL",
+ [VCAP_AF_ES2_REW_CMD] = "ES2_REW_CMD",
+ [VCAP_AF_ESDX] = "ESDX",
+ [VCAP_AF_FWD_KILL_ENA] = "FWD_KILL_ENA",
+ [VCAP_AF_FWD_MODE] = "FWD_MODE",
+ [VCAP_AF_FWD_SEL] = "FWD_SEL",
+ [VCAP_AF_HIT_ME_ONCE] = "HIT_ME_ONCE",
+ [VCAP_AF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = "IGNORE_PIPELINE_CTRL",
+ [VCAP_AF_INTR_ENA] = "INTR_ENA",
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = "ISDX_ADD_REPLACE_SEL",
+ [VCAP_AF_ISDX_ENA] = "ISDX_ENA",
+ [VCAP_AF_ISDX_VAL] = "ISDX_VAL",
+ [VCAP_AF_LOOP_ENA] = "LOOP_ENA",
+ [VCAP_AF_LRN_DIS] = "LRN_DIS",
+ [VCAP_AF_MAP_IDX] = "MAP_IDX",
+ [VCAP_AF_MAP_KEY] = "MAP_KEY",
+ [VCAP_AF_MAP_LOOKUP_SEL] = "MAP_LOOKUP_SEL",
+ [VCAP_AF_MASK_MODE] = "MASK_MODE",
+ [VCAP_AF_MATCH_ID] = "MATCH_ID",
+ [VCAP_AF_MATCH_ID_MASK] = "MATCH_ID_MASK",
+ [VCAP_AF_MIRROR_ENA] = "MIRROR_ENA",
+ [VCAP_AF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_AF_MIRROR_PROBE_ID] = "MIRROR_PROBE_ID",
+ [VCAP_AF_NXT_IDX] = "NXT_IDX",
+ [VCAP_AF_NXT_IDX_CTRL] = "NXT_IDX_CTRL",
+ [VCAP_AF_PAG_OVERRIDE_MASK] = "PAG_OVERRIDE_MASK",
+ [VCAP_AF_PAG_VAL] = "PAG_VAL",
+ [VCAP_AF_PCP_A_VAL] = "PCP_A_VAL",
+ [VCAP_AF_PCP_B_VAL] = "PCP_B_VAL",
+ [VCAP_AF_PCP_C_VAL] = "PCP_C_VAL",
+ [VCAP_AF_PCP_ENA] = "PCP_ENA",
+ [VCAP_AF_PCP_VAL] = "PCP_VAL",
+ [VCAP_AF_PIPELINE_ACT] = "PIPELINE_ACT",
+ [VCAP_AF_PIPELINE_FORCE_ENA] = "PIPELINE_FORCE_ENA",
+ [VCAP_AF_PIPELINE_PT] = "PIPELINE_PT",
+ [VCAP_AF_POLICE_ENA] = "POLICE_ENA",
+ [VCAP_AF_POLICE_IDX] = "POLICE_IDX",
+ [VCAP_AF_POLICE_REMARK] = "POLICE_REMARK",
+ [VCAP_AF_POLICE_VCAP_ONLY] = "POLICE_VCAP_ONLY",
+ [VCAP_AF_POP_VAL] = "POP_VAL",
+ [VCAP_AF_PORT_MASK] = "PORT_MASK",
+ [VCAP_AF_PUSH_CUSTOMER_TAG] = "PUSH_CUSTOMER_TAG",
+ [VCAP_AF_PUSH_INNER_TAG] = "PUSH_INNER_TAG",
+ [VCAP_AF_PUSH_OUTER_TAG] = "PUSH_OUTER_TAG",
+ [VCAP_AF_QOS_ENA] = "QOS_ENA",
+ [VCAP_AF_QOS_VAL] = "QOS_VAL",
+ [VCAP_AF_REW_OP] = "REW_OP",
+ [VCAP_AF_RT_DIS] = "RT_DIS",
+ [VCAP_AF_SWAP_MACS_ENA] = "SWAP_MACS_ENA",
+ [VCAP_AF_TAG_A_DEI_SEL] = "TAG_A_DEI_SEL",
+ [VCAP_AF_TAG_A_PCP_SEL] = "TAG_A_PCP_SEL",
+ [VCAP_AF_TAG_A_TPID_SEL] = "TAG_A_TPID_SEL",
+ [VCAP_AF_TAG_A_VID_SEL] = "TAG_A_VID_SEL",
+ [VCAP_AF_TAG_B_DEI_SEL] = "TAG_B_DEI_SEL",
+ [VCAP_AF_TAG_B_PCP_SEL] = "TAG_B_PCP_SEL",
+ [VCAP_AF_TAG_B_TPID_SEL] = "TAG_B_TPID_SEL",
+ [VCAP_AF_TAG_B_VID_SEL] = "TAG_B_VID_SEL",
+ [VCAP_AF_TAG_C_DEI_SEL] = "TAG_C_DEI_SEL",
+ [VCAP_AF_TAG_C_PCP_SEL] = "TAG_C_PCP_SEL",
+ [VCAP_AF_TAG_C_TPID_SEL] = "TAG_C_TPID_SEL",
+ [VCAP_AF_TAG_C_VID_SEL] = "TAG_C_VID_SEL",
+ [VCAP_AF_TYPE] = "TYPE",
+ [VCAP_AF_UNTAG_VID_ENA] = "UNTAG_VID_ENA",
+ [VCAP_AF_VID_A_VAL] = "VID_A_VAL",
+ [VCAP_AF_VID_B_VAL] = "VID_B_VAL",
+ [VCAP_AF_VID_C_VAL] = "VID_C_VAL",
+ [VCAP_AF_VID_VAL] = "VID_VAL",
+};
+
+/* VCAPs */
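+/* Per-instance geometry: 'rows' addressable rows, 'sw_count' subwords per
+ * row, 'sw_width' key bits per subword and 'act_width' action bits per
+ * subword, tied to the field tables declared above.
+ */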
+const struct vcap_info lan969x_vcaps[] = {
+ [VCAP_TYPE_IS0] = {
+ .name = "is0",
+ .rows = 256,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 103,
+ .default_cnt = 70,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = is0_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is0_keyfield_set),
+ .actionfield_set = is0_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is0_actionfield_set),
+ .keyfield_set_map = is0_keyfield_set_map,
+ .keyfield_set_map_size = is0_keyfield_set_map_size,
+ .actionfield_set_map = is0_actionfield_set_map,
+ .actionfield_set_map_size = is0_actionfield_set_map_size,
+ .keyfield_set_typegroups = is0_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is0_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_IS2] = {
+ .name = "is2",
+ .rows = 256,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 103,
+ .default_cnt = 38,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = is2_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is2_keyfield_set),
+ .actionfield_set = is2_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is2_actionfield_set),
+ .keyfield_set_map = is2_keyfield_set_map,
+ .keyfield_set_map_size = is2_keyfield_set_map_size,
+ .actionfield_set_map = is2_actionfield_set_map,
+ .actionfield_set_map_size = is2_actionfield_set_map_size,
+ .keyfield_set_typegroups = is2_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is2_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_ES0] = {
+ .name = "es0",
+ .rows = 1536,
+ .sw_count = 1,
+ .sw_width = 51,
+ .sticky_width = 1,
+ .act_width = 469,
+ .default_cnt = 35,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = es0_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(es0_keyfield_set),
+ .actionfield_set = es0_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(es0_actionfield_set),
+ .keyfield_set_map = es0_keyfield_set_map,
+ .keyfield_set_map_size = es0_keyfield_set_map_size,
+ .actionfield_set_map = es0_actionfield_set_map,
+ .actionfield_set_map_size = es0_actionfield_set_map_size,
+ .keyfield_set_typegroups = es0_keyfield_set_typegroups,
+ .actionfield_set_typegroups = es0_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_ES2] = {
+ .name = "es2",
+ .rows = 256,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 19,
+ .default_cnt = 39,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = es2_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(es2_keyfield_set),
+ .actionfield_set = es2_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(es2_actionfield_set),
+ .keyfield_set_map = es2_keyfield_set_map,
+ .keyfield_set_map_size = es2_keyfield_set_map_size,
+ .actionfield_set_map = es2_actionfield_set_map,
+ .actionfield_set_map_size = es2_actionfield_set_map_size,
+ .keyfield_set_typegroups = es2_keyfield_set_typegroups,
+ .actionfield_set_typegroups = es2_actionfield_set_typegroups,
+ },
+};
+
+const struct vcap_statistics lan969x_vcap_stats = {
+ .name = "lan969x",
+ .count = 4,
+ .keyfield_set_names = vcap_keyfield_set_names,
+ .actionfield_set_names = vcap_actionfield_set_names,
+ .keyfield_names = vcap_keyfield_names,
+ .actionfield_names = vcap_actionfield_names,
+};
diff --git a/drivers/net/ethernet/microchip/lan969x/lan969x_vcap_impl.c b/drivers/net/ethernet/microchip/lan969x/lan969x_vcap_impl.c
new file mode 100644
index 000000000000..543a1f2bf6bd
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan969x/lan969x_vcap_impl.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "vcap_api.h"
+#include "lan969x.h"
+
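+/* lan969x reuses the Sparx5 VCAP instance layout: the six CLM (IS0)
+ * lookups are spread over three instances of two lookups each, the four
+ * IS2 lookups over two instances, while ES0 and ES2 are single instances.
+ */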
+const struct sparx5_vcap_inst lan969x_vcap_inst_cfg[] = {
+ {
+ .vtype = VCAP_TYPE_IS0, /* CLM-0 */
+ .vinst = 0,
+ .map_id = 1,
+ .lookups = SPARX5_IS0_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS0_LOOKUPS / 3,
+ .first_cid = SPARX5_VCAP_CID_IS0_L0,
+ .last_cid = SPARX5_VCAP_CID_IS0_L2 - 1,
+ .blockno = 2,
+ .blocks = 1,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS0, /* CLM-1 */
+ .vinst = 1,
+ .map_id = 2,
+ .lookups = SPARX5_IS0_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS0_LOOKUPS / 3,
+ .first_cid = SPARX5_VCAP_CID_IS0_L2,
+ .last_cid = SPARX5_VCAP_CID_IS0_L4 - 1,
+ .blockno = 3,
+ .blocks = 1,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS0, /* CLM-2 */
+ .vinst = 2,
+ .map_id = 3,
+ .lookups = SPARX5_IS0_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS0_LOOKUPS / 3,
+ .first_cid = SPARX5_VCAP_CID_IS0_L4,
+ .last_cid = SPARX5_VCAP_CID_IS0_MAX,
+ .blockno = 4,
+ .blocks = 1,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS2, /* IS2-0 */
+ .vinst = 0,
+ .map_id = 4,
+ .lookups = SPARX5_IS2_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS2_LOOKUPS / 2,
+ .first_cid = SPARX5_VCAP_CID_IS2_L0,
+ .last_cid = SPARX5_VCAP_CID_IS2_L2 - 1,
+ .blockno = 0,
+ .blocks = 1,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS2, /* IS2-1 */
+ .vinst = 1,
+ .map_id = 5,
+ .lookups = SPARX5_IS2_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS2_LOOKUPS / 2,
+ .first_cid = SPARX5_VCAP_CID_IS2_L2,
+ .last_cid = SPARX5_VCAP_CID_IS2_MAX,
+ .blockno = 1,
+ .blocks = 1,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_ES0,
+ .lookups = SPARX5_ES0_LOOKUPS,
+ .lookups_per_instance = SPARX5_ES0_LOOKUPS,
+ .first_cid = SPARX5_VCAP_CID_ES0_L0,
+ .last_cid = SPARX5_VCAP_CID_ES0_MAX,
+ .count = 1536,
+ .ingress = false,
+ },
+ {
+ .vtype = VCAP_TYPE_ES2,
+ .lookups = SPARX5_ES2_LOOKUPS,
+ .lookups_per_instance = SPARX5_ES2_LOOKUPS,
+ .first_cid = SPARX5_VCAP_CID_ES2_L0,
+ .last_cid = SPARX5_VCAP_CID_ES2_MAX,
+ .count = 1024,
+ .ingress = false,
+ },
+};
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index 288de95add18..3435ca86dd70 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -11,7 +11,7 @@ sparx5-switch-y := sparx5_main.o sparx5_packet.o \
sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o \
sparx5_vcap_impl.o sparx5_vcap_ag_api.o sparx5_tc_flower.o \
sparx5_tc_matchall.o sparx5_pool.o sparx5_sdlb.o sparx5_police.o \
- sparx5_psfp.o sparx5_mirror.o
+ sparx5_psfp.o sparx5_mirror.o sparx5_regs.o
sparx5-switch-$(CONFIG_SPARX5_DCB) += sparx5_dcb.o
sparx5-switch-$(CONFIG_DEBUG_FS) += sparx5_vcap_debugfs.o
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
index 76a8bb596aec..5fe941c66c17 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
@@ -15,9 +15,6 @@
#define SPX5_CALBITS_PER_PORT 3 /* Bits per port in calendar register */
/* DSM calendar information */
-#define SPX5_DSM_CAL_LEN 64
-#define SPX5_DSM_CAL_EMPTY 0xFFFF
-#define SPX5_DSM_CAL_MAX_DEVS_PER_TAXI 13
#define SPX5_DSM_CAL_TAXIS 8
#define SPX5_DSM_CAL_BW_LOSS 553
@@ -37,19 +34,6 @@ static u32 sparx5_taxi_ports[SPX5_DSM_CAL_TAXIS][SPX5_DSM_CAL_MAX_DEVS_PER_TAXI]
{64, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99},
};
-struct sparx5_calendar_data {
- u32 schedule[SPX5_DSM_CAL_LEN];
- u32 avg_dist[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
- u32 taxi_ports[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
- u32 taxi_speeds[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
- u32 dev_slots[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
- u32 new_slots[SPX5_DSM_CAL_LEN];
- u32 temp_sched[SPX5_DSM_CAL_LEN];
- u32 indices[SPX5_DSM_CAL_LEN];
- u32 short_list[SPX5_DSM_CAL_LEN];
- u32 long_list[SPX5_DSM_CAL_LEN];
-};
-
static u32 sparx5_target_bandwidth(struct sparx5 *sparx5)
{
switch (sparx5->target_ct) {
@@ -68,27 +52,32 @@ static u32 sparx5_target_bandwidth(struct sparx5 *sparx5)
case SPX5_TARGET_CT_7558:
case SPX5_TARGET_CT_7558TSN:
return 201000;
+ case SPX5_TARGET_CT_LAN9691VAO:
+ return 46000;
+ case SPX5_TARGET_CT_LAN9694RED:
+ case SPX5_TARGET_CT_LAN9694TSN:
+ case SPX5_TARGET_CT_LAN9694:
+ return 68000;
+ case SPX5_TARGET_CT_LAN9696RED:
+ case SPX5_TARGET_CT_LAN9696TSN:
+ case SPX5_TARGET_CT_LAN9692VAO:
+ case SPX5_TARGET_CT_LAN9696:
+ return 88000;
+ case SPX5_TARGET_CT_LAN9698RED:
+ case SPX5_TARGET_CT_LAN9698TSN:
+ case SPX5_TARGET_CT_LAN9693VAO:
+ case SPX5_TARGET_CT_LAN9698:
+ return 101000;
default:
return 0;
}
}
-/* This is used in calendar configuration */
-enum sparx5_cal_bw {
- SPX5_CAL_SPEED_NONE = 0,
- SPX5_CAL_SPEED_1G = 1,
- SPX5_CAL_SPEED_2G5 = 2,
- SPX5_CAL_SPEED_5G = 3,
- SPX5_CAL_SPEED_10G = 4,
- SPX5_CAL_SPEED_25G = 5,
- SPX5_CAL_SPEED_0G5 = 6,
- SPX5_CAL_SPEED_12G5 = 7
-};
-
static u32 sparx5_clk_to_bandwidth(enum sparx5_core_clockfreq cclock)
{
switch (cclock) {
case SPX5_CORE_CLOCK_250MHZ: return 83000; /* 250000 / 3 */
+ case SPX5_CORE_CLOCK_328MHZ: return 109375; /* 328000 / 3 */
case SPX5_CORE_CLOCK_500MHZ: return 166000; /* 500000 / 3 */
case SPX5_CORE_CLOCK_625MHZ: return 208000; /* 625000 / 3 */
default: return 0;
@@ -96,7 +85,7 @@ static u32 sparx5_clk_to_bandwidth(enum sparx5_core_clockfreq cclock)
return 0;
}
-static u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed)
+u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed)
{
switch (speed) {
case SPX5_CAL_SPEED_1G: return 1000;
@@ -109,6 +98,7 @@ static u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed)
default: return 0;
}
}
+EXPORT_SYMBOL_GPL(sparx5_cal_speed_to_value);
static u32 sparx5_bandwidth_to_calendar(u32 bw)
{
@@ -126,23 +116,28 @@ static u32 sparx5_bandwidth_to_calendar(u32 bw)
}
}
-static enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5,
- u32 portno)
+enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5, u32 portno)
{
struct sparx5_port *port;
- if (portno >= SPX5_PORTS) {
+ if (portno >= sparx5->data->consts->n_ports) {
/* Internal ports */
- if (portno == SPX5_PORT_CPU_0 || portno == SPX5_PORT_CPU_1) {
+ if (portno ==
+ sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0) ||
+ portno ==
+ sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1)) {
/* Equals 1.25G */
return SPX5_CAL_SPEED_2G5;
- } else if (portno == SPX5_PORT_VD0) {
+ } else if (portno ==
+ sparx5_get_internal_port(sparx5, SPX5_PORT_VD0)) {
/* IPMC only idle BW */
return SPX5_CAL_SPEED_NONE;
- } else if (portno == SPX5_PORT_VD1) {
+ } else if (portno ==
+ sparx5_get_internal_port(sparx5, SPX5_PORT_VD1)) {
/* OAM only idle BW */
return SPX5_CAL_SPEED_NONE;
- } else if (portno == SPX5_PORT_VD2) {
+ } else if (portno ==
+ sparx5_get_internal_port(sparx5, SPX5_PORT_VD2)) {
/* IPinIP gets only idle BW */
return SPX5_CAL_SPEED_NONE;
}
@@ -155,10 +150,12 @@ static enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5,
return SPX5_CAL_SPEED_NONE;
return sparx5_bandwidth_to_calendar(port->conf.bandwidth);
}
+EXPORT_SYMBOL_GPL(sparx5_get_port_cal_speed);
/* Auto configure the QSYS calendar based on port configuration */
int sparx5_config_auto_calendar(struct sparx5 *sparx5)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
u32 cal[7], value, idx, portno;
u32 max_core_bw;
u32 total_bw = 0, used_port_bw = 0;
@@ -174,7 +171,7 @@ int sparx5_config_auto_calendar(struct sparx5 *sparx5)
}
/* Setup the calendar with the bandwidth to each port */
- for (portno = 0; portno < SPX5_PORTS_ALL; portno++) {
+ for (portno = 0; portno < consts->n_ports_all; portno++) {
u64 reg, offset, this_bw;
spd = sparx5_get_port_cal_speed(sparx5, portno);
@@ -182,7 +179,7 @@ int sparx5_config_auto_calendar(struct sparx5 *sparx5)
continue;
this_bw = sparx5_cal_speed_to_value(spd);
- if (portno < SPX5_PORTS)
+ if (portno < consts->n_ports)
used_port_bw += this_bw;
else
/* Internal ports are granted half the value */
@@ -208,12 +205,13 @@ int sparx5_config_auto_calendar(struct sparx5 *sparx5)
}
/* Halt the calendar while changing it */
- spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(10),
- QSYS_CAL_CTRL_CAL_MODE,
- sparx5, QSYS_CAL_CTRL);
+ if (is_sparx5(sparx5))
+ spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(10),
+ QSYS_CAL_CTRL_CAL_MODE,
+ sparx5, QSYS_CAL_CTRL);
/* Assign port bandwidth to auto calendar */
- for (idx = 0; idx < ARRAY_SIZE(cal); idx++)
+ for (idx = 0; idx < consts->n_auto_cals; idx++)
spx5_wr(cal[idx], sparx5, QSYS_CAL_AUTO(idx));
/* Increase grant rate of all ports to account for
@@ -278,8 +276,8 @@ static u32 sparx5_dsm_cp_cal(u32 *sched)
return SPX5_DSM_CAL_EMPTY;
}
-static int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
- struct sparx5_calendar_data *data)
+int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
+ struct sparx5_calendar_data *data)
{
bool slow_mode;
u32 gcd, idx, sum, min, factor;
@@ -304,7 +302,7 @@ static int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
for (idx = 0; idx < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; idx++) {
u32 portno = data->taxi_ports[idx];
- if (portno < SPX5_TAXI_PORT_MAX) {
+ if (portno < sparx5->data->consts->n_ports_all) {
data->taxi_speeds[idx] = sparx5_cal_speed_to_value
(sparx5_get_port_cal_speed(sparx5, portno));
} else {
@@ -533,12 +531,23 @@ check_err:
static int sparx5_dsm_calendar_update(struct sparx5 *sparx5, u32 taxi,
struct sparx5_calendar_data *data)
{
- u32 idx;
- u32 cal_len = sparx5_dsm_cal_len(data->schedule), len;
+ u32 cal_len = sparx5_dsm_cal_len(data->schedule), len, idx;
+
+ if (!is_sparx5(sparx5)) {
+ u32 val, act;
+
+ val = spx5_rd(sparx5, DSM_TAXI_CAL_CFG(taxi));
+ act = DSM_TAXI_CAL_CFG_CAL_SEL_STAT_GET(val);
- spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(1),
- sparx5,
- DSM_TAXI_CAL_CFG(taxi));
+ spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_SEL_SET(!act),
+ DSM_TAXI_CAL_CFG_CAL_PGM_SEL,
+ sparx5, DSM_TAXI_CAL_CFG(taxi));
+ }
+
+ spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(1),
+ DSM_TAXI_CAL_CFG_CAL_PGM_ENA,
+ sparx5,
+ DSM_TAXI_CAL_CFG(taxi));
for (idx = 0; idx < cal_len; idx++) {
spx5_rmw(DSM_TAXI_CAL_CFG_CAL_IDX_SET(idx),
DSM_TAXI_CAL_CFG_CAL_IDX,
@@ -549,13 +558,21 @@ static int sparx5_dsm_calendar_update(struct sparx5 *sparx5, u32 taxi,
sparx5,
DSM_TAXI_CAL_CFG(taxi));
}
- spx5_wr(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(0),
- sparx5,
- DSM_TAXI_CAL_CFG(taxi));
+ spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(0),
+ DSM_TAXI_CAL_CFG_CAL_PGM_ENA,
+ sparx5,
+ DSM_TAXI_CAL_CFG(taxi));
len = DSM_TAXI_CAL_CFG_CAL_CUR_LEN_GET(spx5_rd(sparx5,
DSM_TAXI_CAL_CFG(taxi)));
if (len != cal_len - 1)
goto update_err;
+
+ if (!is_sparx5(sparx5)) {
+ spx5_rmw(DSM_TAXI_CAL_CFG_CAL_SWITCH_SET(1),
+ DSM_TAXI_CAL_CFG_CAL_SWITCH,
+ sparx5, DSM_TAXI_CAL_CFG(taxi));
+ }
+
return 0;
update_err:
dev_err(sparx5->dev, "Incorrect calendar length: %u\n", len);
@@ -565,6 +582,7 @@ update_err:
/* Configure the DSM calendar based on port configuration */
int sparx5_config_dsm_calendar(struct sparx5 *sparx5)
{
+ const struct sparx5_ops *ops = sparx5->data->ops;
int taxi;
struct sparx5_calendar_data *data;
int err = 0;
@@ -573,8 +591,8 @@ int sparx5_config_dsm_calendar(struct sparx5 *sparx5)
if (!data)
return -ENOMEM;
- for (taxi = 0; taxi < SPX5_DSM_CAL_TAXIS; ++taxi) {
- err = sparx5_dsm_calendar_calc(sparx5, taxi, data);
+ for (taxi = 0; taxi < sparx5->data->consts->n_dsm_cal_taxis; ++taxi) {
+ err = ops->dsm_calendar_calc(sparx5, taxi, data);
if (err) {
dev_err(sparx5->dev, "DSM calendar calculation failed\n");
goto cal_out;
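
The lan969x branches added to sparx5_dsm_calendar_update() above implement a double-buffered calendar: the currently active bank is read back, the inactive bank is selected for programming, and CAL_SWITCH makes it live only after the length check passes. A condensed sketch of that sequence, reusing only the accessors and field macros visible in the hunk (the helper itself is illustrative, not part of the patch):

/* Sketch: dual-bank calendar programming on lan969x. */
static void lan969x_taxi_cal_reprogram(struct sparx5 *sparx5, u32 taxi)
{
	u32 val, act;

	/* Find the bank currently feeding the taxi bus... */
	val = spx5_rd(sparx5, DSM_TAXI_CAL_CFG(taxi));
	act = DSM_TAXI_CAL_CFG_CAL_SEL_STAT_GET(val);

	/* ...select the other bank for programming... */
	spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_SEL_SET(!act),
		 DSM_TAXI_CAL_CFG_CAL_PGM_SEL,
		 sparx5, DSM_TAXI_CAL_CFG(taxi));

	/* ...write the calendar entries, as in the loop above... */

	/* ...then flip the banks so the new calendar takes effect */
	spx5_rmw(DSM_TAXI_CAL_CFG_CAL_SWITCH_SET(1),
		 DSM_TAXI_CAL_CFG_CAL_SWITCH,
		 sparx5, DSM_TAXI_CAL_CFG(taxi));
}

On Sparx5 the PGM_SEL/CAL_SWITCH steps are skipped, which is exactly what the is_sparx5() guards above express.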
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
index 2d763664dcda..10224ad63a78 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
@@ -234,10 +234,11 @@ static int sparx5_dcb_ieee_dscp_setdel(struct net_device *dev,
struct dcb_app *))
{
struct sparx5_port *port = netdev_priv(dev);
+ struct sparx5 *sparx5 = port->sparx5;
struct sparx5_port *port_itr;
int err, i;
- for (i = 0; i < SPX5_PORTS; i++) {
+ for (i = 0; i < sparx5->data->consts->n_ports; i++) {
port_itr = port->sparx5->ports[i];
if (!port_itr)
continue;
@@ -386,7 +387,7 @@ int sparx5_dcb_init(struct sparx5 *sparx5)
struct sparx5_port *port;
int i;
- for (i = 0; i < SPX5_PORTS; i++) {
+ for (i = 0; i < sparx5->data->consts->n_ports; i++) {
port = sparx5->ports[i];
if (!port)
continue;
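
Both loops in sparx5_dcb.c now take their bound from the probed match data instead of the compile-time SPX5_PORTS, the pattern applied throughout this series. A minimal, hypothetical helper showing the shape of it (not part of the patch):

/* Hypothetical: iterate the front ports of whichever chip was probed. */
static void sparx5_for_each_front_port(struct sparx5 *sparx5,
				       void (*cb)(struct sparx5_port *port))
{
	u32 i;

	for (i = 0; i < sparx5->data->consts->n_ports; i++)
		if (sparx5->ports[i])
			cb(sparx5->ports[i]);
}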
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index d898a7238b48..832f4ae57c83 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -505,8 +505,8 @@ static void sparx5_get_dev_misc_stats(u64 *portstats, void __iomem *inst, u32
static void sparx5_get_device_stats(struct sparx5 *sparx5, int portno)
{
u64 *portstats = &sparx5->stats[portno * sparx5->num_stats];
- u32 tinst = sparx5_port_dev_index(portno);
- u32 dev = sparx5_to_high_dev(portno);
+ u32 tinst = sparx5_port_dev_index(sparx5, portno);
+ u32 dev = sparx5_to_high_dev(sparx5, portno);
void __iomem *inst;
inst = spx5_inst_get(sparx5, dev, tinst);
@@ -819,8 +819,8 @@ static void sparx5_get_eth_phy_stats(struct net_device *ndev,
portstats = &sparx5->stats[portno * sparx5->num_stats];
if (sparx5_is_baser(port->conf.portmode)) {
- u32 tinst = sparx5_port_dev_index(portno);
- u32 dev = sparx5_to_high_dev(portno);
+ u32 tinst = sparx5_port_dev_index(sparx5, portno);
+ u32 dev = sparx5_to_high_dev(sparx5, portno);
inst = spx5_inst_get(sparx5, dev, tinst);
sparx5_get_dev_phy_stats(portstats, inst, tinst);
@@ -844,8 +844,8 @@ static void sparx5_get_eth_mac_stats(struct net_device *ndev,
portstats = &sparx5->stats[portno * sparx5->num_stats];
if (sparx5_is_baser(port->conf.portmode)) {
- u32 tinst = sparx5_port_dev_index(portno);
- u32 dev = sparx5_to_high_dev(portno);
+ u32 tinst = sparx5_port_dev_index(sparx5, portno);
+ u32 dev = sparx5_to_high_dev(sparx5, portno);
inst = spx5_inst_get(sparx5, dev, tinst);
sparx5_get_dev_mac_stats(portstats, inst, tinst);
@@ -912,8 +912,8 @@ static void sparx5_get_eth_mac_ctrl_stats(struct net_device *ndev,
portstats = &sparx5->stats[portno * sparx5->num_stats];
if (sparx5_is_baser(port->conf.portmode)) {
- u32 tinst = sparx5_port_dev_index(portno);
- u32 dev = sparx5_to_high_dev(portno);
+ u32 tinst = sparx5_port_dev_index(sparx5, portno);
+ u32 dev = sparx5_to_high_dev(sparx5, portno);
inst = spx5_inst_get(sparx5, dev, tinst);
sparx5_get_dev_mac_ctrl_stats(portstats, inst, tinst);
@@ -944,8 +944,8 @@ static void sparx5_get_eth_rmon_stats(struct net_device *ndev,
portstats = &sparx5->stats[portno * sparx5->num_stats];
if (sparx5_is_baser(port->conf.portmode)) {
- u32 tinst = sparx5_port_dev_index(portno);
- u32 dev = sparx5_to_high_dev(portno);
+ u32 tinst = sparx5_port_dev_index(sparx5, portno);
+ u32 dev = sparx5_to_high_dev(sparx5, portno);
inst = spx5_inst_get(sparx5, dev, tinst);
sparx5_get_dev_rmon_stats(portstats, inst, tinst);
@@ -1027,8 +1027,8 @@ static void sparx5_get_sset_data(struct net_device *ndev,
portstats = &sparx5->stats[portno * sparx5->num_stats];
if (sparx5_is_baser(port->conf.portmode)) {
- u32 tinst = sparx5_port_dev_index(portno);
- u32 dev = sparx5_to_high_dev(portno);
+ u32 tinst = sparx5_port_dev_index(sparx5, portno);
+ u32 dev = sparx5_to_high_dev(sparx5, portno);
inst = spx5_inst_get(sparx5, dev, tinst);
sparx5_get_dev_misc_stats(portstats, inst, tinst);
@@ -1122,7 +1122,7 @@ static void sparx5_update_stats(struct sparx5 *sparx5)
{
int idx;
- for (idx = 0; idx < SPX5_PORTS; idx++)
+ for (idx = 0; idx < sparx5->data->consts->n_ports; idx++)
if (sparx5->ports[idx])
sparx5_update_port_stats(sparx5, idx);
}
@@ -1189,7 +1189,7 @@ static int sparx5_get_ts_info(struct net_device *dev,
struct sparx5 *sparx5 = port->sparx5;
struct sparx5_phc *phc;
- if (!sparx5->ptp)
+ if (!sparx5->ptp && is_sparx5(sparx5))
return ethtool_op_get_ts_info(dev, info);
phc = &sparx5->phc[SPARX5_PHC_PORT];
@@ -1228,6 +1228,7 @@ const struct ethtool_ops sparx5_ethtool_ops = {
int sparx_stats_init(struct sparx5 *sparx5)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
char queue_name[32];
int portno;
@@ -1235,14 +1236,15 @@ int sparx_stats_init(struct sparx5 *sparx5)
sparx5->num_stats = spx5_stats_count;
sparx5->num_ethtool_stats = ARRAY_SIZE(sparx5_stats_layout);
sparx5->stats = devm_kcalloc(sparx5->dev,
- SPX5_PORTS_ALL * sparx5->num_stats,
+ consts->n_ports_all *
+ sparx5->num_stats,
sizeof(u64), GFP_KERNEL);
if (!sparx5->stats)
return -ENOMEM;
mutex_init(&sparx5->queue_stats_lock);
sparx5_config_stats(sparx5);
- for (portno = 0; portno < SPX5_PORTS; portno++)
+ for (portno = 0; portno < consts->n_ports; portno++)
if (sparx5->ports[portno])
sparx5_config_port_stats(sparx5, portno);
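
All the statistics paths gain a struct sparx5 argument because the port-to-device mapping differs between Sparx5 and lan969x and is resolved through the ops table introduced later in this patch. The reworked helpers are assumed to reduce to a dispatch of this shape (illustrative only; the real bodies live in sparx5_port.h/.c, which this excerpt does not show):

/* Assumed shape of the instance-aware lookup, for illustration. */
static u32 example_port_dev_index(struct sparx5 *sparx5, int portno)
{
	return sparx5->data->ops->get_port_dev_index(sparx5, portno);
}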
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
index 61df874b7623..0027144a2af2 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
@@ -154,9 +154,11 @@ static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx
skb = rx->skb[fdma->dcb_index][fdma->db_index];
skb_put(skb, fdma_db_len_get(db_hw));
/* Now do the normal processing of the skb */
- sparx5_ifh_parse((u32 *)skb->data, &fi);
+ sparx5_ifh_parse(sparx5, (u32 *)skb->data, &fi);
/* Map to port netdev */
- port = fi.src_port < SPX5_PORTS ? sparx5->ports[fi.src_port] : NULL;
+ port = fi.src_port < sparx5->data->consts->n_ports ?
+ sparx5->ports[fi.src_port] :
+ NULL;
if (!port || !port->ndev) {
dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
sparx5_xtr_flush(sparx5, XTR_QUEUE);
@@ -296,7 +298,7 @@ static void sparx5_fdma_rx_init(struct sparx5 *sparx5,
fdma->ops.dataptr_cb = &sparx5_fdma_rx_dataptr_cb;
fdma->ops.nextptr_cb = &fdma_nextptr_cb;
/* Fetch a netdev for SKB and NAPI use, any will do */
- for (idx = 0; idx < SPX5_PORTS; ++idx) {
+ for (idx = 0; idx < sparx5->data->consts->n_ports; ++idx) {
struct sparx5_port *port = sparx5->ports[idx];
if (port && port->ndev) {
@@ -362,7 +364,9 @@ static void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));
/* CPU ports capture setup */
- for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) {
+ for (portno = sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0);
+ portno <= sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1);
+ portno++) {
/* ASM CPU port: No preamble, IFH, enable padding */
spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
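
With lan969x having fewer front ports, the CPU and virtual-device port numbers can no longer be fixed constants; SPX5_PORT_CPU_0 and friends become offsets (see the sparx5_main.h hunk below) resolved via sparx5_get_internal_port(). Its body lives in sparx5_port.c and is not part of this excerpt; given the new numbering, the assumed implementation is simply:

/* Assumed implementation, for illustration only: internal ports sit
 * directly after the front ports of the probed chip.
 */
static int example_internal_port(struct sparx5 *sparx5, int off)
{
	return sparx5->data->consts->n_ports + off;
}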
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
index 75868b3f548e..f5584244612c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
@@ -80,15 +80,16 @@ static void sparx5_mact_select(struct sparx5 *sparx5,
int sparx5_mact_learn(struct sparx5 *sparx5, int pgid,
const unsigned char mac[ETH_ALEN], u16 vid)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
int addr, type, ret;
- if (pgid < SPX5_PORTS) {
+ if (pgid < consts->n_ports) {
type = MAC_ENTRY_ADDR_TYPE_UPSID_PN;
addr = pgid % 32;
addr += (pgid / 32) << 5; /* Add upsid */
} else {
type = MAC_ENTRY_ADDR_TYPE_MC_IDX;
- addr = pgid - SPX5_PORTS;
+ addr = pgid - consts->n_ports;
}
mutex_lock(&sparx5->lock);
@@ -128,7 +129,8 @@ int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr)
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *sparx5 = port->sparx5;
- return sparx5_mact_learn(sparx5, PGID_CPU, addr, port->pvid);
+ return sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU),
+ addr, port->pvid);
}
static int sparx5_mact_get(struct sparx5 *sparx5,
@@ -371,7 +373,7 @@ static void sparx5_mact_handle_entry(struct sparx5 *sparx5,
return;
port = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(cfg2);
- if (port >= SPX5_PORTS)
+ if (port >= sparx5->data->consts->n_ports)
return;
if (!test_bit(port, sparx5->bridge_mask))
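
The PGID_* constants likewise become offsets from the end of the per-port PGID range (see the sparx5_main.h hunk below), so every user is rewritten to go through sparx5_get_pgid(). The helper is declared below but defined in sparx5_pgid.c outside this excerpt; under the new numbering it presumably reduces to:

/* Presumed lookup, for illustration: PGID_CPU etc. are offsets from
 * the end of the per-port PGIDs.
 */
static int example_get_pgid(struct sparx5 *sparx5, int pgid)
{
	return sparx5->data->consts->n_ports + pgid;
}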
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index b64c814eac11..2f1013f870fb 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -24,13 +24,17 @@
#include <linux/types.h>
#include <linux/reset.h>
+#include "../lan969x/lan969x.h" /* for lan969x match data */
+
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"
#include "sparx5_qos.h"
+#include "sparx5_vcap_ag_api.h"
+#include "sparx5_vcap_impl.h"
+
+const struct sparx5_regs *regs;
-#define QLIM_WM(fraction) \
- ((SPX5_BUFFER_MEMORY / SPX5_BUFFER_CELL_SZ - 100) * (fraction) / 100)
#define IO_RANGES 3
struct initial_port_config {
@@ -45,12 +49,6 @@ struct sparx5_ram_config {
u32 init_val;
};
-struct sparx5_main_io_resource {
- enum sparx5_target id;
- phys_addr_t offset;
- int range;
-};
-
static const struct sparx5_main_io_resource sparx5_main_iomap[] = {
{ TARGET_CPU, 0, 0 }, /* 0x600000000 */
{ TARGET_FDMA, 0x80000, 0 }, /* 0x600080000 */
@@ -214,23 +212,79 @@ static const struct sparx5_main_io_resource sparx5_main_iomap[] = {
{ TARGET_VOP, 0x11a00000, 2 }, /* 0x611a00000 */
};
+bool is_sparx5(struct sparx5 *sparx5)
+{
+ switch (sparx5->target_ct) {
+ case SPX5_TARGET_CT_7546:
+ case SPX5_TARGET_CT_7549:
+ case SPX5_TARGET_CT_7552:
+ case SPX5_TARGET_CT_7556:
+ case SPX5_TARGET_CT_7558:
+ case SPX5_TARGET_CT_7546TSN:
+ case SPX5_TARGET_CT_7549TSN:
+ case SPX5_TARGET_CT_7552TSN:
+ case SPX5_TARGET_CT_7556TSN:
+ case SPX5_TARGET_CT_7558TSN:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void sparx5_init_features(struct sparx5 *sparx5)
+{
+ switch (sparx5->target_ct) {
+ case SPX5_TARGET_CT_7546:
+ case SPX5_TARGET_CT_7549:
+ case SPX5_TARGET_CT_7552:
+ case SPX5_TARGET_CT_7556:
+ case SPX5_TARGET_CT_7558:
+ case SPX5_TARGET_CT_7546TSN:
+ case SPX5_TARGET_CT_7549TSN:
+ case SPX5_TARGET_CT_7552TSN:
+ case SPX5_TARGET_CT_7556TSN:
+ case SPX5_TARGET_CT_7558TSN:
+ case SPX5_TARGET_CT_LAN9691VAO:
+ case SPX5_TARGET_CT_LAN9694TSN:
+ case SPX5_TARGET_CT_LAN9694RED:
+ case SPX5_TARGET_CT_LAN9692VAO:
+ case SPX5_TARGET_CT_LAN9696TSN:
+ case SPX5_TARGET_CT_LAN9696RED:
+ case SPX5_TARGET_CT_LAN9693VAO:
+ case SPX5_TARGET_CT_LAN9698TSN:
+ case SPX5_TARGET_CT_LAN9698RED:
+ sparx5->features = (SPX5_FEATURE_PSFP | SPX5_FEATURE_PTP);
+ break;
+ default:
+ break;
+ }
+}
+
+bool sparx5_has_feature(struct sparx5 *sparx5, enum sparx5_feature feature)
+{
+ return sparx5->features & feature;
+}
+
static int sparx5_create_targets(struct sparx5 *sparx5)
{
+ const struct sparx5_main_io_resource *iomap = sparx5->data->iomap;
+ int iomap_size = sparx5->data->iomap_size;
+ int ioranges = sparx5->data->ioranges;
struct resource *iores[IO_RANGES];
void __iomem *iomem[IO_RANGES];
void __iomem *begin[IO_RANGES];
int range_id[IO_RANGES];
int idx, jdx;
- for (idx = 0, jdx = 0; jdx < ARRAY_SIZE(sparx5_main_iomap); jdx++) {
- const struct sparx5_main_io_resource *iomap = &sparx5_main_iomap[jdx];
+ for (idx = 0, jdx = 0; jdx < iomap_size; jdx++) {
+ const struct sparx5_main_io_resource *io = &iomap[jdx];
- if (idx == iomap->range) {
+ if (idx == io->range) {
range_id[idx] = jdx;
idx++;
}
}
- for (idx = 0; idx < IO_RANGES; idx++) {
+ for (idx = 0; idx < ioranges; idx++) {
iores[idx] = platform_get_resource(sparx5->pdev, IORESOURCE_MEM,
idx);
if (!iores[idx]) {
@@ -245,12 +299,12 @@ static int sparx5_create_targets(struct sparx5 *sparx5)
iores[idx]->name);
return -ENOMEM;
}
- begin[idx] = iomem[idx] - sparx5_main_iomap[range_id[idx]].offset;
+ begin[idx] = iomem[idx] - iomap[range_id[idx]].offset;
}
- for (jdx = 0; jdx < ARRAY_SIZE(sparx5_main_iomap); jdx++) {
- const struct sparx5_main_io_resource *iomap = &sparx5_main_iomap[jdx];
+ for (jdx = 0; jdx < iomap_size; jdx++) {
+ const struct sparx5_main_io_resource *io = &iomap[jdx];
- sparx5->regs[iomap->id] = begin[iomap->range] + iomap->offset;
+ sparx5->regs[io->id] = begin[io->range] + io->offset;
}
return 0;
}
@@ -459,56 +513,74 @@ static int sparx5_init_coreclock(struct sparx5 *sparx5)
else if (sparx5->coreclock == SPX5_CORE_CLOCK_250MHZ)
freq = 0; /* Not supported */
break;
+ case SPX5_TARGET_CT_LAN9694:
+ case SPX5_TARGET_CT_LAN9691VAO:
+ case SPX5_TARGET_CT_LAN9694TSN:
+ case SPX5_TARGET_CT_LAN9694RED:
+ case SPX5_TARGET_CT_LAN9696:
+ case SPX5_TARGET_CT_LAN9692VAO:
+ case SPX5_TARGET_CT_LAN9696TSN:
+ case SPX5_TARGET_CT_LAN9696RED:
+ case SPX5_TARGET_CT_LAN9698:
+ case SPX5_TARGET_CT_LAN9693VAO:
+ case SPX5_TARGET_CT_LAN9698TSN:
+ case SPX5_TARGET_CT_LAN9698RED:
+ freq = SPX5_CORE_CLOCK_328MHZ;
+ break;
default:
dev_err(sparx5->dev, "Target (%#04x) not supported\n",
sparx5->target_ct);
return -ENODEV;
}
- switch (freq) {
- case SPX5_CORE_CLOCK_250MHZ:
- clk_div = 10;
- pol_upd_int = 312;
- break;
- case SPX5_CORE_CLOCK_500MHZ:
- clk_div = 5;
- pol_upd_int = 624;
- break;
- case SPX5_CORE_CLOCK_625MHZ:
- clk_div = 4;
- pol_upd_int = 780;
- break;
- default:
- dev_err(sparx5->dev, "%d coreclock not supported on (%#04x)\n",
- sparx5->coreclock, sparx5->target_ct);
- return -EINVAL;
+ if (is_sparx5(sparx5)) {
+ switch (freq) {
+ case SPX5_CORE_CLOCK_250MHZ:
+ clk_div = 10;
+ pol_upd_int = 312;
+ break;
+ case SPX5_CORE_CLOCK_500MHZ:
+ clk_div = 5;
+ pol_upd_int = 624;
+ break;
+ case SPX5_CORE_CLOCK_625MHZ:
+ clk_div = 4;
+ pol_upd_int = 780;
+ break;
+ default:
+ dev_err(sparx5->dev,
+ "%d coreclock not supported on (%#04x)\n",
+ sparx5->coreclock, sparx5->target_ct);
+ return -EINVAL;
+ }
+
+ /* Configure the LCPLL */
+ spx5_rmw(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(clk_div) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(0) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(0) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(0) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(0) |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(1),
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA |
+ CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA,
+ sparx5, CLKGEN_LCPLL1_CORE_CLK_CFG);
+ } else {
+ pol_upd_int = 820; /* SPX5_CORE_CLOCK_328MHZ */
}
/* Update state with chosen frequency */
sparx5->coreclock = freq;
-
- /* Configure the LCPLL */
- spx5_rmw(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(clk_div) |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV_SET(0) |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR_SET(0) |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL_SET(0) |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA_SET(0) |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_SET(1),
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_PRE_DIV |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_DIR |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_SEL |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_ROT_ENA |
- CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA,
- sparx5,
- CLKGEN_LCPLL1_CORE_CLK_CFG);
-
clk_period = sparx5_clk_period(freq);
- spx5_rmw(HSCH_SYS_CLK_PER_100PS_SET(clk_period / 100),
- HSCH_SYS_CLK_PER_100PS,
- sparx5,
- HSCH_SYS_CLK_PER);
+ if (is_sparx5(sparx5))
+ spx5_rmw(HSCH_SYS_CLK_PER_100PS_SET(clk_period / 100),
+ HSCH_SYS_CLK_PER_100PS,
+ sparx5,
+ HSCH_SYS_CLK_PER);
spx5_rmw(ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_SET(clk_period / 100),
ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS,
@@ -525,7 +597,7 @@ static int sparx5_init_coreclock(struct sparx5 *sparx5)
sparx5,
LRN_AUTOAGE_CFG_1);
- for (idx = 0; idx < 3; idx++)
+ for (idx = 0; idx < sparx5->data->consts->n_sio_clks; idx++)
spx5_rmw(GCB_SIO_CLOCK_SYS_CLK_PERIOD_SET(clk_period / 100),
GCB_SIO_CLOCK_SYS_CLK_PERIOD,
sparx5,
@@ -545,25 +617,36 @@ static int sparx5_init_coreclock(struct sparx5 *sparx5)
return 0;
}
+static u32 qlim_wm(struct sparx5 *sparx5, int fraction)
+{
+ return (sparx5->data->consts->buf_size / SPX5_BUFFER_CELL_SZ - 100) *
+ fraction / 100;
+}
+
static int sparx5_qlim_set(struct sparx5 *sparx5)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
u32 res, dp, prio;
for (res = 0; res < 2; res++) {
for (prio = 0; prio < 8; prio++)
spx5_wr(0xFFF, sparx5,
- QRES_RES_CFG(prio + 630 + res * 1024));
+ QRES_RES_CFG(prio +
+ consts->qres_max_prio_idx +
+ res * 1024));
for (dp = 0; dp < 4; dp++)
spx5_wr(0xFFF, sparx5,
- QRES_RES_CFG(dp + 638 + res * 1024));
+ QRES_RES_CFG(dp +
+ consts->qres_max_colour_idx +
+ res * 1024));
}
/* Set 80,90,95,100% of memory size for top watermarks */
- spx5_wr(QLIM_WM(80), sparx5, XQS_QLIMIT_SHR_QLIM_CFG(0));
- spx5_wr(QLIM_WM(90), sparx5, XQS_QLIMIT_SHR_CTOP_CFG(0));
- spx5_wr(QLIM_WM(95), sparx5, XQS_QLIMIT_SHR_ATOP_CFG(0));
- spx5_wr(QLIM_WM(100), sparx5, XQS_QLIMIT_SHR_TOP_CFG(0));
+ spx5_wr(qlim_wm(sparx5, 80), sparx5, XQS_QLIMIT_SHR_QLIM_CFG(0));
+ spx5_wr(qlim_wm(sparx5, 90), sparx5, XQS_QLIMIT_SHR_CTOP_CFG(0));
+ spx5_wr(qlim_wm(sparx5, 95), sparx5, XQS_QLIMIT_SHR_ATOP_CFG(0));
+ spx5_wr(qlim_wm(sparx5, 100), sparx5, XQS_QLIMIT_SHR_TOP_CFG(0));
return 0;
}
@@ -585,7 +668,7 @@ static void sparx5_board_init(struct sparx5 *sparx5)
GCB_HW_SGPIO_SD_CFG);
/* Refer to LOS SGPIO */
- for (idx = 0; idx < SPX5_PORTS; idx++)
+ for (idx = 0; idx < sparx5->data->consts->n_ports; idx++)
if (sparx5->ports[idx])
if (sparx5->ports[idx]->conf.sd_sgpio != ~0)
spx5_wr(sparx5->ports[idx]->conf.sd_sgpio,
@@ -596,12 +679,14 @@ static void sparx5_board_init(struct sparx5 *sparx5)
static int sparx5_start(struct sparx5 *sparx5)
{
u8 broadcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ const struct sparx5_consts *consts = sparx5->data->consts;
+ const struct sparx5_ops *ops = sparx5->data->ops;
char queue_name[32];
u32 idx;
int err;
/* Setup own UPSIDs */
- for (idx = 0; idx < 3; idx++) {
+ for (idx = 0; idx < consts->n_own_upsids; idx++) {
spx5_wr(idx, sparx5, ANA_AC_OWN_UPSID(idx));
spx5_wr(idx, sparx5, ANA_CL_OWN_UPSID(idx));
spx5_wr(idx, sparx5, ANA_L2_OWN_UPSID(idx));
@@ -609,7 +694,7 @@ static int sparx5_start(struct sparx5 *sparx5)
}
/* Enable CPU ports */
- for (idx = SPX5_PORTS; idx < SPX5_PORTS_ALL; idx++)
+ for (idx = consts->n_ports; idx < consts->n_ports_all; idx++)
spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1),
QFWD_SWITCH_PORT_MODE_PORT_ENA,
sparx5,
@@ -619,13 +704,14 @@ static int sparx5_start(struct sparx5 *sparx5)
sparx5_update_fwd(sparx5);
/* CPU copy CPU pgids */
- spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
- sparx5, ANA_AC_PGID_MISC_CFG(PGID_CPU));
- spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
- sparx5, ANA_AC_PGID_MISC_CFG(PGID_BCAST));
+ spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1), sparx5,
+ ANA_AC_PGID_MISC_CFG(sparx5_get_pgid(sparx5, PGID_CPU)));
+ spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1), sparx5,
+ ANA_AC_PGID_MISC_CFG(sparx5_get_pgid(sparx5, PGID_BCAST)));
/* Recalc injected frame FCS */
- for (idx = SPX5_PORT_CPU_0; idx <= SPX5_PORT_CPU_1; idx++)
+ for (idx = sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0);
+ idx <= sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1); idx++)
spx5_rmw(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_SET(1),
ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA,
sparx5, ANA_CL_FILTER_CTRL(idx));
@@ -640,7 +726,8 @@ static int sparx5_start(struct sparx5 *sparx5)
sparx5_vlan_init(sparx5);
/* Add host mode BC address (points only to CPU) */
- sparx5_mact_learn(sparx5, PGID_CPU, broadcast, NULL_VID);
+ sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU), broadcast,
+ NULL_VID);
/* Enable queue limitation watermarks */
sparx5_qlim_set(sparx5);
@@ -691,7 +778,7 @@ static int sparx5_start(struct sparx5 *sparx5)
/* Start Frame DMA with fallback to register based INJ/XTR */
err = -ENXIO;
- if (sparx5->fdma_irq >= 0) {
+ if (sparx5->fdma_irq >= 0 && is_sparx5(sparx5)) {
if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0)
err = devm_request_threaded_irq(sparx5->dev,
sparx5->fdma_irq,
@@ -718,9 +805,10 @@ static int sparx5_start(struct sparx5 *sparx5)
sparx5->xtr_irq = -ENXIO;
}
- if (sparx5->ptp_irq >= 0) {
+ if (sparx5->ptp_irq >= 0 &&
+ sparx5_has_feature(sparx5, SPX5_FEATURE_PTP)) {
err = devm_request_threaded_irq(sparx5->dev, sparx5->ptp_irq,
- NULL, sparx5_ptp_irq_handler,
+ NULL, ops->ptp_irq_handler,
IRQF_ONESHOT, "sparx5-ptp",
sparx5);
if (err)
@@ -759,6 +847,12 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
sparx5->dev = &pdev->dev;
spin_lock_init(&sparx5->tx_lock);
+ sparx5->data = device_get_match_data(sparx5->dev);
+ if (!sparx5->data)
+ return -EINVAL;
+
+ regs = sparx5->data->regs;
+
/* Do switch core reset if available */
reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
if (IS_ERR(reset))
@@ -856,6 +950,9 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
sparx5->target_ct = (enum spx5_target_chiptype)
GCB_CHIP_ID_PART_ID_GET(sparx5->chip_id);
+ /* Initialize the features based on the target */
+ sparx5_init_features(sparx5);
+
/* Initialize Switchcore and internal RAMs */
err = sparx5_init_switchcore(sparx5);
if (err) {
@@ -937,15 +1034,75 @@ static void mchp_sparx5_remove(struct platform_device *pdev)
destroy_workqueue(sparx5->mact_queue);
}
+static const struct sparx5_regs sparx5_regs = {
+ .tsize = sparx5_tsize,
+ .gaddr = sparx5_gaddr,
+ .gcnt = sparx5_gcnt,
+ .gsize = sparx5_gsize,
+ .raddr = sparx5_raddr,
+ .rcnt = sparx5_rcnt,
+ .fpos = sparx5_fpos,
+ .fsize = sparx5_fsize,
+};
+
+static const struct sparx5_consts sparx5_consts = {
+ .n_ports = 65,
+ .n_ports_all = 70,
+ .n_hsch_l1_elems = 64,
+ .n_hsch_queues = 8,
+ .n_lb_groups = 10,
+ .n_pgids = 2113, /* (2048 + n_ports) */
+ .n_sio_clks = 3,
+ .n_own_upsids = 3,
+ .n_auto_cals = 7,
+ .n_filters = 1024,
+ .n_gates = 1024,
+ .n_sdlbs = 4096,
+ .n_dsm_cal_taxis = 8,
+ .buf_size = 4194280,
+ .qres_max_prio_idx = 630,
+ .qres_max_colour_idx = 638,
+ .tod_pin = 4,
+ .vcaps = sparx5_vcaps,
+ .vcaps_cfg = sparx5_vcap_inst_cfg,
+ .vcap_stats = &sparx5_vcap_stats,
+};
+
+static const struct sparx5_ops sparx5_ops = {
+ .is_port_2g5 = &sparx5_port_is_2g5,
+ .is_port_5g = &sparx5_port_is_5g,
+ .is_port_10g = &sparx5_port_is_10g,
+ .is_port_25g = &sparx5_port_is_25g,
+ .get_port_dev_index = &sparx5_port_dev_mapping,
+ .get_port_dev_bit = &sparx5_port_dev_mapping,
+ .get_hsch_max_group_rate = &sparx5_get_hsch_max_group_rate,
+ .get_sdlb_group = &sparx5_get_sdlb_group,
+ .set_port_mux = &sparx5_port_mux_set,
+ .ptp_irq_handler = &sparx5_ptp_irq_handler,
+ .dsm_calendar_calc = &sparx5_dsm_calendar_calc,
+};
+
+static const struct sparx5_match_data sparx5_desc = {
+ .iomap = sparx5_main_iomap,
+ .iomap_size = ARRAY_SIZE(sparx5_main_iomap),
+ .ioranges = 3,
+ .regs = &sparx5_regs,
+ .consts = &sparx5_consts,
+ .ops = &sparx5_ops,
+};
+
static const struct of_device_id mchp_sparx5_match[] = {
- { .compatible = "microchip,sparx5-switch" },
+ { .compatible = "microchip,sparx5-switch", .data = &sparx5_desc },
+#if IS_ENABLED(CONFIG_LAN969X_SWITCH)
+ { .compatible = "microchip,lan9691-switch", .data = &lan969x_desc },
+#endif
{ }
};
MODULE_DEVICE_TABLE(of, mchp_sparx5_match);
static struct platform_driver mchp_sparx5_driver = {
.probe = mchp_sparx5_probe,
- .remove_new = mchp_sparx5_remove,
+ .remove = mchp_sparx5_remove,
.driver = {
.name = "sparx5-switch",
.of_match_table = mchp_sparx5_match,
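
The probe-time plumbing above is the standard OF match-data pattern: the of_device_id table carries a pointer to a per-chip descriptor, and device_get_match_data() hands it back in probe. A self-contained sketch of just that mechanism, with hypothetical names throughout:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct chip_desc {
	int n_ports;
};

static const struct chip_desc demo_desc = { .n_ports = 65 };

static const struct of_device_id demo_match[] = {
	{ .compatible = "vendor,demo-switch", .data = &demo_desc },
	{ }
};
MODULE_DEVICE_TABLE(of, demo_match);

static int demo_probe(struct platform_device *pdev)
{
	const struct chip_desc *desc = device_get_match_data(&pdev->dev);

	if (!desc)
		return -EINVAL;

	dev_info(&pdev->dev, "probed with %d front ports\n", desc->n_ports);
	return 0;
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.driver = {
		.name = "demo-switch",
		.of_match_table = demo_match,
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");

Keeping lan969x_desc behind IS_ENABLED(CONFIG_LAN969X_SWITCH) means a Sparx5-only build need not link the lan969x objects that define it.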
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 3309060b1e4c..d5dd953b0a71 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -26,16 +26,28 @@
/* Target chip type */
enum spx5_target_chiptype {
- SPX5_TARGET_CT_7546 = 0x7546, /* SparX-5-64 Enterprise */
- SPX5_TARGET_CT_7549 = 0x7549, /* SparX-5-90 Enterprise */
- SPX5_TARGET_CT_7552 = 0x7552, /* SparX-5-128 Enterprise */
- SPX5_TARGET_CT_7556 = 0x7556, /* SparX-5-160 Enterprise */
- SPX5_TARGET_CT_7558 = 0x7558, /* SparX-5-200 Enterprise */
- SPX5_TARGET_CT_7546TSN = 0x47546, /* SparX-5-64i Industrial */
- SPX5_TARGET_CT_7549TSN = 0x47549, /* SparX-5-90i Industrial */
- SPX5_TARGET_CT_7552TSN = 0x47552, /* SparX-5-128i Industrial */
- SPX5_TARGET_CT_7556TSN = 0x47556, /* SparX-5-160i Industrial */
- SPX5_TARGET_CT_7558TSN = 0x47558, /* SparX-5-200i Industrial */
+ SPX5_TARGET_CT_7546 = 0x7546, /* SparX-5-64 Enterprise */
+ SPX5_TARGET_CT_7549 = 0x7549, /* SparX-5-90 Enterprise */
+ SPX5_TARGET_CT_7552 = 0x7552, /* SparX-5-128 Enterprise */
+ SPX5_TARGET_CT_7556 = 0x7556, /* SparX-5-160 Enterprise */
+ SPX5_TARGET_CT_7558 = 0x7558, /* SparX-5-200 Enterprise */
+ SPX5_TARGET_CT_7546TSN = 0x47546, /* SparX-5-64i Industrial */
+ SPX5_TARGET_CT_7549TSN = 0x47549, /* SparX-5-90i Industrial */
+ SPX5_TARGET_CT_7552TSN = 0x47552, /* SparX-5-128i Industrial */
+ SPX5_TARGET_CT_7556TSN = 0x47556, /* SparX-5-160i Industrial */
+ SPX5_TARGET_CT_7558TSN = 0x47558, /* SparX-5-200i Industrial */
+ SPX5_TARGET_CT_LAN9694 = 0x9694, /* lan969x-40 */
+ SPX5_TARGET_CT_LAN9691VAO = 0x9691, /* lan969x-40-VAO */
+ SPX5_TARGET_CT_LAN9694TSN = 0x9695, /* lan969x-40-TSN */
+ SPX5_TARGET_CT_LAN9694RED = 0x969A, /* lan969x-40-RED */
+ SPX5_TARGET_CT_LAN9696 = 0x9696, /* lan969x-60 */
+ SPX5_TARGET_CT_LAN9692VAO = 0x9692, /* lan969x-65-VAO */
+ SPX5_TARGET_CT_LAN9696TSN = 0x9697, /* lan969x-60-TSN */
+ SPX5_TARGET_CT_LAN9696RED = 0x969B, /* lan969x-60-RED */
+ SPX5_TARGET_CT_LAN9698 = 0x9698, /* lan969x-100 */
+ SPX5_TARGET_CT_LAN9693VAO = 0x9693, /* lan969x-100-VAO */
+ SPX5_TARGET_CT_LAN9698TSN = 0x9699, /* lan969x-100-TSN */
+ SPX5_TARGET_CT_LAN9698RED = 0x969C, /* lan969x-100-RED */
};
enum sparx5_port_max_tags {
@@ -51,25 +63,41 @@ enum sparx5_vlan_port_type {
SPX5_VLAN_PORT_TYPE_S_CUSTOM /* S-port using custom type */
};
+/* This is used in calendar configuration */
+enum sparx5_cal_bw {
+ SPX5_CAL_SPEED_NONE = 0,
+ SPX5_CAL_SPEED_1G = 1,
+ SPX5_CAL_SPEED_2G5 = 2,
+ SPX5_CAL_SPEED_5G = 3,
+ SPX5_CAL_SPEED_10G = 4,
+ SPX5_CAL_SPEED_25G = 5,
+ SPX5_CAL_SPEED_0G5 = 6,
+ SPX5_CAL_SPEED_12G5 = 7
+};
+
+enum sparx5_feature {
+ SPX5_FEATURE_PSFP = BIT(0),
+ SPX5_FEATURE_PTP = BIT(1),
+};
+
#define SPX5_PORTS 65
-#define SPX5_PORT_CPU (SPX5_PORTS) /* Next port is CPU port */
-#define SPX5_PORT_CPU_0 (SPX5_PORT_CPU + 0) /* CPU Port 65 */
-#define SPX5_PORT_CPU_1 (SPX5_PORT_CPU + 1) /* CPU Port 66 */
-#define SPX5_PORT_VD0 (SPX5_PORT_CPU + 2) /* VD0/Port 67 used for IPMC */
-#define SPX5_PORT_VD1 (SPX5_PORT_CPU + 3) /* VD1/Port 68 used for AFI/OAM */
-#define SPX5_PORT_VD2 (SPX5_PORT_CPU + 4) /* VD2/Port 69 used for IPinIP*/
-#define SPX5_PORTS_ALL (SPX5_PORT_CPU + 5) /* Total number of ports */
-
-#define PGID_BASE SPX5_PORTS /* Starts after port PGIDs */
-#define PGID_UC_FLOOD (PGID_BASE + 0)
-#define PGID_MC_FLOOD (PGID_BASE + 1)
-#define PGID_IPV4_MC_DATA (PGID_BASE + 2)
-#define PGID_IPV4_MC_CTRL (PGID_BASE + 3)
-#define PGID_IPV6_MC_DATA (PGID_BASE + 4)
-#define PGID_IPV6_MC_CTRL (PGID_BASE + 5)
-#define PGID_BCAST (PGID_BASE + 6)
-#define PGID_CPU (PGID_BASE + 7)
-#define PGID_MCAST_START (PGID_BASE + 8)
+#define SPX5_PORTS_ALL 70 /* Total number of ports */
+
+#define SPX5_PORT_CPU_0 0 /* CPU Port 0 */
+#define SPX5_PORT_CPU_1 1 /* CPU Port 1 */
+#define SPX5_PORT_VD0 2 /* VD0/Port used for IPMC */
+#define SPX5_PORT_VD1 3 /* VD1/Port used for AFI/OAM */
+#define SPX5_PORT_VD2 4 /* VD2/Port used for IPinIP */
+
+#define PGID_UC_FLOOD 0
+#define PGID_MC_FLOOD 1
+#define PGID_IPV4_MC_DATA 2
+#define PGID_IPV4_MC_CTRL 3
+#define PGID_IPV6_MC_DATA 4
+#define PGID_IPV6_MC_CTRL 5
+#define PGID_BCAST 6
+#define PGID_CPU 7
+#define PGID_MCAST_START 8
#define PGID_TABLE_SIZE 3290
@@ -100,8 +128,27 @@ enum sparx5_vlan_port_type {
#define IFH_PDU_TYPE_IPV4_UDP_PTP 0x6
#define IFH_PDU_TYPE_IPV6_UDP_PTP 0x7
+#define SPX5_DSM_CAL_LEN 64
+#define SPX5_DSM_CAL_MAX_DEVS_PER_TAXI 13
+#define SPX5_DSM_CAL_EMPTY 0xFFFF
+
+#define SPARX5_MAX_PTP_ID 512
+
struct sparx5;
+struct sparx5_calendar_data {
+ u32 schedule[SPX5_DSM_CAL_LEN];
+ u32 avg_dist[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 taxi_ports[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 taxi_speeds[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 dev_slots[SPX5_DSM_CAL_MAX_DEVS_PER_TAXI];
+ u32 new_slots[SPX5_DSM_CAL_LEN];
+ u32 temp_sched[SPX5_DSM_CAL_LEN];
+ u32 indices[SPX5_DSM_CAL_LEN];
+ u32 short_list[SPX5_DSM_CAL_LEN];
+ u32 long_list[SPX5_DSM_CAL_LEN];
+};
+
/* Frame DMA receive state:
* For each DB, there is a SKB, and the skb data pointer is mapped in
* the DB. Once a frame is received the skb is given to the upper layers
@@ -177,6 +224,7 @@ struct sparx5_port {
enum sparx5_core_clockfreq {
SPX5_CORE_CLOCK_DEFAULT, /* Defaults to the highest supported frequency */
SPX5_CORE_CLOCK_250MHZ, /* 250MHZ core clock frequency */
+ SPX5_CORE_CLOCK_328MHZ, /* 328MHZ core clock frequency */
SPX5_CORE_CLOCK_500MHZ, /* 500MHZ core clock frequency */
SPX5_CORE_CLOCK_625MHZ, /* 625MHZ core clock frequency */
};
@@ -226,11 +274,78 @@ struct sparx5_mall_entry {
#define SPARX5_SKB_CB(skb) \
((struct sparx5_skb_cb *)((skb)->cb))
+struct sparx5_regs {
+ const unsigned int *tsize;
+ const unsigned int *gaddr;
+ const unsigned int *gcnt;
+ const unsigned int *gsize;
+ const unsigned int *raddr;
+ const unsigned int *rcnt;
+ const unsigned int *fpos;
+ const unsigned int *fsize;
+};
+
+struct sparx5_consts {
+ u32 n_ports; /* Number of front ports */
+ u32 n_ports_all; /* Number of front ports + internal ports */
+ u32 n_hsch_l1_elems; /* Number of HSCH layer 1 elements */
+ u32 n_hsch_queues; /* Number of HSCH queues */
+ u32 n_lb_groups; /* Number of leaky bucket groups */
+ u32 n_pgids; /* Number of PGIDs */
+ u32 n_sio_clks; /* Number of serial IO clocks */
+ u32 n_own_upsids; /* Number of own UPSIDs */
+ u32 n_auto_cals; /* Number of auto calendars */
+ u32 n_filters; /* Number of PSFP filters */
+ u32 n_gates; /* Number of PSFP gates */
+ u32 n_sdlbs; /* Number of service dual leaky buckets */
+ u32 n_dsm_cal_taxis; /* Number of DSM calendar taxis */
+ u32 buf_size; /* Amount of QLIM watermark memory */
+ u32 qres_max_prio_idx; /* Maximum QRES prio index */
+ u32 qres_max_colour_idx; /* Maximum QRES colour index */
+ u32 tod_pin; /* PTP TOD pin */
+ const struct sparx5_vcap_inst *vcaps_cfg;
+ const struct vcap_info *vcaps;
+ const struct vcap_statistics *vcap_stats;
+};
+
+struct sparx5_ops {
+ bool (*is_port_2g5)(int portno);
+ bool (*is_port_5g)(int portno);
+ bool (*is_port_10g)(int portno);
+ bool (*is_port_25g)(int portno);
+ u32 (*get_port_dev_index)(struct sparx5 *sparx5, int port);
+ u32 (*get_port_dev_bit)(struct sparx5 *sparx5, int port);
+ u32 (*get_hsch_max_group_rate)(int grp);
+ struct sparx5_sdlb_group *(*get_sdlb_group)(int idx);
+ int (*set_port_mux)(struct sparx5 *sparx5, struct sparx5_port *port,
+ struct sparx5_port_config *conf);
+
+ irqreturn_t (*ptp_irq_handler)(int irq, void *args);
+ int (*dsm_calendar_calc)(struct sparx5 *sparx5, u32 taxi,
+ struct sparx5_calendar_data *data);
+};
+
+struct sparx5_main_io_resource {
+ enum sparx5_target id;
+ phys_addr_t offset;
+ int range;
+};
+
+struct sparx5_match_data {
+ const struct sparx5_regs *regs;
+ const struct sparx5_consts *consts;
+ const struct sparx5_ops *ops;
+ const struct sparx5_main_io_resource *iomap;
+ int ioranges;
+ int iomap_size;
+};
+
struct sparx5 {
struct platform_device *pdev;
struct device *dev;
u32 chip_id;
enum spx5_target_chiptype target_ct;
+ u32 features;
void __iomem *regs[NUM_TARGETS];
int port_count;
struct mutex lock; /* MAC reg lock */
@@ -293,8 +408,13 @@ struct sparx5 {
struct list_head mall_entries;
/* Common root for debugfs */
struct dentry *debugfs_root;
+ const struct sparx5_match_data *data;
};
+/* sparx5_main.c */
+bool is_sparx5(struct sparx5 *sparx5);
+bool sparx5_has_feature(struct sparx5 *sparx5, enum sparx5_feature feature);
+
/* sparx5_switchdev.c */
int sparx5_register_notifier_blocks(struct sparx5 *sparx5);
void sparx5_unregister_notifier_blocks(struct sparx5 *sparx5);
@@ -306,7 +426,7 @@ struct frame_info {
};
void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp);
-void sparx5_ifh_parse(u32 *ifh, struct frame_info *info);
+void sparx5_ifh_parse(struct sparx5 *sparx5, u32 *ifh, struct frame_info *info);
irqreturn_t sparx5_xtr_handler(int irq, void *_priv);
netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev);
int sparx5_manual_injection_mode(struct sparx5 *sparx5);
@@ -355,6 +475,11 @@ void sparx5_vlan_port_apply(struct sparx5 *sparx5, struct sparx5_port *port);
/* sparx5_calendar.c */
int sparx5_config_auto_calendar(struct sparx5 *sparx5);
int sparx5_config_dsm_calendar(struct sparx5 *sparx5);
+int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
+ struct sparx5_calendar_data *data);
+u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed);
+enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5, u32 portno);
+
/* sparx5_ethtool.c */
void sparx5_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats);
@@ -371,11 +496,14 @@ static inline int sparx5_dcb_init(struct sparx5 *sparx5)
#endif
/* sparx5_netdev.c */
-void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp);
+void sparx5_set_port_ifh_timestamp(struct sparx5 *sparx5, void *ifh_hdr,
+ u64 timestamp);
void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op);
-void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type);
-void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset);
-void sparx5_set_port_ifh(void *ifh_hdr, u16 portno);
+void sparx5_set_port_ifh_pdu_type(struct sparx5 *sparx5, void *ifh_hdr,
+ u32 pdu_type);
+void sparx5_set_port_ifh_pdu_w16_offset(struct sparx5 *sparx5, void *ifh_hdr,
+ u32 pdu_w16_offset);
+void sparx5_set_port_ifh(struct sparx5 *sparx5, void *ifh_hdr, u16 portno);
bool sparx5_netdevice_check(const struct net_device *dev);
struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno);
int sparx5_register_netdevs(struct sparx5 *sparx5);
@@ -398,6 +526,9 @@ void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
struct sk_buff *skb);
irqreturn_t sparx5_ptp_irq_handler(int irq, void *args);
int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
+void sparx5_get_hwtimestamp(struct sparx5 *sparx5,
+ struct timespec64 *ts,
+ u32 nsec);
/* sparx5_vcap_impl.c */
int sparx5_vcap_init(struct sparx5 *sparx5);
@@ -413,6 +544,7 @@ enum sparx5_pgid_type {
void sparx5_pgid_init(struct sparx5 *spx5);
int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx);
int sparx5_pgid_free(struct sparx5 *spx5, u16 idx);
+int sparx5_get_pgid(struct sparx5 *sparx5, int pgid);
/* sparx5_pool.c */
struct sparx5_pool_entry {
@@ -426,6 +558,11 @@ int sparx5_pool_get(struct sparx5_pool_entry *pool, int size, u32 *id);
int sparx5_pool_get_with_idx(struct sparx5_pool_entry *pool, int size, u32 idx,
u32 *id);
+/* sparx5_port.c */
+int sparx5_port_mux_set(struct sparx5 *sparx5, struct sparx5_port *port,
+ struct sparx5_port_config *conf);
+int sparx5_get_internal_port(struct sparx5 *sparx5, int port);
+
/* sparx5_sdlb.c */
#define SPX5_SDLB_PUP_TOKEN_DISABLE 0x1FFF
#define SPX5_SDLB_PUP_TOKEN_MAX (SPX5_SDLB_PUP_TOKEN_DISABLE - 1)
@@ -444,10 +581,11 @@ struct sparx5_sdlb_group {
};
extern struct sparx5_sdlb_group sdlb_groups[SPX5_SDLB_GROUP_CNT];
+struct sparx5_sdlb_group *sparx5_get_sdlb_group(int idx);
int sparx5_sdlb_pup_token_get(struct sparx5 *sparx5, u32 pup_interval,
u64 rate);
-int sparx5_sdlb_clk_hz_get(struct sparx5 *sparx5);
+u64 sparx5_sdlb_clk_hz_get(struct sparx5 *sparx5);
int sparx5_sdlb_group_get_by_rate(struct sparx5 *sparx5, u32 rate, u32 burst);
int sparx5_sdlb_group_get_by_index(struct sparx5 *sparx5, u32 idx, u32 *group);
@@ -549,6 +687,8 @@ static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock)
switch (cclock) {
case SPX5_CORE_CLOCK_250MHZ:
return 4000;
+ case SPX5_CORE_CLOCK_328MHZ:
+ return 3048;
case SPX5_CORE_CLOCK_500MHZ:
return 2000;
case SPX5_CORE_CLOCK_625MHZ:
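
The new 328MHz entry in sparx5_clk_period() returns 3048 ps, which implies the lan969x core clock is really 328.125 MHz: 10^12 / 328,125,000 ≈ 3047.6 ps, rounded to nearest. A quick userspace check (illustrative only) reproduces all four table entries with round-to-nearest division:

#include <stdio.h>

/* Reproduce the sparx5_clk_period() table: period in picoseconds,
 * rounded to the nearest integer.
 */
int main(void)
{
	const unsigned long long hz[] = { 250000000ULL, 328125000ULL,
					  500000000ULL, 625000000ULL };

	for (int i = 0; i < 4; i++)
		printf("%llu Hz -> %llu ps\n", hz[i],
		       (1000000000000ULL + hz[i] / 2) / hz[i]);
	return 0;
}

This prints 4000, 3048, 2000 and 1600 ps, matching the switch cases above.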
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
index 22acc1f3380c..561344f19062 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -1,11 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0+
* Microchip Sparx5 Switch driver
*
- * Copyright (c) 2021 Microchip Technology Inc.
+ * Copyright (c) 2024 Microchip Technology Inc.
*/
-/* This file is autogenerated by cml-utils 2023-02-10 11:18:53 +0100.
- * Commit ID: c30fb4bf0281cd4a7133bdab6682f9e43c872ada
+/* This file is autogenerated by cml-utils 2024-10-04 10:40:40 +0200.
+ * Commit ID: 9d07b8d19363f3cd3590ddb3f7a2e2768e16524b
*/
#ifndef _SPARX5_MAIN_REGS_H_
@@ -15,6 +15,8 @@
#include <linux/types.h>
#include <linux/bug.h>
+#include "sparx5_regs.h"
+
enum sparx5_target {
TARGET_ANA_AC = 1,
TARGET_ANA_ACL = 2,
@@ -52,14 +54,27 @@ enum sparx5_target {
TARGET_VCAP_SUPER = 326,
TARGET_VOP = 327,
TARGET_XQS = 331,
- NUM_TARGETS = 332
+ NUM_TARGETS = 517
};
+/* sparx5_main.c
+ *
+ * This is used by the register macros to access chip differences (if any) in:
+ * target size, register address, register count, group address, group count,
+ * group size, field position and field size.
+ */
+extern const struct sparx5_regs *regs;
+
+/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
+#define spx5_field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
+#define spx5_field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
+
#define __REG(...) __VA_ARGS__
-/* ANA_AC:RAM_CTRL:RAM_INIT */
-#define ANA_AC_RAM_INIT __REG(TARGET_ANA_AC,\
- 0, 1, 839108, 0, 1, 4, 0, 0, 1, 4)
+/* ANA_AC:RAM_CTRL:RAM_INIT */
+#define ANA_AC_RAM_INIT \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_RAM_CTRL], 0, 1, 4, 0,\
+ 0, 1, 4)
#define ANA_AC_RAM_INIT_RAM_INIT BIT(1)
#define ANA_AC_RAM_INIT_RAM_INIT_SET(x)\
@@ -73,9 +88,10 @@ enum sparx5_target {
#define ANA_AC_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(ANA_AC_RAM_INIT_RAM_CFG_HOOK, x)
-/* ANA_AC:PS_COMMON:OWN_UPSID */
-#define ANA_AC_OWN_UPSID(r) __REG(TARGET_ANA_AC,\
- 0, 1, 894472, 0, 1, 352, 52, r, 3, 4)
+/* ANA_AC:PS_COMMON:OWN_UPSID */
+#define ANA_AC_OWN_UPSID(r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_PS_COMMON], 0, 1, 352,\
+ 52, r, regs->rcnt[RC_ANA_AC_OWN_UPSID], 4)
#define ANA_AC_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define ANA_AC_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -83,75 +99,86 @@ enum sparx5_target {
#define ANA_AC_OWN_UPSID_OWN_UPSID_GET(x)\
FIELD_GET(ANA_AC_OWN_UPSID_OWN_UPSID, x)
-/* ANA_AC:MIRROR_PROBE:PROBE_CFG */
-#define ANA_AC_PROBE_CFG(g) \
- __REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 0, 0, 1, 4)
+/* ANA_AC:MIRROR_PROBE:PROBE_CFG */
+#define ANA_AC_PROBE_CFG(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_MIRROR_PROBE], g, 3, \
+ 32, 0, 0, 1, 4)
-#define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD GENMASK(31, 27)
+#define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD GENMASK(31, 27)
#define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD_SET(x)\
FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD, x)
#define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD_GET(x)\
FIELD_GET(ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD, x)
-#define ANA_AC_PROBE_CFG_PROBE_CPU_SET GENMASK(26, 19)
+#define ANA_AC_PROBE_CFG_PROBE_CPU_SET GENMASK(26, 19)
#define ANA_AC_PROBE_CFG_PROBE_CPU_SET_SET(x)\
FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_CPU_SET, x)
#define ANA_AC_PROBE_CFG_PROBE_CPU_SET_GET(x)\
FIELD_GET(ANA_AC_PROBE_CFG_PROBE_CPU_SET, x)
-#define ANA_AC_PROBE_CFG_PROBE_VID GENMASK(18, 6)
+#define ANA_AC_PROBE_CFG_PROBE_VID GENMASK(18, 6)
#define ANA_AC_PROBE_CFG_PROBE_VID_SET(x)\
FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_VID, x)
#define ANA_AC_PROBE_CFG_PROBE_VID_GET(x)\
FIELD_GET(ANA_AC_PROBE_CFG_PROBE_VID, x)
-#define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE GENMASK(5, 4)
+#define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE GENMASK(5, 4)
#define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE_SET(x)\
FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_VLAN_MODE, x)
#define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE_GET(x)\
FIELD_GET(ANA_AC_PROBE_CFG_PROBE_VLAN_MODE, x)
-#define ANA_AC_PROBE_CFG_PROBE_MAC_MODE GENMASK(3, 2)
+#define ANA_AC_PROBE_CFG_PROBE_MAC_MODE GENMASK(3, 2)
#define ANA_AC_PROBE_CFG_PROBE_MAC_MODE_SET(x)\
FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_MAC_MODE, x)
#define ANA_AC_PROBE_CFG_PROBE_MAC_MODE_GET(x)\
FIELD_GET(ANA_AC_PROBE_CFG_PROBE_MAC_MODE, x)
-#define ANA_AC_PROBE_CFG_PROBE_DIRECTION GENMASK(1, 0)
+#define ANA_AC_PROBE_CFG_PROBE_DIRECTION GENMASK(1, 0)
#define ANA_AC_PROBE_CFG_PROBE_DIRECTION_SET(x)\
FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_DIRECTION, x)
#define ANA_AC_PROBE_CFG_PROBE_DIRECTION_GET(x)\
FIELD_GET(ANA_AC_PROBE_CFG_PROBE_DIRECTION, x)
-/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG */
-#define ANA_AC_PROBE_PORT_CFG(g) \
- __REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 8, 0, 1, 4)
+/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG */
+#define ANA_AC_PROBE_PORT_CFG(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_MIRROR_PROBE], g, 3, \
+ 32, 8, 0, 1, 4)
-/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG1 */
-#define ANA_AC_PROBE_PORT_CFG1(g) \
- __REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 12, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG1 */
+#define ANA_AC_PROBE_PORT_CFG1(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_MIRROR_PROBE], g, 3, \
+ 32, 12, 0, 1, 4)
-/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG2 */
-#define ANA_AC_PROBE_PORT_CFG2(g) \
- __REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 16, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG2 */
+#define ANA_AC_PROBE_PORT_CFG2(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_MIRROR_PROBE], g, 3, \
+ 32, 16, 0, 1, 4)
-#define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2 BIT(0)
+#define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2 BIT(0)
#define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2_SET(x)\
FIELD_PREP(ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2, x)
#define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2_GET(x)\
FIELD_GET(ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2, x)
-/* ANA_AC:SRC:SRC_CFG */
-#define ANA_AC_SRC_CFG(g) __REG(TARGET_ANA_AC,\
- 0, 1, 849920, g, 102, 16, 0, 0, 1, 4)
+/* ANA_AC:SRC:SRC_CFG */
+#define ANA_AC_SRC_CFG(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SRC], g, \
+ regs->gcnt[GC_ANA_AC_SRC], regs->gsize[GW_ANA_AC_SRC], 0, 0, 1, 4)
-/* ANA_AC:SRC:SRC_CFG1 */
-#define ANA_AC_SRC_CFG1(g) __REG(TARGET_ANA_AC,\
- 0, 1, 849920, g, 102, 16, 4, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_AC:SRC:SRC_CFG1 */
+#define ANA_AC_SRC_CFG1(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SRC], g, \
+ regs->gcnt[GC_ANA_AC_SRC], regs->gsize[GW_ANA_AC_SRC], 4, 0, 1, 4)
-/* ANA_AC:SRC:SRC_CFG2 */
-#define ANA_AC_SRC_CFG2(g) __REG(TARGET_ANA_AC,\
- 0, 1, 849920, g, 102, 16, 8, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_AC:SRC:SRC_CFG2 */
+#define ANA_AC_SRC_CFG2(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SRC], g, \
+ regs->gcnt[GC_ANA_AC_SRC], regs->gsize[GW_ANA_AC_SRC], 8, 0, 1, 4)
#define ANA_AC_SRC_CFG2_PORT_MASK2 BIT(0)
#define ANA_AC_SRC_CFG2_PORT_MASK2_SET(x)\
@@ -159,17 +186,22 @@ enum sparx5_target {
#define ANA_AC_SRC_CFG2_PORT_MASK2_GET(x)\
FIELD_GET(ANA_AC_SRC_CFG2_PORT_MASK2, x)
-/* ANA_AC:PGID:PGID_CFG */
-#define ANA_AC_PGID_CFG(g) __REG(TARGET_ANA_AC,\
- 0, 1, 786432, g, 3290, 16, 0, 0, 1, 4)
+/* ANA_AC:PGID:PGID_CFG */
+#define ANA_AC_PGID_CFG(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_PGID], g, \
+ regs->gcnt[GC_ANA_AC_PGID], 16, 0, 0, 1, 4)
-/* ANA_AC:PGID:PGID_CFG1 */
-#define ANA_AC_PGID_CFG1(g) __REG(TARGET_ANA_AC,\
- 0, 1, 786432, g, 3290, 16, 4, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_AC:PGID:PGID_CFG1 */
+#define ANA_AC_PGID_CFG1(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_PGID], g, \
+ regs->gcnt[GC_ANA_AC_PGID], 16, 4, 0, 1, 4)
-/* ANA_AC:PGID:PGID_CFG2 */
-#define ANA_AC_PGID_CFG2(g) __REG(TARGET_ANA_AC,\
- 0, 1, 786432, g, 3290, 16, 8, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_AC:PGID:PGID_CFG2 */
+#define ANA_AC_PGID_CFG2(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_PGID], g, \
+ regs->gcnt[GC_ANA_AC_PGID], 16, 8, 0, 1, 4)
#define ANA_AC_PGID_CFG2_PORT_MASK2 BIT(0)
#define ANA_AC_PGID_CFG2_PORT_MASK2_SET(x)\
@@ -177,9 +209,10 @@ enum sparx5_target {
#define ANA_AC_PGID_CFG2_PORT_MASK2_GET(x)\
FIELD_GET(ANA_AC_PGID_CFG2_PORT_MASK2, x)
-/* ANA_AC:PGID:PGID_MISC_CFG */
-#define ANA_AC_PGID_MISC_CFG(g) __REG(TARGET_ANA_AC,\
- 0, 1, 786432, g, 3290, 16, 12, 0, 1, 4)
+/* ANA_AC:PGID:PGID_MISC_CFG */
+#define ANA_AC_PGID_MISC_CFG(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_PGID], g, \
+ regs->gcnt[GC_ANA_AC_PGID], 16, 12, 0, 1, 4)
#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU GENMASK(6, 4)
#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU_SET(x)\
@@ -199,9 +232,10 @@ enum sparx5_target {
#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(x)\
FIELD_GET(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, x)
-/* ANA_AC:TSN_SF:TSN_SF */
-#define ANA_AC_TSN_SF __REG(TARGET_ANA_AC,\
- 0, 1, 839136, 0, 1, 4, 0, 0, 1, 4)
+/* ANA_AC:TSN_SF:TSN_SF */
+#define ANA_AC_TSN_SF \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_TSN_SF], 0, 1, 4, 0, \
+ 0, 1, 4)
#define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY BIT(9)
#define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY_SET(x)\
@@ -209,21 +243,24 @@ enum sparx5_target {
#define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY_GET(x)\
FIELD_GET(ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY, x)
-#define ANA_AC_TSN_SF_PORT_NUM GENMASK(8, 0)
+#define ANA_AC_TSN_SF_PORT_NUM\
+ GENMASK(regs->fsize[FW_ANA_AC_TSN_SF_PORT_NUM] + 0 - 1, 0)
#define ANA_AC_TSN_SF_PORT_NUM_SET(x)\
- FIELD_PREP(ANA_AC_TSN_SF_PORT_NUM, x)
+ spx5_field_prep(ANA_AC_TSN_SF_PORT_NUM, x)
#define ANA_AC_TSN_SF_PORT_NUM_GET(x)\
- FIELD_GET(ANA_AC_TSN_SF_PORT_NUM, x)
+ spx5_field_get(ANA_AC_TSN_SF_PORT_NUM, x)
-/* ANA_AC:TSN_SF_CFG:TSN_SF_CFG */
-#define ANA_AC_TSN_SF_CFG(g) __REG(TARGET_ANA_AC,\
- 0, 1, 839680, g, 1024, 4, 0, 0, 1, 4)
+/* ANA_AC:TSN_SF_CFG:TSN_SF_CFG */
+#define ANA_AC_TSN_SF_CFG(g) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_TSN_SF_CFG], g, \
+ regs->gcnt[GC_ANA_AC_TSN_SF_CFG], 4, 0, 0, 1, 4)
-#define ANA_AC_TSN_SF_CFG_TSN_SGID GENMASK(25, 16)
+#define ANA_AC_TSN_SF_CFG_TSN_SGID\
+ GENMASK(regs->fsize[FW_ANA_AC_TSN_SF_CFG_TSN_SGID] + 16 - 1, 16)
#define ANA_AC_TSN_SF_CFG_TSN_SGID_SET(x)\
- FIELD_PREP(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
+ spx5_field_prep(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
#define ANA_AC_TSN_SF_CFG_TSN_SGID_GET(x)\
- FIELD_GET(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
+ spx5_field_get(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
#define ANA_AC_TSN_SF_CFG_TSN_MAX_SDU GENMASK(15, 2)
#define ANA_AC_TSN_SF_CFG_TSN_MAX_SDU_SET(x)\
@@ -243,9 +280,10 @@ enum sparx5_target {
#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE_GET(x)\
FIELD_GET(ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE, x)
-/* ANA_AC:TSN_SF_STATUS:TSN_SF_STATUS */
-#define ANA_AC_TSN_SF_STATUS __REG(TARGET_ANA_AC,\
- 0, 1, 839072, 0, 1, 16, 0, 0, 1, 4)
+/* ANA_AC:TSN_SF_STATUS:TSN_SF_STATUS */
+#define ANA_AC_TSN_SF_STATUS \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_TSN_SF_STATUS], 0, 1, \
+ 16, 0, 0, 1, 4)
#define ANA_AC_TSN_SF_STATUS_FRM_LEN GENMASK(25, 12)
#define ANA_AC_TSN_SF_STATUS_FRM_LEN_SET(x)\
@@ -259,11 +297,12 @@ enum sparx5_target {
#define ANA_AC_TSN_SF_STATUS_DLB_DROP_GET(x)\
FIELD_GET(ANA_AC_TSN_SF_STATUS_DLB_DROP, x)
-#define ANA_AC_TSN_SF_STATUS_TSN_SFID GENMASK(10, 1)
+#define ANA_AC_TSN_SF_STATUS_TSN_SFID\
+ GENMASK(regs->fsize[FW_ANA_AC_TSN_SF_STATUS_TSN_SFID] + 1 - 1, 1)
#define ANA_AC_TSN_SF_STATUS_TSN_SFID_SET(x)\
- FIELD_PREP(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
+ spx5_field_prep(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
#define ANA_AC_TSN_SF_STATUS_TSN_SFID_GET(x)\
- FIELD_GET(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
+ spx5_field_get(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
#define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD BIT(0)
#define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD_SET(x)\
@@ -271,15 +310,17 @@ enum sparx5_target {
#define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD_GET(x)\
FIELD_GET(ANA_AC_TSN_SF_STATUS_TSTAMP_VLD, x)
-/* ANA_AC:SG_ACCESS:SG_ACCESS_CTRL */
-#define ANA_AC_SG_ACCESS_CTRL __REG(TARGET_ANA_AC,\
- 0, 1, 839140, 0, 1, 12, 0, 0, 1, 4)
+/* ANA_AC:SG_ACCESS:SG_ACCESS_CTRL */
+#define ANA_AC_SG_ACCESS_CTRL \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_ACCESS], 0, 1, 12, \
+ 0, 0, 1, 4)
-#define ANA_AC_SG_ACCESS_CTRL_SGID GENMASK(9, 0)
+#define ANA_AC_SG_ACCESS_CTRL_SGID\
+ GENMASK(regs->fsize[FW_ANA_AC_SG_ACCESS_CTRL_SGID] + 0 - 1, 0)
#define ANA_AC_SG_ACCESS_CTRL_SGID_SET(x)\
- FIELD_PREP(ANA_AC_SG_ACCESS_CTRL_SGID, x)
+ spx5_field_prep(ANA_AC_SG_ACCESS_CTRL_SGID, x)
#define ANA_AC_SG_ACCESS_CTRL_SGID_GET(x)\
- FIELD_GET(ANA_AC_SG_ACCESS_CTRL_SGID, x)
+ spx5_field_get(ANA_AC_SG_ACCESS_CTRL_SGID, x)
#define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE BIT(28)
#define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_SET(x)\
@@ -287,9 +328,10 @@ enum sparx5_target {
#define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_GET(x)\
FIELD_GET(ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE, x)
-/* ANA_AC:SG_ACCESS:SG_CYCLETIME_UPDATE_PERIOD */
-#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD __REG(TARGET_ANA_AC,\
- 0, 1, 839140, 0, 1, 12, 8, 0, 1, 4)
+/* ANA_AC:SG_ACCESS:SG_CYCLETIME_UPDATE_PERIOD */
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_ACCESS], 0, 1, 12, \
+ 8, 0, 1, 4)
#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS GENMASK(15, 0)
#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS_SET(x)\
@@ -303,17 +345,20 @@ enum sparx5_target {
#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA_GET(x)\
FIELD_GET(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA, x)
-/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_1 */
-#define ANA_AC_SG_CONFIG_REG_1 __REG(TARGET_ANA_AC,\
- 0, 1, 851584, 0, 1, 128, 48, 0, 1, 4)
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_1 */
+#define ANA_AC_SG_CONFIG_REG_1 \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+ 48, 0, 1, 4)
-/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_2 */
-#define ANA_AC_SG_CONFIG_REG_2 __REG(TARGET_ANA_AC,\
- 0, 1, 851584, 0, 1, 128, 52, 0, 1, 4)
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_2 */
+#define ANA_AC_SG_CONFIG_REG_2 \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+ 52, 0, 1, 4)
-/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_3 */
-#define ANA_AC_SG_CONFIG_REG_3 __REG(TARGET_ANA_AC,\
- 0, 1, 851584, 0, 1, 128, 56, 0, 1, 4)
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_3 */
+#define ANA_AC_SG_CONFIG_REG_3 \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+ 56, 0, 1, 4)
#define ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB GENMASK(15, 0)
#define ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_SET(x)\
@@ -369,17 +414,20 @@ enum sparx5_target {
#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_GET(x)\
FIELD_GET(ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED, x)
-/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_4 */
-#define ANA_AC_SG_CONFIG_REG_4 __REG(TARGET_ANA_AC,\
- 0, 1, 851584, 0, 1, 128, 60, 0, 1, 4)
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_4 */
+#define ANA_AC_SG_CONFIG_REG_4 \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+ 60, 0, 1, 4)
-/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_5 */
-#define ANA_AC_SG_CONFIG_REG_5 __REG(TARGET_ANA_AC,\
- 0, 1, 851584, 0, 1, 128, 64, 0, 1, 4)
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_5 */
+#define ANA_AC_SG_CONFIG_REG_5 \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+ 64, 0, 1, 4)
-/* ANA_AC:SG_CONFIG:SG_GCL_GS_CONFIG */
-#define ANA_AC_SG_GCL_GS_CONFIG(r) __REG(TARGET_ANA_AC,\
- 0, 1, 851584, 0, 1, 128, 0, r, 4, 4)
+/* ANA_AC:SG_CONFIG:SG_GCL_GS_CONFIG */
+#define ANA_AC_SG_GCL_GS_CONFIG(r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+ 0, r, 4, 4)
#define ANA_AC_SG_GCL_GS_CONFIG_IPS GENMASK(3, 0)
#define ANA_AC_SG_GCL_GS_CONFIG_IPS_SET(x)\
@@ -393,25 +441,30 @@ enum sparx5_target {
#define ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE_GET(x)\
FIELD_GET(ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE, x)
-/* ANA_AC:SG_CONFIG:SG_GCL_TI_CONFIG */
-#define ANA_AC_SG_GCL_TI_CONFIG(r) __REG(TARGET_ANA_AC,\
- 0, 1, 851584, 0, 1, 128, 16, r, 4, 4)
+/* ANA_AC:SG_CONFIG:SG_GCL_TI_CONFIG */
+#define ANA_AC_SG_GCL_TI_CONFIG(r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+ 16, r, 4, 4)
-/* ANA_AC:SG_CONFIG:SG_GCL_OCT_CONFIG */
-#define ANA_AC_SG_GCL_OCT_CONFIG(r) __REG(TARGET_ANA_AC,\
- 0, 1, 851584, 0, 1, 128, 32, r, 4, 4)
+/* ANA_AC:SG_CONFIG:SG_GCL_OCT_CONFIG */
+#define ANA_AC_SG_GCL_OCT_CONFIG(r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_CONFIG], 0, 1, 128,\
+ 32, r, 4, 4)
-/* ANA_AC:SG_STATUS:SG_STATUS_REG_1 */
-#define ANA_AC_SG_STATUS_REG_1 __REG(TARGET_ANA_AC,\
- 0, 1, 839088, 0, 1, 16, 0, 0, 1, 4)
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_1 */
+#define ANA_AC_SG_STATUS_REG_1 \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_STATUS], 0, 1, 16, \
+ 0, 0, 1, 4)
-/* ANA_AC:SG_STATUS:SG_STATUS_REG_2 */
-#define ANA_AC_SG_STATUS_REG_2 __REG(TARGET_ANA_AC,\
- 0, 1, 839088, 0, 1, 16, 4, 0, 1, 4)
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_2 */
+#define ANA_AC_SG_STATUS_REG_2 \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_STATUS], 0, 1, 16, \
+ 4, 0, 1, 4)
-/* ANA_AC:SG_STATUS:SG_STATUS_REG_3 */
-#define ANA_AC_SG_STATUS_REG_3 __REG(TARGET_ANA_AC,\
- 0, 1, 839088, 0, 1, 16, 8, 0, 1, 4)
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_3 */
+#define ANA_AC_SG_STATUS_REG_3 \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_STATUS], 0, 1, 16, \
+ 8, 0, 1, 4)
#define ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB GENMASK(15, 0)
#define ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_SET(x)\
@@ -443,23 +496,27 @@ enum sparx5_target {
#define ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX_GET(x)\
FIELD_GET(ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX, x)
-/* ANA_AC:SG_STATUS:SG_STATUS_REG_4 */
-#define ANA_AC_SG_STATUS_REG_4 __REG(TARGET_ANA_AC,\
- 0, 1, 839088, 0, 1, 16, 12, 0, 1, 4)
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_4 */
+#define ANA_AC_SG_STATUS_REG_4 \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_SG_STATUS], 0, 1, 16, \
+ 12, 0, 1, 4)
-/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_GLOBAL_EVENT_MASK */
-#define ANA_AC_PORT_SGE_CFG(r) __REG(TARGET_ANA_AC,\
- 0, 1, 851552, 0, 1, 20, 0, r, 4, 4)
+/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_GLOBAL_EVENT_MASK */
+#define ANA_AC_PORT_SGE_CFG(r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_GLOBAL_CFG_PORT],\
+ 0, 1, 20, 0, r, 4, 4)
-#define ANA_AC_PORT_SGE_CFG_MASK GENMASK(15, 0)
+#define ANA_AC_PORT_SGE_CFG_MASK\
+ GENMASK(regs->fsize[FW_ANA_AC_PORT_SGE_CFG_MASK] + 0 - 1, 0)
#define ANA_AC_PORT_SGE_CFG_MASK_SET(x)\
- FIELD_PREP(ANA_AC_PORT_SGE_CFG_MASK, x)
+ spx5_field_prep(ANA_AC_PORT_SGE_CFG_MASK, x)
#define ANA_AC_PORT_SGE_CFG_MASK_GET(x)\
- FIELD_GET(ANA_AC_PORT_SGE_CFG_MASK, x)
+ spx5_field_get(ANA_AC_PORT_SGE_CFG_MASK, x)
-/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_RESET */
-#define ANA_AC_STAT_RESET __REG(TARGET_ANA_AC,\
- 0, 1, 851552, 0, 1, 20, 16, 0, 1, 4)
+/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_RESET */
+#define ANA_AC_STAT_RESET \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_GLOBAL_CFG_PORT],\
+ 0, 1, 20, 16, 0, 1, 4)
#define ANA_AC_STAT_RESET_RESET BIT(0)
#define ANA_AC_STAT_RESET_RESET_SET(x)\
@@ -467,9 +524,10 @@ enum sparx5_target {
#define ANA_AC_STAT_RESET_RESET_GET(x)\
FIELD_GET(ANA_AC_STAT_RESET_RESET, x)
-/* ANA_AC:STAT_CNT_CFG_PORT:STAT_CFG */
-#define ANA_AC_PORT_STAT_CFG(g, r) __REG(TARGET_ANA_AC,\
- 0, 1, 843776, g, 70, 64, 4, r, 4, 4)
+/* ANA_AC:STAT_CNT_CFG_PORT:STAT_CFG */
+#define ANA_AC_PORT_STAT_CFG(g, r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_CNT_CFG_PORT], g,\
+ regs->gcnt[GC_ANA_AC_STAT_CNT_CFG_PORT], 64, 4, r, 4, 4)
#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK GENMASK(11, 4)
#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_SET(x)\
@@ -489,13 +547,15 @@ enum sparx5_target {
#define ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE_GET(x)\
FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE, x)
-/* ANA_AC:STAT_CNT_CFG_PORT:STAT_LSB_CNT */
-#define ANA_AC_PORT_STAT_LSB_CNT(g, r) __REG(TARGET_ANA_AC,\
- 0, 1, 843776, g, 70, 64, 20, r, 4, 4)
+/* ANA_AC:STAT_CNT_CFG_PORT:STAT_LSB_CNT */
+#define ANA_AC_PORT_STAT_LSB_CNT(g, r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_CNT_CFG_PORT], g,\
+ regs->gcnt[GC_ANA_AC_STAT_CNT_CFG_PORT], 64, 20, r, 4, 4)
-/* ANA_AC:STAT_GLOBAL_CFG_ACL:GLOBAL_CNT_FRM_TYPE_CFG */
-#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG(r) __REG(TARGET_ANA_AC,\
- 0, 1, 893792, 0, 1, 24, 0, r, 2, 4)
+/* ANA_AC:STAT_GLOBAL_CFG_ACL:GLOBAL_CNT_FRM_TYPE_CFG */
+#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG(r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_GLOBAL_CFG_ACL], \
+ 0, 1, 24, 0, r, 2, 4)
#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE GENMASK(2, 0)
#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE_SET(x)\
@@ -503,9 +563,10 @@ enum sparx5_target {
#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE_GET(x)\
FIELD_GET(ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE, x)
-/* ANA_AC:STAT_GLOBAL_CFG_ACL:STAT_GLOBAL_CFG */
-#define ANA_AC_ACL_STAT_GLOBAL_CFG(r) __REG(TARGET_ANA_AC,\
- 0, 1, 893792, 0, 1, 24, 8, r, 2, 4)
+/* ANA_AC:STAT_GLOBAL_CFG_ACL:STAT_GLOBAL_CFG */
+#define ANA_AC_ACL_STAT_GLOBAL_CFG(r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_GLOBAL_CFG_ACL], \
+ 0, 1, 24, 8, r, 2, 4)
#define ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE BIT(0)
#define ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE_SET(x)\
@@ -513,9 +574,10 @@ enum sparx5_target {
#define ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE_GET(x)\
FIELD_GET(ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE, x)
-/* ANA_AC:STAT_GLOBAL_CFG_ACL:STAT_GLOBAL_EVENT_MASK */
-#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK(r) __REG(TARGET_ANA_AC,\
- 0, 1, 893792, 0, 1, 24, 16, r, 2, 4)
+/* ANA_AC:STAT_GLOBAL_CFG_ACL:STAT_GLOBAL_EVENT_MASK */
+#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK(r) \
+ __REG(TARGET_ANA_AC, 0, 1, regs->gaddr[GA_ANA_AC_STAT_GLOBAL_CFG_ACL], \
+ 0, 1, 24, 16, r, 2, 4)
#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK GENMASK(3, 0)
#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK_SET(x)\
@@ -523,9 +585,10 @@ enum sparx5_target {
#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK_GET(x)\
FIELD_GET(ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK, x)
-/* ANA_ACL:COMMON:VCAP_S2_CFG */
-#define ANA_ACL_VCAP_S2_CFG(r) __REG(TARGET_ANA_ACL,\
- 0, 1, 32768, 0, 1, 592, 0, r, 70, 4)
+/* ANA_ACL:COMMON:VCAP_S2_CFG */
+#define ANA_ACL_VCAP_S2_CFG(r) \
+ __REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_COMMON], 0, 1, 592, \
+ 0, r, regs->rcnt[RC_ANA_ACL_VCAP_S2_CFG], 4)
#define ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA BIT(28)
#define ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA_SET(x)\
@@ -611,9 +674,10 @@ enum sparx5_target {
#define ANA_ACL_VCAP_S2_CFG_SEC_ENA_GET(x)\
FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_ENA, x)
-/* ANA_ACL:COMMON:SWAP_IP_CTRL */
-#define ANA_ACL_SWAP_IP_CTRL __REG(TARGET_ANA_ACL,\
- 0, 1, 32768, 0, 1, 592, 412, 0, 1, 4)
+/* ANA_ACL:COMMON:SWAP_IP_CTRL */
+#define ANA_ACL_SWAP_IP_CTRL \
+ __REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_COMMON], 0, 1, 592, \
+ 412, 0, 1, 4)
#define ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL GENMASK(23, 18)
#define ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL_SET(x)\
@@ -645,9 +709,10 @@ enum sparx5_target {
#define ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA_GET(x)\
FIELD_GET(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA, x)
-/* ANA_ACL:COMMON:VCAP_S2_RLEG_STAT */
-#define ANA_ACL_VCAP_S2_RLEG_STAT(r) __REG(TARGET_ANA_ACL,\
- 0, 1, 32768, 0, 1, 592, 424, r, 4, 4)
+/* ANA_ACL:COMMON:VCAP_S2_RLEG_STAT */
+#define ANA_ACL_VCAP_S2_RLEG_STAT(r) \
+ __REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_COMMON], 0, 1, 592, \
+ 424, r, 4, 4)
#define ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK GENMASK(12, 6)
#define ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK_SET(x)\
@@ -661,9 +726,10 @@ enum sparx5_target {
#define ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK_GET(x)\
FIELD_GET(ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK, x)
-/* ANA_ACL:COMMON:VCAP_S2_FRAGMENT_CFG */
-#define ANA_ACL_VCAP_S2_FRAGMENT_CFG __REG(TARGET_ANA_ACL,\
- 0, 1, 32768, 0, 1, 592, 440, 0, 1, 4)
+/* ANA_ACL:COMMON:VCAP_S2_FRAGMENT_CFG */
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG \
+ __REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_COMMON], 0, 1, 592, \
+ 440, 0, 1, 4)
#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN GENMASK(9, 5)
#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN_SET(x)\
@@ -683,9 +749,10 @@ enum sparx5_target {
#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES_GET(x)\
FIELD_GET(ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES, x)
-/* ANA_ACL:COMMON:OWN_UPSID */
-#define ANA_ACL_OWN_UPSID(r) __REG(TARGET_ANA_ACL,\
- 0, 1, 32768, 0, 1, 592, 580, r, 3, 4)
+/* ANA_ACL:COMMON:OWN_UPSID */
+#define ANA_ACL_OWN_UPSID(r) \
+ __REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_COMMON], 0, 1, 592, \
+ 580, r, regs->rcnt[RC_ANA_ACL_OWN_UPSID], 4)
#define ANA_ACL_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define ANA_ACL_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -693,9 +760,10 @@ enum sparx5_target {
#define ANA_ACL_OWN_UPSID_OWN_UPSID_GET(x)\
FIELD_GET(ANA_ACL_OWN_UPSID_OWN_UPSID, x)
-/* ANA_ACL:KEY_SEL:VCAP_S2_KEY_SEL */
-#define ANA_ACL_VCAP_S2_KEY_SEL(g, r) __REG(TARGET_ANA_ACL,\
- 0, 1, 34200, g, 134, 16, 0, r, 4, 4)
+/* ANA_ACL:KEY_SEL:VCAP_S2_KEY_SEL */
+#define ANA_ACL_VCAP_S2_KEY_SEL(g, r) \
+ __REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_KEY_SEL], g, \
+ regs->gcnt[GC_ANA_ACL_KEY_SEL], 16, 0, r, 4, 4)
#define ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA BIT(13)
#define ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_SET(x)\
@@ -745,17 +813,20 @@ enum sparx5_target {
#define ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_GET(x)\
FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL, x)
-/* ANA_ACL:CNT_A:CNT_A */
-#define ANA_ACL_CNT_A(g) __REG(TARGET_ANA_ACL,\
- 0, 1, 0, g, 4096, 4, 0, 0, 1, 4)
+/* ANA_ACL:CNT_A:CNT_A */
+#define ANA_ACL_CNT_A(g) \
+ __REG(TARGET_ANA_ACL, 0, 1, 0, g, regs->gcnt[GC_ANA_ACL_CNT_A], 4, 0, \
+ 0, 1, 4)
-/* ANA_ACL:CNT_B:CNT_B */
-#define ANA_ACL_CNT_B(g) __REG(TARGET_ANA_ACL,\
- 0, 1, 16384, g, 4096, 4, 0, 0, 1, 4)
+/* ANA_ACL:CNT_B:CNT_B */
+#define ANA_ACL_CNT_B(g) \
+ __REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_CNT_B], g, \
+ regs->gcnt[GC_ANA_ACL_CNT_B], 4, 0, 0, 1, 4)
-/* ANA_ACL:STICKY:SEC_LOOKUP_STICKY */
-#define ANA_ACL_SEC_LOOKUP_STICKY(r) __REG(TARGET_ANA_ACL,\
- 0, 1, 36408, 0, 1, 16, 0, r, 4, 4)
+/* ANA_ACL:STICKY:SEC_LOOKUP_STICKY */
+#define ANA_ACL_SEC_LOOKUP_STICKY(r) \
+ __REG(TARGET_ANA_ACL, 0, 1, regs->gaddr[GA_ANA_ACL_STICKY], 0, 1, 16, \
+ 0, r, 4, 4)
#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY BIT(17)
#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY_SET(x)\
@@ -865,9 +936,10 @@ enum sparx5_target {
#define ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_GET(x)\
FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
-/* ANA_AC_POL:POL_ALL_CFG:POL_UPD_INT_CFG */
-#define ANA_AC_POL_POL_UPD_INT_CFG __REG(TARGET_ANA_AC_POL,\
- 0, 1, 75968, 0, 1, 1160, 1148, 0, 1, 4)
+/* ANA_AC_POL:POL_ALL_CFG:POL_UPD_INT_CFG */
+#define ANA_AC_POL_POL_UPD_INT_CFG \
+ __REG(TARGET_ANA_AC_POL, 0, 1, regs->gaddr[GA_ANA_AC_POL_POL_ALL_CFG], \
+ 0, 1, 1160, 1148, 0, 1, 4)
#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT GENMASK(9, 0)
#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_SET(x)\
@@ -875,9 +947,10 @@ enum sparx5_target {
#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_GET(x)\
FIELD_GET(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT, x)
-/* ANA_AC_POL:COMMON_BDLB:DLB_CTRL */
-#define ANA_AC_POL_BDLB_DLB_CTRL __REG(TARGET_ANA_AC_POL,\
- 0, 1, 79048, 0, 1, 8, 0, 0, 1, 4)
+/* ANA_AC_POL:COMMON_BDLB:DLB_CTRL */
+#define ANA_AC_POL_BDLB_DLB_CTRL \
+ __REG(TARGET_ANA_AC_POL, 0, 1, regs->gaddr[GA_ANA_AC_POL_COMMON_BDLB], \
+ 0, 1, 8, 0, 0, 1, 4)
#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19)
#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\
@@ -903,9 +976,10 @@ enum sparx5_target {
#define ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\
FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA, x)
-/* ANA_AC_POL:COMMON_BUM_SLB:DLB_CTRL */
-#define ANA_AC_POL_SLB_DLB_CTRL __REG(TARGET_ANA_AC_POL,\
- 0, 1, 79056, 0, 1, 20, 0, 0, 1, 4)
+/* ANA_AC_POL:COMMON_BUM_SLB:DLB_CTRL */
+#define ANA_AC_POL_SLB_DLB_CTRL \
+ __REG(TARGET_ANA_AC_POL, 0, 1, \
+ regs->gaddr[GA_ANA_AC_POL_COMMON_BUM_SLB], 0, 1, 20, 0, 0, 1, 4)
#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19)
#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\
@@ -931,19 +1005,22 @@ enum sparx5_target {
#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\
FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA, x)
-/* ANA_AC_SDLB:LBGRP_TBL:XLB_START */
-#define ANA_AC_SDLB_XLB_START(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 295468, g, 10, 24, 0, 0, 1, 4)
+/* ANA_AC_SDLB:LBGRP_TBL:XLB_START */
+#define ANA_AC_SDLB_XLB_START(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, regs->gaddr[GA_ANA_AC_SDLB_LBGRP_TBL], \
+ g, regs->gcnt[GC_ANA_AC_SDLB_LBGRP_TBL], 24, 0, 0, 1, 4)
-#define ANA_AC_SDLB_XLB_START_LBSET_START GENMASK(12, 0)
+#define ANA_AC_SDLB_XLB_START_LBSET_START\
+ GENMASK(regs->fsize[FW_ANA_AC_SDLB_XLB_START_LBSET_START] + 0 - 1, 0)
#define ANA_AC_SDLB_XLB_START_LBSET_START_SET(x)\
- FIELD_PREP(ANA_AC_SDLB_XLB_START_LBSET_START, x)
+ spx5_field_prep(ANA_AC_SDLB_XLB_START_LBSET_START, x)
#define ANA_AC_SDLB_XLB_START_LBSET_START_GET(x)\
- FIELD_GET(ANA_AC_SDLB_XLB_START_LBSET_START, x)
+ spx5_field_get(ANA_AC_SDLB_XLB_START_LBSET_START, x)
-/* ANA_AC_SDLB:LBGRP_TBL:PUP_INTERVAL */
-#define ANA_AC_SDLB_PUP_INTERVAL(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 295468, g, 10, 24, 4, 0, 1, 4)
+/* ANA_AC_SDLB:LBGRP_TBL:PUP_INTERVAL */
+#define ANA_AC_SDLB_PUP_INTERVAL(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, regs->gaddr[GA_ANA_AC_SDLB_LBGRP_TBL], \
+ g, regs->gcnt[GC_ANA_AC_SDLB_LBGRP_TBL], 24, 4, 0, 1, 4)
#define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL GENMASK(19, 0)
#define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL_SET(x)\
@@ -951,9 +1028,10 @@ enum sparx5_target {
#define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL_GET(x)\
FIELD_GET(ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL, x)
-/* ANA_AC_SDLB:LBGRP_TBL:PUP_CTRL */
-#define ANA_AC_SDLB_PUP_CTRL(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 295468, g, 10, 24, 8, 0, 1, 4)
+/* ANA_AC_SDLB:LBGRP_TBL:PUP_CTRL */
+#define ANA_AC_SDLB_PUP_CTRL(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, regs->gaddr[GA_ANA_AC_SDLB_LBGRP_TBL], \
+ g, regs->gcnt[GC_ANA_AC_SDLB_LBGRP_TBL], 24, 8, 0, 1, 4)
#define ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT GENMASK(18, 0)
#define ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT_SET(x)\
@@ -967,19 +1045,22 @@ enum sparx5_target {
#define ANA_AC_SDLB_PUP_CTRL_PUP_ENA_GET(x)\
FIELD_GET(ANA_AC_SDLB_PUP_CTRL_PUP_ENA, x)
-/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_MISC */
-#define ANA_AC_SDLB_LBGRP_MISC(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 295468, g, 10, 24, 12, 0, 1, 4)
+/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_MISC */
+#define ANA_AC_SDLB_LBGRP_MISC(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, regs->gaddr[GA_ANA_AC_SDLB_LBGRP_TBL], \
+ g, regs->gcnt[GC_ANA_AC_SDLB_LBGRP_TBL], 24, 12, 0, 1, 4)
-#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT GENMASK(12, 8)
+#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT\
+ GENMASK(regs->fsize[FW_ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT] + 8 - 1, 8)
#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT_SET(x)\
- FIELD_PREP(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
+ spx5_field_prep(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT_GET(x)\
- FIELD_GET(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
+ spx5_field_get(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
-/* ANA_AC_SDLB:LBGRP_TBL:FRM_RATE_TOKENS */
-#define ANA_AC_SDLB_FRM_RATE_TOKENS(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 295468, g, 10, 24, 16, 0, 1, 4)
+/* ANA_AC_SDLB:LBGRP_TBL:FRM_RATE_TOKENS */
+#define ANA_AC_SDLB_FRM_RATE_TOKENS(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, regs->gaddr[GA_ANA_AC_SDLB_LBGRP_TBL], \
+ g, regs->gcnt[GC_ANA_AC_SDLB_LBGRP_TBL], 24, 16, 0, 1, 4)
#define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS GENMASK(12, 0)
#define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS_SET(x)\
@@ -987,9 +1068,10 @@ enum sparx5_target {
#define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS_GET(x)\
FIELD_GET(ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS, x)
-/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_STATE_TBL */
-#define ANA_AC_SDLB_LBGRP_STATE_TBL(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 295468, g, 10, 24, 20, 0, 1, 4)
+/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_STATE_TBL */
+#define ANA_AC_SDLB_LBGRP_STATE_TBL(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, regs->gaddr[GA_ANA_AC_SDLB_LBGRP_TBL], \
+ g, regs->gcnt[GC_ANA_AC_SDLB_LBGRP_TBL], 24, 20, 0, 1, 4)
#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING BIT(0)
#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING_SET(x)\
@@ -1003,15 +1085,17 @@ enum sparx5_target {
#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK_GET(x)\
FIELD_GET(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK, x)
-#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT GENMASK(28, 16)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT\
+ GENMASK(regs->fsize[FW_ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT] + 16 - 1, 16)
#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT_SET(x)\
- FIELD_PREP(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
+ spx5_field_prep(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT_GET(x)\
- FIELD_GET(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
+ spx5_field_get(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
-/* ANA_AC_SDLB:LBSET_TBL:PUP_TOKENS */
-#define ANA_AC_SDLB_PUP_TOKENS(g, r) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 0, g, 4616, 64, 0, r, 2, 4)
+/* ANA_AC_SDLB:LBSET_TBL:PUP_TOKENS */
+#define ANA_AC_SDLB_PUP_TOKENS(g, r) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+ regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 0, r, 2, 4)
#define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS GENMASK(12, 0)
#define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS_SET(x)\
@@ -1019,9 +1103,10 @@ enum sparx5_target {
#define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS_GET(x)\
FIELD_GET(ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS, x)
-/* ANA_AC_SDLB:LBSET_TBL:THRES */
-#define ANA_AC_SDLB_THRES(g, r) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 0, g, 4616, 64, 8, r, 2, 4)
+/* ANA_AC_SDLB:LBSET_TBL:THRES */
+#define ANA_AC_SDLB_THRES(g, r) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+ regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 8, r, 2, 4)
#define ANA_AC_SDLB_THRES_THRES GENMASK(9, 0)
#define ANA_AC_SDLB_THRES_THRES_SET(x)\
@@ -1035,25 +1120,29 @@ enum sparx5_target {
#define ANA_AC_SDLB_THRES_THRES_HYS_GET(x)\
FIELD_GET(ANA_AC_SDLB_THRES_THRES_HYS, x)
-/* ANA_AC_SDLB:LBSET_TBL:XLB_NEXT */
-#define ANA_AC_SDLB_XLB_NEXT(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 0, g, 4616, 64, 16, 0, 1, 4)
+/* ANA_AC_SDLB:LBSET_TBL:XLB_NEXT */
+#define ANA_AC_SDLB_XLB_NEXT(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+ regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 16, 0, 1, 4)
-#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT GENMASK(12, 0)
+#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT\
+ GENMASK(regs->fsize[FW_ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT] + 0 - 1, 0)
#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_SET(x)\
- FIELD_PREP(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
+ spx5_field_prep(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_GET(x)\
- FIELD_GET(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
+ spx5_field_get(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
-#define ANA_AC_SDLB_XLB_NEXT_LBGRP GENMASK(27, 24)
+#define ANA_AC_SDLB_XLB_NEXT_LBGRP\
+ GENMASK(regs->fsize[FW_ANA_AC_SDLB_XLB_NEXT_LBGRP] + 24 - 1, 24)
#define ANA_AC_SDLB_XLB_NEXT_LBGRP_SET(x)\
- FIELD_PREP(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
+ spx5_field_prep(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
#define ANA_AC_SDLB_XLB_NEXT_LBGRP_GET(x)\
- FIELD_GET(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
+ spx5_field_get(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
-/* ANA_AC_SDLB:LBSET_TBL:INH_CTRL */
-#define ANA_AC_SDLB_INH_CTRL(g, r) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 0, g, 4616, 64, 20, r, 2, 4)
+/* ANA_AC_SDLB:LBSET_TBL:INH_CTRL */
+#define ANA_AC_SDLB_INH_CTRL(g, r) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+ regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 20, r, 2, 4)
#define ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX GENMASK(12, 0)
#define ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX_SET(x)\
@@ -1073,19 +1162,22 @@ enum sparx5_target {
#define ANA_AC_SDLB_INH_CTRL_INH_LB_GET(x)\
FIELD_GET(ANA_AC_SDLB_INH_CTRL_INH_LB, x)
-/* ANA_AC_SDLB:LBSET_TBL:INH_LBSET_ADDR */
-#define ANA_AC_SDLB_INH_LBSET_ADDR(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 0, g, 4616, 64, 28, 0, 1, 4)
+/* ANA_AC_SDLB:LBSET_TBL:INH_LBSET_ADDR */
+#define ANA_AC_SDLB_INH_LBSET_ADDR(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+ regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 28, 0, 1, 4)
-#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR GENMASK(12, 0)
+#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR\
+ GENMASK(regs->fsize[FW_ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR] + 0 - 1, 0)
#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR_SET(x)\
- FIELD_PREP(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
+ spx5_field_prep(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR_GET(x)\
- FIELD_GET(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
+ spx5_field_get(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
-/* ANA_AC_SDLB:LBSET_TBL:DLB_MISC */
-#define ANA_AC_SDLB_DLB_MISC(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 0, g, 4616, 64, 32, 0, 1, 4)
+/* ANA_AC_SDLB:LBSET_TBL:DLB_MISC */
+#define ANA_AC_SDLB_DLB_MISC(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+ regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 32, 0, 1, 4)
#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA BIT(0)
#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA_SET(x)\
@@ -1105,9 +1197,10 @@ enum sparx5_target {
#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ_GET(x)\
FIELD_GET(ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ, x)
-/* ANA_AC_SDLB:LBSET_TBL:DLB_CFG */
-#define ANA_AC_SDLB_DLB_CFG(g) __REG(TARGET_ANA_AC_SDLB,\
- 0, 1, 0, g, 4616, 64, 36, 0, 1, 4)
+/* ANA_AC_SDLB:LBSET_TBL:DLB_CFG */
+#define ANA_AC_SDLB_DLB_CFG(g) \
+ __REG(TARGET_ANA_AC_SDLB, 0, 1, 0, g, \
+ regs->gcnt[GC_ANA_AC_SDLB_LBSET_TBL], 64, 36, 0, 1, 4)
#define ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA BIT(11)
#define ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA_SET(x)\
@@ -1157,9 +1250,10 @@ enum sparx5_target {
#define ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK_GET(x)\
FIELD_GET(ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK, x)
-/* ANA_CL:PORT:FILTER_CTRL */
-#define ANA_CL_FILTER_CTRL(g) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 4, 0, 1, 4)
+/* ANA_CL:PORT:FILTER_CTRL */
+#define ANA_CL_FILTER_CTRL(g) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 4, 0, 1, 4)
#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS BIT(2)
#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(x)\
@@ -1179,9 +1273,10 @@ enum sparx5_target {
#define ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA_GET(x)\
FIELD_GET(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA, x)
-/* ANA_CL:PORT:VLAN_FILTER_CTRL */
-#define ANA_CL_VLAN_FILTER_CTRL(g, r) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 8, r, 3, 4)
+/* ANA_CL:PORT:VLAN_FILTER_CTRL */
+#define ANA_CL_VLAN_FILTER_CTRL(g, r) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 8, r, 3, 4)
#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA BIT(10)
#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(x)\
@@ -1249,9 +1344,10 @@ enum sparx5_target {
#define ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS_GET(x)\
FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS, x)
-/* ANA_CL:PORT:ETAG_FILTER_CTRL */
-#define ANA_CL_ETAG_FILTER_CTRL(g) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 20, 0, 1, 4)
+/* ANA_CL:PORT:ETAG_FILTER_CTRL */
+#define ANA_CL_ETAG_FILTER_CTRL(g) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 20, 0, 1, 4)
#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA BIT(1)
#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA_SET(x)\
@@ -1265,9 +1361,10 @@ enum sparx5_target {
#define ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS_GET(x)\
FIELD_GET(ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS, x)
-/* ANA_CL:PORT:VLAN_CTRL */
-#define ANA_CL_VLAN_CTRL(g) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 32, 0, 1, 4)
+/* ANA_CL:PORT:VLAN_CTRL */
+#define ANA_CL_VLAN_CTRL(g) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 32, 0, 1, 4)
#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS GENMASK(30, 26)
#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS_SET(x)\
@@ -1335,9 +1432,10 @@ enum sparx5_target {
#define ANA_CL_VLAN_CTRL_PORT_VID_GET(x)\
FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VID, x)
-/* ANA_CL:PORT:VLAN_CTRL_2 */
-#define ANA_CL_VLAN_CTRL_2(g) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 36, 0, 1, 4)
+/* ANA_CL:PORT:VLAN_CTRL_2 */
+#define ANA_CL_VLAN_CTRL_2(g) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 36, 0, 1, 4)
#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT GENMASK(1, 0)
#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_SET(x)\
@@ -1345,9 +1443,10 @@ enum sparx5_target {
#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_GET(x)\
FIELD_GET(ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT, x)
-/* ANA_CL:PORT:PCP_DEI_MAP_CFG */
-#define ANA_CL_PCP_DEI_MAP_CFG(g, r) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 108, r, 16, 4)
+/* ANA_CL:PORT:PCP_DEI_MAP_CFG */
+#define ANA_CL_PCP_DEI_MAP_CFG(g, r) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 108, r, 16, 4)
#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL GENMASK(4, 3)
#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_SET(x)\
@@ -1361,9 +1460,10 @@ enum sparx5_target {
#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL_GET(x)\
FIELD_GET(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL, x)
-/* ANA_CL:PORT:QOS_CFG */
-#define ANA_CL_QOS_CFG(g) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 172, 0, 1, 4)
+/* ANA_CL:PORT:QOS_CFG */
+#define ANA_CL_QOS_CFG(g) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 172, 0, 1, 4)
#define ANA_CL_QOS_CFG_DEFAULT_COSID_ENA BIT(17)
#define ANA_CL_QOS_CFG_DEFAULT_COSID_ENA_SET(x)\
@@ -1437,13 +1537,15 @@ enum sparx5_target {
#define ANA_CL_QOS_CFG_DEFAULT_QOS_VAL_GET(x)\
FIELD_GET(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL, x)
-/* ANA_CL:PORT:CAPTURE_BPDU_CFG */
-#define ANA_CL_CAPTURE_BPDU_CFG(g) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 196, 0, 1, 4)
+/* ANA_CL:PORT:CAPTURE_BPDU_CFG */
+#define ANA_CL_CAPTURE_BPDU_CFG(g) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 196, 0, 1, 4)
-/* ANA_CL:PORT:ADV_CL_CFG_2 */
-#define ANA_CL_ADV_CL_CFG_2(g, r) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 200, r, 6, 4)
+/* ANA_CL:PORT:ADV_CL_CFG_2 */
+#define ANA_CL_ADV_CL_CFG_2(g, r) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 200, r, 6, 4)
#define ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA BIT(1)
#define ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA_SET(x)\
@@ -1457,9 +1559,10 @@ enum sparx5_target {
#define ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA_GET(x)\
FIELD_GET(ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA, x)
-/* ANA_CL:PORT:ADV_CL_CFG */
-#define ANA_CL_ADV_CL_CFG(g, r) __REG(TARGET_ANA_CL,\
- 0, 1, 131072, g, 70, 512, 224, r, 6, 4)
+/* ANA_CL:PORT:ADV_CL_CFG */
+#define ANA_CL_ADV_CL_CFG(g, r) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_PORT], g, \
+ regs->gcnt[GC_ANA_CL_PORT], 512, 224, r, 6, 4)
#define ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL GENMASK(30, 26)
#define ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_SET(x)\
@@ -1503,9 +1606,10 @@ enum sparx5_target {
#define ANA_CL_ADV_CL_CFG_LOOKUP_ENA_GET(x)\
FIELD_GET(ANA_CL_ADV_CL_CFG_LOOKUP_ENA, x)
-/* ANA_CL:COMMON:OWN_UPSID */
-#define ANA_CL_OWN_UPSID(r) __REG(TARGET_ANA_CL,\
- 0, 1, 166912, 0, 1, 756, 0, r, 3, 4)
+/* ANA_CL:COMMON:OWN_UPSID */
+#define ANA_CL_OWN_UPSID(r) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_COMMON], 0, 1, 756, 0,\
+ r, regs->rcnt[RC_ANA_CL_OWN_UPSID], 4)
#define ANA_CL_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define ANA_CL_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -1513,9 +1617,10 @@ enum sparx5_target {
#define ANA_CL_OWN_UPSID_OWN_UPSID_GET(x)\
FIELD_GET(ANA_CL_OWN_UPSID_OWN_UPSID, x)
-/* ANA_CL:COMMON:DSCP_CFG */
-#define ANA_CL_DSCP_CFG(r) __REG(TARGET_ANA_CL,\
- 0, 1, 166912, 0, 1, 756, 256, r, 64, 4)
+/* ANA_CL:COMMON:DSCP_CFG */
+#define ANA_CL_DSCP_CFG(r) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_COMMON], 0, 1, 756, \
+ 256, r, 64, 4)
#define ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL GENMASK(12, 7)
#define ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL_SET(x)\
@@ -1547,9 +1652,10 @@ enum sparx5_target {
#define ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_GET(x)\
FIELD_GET(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, x)
-/* ANA_CL:COMMON:QOS_MAP_CFG */
-#define ANA_CL_QOS_MAP_CFG(r) __REG(TARGET_ANA_CL,\
- 0, 1, 166912, 0, 1, 756, 512, r, 32, 4)
+/* ANA_CL:COMMON:QOS_MAP_CFG */
+#define ANA_CL_QOS_MAP_CFG(r) \
+ __REG(TARGET_ANA_CL, 0, 1, regs->gaddr[GA_ANA_CL_COMMON], 0, 1, 756, \
+ 512, r, 32, 4)
#define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL GENMASK(9, 4)
#define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(x)\
@@ -1557,9 +1663,10 @@ enum sparx5_target {
#define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_GET(x)\
FIELD_GET(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, x)
-/* ANA_L2:COMMON:FWD_CFG */
-#define ANA_L2_FWD_CFG __REG(TARGET_ANA_L2,\
- 0, 1, 566024, 0, 1, 700, 0, 0, 1, 4)
+/* ANA_L2:COMMON:FWD_CFG */
+#define ANA_L2_FWD_CFG \
+ __REG(TARGET_ANA_L2, 0, 1, regs->gaddr[GA_ANA_L2_COMMON], 0, 1, \
+ regs->gsize[GW_ANA_L2_COMMON], 0, 0, 1, 4)
#define ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL GENMASK(21, 20)
#define ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL_SET(x)\
@@ -1633,17 +1740,22 @@ enum sparx5_target {
#define ANA_L2_FWD_CFG_FWD_ENA_GET(x)\
FIELD_GET(ANA_L2_FWD_CFG_FWD_ENA, x)
-/* ANA_L2:COMMON:AUTO_LRN_CFG */
-#define ANA_L2_AUTO_LRN_CFG __REG(TARGET_ANA_L2,\
- 0, 1, 566024, 0, 1, 700, 24, 0, 1, 4)
+/* ANA_L2:COMMON:AUTO_LRN_CFG */
+#define ANA_L2_AUTO_LRN_CFG \
+ __REG(TARGET_ANA_L2, 0, 1, regs->gaddr[GA_ANA_L2_COMMON], 0, 1, \
+ regs->gsize[GW_ANA_L2_COMMON], 24, 0, 1, 4)
-/* ANA_L2:COMMON:AUTO_LRN_CFG1 */
-#define ANA_L2_AUTO_LRN_CFG1 __REG(TARGET_ANA_L2,\
- 0, 1, 566024, 0, 1, 700, 28, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_L2:COMMON:AUTO_LRN_CFG1 */
+#define ANA_L2_AUTO_LRN_CFG1 \
+ __REG(TARGET_ANA_L2, 0, 1, regs->gaddr[GA_ANA_L2_COMMON], 0, 1, \
+ regs->gsize[GW_ANA_L2_COMMON], 28, 0, 1, 4)
-/* ANA_L2:COMMON:AUTO_LRN_CFG2 */
-#define ANA_L2_AUTO_LRN_CFG2 __REG(TARGET_ANA_L2,\
- 0, 1, 566024, 0, 1, 700, 32, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_L2:COMMON:AUTO_LRN_CFG2 */
+#define ANA_L2_AUTO_LRN_CFG2 \
+ __REG(TARGET_ANA_L2, 0, 1, regs->gaddr[GA_ANA_L2_COMMON], 0, 1, \
+ regs->gsize[GW_ANA_L2_COMMON], 32, 0, 1, 4)
#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2 BIT(0)
#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_SET(x)\
@@ -1651,9 +1763,11 @@ enum sparx5_target {
#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_GET(x)\
FIELD_GET(ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2, x)
-/* ANA_L2:COMMON:OWN_UPSID */
-#define ANA_L2_OWN_UPSID(r) __REG(TARGET_ANA_L2,\
- 0, 1, 566024, 0, 1, 700, 672, r, 3, 4)
+/* ANA_L2:COMMON:OWN_UPSID */
+#define ANA_L2_OWN_UPSID(r) \
+ __REG(TARGET_ANA_L2, 0, 1, regs->gaddr[GA_ANA_L2_COMMON], 0, 1, \
+ regs->gsize[GW_ANA_L2_COMMON], 672, r, \
+ regs->rcnt[RC_ANA_L2_OWN_UPSID], 4)
#define ANA_L2_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define ANA_L2_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -1661,29 +1775,34 @@ enum sparx5_target {
#define ANA_L2_OWN_UPSID_OWN_UPSID_GET(x)\
FIELD_GET(ANA_L2_OWN_UPSID_OWN_UPSID, x)
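
Throughout these hunks the fixed group addresses, replication counts, group widths and field widths become lookups into a regs descriptor, indexed by generated enums (GA_* for group addresses, GC_* for group counts, GW_* for group sizes, RC_* for register counts, FW_* for field widths). A plausible shape for that descriptor, sketched under the assumption that each table is a flat array indexed by the matching enum; the names below are illustrative, not necessarily the driver's exact layout:

/* Per-chip register layout tables, selected once at probe time. */
struct sparx5_regs {
	const u32 *gaddr;	/* group base addresses, indexed by GA_* */
	const u32 *gcnt;	/* group replication counts, indexed by GC_* */
	const u32 *gsize;	/* group widths in bytes, indexed by GW_* */
	const u32 *rcnt;	/* register replication counts, indexed by RC_* */
	const u32 *fsize;	/* field widths in bits, indexed by FW_* */
};

With such a table, ANA_L2_OWN_UPSID(r) above resolves its group base via regs->gaddr[GA_ANA_L2_COMMON] and its replication count via regs->rcnt[RC_ANA_L2_OWN_UPSID], so one macro can serve chips whose ANA_L2 layout differs.
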
-/* ANA_L2:ISDX:DLB_CFG */
-#define ANA_L2_DLB_CFG(g) __REG(TARGET_ANA_L2,\
- 0, 1, 0, g, 4096, 128, 56, 0, 1, 4)
+/* ANA_L2:ISDX:DLB_CFG */
+#define ANA_L2_DLB_CFG(g) \
+ __REG(TARGET_ANA_L2, 0, 1, 0, g, regs->gcnt[GC_ANA_L2_ISDX], 128, 56, \
+ 0, 1, 4)
-#define ANA_L2_DLB_CFG_DLB_IDX GENMASK(12, 0)
+#define ANA_L2_DLB_CFG_DLB_IDX\
+ GENMASK(regs->fsize[FW_ANA_L2_DLB_CFG_DLB_IDX] + 0 - 1, 0)
#define ANA_L2_DLB_CFG_DLB_IDX_SET(x)\
- FIELD_PREP(ANA_L2_DLB_CFG_DLB_IDX, x)
+ spx5_field_prep(ANA_L2_DLB_CFG_DLB_IDX, x)
#define ANA_L2_DLB_CFG_DLB_IDX_GET(x)\
- FIELD_GET(ANA_L2_DLB_CFG_DLB_IDX, x)
+ spx5_field_get(ANA_L2_DLB_CFG_DLB_IDX, x)
-/* ANA_L2:ISDX:TSN_CFG */
-#define ANA_L2_TSN_CFG(g) __REG(TARGET_ANA_L2,\
- 0, 1, 0, g, 4096, 128, 100, 0, 1, 4)
+/* ANA_L2:ISDX:TSN_CFG */
+#define ANA_L2_TSN_CFG(g) \
+ __REG(TARGET_ANA_L2, 0, 1, 0, g, regs->gcnt[GC_ANA_L2_ISDX], 128, 100, \
+ 0, 1, 4)
-#define ANA_L2_TSN_CFG_TSN_SFID GENMASK(9, 0)
+#define ANA_L2_TSN_CFG_TSN_SFID\
+ GENMASK(regs->fsize[FW_ANA_L2_TSN_CFG_TSN_SFID] + 0 - 1, 0)
#define ANA_L2_TSN_CFG_TSN_SFID_SET(x)\
- FIELD_PREP(ANA_L2_TSN_CFG_TSN_SFID, x)
+ spx5_field_prep(ANA_L2_TSN_CFG_TSN_SFID, x)
#define ANA_L2_TSN_CFG_TSN_SFID_GET(x)\
- FIELD_GET(ANA_L2_TSN_CFG_TSN_SFID, x)
+ spx5_field_get(ANA_L2_TSN_CFG_TSN_SFID, x)
-/* ANA_L3:COMMON:VLAN_CTRL */
-#define ANA_L3_VLAN_CTRL __REG(TARGET_ANA_L3,\
- 0, 1, 493632, 0, 1, 184, 4, 0, 1, 4)
+/* ANA_L3:COMMON:VLAN_CTRL */
+#define ANA_L3_VLAN_CTRL \
+ __REG(TARGET_ANA_L3, 0, 1, regs->gaddr[GA_ANA_L3_COMMON], 0, 1, 184, 4,\
+ 0, 1, 4)
#define ANA_L3_VLAN_CTRL_VLAN_ENA BIT(0)
#define ANA_L3_VLAN_CTRL_VLAN_ENA_SET(x)\
@@ -1691,9 +1810,10 @@ enum sparx5_target {
#define ANA_L3_VLAN_CTRL_VLAN_ENA_GET(x)\
FIELD_GET(ANA_L3_VLAN_CTRL_VLAN_ENA, x)
-/* ANA_L3:VLAN:VLAN_CFG */
-#define ANA_L3_VLAN_CFG(g) __REG(TARGET_ANA_L3,\
- 0, 1, 0, g, 5120, 64, 8, 0, 1, 4)
+/* ANA_L3:VLAN:VLAN_CFG */
+#define ANA_L3_VLAN_CFG(g) \
+ __REG(TARGET_ANA_L3, 0, 1, 0, g, regs->gcnt[GC_ANA_L3_VLAN], 64, 8, 0, \
+ 1, 4)
#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR GENMASK(30, 24)
#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR_SET(x)\
@@ -1749,17 +1869,22 @@ enum sparx5_target {
#define ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA_GET(x)\
FIELD_GET(ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA, x)
-/* ANA_L3:VLAN:VLAN_MASK_CFG */
-#define ANA_L3_VLAN_MASK_CFG(g) __REG(TARGET_ANA_L3,\
- 0, 1, 0, g, 5120, 64, 16, 0, 1, 4)
+/* ANA_L3:VLAN:VLAN_MASK_CFG */
+#define ANA_L3_VLAN_MASK_CFG(g) \
+ __REG(TARGET_ANA_L3, 0, 1, 0, g, regs->gcnt[GC_ANA_L3_VLAN], 64, 16, 0,\
+ 1, 4)
-/* ANA_L3:VLAN:VLAN_MASK_CFG1 */
-#define ANA_L3_VLAN_MASK_CFG1(g) __REG(TARGET_ANA_L3,\
- 0, 1, 0, g, 5120, 64, 20, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_L3:VLAN:VLAN_MASK_CFG1 */
+#define ANA_L3_VLAN_MASK_CFG1(g) \
+ __REG(TARGET_ANA_L3, 0, 1, 0, g, regs->gcnt[GC_ANA_L3_VLAN], 64, 20, 0,\
+ 1, 4)
-/* ANA_L3:VLAN:VLAN_MASK_CFG2 */
-#define ANA_L3_VLAN_MASK_CFG2(g) __REG(TARGET_ANA_L3,\
- 0, 1, 0, g, 5120, 64, 24, 0, 1, 4)
+/* SPARX5 ONLY */
+/* ANA_L3:VLAN:VLAN_MASK_CFG2 */
+#define ANA_L3_VLAN_MASK_CFG2(g) \
+ __REG(TARGET_ANA_L3, 0, 1, 0, g, regs->gcnt[GC_ANA_L3_VLAN], 64, 24, 0,\
+ 1, 4)
#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2 BIT(0)
#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_SET(x)\
@@ -1767,365 +1892,455 @@ enum sparx5_target {
#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_GET(x)\
FIELD_GET(ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2, x)
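
Every ASM:DEV_STATISTICS counter below follows the same tuple: group base 0, per-port group count regs->gcnt[GC_ASM_DEV_STATISTICS], group width 512 bytes, then a fixed register offset stepping by 4. Reading the visible argument order as (target, tinst, tcnt, gbase, ginst, gcnt, gwidth, raddr, rinst, rcnt, rwidth), the byte offset of one replication works out as below; this helper is a sketch for illustration, not the driver's actual address function:

#include <linux/types.h>

static inline u32 spx5_sketch_addr(u32 gbase, u32 ginst, u32 gwidth,
				   u32 raddr, u32 rinst, u32 rwidth)
{
	/* group base + group step + register offset + register step */
	return gbase + ginst * gwidth + raddr + rinst * rwidth;
}

For example, ASM_RX_PAUSE_CNT(g) lands at 0 + g * 512 + 8 within the ASM target, i.e. 8 bytes into port g's 512-byte statistics block.
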
-/* ASM:DEV_STATISTICS:RX_IN_BYTES_CNT */
-#define ASM_RX_IN_BYTES_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 0, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_SYMBOL_ERR_CNT */
-#define ASM_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 4, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_PAUSE_CNT */
-#define ASM_RX_PAUSE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 8, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_UNSUP_OPCODE_CNT */
-#define ASM_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 12, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_OK_BYTES_CNT */
-#define ASM_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 16, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_BAD_BYTES_CNT */
-#define ASM_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 20, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_UC_CNT */
-#define ASM_RX_UC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 24, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_MC_CNT */
-#define ASM_RX_MC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 28, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_BC_CNT */
-#define ASM_RX_BC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 32, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_CRC_ERR_CNT */
-#define ASM_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 36, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_UNDERSIZE_CNT */
-#define ASM_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 40, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_FRAGMENTS_CNT */
-#define ASM_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 44, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_IN_RANGE_LEN_ERR_CNT */
-#define ASM_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 48, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_OUT_OF_RANGE_LEN_ERR_CNT */
-#define ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 52, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_OVERSIZE_CNT */
-#define ASM_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 56, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_JABBERS_CNT */
-#define ASM_RX_JABBERS_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 60, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_SIZE64_CNT */
-#define ASM_RX_SIZE64_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 64, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_SIZE65TO127_CNT */
-#define ASM_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 68, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_SIZE128TO255_CNT */
-#define ASM_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 72, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_SIZE256TO511_CNT */
-#define ASM_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 76, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_SIZE512TO1023_CNT */
-#define ASM_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 80, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_SIZE1024TO1518_CNT */
-#define ASM_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 84, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_SIZE1519TOMAX_CNT */
-#define ASM_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 88, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_IPG_SHRINK_CNT */
-#define ASM_RX_IPG_SHRINK_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 92, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_OUT_BYTES_CNT */
-#define ASM_TX_OUT_BYTES_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 96, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_PAUSE_CNT */
-#define ASM_TX_PAUSE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 100, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_OK_BYTES_CNT */
-#define ASM_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 104, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_UC_CNT */
-#define ASM_TX_UC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 108, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_MC_CNT */
-#define ASM_TX_MC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 112, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_BC_CNT */
-#define ASM_TX_BC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 116, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_SIZE64_CNT */
-#define ASM_TX_SIZE64_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 120, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_SIZE65TO127_CNT */
-#define ASM_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 124, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_SIZE128TO255_CNT */
-#define ASM_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 128, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_SIZE256TO511_CNT */
-#define ASM_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 132, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_SIZE512TO1023_CNT */
-#define ASM_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 136, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_SIZE1024TO1518_CNT */
-#define ASM_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 140, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_SIZE1519TOMAX_CNT */
-#define ASM_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 144, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_ALIGNMENT_LOST_CNT */
-#define ASM_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 148, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_TAGGED_FRMS_CNT */
-#define ASM_RX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 152, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_UNTAGGED_FRMS_CNT */
-#define ASM_RX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 156, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_TAGGED_FRMS_CNT */
-#define ASM_TX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 160, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_UNTAGGED_FRMS_CNT */
-#define ASM_TX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 164, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_SYMBOL_ERR_CNT */
-#define ASM_PMAC_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 168, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_PAUSE_CNT */
-#define ASM_PMAC_RX_PAUSE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 172, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_UNSUP_OPCODE_CNT */
-#define ASM_PMAC_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 176, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_CNT */
-#define ASM_PMAC_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 180, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_CNT */
-#define ASM_PMAC_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 184, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_UC_CNT */
-#define ASM_PMAC_RX_UC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 188, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_MC_CNT */
-#define ASM_PMAC_RX_MC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 192, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_BC_CNT */
-#define ASM_PMAC_RX_BC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 196, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_CRC_ERR_CNT */
-#define ASM_PMAC_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 200, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_UNDERSIZE_CNT */
-#define ASM_PMAC_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 204, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_FRAGMENTS_CNT */
-#define ASM_PMAC_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 208, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
-#define ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 212, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
-#define ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 216, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_OVERSIZE_CNT */
-#define ASM_PMAC_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 220, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_JABBERS_CNT */
-#define ASM_PMAC_RX_JABBERS_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 224, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_SIZE64_CNT */
-#define ASM_PMAC_RX_SIZE64_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 228, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_SIZE65TO127_CNT */
-#define ASM_PMAC_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 232, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_SIZE128TO255_CNT */
-#define ASM_PMAC_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 236, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_SIZE256TO511_CNT */
-#define ASM_PMAC_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 240, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_SIZE512TO1023_CNT */
-#define ASM_PMAC_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 244, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1024TO1518_CNT */
-#define ASM_PMAC_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 248, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1519TOMAX_CNT */
-#define ASM_PMAC_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 252, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_PAUSE_CNT */
-#define ASM_PMAC_TX_PAUSE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 256, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_CNT */
-#define ASM_PMAC_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 260, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_UC_CNT */
-#define ASM_PMAC_TX_UC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 264, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_MC_CNT */
-#define ASM_PMAC_TX_MC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 268, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_BC_CNT */
-#define ASM_PMAC_TX_BC_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 272, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_SIZE64_CNT */
-#define ASM_PMAC_TX_SIZE64_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 276, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_SIZE65TO127_CNT */
-#define ASM_PMAC_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 280, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_SIZE128TO255_CNT */
-#define ASM_PMAC_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 284, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_SIZE256TO511_CNT */
-#define ASM_PMAC_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 288, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_SIZE512TO1023_CNT */
-#define ASM_PMAC_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 292, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1024TO1518_CNT */
-#define ASM_PMAC_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 296, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1519TOMAX_CNT */
-#define ASM_PMAC_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 300, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:PMAC_RX_ALIGNMENT_LOST_CNT */
-#define ASM_PMAC_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 304, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_ERR_CNT */
-#define ASM_MM_RX_ASSEMBLY_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 308, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:MM_RX_SMD_ERR_CNT */
-#define ASM_MM_RX_SMD_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 312, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_OK_CNT */
-#define ASM_MM_RX_ASSEMBLY_OK_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 316, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:MM_RX_MERGE_FRAG_CNT */
-#define ASM_MM_RX_MERGE_FRAG_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 320, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:MM_TX_PFRAGMENT_CNT */
-#define ASM_MM_TX_PFRAGMENT_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 324, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_MULTI_COLL_CNT */
-#define ASM_TX_MULTI_COLL_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 328, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_LATE_COLL_CNT */
-#define ASM_TX_LATE_COLL_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 332, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_XCOLL_CNT */
-#define ASM_TX_XCOLL_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 336, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_DEFER_CNT */
-#define ASM_TX_DEFER_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 340, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_XDEFER_CNT */
-#define ASM_TX_XDEFER_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 344, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_BACKOFF1_CNT */
-#define ASM_TX_BACKOFF1_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 348, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:TX_CSENSE_CNT */
-#define ASM_TX_CSENSE_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 352, 0, 1, 4)
-
-/* ASM:DEV_STATISTICS:RX_IN_BYTES_MSB_CNT */
-#define ASM_RX_IN_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 356, 0, 1, 4)
+/* ASM:DEV_STATISTICS:RX_IN_BYTES_CNT */
+#define ASM_RX_IN_BYTES_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 0, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SYMBOL_ERR_CNT */
+#define ASM_RX_SYMBOL_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 4, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_PAUSE_CNT */
+#define ASM_RX_PAUSE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 8, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_UNSUP_OPCODE_CNT */
+#define ASM_RX_UNSUP_OPCODE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 12, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_OK_BYTES_CNT */
+#define ASM_RX_OK_BYTES_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 16, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_BAD_BYTES_CNT */
+#define ASM_RX_BAD_BYTES_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 20, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_UC_CNT */
+#define ASM_RX_UC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 24, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_MC_CNT */
+#define ASM_RX_MC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 28, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_BC_CNT */
+#define ASM_RX_BC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 32, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_CRC_ERR_CNT */
+#define ASM_RX_CRC_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 36, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_UNDERSIZE_CNT */
+#define ASM_RX_UNDERSIZE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 40, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_FRAGMENTS_CNT */
+#define ASM_RX_FRAGMENTS_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 44, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_IN_RANGE_LEN_ERR_CNT */
+#define ASM_RX_IN_RANGE_LEN_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 48, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 52, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_OVERSIZE_CNT */
+#define ASM_RX_OVERSIZE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 56, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_JABBERS_CNT */
+#define ASM_RX_JABBERS_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 60, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE64_CNT */
+#define ASM_RX_SIZE64_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 64, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE65TO127_CNT */
+#define ASM_RX_SIZE65TO127_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 68, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE128TO255_CNT */
+#define ASM_RX_SIZE128TO255_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 72, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE256TO511_CNT */
+#define ASM_RX_SIZE256TO511_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 76, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE512TO1023_CNT */
+#define ASM_RX_SIZE512TO1023_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 80, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE1024TO1518_CNT */
+#define ASM_RX_SIZE1024TO1518_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 84, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_SIZE1519TOMAX_CNT */
+#define ASM_RX_SIZE1519TOMAX_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 88, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_IPG_SHRINK_CNT */
+#define ASM_RX_IPG_SHRINK_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 92, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_OUT_BYTES_CNT */
+#define ASM_TX_OUT_BYTES_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 96, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_PAUSE_CNT */
+#define ASM_TX_PAUSE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 100, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_OK_BYTES_CNT */
+#define ASM_TX_OK_BYTES_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 104, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_UC_CNT */
+#define ASM_TX_UC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 108, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_MC_CNT */
+#define ASM_TX_MC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 112, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_BC_CNT */
+#define ASM_TX_BC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 116, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE64_CNT */
+#define ASM_TX_SIZE64_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 120, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE65TO127_CNT */
+#define ASM_TX_SIZE65TO127_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 124, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE128TO255_CNT */
+#define ASM_TX_SIZE128TO255_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 128, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE256TO511_CNT */
+#define ASM_TX_SIZE256TO511_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 132, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE512TO1023_CNT */
+#define ASM_TX_SIZE512TO1023_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 136, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE1024TO1518_CNT */
+#define ASM_TX_SIZE1024TO1518_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 140, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_SIZE1519TOMAX_CNT */
+#define ASM_TX_SIZE1519TOMAX_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 144, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_ALIGNMENT_LOST_CNT */
+#define ASM_RX_ALIGNMENT_LOST_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 148, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_TAGGED_FRMS_CNT */
+#define ASM_RX_TAGGED_FRMS_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 152, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_UNTAGGED_FRMS_CNT */
+#define ASM_RX_UNTAGGED_FRMS_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 156, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_TAGGED_FRMS_CNT */
+#define ASM_TX_TAGGED_FRMS_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 160, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_UNTAGGED_FRMS_CNT */
+#define ASM_TX_UNTAGGED_FRMS_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 164, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SYMBOL_ERR_CNT */
+#define ASM_PMAC_RX_SYMBOL_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 168, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_PAUSE_CNT */
+#define ASM_PMAC_RX_PAUSE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 172, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_UNSUP_OPCODE_CNT */
+#define ASM_PMAC_RX_UNSUP_OPCODE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 176, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_CNT */
+#define ASM_PMAC_RX_OK_BYTES_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 180, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_CNT */
+#define ASM_PMAC_RX_BAD_BYTES_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 184, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_UC_CNT */
+#define ASM_PMAC_RX_UC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 188, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_MC_CNT */
+#define ASM_PMAC_RX_MC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 192, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_BC_CNT */
+#define ASM_PMAC_RX_BC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 196, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_CRC_ERR_CNT */
+#define ASM_PMAC_RX_CRC_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 200, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_UNDERSIZE_CNT */
+#define ASM_PMAC_RX_UNDERSIZE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 204, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_FRAGMENTS_CNT */
+#define ASM_PMAC_RX_FRAGMENTS_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 208, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
+#define ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 212, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 216, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_OVERSIZE_CNT */
+#define ASM_PMAC_RX_OVERSIZE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 220, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_JABBERS_CNT */
+#define ASM_PMAC_RX_JABBERS_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 224, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE64_CNT */
+#define ASM_PMAC_RX_SIZE64_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 228, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE65TO127_CNT */
+#define ASM_PMAC_RX_SIZE65TO127_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 232, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE128TO255_CNT */
+#define ASM_PMAC_RX_SIZE128TO255_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 236, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE256TO511_CNT */
+#define ASM_PMAC_RX_SIZE256TO511_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 240, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE512TO1023_CNT */
+#define ASM_PMAC_RX_SIZE512TO1023_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 244, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1024TO1518_CNT */
+#define ASM_PMAC_RX_SIZE1024TO1518_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 248, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1519TOMAX_CNT */
+#define ASM_PMAC_RX_SIZE1519TOMAX_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 252, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_PAUSE_CNT */
+#define ASM_PMAC_TX_PAUSE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 256, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_CNT */
+#define ASM_PMAC_TX_OK_BYTES_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 260, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_UC_CNT */
+#define ASM_PMAC_TX_UC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 264, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_MC_CNT */
+#define ASM_PMAC_TX_MC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 268, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_BC_CNT */
+#define ASM_PMAC_TX_BC_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 272, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE64_CNT */
+#define ASM_PMAC_TX_SIZE64_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 276, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE65TO127_CNT */
+#define ASM_PMAC_TX_SIZE65TO127_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 280, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE128TO255_CNT */
+#define ASM_PMAC_TX_SIZE128TO255_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 284, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE256TO511_CNT */
+#define ASM_PMAC_TX_SIZE256TO511_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 288, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE512TO1023_CNT */
+#define ASM_PMAC_TX_SIZE512TO1023_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 292, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1024TO1518_CNT */
+#define ASM_PMAC_TX_SIZE1024TO1518_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 296, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1519TOMAX_CNT */
+#define ASM_PMAC_TX_SIZE1519TOMAX_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 300, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:PMAC_RX_ALIGNMENT_LOST_CNT */
+#define ASM_PMAC_RX_ALIGNMENT_LOST_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 304, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_ERR_CNT */
+#define ASM_MM_RX_ASSEMBLY_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 308, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_RX_SMD_ERR_CNT */
+#define ASM_MM_RX_SMD_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 312, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_OK_CNT */
+#define ASM_MM_RX_ASSEMBLY_OK_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 316, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_RX_MERGE_FRAG_CNT */
+#define ASM_MM_RX_MERGE_FRAG_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 320, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:MM_TX_PFRAGMENT_CNT */
+#define ASM_MM_TX_PFRAGMENT_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 324, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_MULTI_COLL_CNT */
+#define ASM_TX_MULTI_COLL_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 328, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_LATE_COLL_CNT */
+#define ASM_TX_LATE_COLL_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 332, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_XCOLL_CNT */
+#define ASM_TX_XCOLL_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 336, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_DEFER_CNT */
+#define ASM_TX_DEFER_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 340, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_XDEFER_CNT */
+#define ASM_TX_XDEFER_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 344, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_BACKOFF1_CNT */
+#define ASM_TX_BACKOFF1_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 348, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:TX_CSENSE_CNT */
+#define ASM_TX_CSENSE_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 352, 0, 1, 4)
+
+/* ASM:DEV_STATISTICS:RX_IN_BYTES_MSB_CNT */
+#define ASM_RX_IN_BYTES_MSB_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 356, 0, 1, 4)
#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\
@@ -2133,9 +2348,10 @@ enum sparx5_target {
#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(x)\
FIELD_GET(ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
-/* ASM:DEV_STATISTICS:RX_OK_BYTES_MSB_CNT */
-#define ASM_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 360, 0, 1, 4)
+/* ASM:DEV_STATISTICS:RX_OK_BYTES_MSB_CNT */
+#define ASM_RX_OK_BYTES_MSB_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 360, 0, 1, 4)
#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\
@@ -2143,9 +2359,10 @@ enum sparx5_target {
#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
-/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_MSB_CNT */
-#define ASM_PMAC_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 364, 0, 1, 4)
+/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_MSB_CNT */
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 364, 0, 1, 4)
#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\
@@ -2153,9 +2370,10 @@ enum sparx5_target {
#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
-/* ASM:DEV_STATISTICS:RX_BAD_BYTES_MSB_CNT */
-#define ASM_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 368, 0, 1, 4)
+/* ASM:DEV_STATISTICS:RX_BAD_BYTES_MSB_CNT */
+#define ASM_RX_BAD_BYTES_MSB_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 368, 0, 1, 4)
#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\
@@ -2163,9 +2381,10 @@ enum sparx5_target {
#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_GET(x)\
FIELD_GET(ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
-/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_MSB_CNT */
-#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 372, 0, 1, 4)
+/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_MSB_CNT */
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 372, 0, 1, 4)
#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\
@@ -2173,9 +2392,10 @@ enum sparx5_target {
#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_GET(x)\
FIELD_GET(ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
-/* ASM:DEV_STATISTICS:TX_OUT_BYTES_MSB_CNT */
-#define ASM_TX_OUT_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 376, 0, 1, 4)
+/* ASM:DEV_STATISTICS:TX_OUT_BYTES_MSB_CNT */
+#define ASM_TX_OUT_BYTES_MSB_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 376, 0, 1, 4)
#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\
@@ -2183,9 +2403,10 @@ enum sparx5_target {
#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_GET(x)\
FIELD_GET(ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
-/* ASM:DEV_STATISTICS:TX_OK_BYTES_MSB_CNT */
-#define ASM_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 380, 0, 1, 4)
+/* ASM:DEV_STATISTICS:TX_OK_BYTES_MSB_CNT */
+#define ASM_TX_OK_BYTES_MSB_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 380, 0, 1, 4)
#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\
@@ -2193,9 +2414,10 @@ enum sparx5_target {
#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
-/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_MSB_CNT */
-#define ASM_PMAC_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 384, 0, 1, 4)
+/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_MSB_CNT */
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 384, 0, 1, 4)
#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\
@@ -2203,13 +2425,15 @@ enum sparx5_target {
#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
-/* ASM:DEV_STATISTICS:RX_SYNC_LOST_ERR_CNT */
-#define ASM_RX_SYNC_LOST_ERR_CNT(g) __REG(TARGET_ASM,\
- 0, 1, 0, g, 65, 512, 388, 0, 1, 4)
+/* ASM:DEV_STATISTICS:RX_SYNC_LOST_ERR_CNT */
+#define ASM_RX_SYNC_LOST_ERR_CNT(g) \
+ __REG(TARGET_ASM, 0, 1, 0, g, regs->gcnt[GC_ASM_DEV_STATISTICS], 512, \
+ 388, 0, 1, 4)
-/* ASM:CFG:STAT_CFG */
-#define ASM_STAT_CFG __REG(TARGET_ASM,\
- 0, 1, 33280, 0, 1, 1088, 0, 0, 1, 4)
+/* ASM:CFG:STAT_CFG */
+#define ASM_STAT_CFG \
+ __REG(TARGET_ASM, 0, 1, regs->gaddr[GA_ASM_CFG], 0, 1, \
+ regs->gsize[GW_ASM_CFG], 0, 0, 1, 4)
#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT BIT(0)
#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_SET(x)\
@@ -2217,9 +2441,10 @@ enum sparx5_target {
#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_GET(x)\
FIELD_GET(ASM_STAT_CFG_STAT_CNT_CLR_SHOT, x)
-/* ASM:CFG:PORT_CFG */
-#define ASM_PORT_CFG(r) __REG(TARGET_ASM,\
- 0, 1, 33280, 0, 1, 1088, 540, r, 67, 4)
+/* ASM:CFG:PORT_CFG */
+#define ASM_PORT_CFG(r) \
+ __REG(TARGET_ASM, 0, 1, regs->gaddr[GA_ASM_CFG], 0, 1, \
+ regs->gsize[GW_ASM_CFG], 540, r, regs->rcnt[RC_ASM_PORT_CFG], 4)
#define ASM_PORT_CFG_CSC_STAT_DIS BIT(12)
#define ASM_PORT_CFG_CSC_STAT_DIS_SET(x)\
@@ -2287,9 +2512,10 @@ enum sparx5_target {
#define ASM_PORT_CFG_PFRM_FLUSH_GET(x)\
FIELD_GET(ASM_PORT_CFG_PFRM_FLUSH, x)
-/* ASM:RAM_CTRL:RAM_INIT */
-#define ASM_RAM_INIT __REG(TARGET_ASM,\
- 0, 1, 34832, 0, 1, 4, 0, 0, 1, 4)
+/* ASM:RAM_CTRL:RAM_INIT */
+#define ASM_RAM_INIT \
+ __REG(TARGET_ASM, 0, 1, regs->gaddr[GA_ASM_RAM_CTRL], 0, 1, 4, 0, 0, 1,\
+ 4)
#define ASM_RAM_INIT_RAM_INIT BIT(1)
#define ASM_RAM_INIT_RAM_INIT_SET(x)\
@@ -2303,9 +2529,10 @@ enum sparx5_target {
#define ASM_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(ASM_RAM_INIT_RAM_CFG_HOOK, x)
-/* CLKGEN:LCPLL1:LCPLL1_CORE_CLK_CFG */
-#define CLKGEN_LCPLL1_CORE_CLK_CFG __REG(TARGET_CLKGEN,\
- 0, 1, 12, 0, 1, 36, 0, 0, 1, 4)
+/* SPARX5 ONLY */
+/* CLKGEN:LCPLL1:LCPLL1_CORE_CLK_CFG */
+#define CLKGEN_LCPLL1_CORE_CLK_CFG \
+ __REG(TARGET_CLKGEN, 0, 1, 12, 0, 1, 36, 0, 0, 1, 4)
#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV GENMASK(7, 0)
#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(x)\
@@ -2343,91 +2570,144 @@ enum sparx5_target {
#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA_GET(x)\
FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, x)
-/* CPU:CPU_REGS:PROC_CTRL */
-#define CPU_PROC_CTRL __REG(TARGET_CPU,\
- 0, 1, 0, 0, 1, 204, 176, 0, 1, 4)
+/* CPU:CPU_REGS:PROC_CTRL */
+#define CPU_PROC_CTRL \
+ __REG(TARGET_CPU, 0, 1, 0, 0, 1, regs->gsize[GW_CPU_CPU_REGS], \
+ regs->raddr[RA_CPU_PROC_CTRL], 0, 1, 4)
-#define CPU_PROC_CTRL_AARCH64_MODE_ENA BIT(12)
+#define CPU_PROC_CTRL_AARCH64_MODE_ENA\
+ BIT(regs->fpos[FP_CPU_PROC_CTRL_AARCH64_MODE_ENA])
#define CPU_PROC_CTRL_AARCH64_MODE_ENA_SET(x)\
- FIELD_PREP(CPU_PROC_CTRL_AARCH64_MODE_ENA, x)
+ spx5_field_prep(CPU_PROC_CTRL_AARCH64_MODE_ENA, x)
#define CPU_PROC_CTRL_AARCH64_MODE_ENA_GET(x)\
- FIELD_GET(CPU_PROC_CTRL_AARCH64_MODE_ENA, x)
+ spx5_field_get(CPU_PROC_CTRL_AARCH64_MODE_ENA, x)
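+/* spx5_field_prep()/spx5_field_get() are presumably runtime equivalents of
+ * FIELD_PREP()/FIELD_GET(): once the bit position is read from regs->fpos[]
+ * the mask is no longer a compile-time constant, so the constant-folding
+ * bitfield helpers can no longer be used for these fields.
+ */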
-#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS BIT(11)
+#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS\
+ BIT(regs->fpos[FP_CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS])
#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS_SET(x)\
- FIELD_PREP(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x)
+ spx5_field_prep(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x)
#define CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS_GET(x)\
- FIELD_GET(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x)
+ spx5_field_get(CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS, x)
-#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS BIT(10)
+#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS\
+ BIT(regs->fpos[FP_CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS])
#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS_SET(x)\
- FIELD_PREP(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x)
+ spx5_field_prep(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x)
#define CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS_GET(x)\
- FIELD_GET(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x)
+ spx5_field_get(CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS, x)
-#define CPU_PROC_CTRL_BE_EXCEP_MODE BIT(9)
+#define CPU_PROC_CTRL_BE_EXCEP_MODE\
+ BIT(regs->fpos[FP_CPU_PROC_CTRL_BE_EXCEP_MODE])
#define CPU_PROC_CTRL_BE_EXCEP_MODE_SET(x)\
- FIELD_PREP(CPU_PROC_CTRL_BE_EXCEP_MODE, x)
+ spx5_field_prep(CPU_PROC_CTRL_BE_EXCEP_MODE, x)
#define CPU_PROC_CTRL_BE_EXCEP_MODE_GET(x)\
- FIELD_GET(CPU_PROC_CTRL_BE_EXCEP_MODE, x)
+ spx5_field_get(CPU_PROC_CTRL_BE_EXCEP_MODE, x)
-#define CPU_PROC_CTRL_VINITHI BIT(8)
+#define CPU_PROC_CTRL_VINITHI\
+ BIT(regs->fpos[FP_CPU_PROC_CTRL_VINITHI])
#define CPU_PROC_CTRL_VINITHI_SET(x)\
- FIELD_PREP(CPU_PROC_CTRL_VINITHI, x)
+ spx5_field_prep(CPU_PROC_CTRL_VINITHI, x)
#define CPU_PROC_CTRL_VINITHI_GET(x)\
- FIELD_GET(CPU_PROC_CTRL_VINITHI, x)
+ spx5_field_get(CPU_PROC_CTRL_VINITHI, x)
-#define CPU_PROC_CTRL_CFGTE BIT(7)
+#define CPU_PROC_CTRL_CFGTE\
+ BIT(regs->fpos[FP_CPU_PROC_CTRL_CFGTE])
#define CPU_PROC_CTRL_CFGTE_SET(x)\
- FIELD_PREP(CPU_PROC_CTRL_CFGTE, x)
+ spx5_field_prep(CPU_PROC_CTRL_CFGTE, x)
#define CPU_PROC_CTRL_CFGTE_GET(x)\
- FIELD_GET(CPU_PROC_CTRL_CFGTE, x)
+ spx5_field_get(CPU_PROC_CTRL_CFGTE, x)
-#define CPU_PROC_CTRL_CP15S_DISABLE BIT(6)
+#define CPU_PROC_CTRL_CP15S_DISABLE\
+ BIT(regs->fpos[FP_CPU_PROC_CTRL_CP15S_DISABLE])
#define CPU_PROC_CTRL_CP15S_DISABLE_SET(x)\
- FIELD_PREP(CPU_PROC_CTRL_CP15S_DISABLE, x)
+ spx5_field_prep(CPU_PROC_CTRL_CP15S_DISABLE, x)
#define CPU_PROC_CTRL_CP15S_DISABLE_GET(x)\
- FIELD_GET(CPU_PROC_CTRL_CP15S_DISABLE, x)
+ spx5_field_get(CPU_PROC_CTRL_CP15S_DISABLE, x)
-#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE BIT(5)
+#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE\
+ BIT(regs->fpos[FP_CPU_PROC_CTRL_PROC_CRYPTO_DISABLE])
#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE_SET(x)\
- FIELD_PREP(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x)
+ spx5_field_prep(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x)
#define CPU_PROC_CTRL_PROC_CRYPTO_DISABLE_GET(x)\
- FIELD_GET(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x)
+ spx5_field_get(CPU_PROC_CTRL_PROC_CRYPTO_DISABLE, x)
+/* SPARX5 ONLY */
#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA BIT(4)
#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_SET(x)\
FIELD_PREP(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA, x)
#define CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_GET(x)\
FIELD_GET(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA, x)
+/* SPARX5 ONLY */
#define CPU_PROC_CTRL_ACP_AWCACHE BIT(3)
#define CPU_PROC_CTRL_ACP_AWCACHE_SET(x)\
FIELD_PREP(CPU_PROC_CTRL_ACP_AWCACHE, x)
#define CPU_PROC_CTRL_ACP_AWCACHE_GET(x)\
FIELD_GET(CPU_PROC_CTRL_ACP_AWCACHE, x)
+/* SPARX5 ONLY */
#define CPU_PROC_CTRL_ACP_ARCACHE BIT(2)
#define CPU_PROC_CTRL_ACP_ARCACHE_SET(x)\
FIELD_PREP(CPU_PROC_CTRL_ACP_ARCACHE, x)
#define CPU_PROC_CTRL_ACP_ARCACHE_GET(x)\
FIELD_GET(CPU_PROC_CTRL_ACP_ARCACHE, x)
-#define CPU_PROC_CTRL_L2_FLUSH_REQ BIT(1)
+#define CPU_PROC_CTRL_L2_FLUSH_REQ\
+ BIT(regs->fpos[FP_CPU_PROC_CTRL_L2_FLUSH_REQ])
#define CPU_PROC_CTRL_L2_FLUSH_REQ_SET(x)\
- FIELD_PREP(CPU_PROC_CTRL_L2_FLUSH_REQ, x)
+ spx5_field_prep(CPU_PROC_CTRL_L2_FLUSH_REQ, x)
#define CPU_PROC_CTRL_L2_FLUSH_REQ_GET(x)\
- FIELD_GET(CPU_PROC_CTRL_L2_FLUSH_REQ, x)
+ spx5_field_get(CPU_PROC_CTRL_L2_FLUSH_REQ, x)
+/* SPARX5 ONLY */
#define CPU_PROC_CTRL_ACP_DISABLE BIT(0)
#define CPU_PROC_CTRL_ACP_DISABLE_SET(x)\
FIELD_PREP(CPU_PROC_CTRL_ACP_DISABLE, x)
#define CPU_PROC_CTRL_ACP_DISABLE_GET(x)\
FIELD_GET(CPU_PROC_CTRL_ACP_DISABLE, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
-#define DEV10G_MAC_ENA_CFG(t) __REG(TARGET_DEV10G,\
- t, 12, 0, 0, 1, 60, 0, 0, 1, 4)
+/* DEV1G:PHASE_DETECTOR_CTRL:PHAD_CTRL */
+#define DEV2G5_PHAD_CTRL(t, g) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 200, g, 2, \
+ regs->gsize[GW_DEV2G5_PHASE_DETECTOR_CTRL], 0, 0, 1, 4)
+
+#define DEV2G5_PHAD_CTRL_PHAD_ENA\
+ BIT(regs->fpos[FP_DEV2G5_PHAD_CTRL_PHAD_ENA])
+#define DEV2G5_PHAD_CTRL_PHAD_ENA_SET(x)\
+ spx5_field_prep(DEV2G5_PHAD_CTRL_PHAD_ENA, x)
+#define DEV2G5_PHAD_CTRL_PHAD_ENA_GET(x)\
+ spx5_field_get(DEV2G5_PHAD_CTRL_PHAD_ENA, x)
+
+/* LAN969X ONLY */
+#define DEV2G5_PHAD_CTRL_DIV_CFG GENMASK(11, 9)
+#define DEV2G5_PHAD_CTRL_DIV_CFG_SET(x)\
+ FIELD_PREP(DEV2G5_PHAD_CTRL_DIV_CFG, x)
+#define DEV2G5_PHAD_CTRL_DIV_CFG_GET(x)\
+ FIELD_GET(DEV2G5_PHAD_CTRL_DIV_CFG, x)
+
+/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV10G_MAC_ENA_CFG(t) \
+ __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 0, 0, 1, 60, 0, 0, 1, \
+ 4)
#define DEV10G_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV10G_MAC_ENA_CFG_RX_ENA_SET(x)\
@@ -2441,9 +2721,10 @@ enum sparx5_target {
#define DEV10G_MAC_ENA_CFG_TX_ENA_GET(x)\
FIELD_GET(DEV10G_MAC_ENA_CFG_TX_ENA, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
-#define DEV10G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV10G,\
- t, 12, 0, 0, 1, 60, 8, 0, 1, 4)
+/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV10G_MAC_MAXLEN_CFG(t) \
+ __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 0, 0, 1, 60, 8, 0, 1, \
+ 4)
#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
@@ -2457,9 +2738,10 @@ enum sparx5_target {
#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
FIELD_GET(DEV10G_MAC_MAXLEN_CFG_MAX_LEN, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_NUM_TAGS_CFG */
-#define DEV10G_MAC_NUM_TAGS_CFG(t) __REG(TARGET_DEV10G,\
- t, 12, 0, 0, 1, 60, 12, 0, 1, 4)
+/* DEV10G:MAC_CFG_STATUS:MAC_NUM_TAGS_CFG */
+#define DEV10G_MAC_NUM_TAGS_CFG(t) \
+ __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 0, 0, 1, 60, 12, 0, 1, \
+ 4)
#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS GENMASK(1, 0)
#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(x)\
@@ -2467,9 +2749,10 @@ enum sparx5_target {
#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_GET(x)\
FIELD_GET(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_TAGS_CFG */
-#define DEV10G_MAC_TAGS_CFG(t, r) __REG(TARGET_DEV10G,\
- t, 12, 0, 0, 1, 60, 16, r, 3, 4)
+/* DEV10G:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEV10G_MAC_TAGS_CFG(t, r) \
+ __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 0, 0, 1, 60, 16, r, 3, \
+ 4)
#define DEV10G_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16)
#define DEV10G_MAC_TAGS_CFG_TAG_ID_SET(x)\
@@ -2483,9 +2766,10 @@ enum sparx5_target {
#define DEV10G_MAC_TAGS_CFG_TAG_ENA_GET(x)\
FIELD_GET(DEV10G_MAC_TAGS_CFG_TAG_ENA, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
-#define DEV10G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV10G,\
- t, 12, 0, 0, 1, 60, 28, 0, 1, 4)
+/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV10G_MAC_ADV_CHK_CFG(t) \
+ __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 0, 0, 1, 60, 28, 0, 1, \
+ 4)
#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
@@ -2529,9 +2813,10 @@ enum sparx5_target {
#define DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\
FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_TX_MONITOR_STICKY */
-#define DEV10G_MAC_TX_MONITOR_STICKY(t) __REG(TARGET_DEV10G,\
- t, 12, 0, 0, 1, 60, 48, 0, 1, 4)
+/* DEV10G:MAC_CFG_STATUS:MAC_TX_MONITOR_STICKY */
+#define DEV10G_MAC_TX_MONITOR_STICKY(t) \
+ __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 0, 0, 1, 60, 48, 0, 1, \
+ 4)
#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY BIT(4)
#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY_SET(x)\
@@ -2563,9 +2848,10 @@ enum sparx5_target {
#define DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY_GET(x)\
FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY, x)
-/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
-#define DEV10G_DEV_RST_CTRL(t) __REG(TARGET_DEV10G,\
- t, 12, 436, 0, 1, 52, 0, 0, 1, 4)
+/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV10G_DEV_RST_CTRL(t) \
+ __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 436, 0, 1, 52, 0, 0, 1,\
+ 4)
#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28)
#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
@@ -2621,9 +2907,15 @@ enum sparx5_target {
#define DEV10G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
FIELD_GET(DEV10G_DEV_RST_CTRL_MAC_RX_RST, x)
-/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
-#define DEV10G_PCS25G_CFG(t) __REG(TARGET_DEV10G,\
- t, 12, 488, 0, 1, 32, 0, 0, 1, 4)
+/* DEV10G:DEV_CFG_STATUS:PTP_STAMPER_CFG */
+#define DEV10G_PTP_STAMPER_CFG(t) \
+ __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 436, 0, 1, 52, 20, 0, \
+ 1, 4)
+
+/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
+#define DEV10G_PCS25G_CFG(t) \
+ __REG(TARGET_DEV10G, t, regs->tsize[TC_DEV10G], 488, 0, 1, 32, 0, 0, 1,\
+ 4)
#define DEV10G_PCS25G_CFG_PCS25G_ENA BIT(0)
#define DEV10G_PCS25G_CFG_PCS25G_ENA_SET(x)\
@@ -2631,9 +2923,10 @@ enum sparx5_target {
#define DEV10G_PCS25G_CFG_PCS25G_ENA_GET(x)\
FIELD_GET(DEV10G_PCS25G_CFG_PCS25G_ENA, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
-#define DEV25G_MAC_ENA_CFG(t) __REG(TARGET_DEV25G,\
- t, 8, 0, 0, 1, 60, 0, 0, 1, 4)
+/* SPARX5 ONLY */
+/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV25G_MAC_ENA_CFG(t) \
+ __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 0, 0, 1, 4)
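+/* The DEV25G macros keep hardcoded instance counts (8) instead of
+ * regs->tsize[] lookups; presumably the 25G devices exist only on Sparx5,
+ * so the "SPARX5 ONLY" blocks never need per-platform indirection.
+ */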
#define DEV25G_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV25G_MAC_ENA_CFG_RX_ENA_SET(x)\
@@ -2647,9 +2940,10 @@ enum sparx5_target {
#define DEV25G_MAC_ENA_CFG_TX_ENA_GET(x)\
FIELD_GET(DEV25G_MAC_ENA_CFG_TX_ENA, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
-#define DEV25G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV25G,\
- t, 8, 0, 0, 1, 60, 8, 0, 1, 4)
+/* SPARX5 ONLY */
+/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV25G_MAC_MAXLEN_CFG(t) \
+ __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 8, 0, 1, 4)
#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
@@ -2663,9 +2957,10 @@ enum sparx5_target {
#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
FIELD_GET(DEV25G_MAC_MAXLEN_CFG_MAX_LEN, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
-#define DEV25G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV25G,\
- t, 8, 0, 0, 1, 60, 28, 0, 1, 4)
+/* SPARX5 ONLY */
+/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV25G_MAC_ADV_CHK_CFG(t) \
+ __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 28, 0, 1, 4)
#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
@@ -2709,9 +3004,10 @@ enum sparx5_target {
#define DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\
FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
-/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
-#define DEV25G_DEV_RST_CTRL(t) __REG(TARGET_DEV25G,\
- t, 8, 436, 0, 1, 52, 0, 0, 1, 4)
+/* SPARX5 ONLY */
+/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV25G_DEV_RST_CTRL(t) \
+ __REG(TARGET_DEV25G, t, 8, 436, 0, 1, 52, 0, 0, 1, 4)
#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28)
#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
@@ -2767,9 +3063,10 @@ enum sparx5_target {
#define DEV25G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
FIELD_GET(DEV25G_DEV_RST_CTRL_MAC_RX_RST, x)
-/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
-#define DEV25G_PCS25G_CFG(t) __REG(TARGET_DEV25G,\
- t, 8, 488, 0, 1, 32, 0, 0, 1, 4)
+/* SPARX5 ONLY */
+/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
+#define DEV25G_PCS25G_CFG(t) \
+ __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 0, 0, 1, 4)
#define DEV25G_PCS25G_CFG_PCS25G_ENA BIT(0)
#define DEV25G_PCS25G_CFG_PCS25G_ENA_SET(x)\
@@ -2777,9 +3074,10 @@ enum sparx5_target {
#define DEV25G_PCS25G_CFG_PCS25G_ENA_GET(x)\
FIELD_GET(DEV25G_PCS25G_CFG_PCS25G_ENA, x)
-/* DEV10G:PCS25G_CFG_STATUS:PCS25G_SD_CFG */
-#define DEV25G_PCS25G_SD_CFG(t) __REG(TARGET_DEV25G,\
- t, 8, 488, 0, 1, 32, 4, 0, 1, 4)
+/* SPARX5 ONLY */
+/* DEV10G:PCS25G_CFG_STATUS:PCS25G_SD_CFG */
+#define DEV25G_PCS25G_SD_CFG(t) \
+ __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 4, 0, 1, 4)
#define DEV25G_PCS25G_SD_CFG_SD_SEL BIT(8)
#define DEV25G_PCS25G_SD_CFG_SD_SEL_SET(x)\
@@ -2799,9 +3097,10 @@ enum sparx5_target {
#define DEV25G_PCS25G_SD_CFG_SD_ENA_GET(x)\
FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_ENA, x)
-/* DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */
-#define DEV2G5_DEV_RST_CTRL(t) __REG(TARGET_DEV2G5,\
- t, 65, 0, 0, 1, 36, 0, 0, 1, 4)
+/* DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV2G5_DEV_RST_CTRL(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 0, 0, 1, 36, 0, 0, 1, \
+ 4)
#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(23)
#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
@@ -2851,9 +3150,10 @@ enum sparx5_target {
#define DEV2G5_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
FIELD_GET(DEV2G5_DEV_RST_CTRL_MAC_RX_RST, x)
-/* DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */
-#define DEV2G5_MAC_ENA_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 52, 0, 1, 36, 0, 0, 1, 4)
+/* DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV2G5_MAC_ENA_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 0, 0, 1, \
+ 4)
#define DEV2G5_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV2G5_MAC_ENA_CFG_RX_ENA_SET(x)\
@@ -2867,9 +3167,10 @@ enum sparx5_target {
#define DEV2G5_MAC_ENA_CFG_TX_ENA_GET(x)\
FIELD_GET(DEV2G5_MAC_ENA_CFG_TX_ENA, x)
-/* DEV1G:MAC_CFG_STATUS:MAC_MODE_CFG */
-#define DEV2G5_MAC_MODE_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 52, 0, 1, 36, 4, 0, 1, 4)
+/* DEV1G:MAC_CFG_STATUS:MAC_MODE_CFG */
+#define DEV2G5_MAC_MODE_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 4, 0, 1, \
+ 4)
#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8)
#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA_SET(x)\
@@ -2889,9 +3190,10 @@ enum sparx5_target {
#define DEV2G5_MAC_MODE_CFG_FDX_ENA_GET(x)\
FIELD_GET(DEV2G5_MAC_MODE_CFG_FDX_ENA, x)
-/* DEV1G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
-#define DEV2G5_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 52, 0, 1, 36, 8, 0, 1, 4)
+/* DEV1G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV2G5_MAC_MAXLEN_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 8, 0, 1, \
+ 4)
#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0)
#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
@@ -2899,9 +3201,10 @@ enum sparx5_target {
#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
FIELD_GET(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, x)
-/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */
-#define DEV2G5_MAC_TAGS_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 52, 0, 1, 36, 12, 0, 1, 4)
+/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEV2G5_MAC_TAGS_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 12, 0, 1,\
+ 4)
#define DEV2G5_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16)
#define DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(x)\
@@ -2927,9 +3230,10 @@ enum sparx5_target {
#define DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\
FIELD_GET(DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
-/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG2 */
-#define DEV2G5_MAC_TAGS_CFG2(t) __REG(TARGET_DEV2G5,\
- t, 65, 52, 0, 1, 36, 16, 0, 1, 4)
+/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG2 */
+#define DEV2G5_MAC_TAGS_CFG2(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 16, 0, 1,\
+ 4)
#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3 GENMASK(31, 16)
#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3_SET(x)\
@@ -2943,9 +3247,10 @@ enum sparx5_target {
#define DEV2G5_MAC_TAGS_CFG2_TAG_ID2_GET(x)\
FIELD_GET(DEV2G5_MAC_TAGS_CFG2_TAG_ID2, x)
-/* DEV1G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
-#define DEV2G5_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 52, 0, 1, 36, 20, 0, 1, 4)
+/* DEV1G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV2G5_MAC_ADV_CHK_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 20, 0, 1,\
+ 4)
#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0)
#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_SET(x)\
@@ -2953,9 +3258,10 @@ enum sparx5_target {
#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_GET(x)\
FIELD_GET(DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA, x)
-/* DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */
-#define DEV2G5_MAC_IFG_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 52, 0, 1, 36, 24, 0, 1, 4)
+/* DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */
+#define DEV2G5_MAC_IFG_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 24, 0, 1,\
+ 4)
#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK_SET(x)\
@@ -2981,9 +3287,10 @@ enum sparx5_target {
#define DEV2G5_MAC_IFG_CFG_RX_IFG1_GET(x)\
FIELD_GET(DEV2G5_MAC_IFG_CFG_RX_IFG1, x)
-/* DEV1G:MAC_CFG_STATUS:MAC_HDX_CFG */
-#define DEV2G5_MAC_HDX_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 52, 0, 1, 36, 28, 0, 1, 4)
+/* DEV1G:MAC_CFG_STATUS:MAC_HDX_CFG */
+#define DEV2G5_MAC_HDX_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 52, 0, 1, 36, 28, 0, 1,\
+ 4)
#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26)
#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC_SET(x)\
@@ -3015,9 +3322,10 @@ enum sparx5_target {
#define DEV2G5_MAC_HDX_CFG_LATE_COL_POS_GET(x)\
FIELD_GET(DEV2G5_MAC_HDX_CFG_LATE_COL_POS, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_CFG */
-#define DEV2G5_PCS1G_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 0, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_CFG */
+#define DEV2G5_PCS1G_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 0, 0, 1, \
+ 4)
#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE BIT(4)
#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE_SET(x)\
@@ -3037,9 +3345,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_CFG_PCS_ENA_GET(x)\
FIELD_GET(DEV2G5_PCS1G_CFG_PCS_ENA, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */
-#define DEV2G5_PCS1G_MODE_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 4, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */
+#define DEV2G5_PCS1G_MODE_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 4, 0, 1, \
+ 4)
#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4)
#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA_SET(x)\
@@ -3059,9 +3368,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\
FIELD_GET(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_SD_CFG */
-#define DEV2G5_PCS1G_SD_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 8, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_SD_CFG */
+#define DEV2G5_PCS1G_SD_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 8, 0, 1, \
+ 4)
#define DEV2G5_PCS1G_SD_CFG_SD_SEL BIT(8)
#define DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(x)\
@@ -3081,9 +3391,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_SD_CFG_SD_ENA_GET(x)\
FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_ENA, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */
-#define DEV2G5_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 12, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */
+#define DEV2G5_PCS1G_ANEG_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 12, 0, 1,\
+ 4)
#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY GENMASK(31, 16)
#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\
@@ -3109,9 +3420,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_GET(x)\
FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LB_CFG */
-#define DEV2G5_PCS1G_LB_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 20, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LB_CFG */
+#define DEV2G5_PCS1G_LB_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 20, 0, 1,\
+ 4)
#define DEV2G5_PCS1G_LB_CFG_RA_ENA BIT(4)
#define DEV2G5_PCS1G_LB_CFG_RA_ENA_SET(x)\
@@ -3131,9 +3443,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA_GET(x)\
FIELD_GET(DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */
-#define DEV2G5_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 32, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */
+#define DEV2G5_PCS1G_ANEG_STATUS(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 32, 0, 1,\
+ 4)
#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY GENMASK(31, 16)
#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_SET(x)\
@@ -3159,9 +3472,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(x)\
FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */
-#define DEV2G5_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 40, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */
+#define DEV2G5_PCS1G_LINK_STATUS(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 40, 0, 1,\
+ 4)
#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR GENMASK(15, 12)
#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR_SET(x)\
@@ -3187,9 +3501,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(x)\
FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS, x)
-/* DEV1G:PCS1G_CFG_STATUS:PCS1G_STICKY */
-#define DEV2G5_PCS1G_STICKY(t) __REG(TARGET_DEV2G5,\
- t, 65, 88, 0, 1, 68, 48, 0, 1, 4)
+/* DEV1G:PCS1G_CFG_STATUS:PCS1G_STICKY */
+#define DEV2G5_PCS1G_STICKY(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 88, 0, 1, 68, 48, 0, 1,\
+ 4)
#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\
@@ -3203,9 +3518,10 @@ enum sparx5_target {
#define DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY_GET(x)\
FIELD_GET(DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY, x)
-/* DEV1G:PCS_FX100_CONFIGURATION:PCS_FX100_CFG */
-#define DEV2G5_PCS_FX100_CFG(t) __REG(TARGET_DEV2G5,\
- t, 65, 164, 0, 1, 4, 0, 0, 1, 4)
+/* DEV1G:PCS_FX100_CONFIGURATION:PCS_FX100_CFG */
+#define DEV2G5_PCS_FX100_CFG(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 164, 0, 1, 4, 0, 0, 1, \
+ 4)
#define DEV2G5_PCS_FX100_CFG_SD_SEL BIT(26)
#define DEV2G5_PCS_FX100_CFG_SD_SEL_SET(x)\
@@ -3285,9 +3601,10 @@ enum sparx5_target {
#define DEV2G5_PCS_FX100_CFG_PCS_ENA_GET(x)\
FIELD_GET(DEV2G5_PCS_FX100_CFG_PCS_ENA, x)
-/* DEV1G:PCS_FX100_STATUS:PCS_FX100_STATUS */
-#define DEV2G5_PCS_FX100_STATUS(t) __REG(TARGET_DEV2G5,\
- t, 65, 168, 0, 1, 4, 0, 0, 1, 4)
+/* DEV1G:PCS_FX100_STATUS:PCS_FX100_STATUS */
+#define DEV2G5_PCS_FX100_STATUS(t) \
+ __REG(TARGET_DEV2G5, t, regs->tsize[TC_DEV2G5], 168, 0, 1, 4, 0, 0, 1, \
+ 4)
#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP GENMASK(11, 8)
#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP_SET(x)\
@@ -3337,9 +3654,9 @@ enum sparx5_target {
#define DEV2G5_PCS_FX100_STATUS_SYNC_STATUS_GET(x)\
FIELD_GET(DEV2G5_PCS_FX100_STATUS_SYNC_STATUS, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
-#define DEV5G_MAC_ENA_CFG(t) __REG(TARGET_DEV5G,\
- t, 13, 0, 0, 1, 60, 0, 0, 1, 4)
+/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV5G_MAC_ENA_CFG(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 0, 0, 1, 60, 0, 0, 1, 4)
#define DEV5G_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV5G_MAC_ENA_CFG_RX_ENA_SET(x)\
@@ -3353,9 +3670,9 @@ enum sparx5_target {
#define DEV5G_MAC_ENA_CFG_TX_ENA_GET(x)\
FIELD_GET(DEV5G_MAC_ENA_CFG_TX_ENA, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
-#define DEV5G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV5G,\
- t, 13, 0, 0, 1, 60, 8, 0, 1, 4)
+/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV5G_MAC_MAXLEN_CFG(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 0, 0, 1, 60, 8, 0, 1, 4)
#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
@@ -3369,9 +3686,10 @@ enum sparx5_target {
#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
FIELD_GET(DEV5G_MAC_MAXLEN_CFG_MAX_LEN, x)
-/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
-#define DEV5G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV5G,\
- t, 13, 0, 0, 1, 60, 28, 0, 1, 4)
+/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
+#define DEV5G_MAC_ADV_CHK_CFG(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 0, 0, 1, 60, 28, 0, 1, \
+ 4)
#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
@@ -3415,325 +3733,405 @@ enum sparx5_target {
#define DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA_GET(x)\
FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SYMBOL_ERR_CNT */
-#define DEV5G_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 0, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_PAUSE_CNT */
-#define DEV5G_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 4, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_UNSUP_OPCODE_CNT */
-#define DEV5G_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 8, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_UC_CNT */
-#define DEV5G_RX_UC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 12, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_MC_CNT */
-#define DEV5G_RX_MC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 16, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_BC_CNT */
-#define DEV5G_RX_BC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 20, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_CRC_ERR_CNT */
-#define DEV5G_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 24, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_UNDERSIZE_CNT */
-#define DEV5G_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 28, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_FRAGMENTS_CNT */
-#define DEV5G_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 32, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_IN_RANGE_LEN_ERR_CNT */
-#define DEV5G_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 36, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_OUT_OF_RANGE_LEN_ERR_CNT */
-#define DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 40, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_OVERSIZE_CNT */
-#define DEV5G_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 44, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_JABBERS_CNT */
-#define DEV5G_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 48, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE64_CNT */
-#define DEV5G_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 52, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE65TO127_CNT */
-#define DEV5G_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 56, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE128TO255_CNT */
-#define DEV5G_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 60, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE256TO511_CNT */
-#define DEV5G_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 64, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE512TO1023_CNT */
-#define DEV5G_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 68, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1024TO1518_CNT */
-#define DEV5G_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 72, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1519TOMAX_CNT */
-#define DEV5G_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 76, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_IPG_SHRINK_CNT */
-#define DEV5G_RX_IPG_SHRINK_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 80, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_PAUSE_CNT */
-#define DEV5G_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 84, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_UC_CNT */
-#define DEV5G_TX_UC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 88, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_MC_CNT */
-#define DEV5G_TX_MC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 92, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_BC_CNT */
-#define DEV5G_TX_BC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 96, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE64_CNT */
-#define DEV5G_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 100, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE65TO127_CNT */
-#define DEV5G_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 104, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE128TO255_CNT */
-#define DEV5G_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 108, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE256TO511_CNT */
-#define DEV5G_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 112, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE512TO1023_CNT */
-#define DEV5G_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 116, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1024TO1518_CNT */
-#define DEV5G_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 120, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1519TOMAX_CNT */
-#define DEV5G_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 124, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_ALIGNMENT_LOST_CNT */
-#define DEV5G_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 128, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_TAGGED_FRMS_CNT */
-#define DEV5G_RX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 132, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_UNTAGGED_FRMS_CNT */
-#define DEV5G_RX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 136, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_TAGGED_FRMS_CNT */
-#define DEV5G_TX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 140, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:TX_UNTAGGED_FRMS_CNT */
-#define DEV5G_TX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 144, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SYMBOL_ERR_CNT */
-#define DEV5G_PMAC_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 148, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_PAUSE_CNT */
-#define DEV5G_PMAC_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 152, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNSUP_OPCODE_CNT */
-#define DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 156, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UC_CNT */
-#define DEV5G_PMAC_RX_UC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 160, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_MC_CNT */
-#define DEV5G_PMAC_RX_MC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 164, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_BC_CNT */
-#define DEV5G_PMAC_RX_BC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 168, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_CRC_ERR_CNT */
-#define DEV5G_PMAC_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 172, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNDERSIZE_CNT */
-#define DEV5G_PMAC_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 176, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_FRAGMENTS_CNT */
-#define DEV5G_PMAC_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 180, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
-#define DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 184, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
-#define DEV5G_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 188, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OVERSIZE_CNT */
-#define DEV5G_PMAC_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 192, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_JABBERS_CNT */
-#define DEV5G_PMAC_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 196, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE64_CNT */
-#define DEV5G_PMAC_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 200, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE65TO127_CNT */
-#define DEV5G_PMAC_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 204, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE128TO255_CNT */
-#define DEV5G_PMAC_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 208, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE256TO511_CNT */
-#define DEV5G_PMAC_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 212, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE512TO1023_CNT */
-#define DEV5G_PMAC_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 216, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1024TO1518_CNT */
-#define DEV5G_PMAC_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 220, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1519TOMAX_CNT */
-#define DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 224, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_PAUSE_CNT */
-#define DEV5G_PMAC_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 228, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_UC_CNT */
-#define DEV5G_PMAC_TX_UC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 232, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_MC_CNT */
-#define DEV5G_PMAC_TX_MC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 236, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_BC_CNT */
-#define DEV5G_PMAC_TX_BC_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 240, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE64_CNT */
-#define DEV5G_PMAC_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 244, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE65TO127_CNT */
-#define DEV5G_PMAC_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 248, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE128TO255_CNT */
-#define DEV5G_PMAC_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 252, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE256TO511_CNT */
-#define DEV5G_PMAC_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 256, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE512TO1023_CNT */
-#define DEV5G_PMAC_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 260, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1024TO1518_CNT */
-#define DEV5G_PMAC_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 264, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1519TOMAX_CNT */
-#define DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 268, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_ALIGNMENT_LOST_CNT */
-#define DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 272, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_ERR_CNT */
-#define DEV5G_MM_RX_ASSEMBLY_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 276, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_SMD_ERR_CNT */
-#define DEV5G_MM_RX_SMD_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 280, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_OK_CNT */
-#define DEV5G_MM_RX_ASSEMBLY_OK_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 284, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_MERGE_FRAG_CNT */
-#define DEV5G_MM_RX_MERGE_FRAG_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 288, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:MM_TX_PFRAGMENT_CNT */
-#define DEV5G_MM_TX_PFRAGMENT_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 292, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_HIH_CKSM_ERR_CNT */
-#define DEV5G_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 296, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:RX_XGMII_PROT_ERR_CNT */
-#define DEV5G_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 300, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_HIH_CKSM_ERR_CNT */
-#define DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 304, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_XGMII_PROT_ERR_CNT */
-#define DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 60, 0, 1, 312, 308, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_CNT */
-#define DEV5G_RX_IN_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 0, 0, 1, 4)
-
-/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_MSB_CNT */
-#define DEV5G_RX_IN_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 4, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SYMBOL_ERR_CNT */
+#define DEV5G_RX_SYMBOL_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 0, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_PAUSE_CNT */
+#define DEV5G_RX_PAUSE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 4, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UNSUP_OPCODE_CNT */
+#define DEV5G_RX_UNSUP_OPCODE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 8, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UC_CNT */
+#define DEV5G_RX_UC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 12, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_MC_CNT */
+#define DEV5G_RX_MC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 16, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_BC_CNT */
+#define DEV5G_RX_BC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 20, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_CRC_ERR_CNT */
+#define DEV5G_RX_CRC_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 24, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UNDERSIZE_CNT */
+#define DEV5G_RX_UNDERSIZE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 28, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_FRAGMENTS_CNT */
+#define DEV5G_RX_FRAGMENTS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 32, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_IN_RANGE_LEN_ERR_CNT */
+#define DEV5G_RX_IN_RANGE_LEN_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 36, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 40, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_OVERSIZE_CNT */
+#define DEV5G_RX_OVERSIZE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 44, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_JABBERS_CNT */
+#define DEV5G_RX_JABBERS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 48, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE64_CNT */
+#define DEV5G_RX_SIZE64_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 52, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE65TO127_CNT */
+#define DEV5G_RX_SIZE65TO127_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 56, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE128TO255_CNT */
+#define DEV5G_RX_SIZE128TO255_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 60, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE256TO511_CNT */
+#define DEV5G_RX_SIZE256TO511_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 64, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE512TO1023_CNT */
+#define DEV5G_RX_SIZE512TO1023_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 68, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1024TO1518_CNT */
+#define DEV5G_RX_SIZE1024TO1518_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 72, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1519TOMAX_CNT */
+#define DEV5G_RX_SIZE1519TOMAX_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 76, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_IPG_SHRINK_CNT */
+#define DEV5G_RX_IPG_SHRINK_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 80, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_PAUSE_CNT */
+#define DEV5G_TX_PAUSE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 84, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_UC_CNT */
+#define DEV5G_TX_UC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 88, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_MC_CNT */
+#define DEV5G_TX_MC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 92, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_BC_CNT */
+#define DEV5G_TX_BC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 96, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE64_CNT */
+#define DEV5G_TX_SIZE64_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 100, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE65TO127_CNT */
+#define DEV5G_TX_SIZE65TO127_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 104, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE128TO255_CNT */
+#define DEV5G_TX_SIZE128TO255_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 108, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE256TO511_CNT */
+#define DEV5G_TX_SIZE256TO511_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 112, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE512TO1023_CNT */
+#define DEV5G_TX_SIZE512TO1023_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 116, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1024TO1518_CNT */
+#define DEV5G_TX_SIZE1024TO1518_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 120, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1519TOMAX_CNT */
+#define DEV5G_TX_SIZE1519TOMAX_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 124, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_ALIGNMENT_LOST_CNT */
+#define DEV5G_RX_ALIGNMENT_LOST_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 128, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_TAGGED_FRMS_CNT */
+#define DEV5G_RX_TAGGED_FRMS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 132, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_UNTAGGED_FRMS_CNT */
+#define DEV5G_RX_UNTAGGED_FRMS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 136, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_TAGGED_FRMS_CNT */
+#define DEV5G_TX_TAGGED_FRMS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 140, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:TX_UNTAGGED_FRMS_CNT */
+#define DEV5G_TX_UNTAGGED_FRMS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 144, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SYMBOL_ERR_CNT */
+#define DEV5G_PMAC_RX_SYMBOL_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 148, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_PAUSE_CNT */
+#define DEV5G_PMAC_RX_PAUSE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 152, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNSUP_OPCODE_CNT */
+#define DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 156, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UC_CNT */
+#define DEV5G_PMAC_RX_UC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 160, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_MC_CNT */
+#define DEV5G_PMAC_RX_MC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 164, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_BC_CNT */
+#define DEV5G_PMAC_RX_BC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 168, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_CRC_ERR_CNT */
+#define DEV5G_PMAC_RX_CRC_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 172, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNDERSIZE_CNT */
+#define DEV5G_PMAC_RX_UNDERSIZE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 176, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_FRAGMENTS_CNT */
+#define DEV5G_PMAC_RX_FRAGMENTS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 180, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
+#define DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 184, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
+#define DEV5G_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 188, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OVERSIZE_CNT */
+#define DEV5G_PMAC_RX_OVERSIZE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 192, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_JABBERS_CNT */
+#define DEV5G_PMAC_RX_JABBERS_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 196, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE64_CNT */
+#define DEV5G_PMAC_RX_SIZE64_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 200, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE65TO127_CNT */
+#define DEV5G_PMAC_RX_SIZE65TO127_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 204, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE128TO255_CNT */
+#define DEV5G_PMAC_RX_SIZE128TO255_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 208, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE256TO511_CNT */
+#define DEV5G_PMAC_RX_SIZE256TO511_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 212, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE512TO1023_CNT */
+#define DEV5G_PMAC_RX_SIZE512TO1023_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 216, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1024TO1518_CNT */
+#define DEV5G_PMAC_RX_SIZE1024TO1518_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 220, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1519TOMAX_CNT */
+#define DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 224, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_PAUSE_CNT */
+#define DEV5G_PMAC_TX_PAUSE_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 228, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_UC_CNT */
+#define DEV5G_PMAC_TX_UC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 232, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_MC_CNT */
+#define DEV5G_PMAC_TX_MC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 236, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_BC_CNT */
+#define DEV5G_PMAC_TX_BC_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 240, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE64_CNT */
+#define DEV5G_PMAC_TX_SIZE64_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 244, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE65TO127_CNT */
+#define DEV5G_PMAC_TX_SIZE65TO127_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 248, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE128TO255_CNT */
+#define DEV5G_PMAC_TX_SIZE128TO255_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 252, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE256TO511_CNT */
+#define DEV5G_PMAC_TX_SIZE256TO511_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 256, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE512TO1023_CNT */
+#define DEV5G_PMAC_TX_SIZE512TO1023_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 260, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1024TO1518_CNT */
+#define DEV5G_PMAC_TX_SIZE1024TO1518_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 264, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1519TOMAX_CNT */
+#define DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 268, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_ALIGNMENT_LOST_CNT */
+#define DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 272, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_ERR_CNT */
+#define DEV5G_MM_RX_ASSEMBLY_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 276, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_SMD_ERR_CNT */
+#define DEV5G_MM_RX_SMD_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 280, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_OK_CNT */
+#define DEV5G_MM_RX_ASSEMBLY_OK_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 284, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_MERGE_FRAG_CNT */
+#define DEV5G_MM_RX_MERGE_FRAG_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 288, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:MM_TX_PFRAGMENT_CNT */
+#define DEV5G_MM_TX_PFRAGMENT_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 292, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_HIH_CKSM_ERR_CNT */
+#define DEV5G_RX_HIH_CKSM_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 296, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:RX_XGMII_PROT_ERR_CNT */
+#define DEV5G_RX_XGMII_PROT_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 300, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_HIH_CKSM_ERR_CNT */
+#define DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 304, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_XGMII_PROT_ERR_CNT */
+#define DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 60, 0, 1, 312, 308, 0, 1,\
+ 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_CNT */
+#define DEV5G_RX_IN_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 0, 0, 1, \
+ 4)
+
+/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_MSB_CNT */
+#define DEV5G_RX_IN_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 4, 0, 1, \
+ 4)
#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\
@@ -3741,13 +4139,15 @@ enum sparx5_target {
#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_CNT */
-#define DEV5G_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 8, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_CNT */
+#define DEV5G_RX_OK_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 8, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_MSB_CNT */
-#define DEV5G_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 12, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_MSB_CNT */
+#define DEV5G_RX_OK_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 12, 0, 1, \
+ 4)
#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\
@@ -3755,13 +4155,15 @@ enum sparx5_target {
#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_CNT */
-#define DEV5G_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 16, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_CNT */
+#define DEV5G_RX_BAD_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 16, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_MSB_CNT */
-#define DEV5G_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 20, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_MSB_CNT */
+#define DEV5G_RX_BAD_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 20, 0, 1, \
+ 4)
#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\
@@ -3769,13 +4171,15 @@ enum sparx5_target {
#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_CNT */
-#define DEV5G_TX_OUT_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 24, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_CNT */
+#define DEV5G_TX_OUT_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 24, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_MSB_CNT */
-#define DEV5G_TX_OUT_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 28, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_MSB_CNT */
+#define DEV5G_TX_OUT_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 28, 0, 1, \
+ 4)
#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\
@@ -3783,13 +4187,15 @@ enum sparx5_target {
#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_CNT */
-#define DEV5G_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 32, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_CNT */
+#define DEV5G_TX_OK_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 32, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_MSB_CNT */
-#define DEV5G_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 36, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_MSB_CNT */
+#define DEV5G_TX_OK_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 36, 0, 1, \
+ 4)
#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\
@@ -3797,13 +4203,15 @@ enum sparx5_target {
#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_CNT */
-#define DEV5G_PMAC_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 40, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_CNT */
+#define DEV5G_PMAC_RX_OK_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 40, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_MSB_CNT */
-#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 44, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_MSB_CNT */
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 44, 0, 1, \
+ 4)
#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\
@@ -3811,13 +4219,15 @@ enum sparx5_target {
#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_CNT */
-#define DEV5G_PMAC_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 48, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_CNT */
+#define DEV5G_PMAC_RX_BAD_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 48, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_MSB_CNT */
-#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 52, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_MSB_CNT */
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 52, 0, 1, \
+ 4)
#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\
@@ -3825,13 +4235,15 @@ enum sparx5_target {
#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_CNT */
-#define DEV5G_PMAC_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 56, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_CNT */
+#define DEV5G_PMAC_TX_OK_BYTES_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 56, 0, 1, \
+ 4)
-/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_MSB_CNT */
-#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
- t, 13, 372, 0, 1, 64, 60, 0, 1, 4)
+/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_MSB_CNT */
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 372, 0, 1, 64, 60, 0, 1, \
+ 4)
#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\
@@ -3839,9 +4251,10 @@ enum sparx5_target {
#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_GET(x)\
FIELD_GET(DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
-/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
-#define DEV5G_DEV_RST_CTRL(t) __REG(TARGET_DEV5G,\
- t, 13, 436, 0, 1, 52, 0, 0, 1, 4)
+/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEV5G_DEV_RST_CTRL(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 436, 0, 1, 52, 0, 0, 1, \
+ 4)
#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28)
#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
@@ -3897,9 +4310,14 @@ enum sparx5_target {
#define DEV5G_DEV_RST_CTRL_MAC_RX_RST_GET(x)\
FIELD_GET(DEV5G_DEV_RST_CTRL_MAC_RX_RST, x)
-/* DSM:RAM_CTRL:RAM_INIT */
-#define DSM_RAM_INIT __REG(TARGET_DSM,\
- 0, 1, 0, 0, 1, 4, 0, 0, 1, 4)
+/* DEV10G:DEV_CFG_STATUS:PTP_STAMPER_CFG */
+#define DEV5G_PTP_STAMPER_CFG(t) \
+ __REG(TARGET_DEV5G, t, regs->tsize[TC_DEV5G], 436, 0, 1, 52, 20, 0, 1, \
+ 4)
+
+/* DSM:RAM_CTRL:RAM_INIT */
+#define DSM_RAM_INIT \
+ __REG(TARGET_DSM, 0, 1, 0, 0, 1, 4, 0, 0, 1, 4)
#define DSM_RAM_INIT_RAM_INIT BIT(1)
#define DSM_RAM_INIT_RAM_INIT_SET(x)\
@@ -3913,9 +4331,10 @@ enum sparx5_target {
#define DSM_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(DSM_RAM_INIT_RAM_CFG_HOOK, x)
-/* DSM:CFG:BUF_CFG */
-#define DSM_BUF_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 0, r, 67, 4)
+/* DSM:CFG:BUF_CFG */
+#define DSM_BUF_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 0, r, \
+ regs->rcnt[RC_DSM_BUF_CFG], 4)
#define DSM_BUF_CFG_CSC_STAT_DIS BIT(13)
#define DSM_BUF_CFG_CSC_STAT_DIS_SET(x)\
@@ -3941,9 +4360,10 @@ enum sparx5_target {
#define DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT_GET(x)\
FIELD_GET(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT, x)
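
The TC_*/GA_*/GC_*/GW_*/RA_*/RC_*/FP_*/FW_* indices used throughout this patch suggest a per-chip constants table roughly shaped like the sketch below; the struct name, field names, and types are illustrative assumptions, not taken from the patch.

	/* Illustrative only: per-chip register-layout table as implied by
	 * the index names in this patch (TC_* target counts, GA_* group
	 * addresses, GC_* group counts, GW_* group widths, RA_* register
	 * addresses, RC_* register counts, FP_* field positions, FW_*
	 * field widths). The in-tree struct may differ.
	 */
	struct chip_regs_sketch {
		const unsigned int *tsize;	/* regs->tsize[TC_DEV5G] */
		const unsigned int *gaddr;	/* regs->gaddr[GA_HSCH_SYSTEM] */
		const unsigned int *gcnt;	/* regs->gcnt[GC_HSCH_HSCH_CFG] */
		const unsigned int *gsize;	/* regs->gsize[GW_FDMA_FDMA] */
		const unsigned int *raddr;	/* regs->raddr[RA_GCB_SOFT_RST] */
		const unsigned int *rcnt;	/* regs->rcnt[RC_DSM_MAC_CFG] */
		const unsigned int *fpos;	/* regs->fpos[FP_FDMA_CH_CFG_CH_INJ_PORT] */
		const unsigned int *fsize;	/* regs->fsize[FW_HSCH_SE_CFG_SE_DWRR_CNT] */
	};
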
-/* DSM:CFG:DEV_TX_STOP_WM_CFG */
-#define DSM_DEV_TX_STOP_WM_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 1360, r, 67, 4)
+/* DSM:CFG:DEV_TX_STOP_WM_CFG */
+#define DSM_DEV_TX_STOP_WM_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1360, r, \
+ regs->rcnt[RC_DSM_DEV_TX_STOP_WM_CFG], 4)
#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA BIT(9)
#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA_SET(x)\
@@ -3969,9 +4389,10 @@ enum sparx5_target {
#define DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_GET(x)\
FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, x)
-/* DSM:CFG:RX_PAUSE_CFG */
-#define DSM_RX_PAUSE_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 1628, r, 67, 4)
+/* DSM:CFG:RX_PAUSE_CFG */
+#define DSM_RX_PAUSE_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1628, r, \
+ regs->rcnt[RC_DSM_RX_PAUSE_CFG], 4)
#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN BIT(1)
#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(x)\
@@ -3985,9 +4406,10 @@ enum sparx5_target {
#define DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL_GET(x)\
FIELD_GET(DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL, x)
-/* DSM:CFG:MAC_CFG */
-#define DSM_MAC_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 2432, r, 67, 4)
+/* DSM:CFG:MAC_CFG */
+#define DSM_MAC_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2432, r, \
+ regs->rcnt[RC_DSM_MAC_CFG], 4)
#define DSM_MAC_CFG_TX_PAUSE_VAL GENMASK(31, 16)
#define DSM_MAC_CFG_TX_PAUSE_VAL_SET(x)\
@@ -4013,9 +4435,10 @@ enum sparx5_target {
#define DSM_MAC_CFG_TX_PAUSE_XON_XOFF_GET(x)\
FIELD_GET(DSM_MAC_CFG_TX_PAUSE_XON_XOFF, x)
-/* DSM:CFG:MAC_ADDR_BASE_HIGH_CFG */
-#define DSM_MAC_ADDR_BASE_HIGH_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 2700, r, 65, 4)
+/* DSM:CFG:MAC_ADDR_BASE_HIGH_CFG */
+#define DSM_MAC_ADDR_BASE_HIGH_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2700, r, \
+ regs->rcnt[RC_DSM_MAC_ADDR_BASE_HIGH_CFG], 4)
#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH GENMASK(23, 0)
#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_SET(x)\
@@ -4023,9 +4446,10 @@ enum sparx5_target {
#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_GET(x)\
FIELD_GET(DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH, x)
-/* DSM:CFG:MAC_ADDR_BASE_LOW_CFG */
-#define DSM_MAC_ADDR_BASE_LOW_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 2960, r, 65, 4)
+/* DSM:CFG:MAC_ADDR_BASE_LOW_CFG */
+#define DSM_MAC_ADDR_BASE_LOW_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2960, r, \
+ regs->rcnt[RC_DSM_MAC_ADDR_BASE_LOW_CFG], 4)
#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW GENMASK(23, 0)
#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_SET(x)\
@@ -4033,9 +4457,10 @@ enum sparx5_target {
#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_GET(x)\
FIELD_GET(DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW, x)
-/* DSM:CFG:TAXI_CAL_CFG */
-#define DSM_TAXI_CAL_CFG(r) __REG(TARGET_DSM,\
- 0, 1, 20, 0, 1, 3528, 3224, r, 9, 4)
+/* DSM:CFG:TAXI_CAL_CFG */
+#define DSM_TAXI_CAL_CFG(r) \
+ __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 3224, r, \
+ regs->rcnt[RC_DSM_TAXI_CAL_CFG], 4)
#define DSM_TAXI_CAL_CFG_CAL_IDX GENMASK(20, 15)
#define DSM_TAXI_CAL_CFG_CAL_IDX_SET(x)\
@@ -4067,9 +4492,31 @@ enum sparx5_target {
#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA_GET(x)\
FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_ENA, x)
-/* EACL:ES2_KEY_SELECT_PROFILE:VCAP_ES2_KEY_SEL */
-#define EACL_VCAP_ES2_KEY_SEL(g, r) __REG(TARGET_EACL,\
- 0, 1, 149504, g, 138, 8, 0, r, 2, 4)
+/* LAN969X ONLY */
+#define DSM_TAXI_CAL_CFG_CAL_SEL_STAT BIT(23)
+#define DSM_TAXI_CAL_CFG_CAL_SEL_STAT_SET(x)\
+ FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_SEL_STAT, x)
+#define DSM_TAXI_CAL_CFG_CAL_SEL_STAT_GET(x)\
+ FIELD_GET(DSM_TAXI_CAL_CFG_CAL_SEL_STAT, x)
+
+/* LAN969X ONLY */
+#define DSM_TAXI_CAL_CFG_CAL_SWITCH BIT(22)
+#define DSM_TAXI_CAL_CFG_CAL_SWITCH_SET(x)\
+ FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_SWITCH, x)
+#define DSM_TAXI_CAL_CFG_CAL_SWITCH_GET(x)\
+ FIELD_GET(DSM_TAXI_CAL_CFG_CAL_SWITCH, x)
+
+/* LAN969X ONLY */
+#define DSM_TAXI_CAL_CFG_CAL_PGM_SEL BIT(21)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_SEL_SET(x)\
+ FIELD_PREP(DSM_TAXI_CAL_CFG_CAL_PGM_SEL, x)
+#define DSM_TAXI_CAL_CFG_CAL_PGM_SEL_GET(x)\
+ FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_SEL, x)
+
+/* EACL:ES2_KEY_SELECT_PROFILE:VCAP_ES2_KEY_SEL */
+#define EACL_VCAP_ES2_KEY_SEL(g, r) \
+ __REG(TARGET_EACL, 0, 1, regs->gaddr[GA_EACL_ES2_KEY_SELECT_PROFILE], \
+ g, regs->gcnt[GC_EACL_ES2_KEY_SELECT_PROFILE], 8, 0, r, 2, 4)
#define EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL GENMASK(7, 5)
#define EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_SET(x)\
@@ -4095,13 +4542,15 @@ enum sparx5_target {
#define EACL_VCAP_ES2_KEY_SEL_KEY_ENA_GET(x)\
FIELD_GET(EACL_VCAP_ES2_KEY_SEL_KEY_ENA, x)
-/* EACL:CNT_TBL:ES2_CNT */
-#define EACL_ES2_CNT(g) __REG(TARGET_EACL,\
- 0, 1, 122880, g, 2048, 4, 0, 0, 1, 4)
+/* EACL:CNT_TBL:ES2_CNT */
+#define EACL_ES2_CNT(g) \
+ __REG(TARGET_EACL, 0, 1, regs->gaddr[GA_EACL_CNT_TBL], g, \
+ regs->gcnt[GC_EACL_CNT_TBL], 4, 0, 0, 1, 4)
-/* EACL:POL_CFG:POL_EACL_CFG */
-#define EACL_POL_EACL_CFG __REG(TARGET_EACL,\
- 0, 1, 150608, 0, 1, 780, 768, 0, 1, 4)
+/* EACL:POL_CFG:POL_EACL_CFG */
+#define EACL_POL_EACL_CFG \
+ __REG(TARGET_EACL, 0, 1, regs->gaddr[GA_EACL_POL_CFG], 0, 1, 780, 768, \
+ 0, 1, 4)
#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED BIT(5)
#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED_SET(x)\
@@ -4139,9 +4588,10 @@ enum sparx5_target {
#define EACL_POL_EACL_CFG_EACL_FORCE_INIT_GET(x)\
FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_INIT, x)
-/* EACL:ES2_STICKY:SEC_LOOKUP_STICKY */
-#define EACL_SEC_LOOKUP_STICKY(r) __REG(TARGET_EACL,\
- 0, 1, 118696, 0, 1, 8, 0, r, 2, 4)
+/* EACL:ES2_STICKY:SEC_LOOKUP_STICKY */
+#define EACL_SEC_LOOKUP_STICKY(r) \
+ __REG(TARGET_EACL, 0, 1, regs->gaddr[GA_EACL_ES2_STICKY], 0, 1, 8, 0, \
+ r, 2, 4)
#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY BIT(7)
#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_SET(x)\
@@ -4191,9 +4641,10 @@ enum sparx5_target {
#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_GET(x)\
FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
-/* EACL:RAM_CTRL:RAM_INIT */
-#define EACL_RAM_INIT __REG(TARGET_EACL,\
- 0, 1, 118736, 0, 1, 4, 0, 0, 1, 4)
+/* EACL:RAM_CTRL:RAM_INIT */
+#define EACL_RAM_INIT \
+ __REG(TARGET_EACL, 0, 1, regs->gaddr[GA_EACL_RAM_CTRL], 0, 1, 4, 0, 0, \
+ 1, 4)
#define EACL_RAM_INIT_RAM_INIT BIT(1)
#define EACL_RAM_INIT_RAM_INIT_SET(x)\
@@ -4207,9 +4658,10 @@ enum sparx5_target {
#define EACL_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(EACL_RAM_INIT_RAM_CFG_HOOK, x)
-/* FDMA:FDMA:FDMA_CH_ACTIVATE */
-#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 0, 0, 1, 4)
+/* FDMA:FDMA:FDMA_CH_ACTIVATE */
+#define FDMA_CH_ACTIVATE \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 0, 0, 1, \
+ 4)
#define FDMA_CH_ACTIVATE_CH_ACTIVATE GENMASK(7, 0)
#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\
@@ -4217,9 +4669,10 @@ enum sparx5_target {
#define FDMA_CH_ACTIVATE_CH_ACTIVATE_GET(x)\
FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
-/* FDMA:FDMA:FDMA_CH_RELOAD */
-#define FDMA_CH_RELOAD __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 4, 0, 1, 4)
+/* FDMA:FDMA:FDMA_CH_RELOAD */
+#define FDMA_CH_RELOAD \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 4, 0, 1, \
+ 4)
#define FDMA_CH_RELOAD_CH_RELOAD GENMASK(7, 0)
#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\
@@ -4227,9 +4680,10 @@ enum sparx5_target {
#define FDMA_CH_RELOAD_CH_RELOAD_GET(x)\
FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x)
-/* FDMA:FDMA:FDMA_CH_DISABLE */
-#define FDMA_CH_DISABLE __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 8, 0, 1, 4)
+/* FDMA:FDMA:FDMA_CH_DISABLE */
+#define FDMA_CH_DISABLE \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 8, 0, 1, \
+ 4)
#define FDMA_CH_DISABLE_CH_DISABLE GENMASK(7, 0)
#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\
@@ -4237,49 +4691,58 @@ enum sparx5_target {
#define FDMA_CH_DISABLE_CH_DISABLE_GET(x)\
FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x)
-/* FDMA:FDMA:FDMA_DCB_LLP */
-#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 52, r, 8, 4)
-
-/* FDMA:FDMA:FDMA_DCB_LLP1 */
-#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 84, r, 8, 4)
-
-/* FDMA:FDMA:FDMA_DCB_LLP_PREV */
-#define FDMA_DCB_LLP_PREV(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 116, r, 8, 4)
-
-/* FDMA:FDMA:FDMA_DCB_LLP_PREV1 */
-#define FDMA_DCB_LLP_PREV1(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 148, r, 8, 4)
-
-/* FDMA:FDMA:FDMA_CH_CFG */
-#define FDMA_CH_CFG(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 224, r, 8, 4)
-
-#define FDMA_CH_CFG_CH_XTR_STATUS_MODE BIT(7)
+/* FDMA:FDMA:FDMA_DCB_LLP */
+#define FDMA_DCB_LLP(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 52, r, 8, \
+ 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP1 */
+#define FDMA_DCB_LLP1(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 84, r, 8, \
+ 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP_PREV */
+#define FDMA_DCB_LLP_PREV(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 116, r, 8,\
+ 4)
+
+/* FDMA:FDMA:FDMA_DCB_LLP_PREV1 */
+#define FDMA_DCB_LLP_PREV1(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 148, r, 8,\
+ 4)
+
+/* FDMA:FDMA:FDMA_CH_CFG */
+#define FDMA_CH_CFG(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 224, r, 8,\
+ 4)
+
+#define FDMA_CH_CFG_CH_XTR_STATUS_MODE\
+ BIT(regs->fpos[FP_FDMA_CH_CFG_CH_XTR_STATUS_MODE])
#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_SET(x)\
- FIELD_PREP(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
+ spx5_field_prep(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_GET(x)\
- FIELD_GET(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
+ spx5_field_get(FDMA_CH_CFG_CH_XTR_STATUS_MODE, x)
-#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY BIT(6)
+#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY\
+ BIT(regs->fpos[FP_FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY])
#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(x)\
- FIELD_PREP(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+ spx5_field_prep(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_GET(x)\
- FIELD_GET(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
+ spx5_field_get(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x)
-#define FDMA_CH_CFG_CH_INJ_PORT BIT(5)
+#define FDMA_CH_CFG_CH_INJ_PORT\
+ BIT(regs->fpos[FP_FDMA_CH_CFG_CH_INJ_PORT])
#define FDMA_CH_CFG_CH_INJ_PORT_SET(x)\
- FIELD_PREP(FDMA_CH_CFG_CH_INJ_PORT, x)
+ spx5_field_prep(FDMA_CH_CFG_CH_INJ_PORT, x)
#define FDMA_CH_CFG_CH_INJ_PORT_GET(x)\
- FIELD_GET(FDMA_CH_CFG_CH_INJ_PORT, x)
+ spx5_field_get(FDMA_CH_CFG_CH_INJ_PORT, x)
-#define FDMA_CH_CFG_CH_DCB_DB_CNT GENMASK(4, 1)
+#define FDMA_CH_CFG_CH_DCB_DB_CNT\
+ GENMASK(regs->fsize[FW_FDMA_CH_CFG_CH_DCB_DB_CNT] + 1 - 1, 1)
#define FDMA_CH_CFG_CH_DCB_DB_CNT_SET(x)\
- FIELD_PREP(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+ spx5_field_prep(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
#define FDMA_CH_CFG_CH_DCB_DB_CNT_GET(x)\
- FIELD_GET(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
+ spx5_field_get(FDMA_CH_CFG_CH_DCB_DB_CNT, x)
#define FDMA_CH_CFG_CH_MEM BIT(0)
#define FDMA_CH_CFG_CH_MEM_SET(x)\
@@ -4287,9 +4750,10 @@ enum sparx5_target {
#define FDMA_CH_CFG_CH_MEM_GET(x)\
FIELD_GET(FDMA_CH_CFG_CH_MEM, x)
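
FIELD_PREP()/FIELD_GET() require compile-time-constant masks, which is presumably why fields whose mask or bit position now comes from a regs-> table switch to spx5_field_prep()/spx5_field_get() above. A plausible runtime-mask shape, using ffs() to locate the mask's lowest set bit (names here are illustrative; the in-tree helpers may differ):

	#include <stdint.h>
	#include <strings.h>	/* ffs() */

	/* Runtime-mask field helpers, sketched: the shift is derived from
	 * the mask at run time instead of being checked at compile time.
	 */
	static inline uint32_t rt_field_prep(uint32_t mask, uint32_t val)
	{
		return (val << (ffs(mask) - 1)) & mask;
	}

	static inline uint32_t rt_field_get(uint32_t mask, uint32_t reg)
	{
		return (reg & mask) >> (ffs(mask) - 1);
	}

With mask = BIT(5), rt_field_prep(mask, 1) sets bit 5 and rt_field_get() recovers it, matching the constant-mask FIELD_PREP()/FIELD_GET() behaviour for the BIT(regs->fpos[...]) cases above.
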
-/* FDMA:FDMA:FDMA_CH_TRANSLATE */
-#define FDMA_CH_TRANSLATE(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 256, r, 8, 4)
+/* FDMA:FDMA:FDMA_CH_TRANSLATE */
+#define FDMA_CH_TRANSLATE(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 256, r, 8,\
+ 4)
#define FDMA_CH_TRANSLATE_OFFSET GENMASK(15, 0)
#define FDMA_CH_TRANSLATE_OFFSET_SET(x)\
@@ -4297,9 +4761,10 @@ enum sparx5_target {
#define FDMA_CH_TRANSLATE_OFFSET_GET(x)\
FIELD_GET(FDMA_CH_TRANSLATE_OFFSET, x)
-/* FDMA:FDMA:FDMA_XTR_CFG */
-#define FDMA_XTR_CFG __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 364, 0, 1, 4)
+/* FDMA:FDMA:FDMA_XTR_CFG */
+#define FDMA_XTR_CFG \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 364, 0, 1,\
+ 4)
#define FDMA_XTR_CFG_XTR_FIFO_WM GENMASK(15, 11)
#define FDMA_XTR_CFG_XTR_FIFO_WM_SET(x)\
@@ -4313,9 +4778,10 @@ enum sparx5_target {
#define FDMA_XTR_CFG_XTR_ARB_SAT_GET(x)\
FIELD_GET(FDMA_XTR_CFG_XTR_ARB_SAT, x)
-/* FDMA:FDMA:FDMA_PORT_CTRL */
-#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 376, r, 2, 4)
+/* FDMA:FDMA:FDMA_PORT_CTRL */
+#define FDMA_PORT_CTRL(r) \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 376, r, 2,\
+ 4)
#define FDMA_PORT_CTRL_INJ_STOP BIT(4)
#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\
@@ -4347,9 +4813,10 @@ enum sparx5_target {
#define FDMA_PORT_CTRL_XTR_BUF_RST_GET(x)\
FIELD_GET(FDMA_PORT_CTRL_XTR_BUF_RST, x)
-/* FDMA:FDMA:FDMA_INTR_DCB */
-#define FDMA_INTR_DCB __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 384, 0, 1, 4)
+/* FDMA:FDMA:FDMA_INTR_DCB */
+#define FDMA_INTR_DCB \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 384, 0, 1,\
+ 4)
#define FDMA_INTR_DCB_INTR_DCB GENMASK(7, 0)
#define FDMA_INTR_DCB_INTR_DCB_SET(x)\
@@ -4357,9 +4824,10 @@ enum sparx5_target {
#define FDMA_INTR_DCB_INTR_DCB_GET(x)\
FIELD_GET(FDMA_INTR_DCB_INTR_DCB, x)
-/* FDMA:FDMA:FDMA_INTR_DCB_ENA */
-#define FDMA_INTR_DCB_ENA __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 388, 0, 1, 4)
+/* FDMA:FDMA:FDMA_INTR_DCB_ENA */
+#define FDMA_INTR_DCB_ENA \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 388, 0, 1,\
+ 4)
#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA GENMASK(7, 0)
#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_SET(x)\
@@ -4367,9 +4835,10 @@ enum sparx5_target {
#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_GET(x)\
FIELD_GET(FDMA_INTR_DCB_ENA_INTR_DCB_ENA, x)
-/* FDMA:FDMA:FDMA_INTR_DB */
-#define FDMA_INTR_DB __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 392, 0, 1, 4)
+/* FDMA:FDMA:FDMA_INTR_DB */
+#define FDMA_INTR_DB \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 392, 0, 1,\
+ 4)
#define FDMA_INTR_DB_INTR_DB GENMASK(7, 0)
#define FDMA_INTR_DB_INTR_DB_SET(x)\
@@ -4377,9 +4846,10 @@ enum sparx5_target {
#define FDMA_INTR_DB_INTR_DB_GET(x)\
FIELD_GET(FDMA_INTR_DB_INTR_DB, x)
-/* FDMA:FDMA:FDMA_INTR_DB_ENA */
-#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 396, 0, 1, 4)
+/* FDMA:FDMA:FDMA_INTR_DB_ENA */
+#define FDMA_INTR_DB_ENA \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 396, 0, 1,\
+ 4)
#define FDMA_INTR_DB_ENA_INTR_DB_ENA GENMASK(7, 0)
#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\
@@ -4387,9 +4857,10 @@ enum sparx5_target {
#define FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(x)\
FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
-/* FDMA:FDMA:FDMA_INTR_ERR */
-#define FDMA_INTR_ERR __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 400, 0, 1, 4)
+/* FDMA:FDMA:FDMA_INTR_ERR */
+#define FDMA_INTR_ERR \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 400, 0, 1,\
+ 4)
#define FDMA_INTR_ERR_INTR_PORT_ERR GENMASK(9, 8)
#define FDMA_INTR_ERR_INTR_PORT_ERR_SET(x)\
@@ -4403,9 +4874,10 @@ enum sparx5_target {
#define FDMA_INTR_ERR_INTR_CH_ERR_GET(x)\
FIELD_GET(FDMA_INTR_ERR_INTR_CH_ERR, x)
-/* FDMA:FDMA:FDMA_ERRORS */
-#define FDMA_ERRORS __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 412, 0, 1, 4)
+/* FDMA:FDMA:FDMA_ERRORS */
+#define FDMA_ERRORS \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 412, 0, 1,\
+ 4)
#define FDMA_ERRORS_ERR_XTR_WR GENMASK(31, 30)
#define FDMA_ERRORS_ERR_XTR_WR_SET(x)\
@@ -4455,9 +4927,10 @@ enum sparx5_target {
#define FDMA_ERRORS_ERR_CH_WR_GET(x)\
FIELD_GET(FDMA_ERRORS_ERR_CH_WR, x)
-/* FDMA:FDMA:FDMA_ERRORS_2 */
-#define FDMA_ERRORS_2 __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 416, 0, 1, 4)
+/* FDMA:FDMA:FDMA_ERRORS_2 */
+#define FDMA_ERRORS_2 \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 416, 0, 1,\
+ 4)
#define FDMA_ERRORS_2_ERR_XTR_FRAG GENMASK(1, 0)
#define FDMA_ERRORS_2_ERR_XTR_FRAG_SET(x)\
@@ -4465,9 +4938,10 @@ enum sparx5_target {
#define FDMA_ERRORS_2_ERR_XTR_FRAG_GET(x)\
FIELD_GET(FDMA_ERRORS_2_ERR_XTR_FRAG, x)
-/* FDMA:FDMA:FDMA_CTRL */
-#define FDMA_CTRL __REG(TARGET_FDMA,\
- 0, 1, 8, 0, 1, 428, 424, 0, 1, 4)
+/* FDMA:FDMA:FDMA_CTRL */
+#define FDMA_CTRL \
+ __REG(TARGET_FDMA, 0, 1, 8, 0, 1, regs->gsize[GW_FDMA_FDMA], 424, 0, 1,\
+ 4)
#define FDMA_CTRL_NRESET BIT(0)
#define FDMA_CTRL_NRESET_SET(x)\
@@ -4475,9 +4949,10 @@ enum sparx5_target {
#define FDMA_CTRL_NRESET_GET(x)\
FIELD_GET(FDMA_CTRL_NRESET, x)
-/* DEVCPU_GCB:CHIP_REGS:CHIP_ID */
-#define GCB_CHIP_ID __REG(TARGET_GCB,\
- 0, 1, 0, 0, 1, 424, 0, 0, 1, 4)
+/* DEVCPU_GCB:CHIP_REGS:CHIP_ID */
+#define GCB_CHIP_ID \
+ __REG(TARGET_GCB, 0, 1, 0, 0, 1, regs->gsize[GW_GCB_CHIP_REGS], 0, 0, \
+ 1, 4)
#define GCB_CHIP_ID_REV_ID GENMASK(31, 28)
#define GCB_CHIP_ID_REV_ID_SET(x)\
@@ -4503,10 +4978,12 @@ enum sparx5_target {
#define GCB_CHIP_ID_ONE_GET(x)\
FIELD_GET(GCB_CHIP_ID_ONE, x)
-/* DEVCPU_GCB:CHIP_REGS:SOFT_RST */
-#define GCB_SOFT_RST __REG(TARGET_GCB,\
- 0, 1, 0, 0, 1, 424, 8, 0, 1, 4)
+/* DEVCPU_GCB:CHIP_REGS:SOFT_RST */
+#define GCB_SOFT_RST \
+ __REG(TARGET_GCB, 0, 1, 0, 0, 1, regs->gsize[GW_GCB_CHIP_REGS], \
+ regs->raddr[RA_GCB_SOFT_RST], 0, 1, 4)
+/* SPARX5 ONLY */
#define GCB_SOFT_RST_SOFT_NON_CFG_RST BIT(2)
#define GCB_SOFT_RST_SOFT_NON_CFG_RST_SET(x)\
FIELD_PREP(GCB_SOFT_RST_SOFT_NON_CFG_RST, x)
@@ -4525,9 +5002,11 @@ enum sparx5_target {
#define GCB_SOFT_RST_SOFT_CHIP_RST_GET(x)\
FIELD_GET(GCB_SOFT_RST_SOFT_CHIP_RST, x)
-/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_SD_CFG */
-#define GCB_HW_SGPIO_SD_CFG __REG(TARGET_GCB,\
- 0, 1, 0, 0, 1, 424, 20, 0, 1, 4)
+/* SPARX5 ONLY */
+/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_SD_CFG */
+#define GCB_HW_SGPIO_SD_CFG \
+ __REG(TARGET_GCB, 0, 1, 0, 0, 1, regs->gsize[GW_GCB_CHIP_REGS], 20, 0, \
+ 1, 4)
#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA BIT(1)
#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA_SET(x)\
@@ -4541,19 +5020,23 @@ enum sparx5_target {
#define GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL_GET(x)\
FIELD_GET(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, x)
-/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_TO_SD_MAP_CFG */
-#define GCB_HW_SGPIO_TO_SD_MAP_CFG(r) __REG(TARGET_GCB,\
- 0, 1, 0, 0, 1, 424, 24, r, 65, 4)
+/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_TO_SD_MAP_CFG */
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG(r) \
+ __REG(TARGET_GCB, 0, 1, 0, 0, 1, regs->gsize[GW_GCB_CHIP_REGS], \
+ regs->raddr[RA_GCB_HW_SGPIO_TO_SD_MAP_CFG], r, \
+ regs->rcnt[RC_GCB_HW_SGPIO_TO_SD_MAP_CFG], 4)
-#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL GENMASK(8, 0)
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL\
+ GENMASK(regs->fsize[FW_GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL] + 0 - 1, 0)
#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_SET(x)\
- FIELD_PREP(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
+ spx5_field_prep(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_GET(x)\
- FIELD_GET(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
+ spx5_field_get(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
-/* DEVCPU_GCB:SIO_CTRL:SIO_CLOCK */
-#define GCB_SIO_CLOCK(g) __REG(TARGET_GCB,\
- 0, 1, 876, g, 3, 280, 20, 0, 1, 4)
+/* DEVCPU_GCB:SIO_CTRL:SIO_CLOCK */
+#define GCB_SIO_CLOCK(g) \
+ __REG(TARGET_GCB, 0, 1, regs->gaddr[GA_GCB_SIO_CTRL], g, \
+ regs->gcnt[GC_GCB_SIO_CTRL], 280, 20, 0, 1, 4)
#define GCB_SIO_CLOCK_SIO_CLK_FREQ GENMASK(19, 8)
#define GCB_SIO_CLOCK_SIO_CLK_FREQ_SET(x)\
@@ -4567,9 +5050,10 @@ enum sparx5_target {
#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_GET(x)\
FIELD_GET(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x)
-/* HSCH:HSCH_CFG:CIR_CFG */
-#define HSCH_CIR_CFG(g) __REG(TARGET_HSCH,\
- 0, 1, 0, g, 5040, 32, 0, 0, 1, 4)
+/* HSCH:HSCH_CFG:CIR_CFG */
+#define HSCH_CIR_CFG(g) \
+ __REG(TARGET_HSCH, 0, 1, 0, g, regs->gcnt[GC_HSCH_HSCH_CFG], 32, 0, 0, \
+ 1, 4)
#define HSCH_CIR_CFG_CIR_RATE GENMASK(22, 6)
#define HSCH_CIR_CFG_CIR_RATE_SET(x)\
@@ -4583,9 +5067,10 @@ enum sparx5_target {
#define HSCH_CIR_CFG_CIR_BURST_GET(x)\
FIELD_GET(HSCH_CIR_CFG_CIR_BURST, x)
-/* HSCH:HSCH_CFG:EIR_CFG */
-#define HSCH_EIR_CFG(g) __REG(TARGET_HSCH,\
- 0, 1, 0, g, 5040, 32, 4, 0, 1, 4)
+/* HSCH:HSCH_CFG:EIR_CFG */
+#define HSCH_EIR_CFG(g) \
+ __REG(TARGET_HSCH, 0, 1, 0, g, regs->gcnt[GC_HSCH_HSCH_CFG], 32, 4, 0, \
+ 1, 4)
#define HSCH_EIR_CFG_EIR_RATE GENMASK(22, 6)
#define HSCH_EIR_CFG_EIR_RATE_SET(x)\
@@ -4599,15 +5084,17 @@ enum sparx5_target {
#define HSCH_EIR_CFG_EIR_BURST_GET(x)\
FIELD_GET(HSCH_EIR_CFG_EIR_BURST, x)
-/* HSCH:HSCH_CFG:SE_CFG */
-#define HSCH_SE_CFG(g) __REG(TARGET_HSCH,\
- 0, 1, 0, g, 5040, 32, 8, 0, 1, 4)
+/* HSCH:HSCH_CFG:SE_CFG */
+#define HSCH_SE_CFG(g) \
+ __REG(TARGET_HSCH, 0, 1, 0, g, regs->gcnt[GC_HSCH_HSCH_CFG], 32, 8, 0, \
+ 1, 4)
-#define HSCH_SE_CFG_SE_DWRR_CNT GENMASK(12, 6)
+#define HSCH_SE_CFG_SE_DWRR_CNT\
+ GENMASK(regs->fsize[FW_HSCH_SE_CFG_SE_DWRR_CNT] + 6 - 1, 6)
#define HSCH_SE_CFG_SE_DWRR_CNT_SET(x)\
- FIELD_PREP(HSCH_SE_CFG_SE_DWRR_CNT, x)
+ spx5_field_prep(HSCH_SE_CFG_SE_DWRR_CNT, x)
#define HSCH_SE_CFG_SE_DWRR_CNT_GET(x)\
- FIELD_GET(HSCH_SE_CFG_SE_DWRR_CNT, x)
+ spx5_field_get(HSCH_SE_CFG_SE_DWRR_CNT, x)
#define HSCH_SE_CFG_SE_AVB_ENA BIT(5)
#define HSCH_SE_CFG_SE_AVB_ENA_SET(x)\
@@ -4633,19 +5120,22 @@ enum sparx5_target {
#define HSCH_SE_CFG_SE_STOP_GET(x)\
FIELD_GET(HSCH_SE_CFG_SE_STOP, x)
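
Variable-width fields are rebuilt as GENMASK(width + lsb - 1, lsb), with the width taken from the per-chip fsize table; e.g. a width of 7 for SE_DWRR_CNT reproduces the Sparx5 constant GENMASK(12, 6) removed in this hunk. A small standalone check, where MY_GENMASK mirrors the kernel's GENMASK() for 32-bit masks:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustration only: 32-bit GENMASK(h, l), bits l..h inclusive. */
	#define MY_GENMASK(h, l) \
		(((~0u) - (1u << (l)) + 1) & (~0u >> (31 - (h))))

	int main(void)
	{
		int lsb = 6, fsize = 7;	/* hypothetical per-chip width */
		uint32_t mask = MY_GENMASK(fsize + lsb - 1, lsb);

		printf("0x%08x\n", mask);	/* 0x00001fc0 == GENMASK(12, 6) */
		return 0;
	}
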
-/* HSCH:HSCH_CFG:SE_CONNECT */
-#define HSCH_SE_CONNECT(g) __REG(TARGET_HSCH,\
- 0, 1, 0, g, 5040, 32, 12, 0, 1, 4)
+/* HSCH:HSCH_CFG:SE_CONNECT */
+#define HSCH_SE_CONNECT(g) \
+ __REG(TARGET_HSCH, 0, 1, 0, g, regs->gcnt[GC_HSCH_HSCH_CFG], 32, 12, 0,\
+ 1, 4)
-#define HSCH_SE_CONNECT_SE_LEAK_LINK GENMASK(15, 0)
+#define HSCH_SE_CONNECT_SE_LEAK_LINK\
+ GENMASK(regs->fsize[FW_HSCH_SE_CONNECT_SE_LEAK_LINK] + 0 - 1, 0)
#define HSCH_SE_CONNECT_SE_LEAK_LINK_SET(x)\
- FIELD_PREP(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
+ spx5_field_prep(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
#define HSCH_SE_CONNECT_SE_LEAK_LINK_GET(x)\
- FIELD_GET(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
+ spx5_field_get(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
-/* HSCH:HSCH_CFG:SE_DLB_SENSE */
-#define HSCH_SE_DLB_SENSE(g) __REG(TARGET_HSCH,\
- 0, 1, 0, g, 5040, 32, 16, 0, 1, 4)
+/* HSCH:HSCH_CFG:SE_DLB_SENSE */
+#define HSCH_SE_DLB_SENSE(g) \
+ __REG(TARGET_HSCH, 0, 1, 0, g, regs->gcnt[GC_HSCH_HSCH_CFG], 32, 16, 0,\
+ 1, 4)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO GENMASK(12, 10)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_SET(x)\
@@ -4653,11 +5143,12 @@ enum sparx5_target {
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_GET(x)\
FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x)
-#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT GENMASK(9, 3)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT\
+ GENMASK(regs->fsize[FW_HSCH_SE_DLB_SENSE_SE_DLB_DPORT] + 3 - 1, 3)
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_SET(x)\
- FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
+ spx5_field_prep(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_GET(x)\
- FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
+ spx5_field_get(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA BIT(2)
#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_SET(x)\
@@ -4677,9 +5168,10 @@ enum sparx5_target {
#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_GET(x)\
FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
-/* HSCH:HSCH_DWRR:DWRR_ENTRY */
-#define HSCH_DWRR_ENTRY(g) __REG(TARGET_HSCH,\
- 0, 1, 162816, g, 72, 4, 0, 0, 1, 4)
+/* HSCH:HSCH_DWRR:DWRR_ENTRY */
+#define HSCH_DWRR_ENTRY(g) \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_HSCH_DWRR], g, \
+ regs->gcnt[GC_HSCH_HSCH_DWRR], 4, 0, 0, 1, 4)
#define HSCH_DWRR_ENTRY_DWRR_COST GENMASK(24, 20)
#define HSCH_DWRR_ENTRY_DWRR_COST_SET(x)\
@@ -4693,15 +5185,17 @@ enum sparx5_target {
#define HSCH_DWRR_ENTRY_DWRR_BALANCE_GET(x)\
FIELD_GET(HSCH_DWRR_ENTRY_DWRR_BALANCE, x)
-/* HSCH:HSCH_MISC:HSCH_CFG_CFG */
-#define HSCH_HSCH_CFG_CFG __REG(TARGET_HSCH,\
- 0, 1, 163104, 0, 1, 648, 284, 0, 1, 4)
+/* HSCH:HSCH_MISC:HSCH_CFG_CFG */
+#define HSCH_HSCH_CFG_CFG \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_HSCH_MISC], 0, 1, 648, \
+ 284, 0, 1, 4)
-#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX GENMASK(26, 14)
+#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX\
+ GENMASK(regs->fsize[FW_HSCH_HSCH_CFG_CFG_CFG_SE_IDX] + 14 - 1, 14)
#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(x)\
- FIELD_PREP(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
+ spx5_field_prep(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_GET(x)\
- FIELD_GET(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
+ spx5_field_get(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
#define HSCH_HSCH_CFG_CFG_HSCH_LAYER GENMASK(13, 12)
#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(x)\
@@ -4715,9 +5209,11 @@ enum sparx5_target {
#define HSCH_HSCH_CFG_CFG_CSR_GRANT_GET(x)\
FIELD_GET(HSCH_HSCH_CFG_CFG_CSR_GRANT, x)
-/* HSCH:HSCH_MISC:SYS_CLK_PER */
-#define HSCH_SYS_CLK_PER __REG(TARGET_HSCH,\
- 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4)
+/* SPARX5 ONLY */
+/* HSCH:HSCH_MISC:SYS_CLK_PER */
+#define HSCH_SYS_CLK_PER \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_HSCH_MISC], 0, 1, 648, \
+ 640, 0, 1, 4)
#define HSCH_SYS_CLK_PER_100PS GENMASK(7, 0)
#define HSCH_SYS_CLK_PER_100PS_SET(x)\
@@ -4725,9 +5221,10 @@ enum sparx5_target {
#define HSCH_SYS_CLK_PER_100PS_GET(x)\
FIELD_GET(HSCH_SYS_CLK_PER_100PS, x)
-/* HSCH:HSCH_LEAK_LISTS:HSCH_TIMER_CFG */
-#define HSCH_HSCH_TIMER_CFG(g, r) __REG(TARGET_HSCH,\
- 0, 1, 161664, g, 4, 32, 0, r, 4, 4)
+/* HSCH:HSCH_LEAK_LISTS:HSCH_TIMER_CFG */
+#define HSCH_HSCH_TIMER_CFG(g, r) \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_HSCH_LEAK_LISTS], g, 4, \
+ 32, 0, r, 4, 4)
#define HSCH_HSCH_TIMER_CFG_LEAK_TIME GENMASK(17, 0)
#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(x)\
@@ -4735,15 +5232,17 @@ enum sparx5_target {
#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(x)\
FIELD_GET(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x)
-/* HSCH:HSCH_LEAK_LISTS:HSCH_LEAK_CFG */
-#define HSCH_HSCH_LEAK_CFG(g, r) __REG(TARGET_HSCH,\
- 0, 1, 161664, g, 4, 32, 16, r, 4, 4)
+/* HSCH:HSCH_LEAK_LISTS:HSCH_LEAK_CFG */
+#define HSCH_HSCH_LEAK_CFG(g, r) \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_HSCH_LEAK_LISTS], g, 4, \
+ 32, 16, r, 4, 4)
-#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST GENMASK(16, 1)
+#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST\
+ GENMASK(regs->fsize[FW_HSCH_HSCH_LEAK_CFG_LEAK_FIRST] + 1 - 1, 1)
#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(x)\
- FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
+ spx5_field_prep(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(x)\
- FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
+ spx5_field_get(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
#define HSCH_HSCH_LEAK_CFG_LEAK_ERR BIT(0)
#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_SET(x)\
@@ -4751,9 +5250,10 @@ enum sparx5_target {
#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_GET(x)\
FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x)
-/* HSCH:SYSTEM:FLUSH_CTRL */
-#define HSCH_FLUSH_CTRL __REG(TARGET_HSCH,\
- 0, 1, 184000, 0, 1, 312, 4, 0, 1, 4)
+/* HSCH:SYSTEM:FLUSH_CTRL */
+#define HSCH_FLUSH_CTRL \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_SYSTEM], 0, 1, 312, 4, 0, \
+ 1, 4)
#define HSCH_FLUSH_CTRL_FLUSH_ENA BIT(27)
#define HSCH_FLUSH_CTRL_FLUSH_ENA_SET(x)\
@@ -4773,11 +5273,12 @@ enum sparx5_target {
#define HSCH_FLUSH_CTRL_FLUSH_DST_GET(x)\
FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_DST, x)
-#define HSCH_FLUSH_CTRL_FLUSH_PORT GENMASK(24, 18)
+#define HSCH_FLUSH_CTRL_FLUSH_PORT\
+ GENMASK(regs->fsize[FW_HSCH_FLUSH_CTRL_FLUSH_PORT] + 18 - 1, 18)
#define HSCH_FLUSH_CTRL_FLUSH_PORT_SET(x)\
- FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
+ spx5_field_prep(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
#define HSCH_FLUSH_CTRL_FLUSH_PORT_GET(x)\
- FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
+ spx5_field_get(HSCH_FLUSH_CTRL_FLUSH_PORT, x)
#define HSCH_FLUSH_CTRL_FLUSH_QUEUE BIT(17)
#define HSCH_FLUSH_CTRL_FLUSH_QUEUE_SET(x)\
@@ -4791,15 +5292,17 @@ enum sparx5_target {
#define HSCH_FLUSH_CTRL_FLUSH_SE_GET(x)\
FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_SE, x)
-#define HSCH_FLUSH_CTRL_FLUSH_HIER GENMASK(15, 0)
+#define HSCH_FLUSH_CTRL_FLUSH_HIER\
+ GENMASK(regs->fsize[FW_HSCH_FLUSH_CTRL_FLUSH_HIER] + 0 - 1, 0)
#define HSCH_FLUSH_CTRL_FLUSH_HIER_SET(x)\
- FIELD_PREP(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
+ spx5_field_prep(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
#define HSCH_FLUSH_CTRL_FLUSH_HIER_GET(x)\
- FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
+ spx5_field_get(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
-/* HSCH:SYSTEM:PORT_MODE */
-#define HSCH_PORT_MODE(r) __REG(TARGET_HSCH,\
- 0, 1, 184000, 0, 1, 312, 8, r, 70, 4)
+/* HSCH:SYSTEM:PORT_MODE */
+#define HSCH_PORT_MODE(r) \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_SYSTEM], 0, 1, 312, 8, r, \
+ regs->rcnt[RC_HSCH_PORT_MODE], 4)
#define HSCH_PORT_MODE_DEQUEUE_DIS BIT(4)
#define HSCH_PORT_MODE_DEQUEUE_DIS_SET(x)\
@@ -4831,9 +5334,10 @@ enum sparx5_target {
#define HSCH_PORT_MODE_CPU_PRIO_MODE_GET(x)\
FIELD_GET(HSCH_PORT_MODE_CPU_PRIO_MODE, x)
-/* HSCH:SYSTEM:OUTB_SHARE_ENA */
-#define HSCH_OUTB_SHARE_ENA(r) __REG(TARGET_HSCH,\
- 0, 1, 184000, 0, 1, 312, 288, r, 5, 4)
+/* HSCH:SYSTEM:OUTB_SHARE_ENA */
+#define HSCH_OUTB_SHARE_ENA(r) \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_SYSTEM], 0, 1, 312, 288, \
+ r, 5, 4)
#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA GENMASK(7, 0)
#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(x)\
@@ -4841,9 +5345,10 @@ enum sparx5_target {
#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_GET(x)\
FIELD_GET(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA, x)
-/* HSCH:MMGT:RESET_CFG */
-#define HSCH_RESET_CFG __REG(TARGET_HSCH,\
- 0, 1, 162368, 0, 1, 16, 8, 0, 1, 4)
+/* HSCH:MMGT:RESET_CFG */
+#define HSCH_RESET_CFG \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_MMGT], 0, 1, 16, 8, 0, 1, \
+ 4)
#define HSCH_RESET_CFG_CORE_ENA BIT(0)
#define HSCH_RESET_CFG_CORE_ENA_SET(x)\
@@ -4851,9 +5356,10 @@ enum sparx5_target {
#define HSCH_RESET_CFG_CORE_ENA_GET(x)\
FIELD_GET(HSCH_RESET_CFG_CORE_ENA, x)
-/* HSCH:TAS_CONFIG:TAS_STATEMACHINE_CFG */
-#define HSCH_TAS_STATEMACHINE_CFG __REG(TARGET_HSCH,\
- 0, 1, 162384, 0, 1, 12, 8, 0, 1, 4)
+/* HSCH:TAS_CONFIG:TAS_STATEMACHINE_CFG */
+#define HSCH_TAS_STATEMACHINE_CFG \
+ __REG(TARGET_HSCH, 0, 1, regs->gaddr[GA_HSCH_TAS_CONFIG], 0, 1, \
+ regs->gsize[GW_HSCH_TAS_CONFIG], 8, 0, 1, 4)
#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY GENMASK(7, 0)
#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_SET(x)\
@@ -4861,9 +5367,9 @@ enum sparx5_target {
#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_GET(x)\
FIELD_GET(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x)
-/* LRN:COMMON:COMMON_ACCESS_CTRL */
-#define LRN_COMMON_ACCESS_CTRL __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 0, 0, 1, 4)
+/* LRN:COMMON:COMMON_ACCESS_CTRL */
+#define LRN_COMMON_ACCESS_CTRL \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 0, 0, 1, 4)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL GENMASK(21, 20)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL_SET(x)\
@@ -4877,11 +5383,12 @@ enum sparx5_target {
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE_GET(x)\
FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_TYPE, x)
-#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW GENMASK(18, 5)
+#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW\
+ GENMASK(regs->fsize[FW_LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW] + 5 - 1, 5)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW_SET(x)\
- FIELD_PREP(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
+ spx5_field_prep(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW_GET(x)\
- FIELD_GET(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
+ spx5_field_get(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW, x)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD GENMASK(4, 1)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(x)\
@@ -4895,9 +5402,9 @@ enum sparx5_target {
#define LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(x)\
FIELD_GET(LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT, x)
-/* LRN:COMMON:MAC_ACCESS_CFG_0 */
-#define LRN_MAC_ACCESS_CFG_0 __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 4, 0, 1, 4)
+/* LRN:COMMON:MAC_ACCESS_CFG_0 */
+#define LRN_MAC_ACCESS_CFG_0 \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 4, 0, 1, 4)
#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID GENMASK(28, 16)
#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID_SET(x)\
@@ -4911,13 +5418,13 @@ enum sparx5_target {
#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB_GET(x)\
FIELD_GET(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB, x)
-/* LRN:COMMON:MAC_ACCESS_CFG_1 */
-#define LRN_MAC_ACCESS_CFG_1 __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 8, 0, 1, 4)
+/* LRN:COMMON:MAC_ACCESS_CFG_1 */
+#define LRN_MAC_ACCESS_CFG_1 \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 8, 0, 1, 4)
-/* LRN:COMMON:MAC_ACCESS_CFG_2 */
-#define LRN_MAC_ACCESS_CFG_2 __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 12, 0, 1, 4)
+/* LRN:COMMON:MAC_ACCESS_CFG_2 */
+#define LRN_MAC_ACCESS_CFG_2 \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 12, 0, 1, 4)
#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD BIT(28)
#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD_SET(x)\
@@ -4991,19 +5498,20 @@ enum sparx5_target {
#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(x)\
FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR, x)
-/* LRN:COMMON:MAC_ACCESS_CFG_3 */
-#define LRN_MAC_ACCESS_CFG_3 __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 16, 0, 1, 4)
+/* LRN:COMMON:MAC_ACCESS_CFG_3 */
+#define LRN_MAC_ACCESS_CFG_3 \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 16, 0, 1, 4)
-#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX GENMASK(10, 0)
+#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX\
+ GENMASK(regs->fsize[FW_LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX] + 0 - 1, 0)
#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_SET(x)\
- FIELD_PREP(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x)
+ spx5_field_prep(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x)
#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_GET(x)\
- FIELD_GET(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x)
+ spx5_field_get(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x)
-/* LRN:COMMON:SCAN_NEXT_CFG */
-#define LRN_SCAN_NEXT_CFG __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 20, 0, 1, 4)
+/* LRN:COMMON:SCAN_NEXT_CFG */
+#define LRN_SCAN_NEXT_CFG \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 20, 0, 1, 4)
#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL GENMASK(21, 19)
#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL_SET(x)\
@@ -5095,9 +5603,9 @@ enum sparx5_target {
#define LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA_GET(x)\
FIELD_GET(LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA, x)
-/* LRN:COMMON:SCAN_NEXT_CFG_1 */
-#define LRN_SCAN_NEXT_CFG_1 __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 24, 0, 1, 4)
+/* LRN:COMMON:SCAN_NEXT_CFG_1 */
+#define LRN_SCAN_NEXT_CFG_1 \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 24, 0, 1, 4)
#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR GENMASK(30, 16)
#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR_SET(x)\
@@ -5111,9 +5619,9 @@ enum sparx5_target {
#define LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK_GET(x)\
FIELD_GET(LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK, x)
-/* LRN:COMMON:AUTOAGE_CFG */
-#define LRN_AUTOAGE_CFG(r) __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 36, r, 4, 4)
+/* LRN:COMMON:AUTOAGE_CFG */
+#define LRN_AUTOAGE_CFG(r) \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 36, r, 4, 4)
#define LRN_AUTOAGE_CFG_UNIT_SIZE GENMASK(29, 28)
#define LRN_AUTOAGE_CFG_UNIT_SIZE_SET(x)\
@@ -5127,9 +5635,9 @@ enum sparx5_target {
#define LRN_AUTOAGE_CFG_PERIOD_VAL_GET(x)\
FIELD_GET(LRN_AUTOAGE_CFG_PERIOD_VAL, x)
-/* LRN:COMMON:AUTOAGE_CFG_1 */
-#define LRN_AUTOAGE_CFG_1 __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 52, 0, 1, 4)
+/* LRN:COMMON:AUTOAGE_CFG_1 */
+#define LRN_AUTOAGE_CFG_1 \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 52, 0, 1, 4)
#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA BIT(25)
#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA_SET(x)\
@@ -5173,15 +5681,16 @@ enum sparx5_target {
#define LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA_GET(x)\
FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA, x)
-/* LRN:COMMON:AUTOAGE_CFG_2 */
-#define LRN_AUTOAGE_CFG_2 __REG(TARGET_LRN,\
- 0, 1, 0, 0, 1, 72, 56, 0, 1, 4)
+/* LRN:COMMON:AUTOAGE_CFG_2 */
+#define LRN_AUTOAGE_CFG_2 \
+ __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 56, 0, 1, 4)
-#define LRN_AUTOAGE_CFG_2_NEXT_ROW GENMASK(17, 4)
+#define LRN_AUTOAGE_CFG_2_NEXT_ROW\
+ GENMASK(regs->fsize[FW_LRN_AUTOAGE_CFG_2_NEXT_ROW] + 4 - 1, 4)
#define LRN_AUTOAGE_CFG_2_NEXT_ROW_SET(x)\
- FIELD_PREP(LRN_AUTOAGE_CFG_2_NEXT_ROW, x)
+ spx5_field_prep(LRN_AUTOAGE_CFG_2_NEXT_ROW, x)
#define LRN_AUTOAGE_CFG_2_NEXT_ROW_GET(x)\
- FIELD_GET(LRN_AUTOAGE_CFG_2_NEXT_ROW, x)
+ spx5_field_get(LRN_AUTOAGE_CFG_2_NEXT_ROW, x)
#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS GENMASK(3, 0)
#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS_SET(x)\
@@ -5189,9 +5698,10 @@ enum sparx5_target {
#define LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS_GET(x)\
FIELD_GET(LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS, x)
-/* PCIE_DM_EP:PF0_ATU_CAP:IATU_REGION_CTRL_2_OFF_OUTBOUND_0 */
-#define PCEP_RCTRL_2_OUT_0 __REG(TARGET_PCEP,\
- 0, 1, 3145728, 0, 1, 130852, 4, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_REGION_CTRL_2_OFF_OUTBOUND_0 */
+#define PCEP_RCTRL_2_OUT_0 \
+ __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 4, 0, 1, 4)
#define PCEP_RCTRL_2_OUT_0_MSG_CODE GENMASK(7, 0)
#define PCEP_RCTRL_2_OUT_0_MSG_CODE_SET(x)\
@@ -5253,9 +5763,10 @@ enum sparx5_target {
#define PCEP_RCTRL_2_OUT_0_REGION_EN_GET(x)\
FIELD_GET(PCEP_RCTRL_2_OUT_0_REGION_EN, x)
-/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_LWR_OUT_0 __REG(TARGET_PCEP,\
- 0, 1, 3145728, 0, 1, 130852, 8, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_LWR_OUT_0 \
+ __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 8, 0, 1, 4)
#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW GENMASK(15, 0)
#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW_SET(x)\
@@ -5269,13 +5780,15 @@ enum sparx5_target {
#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW_GET(x)\
FIELD_GET(PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW, x)
-/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_UPR_OUT_0 __REG(TARGET_PCEP,\
- 0, 1, 3145728, 0, 1, 130852, 12, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_UPR_OUT_0 \
+ __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 12, 0, 1, 4)
-/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LIMIT_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_LIM_OUT_0 __REG(TARGET_PCEP,\
- 0, 1, 3145728, 0, 1, 130852, 16, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LIMIT_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_LIM_OUT_0 \
+ __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 16, 0, 1, 4)
#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW GENMASK(15, 0)
#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW_SET(x)\
@@ -5289,17 +5802,20 @@ enum sparx5_target {
#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW_GET(x)\
FIELD_GET(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW, x)
-/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_LWR_TGT_OUT_0 __REG(TARGET_PCEP,\
- 0, 1, 3145728, 0, 1, 130852, 20, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_LWR_TGT_OUT_0 \
+ __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 20, 0, 1, 4)
-/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_UPR_TGT_OUT_0 __REG(TARGET_PCEP,\
- 0, 1, 3145728, 0, 1, 130852, 24, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_UPR_TGT_OUT_0 \
+ __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 24, 0, 1, 4)
-/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPR_LIMIT_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_UPR_LIM_OUT_0 __REG(TARGET_PCEP,\
- 0, 1, 3145728, 0, 1, 130852, 32, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPR_LIMIT_ADDR_OFF_OUTBOUND_0 */
+#define PCEP_ADDR_UPR_LIM_OUT_0 \
+ __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 32, 0, 1, 4)
#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW GENMASK(1, 0)
#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW_SET(x)\
@@ -5313,9 +5829,10 @@ enum sparx5_target {
#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW_GET(x)\
FIELD_GET(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW, x)
-/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
-#define PCS10G_BR_PCS_CFG(t) __REG(TARGET_PCS10G_BR,\
- t, 12, 0, 0, 1, 56, 0, 0, 1, 4)
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
+#define PCS10G_BR_PCS_CFG(t) \
+ __REG(TARGET_PCS10G_BR, t, regs->tsize[TC_PCS10G_BR], 0, 0, 1, 56, 0, \
+ 0, 1, 4)
#define PCS10G_BR_PCS_CFG_PCS_ENA BIT(31)
#define PCS10G_BR_PCS_CFG_PCS_ENA_SET(x)\
@@ -5389,9 +5906,10 @@ enum sparx5_target {
#define PCS10G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\
FIELD_GET(PCS10G_BR_PCS_CFG_TX_SCR_DISABLE, x)
-/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
-#define PCS10G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS10G_BR,\
- t, 12, 0, 0, 1, 56, 4, 0, 1, 4)
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
+#define PCS10G_BR_PCS_SD_CFG(t) \
+ __REG(TARGET_PCS10G_BR, t, regs->tsize[TC_PCS10G_BR], 0, 0, 1, 56, 4, \
+ 0, 1, 4)
#define PCS10G_BR_PCS_SD_CFG_SD_SEL BIT(8)
#define PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
@@ -5411,9 +5929,10 @@ enum sparx5_target {
#define PCS10G_BR_PCS_SD_CFG_SD_ENA_GET(x)\
FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_ENA, x)
-/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
-#define PCS25G_BR_PCS_CFG(t) __REG(TARGET_PCS25G_BR,\
- t, 8, 0, 0, 1, 56, 0, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
+#define PCS25G_BR_PCS_CFG(t) \
+ __REG(TARGET_PCS25G_BR, t, 8, 0, 0, 1, 56, 0, 0, 1, 4)
#define PCS25G_BR_PCS_CFG_PCS_ENA BIT(31)
#define PCS25G_BR_PCS_CFG_PCS_ENA_SET(x)\
@@ -5487,9 +6006,10 @@ enum sparx5_target {
#define PCS25G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\
FIELD_GET(PCS25G_BR_PCS_CFG_TX_SCR_DISABLE, x)
-/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
-#define PCS25G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS25G_BR,\
- t, 8, 0, 0, 1, 56, 4, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
+#define PCS25G_BR_PCS_SD_CFG(t) \
+ __REG(TARGET_PCS25G_BR, t, 8, 0, 0, 1, 56, 4, 0, 1, 4)
#define PCS25G_BR_PCS_SD_CFG_SD_SEL BIT(8)
#define PCS25G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
@@ -5509,9 +6029,10 @@ enum sparx5_target {
#define PCS25G_BR_PCS_SD_CFG_SD_ENA_GET(x)\
FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_ENA, x)
-/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
-#define PCS5G_BR_PCS_CFG(t) __REG(TARGET_PCS5G_BR,\
- t, 13, 0, 0, 1, 56, 0, 0, 1, 4)
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
+#define PCS5G_BR_PCS_CFG(t) \
+ __REG(TARGET_PCS5G_BR, t, regs->tsize[TC_PCS5G_BR], 0, 0, 1, 56, 0, 0, \
+ 1, 4)
#define PCS5G_BR_PCS_CFG_PCS_ENA BIT(31)
#define PCS5G_BR_PCS_CFG_PCS_ENA_SET(x)\
@@ -5585,9 +6106,10 @@ enum sparx5_target {
#define PCS5G_BR_PCS_CFG_TX_SCR_DISABLE_GET(x)\
FIELD_GET(PCS5G_BR_PCS_CFG_TX_SCR_DISABLE, x)
-/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
-#define PCS5G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS5G_BR,\
- t, 13, 0, 0, 1, 56, 4, 0, 1, 4)
+/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
+#define PCS5G_BR_PCS_SD_CFG(t) \
+ __REG(TARGET_PCS5G_BR, t, regs->tsize[TC_PCS5G_BR], 0, 0, 1, 56, 4, 0, \
+ 1, 4)
#define PCS5G_BR_PCS_SD_CFG_SD_SEL BIT(8)
#define PCS5G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
@@ -5607,58 +6129,67 @@ enum sparx5_target {
#define PCS5G_BR_PCS_SD_CFG_SD_ENA_GET(x)\
FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_ENA, x)
-/* PORT_CONF:HW_CFG:DEV5G_MODES */
-#define PORT_CONF_DEV5G_MODES __REG(TARGET_PORT_CONF,\
- 0, 1, 0, 0, 1, 24, 0, 0, 1, 4)
+/* PORT_CONF:HW_CFG:DEV5G_MODES */
+#define PORT_CONF_DEV5G_MODES \
+ __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 0, 0, 1, 4)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE BIT(0)
#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE BIT(1)
#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D1_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE BIT(2)
#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D2_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE BIT(3)
#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D3_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE BIT(4)
#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D4_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE BIT(5)
#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D5_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE BIT(6)
#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D6_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE BIT(7)
#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D7_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE BIT(8)
#define PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D8_MODE, x)
@@ -5671,27 +6202,30 @@ enum sparx5_target {
#define PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D9_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE BIT(10)
#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D10_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE BIT(11)
#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D11_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE BIT(12)
#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x)
#define PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x)
-/* PORT_CONF:HW_CFG:DEV10G_MODES */
-#define PORT_CONF_DEV10G_MODES __REG(TARGET_PORT_CONF,\
- 0, 1, 0, 0, 1, 24, 4, 0, 1, 4)
+/* PORT_CONF:HW_CFG:DEV10G_MODES */
+#define PORT_CONF_DEV10G_MODES \
+ __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 4, 0, 1, 4)
#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE BIT(0)
#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_SET(x)\
@@ -5699,75 +6233,87 @@ enum sparx5_target {
#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE BIT(1)
#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D13_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE BIT(2)
#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D14_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE BIT(3)
#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D15_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE BIT(4)
#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D48_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE BIT(5)
#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D49_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE BIT(6)
#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D50_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE BIT(7)
#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D51_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE BIT(8)
#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D52_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE BIT(9)
#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D53_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE BIT(10)
#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D54_MODE, x)
+/* SPARX5 ONLY */
#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE BIT(11)
#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE_SET(x)\
FIELD_PREP(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x)
#define PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x)
-/* PORT_CONF:HW_CFG:DEV25G_MODES */
-#define PORT_CONF_DEV25G_MODES __REG(TARGET_PORT_CONF,\
- 0, 1, 0, 0, 1, 24, 8, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PORT_CONF:HW_CFG:DEV25G_MODES */
+#define PORT_CONF_DEV25G_MODES \
+ __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 8, 0, 1, 4)
#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE BIT(0)
#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE_SET(x)\
@@ -5817,9 +6363,9 @@ enum sparx5_target {
#define PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE_GET(x)\
FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE, x)
-/* PORT_CONF:HW_CFG:QSGMII_ENA */
-#define PORT_CONF_QSGMII_ENA __REG(TARGET_PORT_CONF,\
- 0, 1, 0, 0, 1, 24, 12, 0, 1, 4)
+/* PORT_CONF:HW_CFG:QSGMII_ENA */
+#define PORT_CONF_QSGMII_ENA \
+ __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 12, 0, 1, 4)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0 BIT(0)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0_SET(x)\
@@ -5857,45 +6403,52 @@ enum sparx5_target {
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_5_GET(x)\
FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_5, x)
+/* SPARX5 ONLY */
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6 BIT(6)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6_SET(x)\
FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_6, x)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_6_GET(x)\
FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_6, x)
+/* SPARX5 ONLY */
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7 BIT(7)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7_SET(x)\
FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_7, x)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_7_GET(x)\
FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_7, x)
+/* SPARX5 ONLY */
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8 BIT(8)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8_SET(x)\
FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_8, x)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_8_GET(x)\
FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_8, x)
+/* SPARX5 ONLY */
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9 BIT(9)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9_SET(x)\
FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_9, x)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_9_GET(x)\
FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_9, x)
+/* SPARX5 ONLY */
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10 BIT(10)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10_SET(x)\
FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_10, x)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_10_GET(x)\
FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_10, x)
+/* SPARX5 ONLY */
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11 BIT(11)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11_SET(x)\
FIELD_PREP(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_11_GET(x)\
FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x)
-/* PORT_CONF:USGMII_CFG_STAT:USGMII_CFG */
-#define PORT_CONF_USGMII_CFG(g) __REG(TARGET_PORT_CONF,\
- 0, 1, 72, g, 6, 8, 0, 0, 1, 4)
+/* SPARX5 ONLY */
+/* PORT_CONF:USGMII_CFG_STAT:USGMII_CFG */
+#define PORT_CONF_USGMII_CFG(g) \
+ __REG(TARGET_PORT_CONF, 0, 1, 72, g, 6, 8, 0, 0, 1, 4)
#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM BIT(9)
#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(x)\
@@ -5939,39 +6492,46 @@ enum sparx5_target {
#define PORT_CONF_USGMII_CFG_QUAD_MODE_GET(x)\
FIELD_GET(PORT_CONF_USGMII_CFG_QUAD_MODE, x)
-/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR */
-#define PTP_PTP_PIN_INTR __REG(TARGET_PTP,\
- 0, 1, 320, 0, 1, 16, 0, 0, 1, 4)
+/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR */
+#define PTP_PTP_PIN_INTR \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_CFG], 0, 1, 16, 0, 0, 1,\
+ 4)
-#define PTP_PTP_PIN_INTR_INTR_PTP GENMASK(4, 0)
+#define PTP_PTP_PIN_INTR_INTR_PTP\
+ GENMASK(regs->fsize[FW_PTP_PTP_PIN_INTR_INTR_PTP] + 0 - 1, 0)
#define PTP_PTP_PIN_INTR_INTR_PTP_SET(x)\
- FIELD_PREP(PTP_PTP_PIN_INTR_INTR_PTP, x)
+ spx5_field_prep(PTP_PTP_PIN_INTR_INTR_PTP, x)
#define PTP_PTP_PIN_INTR_INTR_PTP_GET(x)\
- FIELD_GET(PTP_PTP_PIN_INTR_INTR_PTP, x)
+ spx5_field_get(PTP_PTP_PIN_INTR_INTR_PTP, x)
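Once a field width comes from regs->fsize[], the GENMASK() result is no longer a compile-time constant, so FIELD_PREP()/FIELD_GET() — which insist on constant masks — are replaced by spx5_field_prep()/spx5_field_get(). A plausible shape for those helpers, offered as a sketch rather than the driver's confirmed definition:

#include <linux/bitfield.h>

/* Sketch: runtime-mask variants of FIELD_PREP()/FIELD_GET(); __bf_shf()
 * returns the shift of the mask's lowest set bit. With fsize = 5 on
 * Sparx5, the INTR_PTP mask above reduces to the removed GENMASK(4, 0).
 */
static inline u32 spx5_field_prep(u32 mask, u32 val)
{
	return (val << __bf_shf(mask)) & mask;
}

static inline u32 spx5_field_get(u32 mask, u32 val)
{
	return (val & mask) >> __bf_shf(mask);
}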
-/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR_ENA */
-#define PTP_PTP_PIN_INTR_ENA __REG(TARGET_PTP,\
- 0, 1, 320, 0, 1, 16, 4, 0, 1, 4)
+/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR_ENA */
+#define PTP_PTP_PIN_INTR_ENA \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_CFG], 0, 1, 16, 4, 0, 1,\
+ 4)
-#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA GENMASK(4, 0)
+#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA\
+ GENMASK(regs->fsize[FW_PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA] + 0 - 1, 0)
#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_SET(x)\
- FIELD_PREP(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x)
+ spx5_field_prep(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x)
#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_GET(x)\
- FIELD_GET(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x)
+ spx5_field_get(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x)
-/* DEVCPU_PTP:PTP_CFG:PTP_INTR_IDENT */
-#define PTP_PTP_INTR_IDENT __REG(TARGET_PTP,\
- 0, 1, 320, 0, 1, 16, 8, 0, 1, 4)
+/* DEVCPU_PTP:PTP_CFG:PTP_INTR_IDENT */
+#define PTP_PTP_INTR_IDENT \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_CFG], 0, 1, 16, 8, 0, 1,\
+ 4)
-#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT GENMASK(4, 0)
+#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT\
+ GENMASK(regs->fsize[FW_PTP_PTP_INTR_IDENT_INTR_PTP_IDENT] + 0 - 1, 0)
#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_SET(x)\
- FIELD_PREP(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x)
+ spx5_field_prep(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x)
#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_GET(x)\
- FIELD_GET(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x)
+ spx5_field_get(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x)
-/* DEVCPU_PTP:PTP_CFG:PTP_DOM_CFG */
-#define PTP_PTP_DOM_CFG __REG(TARGET_PTP,\
- 0, 1, 320, 0, 1, 16, 12, 0, 1, 4)
+/* DEVCPU_PTP:PTP_CFG:PTP_DOM_CFG */
+#define PTP_PTP_DOM_CFG \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_CFG], 0, 1, 16, 12, 0, \
+ 1, 4)
#define PTP_PTP_DOM_CFG_PTP_ENA GENMASK(11, 9)
#define PTP_PTP_DOM_CFG_PTP_ENA_SET(x)\
@@ -5997,13 +6557,15 @@ enum sparx5_target {
#define PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS_GET(x)\
FIELD_GET(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, x)
-/* DEVCPU_PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */
-#define PTP_CLK_PER_CFG(g, r) __REG(TARGET_PTP,\
- 0, 1, 336, g, 3, 28, 0, r, 2, 4)
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */
+#define PTP_CLK_PER_CFG(g, r) \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_TOD_DOMAINS], g, 3, 28, \
+ 0, r, 2, 4)
-/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC */
-#define PTP_PTP_CUR_NSEC(g) __REG(TARGET_PTP,\
- 0, 1, 336, g, 3, 28, 8, 0, 1, 4)
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC */
+#define PTP_PTP_CUR_NSEC(g) \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_TOD_DOMAINS], g, 3, 28, \
+ 8, 0, 1, 4)
#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC GENMASK(29, 0)
#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_SET(x)\
@@ -6011,9 +6573,10 @@ enum sparx5_target {
#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_GET(x)\
FIELD_GET(PTP_PTP_CUR_NSEC_PTP_CUR_NSEC, x)
-/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC_FRAC */
-#define PTP_PTP_CUR_NSEC_FRAC(g) __REG(TARGET_PTP,\
- 0, 1, 336, g, 3, 28, 12, 0, 1, 4)
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC_FRAC */
+#define PTP_PTP_CUR_NSEC_FRAC(g) \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_TOD_DOMAINS], g, 3, 28, \
+ 12, 0, 1, 4)
#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC GENMASK(7, 0)
#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_SET(x)\
@@ -6021,13 +6584,15 @@ enum sparx5_target {
#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_GET(x)\
FIELD_GET(PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC, x)
-/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_LSB */
-#define PTP_PTP_CUR_SEC_LSB(g) __REG(TARGET_PTP,\
- 0, 1, 336, g, 3, 28, 16, 0, 1, 4)
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_LSB */
+#define PTP_PTP_CUR_SEC_LSB(g) \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_TOD_DOMAINS], g, 3, 28, \
+ 16, 0, 1, 4)
-/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_MSB */
-#define PTP_PTP_CUR_SEC_MSB(g) __REG(TARGET_PTP,\
- 0, 1, 336, g, 3, 28, 20, 0, 1, 4)
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_MSB */
+#define PTP_PTP_CUR_SEC_MSB(g) \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_TOD_DOMAINS], g, 3, 28, \
+ 20, 0, 1, 4)
#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB GENMASK(15, 0)
#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_SET(x)\
@@ -6035,37 +6600,43 @@ enum sparx5_target {
#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_GET(x)\
FIELD_GET(PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB, x)
-/* DEVCPU_PTP:PTP_TOD_DOMAINS:NTP_CUR_NSEC */
-#define PTP_NTP_CUR_NSEC(g) __REG(TARGET_PTP,\
- 0, 1, 336, g, 3, 28, 24, 0, 1, 4)
+/* DEVCPU_PTP:PTP_TOD_DOMAINS:NTP_CUR_NSEC */
+#define PTP_NTP_CUR_NSEC(g) \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PTP_TOD_DOMAINS], g, 3, 28, \
+ 24, 0, 1, 4)
-/* DEVCPU_PTP:PTP_PINS:PTP_PIN_CFG */
-#define PTP_PTP_PIN_CFG(g) __REG(TARGET_PTP,\
- 0, 1, 0, g, 5, 64, 0, 0, 1, 4)
+/* DEVCPU_PTP:PTP_PINS:PTP_PIN_CFG */
+#define PTP_PTP_PIN_CFG(g) \
+ __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 0, 0, 1,\
+ 4)
-#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION GENMASK(28, 26)
+#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION\
+ GENMASK(regs->fpos[FP_PTP_PTP_PIN_CFG_PTP_PIN_ACTION] + 2,\
+ regs->fpos[FP_PTP_PTP_PIN_CFG_PTP_PIN_ACTION])
#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(x)\
- FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x)
+ spx5_field_prep(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x)
#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_GET(x)\
- FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x)
+ spx5_field_get(PTP_PTP_PIN_CFG_PTP_PIN_ACTION, x)
-#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC GENMASK(25, 24)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC\
+ GENMASK(regs->fpos[FP_PTP_PTP_PIN_CFG_PTP_PIN_SYNC] + 1,\
+ regs->fpos[FP_PTP_PTP_PIN_CFG_PTP_PIN_SYNC])
#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_SET(x)\
- FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x)
+ spx5_field_prep(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x)
#define PTP_PTP_PIN_CFG_PTP_PIN_SYNC_GET(x)\
- FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x)
+ spx5_field_get(PTP_PTP_PIN_CFG_PTP_PIN_SYNC, x)
-#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL BIT(23)
+#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL\
+ BIT(regs->fpos[FP_PTP_PTP_PIN_CFG_PTP_PIN_INV_POL])
#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_SET(x)\
- FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x)
+ spx5_field_prep(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x)
#define PTP_PTP_PIN_CFG_PTP_PIN_INV_POL_GET(x)\
- FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x)
+ spx5_field_get(PTP_PTP_PIN_CFG_PTP_PIN_INV_POL, x)
-#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT GENMASK(22, 21)
+#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT\
+ GENMASK(regs->fsize[FW_PTP_PTP_PIN_CFG_PTP_PIN_SELECT] + 21 - 1, 21)
#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_SET(x)\
- FIELD_PREP(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x)
+ spx5_field_prep(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x)
#define PTP_PTP_PIN_CFG_PTP_PIN_SELECT_GET(x)\
- FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x)
+ spx5_field_get(PTP_PTP_PIN_CFG_PTP_PIN_SELECT, x)
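regs->fpos[] parameterizes where a field starts rather than how wide it is. The Sparx5 positions can be read straight off the removed lines, and plugging them back in reproduces the old constants — a useful sanity check on the conversion. A sketch under the assumption the table holds exactly those positions:

/* Sketch: Sparx5 field positions implied by the removed constants. */
static const unsigned int sparx5_fpos_sketch[] = {
	[FP_PTP_PTP_PIN_CFG_PTP_PIN_ACTION]  = 26, /* GENMASK(26 + 2, 26) == GENMASK(28, 26) */
	[FP_PTP_PTP_PIN_CFG_PTP_PIN_SYNC]    = 24, /* GENMASK(24 + 1, 24) == GENMASK(25, 24) */
	[FP_PTP_PTP_PIN_CFG_PTP_PIN_INV_POL] = 23, /* BIT(23) */
};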
#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT GENMASK(20, 18)
#define PTP_PTP_PIN_CFG_PTP_CLK_SELECT_SET(x)\
@@ -6097,9 +6668,10 @@ enum sparx5_target {
#define PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS_GET(x)\
FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS, x)
-/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_MSB */
-#define PTP_PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP,\
- 0, 1, 0, g, 5, 64, 4, 0, 1, 4)
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_MSB */
+#define PTP_PTP_TOD_SEC_MSB(g) \
+ __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 4, 0, 1,\
+ 4)
#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB GENMASK(15, 0)
#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(x)\
@@ -6107,13 +6679,15 @@ enum sparx5_target {
#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_GET(x)\
FIELD_GET(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB, x)
-/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_LSB */
-#define PTP_PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP,\
- 0, 1, 0, g, 5, 64, 8, 0, 1, 4)
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_LSB */
+#define PTP_PTP_TOD_SEC_LSB(g) \
+ __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 8, 0, 1,\
+ 4)
-/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC */
-#define PTP_PTP_TOD_NSEC(g) __REG(TARGET_PTP,\
- 0, 1, 0, g, 5, 64, 12, 0, 1, 4)
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC */
+#define PTP_PTP_TOD_NSEC(g) \
+ __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 12, 0, \
+ 1, 4)
#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC GENMASK(29, 0)
#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(x)\
@@ -6121,9 +6695,10 @@ enum sparx5_target {
#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_GET(x)\
FIELD_GET(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC, x)
-/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC_FRAC */
-#define PTP_PTP_TOD_NSEC_FRAC(g) __REG(TARGET_PTP,\
- 0, 1, 0, g, 5, 64, 16, 0, 1, 4)
+/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC_FRAC */
+#define PTP_PTP_TOD_NSEC_FRAC(g) \
+ __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 16, 0, \
+ 1, 4)
#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC GENMASK(7, 0)
#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_SET(x)\
@@ -6131,13 +6706,15 @@ enum sparx5_target {
#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_GET(x)\
FIELD_GET(PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC, x)
-/* DEVCPU_PTP:PTP_PINS:NTP_NSEC */
-#define PTP_NTP_NSEC(g) __REG(TARGET_PTP,\
- 0, 1, 0, g, 5, 64, 20, 0, 1, 4)
+/* DEVCPU_PTP:PTP_PINS:NTP_NSEC */
+#define PTP_NTP_NSEC(g) \
+ __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 20, 0, \
+ 1, 4)
-/* DEVCPU_PTP:PTP_PINS:PIN_WF_HIGH_PERIOD */
-#define PTP_PIN_WF_HIGH_PERIOD(g) __REG(TARGET_PTP,\
- 0, 1, 0, g, 5, 64, 24, 0, 1, 4)
+/* DEVCPU_PTP:PTP_PINS:PIN_WF_HIGH_PERIOD */
+#define PTP_PIN_WF_HIGH_PERIOD(g) \
+ __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 24, 0, \
+ 1, 4)
#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH GENMASK(29, 0)
#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_SET(x)\
@@ -6145,9 +6722,10 @@ enum sparx5_target {
#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_GET(x)\
FIELD_GET(PTP_PIN_WF_HIGH_PERIOD_PIN_WFH, x)
-/* DEVCPU_PTP:PTP_PINS:PIN_WF_LOW_PERIOD */
-#define PTP_PIN_WF_LOW_PERIOD(g) __REG(TARGET_PTP,\
- 0, 1, 0, g, 5, 64, 28, 0, 1, 4)
+/* DEVCPU_PTP:PTP_PINS:PIN_WF_LOW_PERIOD */
+#define PTP_PIN_WF_LOW_PERIOD(g) \
+ __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 28, 0, \
+ 1, 4)
#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL GENMASK(29, 0)
#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_SET(x)\
@@ -6155,9 +6733,10 @@ enum sparx5_target {
#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_GET(x)\
FIELD_GET(PTP_PIN_WF_LOW_PERIOD_PIN_WFL, x)
-/* DEVCPU_PTP:PTP_PINS:PIN_IOBOUNCH_DELAY */
-#define PTP_PIN_IOBOUNCH_DELAY(g) __REG(TARGET_PTP,\
- 0, 1, 0, g, 5, 64, 32, 0, 1, 4)
+/* DEVCPU_PTP:PTP_PINS:PIN_IOBOUNCH_DELAY */
+#define PTP_PIN_IOBOUNCH_DELAY(g) \
+ __REG(TARGET_PTP, 0, 1, 0, g, regs->gcnt[GC_PTP_PTP_PINS], 64, 32, 0, \
+ 1, 4)
#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL GENMASK(18, 3)
#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL_SET(x)\
@@ -6171,22 +6750,27 @@ enum sparx5_target {
#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG_GET(x)\
FIELD_GET(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG, x)
-/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CTRL */
-#define PTP_PHAD_CTRL(g) __REG(TARGET_PTP,\
- 0, 1, 420, g, 5, 8, 0, 0, 1, 4)
+/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CTRL */
+#define PTP_PHAD_CTRL(g) \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PHASE_DETECTOR_CTRL], g, \
+ regs->gcnt[GC_PTP_PHASE_DETECTOR_CTRL], \
+ regs->gsize[GW_PTP_PHASE_DETECTOR_CTRL], 0, 0, 1, 4)
-#define PTP_PHAD_CTRL_PHAD_ENA BIT(7)
+#define PTP_PHAD_CTRL_PHAD_ENA\
+ BIT(regs->fpos[FP_PTP_PHAD_CTRL_PHAD_ENA])
#define PTP_PHAD_CTRL_PHAD_ENA_SET(x)\
- FIELD_PREP(PTP_PHAD_CTRL_PHAD_ENA, x)
+ spx5_field_prep(PTP_PHAD_CTRL_PHAD_ENA, x)
#define PTP_PHAD_CTRL_PHAD_ENA_GET(x)\
- FIELD_GET(PTP_PHAD_CTRL_PHAD_ENA, x)
+ spx5_field_get(PTP_PHAD_CTRL_PHAD_ENA, x)
-#define PTP_PHAD_CTRL_PHAD_FAILED BIT(6)
+#define PTP_PHAD_CTRL_PHAD_FAILED\
+ BIT(regs->fpos[FP_PTP_PHAD_CTRL_PHAD_FAILED])
#define PTP_PHAD_CTRL_PHAD_FAILED_SET(x)\
- FIELD_PREP(PTP_PHAD_CTRL_PHAD_FAILED, x)
+ spx5_field_prep(PTP_PHAD_CTRL_PHAD_FAILED, x)
#define PTP_PHAD_CTRL_PHAD_FAILED_GET(x)\
- FIELD_GET(PTP_PHAD_CTRL_PHAD_FAILED, x)
+ spx5_field_get(PTP_PHAD_CTRL_PHAD_FAILED, x)
+/* SPARX5 ONLY */
#define PTP_PHAD_CTRL_REDUCED_RES GENMASK(5, 3)
#define PTP_PHAD_CTRL_REDUCED_RES_SET(x)\
FIELD_PREP(PTP_PHAD_CTRL_REDUCED_RES, x)
@@ -6199,13 +6783,79 @@ enum sparx5_target {
#define PTP_PHAD_CTRL_LOCK_ACC_GET(x)\
FIELD_GET(PTP_PHAD_CTRL_LOCK_ACC, x)
-/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CYC_STAT */
-#define PTP_PHAD_CYC_STAT(g) __REG(TARGET_PTP,\
- 0, 1, 420, g, 5, 8, 4, 0, 1, 4)
-
-/* QFWD:SYSTEM:SWITCH_PORT_MODE */
-#define QFWD_SWITCH_PORT_MODE(r) __REG(TARGET_QFWD,\
- 0, 1, 0, 0, 1, 340, 0, r, 70, 4)
+/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CYC_STAT */
+#define PTP_PHAD_CYC_STAT(g) \
+ __REG(TARGET_PTP, 0, 1, regs->gaddr[GA_PTP_PHASE_DETECTOR_CTRL], g, \
+ regs->gcnt[GC_PTP_PHASE_DETECTOR_CTRL], \
+ regs->gsize[GW_PTP_PHASE_DETECTOR_CTRL], 4, 0, 1, 4)
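The PHAD_CTRL/PHAD_CYC_STAT macros above draw on three tables at once (group address, replication count, and group width), which makes this a good point to sketch the descriptor the regs-> references imply. Only the seven arrays actually used in this diff are listed, and the index-enum prefixes are inferred from the macro arguments, not confirmed definitions:

/* Sketch of the per-chip register descriptor behind the regs-> lookups. */
struct sparx5_regs_sketch {
	const unsigned int *tsize;	/* target instance counts (TC_*) */
	const unsigned int *gaddr;	/* register-group base addresses (GA_*) */
	const unsigned int *gcnt;	/* group replication counts (GC_*) */
	const unsigned int *gsize;	/* group widths in bytes (GW_*) */
	const unsigned int *rcnt;	/* register replication counts (RC_*) */
	const unsigned int *fpos;	/* field bit positions (FP_*) */
	const unsigned int *fsize;	/* field widths in bits (FW_*) */
};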
+
+/* LAN969X ONLY */
+/* DEVCPU_PTP:PTP_TS_FIFO:PTP_TWOSTEP_CTRL */
+#define PTP_TWOSTEP_CTRL \
+ __REG(TARGET_PTP, 0, 1, 612, 0, 1, 16, 0, 0, 1, 4)
+
+#define PTP_TWOSTEP_CTRL_PTP_OVWR_ENA BIT(12)
+#define PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_PTP_OVWR_ENA, x)
+#define PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_PTP_OVWR_ENA, x)
+
+#define PTP_TWOSTEP_CTRL_PTP_NXT BIT(11)
+#define PTP_TWOSTEP_CTRL_PTP_NXT_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_PTP_NXT, x)
+#define PTP_TWOSTEP_CTRL_PTP_NXT_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_PTP_NXT, x)
+
+#define PTP_TWOSTEP_CTRL_PTP_VLD BIT(10)
+#define PTP_TWOSTEP_CTRL_PTP_VLD_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_PTP_VLD, x)
+#define PTP_TWOSTEP_CTRL_PTP_VLD_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_PTP_VLD, x)
+
+#define PTP_TWOSTEP_CTRL_STAMP_TX BIT(9)
+#define PTP_TWOSTEP_CTRL_STAMP_TX_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_STAMP_TX, x)
+#define PTP_TWOSTEP_CTRL_STAMP_TX_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_STAMP_TX, x)
+
+#define PTP_TWOSTEP_CTRL_STAMP_PORT GENMASK(8, 1)
+#define PTP_TWOSTEP_CTRL_STAMP_PORT_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+#define PTP_TWOSTEP_CTRL_STAMP_PORT_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_STAMP_PORT, x)
+
+#define PTP_TWOSTEP_CTRL_PTP_OVFL BIT(0)
+#define PTP_TWOSTEP_CTRL_PTP_OVFL_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_CTRL_PTP_OVFL, x)
+#define PTP_TWOSTEP_CTRL_PTP_OVFL_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_CTRL_PTP_OVFL, x)
+
+/* LAN969X ONLY */
+/* DEVCPU_PTP:PTP_TS_FIFO:PTP_TWOSTEP_STAMP_NSEC */
+#define PTP_TWOSTEP_STAMP_NSEC \
+ __REG(TARGET_PTP, 0, 1, 612, 0, 1, 16, 4, 0, 1, 4)
+
+#define PTP_TWOSTEP_STAMP_NSEC_NS GENMASK(29, 0)
+#define PTP_TWOSTEP_STAMP_NSEC_NS_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_STAMP_NSEC_NS, x)
+#define PTP_TWOSTEP_STAMP_NSEC_NS_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_STAMP_NSEC_NS, x)
+
+/* LAN969X ONLY */
+/* DEVCPU_PTP:PTP_TS_FIFO:PTP_TWOSTEP_STAMP_SUBNS */
+#define PTP_TWOSTEP_STAMP_SUBNS \
+ __REG(TARGET_PTP, 0, 1, 612, 0, 1, 16, 8, 0, 1, 4)
+
+#define PTP_TWOSTEP_STAMP_SUBNS_NS GENMASK(7, 0)
+#define PTP_TWOSTEP_STAMP_SUBNS_NS_SET(x)\
+ FIELD_PREP(PTP_TWOSTEP_STAMP_SUBNS_NS, x)
+#define PTP_TWOSTEP_STAMP_SUBNS_NS_GET(x)\
+ FIELD_GET(PTP_TWOSTEP_STAMP_SUBNS_NS, x)
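The two-step PTP timestamp block now exists in two places: DEVCPU_PTP:PTP_TS_FIFO on lan969x (the LAN969X ONLY hunks above) and REW:PTP_CTRL on Sparx5 (marked SPARX5 ONLY in a later hunk), so a caller has to pick the block at runtime. A hedged sketch of that dispatch, assuming an is_sparx5() predicate and an spx5_rd() register read of the kind this driver family provides:

/* Sketch: fetch the two-step timestamp nanoseconds from whichever
 * block the chip implements. is_sparx5() and spx5_rd() are assumed
 * helpers; the register macros are the ones defined in this diff.
 */
static u32 spx5_twostep_nsec_sketch(struct sparx5 *sparx5)
{
	if (is_sparx5(sparx5))
		return REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(
			spx5_rd(sparx5, REW_PTP_TWOSTEP_STAMP));

	return PTP_TWOSTEP_STAMP_NSEC_NS_GET(
		spx5_rd(sparx5, PTP_TWOSTEP_STAMP_NSEC));
}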
+
+/* QFWD:SYSTEM:SWITCH_PORT_MODE */
+#define QFWD_SWITCH_PORT_MODE(r) \
+ __REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 0, r, \
+ regs->rcnt[RC_QFWD_SWITCH_PORT_MODE], 4)
#define QFWD_SWITCH_PORT_MODE_PORT_ENA BIT(19)
#define QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(x)\
@@ -6261,49 +6911,53 @@ enum sparx5_target {
#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE_GET(x)\
FIELD_GET(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x)
-/* QFWD:SYSTEM:FRAME_COPY_CFG */
-#define QFWD_FRAME_COPY_CFG(r)\
+/* QFWD:SYSTEM:FRAME_COPY_CFG */
+#define QFWD_FRAME_COPY_CFG(r) \
__REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 284, r, 12, 4)
-#define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL GENMASK(12, 6)
+#define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL\
+ GENMASK(regs->fsize[FW_QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL] + 6 - 1, 6)
#define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_SET(x)\
- FIELD_PREP(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, x)
+ spx5_field_prep(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, x)
#define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_GET(x)\
- FIELD_GET(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, x)
+ spx5_field_get(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, x)
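Fields that do not start at bit 0 keep their base offset as a literal in the macro and take only the width from regs->fsize[]; FRMC_PORT_VAL above becomes GENMASK(fsize + 6 - 1, 6). A one-line check, with the Sparx5 width of 7 read off the removed GENMASK(12, 6):

/* Sketch: confirm the parameterized mask matches the removed constant
 * when fsize takes its Sparx5 value of 7.
 */
static inline bool frmc_mask_matches_sparx5(void)
{
	const unsigned int fsize = 7; /* FW_QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL */

	return GENMASK(fsize + 6 - 1, 6) == GENMASK(12, 6);
}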
-/* QRES:RES_CTRL:RES_CFG */
-#define QRES_RES_CFG(g) __REG(TARGET_QRES,\
- 0, 1, 0, g, 5120, 16, 0, 0, 1, 4)
+/* QRES:RES_CTRL:RES_CFG */
+#define QRES_RES_CFG(g) \
+ __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 0, 0, 1, 4)
-#define QRES_RES_CFG_WM_HIGH GENMASK(11, 0)
+#define QRES_RES_CFG_WM_HIGH\
+ GENMASK(regs->fsize[FW_QRES_RES_CFG_WM_HIGH] + 0 - 1, 0)
#define QRES_RES_CFG_WM_HIGH_SET(x)\
- FIELD_PREP(QRES_RES_CFG_WM_HIGH, x)
+ spx5_field_prep(QRES_RES_CFG_WM_HIGH, x)
#define QRES_RES_CFG_WM_HIGH_GET(x)\
- FIELD_GET(QRES_RES_CFG_WM_HIGH, x)
+ spx5_field_get(QRES_RES_CFG_WM_HIGH, x)
-/* QRES:RES_CTRL:RES_STAT */
-#define QRES_RES_STAT(g) __REG(TARGET_QRES,\
- 0, 1, 0, g, 5120, 16, 4, 0, 1, 4)
+/* QRES:RES_CTRL:RES_STAT */
+#define QRES_RES_STAT(g) \
+ __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 4, 0, 1, 4)
-#define QRES_RES_STAT_MAXUSE GENMASK(20, 0)
+#define QRES_RES_STAT_MAXUSE\
+ GENMASK(regs->fsize[FW_QRES_RES_STAT_MAXUSE] + 0 - 1, 0)
#define QRES_RES_STAT_MAXUSE_SET(x)\
- FIELD_PREP(QRES_RES_STAT_MAXUSE, x)
+ spx5_field_prep(QRES_RES_STAT_MAXUSE, x)
#define QRES_RES_STAT_MAXUSE_GET(x)\
- FIELD_GET(QRES_RES_STAT_MAXUSE, x)
+ spx5_field_get(QRES_RES_STAT_MAXUSE, x)
-/* QRES:RES_CTRL:RES_STAT_CUR */
-#define QRES_RES_STAT_CUR(g) __REG(TARGET_QRES,\
- 0, 1, 0, g, 5120, 16, 8, 0, 1, 4)
+/* QRES:RES_CTRL:RES_STAT_CUR */
+#define QRES_RES_STAT_CUR(g) \
+ __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 8, 0, 1, 4)
-#define QRES_RES_STAT_CUR_INUSE GENMASK(20, 0)
+#define QRES_RES_STAT_CUR_INUSE\
+ GENMASK(regs->fsize[FW_QRES_RES_STAT_CUR_INUSE] + 0 - 1, 0)
#define QRES_RES_STAT_CUR_INUSE_SET(x)\
- FIELD_PREP(QRES_RES_STAT_CUR_INUSE, x)
+ spx5_field_prep(QRES_RES_STAT_CUR_INUSE, x)
#define QRES_RES_STAT_CUR_INUSE_GET(x)\
- FIELD_GET(QRES_RES_STAT_CUR_INUSE, x)
+ spx5_field_get(QRES_RES_STAT_CUR_INUSE, x)
-/* DEVCPU_QS:XTR:XTR_GRP_CFG */
-#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS,\
- 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
+/* DEVCPU_QS:XTR:XTR_GRP_CFG */
+#define QS_XTR_GRP_CFG(r) \
+ __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
#define QS_XTR_GRP_CFG_MODE GENMASK(3, 2)
#define QS_XTR_GRP_CFG_MODE_SET(x)\
@@ -6323,13 +6977,13 @@ enum sparx5_target {
#define QS_XTR_GRP_CFG_BYTE_SWAP_GET(x)\
FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x)
-/* DEVCPU_QS:XTR:XTR_RD */
-#define QS_XTR_RD(r) __REG(TARGET_QS,\
- 0, 1, 0, 0, 1, 36, 8, r, 2, 4)
+/* DEVCPU_QS:XTR:XTR_RD */
+#define QS_XTR_RD(r) \
+ __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 8, r, 2, 4)
-/* DEVCPU_QS:XTR:XTR_FLUSH */
-#define QS_XTR_FLUSH __REG(TARGET_QS,\
- 0, 1, 0, 0, 1, 36, 24, 0, 1, 4)
+/* DEVCPU_QS:XTR:XTR_FLUSH */
+#define QS_XTR_FLUSH \
+ __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 24, 0, 1, 4)
#define QS_XTR_FLUSH_FLUSH GENMASK(1, 0)
#define QS_XTR_FLUSH_FLUSH_SET(x)\
@@ -6337,9 +6991,9 @@ enum sparx5_target {
#define QS_XTR_FLUSH_FLUSH_GET(x)\
FIELD_GET(QS_XTR_FLUSH_FLUSH, x)
-/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */
-#define QS_XTR_DATA_PRESENT __REG(TARGET_QS,\
- 0, 1, 0, 0, 1, 36, 28, 0, 1, 4)
+/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */
+#define QS_XTR_DATA_PRESENT \
+ __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 28, 0, 1, 4)
#define QS_XTR_DATA_PRESENT_DATA_PRESENT GENMASK(1, 0)
#define QS_XTR_DATA_PRESENT_DATA_PRESENT_SET(x)\
@@ -6347,9 +7001,9 @@ enum sparx5_target {
#define QS_XTR_DATA_PRESENT_DATA_PRESENT_GET(x)\
FIELD_GET(QS_XTR_DATA_PRESENT_DATA_PRESENT, x)
-/* DEVCPU_QS:INJ:INJ_GRP_CFG */
-#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS,\
- 0, 1, 36, 0, 1, 40, 0, r, 2, 4)
+/* DEVCPU_QS:INJ:INJ_GRP_CFG */
+#define QS_INJ_GRP_CFG(r) \
+ __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 0, r, 2, 4)
#define QS_INJ_GRP_CFG_MODE GENMASK(3, 2)
#define QS_INJ_GRP_CFG_MODE_SET(x)\
@@ -6363,13 +7017,13 @@ enum sparx5_target {
#define QS_INJ_GRP_CFG_BYTE_SWAP_GET(x)\
FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x)
-/* DEVCPU_QS:INJ:INJ_WR */
-#define QS_INJ_WR(r) __REG(TARGET_QS,\
- 0, 1, 36, 0, 1, 40, 8, r, 2, 4)
+/* DEVCPU_QS:INJ:INJ_WR */
+#define QS_INJ_WR(r) \
+ __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 8, r, 2, 4)
-/* DEVCPU_QS:INJ:INJ_CTRL */
-#define QS_INJ_CTRL(r) __REG(TARGET_QS,\
- 0, 1, 36, 0, 1, 40, 16, r, 2, 4)
+/* DEVCPU_QS:INJ:INJ_CTRL */
+#define QS_INJ_CTRL(r) \
+ __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 16, r, 2, 4)
#define QS_INJ_CTRL_GAP_SIZE GENMASK(24, 21)
#define QS_INJ_CTRL_GAP_SIZE_SET(x)\
@@ -6401,9 +7055,9 @@ enum sparx5_target {
#define QS_INJ_CTRL_VLD_BYTES_GET(x)\
FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x)
-/* DEVCPU_QS:INJ:INJ_STATUS */
-#define QS_INJ_STATUS __REG(TARGET_QS,\
- 0, 1, 36, 0, 1, 40, 24, 0, 1, 4)
+/* DEVCPU_QS:INJ:INJ_STATUS */
+#define QS_INJ_STATUS \
+ __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 24, 0, 1, 4)
#define QS_INJ_STATUS_WMARK_REACHED GENMASK(5, 4)
#define QS_INJ_STATUS_WMARK_REACHED_SET(x)\
@@ -6423,21 +7077,24 @@ enum sparx5_target {
#define QS_INJ_STATUS_INJ_IN_PROGRESS_GET(x)\
FIELD_GET(QS_INJ_STATUS_INJ_IN_PROGRESS, x)
-/* QSYS:PAUSE_CFG:PAUSE_CFG */
-#define QSYS_PAUSE_CFG(r) __REG(TARGET_QSYS,\
- 0, 1, 544, 0, 1, 1128, 0, r, 70, 4)
+/* QSYS:PAUSE_CFG:PAUSE_CFG */
+#define QSYS_PAUSE_CFG(r) \
+ __REG(TARGET_QSYS, 0, 1, 544, 0, 1, regs->gsize[GW_QSYS_PAUSE_CFG], 0, \
+ r, regs->rcnt[RC_QSYS_PAUSE_CFG], 4)
-#define QSYS_PAUSE_CFG_PAUSE_START GENMASK(25, 14)
+#define QSYS_PAUSE_CFG_PAUSE_START\
+ GENMASK(regs->fsize[FW_QSYS_PAUSE_CFG_PAUSE_START] + 14 - 1, 14)
#define QSYS_PAUSE_CFG_PAUSE_START_SET(x)\
- FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_START, x)
+ spx5_field_prep(QSYS_PAUSE_CFG_PAUSE_START, x)
#define QSYS_PAUSE_CFG_PAUSE_START_GET(x)\
- FIELD_GET(QSYS_PAUSE_CFG_PAUSE_START, x)
+ spx5_field_get(QSYS_PAUSE_CFG_PAUSE_START, x)
-#define QSYS_PAUSE_CFG_PAUSE_STOP GENMASK(13, 2)
+#define QSYS_PAUSE_CFG_PAUSE_STOP\
+ GENMASK(regs->fsize[FW_QSYS_PAUSE_CFG_PAUSE_STOP] + 2 - 1, 2)
#define QSYS_PAUSE_CFG_PAUSE_STOP_SET(x)\
- FIELD_PREP(QSYS_PAUSE_CFG_PAUSE_STOP, x)
+ spx5_field_prep(QSYS_PAUSE_CFG_PAUSE_STOP, x)
#define QSYS_PAUSE_CFG_PAUSE_STOP_GET(x)\
- FIELD_GET(QSYS_PAUSE_CFG_PAUSE_STOP, x)
+ spx5_field_get(QSYS_PAUSE_CFG_PAUSE_STOP, x)
#define QSYS_PAUSE_CFG_PAUSE_ENA BIT(1)
#define QSYS_PAUSE_CFG_PAUSE_ENA_SET(x)\
@@ -6451,19 +7108,22 @@ enum sparx5_target {
#define QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA_GET(x)\
FIELD_GET(QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA, x)
-/* QSYS:PAUSE_CFG:ATOP */
-#define QSYS_ATOP(r) __REG(TARGET_QSYS,\
- 0, 1, 544, 0, 1, 1128, 284, r, 70, 4)
+/* QSYS:PAUSE_CFG:ATOP */
+#define QSYS_ATOP(r) \
+ __REG(TARGET_QSYS, 0, 1, 544, 0, 1, regs->gsize[GW_QSYS_PAUSE_CFG], \
+ 284, r, regs->rcnt[RC_QSYS_ATOP], 4)
-#define QSYS_ATOP_ATOP GENMASK(11, 0)
+#define QSYS_ATOP_ATOP\
+ GENMASK(regs->fsize[FW_QSYS_ATOP_ATOP] + 0 - 1, 0)
#define QSYS_ATOP_ATOP_SET(x)\
- FIELD_PREP(QSYS_ATOP_ATOP, x)
+ spx5_field_prep(QSYS_ATOP_ATOP, x)
#define QSYS_ATOP_ATOP_GET(x)\
- FIELD_GET(QSYS_ATOP_ATOP, x)
+ spx5_field_get(QSYS_ATOP_ATOP, x)
-/* QSYS:PAUSE_CFG:FWD_PRESSURE */
-#define QSYS_FWD_PRESSURE(r) __REG(TARGET_QSYS,\
- 0, 1, 544, 0, 1, 1128, 564, r, 70, 4)
+/* QSYS:PAUSE_CFG:FWD_PRESSURE */
+#define QSYS_FWD_PRESSURE(r) \
+ __REG(TARGET_QSYS, 0, 1, 544, 0, 1, regs->gsize[GW_QSYS_PAUSE_CFG], \
+ 564, r, regs->rcnt[RC_QSYS_FWD_PRESSURE], 4)
#define QSYS_FWD_PRESSURE_FWD_PRESSURE GENMASK(11, 1)
#define QSYS_FWD_PRESSURE_FWD_PRESSURE_SET(x)\
@@ -6477,19 +7137,22 @@ enum sparx5_target {
#define QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_GET(x)\
FIELD_GET(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS, x)
-/* QSYS:PAUSE_CFG:ATOP_TOT_CFG */
-#define QSYS_ATOP_TOT_CFG __REG(TARGET_QSYS,\
- 0, 1, 544, 0, 1, 1128, 844, 0, 1, 4)
+/* QSYS:PAUSE_CFG:ATOP_TOT_CFG */
+#define QSYS_ATOP_TOT_CFG \
+ __REG(TARGET_QSYS, 0, 1, 544, 0, 1, regs->gsize[GW_QSYS_PAUSE_CFG], \
+ 844, 0, 1, 4)
-#define QSYS_ATOP_TOT_CFG_ATOP_TOT GENMASK(11, 0)
+#define QSYS_ATOP_TOT_CFG_ATOP_TOT\
+ GENMASK(regs->fsize[FW_QSYS_ATOP_TOT_CFG_ATOP_TOT] + 0 - 1, 0)
#define QSYS_ATOP_TOT_CFG_ATOP_TOT_SET(x)\
- FIELD_PREP(QSYS_ATOP_TOT_CFG_ATOP_TOT, x)
+ spx5_field_prep(QSYS_ATOP_TOT_CFG_ATOP_TOT, x)
#define QSYS_ATOP_TOT_CFG_ATOP_TOT_GET(x)\
- FIELD_GET(QSYS_ATOP_TOT_CFG_ATOP_TOT, x)
+ spx5_field_get(QSYS_ATOP_TOT_CFG_ATOP_TOT, x)
-/* QSYS:CALCFG:CAL_AUTO */
-#define QSYS_CAL_AUTO(r) __REG(TARGET_QSYS,\
- 0, 1, 2304, 0, 1, 40, 0, r, 7, 4)
+/* QSYS:CALCFG:CAL_AUTO */
+#define QSYS_CAL_AUTO(r) \
+ __REG(TARGET_QSYS, 0, 1, regs->gaddr[GA_QSYS_CALCFG], 0, 1, 40, 0, r, \
+ regs->rcnt[RC_QSYS_CAL_AUTO], 4)
#define QSYS_CAL_AUTO_CAL_AUTO GENMASK(29, 0)
#define QSYS_CAL_AUTO_CAL_AUTO_SET(x)\
@@ -6497,9 +7160,10 @@ enum sparx5_target {
#define QSYS_CAL_AUTO_CAL_AUTO_GET(x)\
FIELD_GET(QSYS_CAL_AUTO_CAL_AUTO, x)
-/* QSYS:CALCFG:CAL_CTRL */
-#define QSYS_CAL_CTRL __REG(TARGET_QSYS,\
- 0, 1, 2304, 0, 1, 40, 36, 0, 1, 4)
+/* QSYS:CALCFG:CAL_CTRL */
+#define QSYS_CAL_CTRL \
+ __REG(TARGET_QSYS, 0, 1, regs->gaddr[GA_QSYS_CALCFG], 0, 1, 40, 36, 0, \
+ 1, 4)
#define QSYS_CAL_CTRL_CAL_MODE GENMASK(14, 11)
#define QSYS_CAL_CTRL_CAL_MODE_SET(x)\
@@ -6519,9 +7183,10 @@ enum sparx5_target {
#define QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(x)\
FIELD_GET(QSYS_CAL_CTRL_CAL_AUTO_ERROR, x)
-/* QSYS:RAM_CTRL:RAM_INIT */
-#define QSYS_RAM_INIT __REG(TARGET_QSYS,\
- 0, 1, 2344, 0, 1, 4, 0, 0, 1, 4)
+/* QSYS:RAM_CTRL:RAM_INIT */
+#define QSYS_RAM_INIT \
+ __REG(TARGET_QSYS, 0, 1, regs->gaddr[GA_QSYS_RAM_CTRL], 0, 1, 4, 0, 0, \
+ 1, 4)
#define QSYS_RAM_INIT_RAM_INIT BIT(1)
#define QSYS_RAM_INIT_RAM_INIT_SET(x)\
@@ -6535,9 +7200,10 @@ enum sparx5_target {
#define QSYS_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(QSYS_RAM_INIT_RAM_CFG_HOOK, x)
-/* REW:COMMON:OWN_UPSID */
-#define REW_OWN_UPSID(r) __REG(TARGET_REW,\
- 0, 1, 387264, 0, 1, 1232, 0, r, 3, 4)
+/* REW:COMMON:OWN_UPSID */
+#define REW_OWN_UPSID(r) \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_COMMON], 0, 1, 1232, 0, r, \
+ regs->rcnt[RC_REW_OWN_UPSID], 4)
#define REW_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define REW_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -6545,15 +7211,17 @@ enum sparx5_target {
#define REW_OWN_UPSID_OWN_UPSID_GET(x)\
FIELD_GET(REW_OWN_UPSID_OWN_UPSID, x)
-/* REW:COMMON:RTAG_ETAG_CTRL */
-#define REW_RTAG_ETAG_CTRL(r) __REG(TARGET_REW,\
- 0, 1, 387264, 0, 1, 1232, 560, r, 70, 4)
+/* REW:COMMON:RTAG_ETAG_CTRL */
+#define REW_RTAG_ETAG_CTRL(r) \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_COMMON], 0, 1, 1232, 560, r,\
+ regs->rcnt[RC_REW_RTAG_ETAG_CTRL], 4)
-#define REW_RTAG_ETAG_CTRL_IPE_TBL GENMASK(9, 3)
+#define REW_RTAG_ETAG_CTRL_IPE_TBL\
+ GENMASK(regs->fsize[FW_REW_RTAG_ETAG_CTRL_IPE_TBL] + 3 - 1, 3)
#define REW_RTAG_ETAG_CTRL_IPE_TBL_SET(x)\
- FIELD_PREP(REW_RTAG_ETAG_CTRL_IPE_TBL, x)
+ spx5_field_prep(REW_RTAG_ETAG_CTRL_IPE_TBL, x)
#define REW_RTAG_ETAG_CTRL_IPE_TBL_GET(x)\
- FIELD_GET(REW_RTAG_ETAG_CTRL_IPE_TBL, x)
+ spx5_field_get(REW_RTAG_ETAG_CTRL_IPE_TBL, x)
#define REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA GENMASK(2, 1)
#define REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_SET(x)\
@@ -6567,9 +7235,10 @@ enum sparx5_target {
#define REW_RTAG_ETAG_CTRL_KEEP_ETAG_GET(x)\
FIELD_GET(REW_RTAG_ETAG_CTRL_KEEP_ETAG, x)
-/* REW:COMMON:ES0_CTRL */
-#define REW_ES0_CTRL __REG(TARGET_REW,\
- 0, 1, 387264, 0, 1, 1232, 852, 0, 1, 4)
+/* REW:COMMON:ES0_CTRL */
+#define REW_ES0_CTRL \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_COMMON], 0, 1, 1232, 852, 0,\
+ 1, 4)
#define REW_ES0_CTRL_ES0_BY_RT_FWD BIT(5)
#define REW_ES0_CTRL_ES0_BY_RT_FWD_SET(x)\
@@ -6607,9 +7276,10 @@ enum sparx5_target {
#define REW_ES0_CTRL_ES0_LU_ENA_GET(x)\
FIELD_GET(REW_ES0_CTRL_ES0_LU_ENA, x)
-/* REW:PORT:PORT_VLAN_CFG */
-#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW,\
- 0, 1, 360448, g, 70, 256, 0, 0, 1, 4)
+/* REW:PORT:PORT_VLAN_CFG */
+#define REW_PORT_VLAN_CFG(g) \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \
+ regs->gcnt[GC_REW_PORT], 256, 0, 0, 1, 4)
#define REW_PORT_VLAN_CFG_PORT_PCP GENMASK(15, 13)
#define REW_PORT_VLAN_CFG_PORT_PCP_SET(x)\
@@ -6629,9 +7299,10 @@ enum sparx5_target {
#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\
FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x)
-/* REW:PORT:PCP_MAP_DE0 */
-#define REW_PCP_MAP_DE0(g, r) __REG(TARGET_REW,\
- 0, 1, 360448, g, 70, 256, 4, r, 8, 4)
+/* REW:PORT:PCP_MAP_DE0 */
+#define REW_PCP_MAP_DE0(g, r) \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \
+ regs->gcnt[GC_REW_PORT], 256, 4, r, 8, 4)
#define REW_PCP_MAP_DE0_PCP_DE0 GENMASK(2, 0)
#define REW_PCP_MAP_DE0_PCP_DE0_SET(x)\
@@ -6639,9 +7310,10 @@ enum sparx5_target {
#define REW_PCP_MAP_DE0_PCP_DE0_GET(x)\
FIELD_GET(REW_PCP_MAP_DE0_PCP_DE0, x)
-/* REW:PORT:PCP_MAP_DE1 */
-#define REW_PCP_MAP_DE1(g, r) __REG(TARGET_REW,\
- 0, 1, 360448, g, 70, 256, 36, r, 8, 4)
+/* REW:PORT:PCP_MAP_DE1 */
+#define REW_PCP_MAP_DE1(g, r) \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \
+ regs->gcnt[GC_REW_PORT], 256, 36, r, 8, 4)
#define REW_PCP_MAP_DE1_PCP_DE1 GENMASK(2, 0)
#define REW_PCP_MAP_DE1_PCP_DE1_SET(x)\
@@ -6649,9 +7321,10 @@ enum sparx5_target {
#define REW_PCP_MAP_DE1_PCP_DE1_GET(x)\
FIELD_GET(REW_PCP_MAP_DE1_PCP_DE1, x)
-/* REW:PORT:DEI_MAP_DE0 */
-#define REW_DEI_MAP_DE0(g, r) __REG(TARGET_REW,\
- 0, 1, 360448, g, 70, 256, 68, r, 8, 4)
+/* REW:PORT:DEI_MAP_DE0 */
+#define REW_DEI_MAP_DE0(g, r) \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \
+ regs->gcnt[GC_REW_PORT], 256, 68, r, 8, 4)
#define REW_DEI_MAP_DE0_DEI_DE0 BIT(0)
#define REW_DEI_MAP_DE0_DEI_DE0_SET(x)\
@@ -6659,9 +7332,10 @@ enum sparx5_target {
#define REW_DEI_MAP_DE0_DEI_DE0_GET(x)\
FIELD_GET(REW_DEI_MAP_DE0_DEI_DE0, x)
-/* REW:PORT:DEI_MAP_DE1 */
-#define REW_DEI_MAP_DE1(g, r) __REG(TARGET_REW,\
- 0, 1, 360448, g, 70, 256, 100, r, 8, 4)
+/* REW:PORT:DEI_MAP_DE1 */
+#define REW_DEI_MAP_DE1(g, r) \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \
+ regs->gcnt[GC_REW_PORT], 256, 100, r, 8, 4)
#define REW_DEI_MAP_DE1_DEI_DE1 BIT(0)
#define REW_DEI_MAP_DE1_DEI_DE1_SET(x)\
@@ -6669,9 +7343,10 @@ enum sparx5_target {
#define REW_DEI_MAP_DE1_DEI_DE1_GET(x)\
FIELD_GET(REW_DEI_MAP_DE1_DEI_DE1, x)
-/* REW:PORT:TAG_CTRL */
-#define REW_TAG_CTRL(g) __REG(TARGET_REW,\
- 0, 1, 360448, g, 70, 256, 132, 0, 1, 4)
+/* REW:PORT:TAG_CTRL */
+#define REW_TAG_CTRL(g) \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \
+ regs->gcnt[GC_REW_PORT], 256, 132, 0, 1, 4)
#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED BIT(13)
#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED_SET(x)\
@@ -6709,9 +7384,10 @@ enum sparx5_target {
#define REW_TAG_CTRL_TAG_DEI_CFG_GET(x)\
FIELD_GET(REW_TAG_CTRL_TAG_DEI_CFG, x)
-/* REW:PORT:DSCP_MAP */
-#define REW_DSCP_MAP(g) __REG(TARGET_REW,\
- 0, 1, 360448, g, 70, 256, 136, 0, 1, 4)
+/* REW:PORT:DSCP_MAP */
+#define REW_DSCP_MAP(g) \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_PORT], g, \
+ regs->gcnt[GC_REW_PORT], 256, 136, 0, 1, 4)
#define REW_DSCP_MAP_DSCP_UPDATE_ENA BIT(1)
#define REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(x)\
@@ -6725,9 +7401,10 @@ enum sparx5_target {
#define REW_DSCP_MAP_DSCP_REMAP_ENA_GET(x)\
FIELD_GET(REW_DSCP_MAP_DSCP_REMAP_ENA, x)
-/* REW:PTP_CTRL:PTP_TWOSTEP_CTRL */
-#define REW_PTP_TWOSTEP_CTRL __REG(TARGET_REW,\
- 0, 1, 378368, 0, 1, 40, 0, 0, 1, 4)
+/* SPARX5 ONLY */
+/* REW:PTP_CTRL:PTP_TWOSTEP_CTRL */
+#define REW_PTP_TWOSTEP_CTRL \
+ __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 0, 0, 1, 4)
#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA BIT(12)
#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_SET(x)\
@@ -6765,9 +7442,10 @@ enum sparx5_target {
#define REW_PTP_TWOSTEP_CTRL_PTP_OVFL_GET(x)\
FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_OVFL, x)
-/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP */
-#define REW_PTP_TWOSTEP_STAMP __REG(TARGET_REW,\
- 0, 1, 378368, 0, 1, 40, 4, 0, 1, 4)
+/* SPARX5 ONLY */
+/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP */
+#define REW_PTP_TWOSTEP_STAMP \
+ __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 4, 0, 1, 4)
#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC GENMASK(29, 0)
#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_SET(x)\
@@ -6775,9 +7453,10 @@ enum sparx5_target {
#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_GET(x)\
FIELD_GET(REW_PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
-/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP_SUBNS */
-#define REW_PTP_TWOSTEP_STAMP_SUBNS __REG(TARGET_REW,\
- 0, 1, 378368, 0, 1, 40, 8, 0, 1, 4)
+/* SPARX5 ONLY */
+/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP_SUBNS */
+#define REW_PTP_TWOSTEP_STAMP_SUBNS \
+ __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 8, 0, 1, 4)
#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC GENMASK(7, 0)
#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_SET(x)\
@@ -6785,17 +7464,20 @@ enum sparx5_target {
#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_GET(x)\
FIELD_GET(REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC, x)
-/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO */
-#define REW_PTP_RSRV_NOT_ZERO __REG(TARGET_REW,\
- 0, 1, 378368, 0, 1, 40, 12, 0, 1, 4)
+/* SPARX5 ONLY */
+/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO */
+#define REW_PTP_RSRV_NOT_ZERO \
+ __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 12, 0, 1, 4)
-/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO1 */
-#define REW_PTP_RSRV_NOT_ZERO1 __REG(TARGET_REW,\
- 0, 1, 378368, 0, 1, 40, 16, 0, 1, 4)
+/* SPARX5 ONLY */
+/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO1 */
+#define REW_PTP_RSRV_NOT_ZERO1 \
+ __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 16, 0, 1, 4)
-/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO2 */
-#define REW_PTP_RSRV_NOT_ZERO2 __REG(TARGET_REW,\
- 0, 1, 378368, 0, 1, 40, 20, 0, 1, 4)
+/* SPARX5 ONLY */
+/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO2 */
+#define REW_PTP_RSRV_NOT_ZERO2 \
+ __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 20, 0, 1, 4)
#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2 GENMASK(5, 0)
#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_SET(x)\
@@ -6803,9 +7485,10 @@ enum sparx5_target {
#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_GET(x)\
FIELD_GET(REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2, x)
-/* REW:PTP_CTRL:PTP_GEN_STAMP_FMT */
-#define REW_PTP_GEN_STAMP_FMT(r) __REG(TARGET_REW,\
- 0, 1, 378368, 0, 1, 40, 24, r, 4, 4)
+/* SPARX5 ONLY */
+/* REW:PTP_CTRL:PTP_GEN_STAMP_FMT */
+#define REW_PTP_GEN_STAMP_FMT(r) \
+ __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 24, r, 4, 4)
#define REW_PTP_GEN_STAMP_FMT_RT_OFS GENMASK(6, 2)
#define REW_PTP_GEN_STAMP_FMT_RT_OFS_SET(x)\
@@ -6819,9 +7502,10 @@ enum sparx5_target {
#define REW_PTP_GEN_STAMP_FMT_RT_FMT_GET(x)\
FIELD_GET(REW_PTP_GEN_STAMP_FMT_RT_FMT, x)
-/* REW:RAM_CTRL:RAM_INIT */
-#define REW_RAM_INIT __REG(TARGET_REW,\
- 0, 1, 378696, 0, 1, 4, 0, 0, 1, 4)
+/* REW:RAM_CTRL:RAM_INIT */
+#define REW_RAM_INIT \
+ __REG(TARGET_REW, 0, 1, regs->gaddr[GA_REW_RAM_CTRL], 0, 1, 4, 0, 0, 1,\
+ 4)
#define REW_RAM_INIT_RAM_INIT BIT(1)
#define REW_RAM_INIT_RAM_INIT_SET(x)\
@@ -6835,9 +7519,9 @@ enum sparx5_target {
#define REW_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(REW_RAM_INIT_RAM_CFG_HOOK, x)
-/* VCAP_ES0:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
-#define VCAP_ES0_CTRL __REG(TARGET_VCAP_ES0,\
- 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
+/* VCAP_ES0:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
+#define VCAP_ES0_CTRL \
+ __REG(TARGET_VCAP_ES0, 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
#define VCAP_ES0_CTRL_UPDATE_CMD GENMASK(24, 22)
#define VCAP_ES0_CTRL_UPDATE_CMD_SET(x)\
@@ -6887,9 +7571,9 @@ enum sparx5_target {
#define VCAP_ES0_CTRL_MV_TRAFFIC_IGN_GET(x)\
FIELD_GET(VCAP_ES0_CTRL_MV_TRAFFIC_IGN, x)
-/* VCAP_ES0:VCAP_CORE_CFG:VCAP_MV_CFG */
-#define VCAP_ES0_CFG __REG(TARGET_VCAP_ES0,\
- 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
+/* VCAP_ES0:VCAP_CORE_CFG:VCAP_MV_CFG */
+#define VCAP_ES0_CFG \
+ __REG(TARGET_VCAP_ES0, 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
#define VCAP_ES0_CFG_MV_NUM_POS GENMASK(31, 16)
#define VCAP_ES0_CFG_MV_NUM_POS_SET(x)\
@@ -6903,33 +7587,33 @@ enum sparx5_target {
#define VCAP_ES0_CFG_MV_SIZE_GET(x)\
FIELD_GET(VCAP_ES0_CFG_MV_SIZE, x)
-/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
-#define VCAP_ES0_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_ES0,\
- 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
+#define VCAP_ES0_VCAP_ENTRY_DAT(r) \
+ __REG(TARGET_VCAP_ES0, 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
-/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_MASK_DAT */
-#define VCAP_ES0_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_ES0,\
- 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_MASK_DAT */
+#define VCAP_ES0_VCAP_MASK_DAT(r) \
+ __REG(TARGET_VCAP_ES0, 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
-/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
-#define VCAP_ES0_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_ES0,\
- 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
+#define VCAP_ES0_VCAP_ACTION_DAT(r) \
+ __REG(TARGET_VCAP_ES0, 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
-/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_CNT_DAT */
-#define VCAP_ES0_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_ES0,\
- 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_CNT_DAT */
+#define VCAP_ES0_VCAP_CNT_DAT(r) \
+ __REG(TARGET_VCAP_ES0, 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
-/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
-#define VCAP_ES0_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_ES0,\
- 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
+#define VCAP_ES0_VCAP_CNT_FW_DAT \
+ __REG(TARGET_VCAP_ES0, 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
-/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_TG_DAT */
-#define VCAP_ES0_VCAP_TG_DAT __REG(TARGET_VCAP_ES0,\
- 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_TG_DAT */
+#define VCAP_ES0_VCAP_TG_DAT \
+ __REG(TARGET_VCAP_ES0, 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
-/* VCAP_ES0:VCAP_CORE_MAP:VCAP_CORE_IDX */
-#define VCAP_ES0_IDX __REG(TARGET_VCAP_ES0,\
- 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
+/* VCAP_ES0:VCAP_CORE_MAP:VCAP_CORE_IDX */
+#define VCAP_ES0_IDX \
+ __REG(TARGET_VCAP_ES0, 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
#define VCAP_ES0_IDX_CORE_IDX GENMASK(3, 0)
#define VCAP_ES0_IDX_CORE_IDX_SET(x)\
@@ -6937,9 +7621,9 @@ enum sparx5_target {
#define VCAP_ES0_IDX_CORE_IDX_GET(x)\
FIELD_GET(VCAP_ES0_IDX_CORE_IDX, x)
-/* VCAP_ES0:VCAP_CORE_MAP:VCAP_CORE_MAP */
-#define VCAP_ES0_MAP __REG(TARGET_VCAP_ES0,\
- 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
+/* VCAP_ES0:VCAP_CORE_MAP:VCAP_CORE_MAP */
+#define VCAP_ES0_MAP \
+ __REG(TARGET_VCAP_ES0, 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
#define VCAP_ES0_MAP_CORE_MAP GENMASK(2, 0)
#define VCAP_ES0_MAP_CORE_MAP_SET(x)\
@@ -6947,9 +7631,9 @@ enum sparx5_target {
#define VCAP_ES0_MAP_CORE_MAP_GET(x)\
FIELD_GET(VCAP_ES0_MAP_CORE_MAP, x)
-/* VCAP_ES0:VCAP_CORE_STICKY:VCAP_STICKY */
-#define VCAP_ES0_VCAP_STICKY __REG(TARGET_VCAP_ES0,\
- 0, 1, 920, 0, 1, 4, 0, 0, 1, 4)
+/* VCAP_ES0:VCAP_CORE_STICKY:VCAP_STICKY */
+#define VCAP_ES0_VCAP_STICKY \
+ __REG(TARGET_VCAP_ES0, 0, 1, 920, 0, 1, 4, 0, 0, 1, 4)
#define VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY BIT(0)
#define VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_SET(x)\
@@ -6957,49 +7641,49 @@ enum sparx5_target {
#define VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_GET(x)\
FIELD_GET(VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x)
-/* VCAP_ES0:VCAP_CONST:VCAP_VER */
-#define VCAP_ES0_VCAP_VER __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:VCAP_VER */
+#define VCAP_ES0_VCAP_VER \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
-/* VCAP_ES0:VCAP_CONST:ENTRY_WIDTH */
-#define VCAP_ES0_ENTRY_WIDTH __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:ENTRY_WIDTH */
+#define VCAP_ES0_ENTRY_WIDTH \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
-/* VCAP_ES0:VCAP_CONST:ENTRY_CNT */
-#define VCAP_ES0_ENTRY_CNT __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:ENTRY_CNT */
+#define VCAP_ES0_ENTRY_CNT \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
-/* VCAP_ES0:VCAP_CONST:ENTRY_SWCNT */
-#define VCAP_ES0_ENTRY_SWCNT __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:ENTRY_SWCNT */
+#define VCAP_ES0_ENTRY_SWCNT \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
-/* VCAP_ES0:VCAP_CONST:ENTRY_TG_WIDTH */
-#define VCAP_ES0_ENTRY_TG_WIDTH __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:ENTRY_TG_WIDTH */
+#define VCAP_ES0_ENTRY_TG_WIDTH \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
-/* VCAP_ES0:VCAP_CONST:ACTION_DEF_CNT */
-#define VCAP_ES0_ACTION_DEF_CNT __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:ACTION_DEF_CNT */
+#define VCAP_ES0_ACTION_DEF_CNT \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
-/* VCAP_ES0:VCAP_CONST:ACTION_WIDTH */
-#define VCAP_ES0_ACTION_WIDTH __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:ACTION_WIDTH */
+#define VCAP_ES0_ACTION_WIDTH \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
-/* VCAP_ES0:VCAP_CONST:CNT_WIDTH */
-#define VCAP_ES0_CNT_WIDTH __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:CNT_WIDTH */
+#define VCAP_ES0_CNT_WIDTH \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
-/* VCAP_ES0:VCAP_CONST:CORE_CNT */
-#define VCAP_ES0_CORE_CNT __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:CORE_CNT */
+#define VCAP_ES0_CORE_CNT \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
-/* VCAP_ES0:VCAP_CONST:IF_CNT */
-#define VCAP_ES0_IF_CNT __REG(TARGET_VCAP_ES0,\
- 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
+/* VCAP_ES0:VCAP_CONST:IF_CNT */
+#define VCAP_ES0_IF_CNT \
+ __REG(TARGET_VCAP_ES0, 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
-/* VCAP_ES2:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
-#define VCAP_ES2_CTRL __REG(TARGET_VCAP_ES2,\
- 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
+/* VCAP_ES2:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
+#define VCAP_ES2_CTRL \
+ __REG(TARGET_VCAP_ES2, 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
#define VCAP_ES2_CTRL_UPDATE_CMD GENMASK(24, 22)
#define VCAP_ES2_CTRL_UPDATE_CMD_SET(x)\
@@ -7049,9 +7733,9 @@ enum sparx5_target {
#define VCAP_ES2_CTRL_MV_TRAFFIC_IGN_GET(x)\
FIELD_GET(VCAP_ES2_CTRL_MV_TRAFFIC_IGN, x)
-/* VCAP_ES2:VCAP_CORE_CFG:VCAP_MV_CFG */
-#define VCAP_ES2_CFG __REG(TARGET_VCAP_ES2,\
- 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
+/* VCAP_ES2:VCAP_CORE_CFG:VCAP_MV_CFG */
+#define VCAP_ES2_CFG \
+ __REG(TARGET_VCAP_ES2, 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
#define VCAP_ES2_CFG_MV_NUM_POS GENMASK(31, 16)
#define VCAP_ES2_CFG_MV_NUM_POS_SET(x)\
@@ -7065,33 +7749,33 @@ enum sparx5_target {
#define VCAP_ES2_CFG_MV_SIZE_GET(x)\
FIELD_GET(VCAP_ES2_CFG_MV_SIZE, x)
-/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
-#define VCAP_ES2_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_ES2,\
- 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
+#define VCAP_ES2_VCAP_ENTRY_DAT(r) \
+ __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
-/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_MASK_DAT */
-#define VCAP_ES2_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_ES2,\
- 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_MASK_DAT */
+#define VCAP_ES2_VCAP_MASK_DAT(r) \
+ __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
-/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
-#define VCAP_ES2_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_ES2,\
- 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
+#define VCAP_ES2_VCAP_ACTION_DAT(r) \
+ __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
-/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_DAT */
-#define VCAP_ES2_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_ES2,\
- 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_DAT */
+#define VCAP_ES2_VCAP_CNT_DAT(r) \
+ __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
-/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
-#define VCAP_ES2_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_ES2,\
- 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
+#define VCAP_ES2_VCAP_CNT_FW_DAT \
+ __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
-/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_TG_DAT */
-#define VCAP_ES2_VCAP_TG_DAT __REG(TARGET_VCAP_ES2,\
- 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_TG_DAT */
+#define VCAP_ES2_VCAP_TG_DAT \
+ __REG(TARGET_VCAP_ES2, 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
-/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_IDX */
-#define VCAP_ES2_IDX __REG(TARGET_VCAP_ES2,\
- 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
+/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_IDX */
+#define VCAP_ES2_IDX \
+ __REG(TARGET_VCAP_ES2, 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
#define VCAP_ES2_IDX_CORE_IDX GENMASK(3, 0)
#define VCAP_ES2_IDX_CORE_IDX_SET(x)\
@@ -7099,9 +7783,9 @@ enum sparx5_target {
#define VCAP_ES2_IDX_CORE_IDX_GET(x)\
FIELD_GET(VCAP_ES2_IDX_CORE_IDX, x)
-/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_MAP */
-#define VCAP_ES2_MAP __REG(TARGET_VCAP_ES2,\
- 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
+/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_MAP */
+#define VCAP_ES2_MAP \
+ __REG(TARGET_VCAP_ES2, 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
#define VCAP_ES2_MAP_CORE_MAP GENMASK(2, 0)
#define VCAP_ES2_MAP_CORE_MAP_SET(x)\
@@ -7109,9 +7793,9 @@ enum sparx5_target {
#define VCAP_ES2_MAP_CORE_MAP_GET(x)\
FIELD_GET(VCAP_ES2_MAP_CORE_MAP, x)
-/* VCAP_ES2:VCAP_CORE_STICKY:VCAP_STICKY */
-#define VCAP_ES2_VCAP_STICKY __REG(TARGET_VCAP_ES2,\
- 0, 1, 920, 0, 1, 4, 0, 0, 1, 4)
+/* VCAP_ES2:VCAP_CORE_STICKY:VCAP_STICKY */
+#define VCAP_ES2_VCAP_STICKY \
+ __REG(TARGET_VCAP_ES2, 0, 1, 920, 0, 1, 4, 0, 0, 1, 4)
#define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY BIT(0)
#define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_SET(x)\
@@ -7119,49 +7803,49 @@ enum sparx5_target {
#define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_GET(x)\
FIELD_GET(VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x)
-/* VCAP_ES2:VCAP_CONST:VCAP_VER */
-#define VCAP_ES2_VCAP_VER __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:VCAP_VER */
+#define VCAP_ES2_VCAP_VER \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
-/* VCAP_ES2:VCAP_CONST:ENTRY_WIDTH */
-#define VCAP_ES2_ENTRY_WIDTH __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:ENTRY_WIDTH */
+#define VCAP_ES2_ENTRY_WIDTH \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
-/* VCAP_ES2:VCAP_CONST:ENTRY_CNT */
-#define VCAP_ES2_ENTRY_CNT __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:ENTRY_CNT */
+#define VCAP_ES2_ENTRY_CNT \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
-/* VCAP_ES2:VCAP_CONST:ENTRY_SWCNT */
-#define VCAP_ES2_ENTRY_SWCNT __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:ENTRY_SWCNT */
+#define VCAP_ES2_ENTRY_SWCNT \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
-/* VCAP_ES2:VCAP_CONST:ENTRY_TG_WIDTH */
-#define VCAP_ES2_ENTRY_TG_WIDTH __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:ENTRY_TG_WIDTH */
+#define VCAP_ES2_ENTRY_TG_WIDTH \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
-/* VCAP_ES2:VCAP_CONST:ACTION_DEF_CNT */
-#define VCAP_ES2_ACTION_DEF_CNT __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:ACTION_DEF_CNT */
+#define VCAP_ES2_ACTION_DEF_CNT \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
-/* VCAP_ES2:VCAP_CONST:ACTION_WIDTH */
-#define VCAP_ES2_ACTION_WIDTH __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:ACTION_WIDTH */
+#define VCAP_ES2_ACTION_WIDTH \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
-/* VCAP_ES2:VCAP_CONST:CNT_WIDTH */
-#define VCAP_ES2_CNT_WIDTH __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:CNT_WIDTH */
+#define VCAP_ES2_CNT_WIDTH \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
-/* VCAP_ES2:VCAP_CONST:CORE_CNT */
-#define VCAP_ES2_CORE_CNT __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:CORE_CNT */
+#define VCAP_ES2_CORE_CNT \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
-/* VCAP_ES2:VCAP_CONST:IF_CNT */
-#define VCAP_ES2_IF_CNT __REG(TARGET_VCAP_ES2,\
- 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
+/* VCAP_ES2:VCAP_CONST:IF_CNT */
+#define VCAP_ES2_IF_CNT \
+ __REG(TARGET_VCAP_ES2, 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
-#define VCAP_SUPER_CTRL __REG(TARGET_VCAP_SUPER,\
- 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
+#define VCAP_SUPER_CTRL \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
#define VCAP_SUPER_CTRL_UPDATE_CMD GENMASK(24, 22)
#define VCAP_SUPER_CTRL_UPDATE_CMD_SET(x)\
@@ -7211,9 +7895,9 @@ enum sparx5_target {
#define VCAP_SUPER_CTRL_MV_TRAFFIC_IGN_GET(x)\
FIELD_GET(VCAP_SUPER_CTRL_MV_TRAFFIC_IGN, x)
-/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_MV_CFG */
-#define VCAP_SUPER_CFG __REG(TARGET_VCAP_SUPER,\
- 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_MV_CFG */
+#define VCAP_SUPER_CFG \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
#define VCAP_SUPER_CFG_MV_NUM_POS GENMASK(31, 16)
#define VCAP_SUPER_CFG_MV_NUM_POS_SET(x)\
@@ -7227,33 +7911,33 @@ enum sparx5_target {
#define VCAP_SUPER_CFG_MV_SIZE_GET(x)\
FIELD_GET(VCAP_SUPER_CFG_MV_SIZE, x)
-/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
-#define VCAP_SUPER_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_SUPER,\
- 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
+#define VCAP_SUPER_VCAP_ENTRY_DAT(r) \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
-/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_MASK_DAT */
-#define VCAP_SUPER_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_SUPER,\
- 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_MASK_DAT */
+#define VCAP_SUPER_VCAP_MASK_DAT(r) \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
-/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
-#define VCAP_SUPER_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_SUPER,\
- 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
+#define VCAP_SUPER_VCAP_ACTION_DAT(r) \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
-/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_DAT */
-#define VCAP_SUPER_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_SUPER,\
- 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_DAT */
+#define VCAP_SUPER_VCAP_CNT_DAT(r) \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
-/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
-#define VCAP_SUPER_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_SUPER,\
- 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
+#define VCAP_SUPER_VCAP_CNT_FW_DAT \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_TG_DAT */
-#define VCAP_SUPER_VCAP_TG_DAT __REG(TARGET_VCAP_SUPER,\
- 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_TG_DAT */
+#define VCAP_SUPER_VCAP_TG_DAT \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_IDX */
-#define VCAP_SUPER_IDX __REG(TARGET_VCAP_SUPER,\
- 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_IDX */
+#define VCAP_SUPER_IDX \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
#define VCAP_SUPER_IDX_CORE_IDX GENMASK(3, 0)
#define VCAP_SUPER_IDX_CORE_IDX_SET(x)\
@@ -7261,9 +7945,9 @@ enum sparx5_target {
#define VCAP_SUPER_IDX_CORE_IDX_GET(x)\
FIELD_GET(VCAP_SUPER_IDX_CORE_IDX, x)
-/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_MAP */
-#define VCAP_SUPER_MAP __REG(TARGET_VCAP_SUPER,\
- 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_MAP */
+#define VCAP_SUPER_MAP \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
#define VCAP_SUPER_MAP_CORE_MAP GENMASK(2, 0)
#define VCAP_SUPER_MAP_CORE_MAP_SET(x)\
@@ -7271,49 +7955,49 @@ enum sparx5_target {
#define VCAP_SUPER_MAP_CORE_MAP_GET(x)\
FIELD_GET(VCAP_SUPER_MAP_CORE_MAP, x)
-/* VCAP_SUPER:VCAP_CONST:VCAP_VER */
-#define VCAP_SUPER_VCAP_VER __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:VCAP_VER */
+#define VCAP_SUPER_VCAP_VER \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CONST:ENTRY_WIDTH */
-#define VCAP_SUPER_ENTRY_WIDTH __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:ENTRY_WIDTH */
+#define VCAP_SUPER_ENTRY_WIDTH \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CONST:ENTRY_CNT */
-#define VCAP_SUPER_ENTRY_CNT __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:ENTRY_CNT */
+#define VCAP_SUPER_ENTRY_CNT \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CONST:ENTRY_SWCNT */
-#define VCAP_SUPER_ENTRY_SWCNT __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:ENTRY_SWCNT */
+#define VCAP_SUPER_ENTRY_SWCNT \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CONST:ENTRY_TG_WIDTH */
-#define VCAP_SUPER_ENTRY_TG_WIDTH __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:ENTRY_TG_WIDTH */
+#define VCAP_SUPER_ENTRY_TG_WIDTH \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CONST:ACTION_DEF_CNT */
-#define VCAP_SUPER_ACTION_DEF_CNT __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:ACTION_DEF_CNT */
+#define VCAP_SUPER_ACTION_DEF_CNT \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CONST:ACTION_WIDTH */
-#define VCAP_SUPER_ACTION_WIDTH __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:ACTION_WIDTH */
+#define VCAP_SUPER_ACTION_WIDTH \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CONST:CNT_WIDTH */
-#define VCAP_SUPER_CNT_WIDTH __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:CNT_WIDTH */
+#define VCAP_SUPER_CNT_WIDTH \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CONST:CORE_CNT */
-#define VCAP_SUPER_CORE_CNT __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:CORE_CNT */
+#define VCAP_SUPER_CORE_CNT \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
-/* VCAP_SUPER:VCAP_CONST:IF_CNT */
-#define VCAP_SUPER_IF_CNT __REG(TARGET_VCAP_SUPER,\
- 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
+/* VCAP_SUPER:VCAP_CONST:IF_CNT */
+#define VCAP_SUPER_IF_CNT \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
-/* VCAP_SUPER:RAM_CTRL:RAM_INIT */
-#define VCAP_SUPER_RAM_INIT __REG(TARGET_VCAP_SUPER,\
- 0, 1, 1120, 0, 1, 4, 0, 0, 1, 4)
+/* VCAP_SUPER:RAM_CTRL:RAM_INIT */
+#define VCAP_SUPER_RAM_INIT \
+ __REG(TARGET_VCAP_SUPER, 0, 1, 1120, 0, 1, 4, 0, 0, 1, 4)
#define VCAP_SUPER_RAM_INIT_RAM_INIT BIT(1)
#define VCAP_SUPER_RAM_INIT_RAM_INIT_SET(x)\
@@ -7327,9 +8011,10 @@ enum sparx5_target {
#define VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK, x)
-/* VOP:RAM_CTRL:RAM_INIT */
-#define VOP_RAM_INIT __REG(TARGET_VOP,\
- 0, 1, 279176, 0, 1, 4, 0, 0, 1, 4)
+/* VOP:RAM_CTRL:RAM_INIT */
+#define VOP_RAM_INIT \
+ __REG(TARGET_VOP, 0, 1, regs->gaddr[GA_VOP_RAM_CTRL], 0, 1, 4, 0, 0, 1,\
+ 4)
#define VOP_RAM_INIT_RAM_INIT BIT(1)
#define VOP_RAM_INIT_RAM_INIT_SET(x)\
@@ -7343,9 +8028,10 @@ enum sparx5_target {
#define VOP_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(VOP_RAM_INIT_RAM_CFG_HOOK, x)
-/* XQS:SYSTEM:STAT_CFG */
-#define XQS_STAT_CFG __REG(TARGET_XQS,\
- 0, 1, 6768, 0, 1, 872, 860, 0, 1, 4)
+/* XQS:SYSTEM:STAT_CFG */
+#define XQS_STAT_CFG \
+ __REG(TARGET_XQS, 0, 1, regs->gaddr[GA_XQS_SYSTEM], 0, 1, 872, 860, 0, \
+ 1, 4)
#define XQS_STAT_CFG_STAT_CLEAR_SHOT GENMASK(21, 18)
#define XQS_STAT_CFG_STAT_CLEAR_SHOT_SET(x)\
@@ -7353,11 +8039,12 @@ enum sparx5_target {
#define XQS_STAT_CFG_STAT_CLEAR_SHOT_GET(x)\
FIELD_GET(XQS_STAT_CFG_STAT_CLEAR_SHOT, x)
-#define XQS_STAT_CFG_STAT_VIEW GENMASK(17, 5)
+#define XQS_STAT_CFG_STAT_VIEW\
+ GENMASK(regs->fsize[FW_XQS_STAT_CFG_STAT_VIEW] + 5 - 1, 5)
#define XQS_STAT_CFG_STAT_VIEW_SET(x)\
- FIELD_PREP(XQS_STAT_CFG_STAT_VIEW, x)
+ spx5_field_prep(XQS_STAT_CFG_STAT_VIEW, x)
#define XQS_STAT_CFG_STAT_VIEW_GET(x)\
- FIELD_GET(XQS_STAT_CFG_STAT_VIEW, x)
+ spx5_field_get(XQS_STAT_CFG_STAT_VIEW, x)
#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY BIT(4)
#define XQS_STAT_CFG_STAT_SRV_PKT_ONLY_SET(x)\
@@ -7371,48 +8058,56 @@ enum sparx5_target {
#define XQS_STAT_CFG_STAT_WRAP_DIS_GET(x)\
FIELD_GET(XQS_STAT_CFG_STAT_WRAP_DIS, x)
-/* XQS:QLIMIT_SHR:QLIMIT_SHR_TOP_CFG */
-#define XQS_QLIMIT_SHR_TOP_CFG(g) __REG(TARGET_XQS,\
- 0, 1, 7936, g, 4, 48, 0, 0, 1, 4)
+/* XQS:QLIMIT_SHR:QLIMIT_SHR_TOP_CFG */
+#define XQS_QLIMIT_SHR_TOP_CFG(g) \
+ __REG(TARGET_XQS, 0, 1, regs->gaddr[GA_XQS_QLIMIT_SHR], g, 4, 48, 0, 0,\
+ 1, 4)
-#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP\
+ GENMASK(regs->fsize[FW_XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP] + 0 - 1, 0)
#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_SET(x)\
- FIELD_PREP(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x)
+ spx5_field_prep(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x)
#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_GET(x)\
- FIELD_GET(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x)
+ spx5_field_get(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x)
-/* XQS:QLIMIT_SHR:QLIMIT_SHR_ATOP_CFG */
-#define XQS_QLIMIT_SHR_ATOP_CFG(g) __REG(TARGET_XQS,\
- 0, 1, 7936, g, 4, 48, 4, 0, 1, 4)
+/* XQS:QLIMIT_SHR:QLIMIT_SHR_ATOP_CFG */
+#define XQS_QLIMIT_SHR_ATOP_CFG(g) \
+ __REG(TARGET_XQS, 0, 1, regs->gaddr[GA_XQS_QLIMIT_SHR], g, 4, 48, 4, 0,\
+ 1, 4)
-#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP\
+ GENMASK(regs->fsize[FW_XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP] + 0 - 1, 0)
#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_SET(x)\
- FIELD_PREP(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x)
+ spx5_field_prep(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x)
#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_GET(x)\
- FIELD_GET(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x)
+ spx5_field_get(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x)
-/* XQS:QLIMIT_SHR:QLIMIT_SHR_CTOP_CFG */
-#define XQS_QLIMIT_SHR_CTOP_CFG(g) __REG(TARGET_XQS,\
- 0, 1, 7936, g, 4, 48, 8, 0, 1, 4)
+/* XQS:QLIMIT_SHR:QLIMIT_SHR_CTOP_CFG */
+#define XQS_QLIMIT_SHR_CTOP_CFG(g) \
+ __REG(TARGET_XQS, 0, 1, regs->gaddr[GA_XQS_QLIMIT_SHR], g, 4, 48, 8, 0,\
+ 1, 4)
-#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP\
+ GENMASK(regs->fsize[FW_XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP] + 0 - 1, 0)
#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_SET(x)\
- FIELD_PREP(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x)
+ spx5_field_prep(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x)
#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_GET(x)\
- FIELD_GET(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x)
+ spx5_field_get(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x)
-/* XQS:QLIMIT_SHR:QLIMIT_SHR_QLIM_CFG */
-#define XQS_QLIMIT_SHR_QLIM_CFG(g) __REG(TARGET_XQS,\
- 0, 1, 7936, g, 4, 48, 12, 0, 1, 4)
+/* XQS:QLIMIT_SHR:QLIMIT_SHR_QLIM_CFG */
+#define XQS_QLIMIT_SHR_QLIM_CFG(g) \
+ __REG(TARGET_XQS, 0, 1, regs->gaddr[GA_XQS_QLIMIT_SHR], g, 4, 48, 12, \
+ 0, 1, 4)
-#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM GENMASK(14, 0)
+#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM\
+ GENMASK(regs->fsize[FW_XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM] + 0 - 1, 0)
#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_SET(x)\
- FIELD_PREP(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x)
+ spx5_field_prep(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x)
#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_GET(x)\
- FIELD_GET(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x)
+ spx5_field_get(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x)
-/* XQS:STAT:CNT */
-#define XQS_CNT(g) __REG(TARGET_XQS,\
- 0, 1, 0, g, 1024, 4, 0, 0, 1, 4)
+/* XQS:STAT:CNT */
+#define XQS_CNT(g) \
+ __REG(TARGET_XQS, 0, 1, 0, g, 1024, 4, 0, 0, 1, 4)
#endif /* _SPARX5_MAIN_REGS_H_ */
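
Editor's note: the XQS macros above replace compile-time GENMASK()/FIELD_PREP()/FIELD_GET() with masks computed from the per-platform fsize[] table at run time. A minimal sketch of the helper pair those macros assume, built on the non-constant field_prep()/field_get() from <linux/bitfield.h> (the driver's actual definitions live elsewhere in the series):

#include <linux/bitfield.h>

/* Runtime variants of FIELD_PREP()/FIELD_GET(): the mask argument is a
 * plain u32 derived from regs->fsize[], not a compile-time constant.
 */
#define spx5_field_prep(mask, val)	field_prep((mask), (val))
#define spx5_field_get(mask, val)	field_get((mask), (val))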
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
index 459a53676ae9..9806729e9c62 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
@@ -24,8 +24,14 @@ static u32 sparx5_mirror_to_dir(bool ingress)
/* Get ports belonging to this mirror */
static u64 sparx5_mirror_port_get(struct sparx5 *sparx5, u32 idx)
{
- return (u64)spx5_rd(sparx5, ANA_AC_PROBE_PORT_CFG1(idx)) << 32 |
- spx5_rd(sparx5, ANA_AC_PROBE_PORT_CFG(idx));
+ u64 val;
+
+ val = spx5_rd(sparx5, ANA_AC_PROBE_PORT_CFG(idx));
+
+ if (is_sparx5(sparx5))
+ val |= (u64)spx5_rd(sparx5, ANA_AC_PROBE_PORT_CFG1(idx)) << 32;
+
+ return val;
}
/* Add port to mirror (only front ports) */
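
Editor's note: sparx5_mirror_port_get() now assembles the 64-bit mirror membership mask from one or two 32-bit registers; the upper word is only read on Sparx5 itself, which is the only platform with more than 32 front ports. A hypothetical caller-side helper (illustration only, not driver code):

/* Hypothetical helper: test whether front port 'portno' belongs to
 * mirror probe 'idx', using the conditional two-register read above.
 */
static bool sparx5_mirror_port_is_member(struct sparx5 *sparx5, u32 idx,
					 u32 portno)
{
	return sparx5_mirror_port_get(sparx5, idx) & BIT_ULL(portno);
}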
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
index 705a004b324f..1d34af78166a 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
@@ -55,7 +55,7 @@ static void __ifh_encode_bitfield(void *ifh, u64 value, u32 pos, u32 width)
ifh_hdr[byte - 5] |= (u8)((encode & 0xFF0000000000) >> 40);
}
-void sparx5_set_port_ifh(void *ifh_hdr, u16 portno)
+void sparx5_set_port_ifh(struct sparx5 *sparx5, void *ifh_hdr, u16 portno)
{
/* VSTAX.RSV = 1. MSBit must be 1 */
ifh_encode_bitfield(ifh_hdr, 1, VSTAX + 79, 1);
@@ -64,15 +64,16 @@ void sparx5_set_port_ifh(void *ifh_hdr, u16 portno)
/* MISC.CPU_MASK/DPORT = Destination port */
ifh_encode_bitfield(ifh_hdr, portno, 29, 8);
/* MISC.PIPELINE_PT */
- ifh_encode_bitfield(ifh_hdr, 16, 37, 5);
+ ifh_encode_bitfield(ifh_hdr, is_sparx5(sparx5) ? 16 : 17, 37, 5);
/* MISC.PIPELINE_ACT */
ifh_encode_bitfield(ifh_hdr, 1, 42, 3);
/* FWD.SRC_PORT = CPU */
- ifh_encode_bitfield(ifh_hdr, SPX5_PORT_CPU, 46, 7);
+ ifh_encode_bitfield(ifh_hdr, sparx5_get_pgid(sparx5, SPX5_PORT_CPU_0),
+ 46, is_sparx5(sparx5) ? 7 : 6);
/* FWD.SFLOW_ID (disable SFlow sampling) */
- ifh_encode_bitfield(ifh_hdr, 124, 57, 7);
+ ifh_encode_bitfield(ifh_hdr, 124, is_sparx5(sparx5) ? 57 : 56, 7);
/* FWD.UPDATE_FCS = Enable. Enforce update of FCS. */
- ifh_encode_bitfield(ifh_hdr, 1, 67, 1);
+ ifh_encode_bitfield(ifh_hdr, 1, is_sparx5(sparx5) ? 67 : 66, 1);
}
void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op)
@@ -80,19 +81,25 @@ void sparx5_set_port_ifh_rew_op(void *ifh_hdr, u32 rew_op)
ifh_encode_bitfield(ifh_hdr, rew_op, VSTAX + 32, 10);
}
-void sparx5_set_port_ifh_pdu_type(void *ifh_hdr, u32 pdu_type)
+void sparx5_set_port_ifh_pdu_type(struct sparx5 *sparx5, void *ifh_hdr,
+ u32 pdu_type)
{
- ifh_encode_bitfield(ifh_hdr, pdu_type, 191, 4);
+ ifh_encode_bitfield(ifh_hdr, pdu_type, is_sparx5(sparx5) ? 191 : 190,
+ 4);
}
-void sparx5_set_port_ifh_pdu_w16_offset(void *ifh_hdr, u32 pdu_w16_offset)
+void sparx5_set_port_ifh_pdu_w16_offset(struct sparx5 *sparx5, void *ifh_hdr,
+ u32 pdu_w16_offset)
{
- ifh_encode_bitfield(ifh_hdr, pdu_w16_offset, 195, 6);
+ ifh_encode_bitfield(ifh_hdr, pdu_w16_offset,
+ is_sparx5(sparx5) ? 195 : 194, 6);
}
-void sparx5_set_port_ifh_timestamp(void *ifh_hdr, u64 timestamp)
+void sparx5_set_port_ifh_timestamp(struct sparx5 *sparx5, void *ifh_hdr,
+ u64 timestamp)
{
- ifh_encode_bitfield(ifh_hdr, timestamp, 232, 40);
+ ifh_encode_bitfield(ifh_hdr, timestamp, 232,
+ is_sparx5(sparx5) ? 40 : 38);
}
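
Editor's note: each setter above now takes the sparx5 instance because several IFH field positions shift by one bit on the non-Sparx5 platforms. A hypothetical wrapper (illustration only) that factors out the repeated position selection:

/* Hypothetical helper: encode an IFH field whose bit position differs
 * per platform but whose width is shared.
 */
static void sparx5_ifh_encode(struct sparx5 *sparx5, void *ifh_hdr, u64 val,
			      u32 spx5_pos, u32 other_pos, u32 width)
{
	ifh_encode_bitfield(ifh_hdr, val,
			    is_sparx5(sparx5) ? spx5_pos : other_pos, width);
}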
static int sparx5_port_open(struct net_device *ndev)
@@ -190,7 +197,8 @@ static int sparx5_set_mac_address(struct net_device *dev, void *p)
sparx5_mact_forget(sparx5, dev->dev_addr, port->pvid);
/* Add new */
- sparx5_mact_learn(sparx5, PGID_CPU, addr->sa_data, port->pvid);
+ sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU),
+ addr->sa_data, port->pvid);
/* Record the address */
eth_hw_addr_set(dev, addr->sa_data);
@@ -290,7 +298,7 @@ int sparx5_register_netdevs(struct sparx5 *sparx5)
int portno;
int err;
- for (portno = 0; portno < SPX5_PORTS; portno++)
+ for (portno = 0; portno < sparx5->data->consts->n_ports; portno++)
if (sparx5->ports[portno]) {
err = register_netdev(sparx5->ports[portno]->ndev);
if (err) {
@@ -309,7 +317,7 @@ void sparx5_destroy_netdevs(struct sparx5 *sparx5)
struct sparx5_port *port;
int portno;
- for (portno = 0; portno < SPX5_PORTS; portno++) {
+ for (portno = 0; portno < sparx5->data->consts->n_ports; portno++) {
port = sparx5->ports[portno];
if (port && port->phylink) {
/* Disconnect the phy */
@@ -327,8 +335,7 @@ void sparx5_unregister_netdevs(struct sparx5 *sparx5)
{
int portno;
- for (portno = 0; portno < SPX5_PORTS; portno++)
+ for (portno = 0; portno < sparx5->data->consts->n_ports; portno++)
if (sparx5->ports[portno])
unregister_netdev(sparx5->ports[portno]->ndev);
}
-
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index 70427643f777..b6f635d85820 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -32,7 +32,7 @@ void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp)
spx5_wr(0, sparx5, QS_XTR_FLUSH);
}
-void sparx5_ifh_parse(u32 *ifh, struct frame_info *info)
+void sparx5_ifh_parse(struct sparx5 *sparx5, u32 *ifh, struct frame_info *info)
{
u8 *xtr_hdr = (u8 *)ifh;
@@ -43,7 +43,8 @@ void sparx5_ifh_parse(u32 *ifh, struct frame_info *info)
((u32)xtr_hdr[29] << 8) |
((u32)xtr_hdr[30] << 0);
fwd = (fwd >> 5);
- info->src_port = FIELD_GET(GENMASK(7, 1), fwd);
+ info->src_port = spx5_field_get(GENMASK(is_sparx5(sparx5) ? 7 : 6, 1),
+ fwd);
/*
* Bit 270-271 are occasionally unexpectedly set by the hardware,
@@ -72,10 +73,10 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
ifh[i] = spx5_rd(sparx5, QS_XTR_RD(grp));
/* Decode IFH (what's needed) */
- sparx5_ifh_parse(ifh, &fi);
+ sparx5_ifh_parse(sparx5, ifh, &fi);
/* Map to port netdev */
- port = fi.src_port < SPX5_PORTS ?
+ port = fi.src_port < sparx5->data->consts->n_ports ?
sparx5->ports[fi.src_port] : NULL;
if (!port || !port->ndev) {
dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
@@ -235,16 +236,19 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
netdev_tx_t ret;
memset(ifh, 0, IFH_LEN * 4);
- sparx5_set_port_ifh(ifh, port->portno);
+ sparx5_set_port_ifh(sparx5, ifh, port->portno);
if (sparx5->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
if (sparx5_ptp_txtstamp_request(port, skb) < 0)
return NETDEV_TX_BUSY;
sparx5_set_port_ifh_rew_op(ifh, SPARX5_SKB_CB(skb)->rew_op);
- sparx5_set_port_ifh_pdu_type(ifh, SPARX5_SKB_CB(skb)->pdu_type);
- sparx5_set_port_ifh_pdu_w16_offset(ifh, SPARX5_SKB_CB(skb)->pdu_w16_offset);
- sparx5_set_port_ifh_timestamp(ifh, SPARX5_SKB_CB(skb)->ts_id);
+ sparx5_set_port_ifh_pdu_type(sparx5, ifh,
+ SPARX5_SKB_CB(skb)->pdu_type);
+ sparx5_set_port_ifh_pdu_w16_offset(sparx5, ifh,
+ SPARX5_SKB_CB(skb)->pdu_w16_offset);
+ sparx5_set_port_ifh_timestamp(sparx5, ifh,
+ SPARX5_SKB_CB(skb)->ts_id);
}
skb_tx_timestamp(skb);
@@ -317,7 +321,9 @@ int sparx5_manual_injection_mode(struct sparx5 *sparx5)
sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));
/* CPU ports capture setup */
- for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) {
+ for (portno = sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0);
+ portno <= sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1);
+ portno++) {
/* ASM CPU port: No preamble, IFH, enable padding */
spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c
index af8b435009f4..eae819fa9486 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c
@@ -5,13 +5,13 @@ void sparx5_pgid_init(struct sparx5 *spx5)
{
int i;
- for (i = 0; i < PGID_TABLE_SIZE; i++)
+ for (i = 0; i < spx5->data->consts->n_pgids; i++)
spx5->pgid_map[i] = SPX5_PGID_FREE;
/* Reserved for unicast, flood control, broadcast, and CPU.
* These cannot be freed.
*/
- for (i = 0; i <= PGID_CPU; i++)
+ for (i = 0; i <= sparx5_get_pgid(spx5, PGID_CPU); i++)
spx5->pgid_map[i] = SPX5_PGID_RESERVED;
}
@@ -22,7 +22,8 @@ int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx)
/* The multicast area starts at index 65, but the first 7
* are reserved for flood masks and CPU. Start alloc after that.
*/
- for (i = PGID_MCAST_START; i < PGID_TABLE_SIZE; i++) {
+ for (i = sparx5_get_pgid(spx5, PGID_MCAST_START);
+ i < spx5->data->consts->n_pgids; i++) {
if (spx5->pgid_map[i] == SPX5_PGID_FREE) {
spx5->pgid_map[i] = SPX5_PGID_MULTICAST;
*idx = i;
@@ -35,7 +36,8 @@ int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx)
int sparx5_pgid_free(struct sparx5 *spx5, u16 idx)
{
- if (idx <= PGID_CPU || idx >= PGID_TABLE_SIZE)
+ if (idx <= sparx5_get_pgid(spx5, PGID_CPU) ||
+ idx >= spx5->data->consts->n_pgids)
return -EINVAL;
if (spx5->pgid_map[idx] == SPX5_PGID_FREE)
@@ -44,3 +46,8 @@ int sparx5_pgid_free(struct sparx5 *spx5, u16 idx)
spx5->pgid_map[idx] = SPX5_PGID_FREE;
return 0;
}
+
+int sparx5_get_pgid(struct sparx5 *sparx5, int pgid)
+{
+ return sparx5->data->consts->n_ports + pgid;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_police.c b/drivers/net/ethernet/microchip/sparx5/sparx5_police.c
index 8ada5cee1342..c88820e83812 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_police.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_police.c
@@ -11,10 +11,11 @@ static int sparx5_policer_service_conf_set(struct sparx5 *sparx5,
struct sparx5_policer *pol)
{
u32 idx, pup_tokens, max_pup_tokens, burst, thres;
+ const struct sparx5_ops *ops = sparx5->data->ops;
struct sparx5_sdlb_group *g;
u64 rate;
- g = &sdlb_groups[pol->group];
+ g = ops->get_sdlb_group(pol->group);
idx = pol->idx;
rate = pol->rate * 1000;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index 062e486c002c..1401761c6251 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -132,8 +132,8 @@ static int sparx5_get_sfi_status(struct sparx5 *sparx5,
return -EINVAL;
}
- dev = sparx5_to_high_dev(portno);
- tinst = sparx5_port_dev_index(portno);
+ dev = sparx5_to_high_dev(sparx5, portno);
+ tinst = sparx5_port_dev_index(sparx5, portno);
inst = spx5_inst_get(sparx5, dev, tinst);
value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
@@ -213,11 +213,13 @@ static int sparx5_port_verify_speed(struct sparx5 *sparx5,
struct sparx5_port *port,
struct sparx5_port_config *conf)
{
- if ((sparx5_port_is_2g5(port->portno) &&
+ const struct sparx5_ops *ops = sparx5->data->ops;
+
+ if ((ops->is_port_2g5(port->portno) &&
conf->speed > SPEED_2500) ||
- (sparx5_port_is_5g(port->portno) &&
+ (ops->is_port_5g(port->portno) &&
conf->speed > SPEED_5000) ||
- (sparx5_port_is_10g(port->portno) &&
+ (ops->is_port_10g(port->portno) &&
conf->speed > SPEED_10000))
return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
@@ -226,14 +228,14 @@ static int sparx5_port_verify_speed(struct sparx5 *sparx5,
return -EINVAL;
case PHY_INTERFACE_MODE_1000BASEX:
if (conf->speed != SPEED_1000 ||
- sparx5_port_is_2g5(port->portno))
+ ops->is_port_2g5(port->portno))
return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
- if (sparx5_port_is_2g5(port->portno))
+ if (ops->is_port_2g5(port->portno))
return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
break;
case PHY_INTERFACE_MODE_2500BASEX:
if (conf->speed != SPEED_2500 ||
- sparx5_port_is_2g5(port->portno))
+ ops->is_port_2g5(port->portno))
return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
break;
case PHY_INTERFACE_MODE_QSGMII:
@@ -316,10 +318,11 @@ static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno)
static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, bool high_spd_dev)
{
u32 tinst = high_spd_dev ?
- sparx5_port_dev_index(port->portno) : port->portno;
+ sparx5_port_dev_index(sparx5, port->portno) : port->portno;
u32 dev = high_spd_dev ?
- sparx5_to_high_dev(port->portno) : TARGET_DEV2G5;
+ sparx5_to_high_dev(sparx5, port->portno) : TARGET_DEV2G5;
void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst);
+ const struct sparx5_ops *ops = sparx5->data->ops;
u32 spd = port->conf.speed;
u32 spd_prm;
int err;
@@ -427,7 +430,7 @@ static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port,
HSCH_FLUSH_CTRL);
if (high_spd_dev) {
- u32 pcs = sparx5_to_pcs_dev(port->portno);
+ u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst);
/* 12: Disable 5G/10G/25 BaseR PCS */
@@ -436,7 +439,7 @@ static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port,
pcsinst,
PCS10G_BR_PCS_CFG(0));
- if (sparx5_port_is_25g(port->portno))
+ if (ops->is_port_25g(port->portno))
/* Disable 25G PCS */
spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0),
DEV25G_PCS25G_CFG_PCS25G_ENA,
@@ -473,6 +476,9 @@ static int sparx5_port_fifo_sz(struct sparx5 *sparx5,
u32 mac_width = 8;
u32 addition = 0;
+ if (!is_sparx5(sparx5))
+ return 0;
+
switch (speed) {
case SPEED_25000:
return 0;
@@ -513,9 +519,8 @@ static int sparx5_port_fifo_sz(struct sparx5 *sparx5,
/* Configure port muxing:
* QSGMII: 4x2G5 devices
*/
-static int sparx5_port_mux_set(struct sparx5 *sparx5,
- struct sparx5_port *port,
- struct sparx5_port_config *conf)
+int sparx5_port_mux_set(struct sparx5 *sparx5, struct sparx5_port *port,
+ struct sparx5_port_config *conf)
{
u32 portno = port->portno;
u32 inst;
@@ -558,9 +563,10 @@ static int sparx5_port_max_tags_set(struct sparx5 *sparx5,
bool dtag = max_tags == SPX5_PORT_MAX_TAGS_TWO;
enum sparx5_vlan_port_type vlan_type = port->vlan_type;
bool dotag = max_tags != SPX5_PORT_MAX_TAGS_NONE;
- u32 dev = sparx5_to_high_dev(port->portno);
- u32 tinst = sparx5_port_dev_index(port->portno);
+ u32 dev = sparx5_to_high_dev(sparx5, port->portno);
+ u32 tinst = sparx5_port_dev_index(sparx5, port->portno);
void __iomem *inst = spx5_inst_get(sparx5, dev, tinst);
+ const struct sparx5_ops *ops = sparx5->data->ops;
u32 etype;
etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
@@ -575,7 +581,7 @@ static int sparx5_port_max_tags_set(struct sparx5 *sparx5,
sparx5,
DEV2G5_MAC_TAGS_CFG(port->portno));
- if (sparx5_port_is_2g5(port->portno))
+ if (ops->is_port_2g5(port->portno))
return 0;
spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) |
@@ -789,9 +795,9 @@ static int sparx5_port_pcs_high_set(struct sparx5 *sparx5,
struct sparx5_port_config *conf)
{
u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 1 : 0;
- u32 pix = sparx5_port_dev_index(port->portno);
- u32 dev = sparx5_to_high_dev(port->portno);
- u32 pcs = sparx5_to_pcs_dev(port->portno);
+ u32 pix = sparx5_port_dev_index(sparx5, port->portno);
+ u32 dev = sparx5_to_high_dev(sparx5, port->portno);
+ u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
void __iomem *devinst;
void __iomem *pcsinst;
int err;
@@ -843,19 +849,22 @@ static int sparx5_port_pcs_high_set(struct sparx5 *sparx5,
/* Switch between 1G/2500 and 5G/10G/25G devices */
static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd)
{
- int bt_indx = BIT(sparx5_port_dev_index(port));
+ const struct sparx5_ops *ops = sparx5->data->ops;
+ int bt_indx;
+
+ bt_indx = BIT(ops->get_port_dev_bit(sparx5, port));
- if (sparx5_port_is_5g(port)) {
+ if (ops->is_port_5g(port)) {
spx5_rmw(hsd ? 0 : bt_indx,
bt_indx,
sparx5,
PORT_CONF_DEV5G_MODES);
- } else if (sparx5_port_is_10g(port)) {
+ } else if (ops->is_port_10g(port)) {
spx5_rmw(hsd ? 0 : bt_indx,
bt_indx,
sparx5,
PORT_CONF_DEV10G_MODES);
- } else if (sparx5_port_is_25g(port)) {
+ } else if (ops->is_port_25g(port)) {
spx5_rmw(hsd ? 0 : bt_indx,
bt_indx,
sparx5,
@@ -915,6 +924,20 @@ static int sparx5_port_config_low_set(struct sparx5 *sparx5,
sparx5,
DEV2G5_DEV_RST_CTRL(port->portno));
+ /* Enable PHAD_CTRL for better timestamping */
+ if (!is_sparx5(sparx5)) {
+ for (int i = 0; i < 2; ++i) {
+ /* Divide the port clock by three for the two
+ * phase detection registers.
+ */
+ spx5_rmw(DEV2G5_PHAD_CTRL_DIV_CFG_SET(3) |
+ DEV2G5_PHAD_CTRL_PHAD_ENA_SET(1),
+ DEV2G5_PHAD_CTRL_DIV_CFG |
+ DEV2G5_PHAD_CTRL_PHAD_ENA,
+ sparx5, DEV2G5_PHAD_CTRL(port->portno, i));
+ }
+ }
+
return 0;
}
@@ -972,6 +995,7 @@ int sparx5_port_config(struct sparx5 *sparx5,
struct sparx5_port_config *conf)
{
bool high_speed_dev = sparx5_is_baser(conf->portmode);
+ const struct sparx5_ops *ops = sparx5->data->ops;
int err, urgency, stop_wm;
err = sparx5_port_verify_speed(sparx5, port, conf);
@@ -987,6 +1011,13 @@ int sparx5_port_config(struct sparx5 *sparx5,
if (err)
return err;
+ if (!is_sparx5(sparx5) && ops->is_port_10g(port->portno) &&
+ conf->speed < SPEED_10000)
+ spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
+ DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
+ sparx5,
+ DSM_DEV_TX_STOP_WM_CFG(port->portno));
+
/* Set the DSM stop watermark */
stop_wm = sparx5_port_fifo_sz(sparx5, port->portno, conf->speed);
spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(stop_wm),
@@ -1016,9 +1047,10 @@ int sparx5_port_init(struct sparx5 *sparx5,
{
u32 pause_start = sparx5_wm_enc(6 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
- u32 devhigh = sparx5_to_high_dev(port->portno);
- u32 pix = sparx5_port_dev_index(port->portno);
- u32 pcs = sparx5_to_pcs_dev(port->portno);
+ const struct sparx5_ops *ops = sparx5->data->ops;
+ u32 devhigh = sparx5_to_high_dev(sparx5, port->portno);
+ u32 pix = sparx5_port_dev_index(sparx5, port->portno);
+ u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
bool sd_pol = port->signd_active_high;
bool sd_sel = !port->signd_internal;
bool sd_ena = port->signd_enable;
@@ -1031,7 +1063,7 @@ int sparx5_port_init(struct sparx5 *sparx5,
pcsinst = spx5_inst_get(sparx5, pcs, pix);
/* Set the mux port mode */
- err = sparx5_port_mux_set(sparx5, port, conf);
+ err = ops->set_port_mux(sparx5, port, conf);
if (err)
return err;
@@ -1082,7 +1114,7 @@ int sparx5_port_init(struct sparx5 *sparx5,
if (err)
return err;
- if (!sparx5_port_is_2g5(port->portno))
+ if (!ops->is_port_2g5(port->portno))
/* Enable shadow device */
spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
@@ -1105,7 +1137,7 @@ int sparx5_port_init(struct sparx5 *sparx5,
sparx5,
DEV2G5_MAC_IFG_CFG(port->portno));
- if (sparx5_port_is_2g5(port->portno))
+ if (ops->is_port_2g5(port->portno))
return 0; /* Low speed device only - return */
/* Now setup the high speed device */
@@ -1128,7 +1160,7 @@ int sparx5_port_init(struct sparx5 *sparx5,
pcsinst,
PCS10G_BR_PCS_SD_CFG(0));
- if (sparx5_port_is_25g(port->portno)) {
+ if (ops->is_port_25g(port->portno)) {
/* Handle Signal Detect in 25G PCS */
spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) |
DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) |
@@ -1137,6 +1169,27 @@ int sparx5_port_init(struct sparx5 *sparx5,
DEV25G_PCS25G_SD_CFG(pix));
}
+ if (!is_sparx5(sparx5)) {
+ void __iomem *inst;
+ u32 dev, tinst;
+
+ if (ops->is_port_10g(port->portno)) {
+ dev = sparx5_to_high_dev(sparx5, port->portno);
+ tinst = sparx5_port_dev_index(sparx5, port->portno);
+ inst = spx5_inst_get(sparx5, dev, tinst);
+
+ spx5_inst_wr(5, inst,
+ DEV10G_PTP_STAMPER_CFG(port->portno));
+ } else if (ops->is_port_5g(port->portno)) {
+ dev = sparx5_to_high_dev(sparx5, port->portno);
+ tinst = sparx5_port_dev_index(sparx5, port->portno);
+ inst = spx5_inst_get(sparx5, dev, tinst);
+
+ spx5_inst_wr(5, inst,
+ DEV5G_PTP_STAMPER_CFG(port->portno));
+ }
+ }
+
return 0;
}
@@ -1345,3 +1398,8 @@ int sparx5_port_qos_default_set(const struct sparx5_port *port,
return 0;
}
+
+int sparx5_get_internal_port(struct sparx5 *sparx5, int port)
+{
+ return sparx5->data->consts->n_ports + port;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
index 607c4ff1df6b..9b9bcc6834bc 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
@@ -40,25 +40,29 @@ static inline bool sparx5_port_is_25g(int portno)
return portno >= 56 && portno <= 63;
}
-static inline u32 sparx5_to_high_dev(int port)
+static inline u32 sparx5_to_high_dev(struct sparx5 *sparx5, int port)
{
- if (sparx5_port_is_5g(port))
+ const struct sparx5_ops *ops = sparx5->data->ops;
+
+ if (ops->is_port_5g(port))
return TARGET_DEV5G;
- if (sparx5_port_is_10g(port))
+ if (ops->is_port_10g(port))
return TARGET_DEV10G;
return TARGET_DEV25G;
}
-static inline u32 sparx5_to_pcs_dev(int port)
+static inline u32 sparx5_to_pcs_dev(struct sparx5 *sparx5, int port)
{
- if (sparx5_port_is_5g(port))
+ const struct sparx5_ops *ops = sparx5->data->ops;
+
+ if (ops->is_port_5g(port))
return TARGET_PCS5G_BR;
- if (sparx5_port_is_10g(port))
+ if (ops->is_port_10g(port))
return TARGET_PCS10G_BR;
return TARGET_PCS25G_BR;
}
-static inline int sparx5_port_dev_index(int port)
+static inline u32 sparx5_port_dev_mapping(struct sparx5 *sparx5, int port)
{
if (sparx5_port_is_2g5(port))
return port;
@@ -70,6 +74,11 @@ static inline int sparx5_port_dev_index(int port)
return (port - 56);
}
+static inline u32 sparx5_port_dev_index(struct sparx5 *sparx5, int port)
+{
+ return sparx5->data->ops->get_port_dev_index(sparx5, port);
+}
+
int sparx5_port_init(struct sparx5 *sparx5,
struct sparx5_port *spx5_port,
struct sparx5_port_config *conf);
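
Editor's note: the converted call sites in this patch reach all port-type checks and platform hooks through sparx5->data->ops. A partial sketch of that ops vector, reconstructed only from the call sites visible in this diff (the authoritative definition lives in the driver's main header):

/* Partial sketch of the per-platform ops; members and signatures are
 * inferred from the calls above, not copied from the header.
 */
struct sparx5_ops {
	bool (*is_port_2g5)(int portno);
	bool (*is_port_5g)(int portno);
	bool (*is_port_10g)(int portno);
	bool (*is_port_25g)(int portno);
	u32  (*get_port_dev_index)(struct sparx5 *sparx5, int port);
	u32  (*get_port_dev_bit)(struct sparx5 *sparx5, int port);
	int  (*set_port_mux)(struct sparx5 *sparx5, struct sparx5_port *port,
			     struct sparx5_port_config *conf);
	struct sparx5_sdlb_group *(*get_sdlb_group)(int idx);
	u32  (*get_hsch_max_group_rate)(int grp);
};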
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c
index 8dee1ab1fa75..cd4f42c3f7eb 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c
@@ -20,36 +20,40 @@ static struct sparx5_pool_entry sparx5_psfp_sg_pool[SPX5_PSFP_SG_CNT];
/* Pool of available stream filters */
static struct sparx5_pool_entry sparx5_psfp_sf_pool[SPX5_PSFP_SF_CNT];
-static int sparx5_psfp_sf_get(u32 *id)
+static int sparx5_psfp_sf_get(struct sparx5 *sparx5, u32 *id)
{
- return sparx5_pool_get(sparx5_psfp_sf_pool, SPX5_PSFP_SF_CNT, id);
+ return sparx5_pool_get(sparx5_psfp_sf_pool,
+ sparx5->data->consts->n_filters, id);
}
-static int sparx5_psfp_sf_put(u32 id)
+static int sparx5_psfp_sf_put(struct sparx5 *sparx5, u32 id)
{
- return sparx5_pool_put(sparx5_psfp_sf_pool, SPX5_PSFP_SF_CNT, id);
+ return sparx5_pool_put(sparx5_psfp_sf_pool,
+ sparx5->data->consts->n_filters, id);
}
-static int sparx5_psfp_sg_get(u32 idx, u32 *id)
+static int sparx5_psfp_sg_get(struct sparx5 *sparx5, u32 idx, u32 *id)
{
- return sparx5_pool_get_with_idx(sparx5_psfp_sg_pool, SPX5_PSFP_SG_CNT,
- idx, id);
+ return sparx5_pool_get_with_idx(sparx5_psfp_sg_pool,
+ sparx5->data->consts->n_gates, idx, id);
}
-static int sparx5_psfp_sg_put(u32 id)
+static int sparx5_psfp_sg_put(struct sparx5 *sparx5, u32 id)
{
- return sparx5_pool_put(sparx5_psfp_sg_pool, SPX5_PSFP_SG_CNT, id);
+ return sparx5_pool_put(sparx5_psfp_sg_pool,
+ sparx5->data->consts->n_gates, id);
}
-static int sparx5_psfp_fm_get(u32 idx, u32 *id)
+static int sparx5_psfp_fm_get(struct sparx5 *sparx5, u32 idx, u32 *id)
{
- return sparx5_pool_get_with_idx(sparx5_psfp_fm_pool, SPX5_SDLB_CNT, idx,
- id);
+ return sparx5_pool_get_with_idx(sparx5_psfp_fm_pool,
+ sparx5->data->consts->n_sdlbs, idx, id);
}
-static int sparx5_psfp_fm_put(u32 id)
+static int sparx5_psfp_fm_put(struct sparx5 *sparx5, u32 id)
{
- return sparx5_pool_put(sparx5_psfp_fm_pool, SPX5_SDLB_CNT, id);
+ return sparx5_pool_put(sparx5_psfp_fm_pool,
+ sparx5->data->consts->n_sdlbs, id);
}
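
Editor's note: the wrappers above only swap the compile-time pool sizes (SPX5_PSFP_SF_CNT and friends) for the runtime counts in consts; the pool mechanics are untouched. A minimal sketch of the contract assumed here (the real helpers live in sparx5_pool.c; the ref_cnt member is an assumption):

/* Sketch: a pool is an array of refcounted slots; get() hands out the
 * first free slot and returns its index through 'id'.
 */
static int sparx5_pool_get_sketch(struct sparx5_pool_entry *pool, int size,
				  u32 *id)
{
	int i;

	for (i = 0; i < size; i++) {
		if (!pool[i].ref_cnt) {
			pool[i].ref_cnt++;
			*id = i;
			return 0;
		}
	}
	return -ENOSPC;
}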
u32 sparx5_psfp_isdx_get_sf(struct sparx5 *sparx5, u32 isdx)
@@ -205,7 +209,7 @@ int sparx5_psfp_sf_add(struct sparx5 *sparx5, const struct sparx5_psfp_sf *sf,
{
int ret;
- ret = sparx5_psfp_sf_get(id);
+ ret = sparx5_psfp_sf_get(sparx5, id);
if (ret < 0)
return ret;
@@ -220,7 +224,7 @@ int sparx5_psfp_sf_del(struct sparx5 *sparx5, u32 id)
sparx5_psfp_sf_set(sparx5, id, &sf);
- return sparx5_psfp_sf_put(id);
+ return sparx5_psfp_sf_put(sparx5, id);
}
int sparx5_psfp_sg_add(struct sparx5 *sparx5, u32 uidx,
@@ -229,7 +233,7 @@ int sparx5_psfp_sg_add(struct sparx5 *sparx5, u32 uidx,
ktime_t basetime;
int ret;
- ret = sparx5_psfp_sg_get(uidx, id);
+ ret = sparx5_psfp_sg_get(sparx5, uidx, id);
if (ret < 0)
return ret;
/* Was already in use, no need to reconfigure */
@@ -253,7 +257,7 @@ int sparx5_psfp_sg_del(struct sparx5 *sparx5, u32 id)
const struct sparx5_psfp_sg sg = { 0 };
int ret;
- ret = sparx5_psfp_sg_put(id);
+ ret = sparx5_psfp_sg_put(sparx5, id);
if (ret < 0)
return ret;
/* Stream gate still in use ? */
@@ -270,7 +274,7 @@ int sparx5_psfp_fm_add(struct sparx5 *sparx5, u32 uidx,
int ret;
/* Get flow meter */
- ret = sparx5_psfp_fm_get(uidx, &fm->pol.idx);
+ ret = sparx5_psfp_fm_get(sparx5, uidx, &fm->pol.idx);
if (ret < 0)
return ret;
/* Was already in use, no need to reconfigure */
@@ -303,7 +307,7 @@ int sparx5_psfp_fm_del(struct sparx5 *sparx5, u32 id)
if (ret < 0)
return ret;
- ret = sparx5_psfp_fm_put(id);
+ ret = sparx5_psfp_fm_put(sparx5, id);
if (ret < 0)
return ret;
/* Do not reset flow-meter if still in use. */
@@ -315,11 +319,12 @@ int sparx5_psfp_fm_del(struct sparx5 *sparx5, u32 id)
void sparx5_psfp_init(struct sparx5 *sparx5)
{
+ const struct sparx5_ops *ops = sparx5->data->ops;
const struct sparx5_sdlb_group *group;
int i;
- for (i = 0; i < SPX5_SDLB_GROUP_CNT; i++) {
- group = &sdlb_groups[i];
+ for (i = 0; i < sparx5->data->consts->n_lb_groups; i++) {
+ group = ops->get_sdlb_group(i);
sparx5_sdlb_group_init(sparx5, group->max_rate,
group->min_burst, group->frame_size, i);
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
index 5a932460db58..1c2903700a9c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
@@ -11,8 +11,6 @@
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
-#define SPARX5_MAX_PTP_ID 512
-
#define TOD_ACC_PIN 0x4
enum {
@@ -38,6 +36,9 @@ static u64 sparx5_ptp_get_1ppm(struct sparx5 *sparx5)
case SPX5_CORE_CLOCK_250MHZ:
res = 2301339409586;
break;
+ case SPX5_CORE_CLOCK_328MHZ:
+ res = 1756832768924;
+ break;
case SPX5_CORE_CLOCK_500MHZ:
res = 1150669704793;
break;
@@ -60,6 +61,9 @@ static u64 sparx5_ptp_get_nominal_value(struct sparx5 *sparx5)
case SPX5_CORE_CLOCK_250MHZ:
res = 0x1FF0000000000000;
break;
+ case SPX5_CORE_CLOCK_328MHZ:
+ res = 0x18604697DD0F9B5B;
+ break;
case SPX5_CORE_CLOCK_500MHZ:
res = 0x0FF8000000000000;
break;
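
Editor's note: the new 328 MHz entries follow the existing fixed-point scheme, and each get_1ppm() constant appears to be the matching nominal increment divided by 10^6 (for 250 MHz, 0x1FF0000000000000 / 1000000 == 2301339409586; the 500 MHz pair checks out the same way). A sketch of that relation, assuming the tables stay consistent:

#include <linux/math64.h>

/* Sketch: derive the 1 ppm adjustment step from the nominal TOD
 * increment; reproduces the hard-coded per-clock constants above.
 */
static u64 sparx5_ptp_1ppm_from_nominal(u64 nominal)
{
	return div_u64(nominal, 1000000);
}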
@@ -269,11 +273,12 @@ void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
spin_unlock_irqrestore(&sparx5->ptp_ts_id_lock, flags);
}
-static void sparx5_get_hwtimestamp(struct sparx5 *sparx5,
- struct timespec64 *ts,
- u32 nsec)
+void sparx5_get_hwtimestamp(struct sparx5 *sparx5,
+ struct timespec64 *ts,
+ u32 nsec)
{
/* Read current PTP time to get seconds */
+ const struct sparx5_consts *consts = sparx5->data->consts;
unsigned long flags;
u32 curr_nsec;
@@ -285,10 +290,10 @@ static void sparx5_get_hwtimestamp(struct sparx5 *sparx5,
PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
PTP_PTP_PIN_CFG_PTP_PIN_DOM |
PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
- sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+ sparx5, PTP_PTP_PIN_CFG(consts->tod_pin));
- ts->tv_sec = spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
- curr_nsec = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+ ts->tv_sec = spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(consts->tod_pin));
+ curr_nsec = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(consts->tod_pin));
ts->tv_nsec = nsec;
@@ -298,6 +303,7 @@ static void sparx5_get_hwtimestamp(struct sparx5 *sparx5,
spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
}
+EXPORT_SYMBOL_GPL(sparx5_get_hwtimestamp);
irqreturn_t sparx5_ptp_irq_handler(int irq, void *args)
{
@@ -440,8 +446,11 @@ static int sparx5_ptp_settime64(struct ptp_clock_info *ptp,
{
struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
struct sparx5 *sparx5 = phc->sparx5;
+ const struct sparx5_consts *consts;
unsigned long flags;
+ consts = sparx5->data->consts;
+
spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
/* Must be in IDLE mode before the time can be loaded */
@@ -451,14 +460,14 @@ static int sparx5_ptp_settime64(struct ptp_clock_info *ptp,
PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
PTP_PTP_PIN_CFG_PTP_PIN_DOM |
PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
- sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+ sparx5, PTP_PTP_PIN_CFG(consts->tod_pin));
/* Set new value */
spx5_wr(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(upper_32_bits(ts->tv_sec)),
- sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ sparx5, PTP_PTP_TOD_SEC_MSB(consts->tod_pin));
spx5_wr(lower_32_bits(ts->tv_sec),
- sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
- spx5_wr(ts->tv_nsec, sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+ sparx5, PTP_PTP_TOD_SEC_LSB(consts->tod_pin));
+ spx5_wr(ts->tv_nsec, sparx5, PTP_PTP_TOD_NSEC(consts->tod_pin));
/* Apply new values */
spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_LOAD) |
@@ -467,7 +476,7 @@ static int sparx5_ptp_settime64(struct ptp_clock_info *ptp,
PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
PTP_PTP_PIN_CFG_PTP_PIN_DOM |
PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
- sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+ sparx5, PTP_PTP_PIN_CFG(consts->tod_pin));
spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
@@ -478,10 +487,13 @@ int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
struct sparx5 *sparx5 = phc->sparx5;
+ const struct sparx5_consts *consts;
unsigned long flags;
time64_t s;
s64 ns;
+ consts = sparx5->data->consts;
+
spin_lock_irqsave(&sparx5->ptp_clock_lock, flags);
spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) |
@@ -490,12 +502,12 @@ int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
PTP_PTP_PIN_CFG_PTP_PIN_DOM |
PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
- sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+ sparx5, PTP_PTP_PIN_CFG(consts->tod_pin));
- s = spx5_rd(sparx5, PTP_PTP_TOD_SEC_MSB(TOD_ACC_PIN));
+ s = spx5_rd(sparx5, PTP_PTP_TOD_SEC_MSB(consts->tod_pin));
s <<= 32;
- s |= spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(TOD_ACC_PIN));
- ns = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+ s |= spx5_rd(sparx5, PTP_PTP_TOD_SEC_LSB(consts->tod_pin));
+ ns = spx5_rd(sparx5, PTP_PTP_TOD_NSEC(consts->tod_pin));
ns &= PTP_PTP_TOD_NSEC_PTP_TOD_NSEC;
spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
@@ -515,6 +527,9 @@ static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
struct sparx5 *sparx5 = phc->sparx5;
+ const struct sparx5_consts *consts;
+
+ consts = sparx5->data->consts;
if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
unsigned long flags;
@@ -528,10 +543,10 @@ static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
PTP_PTP_PIN_CFG_PTP_PIN_DOM |
PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
- sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+ sparx5, PTP_PTP_PIN_CFG(consts->tod_pin));
spx5_wr(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(delta),
- sparx5, PTP_PTP_TOD_NSEC(TOD_ACC_PIN));
+ sparx5, PTP_PTP_TOD_NSEC(consts->tod_pin));
/* Adjust time with the value of PTP_TOD_NSEC */
spx5_rmw(PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(PTP_PIN_ACTION_DELTA) |
@@ -540,7 +555,7 @@ static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
PTP_PTP_PIN_CFG_PTP_PIN_ACTION |
PTP_PTP_PIN_CFG_PTP_PIN_DOM |
PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
- sparx5, PTP_PTP_PIN_CFG(TOD_ACC_PIN));
+ sparx5, PTP_PTP_PIN_CFG(consts->tod_pin));
spin_unlock_irqrestore(&sparx5->ptp_clock_lock, flags);
} else {
@@ -630,7 +645,7 @@ int sparx5_ptp_init(struct sparx5 *sparx5)
/* Enable master counters */
spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG);
- for (i = 0; i < SPX5_PORTS; i++) {
+ for (i = 0; i < sparx5->data->consts->n_ports; i++) {
port = sparx5->ports[i];
if (!port)
continue;
@@ -646,7 +661,7 @@ void sparx5_ptp_deinit(struct sparx5 *sparx5)
struct sparx5_port *port;
int i;
- for (i = 0; i < SPX5_PORTS; i++) {
+ for (i = 0; i < sparx5->data->consts->n_ports; i++) {
port = sparx5->ports[i];
if (!port)
continue;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
index 5f34febaee6b..e580670f3992 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
@@ -74,6 +74,11 @@ static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = {
26214200 /* 26.214 Gbps */
};
+u32 sparx5_get_hsch_max_group_rate(int grp)
+{
+ return spx5_hsch_max_group_rate[grp];
+}
+
static struct sparx5_layer layers[SPX5_HSCH_LAYER_CNT];
static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group)
@@ -362,9 +367,10 @@ static u32 sparx5_weight_to_hw_cost(u32 weight_min, u32 weight)
static int sparx5_dwrr_conf_set(struct sparx5_port *port,
struct sparx5_dwrr *dwrr)
{
+ u32 layer = is_sparx5(port->sparx5) ? 2 : 1;
int i;
- spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(2) |
+ spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer) |
HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(port->portno),
HSCH_HSCH_CFG_CFG_HSCH_LAYER | HSCH_HSCH_CFG_CFG_CFG_SE_IDX,
port->sparx5, HSCH_HSCH_CFG_CFG);
@@ -385,6 +391,7 @@ static int sparx5_dwrr_conf_set(struct sparx5_port *port,
static int sparx5_leak_groups_init(struct sparx5 *sparx5)
{
+ const struct sparx5_ops *ops = sparx5->data->ops;
struct sparx5_layer *layer;
u32 sys_clk_per_100ps;
struct sparx5_lg *lg;
@@ -397,7 +404,7 @@ static int sparx5_leak_groups_init(struct sparx5 *sparx5)
layer = &layers[i];
for (ii = 0; ii < SPX5_HSCH_LEAK_GRP_CNT; ii++) {
lg = &layer->leak_groups[ii];
- lg->max_rate = spx5_hsch_max_group_rate[ii];
+ lg->max_rate = ops->get_hsch_max_group_rate(ii);
/* Calculate the leak time in us, to serve a maximum
* rate of 'max_rate' for this group
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h
index ced35033a6c5..1231a80335d7 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h
@@ -79,4 +79,6 @@ int sparx5_tc_ets_add(struct sparx5_port *port,
int sparx5_tc_ets_del(struct sparx5_port *port);
+u32 sparx5_get_hsch_max_group_rate(int grp);
+
#endif /* __SPARX5_QOS_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_regs.c b/drivers/net/ethernet/microchip/sparx5/sparx5_regs.c
new file mode 100644
index 000000000000..220e81b714d4
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_regs.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc.
+ */
+
+/* This file is autogenerated by cml-utils 2024-09-30 11:48:29 +0200.
+ * Commit ID: 9d07b8d19363f3cd3590ddb3f7a2e2768e16524b
+ */
+
+#include "sparx5_regs.h"
+
+const unsigned int sparx5_tsize[TSIZE_LAST] = {
+ [TC_DEV10G] = 12,
+ [TC_DEV2G5] = 65,
+ [TC_DEV5G] = 13,
+ [TC_PCS10G_BR] = 12,
+ [TC_PCS5G_BR] = 13,
+};
+
+const unsigned int sparx5_raddr[RADDR_LAST] = {
+ [RA_CPU_PROC_CTRL] = 176,
+ [RA_GCB_SOFT_RST] = 8,
+ [RA_GCB_HW_SGPIO_TO_SD_MAP_CFG] = 24,
+};
+
+const unsigned int sparx5_rcnt[RCNT_LAST] = {
+ [RC_ANA_AC_OWN_UPSID] = 3,
+ [RC_ANA_ACL_VCAP_S2_CFG] = 70,
+ [RC_ANA_ACL_OWN_UPSID] = 3,
+ [RC_ANA_CL_OWN_UPSID] = 3,
+ [RC_ANA_L2_OWN_UPSID] = 3,
+ [RC_ASM_PORT_CFG] = 67,
+ [RC_DSM_BUF_CFG] = 67,
+ [RC_DSM_DEV_TX_STOP_WM_CFG] = 67,
+ [RC_DSM_RX_PAUSE_CFG] = 67,
+ [RC_DSM_MAC_CFG] = 67,
+ [RC_DSM_MAC_ADDR_BASE_HIGH_CFG] = 65,
+ [RC_DSM_MAC_ADDR_BASE_LOW_CFG] = 65,
+ [RC_DSM_TAXI_CAL_CFG] = 9,
+ [RC_GCB_HW_SGPIO_TO_SD_MAP_CFG] = 65,
+ [RC_HSCH_PORT_MODE] = 70,
+ [RC_QFWD_SWITCH_PORT_MODE] = 70,
+ [RC_QSYS_PAUSE_CFG] = 70,
+ [RC_QSYS_ATOP] = 70,
+ [RC_QSYS_FWD_PRESSURE] = 70,
+ [RC_QSYS_CAL_AUTO] = 7,
+ [RC_REW_OWN_UPSID] = 3,
+ [RC_REW_RTAG_ETAG_CTRL] = 70,
+};
+
+const unsigned int sparx5_gaddr[GADDR_LAST] = {
+ [GA_ANA_AC_RAM_CTRL] = 839108,
+ [GA_ANA_AC_PS_COMMON] = 894472,
+ [GA_ANA_AC_MIRROR_PROBE] = 893696,
+ [GA_ANA_AC_SRC] = 849920,
+ [GA_ANA_AC_PGID] = 786432,
+ [GA_ANA_AC_TSN_SF] = 839136,
+ [GA_ANA_AC_TSN_SF_CFG] = 839680,
+ [GA_ANA_AC_TSN_SF_STATUS] = 839072,
+ [GA_ANA_AC_SG_ACCESS] = 839140,
+ [GA_ANA_AC_SG_CONFIG] = 851584,
+ [GA_ANA_AC_SG_STATUS] = 839088,
+ [GA_ANA_AC_SG_STATUS_STICKY] = 839152,
+ [GA_ANA_AC_STAT_GLOBAL_CFG_PORT] = 851552,
+ [GA_ANA_AC_STAT_CNT_CFG_PORT] = 843776,
+ [GA_ANA_AC_STAT_GLOBAL_CFG_ACL] = 893792,
+ [GA_ANA_ACL_COMMON] = 32768,
+ [GA_ANA_ACL_KEY_SEL] = 34200,
+ [GA_ANA_ACL_CNT_B] = 16384,
+ [GA_ANA_ACL_STICKY] = 36408,
+ [GA_ANA_AC_POL_POL_ALL_CFG] = 75968,
+ [GA_ANA_AC_POL_COMMON_BDLB] = 79048,
+ [GA_ANA_AC_POL_COMMON_BUM_SLB] = 79056,
+ [GA_ANA_AC_SDLB_LBGRP_TBL] = 295468,
+ [GA_ANA_CL_PORT] = 131072,
+ [GA_ANA_CL_COMMON] = 166912,
+ [GA_ANA_L2_COMMON] = 566024,
+ [GA_ANA_L3_COMMON] = 493632,
+ [GA_ANA_L3_VLAN_ARP_L3MC_STICKY] = 491460,
+ [GA_ASM_CFG] = 33280,
+ [GA_ASM_PFC_TIMER_CFG] = 34716,
+ [GA_ASM_LBK_WM_CFG] = 34744,
+ [GA_ASM_LBK_MISC_CFG] = 34756,
+ [GA_ASM_RAM_CTRL] = 34832,
+ [GA_EACL_ES2_KEY_SELECT_PROFILE] = 149504,
+ [GA_EACL_CNT_TBL] = 122880,
+ [GA_EACL_POL_CFG] = 150608,
+ [GA_EACL_ES2_STICKY] = 118696,
+ [GA_EACL_RAM_CTRL] = 118736,
+ [GA_GCB_SIO_CTRL] = 876,
+ [GA_HSCH_HSCH_DWRR] = 162816,
+ [GA_HSCH_HSCH_MISC] = 163104,
+ [GA_HSCH_HSCH_LEAK_LISTS] = 161664,
+ [GA_HSCH_SYSTEM] = 184000,
+ [GA_HSCH_MMGT] = 162368,
+ [GA_HSCH_TAS_CONFIG] = 162384,
+ [GA_PTP_PTP_CFG] = 320,
+ [GA_PTP_PTP_TOD_DOMAINS] = 336,
+ [GA_PTP_PHASE_DETECTOR_CTRL] = 420,
+ [GA_QSYS_CALCFG] = 2304,
+ [GA_QSYS_RAM_CTRL] = 2344,
+ [GA_REW_COMMON] = 387264,
+ [GA_REW_PORT] = 360448,
+ [GA_REW_VOE_PORT_LM_CNT] = 393216,
+ [GA_REW_RAM_CTRL] = 378696,
+ [GA_VOP_RAM_CTRL] = 279176,
+ [GA_XQS_SYSTEM] = 6768,
+ [GA_XQS_QLIMIT_SHR] = 7936,
+};
+
+const unsigned int sparx5_gcnt[GCNT_LAST] = {
+ [GC_ANA_AC_SRC] = 102,
+ [GC_ANA_AC_PGID] = 3290,
+ [GC_ANA_AC_TSN_SF_CFG] = 1024,
+ [GC_ANA_AC_STAT_CNT_CFG_PORT] = 70,
+ [GC_ANA_ACL_KEY_SEL] = 134,
+ [GC_ANA_ACL_CNT_A] = 4096,
+ [GC_ANA_ACL_CNT_B] = 4096,
+ [GC_ANA_AC_SDLB_LBGRP_TBL] = 10,
+ [GC_ANA_AC_SDLB_LBSET_TBL] = 4616,
+ [GC_ANA_CL_PORT] = 70,
+ [GC_ANA_L2_ISDX_LIMIT] = 1536,
+ [GC_ANA_L2_ISDX] = 4096,
+ [GC_ANA_L3_VLAN] = 5120,
+ [GC_ASM_DEV_STATISTICS] = 65,
+ [GC_EACL_ES2_KEY_SELECT_PROFILE] = 138,
+ [GC_EACL_CNT_TBL] = 2048,
+ [GC_GCB_SIO_CTRL] = 3,
+ [GC_HSCH_HSCH_CFG] = 5040,
+ [GC_HSCH_HSCH_DWRR] = 72,
+ [GC_PTP_PTP_PINS] = 5,
+ [GC_PTP_PHASE_DETECTOR_CTRL] = 5,
+ [GC_REW_PORT] = 70,
+ [GC_REW_VOE_PORT_LM_CNT] = 520,
+};
+
+const unsigned int sparx5_gsize[GSIZE_LAST] = {
+ [GW_ANA_AC_SRC] = 16,
+ [GW_ANA_L2_COMMON] = 700,
+ [GW_ASM_CFG] = 1088,
+ [GW_CPU_CPU_REGS] = 204,
+ [GW_DEV2G5_PHASE_DETECTOR_CTRL] = 8,
+ [GW_FDMA_FDMA] = 428,
+ [GW_GCB_CHIP_REGS] = 424,
+ [GW_HSCH_TAS_CONFIG] = 12,
+ [GW_PTP_PHASE_DETECTOR_CTRL] = 8,
+ [GW_QSYS_PAUSE_CFG] = 1128,
+};
+
+const unsigned int sparx5_fpos[FPOS_LAST] = {
+ [FP_CPU_PROC_CTRL_AARCH64_MODE_ENA] = 12,
+ [FP_CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS] = 11,
+ [FP_CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS] = 10,
+ [FP_CPU_PROC_CTRL_BE_EXCEP_MODE] = 9,
+ [FP_CPU_PROC_CTRL_VINITHI] = 8,
+ [FP_CPU_PROC_CTRL_CFGTE] = 7,
+ [FP_CPU_PROC_CTRL_CP15S_DISABLE] = 6,
+ [FP_CPU_PROC_CTRL_PROC_CRYPTO_DISABLE] = 5,
+ [FP_CPU_PROC_CTRL_L2_FLUSH_REQ] = 1,
+ [FP_DEV2G5_PHAD_CTRL_PHAD_ENA] = 7,
+ [FP_DEV2G5_PHAD_CTRL_PHAD_FAILED] = 6,
+ [FP_FDMA_CH_CFG_CH_XTR_STATUS_MODE] = 7,
+ [FP_FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY] = 6,
+ [FP_FDMA_CH_CFG_CH_INJ_PORT] = 5,
+ [FP_PTP_PTP_PIN_CFG_PTP_PIN_ACTION] = 26,
+ [FP_PTP_PTP_PIN_CFG_PTP_PIN_SYNC] = 24,
+ [FP_PTP_PTP_PIN_CFG_PTP_PIN_INV_POL] = 23,
+ [FP_PTP_PHAD_CTRL_PHAD_ENA] = 7,
+ [FP_PTP_PHAD_CTRL_PHAD_FAILED] = 6,
+};
+
+const unsigned int sparx5_fsize[FSIZE_LAST] = {
+ [FW_ANA_AC_PROBE_PORT_CFG_PROBE_PORT_MASK] = 32,
+ [FW_ANA_AC_SRC_CFG_PORT_MASK] = 32,
+ [FW_ANA_AC_PGID_CFG_PORT_MASK] = 32,
+ [FW_ANA_AC_TSN_SF_PORT_NUM] = 9,
+ [FW_ANA_AC_TSN_SF_CFG_TSN_SGID] = 10,
+ [FW_ANA_AC_TSN_SF_STATUS_TSN_SFID] = 10,
+ [FW_ANA_AC_SG_ACCESS_CTRL_SGID] = 10,
+ [FW_ANA_AC_PORT_SGE_CFG_MASK] = 16,
+ [FW_ANA_AC_SDLB_XLB_START_LBSET_START] = 13,
+ [FW_ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT] = 5,
+ [FW_ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT] = 13,
+ [FW_ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT] = 13,
+ [FW_ANA_AC_SDLB_XLB_NEXT_LBGRP] = 4,
+ [FW_ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR] = 13,
+ [FW_ANA_L2_AUTO_LRN_CFG_AUTO_LRN_ENA] = 32,
+ [FW_ANA_L2_DLB_CFG_DLB_IDX] = 13,
+ [FW_ANA_L2_TSN_CFG_TSN_SFID] = 10,
+ [FW_ANA_L3_VLAN_MASK_CFG_VLAN_PORT_MASK] = 32,
+ [FW_FDMA_CH_CFG_CH_DCB_DB_CNT] = 4,
+ [FW_GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL] = 9,
+ [FW_HSCH_SE_CFG_SE_DWRR_CNT] = 7,
+ [FW_HSCH_SE_CONNECT_SE_LEAK_LINK] = 16,
+ [FW_HSCH_SE_DLB_SENSE_SE_DLB_DPORT] = 7,
+ [FW_HSCH_HSCH_CFG_CFG_CFG_SE_IDX] = 13,
+ [FW_HSCH_HSCH_LEAK_CFG_LEAK_FIRST] = 16,
+ [FW_HSCH_FLUSH_CTRL_FLUSH_PORT] = 7,
+ [FW_HSCH_FLUSH_CTRL_FLUSH_HIER] = 16,
+ [FW_LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW] = 14,
+ [FW_LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX] = 11,
+ [FW_LRN_AUTOAGE_CFG_2_NEXT_ROW] = 14,
+ [FW_PTP_PTP_PIN_INTR_INTR_PTP] = 5,
+ [FW_PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA] = 5,
+ [FW_PTP_PTP_INTR_IDENT_INTR_PTP_IDENT] = 5,
+ [FW_PTP_PTP_PIN_CFG_PTP_PIN_SELECT] = 2,
+ [FW_QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL] = 7,
+ [FW_QRES_RES_CFG_WM_HIGH] = 12,
+ [FW_QRES_RES_STAT_MAXUSE] = 21,
+ [FW_QRES_RES_STAT_CUR_INUSE] = 21,
+ [FW_QSYS_PAUSE_CFG_PAUSE_START] = 12,
+ [FW_QSYS_PAUSE_CFG_PAUSE_STOP] = 12,
+ [FW_QSYS_ATOP_ATOP] = 12,
+ [FW_QSYS_ATOP_TOT_CFG_ATOP_TOT] = 12,
+ [FW_REW_RTAG_ETAG_CTRL_IPE_TBL] = 7,
+ [FW_XQS_STAT_CFG_STAT_VIEW] = 13,
+ [FW_XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP] = 15,
+ [FW_XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP] = 15,
+ [FW_XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP] = 15,
+ [FW_XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM] = 15,
+};
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_regs.h
new file mode 100644
index 000000000000..ea28130c2341
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_regs.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc.
+ */
+
+/* This file is autogenerated by cml-utils 2024-09-30 11:48:29 +0200.
+ * Commit ID: 9d07b8d19363f3cd3590ddb3f7a2e2768e16524b
+ */
+
+#ifndef _SPARX5_REGS_H_
+#define _SPARX5_REGS_H_
+
+/* These enumerated values are used to index the platform specific structs
+ * containing the addresses, counts, sizes and positions of register groups,
+ * registers and fields.
+ */
+
+enum sparx5_tsize_enum {
+ TC_DEV10G,
+ TC_DEV2G5,
+ TC_DEV5G,
+ TC_PCS10G_BR,
+ TC_PCS5G_BR,
+ TSIZE_LAST,
+};
+
+enum sparx5_raddr_enum {
+ RA_CPU_PROC_CTRL,
+ RA_GCB_SOFT_RST,
+ RA_GCB_HW_SGPIO_TO_SD_MAP_CFG,
+ RADDR_LAST,
+};
+
+enum sparx5_rcnt_enum {
+ RC_ANA_AC_OWN_UPSID,
+ RC_ANA_ACL_VCAP_S2_CFG,
+ RC_ANA_ACL_OWN_UPSID,
+ RC_ANA_CL_OWN_UPSID,
+ RC_ANA_L2_OWN_UPSID,
+ RC_ASM_PORT_CFG,
+ RC_DSM_BUF_CFG,
+ RC_DSM_DEV_TX_STOP_WM_CFG,
+ RC_DSM_RX_PAUSE_CFG,
+ RC_DSM_MAC_CFG,
+ RC_DSM_MAC_ADDR_BASE_HIGH_CFG,
+ RC_DSM_MAC_ADDR_BASE_LOW_CFG,
+ RC_DSM_TAXI_CAL_CFG,
+ RC_GCB_HW_SGPIO_TO_SD_MAP_CFG,
+ RC_HSCH_PORT_MODE,
+ RC_QFWD_SWITCH_PORT_MODE,
+ RC_QSYS_PAUSE_CFG,
+ RC_QSYS_ATOP,
+ RC_QSYS_FWD_PRESSURE,
+ RC_QSYS_CAL_AUTO,
+ RC_REW_OWN_UPSID,
+ RC_REW_RTAG_ETAG_CTRL,
+ RCNT_LAST,
+};
+
+enum sparx5_gaddr_enum {
+ GA_ANA_AC_RAM_CTRL,
+ GA_ANA_AC_PS_COMMON,
+ GA_ANA_AC_MIRROR_PROBE,
+ GA_ANA_AC_SRC,
+ GA_ANA_AC_PGID,
+ GA_ANA_AC_TSN_SF,
+ GA_ANA_AC_TSN_SF_CFG,
+ GA_ANA_AC_TSN_SF_STATUS,
+ GA_ANA_AC_SG_ACCESS,
+ GA_ANA_AC_SG_CONFIG,
+ GA_ANA_AC_SG_STATUS,
+ GA_ANA_AC_SG_STATUS_STICKY,
+ GA_ANA_AC_STAT_GLOBAL_CFG_PORT,
+ GA_ANA_AC_STAT_CNT_CFG_PORT,
+ GA_ANA_AC_STAT_GLOBAL_CFG_ACL,
+ GA_ANA_ACL_COMMON,
+ GA_ANA_ACL_KEY_SEL,
+ GA_ANA_ACL_CNT_B,
+ GA_ANA_ACL_STICKY,
+ GA_ANA_AC_POL_POL_ALL_CFG,
+ GA_ANA_AC_POL_COMMON_BDLB,
+ GA_ANA_AC_POL_COMMON_BUM_SLB,
+ GA_ANA_AC_SDLB_LBGRP_TBL,
+ GA_ANA_CL_PORT,
+ GA_ANA_CL_COMMON,
+ GA_ANA_L2_COMMON,
+ GA_ANA_L3_COMMON,
+ GA_ANA_L3_VLAN_ARP_L3MC_STICKY,
+ GA_ASM_CFG,
+ GA_ASM_PFC_TIMER_CFG,
+ GA_ASM_LBK_WM_CFG,
+ GA_ASM_LBK_MISC_CFG,
+ GA_ASM_RAM_CTRL,
+ GA_EACL_ES2_KEY_SELECT_PROFILE,
+ GA_EACL_CNT_TBL,
+ GA_EACL_POL_CFG,
+ GA_EACL_ES2_STICKY,
+ GA_EACL_RAM_CTRL,
+ GA_GCB_SIO_CTRL,
+ GA_HSCH_HSCH_DWRR,
+ GA_HSCH_HSCH_MISC,
+ GA_HSCH_HSCH_LEAK_LISTS,
+ GA_HSCH_SYSTEM,
+ GA_HSCH_MMGT,
+ GA_HSCH_TAS_CONFIG,
+ GA_PTP_PTP_CFG,
+ GA_PTP_PTP_TOD_DOMAINS,
+ GA_PTP_PHASE_DETECTOR_CTRL,
+ GA_QSYS_CALCFG,
+ GA_QSYS_RAM_CTRL,
+ GA_REW_COMMON,
+ GA_REW_PORT,
+ GA_REW_VOE_PORT_LM_CNT,
+ GA_REW_RAM_CTRL,
+ GA_VOP_RAM_CTRL,
+ GA_XQS_SYSTEM,
+ GA_XQS_QLIMIT_SHR,
+ GADDR_LAST,
+};
+
+enum sparx5_gcnt_enum {
+ GC_ANA_AC_SRC,
+ GC_ANA_AC_PGID,
+ GC_ANA_AC_TSN_SF_CFG,
+ GC_ANA_AC_STAT_CNT_CFG_PORT,
+ GC_ANA_ACL_KEY_SEL,
+ GC_ANA_ACL_CNT_A,
+ GC_ANA_ACL_CNT_B,
+ GC_ANA_AC_SDLB_LBGRP_TBL,
+ GC_ANA_AC_SDLB_LBSET_TBL,
+ GC_ANA_CL_PORT,
+ GC_ANA_L2_ISDX_LIMIT,
+ GC_ANA_L2_ISDX,
+ GC_ANA_L3_VLAN,
+ GC_ASM_DEV_STATISTICS,
+ GC_EACL_ES2_KEY_SELECT_PROFILE,
+ GC_EACL_CNT_TBL,
+ GC_GCB_SIO_CTRL,
+ GC_HSCH_HSCH_CFG,
+ GC_HSCH_HSCH_DWRR,
+ GC_PTP_PTP_PINS,
+ GC_PTP_PHASE_DETECTOR_CTRL,
+ GC_REW_PORT,
+ GC_REW_VOE_PORT_LM_CNT,
+ GCNT_LAST,
+};
+
+enum sparx5_gsize_enum {
+ GW_ANA_AC_SRC,
+ GW_ANA_L2_COMMON,
+ GW_ASM_CFG,
+ GW_CPU_CPU_REGS,
+ GW_DEV2G5_PHASE_DETECTOR_CTRL,
+ GW_FDMA_FDMA,
+ GW_GCB_CHIP_REGS,
+ GW_HSCH_TAS_CONFIG,
+ GW_PTP_PHASE_DETECTOR_CTRL,
+ GW_QSYS_PAUSE_CFG,
+ GSIZE_LAST,
+};
+
+enum sparx5_fpos_enum {
+ FP_CPU_PROC_CTRL_AARCH64_MODE_ENA,
+ FP_CPU_PROC_CTRL_L2_RST_INVALIDATE_DIS,
+ FP_CPU_PROC_CTRL_L1_RST_INVALIDATE_DIS,
+ FP_CPU_PROC_CTRL_BE_EXCEP_MODE,
+ FP_CPU_PROC_CTRL_VINITHI,
+ FP_CPU_PROC_CTRL_CFGTE,
+ FP_CPU_PROC_CTRL_CP15S_DISABLE,
+ FP_CPU_PROC_CTRL_PROC_CRYPTO_DISABLE,
+ FP_CPU_PROC_CTRL_L2_FLUSH_REQ,
+ FP_DEV2G5_PHAD_CTRL_PHAD_ENA,
+ FP_DEV2G5_PHAD_CTRL_PHAD_FAILED,
+ FP_FDMA_CH_CFG_CH_XTR_STATUS_MODE,
+ FP_FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY,
+ FP_FDMA_CH_CFG_CH_INJ_PORT,
+ FP_PTP_PTP_PIN_CFG_PTP_PIN_ACTION,
+ FP_PTP_PTP_PIN_CFG_PTP_PIN_SYNC,
+ FP_PTP_PTP_PIN_CFG_PTP_PIN_INV_POL,
+ FP_PTP_PHAD_CTRL_PHAD_ENA,
+ FP_PTP_PHAD_CTRL_PHAD_FAILED,
+ FPOS_LAST,
+};
+
+enum sparx5_fsize_enum {
+ FW_ANA_AC_PROBE_PORT_CFG_PROBE_PORT_MASK,
+ FW_ANA_AC_SRC_CFG_PORT_MASK,
+ FW_ANA_AC_PGID_CFG_PORT_MASK,
+ FW_ANA_AC_TSN_SF_PORT_NUM,
+ FW_ANA_AC_TSN_SF_CFG_TSN_SGID,
+ FW_ANA_AC_TSN_SF_STATUS_TSN_SFID,
+ FW_ANA_AC_SG_ACCESS_CTRL_SGID,
+ FW_ANA_AC_PORT_SGE_CFG_MASK,
+ FW_ANA_AC_SDLB_XLB_START_LBSET_START,
+ FW_ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT,
+ FW_ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT,
+ FW_ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT,
+ FW_ANA_AC_SDLB_XLB_NEXT_LBGRP,
+ FW_ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR,
+ FW_ANA_L2_AUTO_LRN_CFG_AUTO_LRN_ENA,
+ FW_ANA_L2_DLB_CFG_DLB_IDX,
+ FW_ANA_L2_TSN_CFG_TSN_SFID,
+ FW_ANA_L3_VLAN_MASK_CFG_VLAN_PORT_MASK,
+ FW_FDMA_CH_CFG_CH_DCB_DB_CNT,
+ FW_GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL,
+ FW_HSCH_SE_CFG_SE_DWRR_CNT,
+ FW_HSCH_SE_CONNECT_SE_LEAK_LINK,
+ FW_HSCH_SE_DLB_SENSE_SE_DLB_DPORT,
+ FW_HSCH_HSCH_CFG_CFG_CFG_SE_IDX,
+ FW_HSCH_HSCH_LEAK_CFG_LEAK_FIRST,
+ FW_HSCH_FLUSH_CTRL_FLUSH_PORT,
+ FW_HSCH_FLUSH_CTRL_FLUSH_HIER,
+ FW_LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_ROW,
+ FW_LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX,
+ FW_LRN_AUTOAGE_CFG_2_NEXT_ROW,
+ FW_PTP_PTP_PIN_INTR_INTR_PTP,
+ FW_PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA,
+ FW_PTP_PTP_INTR_IDENT_INTR_PTP_IDENT,
+ FW_PTP_PTP_PIN_CFG_PTP_PIN_SELECT,
+ FW_QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL,
+ FW_QRES_RES_CFG_WM_HIGH,
+ FW_QRES_RES_STAT_MAXUSE,
+ FW_QRES_RES_STAT_CUR_INUSE,
+ FW_QSYS_PAUSE_CFG_PAUSE_START,
+ FW_QSYS_PAUSE_CFG_PAUSE_STOP,
+ FW_QSYS_ATOP_ATOP,
+ FW_QSYS_ATOP_TOT_CFG_ATOP_TOT,
+ FW_REW_RTAG_ETAG_CTRL_IPE_TBL,
+ FW_XQS_STAT_CFG_STAT_VIEW,
+ FW_XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP,
+ FW_XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP,
+ FW_XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP,
+ FW_XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM,
+ FSIZE_LAST,
+};
+
+extern const unsigned int sparx5_tsize[TSIZE_LAST];
+extern const unsigned int sparx5_raddr[RADDR_LAST];
+extern const unsigned int sparx5_rcnt[RCNT_LAST];
+extern const unsigned int sparx5_gaddr[GADDR_LAST];
+extern const unsigned int sparx5_gcnt[GCNT_LAST];
+extern const unsigned int sparx5_gsize[GSIZE_LAST];
+extern const unsigned int sparx5_fpos[FPOS_LAST];
+extern const unsigned int sparx5_fsize[FSIZE_LAST];
+
+#endif /* _SPARX5_REGS_H_ */
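
For readers unfamiliar with the layout: common driver code indexes per-platform const tables with these shared enums, so only the tables differ between Sparx5 and its derivatives. A minimal userspace sketch of the pattern, with made-up names and offsets:

#include <stdio.h>

/* Enum-indexed lookup as described in the header comment above; all
 * names and offsets here are invented for illustration. */
enum demo_gaddr { GA_DEMO_RAM_CTRL, GA_DEMO_PS_COMMON, GADDR_DEMO_LAST };

static const unsigned int demo_gaddr[GADDR_DEMO_LAST] = {
	[GA_DEMO_RAM_CTRL] = 0x1000,
	[GA_DEMO_PS_COMMON] = 0x2000,
};

int main(void)
{
	printf("RAM_CTRL group at 0x%x\n", demo_gaddr[GA_DEMO_RAM_CTRL]);
	return 0;
}
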
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c b/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c
index f5267218caeb..98a3f44c569c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c
@@ -20,17 +20,18 @@ struct sparx5_sdlb_group sdlb_groups[SPX5_SDLB_GROUP_CNT] = {
{ 5000000ULL, 8192 / 8, 64 } /* 5 M */
};
-int sparx5_sdlb_clk_hz_get(struct sparx5 *sparx5)
+struct sparx5_sdlb_group *sparx5_get_sdlb_group(int idx)
+{
+ return &sdlb_groups[idx];
+}
+
+u64 sparx5_sdlb_clk_hz_get(struct sparx5 *sparx5)
{
- u32 clk_per_100ps;
u64 clk_hz;
- clk_per_100ps = HSCH_SYS_CLK_PER_100PS_GET(spx5_rd(sparx5,
- HSCH_SYS_CLK_PER));
- if (!clk_per_100ps)
- clk_per_100ps = SPX5_CLK_PER_100PS_DEFAULT;
+ clk_hz = (10 * 1000 * 1000) /
+ (sparx5_clk_period(sparx5->coreclock) / 100);
- clk_hz = (10 * 1000 * 1000) / clk_per_100ps;
return clk_hz *= 1000;
}
@@ -178,14 +179,15 @@ static int sparx5_sdlb_group_get_count(struct sparx5 *sparx5, u32 group)
int sparx5_sdlb_group_get_by_rate(struct sparx5 *sparx5, u32 rate, u32 burst)
{
+ const struct sparx5_ops *ops = sparx5->data->ops;
const struct sparx5_sdlb_group *group;
u64 rate_bps;
int i, count;
rate_bps = rate * 1000;
- for (i = SPX5_SDLB_GROUP_CNT - 1; i >= 0; i--) {
- group = &sdlb_groups[i];
+ for (i = sparx5->data->consts->n_lb_groups - 1; i >= 0; i--) {
+ group = ops->get_sdlb_group(i);
count = sparx5_sdlb_group_get_count(sparx5, i);
@@ -208,7 +210,7 @@ int sparx5_sdlb_group_get_by_index(struct sparx5 *sparx5, u32 idx, u32 *group)
u32 itr, next;
int i;
- for (i = 0; i < SPX5_SDLB_GROUP_CNT; i++) {
+ for (i = 0; i < sparx5->data->consts->n_lb_groups; i++) {
if (sparx5_sdlb_group_is_empty(sparx5, i))
continue;
@@ -303,11 +305,12 @@ int sparx5_sdlb_group_del(struct sparx5 *sparx5, u32 group, u32 idx)
void sparx5_sdlb_group_init(struct sparx5 *sparx5, u64 max_rate, u32 min_burst,
u32 frame_size, u32 idx)
{
+ const struct sparx5_ops *ops = sparx5->data->ops;
u32 thres_shift, mask = 0x01, power = 0;
struct sparx5_sdlb_group *group;
u64 max_token;
- group = &sdlb_groups[idx];
+ group = ops->get_sdlb_group(idx);
/* Number of positions to right-shift LB's threshold value. */
while ((min_burst & mask) == 0) {
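
The reworked sparx5_sdlb_clk_hz_get() above derives the leaky-bucket clock rate from the core clock period alone. A standalone sketch of the same arithmetic, assuming a hypothetical 1600 ps (625 MHz) period in place of sparx5_clk_period():

#include <stdio.h>
#include <stdint.h>

/* Stand-in for sparx5_clk_period(): core clock period in picoseconds,
 * assumed to be 1600 ps (625 MHz) for this example. */
static uint64_t clk_period_ps(void) { return 1600; }

static uint64_t sdlb_clk_hz(void)
{
	/* Same arithmetic as the patched function: dividing 10^7 by the
	 * period expressed in 100 ps units yields kHz; scale to Hz. */
	uint64_t clk_khz = (10 * 1000 * 1000) / (clk_period_ps() / 100);

	return clk_khz * 1000;
}

int main(void)
{
	printf("%llu Hz\n", (unsigned long long)sdlb_clk_hz()); /* 625000000 */
	return 0;
}
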
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
index 0b4abc3eb53d..bc9ecb9392cd 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -32,24 +32,34 @@ static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
static void sparx5_port_update_mcast_ip_flood(struct sparx5_port *port, bool flood_flag)
{
bool should_flood = flood_flag || port->is_mrouter;
+ struct sparx5 *sparx5 = port->sparx5;
int pgid;
- for (pgid = PGID_IPV4_MC_DATA; pgid <= PGID_IPV6_MC_CTRL; pgid++)
+ for (pgid = sparx5_get_pgid(sparx5, PGID_IPV4_MC_DATA);
+ pgid <= sparx5_get_pgid(sparx5, PGID_IPV6_MC_CTRL); pgid++)
sparx5_pgid_update_mask(port, pgid, should_flood);
}
static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
struct switchdev_brport_flags flags)
{
+ struct sparx5 *sparx5 = port->sparx5;
+
if (flags.mask & BR_MCAST_FLOOD) {
- sparx5_pgid_update_mask(port, PGID_MC_FLOOD, !!(flags.val & BR_MCAST_FLOOD));
+ sparx5_pgid_update_mask(port,
+ sparx5_get_pgid(sparx5, PGID_MC_FLOOD),
+ !!(flags.val & BR_MCAST_FLOOD));
sparx5_port_update_mcast_ip_flood(port, !!(flags.val & BR_MCAST_FLOOD));
}
if (flags.mask & BR_FLOOD)
- sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD));
+ sparx5_pgid_update_mask(port,
+ sparx5_get_pgid(sparx5, PGID_UC_FLOOD),
+ !!(flags.val & BR_FLOOD));
if (flags.mask & BR_BCAST_FLOOD)
- sparx5_pgid_update_mask(port, PGID_BCAST, !!(flags.val & BR_BCAST_FLOOD));
+ sparx5_pgid_update_mask(port,
+ sparx5_get_pgid(sparx5, PGID_BCAST),
+ !!(flags.val & BR_BCAST_FLOOD));
}
static void sparx5_attr_stp_state_set(struct sparx5_port *port,
@@ -219,7 +229,8 @@ static void sparx5_port_bridge_leave(struct sparx5_port *port,
port->vid = NULL_VID;
/* Forward frames to CPU */
- sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, 0);
+ sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU),
+ port->ndev->dev_addr, 0);
	/* Port enters in host mode, therefore restore mc list */
__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
@@ -254,7 +265,8 @@ static int sparx5_port_add_addr(struct net_device *dev, bool up)
u16 vid = port->pvid;
if (up)
- sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
+ sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU),
+ port->ndev->dev_addr, vid);
else
sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);
@@ -330,7 +342,8 @@ static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
if (host_addr)
- sparx5_add_mact_entry(sparx5, dev, PGID_CPU,
+ sparx5_add_mact_entry(sparx5, dev,
+ sparx5_get_pgid(sparx5, PGID_CPU),
fdb_info->addr, vid);
else
sparx5_add_mact_entry(sparx5, port->ndev, port->portno,
@@ -418,8 +431,8 @@ static int sparx5_handle_port_vlan_add(struct net_device *dev,
switchdev_blocking_nb);
/* Flood broadcast to CPU */
- sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
- v->vid);
+ sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_BCAST),
+ dev->broadcast, v->vid);
return 0;
}
@@ -547,7 +560,7 @@ static int sparx5_handle_port_mdb_add(struct net_device *dev,
/* Add any mrouter ports to the new entry */
if (is_new && ether_addr_is_ip_mcast(v->addr))
- for (i = 0; i < SPX5_PORTS; i++)
+ for (i = 0; i < spx5->data->consts->n_ports; i++)
if (spx5->ports[i] && spx5->ports[i]->is_mrouter)
sparx5_pgid_update_mask(spx5->ports[i],
entry->pgid_idx,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
index e80f3166db7d..28b2514c8330 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
@@ -60,8 +60,8 @@ static int sparx5_tc_setup_block(struct net_device *ndev,
cb, ndev, ndev, false);
}
-static void sparx5_tc_get_layer_and_idx(u32 parent, u32 portno, u32 *layer,
- u32 *idx)
+static void sparx5_tc_get_layer_and_idx(struct sparx5 *sparx5, u32 parent,
+ u32 portno, u32 *layer, u32 *idx)
{
if (parent == TC_H_ROOT) {
*layer = 2;
@@ -90,8 +90,8 @@ static int sparx5_tc_setup_qdisc_tbf(struct net_device *ndev,
struct sparx5_port *port = netdev_priv(ndev);
u32 layer, se_idx;
- sparx5_tc_get_layer_and_idx(qopt->parent, port->portno, &layer,
- &se_idx);
+ sparx5_tc_get_layer_and_idx(port->sparx5, qopt->parent, port->portno,
+ &layer, &se_idx);
switch (qopt->command) {
case TC_TBF_REPLACE:
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
index 8d67d9f24c76..4dc1ebd5d510 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
@@ -785,7 +785,9 @@ static int sparx5_tc_flower_psfp_setup(struct sparx5 *sparx5,
* allocate a stream gate that is always open.
*/
if (sg_idx < 0) {
- sg_idx = sparx5_pool_idx_to_id(SPX5_PSFP_SG_OPEN);
+		/* The always-open stream gate is always the last one */
+ sg_idx = sparx5_pool_idx_to_id(sparx5->data->consts->n_gates -
+ 1);
sg->ipv = 0; /* Disabled */
sg->cycletime = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
sg->num_entries = 1;
@@ -1282,6 +1284,11 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
/* Setup PSFP */
if (tc_sg_idx >= 0 || tc_pol_idx >= 0) {
+ if (!sparx5_has_feature(sparx5, SPX5_FEATURE_PSFP)) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
err = sparx5_tc_flower_psfp_setup(sparx5, vrule, tc_sg_idx,
tc_pol_idx, &sg, &fm, &sf);
if (err)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h
index 7d106f1276fe..e68f5639a40a 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h
@@ -10,6 +10,8 @@
#ifndef __SPARX5_VCAP_AG_API_H__
#define __SPARX5_VCAP_AG_API_H__
+#include "vcap_api.h"
+
/* VCAPs */
extern const struct vcap_info sparx5_vcaps[];
extern const struct vcap_statistics sparx5_vcap_stats;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
index 967c8621c250..25066ddb8d4d 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
@@ -17,7 +17,6 @@
#define SUPER_VCAP_BLK_SIZE 3072 /* addresses per Super VCAP block */
#define STREAMSIZE (64 * 4) /* bytes in the VCAP cache area */
-#define SPARX5_IS2_LOOKUPS 4
#define VCAP_IS2_KEYSEL(_ena, _noneth, _v4_mc, _v4_uc, _v6_mc, _v6_uc, _arp) \
(ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_SET(_ena) | \
ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_SET(_noneth) | \
@@ -27,7 +26,6 @@
ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_SET(_v6_uc) | \
ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_SET(_arp))
-#define SPARX5_IS0_LOOKUPS 6
#define VCAP_IS0_KEYSEL(_ena, _etype, _ipv4, _ipv6, _mpls_uc, _mpls_mc, _mlbs) \
(ANA_CL_ADV_CL_CFG_LOOKUP_ENA_SET(_ena) | \
ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_SET(_etype) | \
@@ -37,31 +35,17 @@
ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL_SET(_mpls_mc) | \
ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL_SET(_mlbs))
-#define SPARX5_ES0_LOOKUPS 1
#define VCAP_ES0_KEYSEL(_key) (REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_SET(_key))
#define SPARX5_STAT_ESDX_GRN_PKTS 0x300
#define SPARX5_STAT_ESDX_YEL_PKTS 0x301
-#define SPARX5_ES2_LOOKUPS 2
#define VCAP_ES2_KEYSEL(_ena, _arp, _ipv4, _ipv6) \
(EACL_VCAP_ES2_KEY_SEL_KEY_ENA_SET(_ena) | \
EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_SET(_arp) | \
EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_SET(_ipv4) | \
EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_SET(_ipv6))
-static struct sparx5_vcap_inst {
- enum vcap_type vtype; /* type of vcap */
- int vinst; /* instance number within the same type */
- int lookups; /* number of lookups in this vcap type */
- int lookups_per_instance; /* number of lookups in this instance */
- int first_cid; /* first chain id in this vcap */
- int last_cid; /* last chain id in this vcap */
- int count; /* number of available addresses, not in super vcap */
- int map_id; /* id in the super vcap block mapping (if applicable) */
- int blockno; /* starting block in super vcap (if applicable) */
- int blocks; /* number of blocks in super vcap (if applicable) */
- bool ingress; /* is vcap in the ingress path */
-} sparx5_vcap_inst_cfg[] = {
+const struct sparx5_vcap_inst sparx5_vcap_inst_cfg[] = {
{
.vtype = VCAP_TYPE_IS0, /* CLM-0 */
.vinst = 0,
@@ -1793,6 +1777,7 @@ void sparx5_vcap_set_port_keyset(struct net_device *ndev,
static void sparx5_vcap_is0_port_key_selection(struct sparx5 *sparx5,
struct vcap_admin *admin)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
int portno, lookup;
u32 keysel;
@@ -1804,7 +1789,7 @@ static void sparx5_vcap_is0_port_key_selection(struct sparx5 *sparx5,
VCAP_IS0_PS_MPLS_FOLLOW_ETYPE,
VCAP_IS0_PS_MLBS_FOLLOW_ETYPE);
for (lookup = 0; lookup < admin->lookups; ++lookup) {
- for (portno = 0; portno < SPX5_PORTS; ++portno) {
+ for (portno = 0; portno < consts->n_ports; ++portno) {
spx5_wr(keysel, sparx5,
ANA_CL_ADV_CL_CFG(portno, lookup));
spx5_rmw(ANA_CL_ADV_CL_CFG_LOOKUP_ENA,
@@ -1819,6 +1804,7 @@ static void sparx5_vcap_is0_port_key_selection(struct sparx5 *sparx5,
static void sparx5_vcap_is2_port_key_selection(struct sparx5 *sparx5,
struct vcap_admin *admin)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
int portno, lookup;
u32 keysel;
@@ -1829,13 +1815,13 @@ static void sparx5_vcap_is2_port_key_selection(struct sparx5 *sparx5,
VCAP_IS2_PS_IPV6_UC_IP_7TUPLE,
VCAP_IS2_PS_ARP_ARP);
for (lookup = 0; lookup < admin->lookups; ++lookup) {
- for (portno = 0; portno < SPX5_PORTS; ++portno) {
+ for (portno = 0; portno < consts->n_ports; ++portno) {
spx5_wr(keysel, sparx5,
ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
}
}
	/* IS2 lookups are in bits 0:3 */
- for (portno = 0; portno < SPX5_PORTS; ++portno)
+ for (portno = 0; portno < consts->n_ports; ++portno)
spx5_rmw(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0xf),
ANA_ACL_VCAP_S2_CFG_SEC_ENA,
sparx5,
@@ -1846,11 +1832,12 @@ static void sparx5_vcap_is2_port_key_selection(struct sparx5 *sparx5,
static void sparx5_vcap_es0_port_key_selection(struct sparx5 *sparx5,
struct vcap_admin *admin)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
int portno;
u32 keysel;
keysel = VCAP_ES0_KEYSEL(VCAP_ES0_PS_FORCE_ISDX_LOOKUPS);
- for (portno = 0; portno < SPX5_PORTS; ++portno)
+ for (portno = 0; portno < consts->n_ports; ++portno)
spx5_rmw(keysel, REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA,
sparx5, REW_RTAG_ETAG_CTRL(portno));
@@ -1862,6 +1849,7 @@ static void sparx5_vcap_es0_port_key_selection(struct sparx5 *sparx5,
static void sparx5_vcap_es2_port_key_selection(struct sparx5 *sparx5,
struct vcap_admin *admin)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
int portno, lookup;
u32 keysel;
@@ -1869,7 +1857,7 @@ static void sparx5_vcap_es2_port_key_selection(struct sparx5 *sparx5,
VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER,
VCAP_ES2_PS_IPV6_IP_7TUPLE);
for (lookup = 0; lookup < admin->lookups; ++lookup)
- for (portno = 0; portno < SPX5_PORTS; ++portno)
+ for (portno = 0; portno < consts->n_ports; ++portno)
spx5_wr(keysel, sparx5,
EACL_VCAP_ES2_KEY_SEL(portno, lookup));
}
@@ -1901,19 +1889,20 @@ static void sparx5_vcap_port_key_selection(struct sparx5 *sparx5,
static void sparx5_vcap_port_key_deselection(struct sparx5 *sparx5,
struct vcap_admin *admin)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
int portno, lookup;
switch (admin->vtype) {
case VCAP_TYPE_IS0:
for (lookup = 0; lookup < admin->lookups; ++lookup)
- for (portno = 0; portno < SPX5_PORTS; ++portno)
+ for (portno = 0; portno < consts->n_ports; ++portno)
spx5_rmw(ANA_CL_ADV_CL_CFG_LOOKUP_ENA_SET(0),
ANA_CL_ADV_CL_CFG_LOOKUP_ENA,
sparx5,
ANA_CL_ADV_CL_CFG(portno, lookup));
break;
case VCAP_TYPE_IS2:
- for (portno = 0; portno < SPX5_PORTS; ++portno)
+ for (portno = 0; portno < consts->n_ports; ++portno)
spx5_rmw(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0),
ANA_ACL_VCAP_S2_CFG_SEC_ENA,
sparx5,
@@ -1925,7 +1914,7 @@ static void sparx5_vcap_port_key_deselection(struct sparx5 *sparx5,
break;
case VCAP_TYPE_ES2:
for (lookup = 0; lookup < admin->lookups; ++lookup)
- for (portno = 0; portno < SPX5_PORTS; ++portno)
+ for (portno = 0; portno < consts->n_ports; ++portno)
spx5_rmw(EACL_VCAP_ES2_KEY_SEL_KEY_ENA_SET(0),
EACL_VCAP_ES2_KEY_SEL_KEY_ENA,
sparx5,
@@ -2042,6 +2031,7 @@ static void sparx5_vcap_block_alloc(struct sparx5 *sparx5,
/* Allocate a vcap control and vcap instances and configure the system */
int sparx5_vcap_init(struct sparx5 *sparx5)
{
+ const struct sparx5_consts *consts = sparx5->data->consts;
const struct sparx5_vcap_inst *cfg;
struct vcap_control *ctrl;
struct vcap_admin *admin;
@@ -2063,14 +2053,14 @@ int sparx5_vcap_init(struct sparx5 *sparx5)
sparx5->vcap_ctrl = ctrl;
/* select the sparx5 VCAP model */
- ctrl->vcaps = sparx5_vcaps;
- ctrl->stats = &sparx5_vcap_stats;
+ ctrl->vcaps = consts->vcaps;
+ ctrl->stats = consts->vcap_stats;
/* Setup callbacks to allow the API to use the VCAP HW */
ctrl->ops = &sparx5_vcap_ops;
INIT_LIST_HEAD(&ctrl->list);
for (idx = 0; idx < ARRAY_SIZE(sparx5_vcap_inst_cfg); ++idx) {
- cfg = &sparx5_vcap_inst_cfg[idx];
+ cfg = &consts->vcaps_cfg[idx];
admin = sparx5_vcap_admin_alloc(sparx5, ctrl, cfg);
if (IS_ERR(admin)) {
err = PTR_ERR(admin);
@@ -2085,7 +2075,7 @@ int sparx5_vcap_init(struct sparx5 *sparx5)
list_add_tail(&admin->list, &ctrl->list);
}
dir = vcap_debugfs(sparx5->dev, sparx5->debugfs_root, ctrl);
- for (idx = 0; idx < SPX5_PORTS; ++idx)
+ for (idx = 0; idx < consts->n_ports; ++idx)
if (sparx5->ports[idx])
vcap_port_debugfs(sparx5->dev, dir, ctrl,
sparx5->ports[idx]->ndev);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
index 2684d9199b05..d0a42406bf26 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
@@ -16,6 +16,11 @@
#include "vcap_api.h"
#include "vcap_api_client.h"
+#define SPARX5_IS2_LOOKUPS 4
+#define SPARX5_IS0_LOOKUPS 6
+#define SPARX5_ES0_LOOKUPS 1
+#define SPARX5_ES2_LOOKUPS 2
+
#define SPARX5_VCAP_CID_IS0_L0 VCAP_CID_INGRESS_L0 /* IS0/CLM lookup 0 */
#define SPARX5_VCAP_CID_IS0_L1 VCAP_CID_INGRESS_L1 /* IS0/CLM lookup 1 */
#define SPARX5_VCAP_CID_IS0_L2 VCAP_CID_INGRESS_L2 /* IS0/CLM lookup 2 */
@@ -40,6 +45,22 @@
#define SPARX5_VCAP_CID_ES2_MAX \
(VCAP_CID_EGRESS_STAGE2_L1 + VCAP_CID_LOOKUP_SIZE - 1) /* ES2 Max */
+struct sparx5_vcap_inst {
+ enum vcap_type vtype; /* type of vcap */
+ int vinst; /* instance number within the same type */
+ int lookups; /* number of lookups in this vcap type */
+ int lookups_per_instance; /* number of lookups in this instance */
+ int first_cid; /* first chain id in this vcap */
+ int last_cid; /* last chain id in this vcap */
+ int count; /* number of available addresses, not in super vcap */
+ int map_id; /* id in the super vcap block mapping (if applicable) */
+ int blockno; /* starting block in super vcap (if applicable) */
+ int blocks; /* number of blocks in super vcap (if applicable) */
+ bool ingress; /* is vcap in the ingress path */
+};
+
+extern const struct sparx5_vcap_inst sparx5_vcap_inst_cfg[];
+
/* IS0 port keyset selection control */
/* IS0 ethernet, IPv4, IPv6 traffic type keyset generation */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
index ac001ae59a38..d42097aa60a0 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
@@ -16,8 +16,10 @@ static int sparx5_vlant_set_mask(struct sparx5 *sparx5, u16 vid)
/* Output mask to respective registers */
spx5_wr(mask[0], sparx5, ANA_L3_VLAN_MASK_CFG(vid));
- spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid));
- spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid));
+ if (is_sparx5(sparx5)) {
+ spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid));
+ spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid));
+ }
return 0;
}
@@ -141,15 +143,19 @@ void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable)
void sparx5_pgid_clear(struct sparx5 *spx5, int pgid)
{
spx5_wr(0, spx5, ANA_AC_PGID_CFG(pgid));
- spx5_wr(0, spx5, ANA_AC_PGID_CFG1(pgid));
- spx5_wr(0, spx5, ANA_AC_PGID_CFG2(pgid));
+ if (is_sparx5(spx5)) {
+ spx5_wr(0, spx5, ANA_AC_PGID_CFG1(pgid));
+ spx5_wr(0, spx5, ANA_AC_PGID_CFG2(pgid));
+ }
}
void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3])
{
portmask[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid));
- portmask[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid));
- portmask[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid));
+ if (is_sparx5(spx5)) {
+ portmask[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid));
+ portmask[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid));
+ }
}
void sparx5_update_fwd(struct sparx5 *sparx5)
@@ -162,26 +168,33 @@ void sparx5_update_fwd(struct sparx5 *sparx5)
bitmap_to_arr32(mask, sparx5->bridge_fwd_mask, SPX5_PORTS);
/* Update flood masks */
- for (port = PGID_UC_FLOOD; port <= PGID_BCAST; port++) {
+ for (port = sparx5_get_pgid(sparx5, PGID_UC_FLOOD);
+ port <= sparx5_get_pgid(sparx5, PGID_BCAST); port++) {
spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(port));
- spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port));
- spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port));
+ if (is_sparx5(sparx5)) {
+ spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port));
+ spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port));
+ }
}
/* Update SRC masks */
- for (port = 0; port < SPX5_PORTS; port++) {
+ for (port = 0; port < sparx5->data->consts->n_ports; port++) {
if (test_bit(port, sparx5->bridge_fwd_mask)) {
/* Allow to send to all bridged but self */
bitmap_copy(workmask, sparx5->bridge_fwd_mask, SPX5_PORTS);
clear_bit(port, workmask);
bitmap_to_arr32(mask, workmask, SPX5_PORTS);
spx5_wr(mask[0], sparx5, ANA_AC_SRC_CFG(port));
- spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port));
- spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port));
+ if (is_sparx5(sparx5)) {
+ spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port));
+ spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port));
+ }
} else {
spx5_wr(0, sparx5, ANA_AC_SRC_CFG(port));
- spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port));
- spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port));
+ if (is_sparx5(sparx5)) {
+ spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port));
+ spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port));
+ }
}
}
@@ -192,8 +205,10 @@ void sparx5_update_fwd(struct sparx5 *sparx5)
/* Apply learning mask */
spx5_wr(mask[0], sparx5, ANA_L2_AUTO_LRN_CFG);
- spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1);
- spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2);
+ if (is_sparx5(sparx5)) {
+ spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1);
+ spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2);
+ }
}
void sparx5_vlan_port_apply(struct sparx5 *sparx5,
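
The is_sparx5() guards above reflect that the port mask no longer always spans three 32-bit registers; derivatives with at most 32 ports only have the first one. A userspace sketch of splitting a port bitmap into per-register words, assuming a 65-port device (register names are modeled, nothing is read from hardware):

#include <stdio.h>
#include <stdint.h>

#define N_PORTS 65 /* assumed port count for this sketch */

int main(void)
{
	uint32_t mask[3] = { 0 };
	int ports[] = { 3, 40, 64 }; /* example member ports */
	int n_words = (N_PORTS + 31) / 32;
	int i, w;

	for (i = 0; i < 3; i++)
		mask[ports[i] / 32] |= 1u << (ports[i] % 32);

	/* A full Sparx5 writes all three words (CFG, CFG1, CFG2); a
	 * smaller derivative stops after the first, which is what the
	 * is_sparx5() guards in the patch express. */
	for (w = 0; w < n_words; w++) {
		if (w == 0)
			printf("ANA_AC_PGID_CFG  <= 0x%08x\n", mask[w]);
		else
			printf("ANA_AC_PGID_CFG%d <= 0x%08x\n", w, mask[w]);
	}
	return 0;
}
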
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index ca4ed58f1206..e97af7ac2bb2 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */
+#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/utsname.h>
@@ -8,6 +9,8 @@
#include <net/mana/mana.h>
+struct dentry *mana_debugfs_root;
+
static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
{
return readl(g->bar0_va + offset);
@@ -1516,6 +1519,12 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
gc->bar0_va = bar0_va;
gc->dev = &pdev->dev;
+ if (gc->is_pf)
+ gc->mana_pci_debugfs = debugfs_create_dir("0", mana_debugfs_root);
+ else
+ gc->mana_pci_debugfs = debugfs_create_dir(pci_slot_name(pdev->slot),
+ mana_debugfs_root);
+
err = mana_gd_setup(pdev);
if (err)
goto unmap_bar;
@@ -1529,6 +1538,13 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
cleanup_gd:
mana_gd_cleanup(pdev);
unmap_bar:
+	/*
+	 * At this point we know that the other debugfs child dirs/files
+	 * are either not yet created or already cleaned up, so removing
+	 * the PCI debugfs folder here only cleans up the adapter-MTU
+	 * file and the gc->mana_pci_debugfs folder itself.
+	 */
+ debugfs_remove_recursive(gc->mana_pci_debugfs);
pci_iounmap(pdev, bar0_va);
free_gc:
pci_set_drvdata(pdev, NULL);
@@ -1549,6 +1565,8 @@ static void mana_gd_remove(struct pci_dev *pdev)
mana_gd_cleanup(pdev);
+ debugfs_remove_recursive(gc->mana_pci_debugfs);
+
pci_iounmap(pdev, gc->bar0_va);
vfree(gc);
@@ -1600,6 +1618,8 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
mana_gd_cleanup(pdev);
+ debugfs_remove_recursive(gc->mana_pci_debugfs);
+
pci_disable_device(pdev);
}
@@ -1619,7 +1639,28 @@ static struct pci_driver mana_driver = {
.shutdown = mana_gd_shutdown,
};
-module_pci_driver(mana_driver);
+static int __init mana_driver_init(void)
+{
+ int err;
+
+ mana_debugfs_root = debugfs_create_dir("mana", NULL);
+
+ err = pci_register_driver(&mana_driver);
+ if (err)
+ debugfs_remove(mana_debugfs_root);
+
+ return err;
+}
+
+static void __exit mana_driver_exit(void)
+{
+ debugfs_remove(mana_debugfs_root);
+
+ pci_unregister_driver(&mana_driver);
+}
+
+module_init(mana_driver_init);
+module_exit(mana_driver_exit);
MODULE_DEVICE_TABLE(pci, mana_id_table);
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index c47266d1c7c2..57ac732e7707 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -3,6 +3,7 @@
#include <uapi/linux/bpf.h>
+#include <linux/debugfs.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -30,6 +31,21 @@ static void mana_adev_idx_free(int idx)
ida_free(&mana_adev_ida, idx);
}
+static ssize_t mana_dbg_q_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct gdma_queue *gdma_q = filp->private_data;
+
+ return simple_read_from_buffer(buf, count, pos, gdma_q->queue_mem_ptr,
+ gdma_q->queue_size);
+}
+
+static const struct file_operations mana_dbg_q_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = mana_dbg_q_read,
+};
+
/* Microsoft Azure Network Adapter (MANA) functions */
static int mana_open(struct net_device *ndev)
@@ -721,6 +737,13 @@ static const struct net_device_ops mana_devops = {
static void mana_cleanup_port_context(struct mana_port_context *apc)
{
+	/*
+	 * At this point all dirs/files under the vport directory
+	 * are already cleaned up, so we know that removing
+	 * apc->mana_port_debugfs cannot trigger any
+	 * use-after-free access.
+	 */
+ debugfs_remove(apc->mana_port_debugfs);
kfree(apc->rxqs);
apc->rxqs = NULL;
}
@@ -943,6 +966,8 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
else
gc->adapter_mtu = ETH_FRAME_LEN;
+ debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu);
+
return 0;
}
@@ -1228,6 +1253,8 @@ static void mana_destroy_eq(struct mana_context *ac)
if (!ac->eqs)
return;
+ debugfs_remove_recursive(ac->mana_eqs_debugfs);
+
for (i = 0; i < gc->max_num_queues; i++) {
eq = ac->eqs[i].eq;
if (!eq)
@@ -1240,6 +1267,18 @@ static void mana_destroy_eq(struct mana_context *ac)
ac->eqs = NULL;
}
+static void mana_create_eq_debugfs(struct mana_context *ac, int i)
+{
+	struct mana_eq *eq = &ac->eqs[i];
+	char eqnum[32];
+
+	sprintf(eqnum, "eq%d", i);
+	eq->mana_eq_debugfs = debugfs_create_dir(eqnum, ac->mana_eqs_debugfs);
+	debugfs_create_u32("head", 0400, eq->mana_eq_debugfs, &eq->eq->head);
+	debugfs_create_u32("tail", 0400, eq->mana_eq_debugfs, &eq->eq->tail);
+	debugfs_create_file("eq_dump", 0400, eq->mana_eq_debugfs, eq->eq, &mana_dbg_q_fops);
+}
+
static int mana_create_eq(struct mana_context *ac)
{
struct gdma_dev *gd = ac->gdma_dev;
@@ -1260,11 +1299,14 @@ static int mana_create_eq(struct mana_context *ac)
spec.eq.context = ac->eqs;
spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
+ ac->mana_eqs_debugfs = debugfs_create_dir("EQs", gc->mana_pci_debugfs);
+
for (i = 0; i < gc->max_num_queues; i++) {
spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
if (err)
goto out;
+ mana_create_eq_debugfs(ac, i);
}
return 0;
@@ -1871,6 +1913,8 @@ static void mana_destroy_txq(struct mana_port_context *apc)
return;
for (i = 0; i < apc->num_queues; i++) {
+ debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs);
+
napi = &apc->tx_qp[i].tx_cq.napi;
if (apc->tx_qp[i].txq.napi_initialized) {
napi_synchronize(napi);
@@ -1889,6 +1933,31 @@ static void mana_destroy_txq(struct mana_port_context *apc)
apc->tx_qp = NULL;
}
+static void mana_create_txq_debugfs(struct mana_port_context *apc, int idx)
+{
+ struct mana_tx_qp *tx_qp = &apc->tx_qp[idx];
+ char qnum[32];
+
+ sprintf(qnum, "TX-%d", idx);
+ tx_qp->mana_tx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs);
+ debugfs_create_u32("sq_head", 0400, tx_qp->mana_tx_debugfs,
+ &tx_qp->txq.gdma_sq->head);
+ debugfs_create_u32("sq_tail", 0400, tx_qp->mana_tx_debugfs,
+ &tx_qp->txq.gdma_sq->tail);
+ debugfs_create_u32("sq_pend_skb_qlen", 0400, tx_qp->mana_tx_debugfs,
+ &tx_qp->txq.pending_skbs.qlen);
+ debugfs_create_u32("cq_head", 0400, tx_qp->mana_tx_debugfs,
+ &tx_qp->tx_cq.gdma_cq->head);
+ debugfs_create_u32("cq_tail", 0400, tx_qp->mana_tx_debugfs,
+ &tx_qp->tx_cq.gdma_cq->tail);
+ debugfs_create_u32("cq_budget", 0400, tx_qp->mana_tx_debugfs,
+ &tx_qp->tx_cq.budget);
+ debugfs_create_file("txq_dump", 0400, tx_qp->mana_tx_debugfs,
+ tx_qp->txq.gdma_sq, &mana_dbg_q_fops);
+ debugfs_create_file("cq_dump", 0400, tx_qp->mana_tx_debugfs,
+ tx_qp->tx_cq.gdma_cq, &mana_dbg_q_fops);
+}
+
static int mana_create_txq(struct mana_port_context *apc,
struct net_device *net)
{
@@ -2000,6 +2069,8 @@ static int mana_create_txq(struct mana_port_context *apc,
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
+ mana_create_txq_debugfs(apc, i);
+
netif_napi_add_tx(net, &cq->napi, mana_poll);
napi_enable(&cq->napi);
txq->napi_initialized = true;
@@ -2027,6 +2098,8 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
if (!rxq)
return;
+ debugfs_remove_recursive(rxq->mana_rx_debugfs);
+
napi = &rxq->rx_cq.napi;
if (napi_initialized) {
@@ -2308,6 +2381,28 @@ out:
return NULL;
}
+static void mana_create_rxq_debugfs(struct mana_port_context *apc, int idx)
+{
+ struct mana_rxq *rxq;
+ char qnum[32];
+
+ rxq = apc->rxqs[idx];
+
+ sprintf(qnum, "RX-%d", idx);
+ rxq->mana_rx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs);
+ debugfs_create_u32("rq_head", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->head);
+ debugfs_create_u32("rq_tail", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->tail);
+ debugfs_create_u32("rq_nbuf", 0400, rxq->mana_rx_debugfs, &rxq->num_rx_buf);
+ debugfs_create_u32("cq_head", 0400, rxq->mana_rx_debugfs,
+ &rxq->rx_cq.gdma_cq->head);
+ debugfs_create_u32("cq_tail", 0400, rxq->mana_rx_debugfs,
+ &rxq->rx_cq.gdma_cq->tail);
+ debugfs_create_u32("cq_budget", 0400, rxq->mana_rx_debugfs, &rxq->rx_cq.budget);
+ debugfs_create_file("rxq_dump", 0400, rxq->mana_rx_debugfs, rxq->gdma_rq, &mana_dbg_q_fops);
+ debugfs_create_file("cq_dump", 0400, rxq->mana_rx_debugfs, rxq->rx_cq.gdma_cq,
+ &mana_dbg_q_fops);
+}
+
static int mana_add_rx_queues(struct mana_port_context *apc,
struct net_device *ndev)
{
@@ -2326,6 +2421,8 @@ static int mana_add_rx_queues(struct mana_port_context *apc,
u64_stats_init(&rxq->stats.syncp);
apc->rxqs[i] = rxq;
+
+ mana_create_rxq_debugfs(apc, i);
}
apc->default_rxobj = apc->rxqs[0]->rxobj;
@@ -2518,14 +2615,19 @@ void mana_query_gf_stats(struct mana_port_context *apc)
static int mana_init_port(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
+ struct gdma_dev *gd = apc->ac->gdma_dev;
u32 max_txq, max_rxq, max_queues;
int port_idx = apc->port_idx;
+ struct gdma_context *gc;
+ char vport[32];
int err;
err = mana_init_port_context(apc);
if (err)
return err;
+ gc = gd->gdma_context;
+
err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
&apc->indir_table_sz);
if (err) {
@@ -2542,7 +2644,8 @@ static int mana_init_port(struct net_device *ndev)
apc->num_queues = apc->max_queues;
eth_hw_addr_set(ndev, apc->mac_addr);
-
+ sprintf(vport, "vport%d", port_idx);
+ apc->mana_port_debugfs = debugfs_create_dir(vport, gc->mana_pci_debugfs);
return 0;
reset_apc:
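
The mana_dbg_q_read() helper added above is a bounded copy out of queue memory at the current file offset. A userspace model of what simple_read_from_buffer() does with that buffer, using a stand-in queue region:

#include <stdio.h>
#include <string.h>

/* Model of simple_read_from_buffer(): clamp the request to what is
 * left past *pos, copy, and advance the position. */
static long model_read(char *dst, size_t count, long *pos,
		       const void *from, size_t available)
{
	if (*pos >= (long)available)
		return 0;
	if (count > available - (size_t)*pos)
		count = available - (size_t)*pos;
	memcpy(dst, (const char *)from + *pos, count);
	*pos += (long)count;
	return (long)count;
}

int main(void)
{
	const char queue_mem[] = "0123456789abcdef"; /* stand-in queue */
	char buf[8];
	long pos = 0, n;

	while ((n = model_read(buf, sizeof(buf), &pos, queue_mem,
			       sizeof(queue_mem) - 1)) > 0)
		printf("read %ld bytes at offset %ld\n", n, pos - n);
	return 0;
}
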
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index dc3864377538..c419626073f5 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -91,53 +91,34 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int num_queues = apc->num_queues;
- u8 *p = data;
int i;
if (stringset != ETH_SS_STATS)
return;
- for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++) {
- memcpy(p, mana_eth_stats[i].name, ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++)
+ ethtool_puts(&data, mana_eth_stats[i].name);
for (i = 0; i < num_queues; i++) {
- sprintf(p, "rx_%d_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_%d_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_%d_xdp_drop", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_%d_xdp_tx", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_%d_xdp_redirect", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&data, "rx_%d_packets", i);
+ ethtool_sprintf(&data, "rx_%d_bytes", i);
+ ethtool_sprintf(&data, "rx_%d_xdp_drop", i);
+ ethtool_sprintf(&data, "rx_%d_xdp_tx", i);
+ ethtool_sprintf(&data, "rx_%d_xdp_redirect", i);
}
for (i = 0; i < num_queues; i++) {
- sprintf(p, "tx_%d_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_xdp_xmit", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_tso_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_tso_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_tso_inner_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_tso_inner_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_long_pkt_fmt", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_short_pkt_fmt", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_csum_partial", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_%d_mana_map_err", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&data, "tx_%d_packets", i);
+ ethtool_sprintf(&data, "tx_%d_bytes", i);
+ ethtool_sprintf(&data, "tx_%d_xdp_xmit", i);
+ ethtool_sprintf(&data, "tx_%d_tso_packets", i);
+ ethtool_sprintf(&data, "tx_%d_tso_bytes", i);
+ ethtool_sprintf(&data, "tx_%d_tso_inner_packets", i);
+ ethtool_sprintf(&data, "tx_%d_tso_inner_bytes", i);
+ ethtool_sprintf(&data, "tx_%d_long_pkt_fmt", i);
+ ethtool_sprintf(&data, "tx_%d_short_pkt_fmt", i);
+ ethtool_sprintf(&data, "tx_%d_csum_partial", i);
+ ethtool_sprintf(&data, "tx_%d_mana_map_err", i);
}
}
@@ -443,6 +424,15 @@ out:
return err;
}
+static int mana_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_OTHER;
+
+ return 0;
+}
+
const struct ethtool_ops mana_ethtool_ops = {
.get_ethtool_stats = mana_get_ethtool_stats,
.get_sset_count = mana_get_sset_count,
@@ -456,4 +446,6 @@ const struct ethtool_ops mana_ethtool_ops = {
.set_channels = mana_set_channels,
.get_ringparam = mana_get_ringparam,
.set_ringparam = mana_set_ringparam,
+ .get_link_ksettings = mana_get_link_ksettings,
+ .get_link = ethtool_op_get_link,
};
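
The ethtool_puts()/ethtool_sprintf() conversion above replaces manual "p += ETH_GSTRING_LEN" bookkeeping with helpers that advance the string cursor themselves. A userspace model of that cursor-advancing behavior, using ethtool's fixed 32-byte string slots:

#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32

/* Model of ethtool_puts(): copy one fixed-width string slot and
 * advance the caller's cursor. */
static void model_ethtool_puts(char **data, const char *str)
{
	strncpy(*data, str, ETH_GSTRING_LEN - 1);
	(*data)[ETH_GSTRING_LEN - 1] = '\0';
	*data += ETH_GSTRING_LEN;
}

int main(void)
{
	char buf[3 * ETH_GSTRING_LEN] = { 0 };
	char *p = buf;

	model_ethtool_puts(&p, "rx_0_packets");
	model_ethtool_puts(&p, "rx_0_bytes");
	model_ethtool_puts(&p, "rx_0_xdp_drop");

	for (int i = 0; i < 3; i++)
		printf("%s\n", buf + i * ETH_GSTRING_LEN);
	return 0;
}
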
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 96dc69e7141f..8bd60168624a 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -576,7 +576,7 @@ MODULE_DEVICE_TABLE(of, moxart_mac_match);
static struct platform_driver moxart_mac_driver = {
.probe = moxart_mac_probe,
- .remove_new = moxart_remove,
+ .remove = moxart_remove,
.driver = {
.name = "moxart-ethernet",
.of_match_table = moxart_mac_match,
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index a057ec3dab97..986b1f150e3b 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -228,6 +228,32 @@ ocelot_flower_parse_egress_vlan_modify(struct ocelot_vcap_filter *filter,
return 0;
}
+static int
+ocelot_flower_parse_egress_port(struct ocelot *ocelot, struct flow_cls_offload *f,
+ const struct flow_action_entry *a, bool mirror,
+ struct netlink_ext_ack *extack)
+{
+ const char *act_string = mirror ? "mirror" : "redirect";
+ int egress_port = ocelot->ops->netdev_to_port(a->dev);
+ enum flow_action_id offloadable_act_id;
+
+ offloadable_act_id = mirror ? FLOW_ACTION_MIRRED : FLOW_ACTION_REDIRECT;
+
+ /* Mirroring towards foreign interfaces is handled in software */
+ if (egress_port < 0 || a->id != offloadable_act_id) {
+ if (f->common.skip_sw) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Can only %s to %s if filter also runs in software",
+ act_string, egress_port < 0 ?
+ "CPU" : "ingress of ocelot port");
+ return -EOPNOTSUPP;
+ }
+ egress_port = ocelot->num_phys_ports;
+ }
+
+ return egress_port;
+}
+
static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
bool ingress, struct flow_cls_offload *f,
struct ocelot_vcap_filter *filter)
@@ -356,6 +382,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_REDIRECT_INGRESS:
if (filter->block_id != VCAP_IS2) {
NL_SET_ERR_MSG_MOD(extack,
"Redirect action can only be offloaded to VCAP IS2");
@@ -366,17 +393,19 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
"Last action must be GOTO");
return -EOPNOTSUPP;
}
- egress_port = ocelot->ops->netdev_to_port(a->dev);
- if (egress_port < 0) {
- NL_SET_ERR_MSG_MOD(extack,
- "Destination not an ocelot port");
- return -EOPNOTSUPP;
- }
+
+ egress_port = ocelot_flower_parse_egress_port(ocelot, f,
+ a, false,
+ extack);
+ if (egress_port < 0)
+ return egress_port;
+
filter->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
filter->action.port_mask = BIT(egress_port);
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_MIRRED:
+ case FLOW_ACTION_MIRRED_INGRESS:
if (filter->block_id != VCAP_IS2) {
NL_SET_ERR_MSG_MOD(extack,
"Mirror action can only be offloaded to VCAP IS2");
@@ -387,12 +416,13 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
"Last action must be GOTO");
return -EOPNOTSUPP;
}
- egress_port = ocelot->ops->netdev_to_port(a->dev);
- if (egress_port < 0) {
- NL_SET_ERR_MSG_MOD(extack,
- "Destination not an ocelot port");
- return -EOPNOTSUPP;
- }
+
+ egress_port = ocelot_flower_parse_egress_port(ocelot, f,
+ a, true,
+ extack);
+ if (egress_port < 0)
+ return egress_port;
+
filter->egress_port.value = egress_port;
filter->action.mirror_ena = true;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
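
ocelot_flower_parse_egress_port() above folds the "valid port index or negative errno" decision into one return value, so both call sites shrink to a single sign check. A simplified standalone model of that convention; the port count of 8 is an assumption for illustration:

#include <stdio.h>
#include <errno.h>

#define NUM_PHYS_PORTS 8 /* assumed ocelot->num_phys_ports */

static int parse_egress_port(int netdev_port, int skip_sw)
{
	if (netdev_port < 0) {		/* destination is foreign */
		if (skip_sw)		/* hw-only filter cannot do it */
			return -EOPNOTSUPP;
		return NUM_PHYS_PORTS;	/* handle in software instead */
	}
	return netdev_port;
}

int main(void)
{
	int port = parse_egress_port(-1, 1);

	if (port < 0)
		printf("reject filter: %d\n", port);
	else
		printf("egress port %d\n", port);
	return 0;
}
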
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 7c9540a71725..558e03301aa8 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -730,7 +730,7 @@ static void ocelot_get_stats64(struct net_device *dev,
static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
- u16 vid, u16 flags,
+ u16 vid, u16 flags, bool *notified,
struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
@@ -744,7 +744,7 @@ static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
- struct netlink_ext_ack *extack)
+ bool *notified, struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index c09dd2e3343c..055b55651a49 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -416,7 +416,7 @@ static void mscc_ocelot_remove(struct platform_device *pdev)
static struct platform_driver mscc_ocelot_driver = {
.probe = mscc_ocelot_probe,
- .remove_new = mscc_ocelot_remove,
+ .remove = mscc_ocelot_remove,
.driver = {
.name = "ocelot-switch",
.of_match_table = mscc_ocelot_match,
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index 2b6e097df28f..6d29d2e1fa7c 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -241,7 +241,7 @@ static void jazz_sonic_device_remove(struct platform_device *pdev)
static struct platform_driver jazz_sonic_driver = {
.probe = jazz_sonic_probe,
- .remove_new = jazz_sonic_device_remove,
+ .remove = jazz_sonic_device_remove,
.driver = {
.name = jazz_sonic_string,
},
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 2fc63860dbdb..a740e24a9759 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -545,7 +545,7 @@ static void mac_sonic_platform_remove(struct platform_device *pdev)
static struct platform_driver mac_sonic_platform_driver = {
.probe = mac_sonic_platform_probe,
- .remove_new = mac_sonic_platform_remove,
+ .remove = mac_sonic_platform_remove,
.driver = {
.name = "macsonic",
},
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 998586872599..bea969dfa536 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -2090,7 +2090,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
*/
/* Ramit : 1024 DMA is not a good idea, it ends up banging
* some DELL and COMPAQ SMP systems
- * Turn on ALP, only we are accpeting Jumbo Packets */
+ * Turn on ALP, only we are accepting Jumbo Packets */
writel(RXCFG_AEP | RXCFG_ARP | RXCFG_AIRL | RXCFG_RX_FD
| RXCFG_STRIPCRC
//| RXCFG_ALP
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 8943e7244310..c01a4cb5dc0f 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -264,7 +264,7 @@ static void xtsonic_device_remove(struct platform_device *pdev)
static struct platform_driver xtsonic_driver = {
.probe = xtsonic_probe,
- .remove_new = xtsonic_device_remove,
+ .remove = xtsonic_device_remove,
.driver = {
.name = xtsonic_string,
},
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index f235e76e4ce9..f8016dc25e0a 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -8523,7 +8523,7 @@ static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot.
- * At this point, the card has exprienced a hard reset,
+ * At this point, the card has experienced a hard reset,
* followed by fixups by BIOS, and has its config space
* set up identically to what it was at cold boot.
*/
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index d215efc6cad0..f1c6c47564b1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -1179,7 +1179,7 @@ int nfp_nfd3_poll(struct napi_struct *napi, int budget)
} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
- net_dim(&r_vec->rx_dim, dim_sample);
+ net_dim(&r_vec->rx_dim, &dim_sample);
}
if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
@@ -1194,7 +1194,7 @@ int nfp_nfd3_poll(struct napi_struct *napi, int budget)
} while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
- net_dim(&r_vec->tx_dim, dim_sample);
+ net_dim(&r_vec->tx_dim, &dim_sample);
}
return pkts_polled;
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index dae5af7d1845..ebeb6ab4465c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -1289,7 +1289,7 @@ int nfp_nfdk_poll(struct napi_struct *napi, int budget)
} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
- net_dim(&r_vec->rx_dim, dim_sample);
+ net_dim(&r_vec->rx_dim, &dim_sample);
}
if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
@@ -1304,7 +1304,7 @@ int nfp_nfdk_poll(struct napi_struct *napi, int budget)
} while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
- net_dim(&r_vec->tx_dim, dim_sample);
+ net_dim(&r_vec->tx_dim, &dim_sample);
}
return pkts_polled;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 6e0929af0f72..98e098c09c03 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -829,7 +829,7 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
return err;
}
- irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
+ irq_update_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
r_vec->irq_entry);
@@ -840,7 +840,7 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
static void
nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
{
- irq_set_affinity_hint(r_vec->irq_vector, NULL);
+ irq_update_affinity_hint(r_vec->irq_vector, NULL);
nfp_net_napi_del(&nn->dp, r_vec);
free_irq(r_vec->irq_vector, r_vec);
}
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 2aa4ad9cf96e..230d5ff99dd7 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1415,7 +1415,7 @@ static void nixge_remove(struct platform_device *pdev)
static struct platform_driver nixge_driver = {
.probe = nixge_probe,
- .remove_new = nixge_remove,
+ .remove = nixge_remove,
.driver = {
.name = "nixge",
.of_match_table = nixge_dt_ids,
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index dd3e58a1319c..8b9a3e3bba30 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1503,7 +1503,7 @@ MODULE_DEVICE_TABLE(of, lpc_eth_match);
static struct platform_driver lpc_eth_driver = {
.probe = lpc_eth_drv_probe,
- .remove_new = lpc_eth_drv_remove,
+ .remove = lpc_eth_drv_remove,
#ifdef CONFIG_PM
.suspend = lpc_eth_drv_suspend,
.resume = lpc_eth_drv_resume,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 0eeda7e502db..2ac59564ded1 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -928,7 +928,7 @@ static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
dim_update_sample(qcq->cq.bound_intr->rearm_count,
pkts, bytes, &dim_sample);
- net_dim(&qcq->dim, dim_sample);
+ net_dim(&qcq->dim, &dim_sample);
}
int ionic_tx_napi(struct napi_struct *napi, int budget)
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 8c4cb910e09b..e7d8999049e1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -648,18 +648,18 @@ netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
static void
netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
- int index;
+ const char *str;
+ int i;
switch (stringset) {
case ETH_SS_TEST:
- memcpy(data, *netxen_nic_gstrings_test,
- NETXEN_NIC_TEST_LEN * ETH_GSTRING_LEN);
+ for (i = 0; i < NETXEN_NIC_TEST_LEN; i++)
+ ethtool_puts(&data, netxen_nic_gstrings_test[i]);
break;
case ETH_SS_STATS:
- for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) {
- memcpy(data + index * ETH_GSTRING_LEN,
- netxen_nic_gstrings_stats[index].stat_string,
- ETH_GSTRING_LEN);
+ for (i = 0; i < NETXEN_NIC_STATS_LEN; i++) {
+ str = netxen_nic_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index f67be4b8ad43..464a72afb758 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -2873,6 +2873,7 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
false,
SPLIT_TYPE_NONE, 0);
}
+ cond_resched();
}
return offset;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 6263f847b6b9..9e5f0dbc8a07 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -596,6 +596,7 @@ static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
barrier();
while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
udelay(DMAE_MIN_WAIT_TIME);
+ cond_resched();
if (++wait_cnt > wait_cnt_limit) {
DP_NOTICE(p_hwfn->cdev,
"Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 16e6bd466143..26a714bfad4e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -459,12 +459,11 @@ static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
static int
_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct qed_mcp_mb_params *p_mb_params,
- u32 max_retries, u32 usecs)
+ struct qed_mcp_mb_params *p_mb_params)
{
- u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
struct qed_mcp_cmd_elem *p_cmd_elem;
u16 seq_num;
+ u32 cnt = 0;
int rc = 0;
/* Wait until the mailbox is non-occupied */
@@ -488,12 +487,13 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
- msleep(msecs);
+ usleep_range(QED_MCP_RESP_ITER_US,
+ QED_MCP_RESP_ITER_US * 2);
else
- udelay(usecs);
- } while (++cnt < max_retries);
+ udelay(QED_MCP_RESP_ITER_US);
+ } while (++cnt < QED_DRV_MB_MAX_RETRIES);
- if (cnt >= max_retries) {
+ if (cnt >= QED_DRV_MB_MAX_RETRIES) {
DP_NOTICE(p_hwfn,
"The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
p_mb_params->cmd, p_mb_params->param);
@@ -520,9 +520,10 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
*/
if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
- msleep(msecs);
+ usleep_range(QED_MCP_RESP_ITER_US,
+ QED_MCP_RESP_ITER_US * 2);
else
- udelay(usecs);
+ udelay(QED_MCP_RESP_ITER_US);
spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
@@ -536,9 +537,9 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
goto err;
spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
- } while (++cnt < max_retries);
+ } while (++cnt < QED_DRV_MB_MAX_RETRIES);
- if (cnt >= max_retries) {
+ if (cnt >= QED_DRV_MB_MAX_RETRIES) {
DP_NOTICE(p_hwfn,
"The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
p_mb_params->cmd, p_mb_params->param);
@@ -564,7 +565,8 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
"MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
p_mb_params->mcp_resp,
p_mb_params->mcp_param,
- (cnt * usecs) / 1000, (cnt * usecs) % 1000);
+ (cnt * QED_MCP_RESP_ITER_US) / 1000,
+ (cnt * QED_MCP_RESP_ITER_US) % 1000);
/* Clear the sequence number from the MFW response */
p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
@@ -581,8 +583,6 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
struct qed_mcp_mb_params *p_mb_params)
{
size_t union_data_size = sizeof(union drv_union_data);
- u32 max_retries = QED_DRV_MB_MAX_RETRIES;
- u32 usecs = QED_MCP_RESP_ITER_US;
/* MCP not initialized */
if (!qed_mcp_is_init(p_hwfn)) {
@@ -606,13 +606,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
- max_retries = DIV_ROUND_UP(max_retries, 1000);
- usecs *= 1000;
- }
-
- return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
- usecs);
+ return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params);
}
static int _qed_mcp_cmd(struct qed_hwfn *p_hwfn,
@@ -3085,20 +3079,13 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
DRV_MB_PARAM_NVM_LEN_OFFSET),
&resp, &resp_param,
&read_len,
- (u32 *)(p_buf + offset), false);
+ (u32 *)(p_buf + offset), true);
if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
break;
}
- /* This can be a lengthy process, and it's possible scheduler
- * isn't preemptible. Sleep a bit to prevent CPU hogging.
- */
- if (bytes_left % 0x1000 <
- (bytes_left - read_len) % 0x1000)
- usleep_range(1000, 2000);
-
offset += read_len;
bytes_left -= read_len;
}
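
With the CAN_SLEEP scaling removed, every caller now retries the same fixed number of times with the same per-iteration delay; only the delay primitive differs between atomic and sleepable context. A sketch of that unified loop, with illustrative constants standing in for QED_MCP_RESP_ITER_US and QED_DRV_MB_MAX_RETRIES:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_ITER_US         10
#define EXAMPLE_MAX_RETRIES     500000

static int example_wait(bool can_sleep, bool (*done)(void))
{
        u32 cnt = 0;

        do {
                if (done())
                        return 0;
                if (can_sleep)  /* sleepable context: be scheduler friendly */
                        usleep_range(EXAMPLE_ITER_US, EXAMPLE_ITER_US * 2);
                else            /* atomic context: busy-wait */
                        udelay(EXAMPLE_ITER_US);
        } while (++cnt < EXAMPLE_MAX_RETRIES);

        return -EBUSY;
}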
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 97b059be1041..e50e1df0a433 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -272,16 +272,14 @@ static void qede_get_strings_stats_txq(struct qede_dev *edev,
{
int i;
- for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
+ for (i = 0; i < QEDE_NUM_TQSTATS; i++)
if (txq->is_xdp)
- sprintf(*buf, "%d [XDP]: %s",
- QEDE_TXQ_XDP_TO_IDX(edev, txq),
- qede_tqstats_arr[i].string);
+ ethtool_sprintf(buf, "%d [XDP]: %s",
+ QEDE_TXQ_XDP_TO_IDX(edev, txq),
+ qede_tqstats_arr[i].string);
else
- sprintf(*buf, "%d_%d: %s", txq->index, txq->cos,
- qede_tqstats_arr[i].string);
- *buf += ETH_GSTRING_LEN;
- }
+ ethtool_sprintf(buf, "%d_%d: %s", txq->index, txq->cos,
+ qede_tqstats_arr[i].string);
}
static void qede_get_strings_stats_rxq(struct qede_dev *edev,
@@ -289,11 +287,9 @@ static void qede_get_strings_stats_rxq(struct qede_dev *edev,
{
int i;
- for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
- sprintf(*buf, "%d: %s", rxq->rxq_id,
- qede_rqstats_arr[i].string);
- *buf += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < QEDE_NUM_RQSTATS; i++)
+ ethtool_sprintf(buf, "%d: %s", rxq->rxq_id,
+ qede_rqstats_arr[i].string);
}
static bool qede_is_irrelevant_stat(struct qede_dev *edev, int stat_index)
@@ -331,26 +327,26 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
for (i = 0; i < QEDE_NUM_STATS; i++) {
if (qede_is_irrelevant_stat(edev, i))
continue;
- strcpy(buf, qede_stats_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ ethtool_puts(&buf, qede_stats_arr[i].string);
}
}
static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct qede_dev *edev = netdev_priv(dev);
+ int i;
switch (stringset) {
case ETH_SS_STATS:
qede_get_strings_stats(edev, buf);
break;
case ETH_SS_PRIV_FLAGS:
- memcpy(buf, qede_private_arr,
- ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN);
+ for (i = 0; i < QEDE_PRI_FLAG_LEN; i++)
+ ethtool_puts(&buf, qede_private_arr[i]);
break;
case ETH_SS_TEST:
- memcpy(buf, qede_tests_str_arr,
- ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX);
+ for (i = 0; i < QEDE_ETHTOOL_TEST_MAX; i++)
+ ethtool_puts(&buf, qede_tests_str_arr[i]);
break;
default:
DP_VERBOSE(edev, QED_MSG_DEBUG,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index c1436e1554de..17450e05c437 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1196,60 +1196,56 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int index, i, num_stats;
+ const char *str;
switch (stringset) {
case ETH_SS_TEST:
- memcpy(data, *qlcnic_gstrings_test,
- QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
+ for (i = 0; i < QLCNIC_TEST_LEN; i++)
+ ethtool_puts(&data, qlcnic_gstrings_test[i]);
break;
case ETH_SS_STATS:
num_stats = ARRAY_SIZE(qlcnic_tx_queue_stats_strings);
- for (i = 0; i < adapter->drv_tx_rings; i++) {
+ for (i = 0; i < adapter->drv_tx_rings; i++)
for (index = 0; index < num_stats; index++) {
- sprintf(data, "tx_queue_%d %s", i,
- qlcnic_tx_queue_stats_strings[index]);
- data += ETH_GSTRING_LEN;
+ str = qlcnic_tx_queue_stats_strings[index];
+ ethtool_sprintf(&data, "tx_queue_%d %s", i,
+ str);
}
- }
- for (index = 0; index < QLCNIC_STATS_LEN; index++) {
- memcpy(data + index * ETH_GSTRING_LEN,
- qlcnic_gstrings_stats[index].stat_string,
- ETH_GSTRING_LEN);
+ for (i = 0; i < QLCNIC_STATS_LEN; i++) {
+ str = qlcnic_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
if (qlcnic_83xx_check(adapter)) {
num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings);
- for (i = 0; i < num_stats; i++, index++)
- memcpy(data + index * ETH_GSTRING_LEN,
- qlcnic_83xx_tx_stats_strings[i],
- ETH_GSTRING_LEN);
+ for (i = 0; i < num_stats; i++) {
+ str = qlcnic_83xx_tx_stats_strings[i];
+ ethtool_puts(&data, str);
+ }
num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
- for (i = 0; i < num_stats; i++, index++)
- memcpy(data + index * ETH_GSTRING_LEN,
- qlcnic_83xx_mac_stats_strings[i],
- ETH_GSTRING_LEN);
+ for (i = 0; i < num_stats; i++) {
+ str = qlcnic_83xx_mac_stats_strings[i];
+ ethtool_puts(&data, str);
+ }
num_stats = ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
- for (i = 0; i < num_stats; i++, index++)
- memcpy(data + index * ETH_GSTRING_LEN,
- qlcnic_83xx_rx_stats_strings[i],
- ETH_GSTRING_LEN);
+ for (i = 0; i < num_stats; i++) {
+ str = qlcnic_83xx_rx_stats_strings[i];
+ ethtool_puts(&data, str);
+ }
return;
} else {
num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
- for (i = 0; i < num_stats; i++, index++)
- memcpy(data + index * ETH_GSTRING_LEN,
- qlcnic_83xx_mac_stats_strings[i],
- ETH_GSTRING_LEN);
+ for (i = 0; i < num_stats; i++) {
+ str = qlcnic_83xx_mac_stats_strings[i];
+ ethtool_puts(&data, str);
+ }
}
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return;
num_stats = ARRAY_SIZE(qlcnic_device_gstrings_stats);
- for (i = 0; i < num_stats; index++, i++) {
- memcpy(data + index * ETH_GSTRING_LEN,
- qlcnic_device_gstrings_stats[i],
- ETH_GSTRING_LEN);
- }
+ for (i = 0; i < num_stats; i++)
+ ethtool_puts(&data, qlcnic_device_gstrings_stats[i]);
}
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index b3588a1ebc25..eb69121df726 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -367,7 +367,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *netdev,
- const unsigned char *addr, u16 vid,
+ const unsigned char *addr, u16 vid, bool *notified,
struct netlink_ext_ack *extack)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -394,7 +394,7 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *netdev,
const unsigned char *addr, u16 vid, u16 flags,
- struct netlink_ext_ack *extack)
+ bool *notified, struct netlink_ext_ack *extack)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err = 0;
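
The ndo_fdb_add/del prototypes gained a bool *notified out-parameter: an implementation that emits its own RTM_NEWNEIGH/RTM_DELNEIGH notification can set it so the rtnetlink core does not duplicate the event. A hedged stub showing the shape (all names hypothetical, hardware programming elided):

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/neighbour.h>

static int example_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                           struct net_device *netdev,
                           const unsigned char *addr, u16 vid, u16 flags,
                           bool *notified, struct netlink_ext_ack *extack)
{
        /* program the hardware FDB entry here ... */

        /* leave *notified false: the core will emit the netlink event */
        return 0;
}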
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index e4bc18009d08..a508ebc4b206 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -293,6 +293,11 @@ static struct sgmii_ops qdf2400_ops = {
};
#endif
+struct emac_match_data {
+ struct sgmii_ops **sgmii_ops;
+ struct device *target_device;
+};
+
static int emac_sgmii_acpi_match(struct device *dev, void *data)
{
#ifdef CONFIG_ACPI
@@ -303,7 +308,7 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data)
{}
};
const struct acpi_device_id *id = acpi_match_device(match_table, dev);
- struct sgmii_ops **ops = data;
+ struct emac_match_data *match_data = data;
if (id) {
acpi_handle handle = ACPI_HANDLE(dev);
@@ -324,10 +329,12 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data)
switch (hrv) {
case 1:
- *ops = &qdf2432_ops;
+ *match_data->sgmii_ops = &qdf2432_ops;
+ match_data->target_device = dev;
return 1;
case 2:
- *ops = &qdf2400_ops;
+ *match_data->sgmii_ops = &qdf2400_ops;
+ match_data->target_device = dev;
return 1;
}
}
@@ -356,16 +363,21 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
int ret;
if (has_acpi_companion(&pdev->dev)) {
+ struct emac_match_data match_data = {
+ .sgmii_ops = &phy->sgmii_ops,
+ .target_device = NULL,
+ };
struct device *dev;
- dev = device_find_child(&pdev->dev, &phy->sgmii_ops,
- emac_sgmii_acpi_match);
+ device_for_each_child(&pdev->dev, &match_data, emac_sgmii_acpi_match);
+ dev = match_data.target_device;
if (!dev) {
dev_warn(&pdev->dev, "cannot find internal phy node\n");
return 0;
}
+ get_device(dev);
sgmii_pdev = to_platform_device(dev);
} else {
const struct of_device_id *match;
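
device_find_child() forwards only a single data pointer, so returning the matched device alongside the ops pointer needs a small context struct walked with device_for_each_child(); the caller then takes its own reference with get_device(), mirroring what device_find_child() did internally. A generic sketch of the pattern with hypothetical names:

#include <linux/device.h>

struct example_match {
        u32 wanted_id;
        struct device *found;
};

static int example_match_fn(struct device *dev, void *data)
{
        struct example_match *m = data;

        if (dev->id != m->wanted_id)
                return 0;       /* keep walking the children */
        m->found = dev;
        return 1;               /* non-zero stops the iteration */
}

static struct device *example_find(struct device *parent, u32 id)
{
        struct example_match m = { .wanted_id = id };

        device_for_each_child(parent, &m, example_match_fn);
        if (m.found)
                get_device(m.found);    /* hold a ref, as device_find_child() would */
        return m.found;
}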
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 99d4647bf245..699a8afc214a 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -760,7 +760,7 @@ static void emac_shutdown(struct platform_device *pdev)
static struct platform_driver emac_platform_driver = {
.probe = emac_probe,
- .remove_new = emac_remove,
+ .remove = emac_remove,
.driver = {
.name = "qcom-emac",
.of_match_table = emac_dt_match,
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index ad06da0fdaa0..13deb3da4a64 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -98,8 +98,8 @@ qcaspi_info_show(struct seq_file *s, void *what)
seq_printf(s, "IRQ : %d\n",
qca->spi_dev->irq);
- seq_printf(s, "INTR : %lx\n",
- qca->intr);
+ seq_printf(s, "FLAGS : %lx\n",
+ qca->flags);
seq_printf(s, "SPI max speed : %lu\n",
(unsigned long)qca->spi_dev->max_speed_hz);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 8f7ce6b51a1c..ef9c02b000e4 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -35,7 +35,8 @@
#define MAX_DMA_BURST_LEN 5000
-#define SPI_INTR 0
+#define SPI_INTR 0
+#define SPI_RESET 1
/* Modules parameters */
#define QCASPI_CLK_SPEED_MIN 1000000
@@ -495,7 +496,7 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
if (qca->sync == QCASPI_SYNC_READY)
qca->stats.bad_signature++;
- qca->sync = QCASPI_SYNC_UNKNOWN;
+ set_bit(SPI_RESET, &qca->flags);
netdev_dbg(qca->net_dev, "sync: got CPU on, but signature was invalid, restart\n");
return;
} else {
@@ -505,12 +506,17 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
if (wrbuf_space != QCASPI_HW_BUF_LEN) {
netdev_dbg(qca->net_dev, "sync: got CPU on, but wrbuf not empty. reset!\n");
qca->sync = QCASPI_SYNC_UNKNOWN;
+ qca->stats.buf_avail_err++;
} else {
netdev_dbg(qca->net_dev, "sync: got CPU on, now in sync\n");
qca->sync = QCASPI_SYNC_READY;
return;
}
}
+ } else {
+ /* Handle reset only on QCASPI_EVENT_UPDATE */
+ if (test_and_clear_bit(SPI_RESET, &qca->flags))
+ qca->sync = QCASPI_SYNC_UNKNOWN;
}
switch (qca->sync) {
@@ -521,7 +527,7 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
if (signature != QCASPI_GOOD_SIGNATURE) {
- qca->sync = QCASPI_SYNC_UNKNOWN;
+ set_bit(SPI_RESET, &qca->flags);
qca->stats.bad_signature++;
netdev_dbg(qca->net_dev, "sync: bad signature, restart\n");
/* don't reset right away */
@@ -552,7 +558,7 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
qca->reset_count);
if (qca->reset_count >= QCASPI_RESET_TIMEOUT) {
/* reset did not seem to take place, try again */
- qca->sync = QCASPI_SYNC_UNKNOWN;
+ set_bit(SPI_RESET, &qca->flags);
qca->stats.reset_timeout++;
netdev_dbg(qca->net_dev, "sync: reset timeout, restarting process.\n");
}
@@ -581,14 +587,14 @@ qcaspi_spi_thread(void *data)
continue;
}
- if (!test_bit(SPI_INTR, &qca->intr) &&
+ if (!qca->flags &&
!qca->txr.skb[qca->txr.head])
schedule();
set_current_state(TASK_RUNNING);
netdev_dbg(qca->net_dev, "have work to do. int: %lu, tx_skb: %p\n",
- qca->intr,
+ qca->flags,
qca->txr.skb[qca->txr.head]);
qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE);
@@ -602,7 +608,7 @@ qcaspi_spi_thread(void *data)
msleep(QCASPI_QCA7K_REBOOT_TIME_MS);
}
- if (test_and_clear_bit(SPI_INTR, &qca->intr)) {
+ if (test_and_clear_bit(SPI_INTR, &qca->flags)) {
start_spi_intr_handling(qca, &intr_cause);
if (intr_cause & SPI_INT_CPU_ON) {
@@ -627,7 +633,7 @@ qcaspi_spi_thread(void *data)
/* restart sync */
netdev_dbg(qca->net_dev, "===> rdbuf error!\n");
qca->stats.read_buf_err++;
- qca->sync = QCASPI_SYNC_UNKNOWN;
+ set_bit(SPI_RESET, &qca->flags);
continue;
}
@@ -635,7 +641,7 @@ qcaspi_spi_thread(void *data)
/* restart sync */
netdev_dbg(qca->net_dev, "===> wrbuf error!\n");
qca->stats.write_buf_err++;
- qca->sync = QCASPI_SYNC_UNKNOWN;
+ set_bit(SPI_RESET, &qca->flags);
continue;
}
@@ -664,7 +670,7 @@ qcaspi_intr_handler(int irq, void *data)
{
struct qcaspi *qca = data;
- set_bit(SPI_INTR, &qca->intr);
+ set_bit(SPI_INTR, &qca->flags);
if (qca->spi_thread)
wake_up_process(qca->spi_thread);
@@ -680,7 +686,7 @@ qcaspi_netdev_open(struct net_device *dev)
if (!qca)
return -EINVAL;
- set_bit(SPI_INTR, &qca->intr);
+ set_bit(SPI_INTR, &qca->flags);
qca->sync = QCASPI_SYNC_UNKNOWN;
qcafrm_fsm_init_spi(&qca->frm_handle);
@@ -799,7 +805,7 @@ qcaspi_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
jiffies, jiffies - dev_trans_start(dev));
qca->net_dev->stats.tx_errors++;
/* Trigger tx queue flush and QCA7000 reset */
- qca->sync = QCASPI_SYNC_UNKNOWN;
+ set_bit(SPI_RESET, &qca->flags);
if (qca->spi_thread)
wake_up_process(qca->spi_thread);
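
Renaming intr to flags turns the word into a small event bitmap: the IRQ handler and the watchdog each queue their event atomically, and the SPI thread consumes each one exactly once with test_and_clear_bit(). A distilled sketch of the pattern (names illustrative):

#include <linux/bitops.h>

#define EV_IRQ          0       /* interrupt pending */
#define EV_RESET        1       /* sync restart requested */

static unsigned long example_flags;

/* producer side, safe from hard-IRQ context */
static void example_queue_irq(void)
{
        set_bit(EV_IRQ, &example_flags);
}

/* consumer side, e.g. a kthread main loop */
static void example_thread_iteration(void)
{
        if (test_and_clear_bit(EV_RESET, &example_flags)) {
                /* handle the queued reset exactly once */
        }
        if (test_and_clear_bit(EV_IRQ, &example_flags)) {
                /* service the interrupt cause */
        }
}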
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index 8f4808695e82..7ba5c9e2f61c 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -81,7 +81,7 @@ struct qcaspi {
struct qcafrm_handle frm_handle;
struct sk_buff *rx_skb;
- unsigned long intr;
+ unsigned long flags;
u16 reset_count;
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index e2db944e6fa8..be4c9622618d 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -68,6 +68,7 @@ enum mac_version {
/* support for RTL_GIGA_MAC_VER_60 has been removed */
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_VER_63,
+ RTL_GIGA_MAC_VER_64,
RTL_GIGA_MAC_VER_65,
RTL_GIGA_MAC_VER_66,
RTL_GIGA_MAC_NONE
diff --git a/drivers/net/ethernet/realtek/r8169_firmware.c b/drivers/net/ethernet/realtek/r8169_firmware.c
index ed6e721b1555..bf055078a855 100644
--- a/drivers/net/ethernet/realtek/r8169_firmware.c
+++ b/drivers/net/ethernet/realtek/r8169_firmware.c
@@ -215,7 +215,7 @@ int rtl_fw_request_firmware(struct rtl_fw *rtl_fw)
{
int rc;
- rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, rtl_fw->dev);
+ rc = firmware_request_nowarn(&rtl_fw->fw, rtl_fw->fw_name, rtl_fw->dev);
if (rc < 0)
goto out;
@@ -227,7 +227,7 @@ int rtl_fw_request_firmware(struct rtl_fw *rtl_fw)
return 0;
out:
- dev_err(rtl_fw->dev, "Unable to load firmware %s (%d)\n",
- rtl_fw->fw_name, rc);
+ dev_warn(rtl_fw->dev, "Unable to load firmware %s (%d)\n",
+ rtl_fw->fw_name, rc);
return rc;
}
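
firmware_request_nowarn() behaves like request_firmware() but suppresses the firmware core's own error logging when the file is simply not shipped, leaving the severity decision to the driver; accordingly the driver downgrades its message to a warning. A small sketch, with a hypothetical firmware name:

#include <linux/device.h>
#include <linux/firmware.h>

static int example_load_fw(struct device *dev, const struct firmware **fw)
{
        int rc = firmware_request_nowarn(fw, "vendor/optional.fw", dev);

        if (rc < 0)     /* missing firmware is tolerated, only warn */
                dev_warn(dev, "optional firmware not available (%d)\n", rc);
        return rc;
}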
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 713a89bb21e9..739707a7b40f 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
+#include <linux/hwmon.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
@@ -55,6 +56,7 @@
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw"
+#define FIRMWARE_8125D_1 "rtl_nic/rtl8125d-1.fw"
#define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw"
#define FIRMWARE_8126A_3 "rtl_nic/rtl8126a-3.fw"
@@ -138,6 +140,7 @@ static const struct {
[RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3},
/* reserve 62 for CFG_METHOD_4 in the vendor driver */
[RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2},
+ [RTL_GIGA_MAC_VER_64] = {"RTL8125D", FIRMWARE_8125D_1},
[RTL_GIGA_MAC_VER_65] = {"RTL8126A", FIRMWARE_8126A_2},
[RTL_GIGA_MAC_VER_66] = {"RTL8126A", FIRMWARE_8126A_3},
};
@@ -344,6 +347,8 @@ enum rtl8125_registers {
TxPoll_8125 = 0x90,
LEDSEL3 = 0x96,
MAC0_BKP = 0x19e0,
+ RSS_CTRL_8125 = 0x4500,
+ Q_NUM_CTRL_8125 = 0x4800,
EEE_TXIDLE_TIMER_8125 = 0x6048,
};
@@ -617,7 +622,6 @@ struct rtl8169_tc_offsets {
};
enum rtl_flag {
- RTL_FLAG_TASK_ENABLED = 0,
RTL_FLAG_TASK_RESET_PENDING,
RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
RTL_FLAG_TASK_TX_TIMEOUT,
@@ -658,13 +662,9 @@ struct rtl8169_private {
struct work_struct work;
} wk;
- raw_spinlock_t config25_lock;
raw_spinlock_t mac_ocp_lock;
struct mutex led_lock; /* serialize LED ctrl RMW access */
- raw_spinlock_t cfg9346_usage_lock;
- int cfg9346_usage_count;
-
unsigned supports_gmii:1;
unsigned aspm_manageable:1;
unsigned dash_enabled:1;
@@ -707,6 +707,7 @@ MODULE_FIRMWARE(FIRMWARE_8168FP_3);
MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
MODULE_FIRMWARE(FIRMWARE_8125B_2);
+MODULE_FIRMWARE(FIRMWARE_8125D_1);
MODULE_FIRMWARE(FIRMWARE_8126A_2);
MODULE_FIRMWARE(FIRMWARE_8126A_3);
@@ -717,22 +718,12 @@ static inline struct device *tp_to_dev(struct rtl8169_private *tp)
static void rtl_lock_config_regs(struct rtl8169_private *tp)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
- if (!--tp->cfg9346_usage_count)
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
- raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
}
static void rtl_unlock_config_regs(struct rtl8169_private *tp)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
- if (!tp->cfg9346_usage_count++)
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
- raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
+ RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
}
static void rtl_pci_commit(struct rtl8169_private *tp)
@@ -743,24 +734,32 @@ static void rtl_pci_commit(struct rtl8169_private *tp)
static void rtl_mod_config2(struct rtl8169_private *tp, u8 clear, u8 set)
{
- unsigned long flags;
u8 val;
- raw_spin_lock_irqsave(&tp->config25_lock, flags);
val = RTL_R8(tp, Config2);
RTL_W8(tp, Config2, (val & ~clear) | set);
- raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
}
static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set)
{
- unsigned long flags;
u8 val;
- raw_spin_lock_irqsave(&tp->config25_lock, flags);
val = RTL_R8(tp, Config5);
RTL_W8(tp, Config5, (val & ~clear) | set);
- raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
+}
+
+static void r8169_mod_reg8_cond(struct rtl8169_private *tp, int reg,
+ u8 bits, bool cond)
+{
+ u8 val, old_val;
+
+ old_val = RTL_R8(tp, reg);
+ if (cond)
+ val = old_val | bits;
+ else
+ val = old_val & ~bits;
+ if (val != old_val)
+ RTL_W8(tp, reg, val);
}
static bool rtl_is_8125(struct rtl8169_private *tp)
@@ -1346,40 +1345,19 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
}
-static void rtl_dash_loop_wait(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned long usecs, int n, bool high)
-{
- if (!tp->dash_enabled)
- return;
- rtl_loop_wait(tp, c, usecs, n, high);
-}
-
-static void rtl_dash_loop_wait_high(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned long d, int n)
-{
- rtl_dash_loop_wait(tp, c, d, n, true);
-}
-
-static void rtl_dash_loop_wait_low(struct rtl8169_private *tp,
- const struct rtl_cond *c,
- unsigned long d, int n)
-{
- rtl_dash_loop_wait(tp, c, d, n, false);
-}
-
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
- rtl_dash_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
+ if (tp->dash_enabled)
+ rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_start(struct rtl8169_private *tp)
{
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
- rtl_dash_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
+ if (tp->dash_enabled)
+ rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
}
static void rtl8168_driver_start(struct rtl8169_private *tp)
@@ -1393,7 +1371,8 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
- rtl_dash_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
+ if (tp->dash_enabled)
+ rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
@@ -1401,7 +1380,8 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
rtl8168ep_stop_cmac(tp);
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
- rtl_dash_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
+ if (tp->dash_enabled)
+ rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
}
static void rtl8168_driver_stop(struct rtl8169_private *tp)
@@ -1451,19 +1431,11 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable)
{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
- case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_66:
- if (enable)
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN);
- else
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | D3_NO_PLL_DOWN);
- break;
- default:
- break;
- }
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_25 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_28 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_31 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_38)
+ r8169_mod_reg8_cond(tp, PMCH, D3_NO_PLL_DOWN, !enable);
}
static void rtl_reset_packet_filter(struct rtl8169_private *tp)
@@ -1572,61 +1544,40 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
- static const struct {
- u32 opt;
- u16 reg;
- u8 mask;
- } cfg[] = {
- { WAKE_PHY, Config3, LinkUp },
- { WAKE_UCAST, Config5, UWF },
- { WAKE_BCAST, Config5, BWF },
- { WAKE_MCAST, Config5, MWF },
- { WAKE_ANY, Config5, LanWake },
- { WAKE_MAGIC, Config3, MagicPacket }
- };
- unsigned int i, tmp = ARRAY_SIZE(cfg);
- unsigned long flags;
- u8 options;
-
rtl_unlock_config_regs(tp);
if (rtl_is_8168evl_up(tp)) {
- tmp--;
if (wolopts & WAKE_MAGIC)
rtl_eri_set_bits(tp, 0x0dc, MagicPacket_v2);
else
rtl_eri_clear_bits(tp, 0x0dc, MagicPacket_v2);
} else if (rtl_is_8125(tp)) {
- tmp--;
if (wolopts & WAKE_MAGIC)
r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0));
else
r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0);
+ } else {
+ r8169_mod_reg8_cond(tp, Config3, MagicPacket,
+ wolopts & WAKE_MAGIC);
}
- raw_spin_lock_irqsave(&tp->config25_lock, flags);
- for (i = 0; i < tmp; i++) {
- options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask;
- if (wolopts & cfg[i].opt)
- options |= cfg[i].mask;
- RTL_W8(tp, cfg[i].reg, options);
- }
- raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
+ r8169_mod_reg8_cond(tp, Config3, LinkUp, wolopts & WAKE_PHY);
+ if (rtl_is_8125(tp))
+ r8168_mac_ocp_modify(tp, 0xe0c6, 0x3f,
+ wolopts & WAKE_PHY ? 0x13 : 0);
+ r8169_mod_reg8_cond(tp, Config5, UWF, wolopts & WAKE_UCAST);
+ r8169_mod_reg8_cond(tp, Config5, BWF, wolopts & WAKE_BCAST);
+ r8169_mod_reg8_cond(tp, Config5, MWF, wolopts & WAKE_MCAST);
+ r8169_mod_reg8_cond(tp, Config5, LanWake, wolopts);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
- options = RTL_R8(tp, Config1) & ~PMEnable;
- if (wolopts)
- options |= PMEnable;
- RTL_W8(tp, Config1, options);
+ r8169_mod_reg8_cond(tp, Config1, PMEnable, wolopts);
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_66:
- if (wolopts)
- rtl_mod_config2(tp, 0, PME_SIGNAL);
- else
- rtl_mod_config2(tp, PME_SIGNAL, 0);
+ r8169_mod_reg8_cond(tp, Config2, PME_SIGNAL, wolopts);
break;
default:
break;
@@ -2098,10 +2049,7 @@ static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp)
tp->tx_lpi_timer = timer_val;
r8168_mac_ocp_write(tp, 0xe048, timer_val);
break;
- case RTL_GIGA_MAC_VER_61:
- case RTL_GIGA_MAC_VER_63:
- case RTL_GIGA_MAC_VER_65:
- case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
tp->tx_lpi_timer = timer_val;
RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val);
break;
@@ -2160,6 +2108,19 @@ static void rtl8169_get_ringparam(struct net_device *dev,
data->tx_pending = NUM_TX_DESC;
}
+static void rtl8169_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!rtl_is_8125(tp))
+ return;
+
+ rtl8169_update_counters(tp);
+ pause_stats->tx_pause_frames = le32_to_cpu(tp->counters->tx_pause_on);
+ pause_stats->rx_pause_frames = le32_to_cpu(tp->counters->rx_pause_on);
+}
+
static void rtl8169_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *data)
{
@@ -2186,6 +2147,69 @@ static int rtl8169_set_pauseparam(struct net_device *dev,
return 0;
}
+static void rtl8169_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ rtl8169_update_counters(tp);
+
+ mac_stats->FramesTransmittedOK =
+ le64_to_cpu(tp->counters->tx_packets);
+ mac_stats->SingleCollisionFrames =
+ le32_to_cpu(tp->counters->tx_one_collision);
+ mac_stats->MultipleCollisionFrames =
+ le32_to_cpu(tp->counters->tx_multi_collision);
+ mac_stats->FramesReceivedOK =
+ le64_to_cpu(tp->counters->rx_packets);
+ mac_stats->AlignmentErrors =
+ le16_to_cpu(tp->counters->align_errors);
+ mac_stats->FramesLostDueToIntMACXmitError =
+ le64_to_cpu(tp->counters->tx_errors);
+ mac_stats->BroadcastFramesReceivedOK =
+ le64_to_cpu(tp->counters->rx_broadcast);
+ mac_stats->MulticastFramesReceivedOK =
+ le32_to_cpu(tp->counters->rx_multicast);
+
+ if (!rtl_is_8125(tp))
+ return;
+
+ mac_stats->AlignmentErrors =
+ le32_to_cpu(tp->counters->align_errors32);
+ mac_stats->OctetsTransmittedOK =
+ le64_to_cpu(tp->counters->tx_octets);
+ mac_stats->LateCollisions =
+ le32_to_cpu(tp->counters->tx_late_collision);
+ mac_stats->FramesAbortedDueToXSColls =
+ le32_to_cpu(tp->counters->tx_aborted32);
+ mac_stats->OctetsReceivedOK =
+ le64_to_cpu(tp->counters->rx_octets);
+ mac_stats->FramesLostDueToIntMACRcvError =
+ le32_to_cpu(tp->counters->rx_mac_error);
+ mac_stats->MulticastFramesXmittedOK =
+ le64_to_cpu(tp->counters->tx_multicast64);
+ mac_stats->BroadcastFramesXmittedOK =
+ le64_to_cpu(tp->counters->tx_broadcast64);
+ mac_stats->MulticastFramesReceivedOK =
+ le64_to_cpu(tp->counters->rx_multicast64);
+ mac_stats->FrameTooLongErrors =
+ le32_to_cpu(tp->counters->rx_frame_too_long);
+}
+
+static void rtl8169_get_eth_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!rtl_is_8125(tp))
+ return;
+
+ rtl8169_update_counters(tp);
+
+ ctrl_stats->UnsupportedOpcodesReceived =
+ le32_to_cpu(tp->counters->rx_unknown_opcode);
+}
+
static const struct ethtool_ops rtl8169_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -2207,8 +2231,11 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ringparam = rtl8169_get_ringparam,
+ .get_pause_stats = rtl8169_get_pause_stats,
.get_pauseparam = rtl8169_get_pauseparam,
.set_pauseparam = rtl8169_set_pauseparam,
+ .get_eth_mac_stats = rtl8169_get_eth_mac_stats,
+ .get_eth_ctrl_stats = rtl8169_get_eth_ctrl_stats,
};
static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
@@ -2233,6 +2260,9 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7cf, 0x64a, RTL_GIGA_MAC_VER_66 },
{ 0x7cf, 0x649, RTL_GIGA_MAC_VER_65 },
+ /* 8125D family. */
+ { 0x7cf, 0x688, RTL_GIGA_MAC_VER_64 },
+
/* 8125B family. */
{ 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 },
@@ -2423,11 +2453,9 @@ u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp)
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
{
- if (!test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
- return;
-
set_bit(flag, tp->wk.flags);
- schedule_work(&tp->wk.work);
+ if (!schedule_work(&tp->wk.work))
+ clear_bit(flag, tp->wk.flags);
}
static void rtl8169_init_phy(struct rtl8169_private *tp)
@@ -2500,9 +2528,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_61:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_63:
- case RTL_GIGA_MAC_VER_65:
- case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
RX_PAUSE_SLOT_ON);
break;
@@ -2517,86 +2543,31 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
}
-static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1);
-}
-
-static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1);
-}
-
-static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
-}
-
-static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
-}
-
-static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, MaxTxPacketSize, 0x24);
- RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
-}
-
-static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, MaxTxPacketSize, 0x3f);
- RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
-}
-
-static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0));
-}
-
-static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
-{
- RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
-}
-
static void rtl_jumbo_config(struct rtl8169_private *tp)
{
bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
int readrq = 4096;
+ if (jumbo && tp->mac_version >= RTL_GIGA_MAC_VER_17 &&
+ tp->mac_version <= RTL_GIGA_MAC_VER_26)
+ readrq = 512;
+
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_17:
- if (jumbo) {
- readrq = 512;
- r8168b_1_hw_jumbo_enable(tp);
- } else {
- r8168b_1_hw_jumbo_disable(tp);
- }
+ r8169_mod_reg8_cond(tp, Config4, BIT(0), jumbo);
break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
- if (jumbo) {
- readrq = 512;
- r8168c_hw_jumbo_enable(tp);
- } else {
- r8168c_hw_jumbo_disable(tp);
- }
+ r8169_mod_reg8_cond(tp, Config3, Jumbo_En0, jumbo);
+ r8169_mod_reg8_cond(tp, Config4, Jumbo_En1, jumbo);
break;
case RTL_GIGA_MAC_VER_28:
- if (jumbo)
- r8168dp_hw_jumbo_enable(tp);
- else
- r8168dp_hw_jumbo_disable(tp);
+ r8169_mod_reg8_cond(tp, Config3, Jumbo_En0, jumbo);
break;
case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
- if (jumbo)
- r8168e_hw_jumbo_enable(tp);
- else
- r8168e_hw_jumbo_disable(tp);
+ RTL_W8(tp, MaxTxPacketSize, jumbo ? 0x24 : 0x3f);
+ r8169_mod_reg8_cond(tp, Config3, Jumbo_En0, jumbo);
+ r8169_mod_reg8_cond(tp, Config4, BIT(0), jumbo);
break;
default:
break;
@@ -3707,8 +3678,8 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
rtl_pcie_state_l2l3_disable(tp);
RTL_W16(tp, 0x382, 0x221b);
- RTL_W8(tp, 0x4500, 0);
- RTL_W16(tp, 0x4800, 0);
+ RTL_W32(tp, RSS_CTRL_8125, 0);
+ RTL_W16(tp, Q_NUM_CTRL_8125, 0);
/* disable UPS */
r8168_mac_ocp_modify(tp, 0xd40a, 0x0010, 0x0000);
@@ -3814,6 +3785,12 @@ static void rtl_hw_start_8125b(struct rtl8169_private *tp)
rtl_hw_start_8125_common(tp);
}
+static void rtl_hw_start_8125d(struct rtl8169_private *tp)
+{
+ rtl_set_def_aspm_entry_latency(tp);
+ rtl_hw_start_8125_common(tp);
+}
+
static void rtl_hw_start_8126a(struct rtl8169_private *tp)
{
rtl_set_def_aspm_entry_latency(tp);
@@ -3862,6 +3839,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117,
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
+ [RTL_GIGA_MAC_VER_64] = rtl_hw_start_8125d,
[RTL_GIGA_MAC_VER_65] = rtl_hw_start_8126a,
[RTL_GIGA_MAC_VER_66] = rtl_hw_start_8126a,
};
@@ -3879,6 +3857,7 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
/* disable interrupt coalescing */
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_61:
+ case RTL_GIGA_MAC_VER_64:
for (i = 0xa00; i < 0xb00; i += 4)
RTL_W32(tp, i, 0);
break;
@@ -3893,6 +3872,9 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
break;
}
+ /* enable extended tally counter */
+ r8168_mac_ocp_modify(tp, 0xea84, 0, BIT(1) | BIT(0));
+
rtl_hw_config(tp);
}
@@ -4233,8 +4215,8 @@ static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp,
{
unsigned int padto = 0, len = skb->len;
- if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN &&
- rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) {
+ if (len < 128 + RTL_MIN_PATCH_LEN && rtl_skb_is_udp(skb) &&
+ skb_transport_header_was_set(skb)) {
unsigned int trans_data_len = skb_tail_pointer(skb) -
skb_transport_header(skb);
@@ -4258,9 +4240,15 @@ static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp,
static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
struct sk_buff *skb)
{
- unsigned int padto;
+ unsigned int padto = 0;
- padto = rtl8125_quirk_udp_padto(tp, skb);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
+ padto = rtl8125_quirk_udp_padto(tp, skb);
+ break;
+ default:
+ break;
+ }
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
@@ -4712,11 +4700,6 @@ static void rtl_task(struct work_struct *work)
container_of(work, struct rtl8169_private, wk.work);
int ret;
- rtnl_lock();
-
- if (!test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
- goto out_unlock;
-
if (test_and_clear_bit(RTL_FLAG_TASK_TX_TIMEOUT, tp->wk.flags)) {
/* if chip isn't accessible, reset bus to revive it */
if (RTL_R32(tp, TxConfig) == ~0) {
@@ -4724,7 +4707,7 @@ static void rtl_task(struct work_struct *work)
if (ret < 0) {
netdev_err(tp->dev, "Can't reset secondary PCI bus, detach NIC\n");
netif_device_detach(tp->dev);
- goto out_unlock;
+ return;
}
}
@@ -4743,8 +4726,6 @@ reset:
} else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
rtl_reset_work(tp);
}
-out_unlock:
- rtnl_unlock();
}
static int rtl8169_poll(struct napi_struct *napi, int budget)
@@ -4771,11 +4752,7 @@ static void r8169_phylink_handler(struct net_device *ndev)
if (netif_carrier_ok(ndev)) {
rtl_link_chg_patch(tp);
pm_request_resume(d);
- netif_wake_queue(tp->dev);
} else {
- /* In few cases rx is broken after link-down otherwise */
- if (rtl_is_8125(tp))
- rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE);
pm_runtime_idle(d);
}
@@ -4806,6 +4783,7 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
static void rtl8169_down(struct rtl8169_private *tp)
{
+ disable_work_sync(&tp->wk.work);
/* Clear all task flags */
bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
@@ -4834,7 +4812,7 @@ static void rtl8169_up(struct rtl8169_private *tp)
phy_resume(tp->phydev);
rtl8169_init_phy(tp);
napi_enable(&tp->napi);
- set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+ enable_work(&tp->wk.work);
rtl_reset_work(tp);
phy_start(tp->phydev);
@@ -4851,8 +4829,6 @@ static int rtl8169_close(struct net_device *dev)
rtl8169_down(tp);
rtl8169_rx_clear(tp);
- cancel_work(&tp->wk.work);
-
free_irq(tp->irq, tp);
phy_disconnect(tp->phydev);
@@ -5085,7 +5061,7 @@ static void rtl_remove_one(struct pci_dev *pdev)
if (pci_dev_run_wake(pdev))
pm_runtime_get_noresume(&pdev->dev);
- cancel_work_sync(&tp->wk.work);
+ disable_work_sync(&tp->wk.work);
if (IS_ENABLED(CONFIG_R8169_LEDS))
r8169_remove_leds(tp->leds);
@@ -5252,6 +5228,12 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
phy_support_eee(tp->phydev);
phy_support_asym_pause(tp->phydev);
+ /* mimic behavior of r8125/r8126 vendor drivers */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_61)
+ phy_set_eee_broken(tp->phydev,
+ ETHTOOL_LINK_MODE_2500baseT_Full_BIT);
+ phy_set_eee_broken(tp->phydev, ETHTOOL_LINK_MODE_5000baseT_Full_BIT);
+
/* PHY will be woken up in rtl_open() */
phy_suspend(tp->phydev);
@@ -5365,6 +5347,43 @@ static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
return false;
}
+static umode_t r8169_hwmon_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return 0444;
+}
+
+static int r8169_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct rtl8169_private *tp = dev_get_drvdata(dev);
+ int val_raw;
+
+ val_raw = phy_read_paged(tp->phydev, 0xbd8, 0x12) & 0x3ff;
+ if (val_raw >= 512)
+ val_raw -= 1024;
+
+ *val = 1000 * val_raw / 2;
+
+ return 0;
+}
+
+static const struct hwmon_ops r8169_hwmon_ops = {
+ .is_visible = r8169_hwmon_is_visible,
+ .read = r8169_hwmon_read,
+};
+
+static const struct hwmon_channel_info * const r8169_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ NULL
+};
+
+static const struct hwmon_chip_info r8169_hwmon_chip_info = {
+ .ops = &r8169_hwmon_ops,
+ .info = r8169_hwmon_info,
+};
+
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct rtl8169_private *tp;
@@ -5386,8 +5405,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
tp->ocp_base = OCP_STD_PHY_BASE;
- raw_spin_lock_init(&tp->cfg9346_usage_lock);
- raw_spin_lock_init(&tp->config25_lock);
raw_spin_lock_init(&tp->mac_ocp_lock);
mutex_init(&tp->led_lock);
@@ -5462,6 +5479,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->irq = pci_irq_vector(pdev, 0);
INIT_WORK(&tp->wk.work, rtl_task);
+ disable_work(&tp->wk.work);
rtl_init_mac_address(tp);
@@ -5487,11 +5505,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->features |= dev->hw_features;
- /* There has been a number of reports that using SG/TSO results in
- * tx timeouts. However for a lot of people SG/TSO works fine.
- * Therefore disable both features by default, but allow users to
- * enable them. Use at own risk!
- */
if (rtl_chip_supports_csum_v2(tp)) {
dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
netif_set_tso_max_size(dev, RTL_GSO_MAX_SIZE_V2);
@@ -5502,6 +5515,17 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_set_tso_max_segs(dev, RTL_GSO_MAX_SEGS_V1);
}
+ /* There have been a number of reports that using SG/TSO results in
+ * tx timeouts. However, for a lot of people SG/TSO works fine.
+ * It's not fully clear which chip versions are affected. Vendor
+ * drivers enable SG/TSO for certain chip versions by default,
+ * so let's mimic that here. On other chip versions users can
+ * use ethtool to enable SG/TSO, use at your own risk!
+ */
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_46 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_61)
+ dev->features |= dev->hw_features;
+
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
@@ -5539,6 +5563,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
+ /* The temperature sensor is available from RTL8125B */
+ if (IS_REACHABLE(CONFIG_HWMON) && tp->mac_version >= RTL_GIGA_MAC_VER_63)
+ /* ignore errors */
+ devm_hwmon_device_register_with_info(&pdev->dev, "nic_temp", tp,
+ &r8169_hwmon_chip_info,
+ NULL);
rc = register_netdev(dev);
if (rc)
return rc;
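
r8169_hwmon_read() above decodes a 10-bit two's-complement register with 0.5 degC resolution into the millidegrees hwmon expects. A self-contained restatement of that arithmetic with a worked value:

/* e.g. raw 0x3ff: 1023 >= 512, so 1023 - 1024 = -1
 * -> 1000 * -1 / 2 = -500 millidegrees = -0.5 degC
 */
static long example_decode_temp(int val_raw)
{
        val_raw &= 0x3ff;       /* keep the 10-bit field */
        if (val_raw >= 512)     /* sign bit set: negative reading */
                val_raw -= 1024;
        return 1000L * val_raw / 2;
}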
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index cf29b1208482..5307c6ff4e25 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -89,20 +89,17 @@ static void rtl8168h_config_eee_phy(struct phy_device *phydev)
phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080);
}
-static void rtl8125a_config_eee_phy(struct phy_device *phydev)
+static void rtl8125_common_config_eee_phy(struct phy_device *phydev)
{
- rtl8168h_config_eee_phy(phydev);
-
- phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000);
phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000);
+ phy_modify_paged(phydev, 0xa42, 0x14, 0x0080, 0x0000);
+ phy_modify_paged(phydev, 0xa4a, 0x11, 0x0200, 0x0000);
}
-static void rtl8125b_config_eee_phy(struct phy_device *phydev)
+static void rtl8125_config_eee_phy(struct phy_device *phydev)
{
- phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000);
- phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000);
- phy_modify_paged(phydev, 0xa42, 0x14, 0x0080, 0x0000);
- phy_modify_paged(phydev, 0xa4a, 0x11, 0x0200, 0x0000);
+ rtl8168g_config_eee_phy(phydev);
+ rtl8125_common_config_eee_phy(phydev);
}
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp,
@@ -1061,15 +1058,15 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp,
rtl8168g_enable_gphy_10m(phydev);
rtl8168g_disable_aldps(phydev);
- rtl8125a_config_eee_phy(phydev);
+ rtl8125_config_eee_phy(phydev);
}
static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
r8169_apply_firmware(tp);
+ rtl8168g_enable_gphy_10m(phydev);
- phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
phy_modify_paged(phydev, 0xac4, 0x13, 0x00f0, 0x0090);
phy_modify_paged(phydev, 0xad3, 0x10, 0x0003, 0x0001);
@@ -1101,13 +1098,27 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
rtl8125_legacy_force_mode(phydev);
rtl8168g_disable_aldps(phydev);
- rtl8125b_config_eee_phy(phydev);
+ rtl8125_config_eee_phy(phydev);
+}
+
+static void rtl8125d_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+ rtl8168g_enable_gphy_10m(phydev);
+ rtl8125_legacy_force_mode(phydev);
+ rtl8168g_disable_aldps(phydev);
+ rtl8125_config_eee_phy(phydev);
}
static void rtl8126a_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
r8169_apply_firmware(tp);
+ rtl8168g_enable_gphy_10m(phydev);
+ rtl8125_legacy_force_mode(phydev);
+ rtl8168g_disable_aldps(phydev);
+ rtl8125_common_config_eee_phy(phydev);
}
void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
@@ -1160,6 +1171,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
+ [RTL_GIGA_MAC_VER_64] = rtl8125d_hw_phy_config,
[RTL_GIGA_MAC_VER_65] = rtl8126a_hw_phy_config,
[RTL_GIGA_MAC_VER_66] = rtl8126a_hw_phy_config,
};
diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
index 583c33930f88..942f1e531a85 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase.h
+++ b/drivers/net/ethernet/realtek/rtase/rtase.h
@@ -170,7 +170,7 @@ enum rtase_registers {
RTASE_INT_MITI_TX = 0x0A00,
RTASE_INT_MITI_RX = 0x0A80,
- RTASE_VLAN_ENTRY_0 = 0xAC80,
+ RTASE_VLAN_ENTRY_0 = 0xAC80,
};
enum rtase_desc_status_bit {
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index f8777b7663d3..874994d9ceb9 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -2115,7 +2115,7 @@ static int rtase_init_one(struct pci_dev *pdev,
ret = rtase_alloc_interrupt(pdev, tp);
if (ret < 0) {
dev_err(&pdev->dev, "unable to alloc MSIX/MSI\n");
- goto err_out_1;
+ goto err_out_del_napi;
}
rtase_init_netdev_ops(dev);
@@ -2148,7 +2148,7 @@ static int rtase_init_one(struct pci_dev *pdev,
GFP_KERNEL);
if (!tp->tally_vaddr) {
ret = -ENOMEM;
- goto err_out;
+ goto err_out_free_dma;
}
rtase_tally_counter_clear(tp);
@@ -2159,13 +2159,13 @@ static int rtase_init_one(struct pci_dev *pdev,
ret = register_netdev(dev);
if (ret != 0)
- goto err_out;
+ goto err_out_free_dma;
netdev_dbg(dev, "%pM, IRQ %d\n", dev->dev_addr, dev->irq);
return 0;
-err_out:
+err_out_free_dma:
if (tp->tally_vaddr) {
dma_free_coherent(&pdev->dev,
sizeof(*tp->tally_vaddr),
@@ -2175,7 +2175,7 @@ err_out:
tp->tally_vaddr = NULL;
}
-err_out_1:
+err_out_del_napi:
for (i = 0; i < tp->int_nums; i++) {
ivec = &tp->int_vector[i];
netif_napi_del(&ivec->napi);
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index a7de5cf6b317..7b48060c250b 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -998,6 +998,8 @@ enum CSR1_BIT {
CSR1_TDHD = 0x08000000,
};
+#define CSR1_CSUM_ENABLE (CSR1_TTCP4 | CSR1_TUDP4 | CSR1_TTCP6 | CSR1_TUDP6)
+
enum CSR2_BIT {
CSR2_RIP4 = 0x00000001,
CSR2_RTCP4 = 0x00000010,
@@ -1012,6 +1014,9 @@ enum CSR2_BIT {
CSR2_RDHD = 0x08000000,
};
+#define CSR2_CSUM_ENABLE (CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4 | \
+ CSR2_RTCP6 | CSR2_RUDP6 | CSR2_RICMP6)
+
#define DBAT_ENTRY_NUM 22
#define RX_QUEUE_OFFSET 4
#define NUM_RX_QUEUE 2
@@ -1050,6 +1055,7 @@ struct ravb_hw_info {
size_t gstrings_size;
netdev_features_t net_hw_features;
netdev_features_t net_features;
+ netdev_features_t vlan_features;
int stats_len;
u32 tccr_mask;
u32 tx_max_frame_size;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 907af4651c55..ac0f093f647a 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -504,11 +504,10 @@ static void ravb_csum_init_gbeth(struct net_device *ndev)
ndev->features &= ~NETIF_F_RXCSUM;
} else {
if (tx_enable)
- ravb_write(ndev, CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4, CSR1);
+ ravb_write(ndev, CSR1_CSUM_ENABLE, CSR1);
if (rx_enable)
- ravb_write(ndev, CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4,
- CSR2);
+ ravb_write(ndev, CSR2_CSUM_ENABLE, CSR2);
}
done:
@@ -750,38 +749,34 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
static void ravb_rx_csum_gbeth(struct sk_buff *skb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
- __wsum csum_ip_hdr, csum_proto;
- skb_frag_t *last_frag;
- u8 *hw_csum;
+ size_t csum_len;
+ u16 *hw_csum;
- /* The hardware checksum status is contained in sizeof(__sum16) * 2 = 4
- * bytes appended to packet data. First 2 bytes is ip header checksum
- * and last 2 bytes is protocol checksum.
+ /* The hardware checksum status is contained in 4 bytes appended to
+ * packet data.
+ *
+ * For IPv4, the first 2 bytes are the IP header checksum status. We can
+ * ignore this as it will always be re-checked in inet_gro_receive().
+ *
+ * The last 2 bytes are the protocol checksum status which will be zero
+ * if the checksum has been validated.
*/
- if (unlikely(skb->len < sizeof(__sum16) * 2))
+ csum_len = sizeof(*hw_csum) * 2;
+ if (unlikely(skb->len < csum_len))
return;
if (skb_is_nonlinear(skb)) {
- last_frag = &shinfo->frags[shinfo->nr_frags - 1];
- hw_csum = skb_frag_address(last_frag) +
- skb_frag_size(last_frag);
+ skb_frag_t *last_frag = &shinfo->frags[shinfo->nr_frags - 1];
+
+ hw_csum = (u16 *)(skb_frag_address(last_frag) +
+ skb_frag_size(last_frag));
+ skb_frag_size_sub(last_frag, csum_len);
} else {
- hw_csum = skb_tail_pointer(skb);
+ hw_csum = (u16 *)skb_tail_pointer(skb);
+ skb_trim(skb, skb->len - csum_len);
}
- hw_csum -= sizeof(__sum16);
- csum_proto = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
-
- hw_csum -= sizeof(__sum16);
- csum_ip_hdr = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
-
- if (skb_is_nonlinear(skb))
- skb_frag_size_sub(last_frag, 2 * sizeof(__sum16));
- else
- skb_trim(skb, skb->len - 2 * sizeof(__sum16));
-
- /* TODO: IPV6 Rx checksum */
- if (skb->protocol == htons(ETH_P_IP) && !csum_ip_hdr && !csum_proto)
+ if (!get_unaligned(--hw_csum))
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
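
The rewritten helper relies on the trailer convention spelled out in the comment above: hardware appends four status bytes, and a zero final 16-bit word means the transport checksum verified. A simplified linear-skb-only sketch of that decode (the real function also handles the last fragment of nonlinear skbs):

#include <linux/skbuff.h>
#include <linux/unaligned.h>

static void example_rx_csum(struct sk_buff *skb)
{
        u16 *hw_csum;

        if (skb->len < 4 || skb_is_nonlinear(skb))
                return;

        /* last 2 of the 4 trailer bytes = protocol checksum status */
        hw_csum = (u16 *)(skb_tail_pointer(skb) - 2);
        skb_trim(skb, skb->len - 4);    /* strip the trailer from the frame */
        if (!get_unaligned(hw_csum))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
}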
@@ -2067,32 +2062,44 @@ out_unlock:
static bool ravb_can_tx_csum_gbeth(struct sk_buff *skb)
{
- struct iphdr *ip = ip_hdr(skb);
+ u16 net_protocol = ntohs(skb->protocol);
+ u8 inner_protocol;
- /* TODO: Need to add support for VLAN tag 802.1Q */
- if (skb_vlan_tag_present(skb))
- return false;
+ /* GbEth IP can calculate the checksum if:
+ * - there are zero or one VLAN headers with TPID=0x8100
+ * - the network protocol is IPv4 or IPv6
+ * - the transport protocol is TCP, UDP or ICMP
+ * - the packet is not fragmented
+ */
- /* TODO: Need to add hardware checksum for IPv6 */
- if (skb->protocol != htons(ETH_P_IP))
- return false;
+ if (net_protocol == ETH_P_8021Q) {
+ struct vlan_hdr vhdr, *vh;
- switch (ip->protocol) {
- case IPPROTO_TCP:
- break;
- case IPPROTO_UDP:
- /* If the checksum value in the UDP header field is 0, TOE does
- * not calculate checksum for UDP part of this frame as it is
- * optional function as per standards.
- */
- if (udp_hdr(skb)->check == 0)
+ vh = skb_header_pointer(skb, ETH_HLEN, sizeof(vhdr), &vhdr);
+ if (!vh)
return false;
+
+ net_protocol = ntohs(vh->h_vlan_encapsulated_proto);
+ }
+
+ switch (net_protocol) {
+ case ETH_P_IP:
+ inner_protocol = ip_hdr(skb)->protocol;
+ break;
+ case ETH_P_IPV6:
+ inner_protocol = ipv6_hdr(skb)->nexthdr;
break;
default:
return false;
}
- return true;
+ switch (inner_protocol) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ return true;
+ default:
+ return false;
+ }
}
/* Packet transmit function for Ethernet AVB */
@@ -2530,7 +2537,7 @@ static int ravb_set_features_gbeth(struct net_device *ndev,
spin_lock_irqsave(&priv->lock, flags);
if (changed & NETIF_F_RXCSUM) {
if (features & NETIF_F_RXCSUM)
- val = CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4;
+ val = CSR2_CSUM_ENABLE;
else
val = 0;
@@ -2541,7 +2548,7 @@ static int ravb_set_features_gbeth(struct net_device *ndev,
if (changed & NETIF_F_HW_CSUM) {
if (features & NETIF_F_HW_CSUM)
- val = CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4;
+ val = CSR1_CSUM_ENABLE;
else
val = 0;
@@ -2778,6 +2785,7 @@ static const struct ravb_hw_info gbeth_hw_info = {
.gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
.net_hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
.net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
+ .vlan_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
.tccr_mask = TCCR_TSRQ0,
.tx_max_frame_size = 1522,
@@ -2920,6 +2928,7 @@ static int ravb_probe(struct platform_device *pdev)
ndev->features = info->net_features;
ndev->hw_features = info->net_hw_features;
+ ndev->vlan_features = info->vlan_features;
error = reset_control_deassert(rstc);
if (error)
@@ -3290,7 +3299,7 @@ static const struct dev_pm_ops ravb_dev_pm_ops = {
static struct platform_driver ravb_driver = {
.probe = ravb_probe,
- .remove_new = ravb_remove,
+ .remove = ravb_remove,
.driver = {
.name = "ravb",
.pm = pm_ptr(&ravb_dev_pm_ops),
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index b80aa27a7214..8d18dae4d8fb 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -2188,7 +2188,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(renesas_eth_sw_pm_ops, renesas_eth_sw_suspend,
static struct platform_driver renesas_eth_sw_driver_platform = {
.probe = renesas_eth_sw_probe,
- .remove_new = renesas_eth_sw_remove,
+ .remove = renesas_eth_sw_remove,
.driver = {
.name = "renesas_eth_sw",
.pm = pm_sleep_ptr(&renesas_eth_sw_pm_ops),
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 7a25903e35c3..8887b8921009 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3560,7 +3560,7 @@ MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
static struct platform_driver sh_eth_driver = {
.probe = sh_eth_drv_probe,
- .remove_new = sh_eth_drv_remove,
+ .remove = sh_eth_drv_remove,
.id_table = sh_eth_id_table,
.driver = {
.name = CARDNAME,
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 84fa911c78db..fe0bf1d3217a 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2502,7 +2502,7 @@ static void rocker_carrier_init(const struct rocker_port *rocker_port)
u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
bool link_up;
- link_up = link_status & (1 << rocker_port->pport);
+ link_up = link_status & (1ULL << rocker_port->pport);
if (link_up)
netif_carrier_on(rocker_port->dev);
else
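
The rocker fix widens the mask before the shift: with a plain int constant the shift happens in 32 bits, so ports at index 32 and above would test a truncated (and formally undefined) mask against the 64-bit status word. In miniature:

#include <linux/types.h>

static bool example_link_up(u64 link_status, unsigned int port)
{
        /* 1ULL forces a 64-bit shift; (1 << port) breaks for port >= 32 */
        return link_status & (1ULL << port);
}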
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index e6e130dbe1de..2eccc7617507 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -224,7 +224,7 @@ MODULE_DEVICE_TABLE(of, sxgbe_dt_ids);
static struct platform_driver sxgbe_platform_driver = {
.probe = sxgbe_platform_probe,
- .remove_new = sxgbe_platform_remove,
+ .remove = sxgbe_platform_remove,
.driver = {
.name = SXGBE_RESOURCE_NAME,
.pm = &sxgbe_platform_pm_ops,
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 76356dadf233..7967a0ee320b 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -832,7 +832,7 @@ static void sgiseeq_remove(struct platform_device *pdev)
static struct platform_driver sgiseeq_driver = {
.probe = sgiseeq_probe,
- .remove_new = sgiseeq_remove,
+ .remove = sgiseeq_remove,
.driver = {
.name = "sgiseeq",
}
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index de131fc5fa0b..452009ed7a43 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1751,7 +1751,7 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
#endif
}
-static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
+static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 **names)
{
DECLARE_BITMAP(mask, EF10_STAT_COUNT);
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 5c2551369812..6c3b74000d3b 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -59,6 +59,7 @@ const struct ethtool_ops ef100_ethtool_ops = {
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
.rxfh_per_ctx_key = true,
+ .cap_rss_rxnfc_adds = true,
.rxfh_priv_size = sizeof(struct efx_rss_context_priv),
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 6da06931187d..62e674d6ff60 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -583,7 +583,7 @@ static const struct efx_hw_stat_desc ef100_stat_desc[EF100_STAT_COUNT] = {
EFX_GENERIC_SW_STAT(rx_noskb_drops),
};
-static size_t ef100_describe_stats(struct efx_nic *efx, u8 *names)
+static size_t ef100_describe_stats(struct efx_nic *efx, u8 **names)
{
DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {};
diff --git a/drivers/net/ethernet/sfc/ef100_rx.c b/drivers/net/ethernet/sfc/ef100_rx.c
index 83d9db71d7d7..44dc75feb162 100644
--- a/drivers/net/ethernet/sfc/ef100_rx.c
+++ b/drivers/net/ethernet/sfc/ef100_rx.c
@@ -134,6 +134,9 @@ void __ef100_rx_packet(struct efx_channel *channel)
goto free_rx_buffer;
}
+ ++rx_queue->rx_packets;
+ rx_queue->rx_bytes += rx_buf->len;
+
efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum);
goto out;
@@ -149,8 +152,6 @@ static void ef100_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index)
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_nic *efx = rx_queue->efx;
- ++rx_queue->rx_packets;
-
netif_vdbg(efx, rx_status, efx->net_dev,
"RX queue %d received id %x\n",
efx_rx_queue_index(rx_queue), index);
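
The two ef100_rx.c hunks move packet counting from the event-dispatch function into __ef100_rx_packet(), so a frame is only counted once it has passed the checks that would otherwise send it to free_rx_buffer, and rx_bytes is now accumulated alongside rx_packets; both counters feed the per-queue stats callbacks added to efx.c below.
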
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 36b3b57e2055..650136dfc642 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -22,6 +22,7 @@
#include "net_driver.h"
#include <net/gre.h>
#include <net/udp_tunnel.h>
+#include <net/netdev_queues.h>
#include "efx.h"
#include "efx_common.h"
#include "efx_channels.h"
@@ -417,14 +418,6 @@ unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
return usecs * 1000 / efx->timer_quantum_ns;
}
-unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks)
-{
- /* We must round up when converting ticks to microseconds
- * because we round down when converting the other way.
- */
- return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
-}
-
/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
@@ -626,6 +619,113 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_bpf = efx_xdp
};
+static void efx_get_queue_stats_rx(struct net_device *net_dev, int idx,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+
+ channel = efx_get_channel(efx, idx);
+ rx_queue = efx_channel_get_rx_queue(channel);
+ /* Count only packets since the last time the datapath was started */
+ stats->packets = rx_queue->rx_packets - rx_queue->old_rx_packets;
+ stats->bytes = rx_queue->rx_bytes - rx_queue->old_rx_bytes;
+ stats->hw_drops = efx_get_queue_stat_rx_hw_drops(channel) -
+ channel->old_n_rx_hw_drops;
+ stats->hw_drop_overruns = channel->n_rx_nodesc_trunc -
+ channel->old_n_rx_hw_drop_overruns;
+}
+
+static void efx_get_queue_stats_tx(struct net_device *net_dev, int idx,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+
+ channel = efx_get_tx_channel(efx, idx);
+ stats->packets = 0;
+ stats->bytes = 0;
+ stats->hw_gso_packets = 0;
+ stats->hw_gso_wire_packets = 0;
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ stats->packets += tx_queue->complete_packets -
+ tx_queue->old_complete_packets;
+ stats->bytes += tx_queue->complete_bytes -
+ tx_queue->old_complete_bytes;
+ /* Note that, unlike stats->packets and stats->bytes,
+ * these count TXes enqueued, rather than completed,
+ * which may not be what users expect.
+ */
+ stats->hw_gso_packets += tx_queue->tso_bursts -
+ tx_queue->old_tso_bursts;
+ stats->hw_gso_wire_packets += tx_queue->tso_packets -
+ tx_queue->old_tso_packets;
+ }
+}
+
+static void efx_get_base_stats(struct net_device *net_dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+
+ rx->packets = 0;
+ rx->bytes = 0;
+ rx->hw_drops = 0;
+ rx->hw_drop_overruns = 0;
+ tx->packets = 0;
+ tx->bytes = 0;
+ tx->hw_gso_packets = 0;
+ tx->hw_gso_wire_packets = 0;
+
+ /* Count all packets on non-core queues, and packets before the last
+ * datapath start on core queues.
+ */
+ efx_for_each_channel(channel, efx) {
+ rx_queue = efx_channel_get_rx_queue(channel);
+ if (channel->channel >= net_dev->real_num_rx_queues) {
+ rx->packets += rx_queue->rx_packets;
+ rx->bytes += rx_queue->rx_bytes;
+ rx->hw_drops += efx_get_queue_stat_rx_hw_drops(channel);
+ rx->hw_drop_overruns += channel->n_rx_nodesc_trunc;
+ } else {
+ rx->packets += rx_queue->old_rx_packets;
+ rx->bytes += rx_queue->old_rx_bytes;
+ rx->hw_drops += channel->old_n_rx_hw_drops;
+ rx->hw_drop_overruns += channel->old_n_rx_hw_drop_overruns;
+ }
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ if (channel->channel < efx->tx_channel_offset ||
+ channel->channel >= efx->tx_channel_offset +
+ net_dev->real_num_tx_queues) {
+ tx->packets += tx_queue->complete_packets;
+ tx->bytes += tx_queue->complete_bytes;
+ tx->hw_gso_packets += tx_queue->tso_bursts;
+ tx->hw_gso_wire_packets += tx_queue->tso_packets;
+ } else {
+ tx->packets += tx_queue->old_complete_packets;
+ tx->bytes += tx_queue->old_complete_bytes;
+ tx->hw_gso_packets += tx_queue->old_tso_bursts;
+ tx->hw_gso_wire_packets += tx_queue->old_tso_packets;
+ }
+ /* Include XDP TX in device-wide stats */
+ tx->packets += tx_queue->complete_xdp_packets;
+ tx->bytes += tx_queue->complete_xdp_bytes;
+ }
+ }
+}
+
+static const struct netdev_stat_ops efx_stat_ops = {
+ .get_queue_stats_rx = efx_get_queue_stats_rx,
+ .get_queue_stats_tx = efx_get_queue_stats_tx,
+ .get_base_stats = efx_get_base_stats,
+};
+
static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
{
struct bpf_prog *old_prog;
@@ -716,6 +816,7 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->watchdog_timeo = 5 * HZ;
net_dev->irq = efx->pci_dev->irq;
net_dev->netdev_ops = &efx_netdev_ops;
+ net_dev->stat_ops = &efx_stat_ops;
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
net_dev->priv_flags |= IFF_UNICAST_FLT;
net_dev->ethtool_ops = &efx_ethtool_ops;
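
Taken together, the efx.c additions hook sfc into the per-queue stats netlink API from <net/netdev_queues.h>: .get_queue_stats_rx/.get_queue_stats_tx report live queues, .get_base_stats reports everything else, and to keep the combined totals monotonic the driver snapshots old_* counters when the datapath starts, reports deltas for live queues, and folds the pre-snapshot remainder into the base. (Reporting hw_drop_overruns separately while also including n_rx_nodesc_trunc in the hw_drops helper matches the API's convention that the specific drop counters are subsets of hw_drops.) A minimal sketch of the RX side, with hypothetical foo_* names:

#include <net/netdev_queues.h>

/* sketch: a live queue reports the delta since the last datapath start */
static void foo_get_queue_stats_rx(struct net_device *dev, int idx,
				   struct netdev_queue_stats_rx *stats)
{
	struct foo_rx_queue *q = foo_get_rx_queue(dev, idx);	/* hypothetical */

	stats->packets = q->rx_packets - q->old_rx_packets;
	stats->bytes = q->rx_bytes - q->old_rx_bytes;
}

static const struct netdev_stat_ops foo_stat_ops = {
	.get_queue_stats_rx	= foo_get_queue_stats_rx,
	.get_queue_stats_tx	= foo_get_queue_stats_tx, /* analogous */
	.get_base_stats		= foo_get_base_stats,	  /* pre-snapshot remainder */
};
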
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 7a6cab883d66..45e191686625 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -168,7 +168,6 @@ extern const struct ethtool_ops efx_ethtool_ops;
/* Global */
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
-unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
bool rx_may_override_tx);
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index f1723a6fb082..06b4f52713ef 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -1100,6 +1100,10 @@ void efx_start_channels(struct efx_nic *efx)
atomic_inc(&efx->active_queues);
}
+ /* snapshot the per-queue stats baselines */
+ channel->old_n_rx_hw_drops = efx_get_queue_stat_rx_hw_drops(channel);
+ channel->old_n_rx_hw_drop_overruns = channel->n_rx_nodesc_trunc;
+
efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue);
atomic_inc(&efx->active_queues);
@@ -1209,6 +1213,8 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
tx_queue->pkts_compl,
tx_queue->bytes_compl);
}
+ tx_queue->complete_packets += tx_queue->pkts_compl;
+ tx_queue->complete_bytes += tx_queue->bytes_compl;
}
/* Receive any packets we queued up */
diff --git a/drivers/net/ethernet/sfc/efx_channels.h b/drivers/net/ethernet/sfc/efx_channels.h
index 46b702648721..547cf94014a3 100644
--- a/drivers/net/ethernet/sfc/efx_channels.h
+++ b/drivers/net/ethernet/sfc/efx_channels.h
@@ -43,6 +43,13 @@ struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel);
void efx_start_channels(struct efx_nic *efx);
void efx_stop_channels(struct efx_nic *efx);
+static inline u64 efx_get_queue_stat_rx_hw_drops(struct efx_channel *channel)
+{
+ return channel->n_rx_eth_crc_err + channel->n_rx_frm_trunc +
+ channel->n_rx_overlength + channel->n_rx_nodesc_trunc +
+ channel->n_rx_mport_bad;
+}
+
void efx_init_napi_channel(struct efx_channel *channel);
void efx_init_napi(struct efx_nic *efx);
void efx_fini_napi_channel(struct efx_channel *channel);
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 13cf647051af..c88ec3e24836 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -635,22 +635,6 @@ int __efx_reconfigure_port(struct efx_nic *efx)
return rc;
}
-/* Reinitialise the MAC to pick up new PHY settings, even if the port is
- * disabled.
- */
-int efx_reconfigure_port(struct efx_nic *efx)
-{
- int rc;
-
- EFX_ASSERT_RESET_SERIALISED(efx);
-
- mutex_lock(&efx->mac_lock);
- rc = __efx_reconfigure_port(efx);
- mutex_unlock(&efx->mac_lock);
-
- return rc;
-}
-
/**************************************************************************
*
* Device reset and suspend
diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h
index 2c54dac3e662..19a8ca530969 100644
--- a/drivers/net/ethernet/sfc/efx_common.h
+++ b/drivers/net/ethernet/sfc/efx_common.h
@@ -40,7 +40,6 @@ void efx_destroy_reset_workqueue(void);
void efx_start_monitor(struct efx_nic *efx);
int __efx_reconfigure_port(struct efx_nic *efx);
-int efx_reconfigure_port(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index bb1930818beb..83d715544f7f 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -263,6 +263,7 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
.rxfh_per_ctx_key = true,
+ .cap_rss_rxnfc_adds = true,
.rxfh_priv_size = sizeof(struct efx_rss_context_priv),
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index 6ded44b86052..2d734496733f 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -75,7 +75,6 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets),
EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
@@ -83,8 +82,8 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
- EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_overlength),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
@@ -396,7 +395,7 @@ int efx_ethtool_fill_self_tests(struct efx_nic *efx,
return n;
}
-static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
+static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 **strings)
{
size_t n_stats = 0;
struct efx_channel *channel;
@@ -404,24 +403,22 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
efx_for_each_channel(channel, efx) {
if (efx_channel_has_tx_queues(channel)) {
n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-%u.tx_packets",
- channel->tx_queue[0].queue /
- EFX_MAX_TXQ_PER_CHANNEL);
+ if (!strings)
+ continue;
- strings += ETH_GSTRING_LEN;
- }
+ ethtool_sprintf(strings, "tx-%u.tx_packets",
+ channel->tx_queue[0].queue /
+ EFX_MAX_TXQ_PER_CHANNEL);
}
}
efx_for_each_channel(channel, efx) {
if (efx_channel_has_rx_queue(channel)) {
n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "rx-%d.rx_packets", channel->channel);
- strings += ETH_GSTRING_LEN;
- }
+ if (!strings)
+ continue;
+
+ ethtool_sprintf(strings, "rx-%d.rx_packets",
+ channel->channel);
}
}
if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
@@ -429,11 +426,11 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
n_stats++;
- if (strings) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-xdp-cpu-%hu.tx_packets", xdp);
- strings += ETH_GSTRING_LEN;
- }
+ if (!strings)
+ continue;
+
+ ethtool_sprintf(strings, "tx-xdp-cpu-%hu.tx_packets",
+ xdp);
}
}
@@ -465,15 +462,11 @@ void efx_ethtool_get_strings(struct net_device *net_dev,
switch (string_set) {
case ETH_SS_STATS:
- strings += (efx->type->describe_stats(efx, strings) *
- ETH_GSTRING_LEN);
+ efx->type->describe_stats(efx, &strings);
for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
- strscpy(strings + i * ETH_GSTRING_LEN,
- efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
- strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
- strings += (efx_describe_per_queue_stats(efx, strings) *
- ETH_GSTRING_LEN);
- efx_ptp_describe_stats(efx, strings);
+ ethtool_puts(&strings, efx_sw_stat_desc[i].name);
+ efx_describe_per_queue_stats(efx, &strings);
+ efx_ptp_describe_stats(efx, &strings);
break;
case ETH_SS_TEST:
efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
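
The string-table hunks in ethtool_common.c (and their falcon, siena and ptp counterparts below) are one conversion applied repeatedly: the describe functions now take u8 ** instead of u8 *, and the ethtool_puts()/ethtool_sprintf() helpers copy one name into the current ETH_GSTRING_LEN slot and advance the cursor, so callers no longer multiply counts by ETH_GSTRING_LEN or carry the pointer arithmetic themselves. A minimal sketch, with hypothetical stat names:

#include <linux/ethtool.h>

/* sketch: each helper fills one ETH_GSTRING_LEN slot and advances *data */
static void foo_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	int i;

	if (sset != ETH_SS_STATS)
		return;

	ethtool_puts(&data, "rx_packets");	/* fixed string */
	for (i = 0; i < 4; i++)
		ethtool_sprintf(&data, "tx-%d.tx_packets", i);	/* formatted */
}
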
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 8925745f1c17..b07f7e4e2877 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -1886,14 +1886,6 @@ unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs)
return usecs * 1000 / efx->timer_quantum_ns;
}
-unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks)
-{
- /* We must round up when converting ticks to microseconds
- * because we round down when converting the other way.
- */
- return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
-}
-
/* Set interrupt moderation parameters */
int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
diff --git a/drivers/net/ethernet/sfc/falcon/efx.h b/drivers/net/ethernet/sfc/falcon/efx.h
index d3b4646545fa..52508f2c8cb2 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.h
+++ b/drivers/net/ethernet/sfc/falcon/efx.h
@@ -198,7 +198,6 @@ int ef4_try_recovery(struct ef4_nic *efx);
/* Global */
void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type);
unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs);
-unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks);
int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
bool rx_may_override_tx);
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index f4db683b80f7..04766448a545 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -353,7 +353,7 @@ static int ef4_ethtool_fill_self_tests(struct ef4_nic *efx,
return n;
}
-static size_t ef4_describe_per_queue_stats(struct ef4_nic *efx, u8 *strings)
+static size_t ef4_describe_per_queue_stats(struct ef4_nic *efx, u8 **strings)
{
size_t n_stats = 0;
struct ef4_channel *channel;
@@ -361,24 +361,22 @@ static size_t ef4_describe_per_queue_stats(struct ef4_nic *efx, u8 *strings)
ef4_for_each_channel(channel, efx) {
if (ef4_channel_has_tx_queues(channel)) {
n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-%u.tx_packets",
- channel->tx_queue[0].queue /
- EF4_TXQ_TYPES);
+ if (!strings)
+ continue;
- strings += ETH_GSTRING_LEN;
- }
+ ethtool_sprintf(strings, "tx-%u.tx_packets",
+ channel->tx_queue[0].queue /
+ EF4_TXQ_TYPES);
}
}
ef4_for_each_channel(channel, efx) {
if (ef4_channel_has_rx_queue(channel)) {
n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "rx-%d.rx_packets", channel->channel);
- strings += ETH_GSTRING_LEN;
- }
+ if (!strings)
+ continue;
+
+ ethtool_sprintf(strings, "rx-%d.rx_packets",
+ channel->channel);
}
}
return n_stats;
@@ -409,14 +407,10 @@ static void ef4_ethtool_get_strings(struct net_device *net_dev,
switch (string_set) {
case ETH_SS_STATS:
- strings += (efx->type->describe_stats(efx, strings) *
- ETH_GSTRING_LEN);
+ efx->type->describe_stats(efx, &strings);
for (i = 0; i < EF4_ETHTOOL_SW_STAT_COUNT; i++)
- strscpy(strings + i * ETH_GSTRING_LEN,
- ef4_sw_stat_desc[i].name, ETH_GSTRING_LEN);
- strings += EF4_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
- strings += (ef4_describe_per_queue_stats(efx, strings) *
- ETH_GSTRING_LEN);
+ ethtool_puts(&strings, ef4_sw_stat_desc[i].name);
+ ef4_describe_per_queue_stats(efx, &strings);
break;
case ETH_SS_TEST:
ef4_ethtool_fill_self_tests(efx, NULL, strings, NULL);
diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c
index 36114ce88034..4af56333ea49 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon.c
@@ -2564,7 +2564,7 @@ static void falcon_remove_nic(struct ef4_nic *efx)
efx->nic_data = NULL;
}
-static size_t falcon_describe_nic_stats(struct ef4_nic *efx, u8 *names)
+static size_t falcon_describe_nic_stats(struct ef4_nic *efx, u8 **names)
{
return ef4_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT,
falcon_stat_mask, names);
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c
index c64623c2e80c..01017c41338e 100644
--- a/drivers/net/ethernet/sfc/falcon/farch.c
+++ b/drivers/net/ethernet/sfc/falcon/farch.c
@@ -1631,28 +1631,6 @@ void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
}
}
-/* Looks at available SRAM resources and works out how many queues we
- * can support, and where things like descriptor caches should live.
- *
- * SRAM is split up as follows:
- * 0 buftbl entries for channels
- * efx->vf_buftbl_base buftbl entries for SR-IOV
- * efx->rx_dc_base RX descriptor caches
- * efx->tx_dc_base TX descriptor caches
- */
-void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw)
-{
- unsigned vi_count;
-
- /* Account for the buffer table entries backing the datapath channels
- * and the descriptor caches for those channels.
- */
- vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES);
-
- efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
- efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
-}
-
u32 ef4_farch_fpga_ver(struct ef4_nic *efx)
{
ef4_oword_t altera_build;
diff --git a/drivers/net/ethernet/sfc/falcon/net_driver.h b/drivers/net/ethernet/sfc/falcon/net_driver.h
index a2c7139f2b32..7ab0db44720d 100644
--- a/drivers/net/ethernet/sfc/falcon/net_driver.h
+++ b/drivers/net/ethernet/sfc/falcon/net_driver.h
@@ -1057,7 +1057,7 @@ struct ef4_nic_type {
void (*finish_flush)(struct ef4_nic *efx);
void (*prepare_flr)(struct ef4_nic *efx);
void (*finish_flr)(struct ef4_nic *efx);
- size_t (*describe_stats)(struct ef4_nic *efx, u8 *names);
+ size_t (*describe_stats)(struct ef4_nic *efx, u8 **names);
size_t (*update_stats)(struct ef4_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats);
void (*start_stats)(struct ef4_nic *efx);
diff --git a/drivers/net/ethernet/sfc/falcon/nic.c b/drivers/net/ethernet/sfc/falcon/nic.c
index 78c851b5a56f..a6304686bc90 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.c
+++ b/drivers/net/ethernet/sfc/falcon/nic.c
@@ -444,18 +444,15 @@ void ef4_nic_get_regs(struct ef4_nic *efx, void *buf)
* bits in the first @count bits of @mask for which a name is defined.
*/
size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names)
+ const unsigned long *mask, u8 **names)
{
size_t visible = 0;
size_t index;
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
- if (names) {
- strscpy(names, desc[index].name,
- ETH_GSTRING_LEN);
- names += ETH_GSTRING_LEN;
- }
+ if (names)
+ ethtool_puts(names, desc[index].name);
++visible;
}
}
@@ -511,14 +508,3 @@ void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count,
}
}
}
-
-void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *rx_nodesc_drops)
-{
- /* if down, or this is the first update after coming up */
- if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
- efx->rx_nodesc_drops_while_down +=
- *rx_nodesc_drops - efx->rx_nodesc_drops_total;
- efx->rx_nodesc_drops_total = *rx_nodesc_drops;
- efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
- *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
-}
diff --git a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h
index ada6e036fd97..bc6ef937d0fe 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.h
+++ b/drivers/net/ethernet/sfc/falcon/nic.h
@@ -477,7 +477,6 @@ void ef4_farch_finish_flr(struct ef4_nic *efx);
void falcon_start_nic_stats(struct ef4_nic *efx);
void falcon_stop_nic_stats(struct ef4_nic *efx);
int falcon_reset_xaui(struct ef4_nic *efx);
-void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw);
void ef4_farch_init_common(struct ef4_nic *efx);
void ef4_farch_rx_push_indir_table(struct ef4_nic *efx);
@@ -498,14 +497,10 @@ size_t ef4_nic_get_regs_len(struct ef4_nic *efx);
void ef4_nic_get_regs(struct ef4_nic *efx, void *buf);
size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names);
+ const unsigned long *mask, u8 **names);
void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u64 *stats,
const void *dma_buf, bool accumulate);
-void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *stat);
-
-#define EF4_MAX_FLUSH_TIME 5000
-
void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
ef4_qword_t *event);
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
index b9369483758c..e6e80b039ca2 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -40,14 +40,6 @@ static inline u8 *ef4_tx_get_copy_buffer(struct ef4_tx_queue *tx_queue,
return (u8 *)page_buf->addr + offset;
}
-u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
- struct ef4_tx_buffer *buffer, size_t len)
-{
- if (len > EF4_TX_CB_SIZE)
- return NULL;
- return ef4_tx_get_copy_buffer(tx_queue, buffer);
-}
-
static void ef4_dequeue_buffer(struct ef4_tx_queue *tx_queue,
struct ef4_tx_buffer *buffer,
unsigned int *pkts_compl,
diff --git a/drivers/net/ethernet/sfc/falcon/tx.h b/drivers/net/ethernet/sfc/falcon/tx.h
index 2a88c59cbbbe..868ed8a861ab 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.h
+++ b/drivers/net/ethernet/sfc/falcon/tx.h
@@ -15,9 +15,6 @@
unsigned int ef4_tx_limit_len(struct ef4_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len);
-u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
- struct ef4_tx_buffer *buffer, size_t len);
-
int ef4_enqueue_skb_tso(struct ef4_tx_queue *tx_queue, struct sk_buff *skb,
bool *data_mapped);
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 10709d828a63..50f097487b14 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -76,17 +76,6 @@ void efx_mae_mport_uplink(struct efx_nic *efx __always_unused, u32 *out)
*out = EFX_DWORD_VAL(mport);
}
-void efx_mae_mport_vf(struct efx_nic *efx __always_unused, u32 vf_id, u32 *out)
-{
- efx_dword_t mport;
-
- EFX_POPULATE_DWORD_3(mport,
- MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC,
- MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER,
- MAE_MPORT_SELECTOR_FUNC_VF_ID, vf_id);
- *out = EFX_DWORD_VAL(mport);
-}
-
/* Constructs an mport selector from an mport ID, because they're not the same */
void efx_mae_mport_mport(struct efx_nic *efx __always_unused, u32 mport_id, u32 *out)
{
diff --git a/drivers/net/ethernet/sfc/mae.h b/drivers/net/ethernet/sfc/mae.h
index 8df30bc4f3ba..db79912c86d8 100644
--- a/drivers/net/ethernet/sfc/mae.h
+++ b/drivers/net/ethernet/sfc/mae.h
@@ -23,7 +23,6 @@ int efx_mae_free_mport(struct efx_nic *efx, u32 id);
void efx_mae_mport_wire(struct efx_nic *efx, u32 *out);
void efx_mae_mport_uplink(struct efx_nic *efx, u32 *out);
-void efx_mae_mport_vf(struct efx_nic *efx, u32 vf_id, u32 *out);
void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out);
int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 76578502226e..d461b1a6ce81 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1051,15 +1051,6 @@ efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
cookie, false);
}
-int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
- const efx_dword_t *inbuf, size_t inlen,
- size_t outlen, efx_mcdi_async_completer *complete,
- unsigned long cookie)
-{
- return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
- cookie, true);
-}
-
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
@@ -1068,14 +1059,6 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
outlen_actual, false, NULL, NULL);
}
-int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
- efx_dword_t *outbuf, size_t outlen,
- size_t *outlen_actual)
-{
- return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
- outlen_actual, true, NULL, NULL);
-}
-
void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
size_t inlen, efx_dword_t *outbuf,
size_t outlen, int rc)
@@ -1982,33 +1965,6 @@ efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
}
-int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
- size_t outlen;
- int rc;
-
- rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto fail;
-
- if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
- rc = -EIO;
- goto fail;
- }
-
- *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
-
- return 0;
-
-fail:
- *id_out = -1;
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-
int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
@@ -2021,38 +1977,6 @@ int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
return rc;
}
-int efx_mcdi_flush_rxqs(struct efx_nic *efx)
-{
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- MCDI_DECLARE_BUF(inbuf,
- MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
- int rc, count;
-
- BUILD_BUG_ON(EFX_MAX_CHANNELS >
- MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
-
- count = 0;
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- if (rx_queue->flush_pending) {
- rx_queue->flush_pending = false;
- atomic_dec(&efx->rxq_flush_pending);
- MCDI_SET_ARRAY_DWORD(
- inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
- count, efx_rx_queue_index(rx_queue));
- count++;
- }
- }
- }
-
- rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
- MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
- WARN_ON(rc < 0);
-
- return rc;
-}
-
int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
{
int rc;
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index ea612c619874..cdb17d7c147f 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -155,9 +155,6 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
-int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd,
- size_t inlen, efx_dword_t *outbuf,
- size_t outlen, size_t *outlen_actual);
typedef void efx_mcdi_async_completer(struct efx_nic *efx,
unsigned long cookie, int rc,
@@ -167,11 +164,6 @@ int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
const efx_dword_t *inbuf, size_t inlen, size_t outlen,
efx_mcdi_async_completer *complete,
unsigned long cookie);
-int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
- const efx_dword_t *inbuf, size_t inlen,
- size_t outlen,
- efx_mcdi_async_completer *complete,
- unsigned long cookie);
void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
size_t inlen, efx_dword_t *outbuf,
@@ -410,10 +402,8 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx);
int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
int *id_out);
-int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
-int efx_mcdi_flush_rxqs(struct efx_nic *efx);
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
void efx_mcdi_mac_start_stats(struct efx_nic *efx);
void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b85c51cbe7f9..620ba6ef3514 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -193,6 +193,12 @@ struct efx_tx_buffer {
* @initialised: Has hardware queue been initialised?
* @timestamping: Is timestamping enabled for this channel?
* @xdp_tx: Is this an XDP tx queue?
+ * @old_complete_packets: Value of @complete_packets as of last
+ * efx_init_tx_queue()
+ * @old_complete_bytes: Value of @complete_bytes as of last
+ * efx_init_tx_queue()
+ * @old_tso_bursts: Value of @tso_bursts as of last efx_init_tx_queue()
+ * @old_tso_packets: Value of @tso_packets as of last efx_init_tx_queue()
* @read_count: Current read pointer.
* This is the number of buffers that have been removed from both rings.
* @old_write_count: The value of @write_count when last checked.
@@ -202,6 +208,20 @@ struct efx_tx_buffer {
* avoid cache-line ping-pong between the xmit path and the
* completion path.
* @merge_events: Number of TX merged completion events
+ * @bytes_compl: Number of bytes completed during this NAPI poll
+ * (efx_process_channel()). For BQL.
+ * @pkts_compl: Number of packets completed during this NAPI poll.
+ * @complete_packets: Number of packets completed since this struct was
+ * created. Only counts SKB packets, not XDP TX (it accumulates
+ * the same values that are reported to BQL).
+ * @complete_bytes: Number of bytes completed since this struct was
+ * created. For TSO, counts the superframe size, not the sizes of
+ * generated frames on the wire (i.e. the headers are only counted
+ * once)
+ * @complete_xdp_packets: Number of XDP TX packets completed since this
+ * struct was created.
+ * @complete_xdp_bytes: Number of XDP TX bytes completed since this
+ * struct was created.
* @completed_timestamp_major: Top part of the most recent tx timestamp.
* @completed_timestamp_minor: Low part of the most recent tx timestamp.
* @insert_count: Current insert pointer
@@ -232,6 +252,7 @@ struct efx_tx_buffer {
* @xmit_pending: Are any packets waiting to be pushed to the NIC
* @cb_packets: Number of times the TX copybreak feature has been used
* @notify_count: Count of notified descriptors to the NIC
+ * @tx_packets: Number of packets sent since this struct was created
* @empty_read_count: If the completion path has seen the queue as empty
* and the transmission path has not yet checked this, the value of
* @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
@@ -255,6 +276,10 @@ struct efx_tx_queue {
bool initialised;
bool timestamping;
bool xdp_tx;
+ unsigned long old_complete_packets;
+ unsigned long old_complete_bytes;
+ unsigned int old_tso_bursts;
+ unsigned int old_tso_packets;
/* Members used mainly on the completion path */
unsigned int read_count ____cacheline_aligned_in_smp;
@@ -262,6 +287,10 @@ struct efx_tx_queue {
unsigned int merge_events;
unsigned int bytes_compl;
unsigned int pkts_compl;
+ unsigned long complete_packets;
+ unsigned long complete_bytes;
+ unsigned long complete_xdp_packets;
+ unsigned long complete_xdp_bytes;
u32 completed_timestamp_major;
u32 completed_timestamp_minor;
@@ -370,6 +399,10 @@ struct efx_rx_page_state {
* @recycle_count: RX buffer recycle counter.
* @slow_fill: Timer used to defer efx_nic_generate_fill_event().
* @grant_work: workitem used to grant credits to the MAE if @grant_credits
+ * @rx_packets: Number of packets received since this struct was created
+ * @rx_bytes: Number of bytes received since this struct was created
+ * @old_rx_packets: Value of @rx_packets as of last efx_init_rx_queue()
+ * @old_rx_bytes: Value of @rx_bytes as of last efx_init_rx_queue()
* @xdp_rxq_info: XDP specific RX queue information.
* @xdp_rxq_info_valid: Is xdp_rxq_info valid data?.
*/
@@ -406,6 +439,9 @@ struct efx_rx_queue {
struct work_struct grant_work;
/* Statistics to supplement MAC stats */
unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long old_rx_packets;
+ unsigned long old_rx_bytes;
struct xdp_rxq_info xdp_rxq_info;
bool xdp_rxq_info_valid;
};
@@ -451,10 +487,8 @@ enum efx_sync_events_state {
* @filter_work: Work item for efx_filter_rfs_expire()
* @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
* indexed by filter ID
- * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
* @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
* @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
- * @n_rx_mcast_mismatch: Count of unmatched multicast frames
* @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
* @n_rx_overlength: Count of RX_OVERLENGTH errors
* @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
@@ -468,6 +502,10 @@ enum efx_sync_events_state {
* @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
* @n_rx_mport_bad: Count of RX packets dropped because their ingress mport was
* not recognised
+ * @old_n_rx_hw_drops: Count of all RX packets dropped for any reason as of last
+ * efx_start_channels()
+ * @old_n_rx_hw_drop_overruns: Value of @n_rx_nodesc_trunc as of last
+ * efx_start_channels()
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
@@ -511,7 +549,6 @@ struct efx_channel {
u32 *rps_flow_id;
#endif
- unsigned int n_rx_tobe_disc;
unsigned int n_rx_ip_hdr_chksum_err;
unsigned int n_rx_tcp_udp_chksum_err;
unsigned int n_rx_outer_ip_hdr_chksum_err;
@@ -519,7 +556,6 @@ struct efx_channel {
unsigned int n_rx_inner_ip_hdr_chksum_err;
unsigned int n_rx_inner_tcp_udp_chksum_err;
unsigned int n_rx_eth_crc_err;
- unsigned int n_rx_mcast_mismatch;
unsigned int n_rx_frm_trunc;
unsigned int n_rx_overlength;
unsigned int n_skbuff_leaks;
@@ -532,6 +568,9 @@ struct efx_channel {
unsigned int n_rx_xdp_redirect;
unsigned int n_rx_mport_bad;
+ unsigned int old_n_rx_hw_drops;
+ unsigned int old_n_rx_hw_drop_overruns;
+
unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index;
@@ -1369,7 +1408,7 @@ struct efx_nic_type {
int (*fini_dmaq)(struct efx_nic *efx);
void (*prepare_flr)(struct efx_nic *efx);
void (*finish_flr)(struct efx_nic *efx);
- size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
+ size_t (*describe_stats)(struct efx_nic *efx, u8 **names);
size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats);
size_t (*update_stats_atomic)(struct efx_nic *efx, u64 *full_stats,
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index a33ed473cc8a..80aa5e9c732a 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -299,18 +299,15 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
* bits in the first @count bits of @mask for which a name is defined.
*/
size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names)
+ const unsigned long *mask, u8 **names)
{
size_t visible = 0;
size_t index;
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
- if (names) {
- strscpy(names, desc[index].name,
- ETH_GSTRING_LEN);
- names += ETH_GSTRING_LEN;
- }
+ if (names)
+ ethtool_puts(names, desc[index].name);
++visible;
}
}
diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h
index 7ec4ac7b7ff5..821d91efda19 100644
--- a/drivers/net/ethernet/sfc/nic_common.h
+++ b/drivers/net/ethernet/sfc/nic_common.h
@@ -241,7 +241,7 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf);
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names);
+ const unsigned long *mask, u8 **names);
int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest);
void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u64 *stats,
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index aaacdcfa54ae..4c7222bf26be 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -399,7 +399,7 @@ static const unsigned long efx_ptp_stat_mask[] = {
[0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL,
};
-size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings)
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 **strings)
{
if (!efx->ptp_data)
return 0;
@@ -1800,11 +1800,6 @@ int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
return NETDEV_TX_OK;
}
-int efx_ptp_get_mode(struct efx_nic *efx)
-{
- return efx->ptp_data->mode;
-}
-
int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
unsigned int new_mode)
{
diff --git a/drivers/net/ethernet/sfc/ptp.h b/drivers/net/ethernet/sfc/ptp.h
index 6946203499ef..cb9b077921e8 100644
--- a/drivers/net/ethernet/sfc/ptp.h
+++ b/drivers/net/ethernet/sfc/ptp.h
@@ -26,12 +26,11 @@ int efx_ptp_get_ts_config(struct efx_nic *efx,
void efx_ptp_get_ts_info(struct efx_nic *efx,
struct kernel_ethtool_ts_info *ts_info);
bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-int efx_ptp_get_mode(struct efx_nic *efx);
int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
unsigned int new_mode);
int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
-size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 **strings);
size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats);
void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index f77a2d3ef37e..ffca82207e47 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -125,8 +125,6 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_rx_buffer *rx_buf;
- rx_queue->rx_packets++;
-
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->flags |= flags;
@@ -394,6 +392,9 @@ void __efx_rx_packet(struct efx_channel *channel)
goto out;
}
+ rx_queue->rx_packets++;
+ rx_queue->rx_bytes += rx_buf->len;
+
if (!efx_do_xdp(efx, channel, rx_buf, &eh))
goto out;
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index 0b7dc75c40f9..ab358fe13e1d 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -241,6 +241,9 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->page_recycle_failed = 0;
rx_queue->page_recycle_full = 0;
+ rx_queue->old_rx_packets = rx_queue->rx_packets;
+ rx_queue->old_rx_bytes = rx_queue->rx_bytes;
+
/* Initialise limit fields */
max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
max_trigger =
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c
index 075fef64de68..eeee676fdca7 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c
@@ -395,7 +395,7 @@ fail:
test->flags |= ETH_TEST_FL_FAILED;
}
-static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
+static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 **strings)
{
size_t n_stats = 0;
struct efx_channel *channel;
@@ -403,24 +403,22 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
efx_for_each_channel(channel, efx) {
if (efx_channel_has_tx_queues(channel)) {
n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-%u.tx_packets",
- channel->tx_queue[0].queue /
- EFX_MAX_TXQ_PER_CHANNEL);
+ if (!strings)
+ continue;
- strings += ETH_GSTRING_LEN;
- }
+ ethtool_sprintf(strings, "tx-%u.tx_packets",
+ channel->tx_queue[0].queue /
+ EFX_MAX_TXQ_PER_CHANNEL);
}
}
efx_for_each_channel(channel, efx) {
if (efx_channel_has_rx_queue(channel)) {
n_stats++;
- if (strings != NULL) {
- snprintf(strings, ETH_GSTRING_LEN,
- "rx-%d.rx_packets", channel->channel);
- strings += ETH_GSTRING_LEN;
- }
+ if (!strings)
+ continue;
+
+ ethtool_sprintf(strings, "rx-%d.rx_packets",
+ channel->channel);
}
}
if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
@@ -428,11 +426,11 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
n_stats++;
- if (strings) {
- snprintf(strings, ETH_GSTRING_LEN,
- "tx-xdp-cpu-%hu.tx_packets", xdp);
- strings += ETH_GSTRING_LEN;
- }
+ if (!strings)
+ continue;
+
+ ethtool_sprintf(strings, "tx-xdp-cpu-%hu.tx_packets",
+ xdp);
}
}
@@ -464,15 +462,11 @@ void efx_siena_ethtool_get_strings(struct net_device *net_dev,
switch (string_set) {
case ETH_SS_STATS:
- strings += (efx->type->describe_stats(efx, strings) *
- ETH_GSTRING_LEN);
+ efx->type->describe_stats(efx, &strings);
for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
- strscpy(strings + i * ETH_GSTRING_LEN,
- efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
- strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
- strings += (efx_describe_per_queue_stats(efx, strings) *
- ETH_GSTRING_LEN);
- efx_siena_ptp_describe_stats(efx, strings);
+ ethtool_puts(&strings, efx_sw_stat_desc[i].name);
+ efx_describe_per_queue_stats(efx, &strings);
+ efx_siena_ptp_describe_stats(efx, &strings);
break;
case ETH_SS_TEST:
efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h
index 3fa7c652ae9b..9785eff10607 100644
--- a/drivers/net/ethernet/sfc/siena/net_driver.h
+++ b/drivers/net/ethernet/sfc/siena/net_driver.h
@@ -1307,7 +1307,7 @@ struct efx_nic_type {
void (*finish_flush)(struct efx_nic *efx);
void (*prepare_flr)(struct efx_nic *efx);
void (*finish_flr)(struct efx_nic *efx);
- size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
+ size_t (*describe_stats)(struct efx_nic *efx, u8 **names);
size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats);
size_t (*update_stats_atomic)(struct efx_nic *efx, u64 *full_stats,
diff --git a/drivers/net/ethernet/sfc/siena/nic.c b/drivers/net/ethernet/sfc/siena/nic.c
index 0ea0433a6230..32fce70085e3 100644
--- a/drivers/net/ethernet/sfc/siena/nic.c
+++ b/drivers/net/ethernet/sfc/siena/nic.c
@@ -449,20 +449,20 @@ void efx_siena_get_regs(struct efx_nic *efx, void *buf)
* Returns the number of visible statistics, i.e. the number of set
* bits in the first @count bits of @mask for which a name is defined.
*/
-size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names)
+size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc,
+ size_t count, const unsigned long *mask,
+ u8 **names)
{
size_t visible = 0;
size_t index;
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
- if (names) {
- strscpy(names, desc[index].name,
- ETH_GSTRING_LEN);
- names += ETH_GSTRING_LEN;
- }
++visible;
+ if (!names)
+ continue;
+
+ ethtool_puts(names, desc[index].name);
}
}
diff --git a/drivers/net/ethernet/sfc/siena/nic_common.h b/drivers/net/ethernet/sfc/siena/nic_common.h
index 3af0405eeaa4..b7fbe198008d 100644
--- a/drivers/net/ethernet/sfc/siena/nic_common.h
+++ b/drivers/net/ethernet/sfc/siena/nic_common.h
@@ -239,8 +239,9 @@ void efx_siena_get_regs(struct efx_nic *efx, void *buf);
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
-size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
- const unsigned long *mask, u8 *names);
+size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc,
+ size_t count, const unsigned long *mask,
+ u8 **names);
void efx_siena_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u64 *stats,
const void *dma_buf, bool accumulate);
diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c
index 85005196b4c5..062c77c92077 100644
--- a/drivers/net/ethernet/sfc/siena/ptp.c
+++ b/drivers/net/ethernet/sfc/siena/ptp.c
@@ -393,7 +393,7 @@ static const unsigned long efx_ptp_stat_mask[] = {
[0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL,
};
-size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 *strings)
+size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 **strings)
{
if (!efx->ptp_data)
return 0;
diff --git a/drivers/net/ethernet/sfc/siena/ptp.h b/drivers/net/ethernet/sfc/siena/ptp.h
index b6133e7c5608..54840036ab67 100644
--- a/drivers/net/ethernet/sfc/siena/ptp.h
+++ b/drivers/net/ethernet/sfc/siena/ptp.h
@@ -28,7 +28,7 @@ int efx_siena_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
unsigned int new_mode);
int efx_siena_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
void efx_siena_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
-size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
+size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 **strings);
size_t efx_siena_ptp_update_stats(struct efx_nic *efx, u64 *stats);
void efx_siena_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
void __efx_siena_rx_skb_attach_timestamp(struct efx_channel *channel,
diff --git a/drivers/net/ethernet/sfc/siena/siena.c b/drivers/net/ethernet/sfc/siena/siena.c
index ca33dc08e555..49f0c8a1a90a 100644
--- a/drivers/net/ethernet/sfc/siena/siena.c
+++ b/drivers/net/ethernet/sfc/siena/siena.c
@@ -545,7 +545,7 @@ static const unsigned long siena_stat_mask[] = {
[0 ... BITS_TO_LONGS(SIENA_STAT_COUNT) - 1] = ~0UL,
};
-static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names)
+static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 **names)
{
return efx_siena_describe_stats(siena_stat_desc, SIENA_STAT_COUNT,
siena_stat_mask, names);
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index fe2d476028e7..4dff19b6ef17 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -49,14 +49,6 @@ static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
return (u8 *)page_buf->addr + offset;
}
-u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer, size_t len)
-{
- if (len > EFX_TX_CB_SIZE)
- return NULL;
- return efx_tx_get_copy_buffer(tx_queue, buffer);
-}
-
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
/* We need to consider all queues that the net core sees as one */
@@ -553,6 +545,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
{
+ unsigned int xdp_pkts_compl = 0, xdp_bytes_compl = 0;
unsigned int pkts_compl = 0, bytes_compl = 0;
unsigned int efv_pkts_compl = 0;
unsigned int read_ptr;
@@ -577,7 +570,8 @@ void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
if (buffer->flags & EFX_TX_BUF_SKB)
finished = true;
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
- &efv_pkts_compl);
+ &efv_pkts_compl, &xdp_pkts_compl,
+ &xdp_bytes_compl);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -585,6 +579,8 @@ void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
+ tx_queue->complete_xdp_packets += xdp_pkts_compl;
+ tx_queue->complete_xdp_bytes += xdp_bytes_compl;
EFX_WARN_ON_PARANOID(pkts_compl + efv_pkts_compl != 1);
diff --git a/drivers/net/ethernet/sfc/tx.h b/drivers/net/ethernet/sfc/tx.h
index f2c4d2f89919..f882749af8c3 100644
--- a/drivers/net/ethernet/sfc/tx.h
+++ b/drivers/net/ethernet/sfc/tx.h
@@ -15,9 +15,6 @@
unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len);
-u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer, size_t len);
-
/* What TXQ type will satisfy the checksum offloads required for this skb? */
static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb)
{
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index 2adb132b2f7e..a22a0d634ffc 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -86,6 +86,11 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->completed_timestamp_major = 0;
tx_queue->completed_timestamp_minor = 0;
+ tx_queue->old_complete_packets = tx_queue->complete_packets;
+ tx_queue->old_complete_bytes = tx_queue->complete_bytes;
+ tx_queue->old_tso_bursts = tx_queue->tso_bursts;
+ tx_queue->old_tso_packets = tx_queue->tso_packets;
+
tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
tx_queue->tso_version = 0;
@@ -109,12 +114,14 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
/* Free any buffers left in the ring */
while (tx_queue->read_count != tx_queue->write_count) {
+ unsigned int xdp_pkts_compl = 0, xdp_bytes_compl = 0;
unsigned int pkts_compl = 0, bytes_compl = 0;
unsigned int efv_pkts_compl = 0;
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
- &efv_pkts_compl);
+ &efv_pkts_compl, &xdp_pkts_compl,
+ &xdp_bytes_compl);
++tx_queue->read_count;
}
@@ -150,7 +157,9 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
unsigned int *pkts_compl,
unsigned int *bytes_compl,
- unsigned int *efv_pkts_compl)
+ unsigned int *efv_pkts_compl,
+ unsigned int *xdp_pkts,
+ unsigned int *xdp_bytes)
{
if (buffer->unmap_len) {
struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
@@ -195,6 +204,10 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
tx_queue->queue, tx_queue->read_count);
} else if (buffer->flags & EFX_TX_BUF_XDP) {
xdp_return_frame_rx_napi(buffer->xdpf);
+ if (xdp_pkts)
+ (*xdp_pkts)++;
+ if (xdp_bytes)
+ (*xdp_bytes) += buffer->xdpf->len;
}
buffer->len = 0;
@@ -210,7 +223,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
unsigned int index,
unsigned int *pkts_compl,
unsigned int *bytes_compl,
- unsigned int *efv_pkts_compl)
+ unsigned int *efv_pkts_compl,
+ unsigned int *xdp_pkts,
+ unsigned int *xdp_bytes)
{
struct efx_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
@@ -230,7 +245,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
}
efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl,
- efv_pkts_compl);
+ efv_pkts_compl, xdp_pkts, xdp_bytes);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -253,15 +268,18 @@ void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
+ unsigned int xdp_pkts_compl = 0, xdp_bytes_compl = 0;
unsigned int efv_pkts_compl = 0;
struct efx_nic *efx = tx_queue->efx;
EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl,
- &efv_pkts_compl);
+ &efv_pkts_compl, &xdp_pkts_compl, &xdp_bytes_compl);
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
+ tx_queue->complete_xdp_packets += xdp_pkts_compl;
+ tx_queue->complete_xdp_bytes += xdp_bytes_compl;
if (pkts_compl + efv_pkts_compl > 1)
++tx_queue->merge_events;
@@ -290,6 +308,8 @@ int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
unsigned int insert_count)
{
+ unsigned int xdp_bytes_compl = 0;
+ unsigned int xdp_pkts_compl = 0;
unsigned int efv_pkts_compl = 0;
struct efx_tx_buffer *buffer;
unsigned int bytes_compl = 0;
@@ -300,7 +320,8 @@ void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
--tx_queue->insert_count;
buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
- &efv_pkts_compl);
+ &efv_pkts_compl, &xdp_pkts_compl,
+ &xdp_bytes_compl);
}
}
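
The tx_common.c plumbing threads two new optional out-parameters through efx_dequeue_buffer()/efx_dequeue_buffers() so XDP completions can be accounted separately from SKB completions: callers supply local counters (or NULL, which the callee tolerates), and only buffers flagged EFX_TX_BUF_XDP contribute. A minimal sketch of the pattern, with assumed names:

/* sketch: optional accumulators, written only when the caller passed them */
static void foo_count_completion(const struct foo_tx_buffer *buf,
				 unsigned int *xdp_pkts,
				 unsigned int *xdp_bytes)
{
	if (!(buf->flags & FOO_TX_BUF_XDP))	/* hypothetical flag */
		return;

	if (xdp_pkts)
		(*xdp_pkts)++;
	if (xdp_bytes)
		*xdp_bytes += buf->len;
}
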
diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h
index 1e9f42938aac..039eefafba23 100644
--- a/drivers/net/ethernet/sfc/tx_common.h
+++ b/drivers/net/ethernet/sfc/tx_common.h
@@ -20,7 +20,9 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
unsigned int *pkts_compl,
unsigned int *bytes_compl,
- unsigned int *efv_pkts_compl);
+ unsigned int *efv_pkts_compl,
+ unsigned int *xdp_pkts,
+ unsigned int *xdp_bytes);
static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
{
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 98d0b561a057..4535579018c9 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1273,7 +1273,7 @@ static void ioc3_set_multicast_list(struct net_device *dev)
static struct platform_driver ioc3eth_driver = {
.probe = ioc3eth_probe,
- .remove_new = ioc3eth_remove,
+ .remove = ioc3eth_remove,
.driver = {
.name = "ioc3-eth",
}
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 18b6f93d875e..f7c3a5a766b7 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -864,7 +864,7 @@ static void meth_remove(struct platform_device *pdev)
static struct platform_driver meth_driver = {
.probe = meth_probe,
- .remove_new = meth_remove,
+ .remove = meth_remove,
.driver = {
.name = "meth",
}
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index a5e23e2da90f..9d1a83a5fa7e 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2475,7 +2475,7 @@ static const struct dev_pm_ops smc_drv_pm_ops = {
static struct platform_driver smc_driver = {
.probe = smc_drv_probe,
- .remove_new = smc_drv_remove,
+ .remove = smc_drv_remove,
.driver = {
.name = CARDNAME,
.pm = &smc_drv_pm_ops,
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 74f1ccc96459..f539813878f5 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2667,7 +2667,7 @@ MODULE_DEVICE_TABLE(acpi, smsc911x_acpi_match);
static struct platform_driver smsc911x_driver = {
.probe = smsc911x_drv_probe,
- .remove_new = smsc911x_drv_remove,
+ .remove = smsc911x_drv_remove,
.driver = {
.name = SMSC_CHIPNAME,
.pm = SMSC911X_PM_OPS,
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 5ab8b81b84e6..dc99821c6226 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -2211,7 +2211,7 @@ MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
static struct platform_driver netsec_driver = {
.probe = netsec_probe,
- .remove_new = netsec_remove,
+ .remove = netsec_remove,
.driver = {
.name = "netsec",
.pm = &netsec_pm_ops,
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index eed24e67c5a6..66b3549636f8 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1974,7 +1974,7 @@ MODULE_DEVICE_TABLE(of, of_ave_match);
static struct platform_driver ave_driver = {
.probe = ave_probe,
- .remove_new = ave_remove,
+ .remove = ave_remove,
.driver = {
.name = "ave",
.pm = AVE_PM_OPS,
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 05cc07b8f48c..6658536a4e17 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -228,6 +228,16 @@ config DWMAC_SUN8I
stmmac device driver. This driver is used for H3/A83T/A64
EMAC ethernet controller.
+config DWMAC_THEAD
+ tristate "T-HEAD dwmac support"
+ depends on OF && (ARCH_THEAD || COMPILE_TEST)
+ help
+	  Support for ethernet controllers on T-HEAD RISC-V SoCs.
+
+	  This selects the T-HEAD platform-specific glue layer support for
+	  the stmmac device driver. This driver is used for the T-HEAD TH1520
+	  ethernet controller.
+
config DWMAC_IMX8
tristate "NXP IMX8 DWMAC support"
default ARCH_MXC
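
Note: as a usage aside, the new glue layer is built like any other stmmac platform variant; a minimal config fragment (assuming an ARCH_THEAD kernel with the stmmac platform core enabled) would be:

	CONFIG_STMMAC_ETH=y
	CONFIG_STMMAC_PLATFORM=y
	CONFIG_DWMAC_THEAD=m
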
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index c2f0e91f6bf8..2389fd261344 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -6,7 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \
stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \
- stmmac_xdp.o stmmac_est.o \
+ stmmac_xdp.o stmmac_est.o stmmac_fpe.o \
$(stmmac-y)
stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o
@@ -28,6 +28,7 @@ obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o
+obj-$(CONFIG_DWMAC_THEAD) += dwmac-thead.o
obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
obj-$(CONFIG_DWMAC_INTEL_PLAT) += dwmac-intel-plat.o
obj-$(CONFIG_DWMAC_LOONGSON1) += dwmac-loongson1.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 684489156dce..1367fa5c9b8e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -549,8 +549,12 @@ extern const struct stmmac_desc_ops ndesc_ops;
struct mac_device_info;
extern const struct stmmac_hwtimestamp stmmac_ptp;
+extern const struct stmmac_hwtimestamp dwmac1000_ptp;
extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
+extern const struct ptp_clock_info stmmac_ptp_clock_ops;
+extern const struct ptp_clock_info dwmac1000_ptp_clock_ops;
+
struct mac_link {
u32 caps;
u32 speed_mask;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index 643ee6d8d4dd..ef99ef3f1ab4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -135,7 +135,7 @@ MODULE_DEVICE_TABLE(of, anarion_dwmac_match);
static struct platform_driver anarion_dwmac_driver = {
.probe = anarion_dwmac_probe,
- .remove_new = stmmac_pltfr_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "anarion-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index ec924c6c76c6..83290e707df5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -479,7 +479,7 @@ MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
static struct platform_driver dwc_eth_dwmac_driver = {
.probe = dwc_eth_dwmac_probe,
- .remove_new = dwc_eth_dwmac_remove,
+ .remove = dwc_eth_dwmac_remove,
.driver = {
.name = "dwc-eth-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index 598eff926815..b9218c07eb6b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -56,6 +56,7 @@ static const struct of_device_id dwmac_generic_match[] = {
{ .compatible = "snps,dwmac-3.610"},
{ .compatible = "snps,dwmac-3.70a"},
{ .compatible = "snps,dwmac-3.710"},
+ { .compatible = "snps,dwmac-3.72a"},
{ .compatible = "snps,dwmac-4.00"},
{ .compatible = "snps,dwmac-4.10a"},
{ .compatible = "snps,dwmac"},
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 6b65420e11b5..641f3cd019a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, imx_dwmac_match);
static struct platform_driver imx_dwmac_driver = {
.probe = imx_dwmac_probe,
- .remove_new = stmmac_pltfr_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "imx-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
index 19c93b998fb3..066783d66422 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -370,7 +370,7 @@ MODULE_DEVICE_TABLE(of, ingenic_mac_of_matches);
static struct platform_driver ingenic_mac_driver = {
.probe = ingenic_mac_probe,
- .remove_new = stmmac_pltfr_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "ingenic-mac",
.pm = pm_ptr(&ingenic_mac_pm_ops),
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index 9739bc9867c5..d94f0a150e93 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -97,35 +97,38 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
dwmac->dev = &pdev->dev;
dwmac->tx_clk = NULL;
+ /*
+	 * This cannot return NULL at this point because the driver's
+ * compatibility with the device has already been validated in
+ * platform_match().
+ */
dwmac->data = device_get_match_data(&pdev->dev);
- if (dwmac->data) {
- if (dwmac->data->fix_mac_speed)
- plat_dat->fix_mac_speed = dwmac->data->fix_mac_speed;
-
- /* Enable TX clock */
- if (dwmac->data->tx_clk_en) {
- dwmac->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
- if (IS_ERR(dwmac->tx_clk))
- return PTR_ERR(dwmac->tx_clk);
+ if (dwmac->data->fix_mac_speed)
+ plat_dat->fix_mac_speed = dwmac->data->fix_mac_speed;
+
+ /* Enable TX clock */
+ if (dwmac->data->tx_clk_en) {
+ dwmac->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+ if (IS_ERR(dwmac->tx_clk))
+ return PTR_ERR(dwmac->tx_clk);
+
+ ret = clk_prepare_enable(dwmac->tx_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to enable tx_clk\n");
+ return ret;
+ }
- ret = clk_prepare_enable(dwmac->tx_clk);
+ /* Check and configure TX clock rate */
+ rate = clk_get_rate(dwmac->tx_clk);
+ if (dwmac->data->tx_clk_rate &&
+ rate != dwmac->data->tx_clk_rate) {
+ rate = dwmac->data->tx_clk_rate;
+ ret = clk_set_rate(dwmac->tx_clk, rate);
if (ret) {
dev_err(&pdev->dev,
- "Failed to enable tx_clk\n");
- return ret;
- }
-
- /* Check and configure TX clock rate */
- rate = clk_get_rate(dwmac->tx_clk);
- if (dwmac->data->tx_clk_rate &&
- rate != dwmac->data->tx_clk_rate) {
- rate = dwmac->data->tx_clk_rate;
- ret = clk_set_rate(dwmac->tx_clk, rate);
- if (ret) {
- dev_err(&pdev->dev,
- "Failed to set tx_clk\n");
- goto err_tx_clk_disable;
- }
+ "Failed to set tx_clk\n");
+ goto err_tx_clk_disable;
}
}
@@ -176,7 +179,7 @@ static void intel_eth_plat_remove(struct platform_device *pdev)
static struct platform_driver intel_eth_plat_driver = {
.probe = intel_eth_plat_probe,
- .remove_new = intel_eth_plat_remove,
+ .remove = intel_eth_plat_remove,
.driver = {
.name = "intel-eth-plat",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 83ad7c7935e3..48acba5eb178 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -451,7 +451,7 @@ static struct phylink_pcs *intel_mgbe_select_pcs(struct stmmac_priv *priv,
* should always be an XPCS. The original code would always
* return this if present.
*/
- return &priv->hw->xpcs->pcs;
+ return xpcs_to_phylink_pcs(priv->hw->xpcs);
}
static int intel_mgbe_common_data(struct pci_dev *pdev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 4ba15873d5b1..61227dcf56dc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -499,7 +499,7 @@ MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match);
static struct platform_driver ipq806x_gmac_dwmac_driver = {
.probe = ipq806x_gmac_probe,
- .remove_new = stmmac_pltfr_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "ipq806x-gmac-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index 4c810d8f5bea..22653ffd2a04 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -72,7 +72,7 @@ MODULE_DEVICE_TABLE(of, lpc18xx_dwmac_match);
static struct platform_driver lpc18xx_dwmac_driver = {
.probe = lpc18xx_dwmac_probe,
- .remove_new = stmmac_pltfr_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "lpc18xx-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 001857c294fb..c9636832a570 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -699,7 +699,7 @@ MODULE_DEVICE_TABLE(of, mediatek_dwmac_match);
static struct platform_driver mediatek_dwmac_driver = {
.probe = mediatek_dwmac_probe,
- .remove_new = mediatek_dwmac_remove,
+ .remove = mediatek_dwmac_remove,
.driver = {
.name = "dwmac-mediatek",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index a16bfa9089ea..5469fa1b429e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -78,7 +78,7 @@ MODULE_DEVICE_TABLE(of, meson6_dwmac_match);
static struct platform_driver meson6_dwmac_driver = {
.probe = meson6_dwmac_probe,
- .remove_new = stmmac_pltfr_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "meson6-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index b23944aa344e..9c2d62d133ad 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -520,7 +520,7 @@ MODULE_DEVICE_TABLE(of, meson8b_dwmac_match);
static struct platform_driver meson8b_dwmac_driver = {
.probe = meson8b_dwmac_probe,
- .remove_new = stmmac_pltfr_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "meson8b-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 50073bdade46..8cb374668b74 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -2073,7 +2073,7 @@ MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
static struct platform_driver rk_gmac_dwmac_driver = {
.probe = rk_gmac_probe,
- .remove_new = rk_gmac_remove,
+ .remove = rk_gmac_remove,
.driver = {
.name = "rk_gmac-dwmac",
.pm = &rk_gmac_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c
index 59a7bd560f96..13634965bc19 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c
@@ -80,7 +80,7 @@ MODULE_DEVICE_TABLE(of, rzn1_dwmac_match);
static struct platform_driver rzn1_dwmac_driver = {
.probe = rzn1_dwmac_probe,
- .remove_new = stmmac_pltfr_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "rzn1-dwmac",
.of_match_table = rzn1_dwmac_match,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index fdb4c773ec98..248b30d7b864 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -485,6 +485,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
plat_dat->pcs_init = socfpga_dwmac_pcs_init;
plat_dat->pcs_exit = socfpga_dwmac_pcs_exit;
plat_dat->select_pcs = socfpga_dwmac_select_pcs;
+ plat_dat->has_gmac = true;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
@@ -582,7 +583,7 @@ MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
static struct platform_driver socfpga_dwmac_driver = {
.probe = socfpga_dwmac_probe,
- .remove_new = stmmac_pltfr_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "socfpga-dwmac",
.pm = &socfpga_dwmac_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 4e1076faee0c..421666279dd3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -176,7 +176,7 @@ MODULE_DEVICE_TABLE(of, starfive_dwmac_match);
static struct platform_driver starfive_dwmac_driver = {
.probe = starfive_dwmac_probe,
- .remove_new = stmmac_pltfr_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "starfive-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 4445cddc4cbe..a6ff02d905a9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -358,7 +358,7 @@ MODULE_DEVICE_TABLE(of, sti_dwmac_match);
static struct platform_driver sti_dwmac_driver = {
.probe = sti_dwmac_probe,
- .remove_new = sti_dwmac_remove,
+ .remove = sti_dwmac_remove,
.driver = {
.name = "sti-dwmac",
.pm = &sti_dwmac_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index c1732955a697..1e8bac665cc9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -675,7 +675,7 @@ MODULE_DEVICE_TABLE(of, stm32_dwmac_match);
static struct platform_driver stm32_dwmac_driver = {
.probe = stm32_dwmac_probe,
- .remove_new = stm32_dwmac_remove,
+ .remove = stm32_dwmac_remove,
.driver = {
.name = "stm32-dwmac",
.pm = &stm32_dwmac_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 4a0ae92b3055..4b7b2582a120 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1343,7 +1343,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
static struct platform_driver sun8i_dwmac_driver = {
.probe = sun8i_dwmac_probe,
- .remove_new = sun8i_dwmac_remove,
+ .remove = sun8i_dwmac_remove,
.shutdown = sun8i_dwmac_shutdown,
.driver = {
.name = "dwmac-sun8i",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 2653a9f0958c..9ae318436c4a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -172,7 +172,7 @@ MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
static struct platform_driver sun7i_dwmac_driver = {
.probe = sun7i_gmac_probe,
- .remove_new = stmmac_pltfr_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "sun7i-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
index 6fdd94c8919e..3827997d2132 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
@@ -381,7 +381,7 @@ static SIMPLE_DEV_PM_OPS(tegra_mgbe_pm_ops, tegra_mgbe_suspend, tegra_mgbe_resum
static struct platform_driver tegra_mgbe_driver = {
.probe = tegra_mgbe_probe,
- .remove_new = tegra_mgbe_remove,
+ .remove = tegra_mgbe_remove,
.driver = {
.name = "tegra-mgbe",
.pm = &tegra_mgbe_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
new file mode 100644
index 000000000000..dce84ed184e9
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * T-HEAD DWMAC platform driver
+ *
+ * Copyright (C) 2021 Alibaba Group Holding Limited.
+ * Copyright (C) 2023 Jisheng Zhang <jszhang@kernel.org>
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+
+#include "stmmac_platform.h"
+
+#define GMAC_CLK_EN 0x00
+#define GMAC_TX_CLK_EN BIT(1)
+#define GMAC_TX_CLK_N_EN BIT(2)
+#define GMAC_TX_CLK_OUT_EN BIT(3)
+#define GMAC_RX_CLK_EN BIT(4)
+#define GMAC_RX_CLK_N_EN BIT(5)
+#define GMAC_EPHY_REF_CLK_EN BIT(6)
+#define GMAC_RXCLK_DELAY_CTRL 0x04
+#define GMAC_RXCLK_BYPASS BIT(15)
+#define GMAC_RXCLK_INVERT BIT(14)
+#define GMAC_RXCLK_DELAY GENMASK(4, 0)
+#define GMAC_TXCLK_DELAY_CTRL 0x08
+#define GMAC_TXCLK_BYPASS BIT(15)
+#define GMAC_TXCLK_INVERT BIT(14)
+#define GMAC_TXCLK_DELAY GENMASK(4, 0)
+#define GMAC_PLLCLK_DIV 0x0c
+#define GMAC_PLLCLK_DIV_EN BIT(31)
+#define GMAC_PLLCLK_DIV_NUM GENMASK(7, 0)
+#define GMAC_GTXCLK_SEL 0x18
+#define GMAC_GTXCLK_SEL_PLL BIT(0)
+#define GMAC_INTF_CTRL 0x1c
+#define PHY_INTF_MASK BIT(0)
+#define PHY_INTF_RGMII FIELD_PREP(PHY_INTF_MASK, 1)
+#define PHY_INTF_MII_GMII FIELD_PREP(PHY_INTF_MASK, 0)
+#define GMAC_TXCLK_OEN 0x20
+#define TXCLK_DIR_MASK BIT(0)
+#define TXCLK_DIR_OUTPUT FIELD_PREP(TXCLK_DIR_MASK, 0)
+#define TXCLK_DIR_INPUT FIELD_PREP(TXCLK_DIR_MASK, 1)
+
+#define GMAC_GMII_RGMII_RATE 125000000
+#define GMAC_MII_RATE 25000000
+
+struct thead_dwmac {
+ struct plat_stmmacenet_data *plat;
+ void __iomem *apb_base;
+ struct device *dev;
+};
+
+static int thead_dwmac_set_phy_if(struct plat_stmmacenet_data *plat)
+{
+ struct thead_dwmac *dwmac = plat->bsp_priv;
+ u32 phyif;
+
+ switch (plat->mac_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ phyif = PHY_INTF_MII_GMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ phyif = PHY_INTF_RGMII;
+ break;
+ default:
+ dev_err(dwmac->dev, "unsupported phy interface %d\n",
+ plat->mac_interface);
+ return -EINVAL;
+ }
+
+ writel(phyif, dwmac->apb_base + GMAC_INTF_CTRL);
+ return 0;
+}
+
+static int thead_dwmac_set_txclk_dir(struct plat_stmmacenet_data *plat)
+{
+ struct thead_dwmac *dwmac = plat->bsp_priv;
+ u32 txclk_dir;
+
+ switch (plat->mac_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ txclk_dir = TXCLK_DIR_INPUT;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ txclk_dir = TXCLK_DIR_OUTPUT;
+ break;
+ default:
+ dev_err(dwmac->dev, "unsupported phy interface %d\n",
+ plat->mac_interface);
+ return -EINVAL;
+ }
+
+ writel(txclk_dir, dwmac->apb_base + GMAC_TXCLK_OEN);
+ return 0;
+}
+
+static void thead_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+{
+ struct plat_stmmacenet_data *plat;
+ struct thead_dwmac *dwmac = priv;
+ unsigned long rate;
+ u32 div, reg;
+
+ plat = dwmac->plat;
+
+ switch (plat->mac_interface) {
+	/* For MII, rxc/txc are provided by the PHY */
+ case PHY_INTERFACE_MODE_MII:
+ return;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ rate = clk_get_rate(plat->stmmac_clk);
+ if (!rate || rate % GMAC_GMII_RGMII_RATE != 0 ||
+ rate % GMAC_MII_RATE != 0) {
+ dev_err(dwmac->dev, "invalid gmac rate %ld\n", rate);
+ return;
+ }
+
+ writel(0, dwmac->apb_base + GMAC_PLLCLK_DIV);
+
+ switch (speed) {
+ case SPEED_1000:
+ div = rate / GMAC_GMII_RGMII_RATE;
+ break;
+ case SPEED_100:
+ div = rate / GMAC_MII_RATE;
+ break;
+ case SPEED_10:
+ div = rate * 10 / GMAC_MII_RATE;
+ break;
+ default:
+ dev_err(dwmac->dev, "invalid speed %u\n", speed);
+ return;
+ }
+
+ reg = FIELD_PREP(GMAC_PLLCLK_DIV_EN, 1) |
+ FIELD_PREP(GMAC_PLLCLK_DIV_NUM, div);
+ writel(reg, dwmac->apb_base + GMAC_PLLCLK_DIV);
+ break;
+ default:
+ dev_err(dwmac->dev, "unsupported phy interface %d\n",
+ plat->mac_interface);
+ return;
+ }
+}
+
+static int thead_dwmac_enable_clk(struct plat_stmmacenet_data *plat)
+{
+ struct thead_dwmac *dwmac = plat->bsp_priv;
+ u32 reg;
+
+ switch (plat->mac_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ reg = GMAC_RX_CLK_EN | GMAC_TX_CLK_EN;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ /* use pll */
+ writel(GMAC_GTXCLK_SEL_PLL, dwmac->apb_base + GMAC_GTXCLK_SEL);
+ reg = GMAC_TX_CLK_EN | GMAC_TX_CLK_N_EN | GMAC_TX_CLK_OUT_EN |
+ GMAC_RX_CLK_EN | GMAC_RX_CLK_N_EN;
+ break;
+
+ default:
+ dev_err(dwmac->dev, "unsupported phy interface %d\n",
+ plat->mac_interface);
+ return -EINVAL;
+ }
+
+ writel(reg, dwmac->apb_base + GMAC_CLK_EN);
+ return 0;
+}
+
+static int thead_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct thead_dwmac *dwmac = priv;
+ unsigned int reg;
+ int ret;
+
+ ret = thead_dwmac_set_phy_if(dwmac->plat);
+ if (ret)
+ return ret;
+
+ ret = thead_dwmac_set_txclk_dir(dwmac->plat);
+ if (ret)
+ return ret;
+
+ reg = readl(dwmac->apb_base + GMAC_RXCLK_DELAY_CTRL);
+ reg &= ~(GMAC_RXCLK_DELAY);
+ reg |= FIELD_PREP(GMAC_RXCLK_DELAY, 0);
+ writel(reg, dwmac->apb_base + GMAC_RXCLK_DELAY_CTRL);
+
+ reg = readl(dwmac->apb_base + GMAC_TXCLK_DELAY_CTRL);
+ reg &= ~(GMAC_TXCLK_DELAY);
+ reg |= FIELD_PREP(GMAC_TXCLK_DELAY, 0);
+ writel(reg, dwmac->apb_base + GMAC_TXCLK_DELAY_CTRL);
+
+ return thead_dwmac_enable_clk(dwmac->plat);
+}
+
+static int thead_dwmac_probe(struct platform_device *pdev)
+{
+ struct stmmac_resources stmmac_res;
+ struct plat_stmmacenet_data *plat;
+ struct thead_dwmac *dwmac;
+ void __iomem *apb;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to get resources\n");
+
+ plat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat),
+ "dt configuration failed\n");
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return -ENOMEM;
+
+ apb = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(apb))
+ return dev_err_probe(&pdev->dev, PTR_ERR(apb),
+ "failed to remap gmac apb registers\n");
+
+ dwmac->dev = &pdev->dev;
+ dwmac->plat = plat;
+ dwmac->apb_base = apb;
+ plat->bsp_priv = dwmac;
+ plat->fix_mac_speed = thead_dwmac_fix_speed;
+ plat->init = thead_dwmac_init;
+
+ return devm_stmmac_pltfr_probe(pdev, plat, &stmmac_res);
+}
+
+static const struct of_device_id thead_dwmac_match[] = {
+ { .compatible = "thead,th1520-gmac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, thead_dwmac_match);
+
+static struct platform_driver thead_dwmac_driver = {
+ .probe = thead_dwmac_probe,
+ .driver = {
+ .name = "thead-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = thead_dwmac_match,
+ },
+};
+module_platform_driver(thead_dwmac_driver);
+
+MODULE_AUTHOR("Jisheng Zhang <jszhang@kernel.org>");
+MODULE_AUTHOR("Drew Fustini <drew@pdp7.com>");
+MODULE_DESCRIPTION("T-HEAD DWMAC platform driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index a5a5cfa989c6..eccf7f537467 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -268,7 +268,7 @@ MODULE_DEVICE_TABLE(of, visconti_eth_dwmac_match);
static struct platform_driver visconti_eth_dwmac_driver = {
.probe = visconti_eth_dwmac_probe,
- .remove_new = visconti_eth_dwmac_remove,
+ .remove = visconti_eth_dwmac_remove,
.driver = {
.name = "visconti-eth-dwmac",
.of_match_table = visconti_eth_dwmac_match,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 4296ddda8aaa..600fea8f712f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -329,5 +329,17 @@ enum rtc_control {
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
#define GMAC_EXTHASH_BASE 0x500
+/* PTP and timestamping registers */
+
+#define GMAC3_X_ATSNS GENMASK(19, 16)
+#define GMAC3_X_ATSNS_SHIFT 16
+
+#define GMAC_PTP_TCR_ATSFC BIT(24)
+#define GMAC_PTP_TCR_ATSEN0 BIT(25)
+
+#define GMAC3_X_TIMESTAMP_STATUS 0x28
+#define GMAC_PTP_ATNR 0x30
+#define GMAC_PTP_ATSR 0x34
+
extern const struct stmmac_dma_ops dwmac1000_dma_ops;
#endif /* __DWMAC1000_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index d413d76a8936..96bcda0856ec 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -18,6 +18,7 @@
#include <linux/io.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
+#include "stmmac_ptp.h"
#include "dwmac1000.h"
static void dwmac1000_core_init(struct mac_device_info *hw,
@@ -551,3 +552,103 @@ int dwmac1000_setup(struct stmmac_priv *priv)
return 0;
}
+
+/* DWMAC 1000 HW Timestamping ops */
+
+void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
+{
+ u64 ns;
+
+ ns = readl(ptpaddr + GMAC_PTP_ATNR);
+ ns += readl(ptpaddr + GMAC_PTP_ATSR) * NSEC_PER_SEC;
+
+ *ptp_time = ns;
+}
+
+void dwmac1000_timestamp_interrupt(struct stmmac_priv *priv)
+{
+ struct ptp_clock_event event;
+ u32 ts_status, num_snapshot;
+ unsigned long flags;
+ u64 ptp_time;
+ int i;
+
+	/* Reading this status register also clears the timestamp interrupt */
+ ts_status = readl(priv->ptpaddr + GMAC3_X_TIMESTAMP_STATUS);
+
+ if (!(priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN))
+ return;
+
+ num_snapshot = (ts_status & GMAC3_X_ATSNS) >> GMAC3_X_ATSNS_SHIFT;
+
+ for (i = 0; i < num_snapshot; i++) {
+ read_lock_irqsave(&priv->ptp_lock, flags);
+ stmmac_get_ptptime(priv, priv->ptpaddr, &ptp_time);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = ptp_time;
+ ptp_clock_event(priv->ptp_clock, &event);
+ }
+}
+
+/* DWMAC 1000 ptp_clock_info ops */
+
+static void dwmac1000_timestamp_interrupt_cfg(struct stmmac_priv *priv, bool en)
+{
+ void __iomem *ioaddr = priv->ioaddr;
+
+ u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
+
+ if (en)
+ intr_mask &= ~GMAC_INT_DISABLE_TIMESTAMP;
+ else
+ intr_mask |= GMAC_INT_DISABLE_TIMESTAMP;
+
+ writel(intr_mask, ioaddr + GMAC_INT_MASK);
+}
+
+int dwmac1000_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ void __iomem *ptpaddr = priv->ptpaddr;
+ int ret = -EOPNOTSUPP;
+ u32 tcr_val;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ mutex_lock(&priv->aux_ts_lock);
+ tcr_val = readl(ptpaddr + PTP_TCR);
+
+ if (on) {
+ tcr_val |= GMAC_PTP_TCR_ATSEN0;
+ tcr_val |= GMAC_PTP_TCR_ATSFC;
+ priv->plat->flags |= STMMAC_FLAG_EXT_SNAPSHOT_EN;
+ } else {
+ tcr_val &= ~GMAC_PTP_TCR_ATSEN0;
+ priv->plat->flags &= ~STMMAC_FLAG_EXT_SNAPSHOT_EN;
+ }
+
+ netdev_dbg(priv->dev, "Auxiliary Snapshot %s.\n",
+ on ? "enabled" : "disabled");
+ writel(tcr_val, ptpaddr + PTP_TCR);
+
+ /* wait for auxts fifo clear to finish */
+ ret = readl_poll_timeout(ptpaddr + PTP_TCR, tcr_val,
+ !(tcr_val & GMAC_PTP_TCR_ATSFC),
+ 10, 10000);
+
+ mutex_unlock(&priv->aux_ts_lock);
+
+ dwmac1000_timestamp_interrupt_cfg(priv, on);
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
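
Note: the auxiliary snapshot read in dwmac1000_get_ptptime() above combines two registers: GMAC_PTP_ATSR holds whole seconds and GMAC_PTP_ATNR the nanosecond remainder. A minimal sketch of the reconstruction, with assumed sample values:

	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000ULL

	int main(void)
	{
		unsigned int atsr = 1700000000;	/* seconds, as read from GMAC_PTP_ATSR */
		unsigned int atnr = 123456789;	/* nanoseconds, from GMAC_PTP_ATNR */
		unsigned long long ns = atnr + (unsigned long long)atsr * NSEC_PER_SEC;

		printf("snapshot = %llu ns\n", ns);	/* 1700000000123456789 */
		return 0;
	}
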
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 93a78fd0737b..184d41a306af 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -44,6 +44,7 @@
#define GMAC_MDIO_DATA 0x00000204
#define GMAC_GPIO_STATUS 0x0000020C
#define GMAC_ARP_ADDR 0x00000210
+#define GMAC_EXT_CFG1 0x00000238
#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8)
#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8)
#define GMAC_L3L4_CTRL(reg) (0x900 + (reg) * 0x30)
@@ -68,7 +69,6 @@
#define GMAC_RXQCTRL_TACPQE BIT(21)
#define GMAC_RXQCTRL_TACPQE_SHIFT 21
#define GMAC_RXQCTRL_FPRQ GENMASK(26, 24)
-#define GMAC_RXQCTRL_FPRQ_SHIFT 24
/* MAC Packet Filtering */
#define GMAC_PACKET_FILTER_PR BIT(0)
@@ -284,6 +284,10 @@ enum power_event {
#define GMAC_HW_FEAT_DVLAN BIT(5)
#define GMAC_HW_FEAT_NRVF GENMASK(2, 0)
+/* MAC extended config 1 */
+#define GMAC_CONFIG1_SAVE_EN BIT(24)
+#define GMAC_CONFIG1_SPLM(v) FIELD_PREP(GENMASK(9, 8), v)
+
/* GMAC GPIO Status reg */
#define GMAC_GPO0 BIT(16)
#define GMAC_GPO1 BIT(17)
@@ -389,8 +393,8 @@ static inline u32 mtl_chanx_base_addr(const struct dwmac4_addrs *addrs,
#define MTL_OP_MODE_EHFC BIT(7)
-#define MTL_OP_MODE_RTC_MASK 0x18
-#define MTL_OP_MODE_RTC_SHIFT 3
+#define MTL_OP_MODE_RTC_MASK GENMASK(1, 0)
+#define MTL_OP_MODE_RTC_SHIFT 0
#define MTL_OP_MODE_RTC_32 (1 << MTL_OP_MODE_RTC_SHIFT)
#define MTL_OP_MODE_RTC_64 0
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index e65a65666cc1..c25781874aa7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include "stmmac.h"
+#include "stmmac_fpe.h"
#include "stmmac_pcs.h"
#include "dwmac4.h"
#include "dwmac5.h"
@@ -1261,11 +1262,6 @@ const struct stmmac_ops dwmac410_ops = {
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
- .fpe_configure = dwmac5_fpe_configure,
- .fpe_send_mpacket = dwmac5_fpe_send_mpacket,
- .fpe_irq_status = dwmac5_fpe_irq_status,
- .fpe_get_add_frag_size = dwmac5_fpe_get_add_frag_size,
- .fpe_set_add_frag_size = dwmac5_fpe_set_add_frag_size,
.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
@@ -1316,11 +1312,6 @@ const struct stmmac_ops dwmac510_ops = {
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
- .fpe_configure = dwmac5_fpe_configure,
- .fpe_send_mpacket = dwmac5_fpe_send_mpacket,
- .fpe_irq_status = dwmac5_fpe_irq_status,
- .fpe_get_add_frag_size = dwmac5_fpe_get_add_frag_size,
- .fpe_set_add_frag_size = dwmac5_fpe_set_add_frag_size,
.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index e99401bcc1f8..a5fb31eb0192 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -118,6 +118,8 @@ static int dwmac4_wrback_get_rx_status(struct stmmac_extra_stats *x,
x->ipv4_pkt_rcvd++;
if (rdes1 & RDES1_IPV6_HEADER)
x->ipv6_pkt_rcvd++;
+ if (rdes1 & RDES1_IP_PAYLOAD_ERROR)
+ x->ip_payload_err++;
if (message_type == RDES_EXT_NO_PTP)
x->no_ptp_rx_msg_type_ext++;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
index 6da070ccd737..1ce6f43d545a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -95,7 +95,7 @@
#define RDES1_IPV4_HEADER BIT(4)
#define RDES1_IPV6_HEADER BIT(5)
#define RDES1_IP_CSUM_BYPASSED BIT(6)
-#define RDES1_IP_CSUM_ERROR BIT(7)
+#define RDES1_IP_PAYLOAD_ERROR BIT(7)
#define RDES1_PTP_MSG_TYPE_MASK GENMASK(11, 8)
#define RDES1_PTP_PACKET_TYPE BIT(12)
#define RDES1_PTP_VER BIT(13)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 77b35abc6f6f..0cb84a0041a4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -274,7 +274,7 @@ static void dwmac4_dma_rx_chan_op_mode(struct stmmac_priv *priv,
} else {
pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
mtl_rx_op &= ~MTL_OP_MODE_RSF;
- mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
+ mtl_rx_op &= ~MTL_OP_MODE_RTC_MASK;
if (mode <= 32)
mtl_rx_op |= MTL_OP_MODE_RTC_32;
else if (mode <= 64)
@@ -343,7 +343,7 @@ static void dwmac4_dma_tx_chan_op_mode(struct stmmac_priv *priv,
} else {
pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
mtl_tx_op &= ~MTL_OP_MODE_TSF;
- mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
+ mtl_tx_op &= ~MTL_OP_MODE_TTC_MASK;
/* Set the transmit threshold */
if (mode <= 32)
mtl_tx_op |= MTL_OP_MODE_TTC_32;
@@ -534,6 +534,11 @@ static void dwmac4_enable_sph(struct stmmac_priv *priv, void __iomem *ioaddr,
value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
writel(value, ioaddr + GMAC_EXT_CONFIG);
+ value = readl(ioaddr + GMAC_EXT_CFG1);
+ value |= GMAC_CONFIG1_SPLM(1); /* Split mode set to L2OFST */
+ value |= GMAC_CONFIG1_SAVE_EN; /* Enable Split AV mode */
+ writel(value, ioaddr + GMAC_EXT_CFG1);
+
value = readl(ioaddr + DMA_CHAN_CONTROL(dwmac4_addrs, chan));
if (en)
value |= DMA_CONTROL_SPH;
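
Note: the two one-character RTC/TTC fixes above correct an inverted mask: the old code and-ed the register with the mask itself, which wiped every bit outside the threshold field instead of clearing the field. A standalone sketch with illustrative values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int reg  = 0xa5;	/* illustrative register contents */
		unsigned int mask = 0x03;	/* MTL_OP_MODE_RTC_MASK, bits 1:0 */

		reg &= ~mask;		/* correct: clears only the RTC field */
		reg |= 0x01;		/* then program the threshold code */
		printf("0x%02x\n", reg);	/* 0xa5: other bits preserved */

		reg = 0xa5;
		reg &= mask;		/* the old bug: keeps only the RTC field */
		printf("0x%02x\n", reg);	/* 0x01: everything else wiped */
		return 0;
	}
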
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 0d185e54eb7e..57c03d491774 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -185,8 +185,6 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
x->rx_buf_unav_irq++;
if (unlikely(intr_status & DMA_CHAN_STATUS_RPS))
x->rx_process_stopped_irq++;
- if (unlikely(intr_status & DMA_CHAN_STATUS_RWT))
- x->rx_watchdog_irq++;
if (unlikely(intr_status & DMA_CHAN_STATUS_ETI))
x->tx_early_irq++;
if (unlikely(intr_status & DMA_CHAN_STATUS_TPS)) {
@@ -198,6 +196,10 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
ret = tx_hard_error;
}
}
+
+ if (unlikely(intr_status & DMA_CHAN_STATUS_RWT))
+ x->rx_watchdog_irq++;
+
/* TX/RX NORMAL interrupts */
if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
u64_stats_update_begin(&stats->syncp);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 08add508db84..1c431b918719 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -572,153 +572,3 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
writel(val, ioaddr + MAC_PPS_CONTROL);
return 0;
}
-
-void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
- u32 num_txq, u32 num_rxq,
- bool tx_enable, bool pmac_enable)
-{
- u32 value;
-
- if (tx_enable) {
- cfg->fpe_csr = EFPE;
- value = readl(ioaddr + GMAC_RXQ_CTRL1);
- value &= ~GMAC_RXQCTRL_FPRQ;
- value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;
- writel(value, ioaddr + GMAC_RXQ_CTRL1);
- } else {
- cfg->fpe_csr = 0;
- }
- writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS);
-
- value = readl(ioaddr + GMAC_INT_EN);
-
- if (pmac_enable) {
- if (!(value & GMAC_INT_FPE_EN)) {
- /* Dummy read to clear any pending masked interrupts */
- readl(ioaddr + MAC_FPE_CTRL_STS);
-
- value |= GMAC_INT_FPE_EN;
- }
- } else {
- value &= ~GMAC_INT_FPE_EN;
- }
-
- writel(value, ioaddr + GMAC_INT_EN);
-}
-
-int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
-{
- u32 value;
- int status;
-
- status = FPE_EVENT_UNKNOWN;
-
- /* Reads from the MAC_FPE_CTRL_STS register should only be performed
- * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read"
- */
- value = readl(ioaddr + MAC_FPE_CTRL_STS);
-
- if (value & TRSP) {
- status |= FPE_EVENT_TRSP;
- netdev_dbg(dev, "FPE: Respond mPacket is transmitted\n");
- }
-
- if (value & TVER) {
- status |= FPE_EVENT_TVER;
- netdev_dbg(dev, "FPE: Verify mPacket is transmitted\n");
- }
-
- if (value & RRSP) {
- status |= FPE_EVENT_RRSP;
- netdev_dbg(dev, "FPE: Respond mPacket is received\n");
- }
-
- if (value & RVER) {
- status |= FPE_EVENT_RVER;
- netdev_dbg(dev, "FPE: Verify mPacket is received\n");
- }
-
- return status;
-}
-
-void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
- enum stmmac_mpacket_type type)
-{
- u32 value = cfg->fpe_csr;
-
- if (type == MPACKET_VERIFY)
- value |= SVER;
- else if (type == MPACKET_RESPONSE)
- value |= SRSP;
-
- writel(value, ioaddr + MAC_FPE_CTRL_STS);
-}
-
-int dwmac5_fpe_get_add_frag_size(const void __iomem *ioaddr)
-{
- return FIELD_GET(DWMAC5_ADD_FRAG_SZ, readl(ioaddr + MTL_FPE_CTRL_STS));
-}
-
-void dwmac5_fpe_set_add_frag_size(void __iomem *ioaddr, u32 add_frag_size)
-{
- u32 value;
-
- value = readl(ioaddr + MTL_FPE_CTRL_STS);
- writel(u32_replace_bits(value, add_frag_size, DWMAC5_ADD_FRAG_SZ),
- ioaddr + MTL_FPE_CTRL_STS);
-}
-
-#define ALG_ERR_MSG "TX algorithm SP is not suitable for one-to-many mapping"
-#define WEIGHT_ERR_MSG "TXQ weight %u differs across other TXQs in TC: [%u]"
-
-int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
- struct netlink_ext_ack *extack, u32 pclass)
-{
- u32 val, offset, count, queue_weight, preemptible_txqs = 0;
- struct stmmac_priv *priv = netdev_priv(ndev);
- u32 num_tc = ndev->num_tc;
-
- if (!pclass)
- goto update_mapping;
-
- /* DWMAC CORE4+ can not program TC:TXQ mapping to hardware.
- *
- * Synopsys Databook:
- * "The number of Tx DMA channels is equal to the number of Tx queues,
- * and is direct one-to-one mapping."
- */
- for (u32 tc = 0; tc < num_tc; tc++) {
- count = ndev->tc_to_txq[tc].count;
- offset = ndev->tc_to_txq[tc].offset;
-
- if (pclass & BIT(tc))
- preemptible_txqs |= GENMASK(offset + count - 1, offset);
-
- /* This is 1:1 mapping, go to next TC */
- if (count == 1)
- continue;
-
- if (priv->plat->tx_sched_algorithm == MTL_TX_ALGORITHM_SP) {
- NL_SET_ERR_MSG_MOD(extack, ALG_ERR_MSG);
- return -EINVAL;
- }
-
- queue_weight = priv->plat->tx_queues_cfg[offset].weight;
-
- for (u32 i = 1; i < count; i++) {
- if (priv->plat->tx_queues_cfg[offset + i].weight !=
- queue_weight) {
- NL_SET_ERR_MSG_FMT_MOD(extack, WEIGHT_ERR_MSG,
- queue_weight, tc);
- return -EINVAL;
- }
- }
- }
-
-update_mapping:
- val = readl(priv->ioaddr + MTL_FPE_CTRL_STS);
- writel(u32_replace_bits(val, preemptible_txqs, DWMAC5_PREEMPTION_CLASS),
- priv->ioaddr + MTL_FPE_CTRL_STS);
-
- return 0;
-}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index 6c6eb6790e83..00b151b3b688 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -11,15 +11,6 @@
#define PRTYEN BIT(1)
#define TMOUTEN BIT(0)
-#define MAC_FPE_CTRL_STS 0x00000234
-#define TRSP BIT(19)
-#define TVER BIT(18)
-#define RRSP BIT(17)
-#define RVER BIT(16)
-#define SRSP BIT(2)
-#define SVER BIT(1)
-#define EFPE BIT(0)
-
#define MAC_PPS_CONTROL 0x00000b70
#define PPS_MAXIDX(x) ((((x) + 1) * 8) - 1)
#define PPS_MINIDX(x) ((x) * 8)
@@ -39,12 +30,6 @@
#define MAC_PPSx_INTERVAL(x) (0x00000b88 + ((x) * 0x10))
#define MAC_PPSx_WIDTH(x) (0x00000b8c + ((x) * 0x10))
-#define MTL_FPE_CTRL_STS 0x00000c90
-/* Preemption Classification */
-#define DWMAC5_PREEMPTION_CLASS GENMASK(15, 8)
-/* Additional Fragment Size of preempted frames */
-#define DWMAC5_ADD_FRAG_SZ GENMASK(1, 0)
-
#define MTL_RXP_CONTROL_STATUS 0x00000ca0
#define RXPI BIT(31)
#define NPE GENMASK(23, 16)
@@ -108,16 +93,5 @@ int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
struct stmmac_pps_cfg *cfg, bool enable,
u32 sub_second_inc, u32 systime_flags);
-void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
- u32 num_txq, u32 num_rxq,
- bool tx_enable, bool pmac_enable);
-void dwmac5_fpe_send_mpacket(void __iomem *ioaddr,
- struct stmmac_fpe_cfg *cfg,
- enum stmmac_mpacket_type type);
-int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev);
-int dwmac5_fpe_get_add_frag_size(const void __iomem *ioaddr);
-void dwmac5_fpe_set_add_frag_size(void __iomem *ioaddr, u32 add_frag_size);
-int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
- struct netlink_ext_ack *extack, u32 pclass);
#endif /* __DWMAC5_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 6a2c7d22df1e..a04a79003692 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -84,8 +84,7 @@
#define XGMAC_MCBCQEN BIT(15)
#define XGMAC_MCBCQ GENMASK(11, 8)
#define XGMAC_MCBCQ_SHIFT 8
-#define XGMAC_RQ GENMASK(7, 4)
-#define XGMAC_RQ_SHIFT 4
+#define XGMAC_FPRQ GENMASK(7, 4)
#define XGMAC_UPQ GENMASK(3, 0)
#define XGMAC_UPQ_SHIFT 0
#define XGMAC_RXQ_CTRL2 0x000000a8
@@ -96,6 +95,7 @@
#define XGMAC_LPIIS BIT(5)
#define XGMAC_PMTIS BIT(4)
#define XGMAC_INT_EN 0x000000b4
+#define XGMAC_FPEIE BIT(15)
#define XGMAC_TSIE BIT(12)
#define XGMAC_LPIIE BIT(5)
#define XGMAC_PMTIE BIT(4)
@@ -193,8 +193,6 @@
#define XGMAC_MDIO_ADDR 0x00000200
#define XGMAC_MDIO_DATA 0x00000204
#define XGMAC_MDIO_C22P 0x00000220
-#define XGMAC_FPE_CTRL_STS 0x00000280
-#define XGMAC_EFPE BIT(0)
#define XGMAC_ADDRx_HIGH(x) (0x00000300 + (x) * 0x8)
#define XGMAC_ADDR_MAX 32
#define XGMAC_AE BIT(31)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index f519d43738b0..9a60a6e8f633 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -8,6 +8,7 @@
#include <linux/crc32.h>
#include <linux/iopoll.h>
#include "stmmac.h"
+#include "stmmac_fpe.h"
#include "stmmac_ptp.h"
#include "dwxlgmac2.h"
#include "dwxgmac2.h"
@@ -1504,32 +1505,6 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
writel(value, ioaddr + XGMAC_RX_CONFIG);
}
-static void dwxgmac3_fpe_configure(void __iomem *ioaddr,
- struct stmmac_fpe_cfg *cfg,
- u32 num_txq, u32 num_rxq,
- bool tx_enable, bool pmac_enable)
-{
- u32 value;
-
- if (!tx_enable) {
- value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
-
- value &= ~XGMAC_EFPE;
-
- writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
- return;
- }
-
- value = readl(ioaddr + XGMAC_RXQ_CTRL1);
- value &= ~XGMAC_RQ;
- value |= (num_rxq - 1) << XGMAC_RQ_SHIFT;
- writel(value, ioaddr + XGMAC_RXQ_CTRL1);
-
- value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
- value |= XGMAC_EFPE;
- writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
-}
-
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
.set_mac = dwxgmac2_set_mac,
@@ -1570,7 +1545,7 @@ const struct stmmac_ops dwxgmac210_ops = {
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
- .fpe_configure = dwxgmac3_fpe_configure,
+ .fpe_map_preemption_class = dwxgmac3_fpe_map_preemption_class,
};
static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
@@ -1627,7 +1602,7 @@ const struct stmmac_ops dwxlgmac2_ops = {
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
- .fpe_configure = dwxgmac3_fpe_configure,
+ .fpe_map_preemption_class = dwxgmac3_fpe_map_preemption_class,
};
int dwxgmac2_setup(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 88cce28b2f98..a72d336a8350 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -6,6 +6,7 @@
#include "common.h"
#include "stmmac.h"
+#include "stmmac_fpe.h"
#include "stmmac_ptp.h"
#include "stmmac_est.h"
@@ -112,6 +113,7 @@ static const struct stmmac_hwif_entry {
const void *dma;
const void *mac;
const void *hwtimestamp;
+ const void *ptp;
const void *mode;
const void *tc;
const void *mmc;
@@ -132,7 +134,8 @@ static const struct stmmac_hwif_entry {
.desc = NULL,
.dma = &dwmac100_dma_ops,
.mac = &dwmac100_ops,
- .hwtimestamp = &stmmac_ptp,
+ .hwtimestamp = &dwmac1000_ptp,
+ .ptp = &dwmac1000_ptp_clock_ops,
.mode = NULL,
.tc = NULL,
.mmc = &dwmac_mmc_ops,
@@ -150,7 +153,8 @@ static const struct stmmac_hwif_entry {
.desc = NULL,
.dma = &dwmac1000_dma_ops,
.mac = &dwmac1000_ops,
- .hwtimestamp = &stmmac_ptp,
+ .hwtimestamp = &dwmac1000_ptp,
+ .ptp = &dwmac1000_ptp_clock_ops,
.mode = NULL,
.tc = NULL,
.mmc = &dwmac_mmc_ops,
@@ -170,6 +174,7 @@ static const struct stmmac_hwif_entry {
.dma = &dwmac4_dma_ops,
.mac = &dwmac4_ops,
.hwtimestamp = &stmmac_ptp,
+ .ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
.tc = &dwmac4_tc_ops,
.mmc = &dwmac_mmc_ops,
@@ -185,11 +190,13 @@ static const struct stmmac_hwif_entry {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
.est_off = EST_GMAC4_OFFSET,
+ .fpe_reg = &dwmac5_fpe_reg,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac4_dma_ops,
.mac = &dwmac410_ops,
.hwtimestamp = &stmmac_ptp,
+ .ptp = &stmmac_ptp_clock_ops,
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
@@ -205,11 +212,13 @@ static const struct stmmac_hwif_entry {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
.est_off = EST_GMAC4_OFFSET,
+ .fpe_reg = &dwmac5_fpe_reg,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
.mac = &dwmac410_ops,
.hwtimestamp = &stmmac_ptp,
+ .ptp = &stmmac_ptp_clock_ops,
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
@@ -225,11 +234,13 @@ static const struct stmmac_hwif_entry {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
.est_off = EST_GMAC4_OFFSET,
+ .fpe_reg = &dwmac5_fpe_reg,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
.mac = &dwmac510_ops,
.hwtimestamp = &stmmac_ptp,
+ .ptp = &stmmac_ptp_clock_ops,
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
@@ -246,11 +257,13 @@ static const struct stmmac_hwif_entry {
.ptp_off = PTP_XGMAC_OFFSET,
.mmc_off = MMC_XGMAC_OFFSET,
.est_off = EST_XGMAC_OFFSET,
+ .fpe_reg = &dwxgmac3_fpe_reg,
},
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
.mac = &dwxgmac210_ops,
.hwtimestamp = &stmmac_ptp,
+ .ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
.tc = &dwxgmac_tc_ops,
.mmc = &dwxgmac_mmc_ops,
@@ -267,11 +280,13 @@ static const struct stmmac_hwif_entry {
.ptp_off = PTP_XGMAC_OFFSET,
.mmc_off = MMC_XGMAC_OFFSET,
.est_off = EST_XGMAC_OFFSET,
+ .fpe_reg = &dwxgmac3_fpe_reg,
},
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
.mac = &dwxlgmac2_ops,
.hwtimestamp = &stmmac_ptp,
+ .ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
.tc = &dwxgmac_tc_ops,
.mmc = &dwxgmac_mmc_ops,
@@ -353,8 +368,11 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
mac->est = mac->est ? : entry->est;
priv->hw = mac;
+ priv->fpe_cfg.reg = entry->regs.fpe_reg;
priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off;
priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off;
+ memcpy(&priv->ptp_clock_ops, entry->ptp,
+ sizeof(struct ptp_clock_info));
if (entry->est)
priv->estaddr = priv->ioaddr + entry->regs.est_off;
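
Note: copying entry->ptp into priv->ptp_clock_ops, rather than pointing at the shared const table, gives every device instance a writable ptp_clock_info, so per-device fields can be adjusted before registration. A hedged sketch of the idea (fragment; field choices illustrative, along the lines of what stmmac_ptp_register() does with the copy):

	/* each priv owns its copy, so tweaks do not leak to other devices */
	priv->ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n;
	priv->ptp_clock_ops.max_adj = 65535;
	priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
					     priv->device);
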
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index d5a9f01ecac5..64f8ed67dcc4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -420,15 +420,6 @@ struct stmmac_ops {
bool en, bool udp, bool sa, bool inv,
u32 match);
void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr);
- void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
- u32 num_txq, u32 num_rxq,
- bool tx_enable, bool pmac_enable);
- void (*fpe_send_mpacket)(void __iomem *ioaddr,
- struct stmmac_fpe_cfg *cfg,
- enum stmmac_mpacket_type type);
- int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev);
- int (*fpe_get_add_frag_size)(const void __iomem *ioaddr);
- void (*fpe_set_add_frag_size)(void __iomem *ioaddr, u32 add_frag_size);
int (*fpe_map_preemption_class)(struct net_device *ndev,
struct netlink_ext_ack *extack,
u32 pclass);
@@ -530,16 +521,6 @@ struct stmmac_ops {
stmmac_do_callback(__priv, mac, config_l4_filter, __args)
#define stmmac_set_arp_offload(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_arp_offload, __args)
-#define stmmac_fpe_configure(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, fpe_configure, __args)
-#define stmmac_fpe_send_mpacket(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, fpe_send_mpacket, __args)
-#define stmmac_fpe_irq_status(__priv, __args...) \
- stmmac_do_callback(__priv, mac, fpe_irq_status, __args)
-#define stmmac_fpe_get_add_frag_size(__priv, __args...) \
- stmmac_do_callback(__priv, mac, fpe_get_add_frag_size, __args)
-#define stmmac_fpe_set_add_frag_size(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, fpe_set_add_frag_size, __args)
#define stmmac_fpe_map_preemption_class(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, fpe_map_preemption_class, __args)
@@ -678,6 +659,7 @@ struct stmmac_est_ops {
stmmac_do_void_callback(__priv, est, irq_status, __args)
struct stmmac_regs_off {
+ const struct stmmac_fpe_reg *fpe_reg;
u32 ptp_off;
u32 mmc_off;
u32 est_off;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index ea135203ff2e..1d86439b8a14 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -146,21 +146,13 @@ struct stmmac_channel {
u32 index;
};
-/* FPE link-partner hand-shaking mPacket type */
-enum stmmac_mpacket_type {
- MPACKET_VERIFY = 0,
- MPACKET_RESPONSE = 1,
-};
-
-#define STMMAC_FPE_MM_MAX_VERIFY_RETRIES 3
-#define STMMAC_FPE_MM_MAX_VERIFY_TIME_MS 128
-
struct stmmac_fpe_cfg {
/* Serialize access to MAC Merge state between ethtool requests
* and link state updates.
*/
spinlock_t lock;
+ const struct stmmac_fpe_reg *reg;
u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
enum ethtool_mm_verify_status status;
@@ -420,7 +412,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv);
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
-void stmmac_fpe_apply(struct stmmac_priv *priv);
static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 2a37592a6281..1d77389ce953 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -17,9 +17,9 @@
#include <linux/net_tstamp.h>
#include "stmmac.h"
+#include "stmmac_fpe.h"
#include "dwmac_dma.h"
#include "dwxgmac2.h"
-#include "dwmac5.h"
#define REG_SPACE_SIZE 0x1060
#define GMAC4_REG_SPACE_SIZE 0x116C
@@ -1271,7 +1271,7 @@ static int stmmac_get_mm(struct net_device *ndev,
unsigned long flags;
u32 frag_size;
- if (!priv->dma_cap.fpesel)
+ if (!stmmac_fpe_supported(priv))
return -EOPNOTSUPP;
spin_lock_irqsave(&priv->fpe_cfg.lock, flags);
@@ -1294,7 +1294,7 @@ static int stmmac_get_mm(struct net_device *ndev,
else
state->tx_active = false;
- frag_size = stmmac_fpe_get_add_frag_size(priv, priv->ioaddr);
+ frag_size = stmmac_fpe_get_add_frag_size(priv);
state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(frag_size);
spin_unlock_irqrestore(&priv->fpe_cfg.lock, flags);
@@ -1329,7 +1329,7 @@ static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
if (!cfg->verify_enabled)
fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
- stmmac_fpe_set_add_frag_size(priv, priv->ioaddr, frag_size);
+ stmmac_fpe_set_add_frag_size(priv, frag_size);
stmmac_fpe_apply(priv);
spin_unlock_irqrestore(&fpe_cfg->lock, flags);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
new file mode 100644
index 000000000000..3a4bee029c7f
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Furong Xu <0x1207@gmail.com>
+ * stmmac FPE(802.3 Qbu) handling
+ */
+#include "stmmac.h"
+#include "stmmac_fpe.h"
+#include "dwmac4.h"
+#include "dwmac5.h"
+#include "dwxgmac2.h"
+
+#define GMAC5_MAC_FPE_CTRL_STS 0x00000234
+#define XGMAC_MAC_FPE_CTRL_STS 0x00000280
+
+#define GMAC5_MTL_FPE_CTRL_STS 0x00000c90
+#define XGMAC_MTL_FPE_CTRL_STS 0x00001090
+/* Preemption Classification */
+#define FPE_MTL_PREEMPTION_CLASS GENMASK(15, 8)
+/* Additional Fragment Size of preempted frames */
+#define FPE_MTL_ADD_FRAG_SZ GENMASK(1, 0)
+
+#define STMMAC_MAC_FPE_CTRL_STS_TRSP BIT(19)
+#define STMMAC_MAC_FPE_CTRL_STS_TVER BIT(18)
+#define STMMAC_MAC_FPE_CTRL_STS_RRSP BIT(17)
+#define STMMAC_MAC_FPE_CTRL_STS_RVER BIT(16)
+#define STMMAC_MAC_FPE_CTRL_STS_SRSP BIT(2)
+#define STMMAC_MAC_FPE_CTRL_STS_SVER BIT(1)
+#define STMMAC_MAC_FPE_CTRL_STS_EFPE BIT(0)
+
+/* FPE link-partner hand-shaking mPacket type */
+enum stmmac_mpacket_type {
+ MPACKET_VERIFY = 0,
+ MPACKET_RESPONSE = 1,
+};
+
+struct stmmac_fpe_reg {
+ const u32 mac_fpe_reg; /* offset of MAC_FPE_CTRL_STS */
+ const u32 mtl_fpe_reg; /* offset of MTL_FPE_CTRL_STS */
+ const u32 rxq_ctrl1_reg; /* offset of MAC_RxQ_Ctrl1 */
+ const u32 fprq_mask; /* Frame Preemption Residue Queue */
+ const u32 int_en_reg; /* offset of MAC_Interrupt_Enable */
+ const u32 int_en_bit; /* Frame Preemption Interrupt Enable */
+};
+
+bool stmmac_fpe_supported(struct stmmac_priv *priv)
+{
+ return priv->dma_cap.fpesel && priv->fpe_cfg.reg &&
+ priv->hw->mac->fpe_map_preemption_class;
+}
+
+static void stmmac_fpe_configure(struct stmmac_priv *priv, bool tx_enable,
+ bool pmac_enable)
+{
+ struct stmmac_fpe_cfg *cfg = &priv->fpe_cfg;
+ const struct stmmac_fpe_reg *reg = cfg->reg;
+ u32 num_rxq = priv->plat->rx_queues_to_use;
+ void __iomem *ioaddr = priv->ioaddr;
+ u32 value;
+
+ if (tx_enable) {
+ cfg->fpe_csr = STMMAC_MAC_FPE_CTRL_STS_EFPE;
+ value = readl(ioaddr + reg->rxq_ctrl1_reg);
+ value &= ~reg->fprq_mask;
+ /* Keep this SHIFT, FIELD_PREP() expects a constant mask :-/ */
+ value |= (num_rxq - 1) << __ffs(reg->fprq_mask);
+ writel(value, ioaddr + reg->rxq_ctrl1_reg);
+ } else {
+ cfg->fpe_csr = 0;
+ }
+ writel(cfg->fpe_csr, ioaddr + reg->mac_fpe_reg);
+
+ value = readl(ioaddr + reg->int_en_reg);
+
+ if (pmac_enable) {
+ if (!(value & reg->int_en_bit)) {
+ /* Dummy read to clear any pending masked interrupts */
+ readl(ioaddr + reg->mac_fpe_reg);
+
+ value |= reg->int_en_bit;
+ }
+ } else {
+ value &= ~reg->int_en_bit;
+ }
+
+ writel(value, ioaddr + reg->int_en_reg);
+}
+
+static void stmmac_fpe_send_mpacket(struct stmmac_priv *priv,
+ enum stmmac_mpacket_type type)
+{
+ const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
+ void __iomem *ioaddr = priv->ioaddr;
+ u32 value = priv->fpe_cfg.fpe_csr;
+
+ if (type == MPACKET_VERIFY)
+ value |= STMMAC_MAC_FPE_CTRL_STS_SVER;
+ else if (type == MPACKET_RESPONSE)
+ value |= STMMAC_MAC_FPE_CTRL_STS_SRSP;
+
+ writel(value, ioaddr + reg->mac_fpe_reg);
+}
+
+static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
+{
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+
+ /* This is interrupt context, just spin_lock() */
+ spin_lock(&fpe_cfg->lock);
+
+ if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN)
+ goto unlock_out;
+
+ /* LP has sent verify mPacket */
+ if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER)
+ stmmac_fpe_send_mpacket(priv, MPACKET_RESPONSE);
+
+ /* Local has sent verify mPacket */
+ if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER &&
+ fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED)
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
+
+ /* LP has sent response mPacket */
+ if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP &&
+ fpe_cfg->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING)
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
+
+unlock_out:
+ spin_unlock(&fpe_cfg->lock);
+}
+
+void stmmac_fpe_irq_status(struct stmmac_priv *priv)
+{
+ const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
+ void __iomem *ioaddr = priv->ioaddr;
+ struct net_device *dev = priv->dev;
+ int status = FPE_EVENT_UNKNOWN;
+ u32 value;
+
+ /* Reads from the MAC_FPE_CTRL_STS register should only be performed
+ * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read"
+ */
+ value = readl(ioaddr + reg->mac_fpe_reg);
+
+ if (value & STMMAC_MAC_FPE_CTRL_STS_TRSP) {
+ status |= FPE_EVENT_TRSP;
+ netdev_dbg(dev, "FPE: Respond mPacket is transmitted\n");
+ }
+
+ if (value & STMMAC_MAC_FPE_CTRL_STS_TVER) {
+ status |= FPE_EVENT_TVER;
+ netdev_dbg(dev, "FPE: Verify mPacket is transmitted\n");
+ }
+
+ if (value & STMMAC_MAC_FPE_CTRL_STS_RRSP) {
+ status |= FPE_EVENT_RRSP;
+ netdev_dbg(dev, "FPE: Respond mPacket is received\n");
+ }
+
+ if (value & STMMAC_MAC_FPE_CTRL_STS_RVER) {
+ status |= FPE_EVENT_RVER;
+ netdev_dbg(dev, "FPE: Verify mPacket is received\n");
+ }
+
+ stmmac_fpe_event_status(priv, status);
+}
+
+/**
+ * stmmac_fpe_verify_timer - Timer for MAC Merge verification
+ * @t: timer_list struct containing private info
+ *
+ * Verify the MAC Merge capability in the local TX direction by
+ * transmitting Verify mPackets up to 3 times. Wait until the link
+ * partner responds with a Response mPacket, otherwise fail.
+ */
+static void stmmac_fpe_verify_timer(struct timer_list *t)
+{
+ struct stmmac_fpe_cfg *fpe_cfg = from_timer(fpe_cfg, t, verify_timer);
+ struct stmmac_priv *priv = container_of(fpe_cfg, struct stmmac_priv,
+ fpe_cfg);
+ unsigned long flags;
+ bool rearm = false;
+
+ spin_lock_irqsave(&fpe_cfg->lock, flags);
+
+ switch (fpe_cfg->status) {
+ case ETHTOOL_MM_VERIFY_STATUS_INITIAL:
+ case ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
+ if (fpe_cfg->verify_retries != 0) {
+ stmmac_fpe_send_mpacket(priv, MPACKET_VERIFY);
+ rearm = true;
+ } else {
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
+ }
+
+ fpe_cfg->verify_retries--;
+ break;
+
+ case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
+ stmmac_fpe_configure(priv, true, true);
+ break;
+
+ default:
+ break;
+ }
+
+ if (rearm) {
+ mod_timer(&fpe_cfg->verify_timer,
+ jiffies + msecs_to_jiffies(fpe_cfg->verify_time));
+ }
+
+ spin_unlock_irqrestore(&fpe_cfg->lock, flags);
+}
+
+static void stmmac_fpe_verify_timer_arm(struct stmmac_fpe_cfg *fpe_cfg)
+{
+ if (fpe_cfg->pmac_enabled && fpe_cfg->tx_enabled &&
+ fpe_cfg->verify_enabled &&
+ fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED &&
+ fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) {
+ timer_setup(&fpe_cfg->verify_timer, stmmac_fpe_verify_timer, 0);
+ mod_timer(&fpe_cfg->verify_timer, jiffies);
+ }
+}
+
+void stmmac_fpe_init(struct stmmac_priv *priv)
+{
+ priv->fpe_cfg.verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
+ priv->fpe_cfg.verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
+ priv->fpe_cfg.status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+ timer_setup(&priv->fpe_cfg.verify_timer, stmmac_fpe_verify_timer, 0);
+ spin_lock_init(&priv->fpe_cfg.lock);
+
+ if ((!priv->fpe_cfg.reg || !priv->hw->mac->fpe_map_preemption_class) &&
+ priv->dma_cap.fpesel)
+		dev_info(priv->device, "FPE is not supported by the driver.\n");
+}
+
+void stmmac_fpe_apply(struct stmmac_priv *priv)
+{
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+
+ /* If verification is disabled, configure FPE right away.
+ * Otherwise let the timer code do it.
+ */
+ if (!fpe_cfg->verify_enabled) {
+ stmmac_fpe_configure(priv, fpe_cfg->tx_enabled,
+ fpe_cfg->pmac_enabled);
+ } else {
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL;
+ fpe_cfg->verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
+
+ if (netif_running(priv->dev))
+ stmmac_fpe_verify_timer_arm(fpe_cfg);
+ }
+}
+
+void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
+{
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+ unsigned long flags;
+
+ timer_shutdown_sync(&fpe_cfg->verify_timer);
+
+ spin_lock_irqsave(&fpe_cfg->lock, flags);
+
+ if (is_up && fpe_cfg->pmac_enabled) {
+ /* VERIFY process requires pmac enabled when NIC comes up */
+ stmmac_fpe_configure(priv, false, true);
+
+ /* New link => maybe new partner => new verification process */
+ stmmac_fpe_apply(priv);
+ } else {
+ /* No link => turn off EFPE */
+ stmmac_fpe_configure(priv, false, false);
+ }
+
+ spin_unlock_irqrestore(&fpe_cfg->lock, flags);
+}
+
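+/* IEEE 802.3br addFragSize: the minimum size of a non-final fragment is
+ * 64 * (1 + addFragSize) octets, so values 0..3 select 64/128/192/256.
+ */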
+int stmmac_fpe_get_add_frag_size(struct stmmac_priv *priv)
+{
+ const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
+ void __iomem *ioaddr = priv->ioaddr;
+
+ return FIELD_GET(FPE_MTL_ADD_FRAG_SZ, readl(ioaddr + reg->mtl_fpe_reg));
+}
+
+void stmmac_fpe_set_add_frag_size(struct stmmac_priv *priv, u32 add_frag_size)
+{
+ const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
+ void __iomem *ioaddr = priv->ioaddr;
+ u32 value;
+
+ value = readl(ioaddr + reg->mtl_fpe_reg);
+ writel(u32_replace_bits(value, add_frag_size, FPE_MTL_ADD_FRAG_SZ),
+ ioaddr + reg->mtl_fpe_reg);
+}
+
+#define ALG_ERR_MSG "TX algorithm SP is not suitable for one-to-many mapping"
+#define WEIGHT_ERR_MSG "TXQ weight %u differs across other TXQs in TC: [%u]"
+
+int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
+ struct netlink_ext_ack *extack, u32 pclass)
+{
+ u32 val, offset, count, queue_weight, preemptible_txqs = 0;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int num_tc = netdev_get_num_tc(ndev);
+
+ if (!pclass)
+ goto update_mapping;
+
+	/* DWMAC CORE4+ cannot program the TC:TXQ mapping to hardware.
+ *
+ * Synopsys Databook:
+ * "The number of Tx DMA channels is equal to the number of Tx queues,
+ * and is direct one-to-one mapping."
+ */
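+	/* Worked example (hypothetical mqprio layout): tc_to_txq[1] =
+	 * {.offset = 2, .count = 3} with pclass bit 1 set marks TXQs 2..4
+	 * preemptible, i.e. GENMASK(4, 2) == 0x1c below.
+	 */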
+ for (u32 tc = 0; tc < num_tc; tc++) {
+ count = ndev->tc_to_txq[tc].count;
+ offset = ndev->tc_to_txq[tc].offset;
+
+ if (pclass & BIT(tc))
+ preemptible_txqs |= GENMASK(offset + count - 1, offset);
+
+		/* This is a 1:1 mapping, nothing to validate; go to next TC */
+ if (count == 1)
+ continue;
+
+ if (priv->plat->tx_sched_algorithm == MTL_TX_ALGORITHM_SP) {
+ NL_SET_ERR_MSG_MOD(extack, ALG_ERR_MSG);
+ return -EINVAL;
+ }
+
+ queue_weight = priv->plat->tx_queues_cfg[offset].weight;
+
+ for (u32 i = 1; i < count; i++) {
+ if (priv->plat->tx_queues_cfg[offset + i].weight !=
+ queue_weight) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, WEIGHT_ERR_MSG,
+ queue_weight, tc);
+ return -EINVAL;
+ }
+ }
+ }
+
+update_mapping:
+ val = readl(priv->ioaddr + GMAC5_MTL_FPE_CTRL_STS);
+ writel(u32_replace_bits(val, preemptible_txqs, FPE_MTL_PREEMPTION_CLASS),
+ priv->ioaddr + GMAC5_MTL_FPE_CTRL_STS);
+
+ return 0;
+}
+
+int dwxgmac3_fpe_map_preemption_class(struct net_device *ndev,
+ struct netlink_ext_ack *extack, u32 pclass)
+{
+ u32 val, offset, count, preemptible_txqs = 0;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int num_tc = netdev_get_num_tc(ndev);
+
+ if (!num_tc) {
+ /* Restore default TC:Queue mapping */
+ for (u32 i = 0; i < priv->plat->tx_queues_to_use; i++) {
+ val = readl(priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(i));
+ writel(u32_replace_bits(val, i, XGMAC_Q2TCMAP),
+ priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(i));
+ }
+ }
+
+ /* Synopsys Databook:
+ * "All Queues within a traffic class are selected in a round robin
+ * fashion (when packets are available) when the traffic class is
+ * selected by the scheduler for packet transmission. This is true for
+ * any of the scheduling algorithms."
+ */
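+	/* Unlike DWMAC core 4+, XGMAC exposes a per-queue Q2TCMAP field,
+	 * so the TC:TXQ mapping below is also programmed into hardware.
+	 */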
+ for (u32 tc = 0; tc < num_tc; tc++) {
+ count = ndev->tc_to_txq[tc].count;
+ offset = ndev->tc_to_txq[tc].offset;
+
+ if (pclass & BIT(tc))
+ preemptible_txqs |= GENMASK(offset + count - 1, offset);
+
+ for (u32 i = 0; i < count; i++) {
+ val = readl(priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(offset + i));
+ writel(u32_replace_bits(val, tc, XGMAC_Q2TCMAP),
+ priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(offset + i));
+ }
+ }
+
+ val = readl(priv->ioaddr + XGMAC_MTL_FPE_CTRL_STS);
+ writel(u32_replace_bits(val, preemptible_txqs, FPE_MTL_PREEMPTION_CLASS),
+ priv->ioaddr + XGMAC_MTL_FPE_CTRL_STS);
+
+ return 0;
+}
+
+const struct stmmac_fpe_reg dwmac5_fpe_reg = {
+ .mac_fpe_reg = GMAC5_MAC_FPE_CTRL_STS,
+ .mtl_fpe_reg = GMAC5_MTL_FPE_CTRL_STS,
+ .rxq_ctrl1_reg = GMAC_RXQ_CTRL1,
+ .fprq_mask = GMAC_RXQCTRL_FPRQ,
+ .int_en_reg = GMAC_INT_EN,
+ .int_en_bit = GMAC_INT_FPE_EN,
+};
+
+const struct stmmac_fpe_reg dwxgmac3_fpe_reg = {
+ .mac_fpe_reg = XGMAC_MAC_FPE_CTRL_STS,
+ .mtl_fpe_reg = XGMAC_MTL_FPE_CTRL_STS,
+ .rxq_ctrl1_reg = XGMAC_RXQ_CTRL1,
+ .fprq_mask = XGMAC_FPRQ,
+ .int_en_reg = XGMAC_INT_EN,
+ .int_en_bit = XGMAC_FPEIE,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h
new file mode 100644
index 000000000000..b884eac7142d
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Furong Xu <0x1207@gmail.com>
+ * stmmac FPE (Frame Preemption, IEEE 802.1Qbu/802.3br) handling
+ */
+#ifndef _STMMAC_FPE_H_
+#define _STMMAC_FPE_H_
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+#define STMMAC_FPE_MM_MAX_VERIFY_RETRIES 3
+#define STMMAC_FPE_MM_MAX_VERIFY_TIME_MS 128
+
+struct stmmac_priv;
+
+void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up);
+bool stmmac_fpe_supported(struct stmmac_priv *priv);
+void stmmac_fpe_init(struct stmmac_priv *priv);
+void stmmac_fpe_apply(struct stmmac_priv *priv);
+void stmmac_fpe_irq_status(struct stmmac_priv *priv);
+int stmmac_fpe_get_add_frag_size(struct stmmac_priv *priv);
+void stmmac_fpe_set_add_frag_size(struct stmmac_priv *priv, u32 add_frag_size);
+
+int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
+ struct netlink_ext_ack *extack, u32 pclass);
+int dwxgmac3_fpe_map_preemption_class(struct net_device *ndev,
+ struct netlink_ext_ack *extack, u32 pclass);
+
+extern const struct stmmac_fpe_reg dwmac5_fpe_reg;
+extern const struct stmmac_fpe_reg dwxgmac3_fpe_reg;
+
+#endif /* _STMMAC_FPE_H_ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 5ef52ef2698f..0f59aa982604 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -18,9 +18,22 @@
#include "dwmac4.h"
#include "stmmac.h"
+#define STMMAC_HWTS_CFG_MASK (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
+ PTP_TCR_TSINIT | PTP_TCR_TSUPDT | \
+ PTP_TCR_TSCTRLSSR | PTP_TCR_SNAPTYPSEL_1 | \
+ PTP_TCR_TSIPV4ENA | PTP_TCR_TSIPV6ENA | \
+ PTP_TCR_TSEVNTENA | PTP_TCR_TSMSTRENA | \
+ PTP_TCR_TSVER2ENA | PTP_TCR_TSIPENA | \
+ PTP_TCR_TSTRIG | PTP_TCR_TSENALL)
+
static void config_hw_tstamping(void __iomem *ioaddr, u32 data)
{
- writel(data, ioaddr + PTP_TCR);
+ u32 regval = readl(ioaddr + PTP_TCR);
+
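+	/* Read-modify-write: touch only the timestamping control bits and
+	 * leave the remaining PTP_TCR fields at their current values.
+	 */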
+ regval &= ~STMMAC_HWTS_CFG_MASK;
+ regval |= data;
+
+ writel(regval, ioaddr + PTP_TCR);
}
static void config_sub_second_increment(void __iomem *ioaddr,
@@ -269,3 +282,14 @@ const struct stmmac_hwtimestamp stmmac_ptp = {
.timestamp_interrupt = timestamp_interrupt,
.hwtstamp_correct_latency = hwtstamp_correct_latency,
};
+
+const struct stmmac_hwtimestamp dwmac1000_ptp = {
+ .config_hw_tstamping = config_hw_tstamping,
+ .init_systime = init_systime,
+ .config_sub_second_increment = config_sub_second_increment,
+ .config_addend = config_addend,
+ .adjust_systime = adjust_systime,
+ .get_systime = get_systime,
+ .get_ptptime = dwmac1000_get_ptptime,
+ .timestamp_interrupt = dwmac1000_timestamp_interrupt,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 7bf275f127c9..3cdc3910f3a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -43,6 +43,7 @@
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
+#include "stmmac_fpe.h"
#include "stmmac.h"
#include "stmmac_xdp.h"
#include <linux/reset.h>
@@ -966,35 +967,6 @@ static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
/* Nothing to do, xpcs_config() handles everything */
}
-static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
-{
- struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
- unsigned long flags;
-
- timer_shutdown_sync(&fpe_cfg->verify_timer);
-
- spin_lock_irqsave(&fpe_cfg->lock, flags);
-
- if (is_up && fpe_cfg->pmac_enabled) {
- /* VERIFY process requires pmac enabled when NIC comes up */
- stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use,
- false, true);
-
- /* New link => maybe new partner => new verification process */
- stmmac_fpe_apply(priv);
- } else {
- /* No link => turn off EFPE */
- stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use,
- false, false);
- }
-
- spin_unlock_irqrestore(&fpe_cfg->lock, flags);
-}
-
static void stmmac_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
@@ -1006,7 +978,7 @@ static void stmmac_mac_link_down(struct phylink_config *config,
priv->eee_enabled = stmmac_eee_init(priv);
stmmac_set_eee_pls(priv, priv->hw, false);
- if (priv->dma_cap.fpesel)
+ if (stmmac_fpe_supported(priv))
stmmac_fpe_link_state_handle(priv, false);
}
@@ -1120,7 +1092,7 @@ static void stmmac_mac_link_up(struct phylink_config *config,
stmmac_set_eee_pls(priv, priv->hw, true);
}
- if (priv->dma_cap.fpesel)
+ if (stmmac_fpe_supported(priv))
stmmac_fpe_link_state_handle(priv, true);
if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
@@ -4069,7 +4041,7 @@ static int stmmac_release(struct net_device *dev)
stmmac_release_ptp(priv);
- if (priv->dma_cap.fpesel)
+ if (stmmac_fpe_supported(priv))
timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
pm_runtime_put(priv->device);
@@ -5966,35 +5938,6 @@ static int stmmac_set_features(struct net_device *netdev,
return 0;
}
-static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
-{
- struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
-
- /* This is interrupt context, just spin_lock() */
- spin_lock(&fpe_cfg->lock);
-
- if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN)
- goto unlock_out;
-
- /* LP has sent verify mPacket */
- if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER)
- stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
- MPACKET_RESPONSE);
-
- /* Local has sent verify mPacket */
- if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER &&
- fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED)
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
-
- /* LP has sent response mPacket */
- if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP &&
- fpe_cfg->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING)
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
-
-unlock_out:
- spin_unlock(&fpe_cfg->lock);
-}
-
static void stmmac_common_interrupt(struct stmmac_priv *priv)
{
u32 rx_cnt = priv->plat->rx_queues_to_use;
@@ -6013,12 +5956,8 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
stmmac_est_irq_status(priv, priv, priv->dev,
&priv->xstats, tx_cnt);
- if (priv->dma_cap.fpesel) {
- int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
- priv->dev);
-
- stmmac_fpe_event_status(priv, status);
- }
+ if (stmmac_fpe_supported(priv))
+ stmmac_fpe_irq_status(priv);
/* To handle GMAC own interrupts */
if ((priv->plat->has_gmac) || xmac) {
@@ -7350,90 +7289,6 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
return ret;
}
-/**
- * stmmac_fpe_verify_timer - Timer for MAC Merge verification
- * @t: timer_list struct containing private info
- *
- * Verify the MAC Merge capability in the local TX direction, by
- * transmitting Verify mPackets up to 3 times. Wait until link
- * partner responds with a Response mPacket, otherwise fail.
- */
-static void stmmac_fpe_verify_timer(struct timer_list *t)
-{
- struct stmmac_fpe_cfg *fpe_cfg = from_timer(fpe_cfg, t, verify_timer);
- struct stmmac_priv *priv = container_of(fpe_cfg, struct stmmac_priv,
- fpe_cfg);
- unsigned long flags;
- bool rearm = false;
-
- spin_lock_irqsave(&fpe_cfg->lock, flags);
-
- switch (fpe_cfg->status) {
- case ETHTOOL_MM_VERIFY_STATUS_INITIAL:
- case ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
- if (fpe_cfg->verify_retries != 0) {
- stmmac_fpe_send_mpacket(priv, priv->ioaddr,
- fpe_cfg, MPACKET_VERIFY);
- rearm = true;
- } else {
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
- }
-
- fpe_cfg->verify_retries--;
- break;
-
- case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
- stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use,
- true, true);
- break;
-
- default:
- break;
- }
-
- if (rearm) {
- mod_timer(&fpe_cfg->verify_timer,
- jiffies + msecs_to_jiffies(fpe_cfg->verify_time));
- }
-
- spin_unlock_irqrestore(&fpe_cfg->lock, flags);
-}
-
-static void stmmac_fpe_verify_timer_arm(struct stmmac_fpe_cfg *fpe_cfg)
-{
- if (fpe_cfg->pmac_enabled && fpe_cfg->tx_enabled &&
- fpe_cfg->verify_enabled &&
- fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED &&
- fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) {
- timer_setup(&fpe_cfg->verify_timer, stmmac_fpe_verify_timer, 0);
- mod_timer(&fpe_cfg->verify_timer, jiffies);
- }
-}
-
-void stmmac_fpe_apply(struct stmmac_priv *priv)
-{
- struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
-
- /* If verification is disabled, configure FPE right away.
- * Otherwise let the timer code do it.
- */
- if (!fpe_cfg->verify_enabled) {
- stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use,
- fpe_cfg->tx_enabled,
- fpe_cfg->pmac_enabled);
- } else {
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL;
- fpe_cfg->verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
-
- if (netif_running(priv->dev))
- stmmac_fpe_verify_timer_arm(fpe_cfg);
- }
-}
-
static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
{
const struct stmmac_xdp_buff *ctx = (void *)_ctx;
@@ -7712,11 +7567,7 @@ int stmmac_dvr_probe(struct device *device,
mutex_init(&priv->lock);
- priv->fpe_cfg.verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
- priv->fpe_cfg.verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
- priv->fpe_cfg.status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
- timer_setup(&priv->fpe_cfg.verify_timer, stmmac_fpe_verify_timer, 0);
- spin_lock_init(&priv->fpe_cfg.lock);
+ stmmac_fpe_init(priv);
/* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
@@ -7891,7 +7742,7 @@ int stmmac_suspend(struct device *dev)
}
rtnl_unlock();
- if (priv->dma_cap.fpesel)
+ if (stmmac_fpe_supported(priv))
timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
priv->speed = SPEED_UNKNOWN;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 03f90676b3ad..0c7d81ddd440 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -500,23 +500,22 @@ int stmmac_pcs_setup(struct net_device *ndev)
struct fwnode_handle *devnode, *pcsnode;
struct dw_xpcs *xpcs = NULL;
struct stmmac_priv *priv;
- int addr, mode, ret;
+ int addr, ret;
priv = netdev_priv(ndev);
- mode = priv->plat->phy_interface;
devnode = priv->plat->port_node;
if (priv->plat->pcs_init) {
ret = priv->plat->pcs_init(priv);
} else if (fwnode_property_present(devnode, "pcs-handle")) {
pcsnode = fwnode_find_reference(devnode, "pcs-handle", 0);
- xpcs = xpcs_create_fwnode(pcsnode, mode);
+ xpcs = xpcs_create_fwnode(pcsnode);
fwnode_handle_put(pcsnode);
ret = PTR_ERR_OR_ZERO(xpcs);
} else if (priv->plat->mdio_bus_data &&
priv->plat->mdio_bus_data->pcs_mask) {
addr = ffs(priv->plat->mdio_bus_data->pcs_mask) - 1;
- xpcs = xpcs_create_mdiodev(priv->mii, addr, mode);
+ xpcs = xpcs_create_mdiodev(priv->mii, addr);
ret = PTR_ERR_OR_ZERO(xpcs);
} else {
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index ad868e8d195d..3ac32444e492 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -522,6 +522,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
if (of_device_is_compatible(np, "st,spear600-gmac") ||
of_device_is_compatible(np, "snps,dwmac-3.50a") ||
of_device_is_compatible(np, "snps,dwmac-3.70a") ||
+ of_device_is_compatible(np, "snps,dwmac-3.72a") ||
of_device_is_compatible(np, "snps,dwmac")) {
/* Note that the max-frame-size parameter as defined in the
* ePAPR v1.1 spec is defined as max-frame-size, it's
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index a6b1de9a251d..429b2d357813 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -9,7 +9,6 @@
*******************************************************************************/
#include "stmmac.h"
#include "stmmac_ptp.h"
-#include "dwmac4.h"
/**
* stmmac_adjust_freq
@@ -265,7 +264,7 @@ static int stmmac_getcrosststamp(struct ptp_clock_info *ptp,
}
/* structure describing a PTP hardware clock */
-static struct ptp_clock_info stmmac_ptp_clock_ops = {
+const struct ptp_clock_info stmmac_ptp_clock_ops = {
.owner = THIS_MODULE,
.name = "stmmac ptp",
.max_adj = 62500000,
@@ -282,6 +281,24 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = {
.getcrosststamp = stmmac_getcrosststamp,
};
+/* structure describing a PTP hardware clock */
+const struct ptp_clock_info dwmac1000_ptp_clock_ops = {
+ .owner = THIS_MODULE,
+ .name = "stmmac ptp",
+ .max_adj = 62500000,
+ .n_alarm = 0,
+ .n_ext_ts = 1,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfine = stmmac_adjust_freq,
+ .adjtime = stmmac_adjust_time,
+ .gettime64 = stmmac_get_time,
+ .settime64 = stmmac_set_time,
+ .enable = dwmac1000_ptp_enable,
+ .getcrosststamp = stmmac_getcrosststamp,
+};
+
/**
* stmmac_ptp_register
* @priv: driver private structure
@@ -298,20 +315,25 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
priv->pps[i].available = true;
}
- if (priv->plat->ptp_max_adj)
- stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj;
-
/* Calculate the clock domain crossing (CDC) error if necessary */
priv->plat->cdc_error_adj = 0;
if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
- stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
- stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n;
+	/* Update the PTP clock parameters based on feature discovery,
+	 * when available.
+	 */
+ if (priv->dma_cap.pps_out_num)
+ priv->ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
+
+ if (priv->dma_cap.aux_snapshot_n)
+ priv->ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n;
+
+ if (priv->plat->ptp_max_adj)
+ priv->ptp_clock_ops.max_adj = priv->plat->ptp_max_adj;
rwlock_init(&priv->ptp_lock);
mutex_init(&priv->aux_ts_lock);
- priv->ptp_clock_ops = stmmac_ptp_clock_ops;
priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
priv->device);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index fce3fba2ffd2..4cc70480ce0f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -94,4 +94,14 @@ enum aux_snapshot {
AUX_SNAPSHOT3 = 0x80,
};
+struct ptp_clock_info;
+struct ptp_clock_request;
+struct stmmac_priv;
+
+int dwmac1000_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on);
+
+void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time);
+void dwmac1000_timestamp_interrupt(struct stmmac_priv *priv);
+
#endif /* __STMMAC_PTP_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 75ad2da1a37f..6a79e6a111ed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -1290,8 +1290,8 @@ const struct stmmac_tc_ops dwxgmac_tc_ops = {
.setup_cls_u32 = tc_setup_cls_u32,
.setup_cbs = tc_setup_cbs,
.setup_cls = tc_setup_cls,
- .setup_taprio = tc_setup_taprio_without_fpe,
+ .setup_taprio = tc_setup_taprio,
.setup_etf = tc_setup_etf,
.query_caps = tc_query_caps,
- .setup_mqprio = tc_setup_mqprio_unimplemented,
+ .setup_mqprio = tc_setup_dwmac510_mqprio,
};
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 41a27ae58ced..df6d35d41b97 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -10182,7 +10182,7 @@ static struct platform_driver niu_of_driver = {
.of_match_table = niu_match,
},
.probe = niu_of_probe,
- .remove_new = niu_of_remove,
+ .remove = niu_of_remove,
};
#endif /* CONFIG_SPARC64 */
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 16c86b13c185..bbb3a6ca19ed 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1272,7 +1272,7 @@ static struct platform_driver bigmac_sbus_driver = {
.of_match_table = bigmac_sbus_match,
},
.probe = bigmac_sbus_probe,
- .remove_new = bigmac_sbus_remove,
+ .remove = bigmac_sbus_remove,
};
module_platform_driver(bigmac_sbus_driver);
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index aedd13c94225..2920341b14a0 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -965,7 +965,7 @@ static struct platform_driver qec_sbus_driver = {
.of_match_table = qec_sbus_match,
},
.probe = qec_sbus_probe,
- .remove_new = qec_sbus_remove,
+ .remove = qec_sbus_remove,
};
static int __init qec_init(void)
diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c
index 391a1bc7f446..721d8ed3f302 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_driver.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c
@@ -549,7 +549,7 @@ MODULE_DEVICE_TABLE(of, spl2sw_of_match);
static struct platform_driver spl2sw_driver = {
.probe = spl2sw_probe,
- .remove_new = spl2sw_remove,
+ .remove = spl2sw_remove,
.driver = {
.name = "sp7021_emac",
.of_match_table = spl2sw_of_match,
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index ba6db61dd227..14e1df721f2e 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -71,6 +71,8 @@
#define AM65_CPSW_PORT_REG_RX_PRI_MAP 0x020
#define AM65_CPSW_PORT_REG_RX_MAXLEN 0x024
+#define AM65_CPSW_PORTN_REG_CTL 0x004
+#define AM65_CPSW_PORTN_REG_DSCP_MAP 0x120
#define AM65_CPSW_PORTN_REG_SA_L 0x308
#define AM65_CPSW_PORTN_REG_SA_H 0x30c
#define AM65_CPSW_PORTN_REG_TS_CTL 0x310
@@ -94,6 +96,10 @@
/* AM65_CPSW_PORT_REG_PRI_CTL */
#define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN BIT(8)
+/* AM65_CPSW_PN_REG_CTL */
+#define AM65_CPSW_PN_REG_CTL_DSCP_IPV4_EN BIT(1)
+#define AM65_CPSW_PN_REG_CTL_DSCP_IPV6_EN BIT(2)
+
/* AM65_CPSW_PN_TS_CTL register fields */
#define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN BIT(4)
#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN BIT(5)
@@ -176,6 +182,99 @@ static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L);
}
+#define AM65_CPSW_DSCP_MAX GENMASK(5, 0)
+#define AM65_CPSW_PRI_MAX GENMASK(2, 0)
+#define AM65_CPSW_DSCP_PRI_PER_REG 8
+#define AM65_CPSW_DSCP_PRI_SIZE 4 /* in bits */
+static int am65_cpsw_port_set_dscp_map(struct am65_cpsw_port *slave, u8 dscp, u8 pri)
+{
+ int reg_ofs;
+ int bit_ofs;
+ u32 val;
+
+ if (dscp > AM65_CPSW_DSCP_MAX)
+ return -EINVAL;
+
+ if (pri > AM65_CPSW_PRI_MAX)
+ return -EINVAL;
+
+ /* 32-bit register offset to this dscp */
+ reg_ofs = (dscp / AM65_CPSW_DSCP_PRI_PER_REG) * 4;
+ /* bit field offset to this dscp */
+ bit_ofs = AM65_CPSW_DSCP_PRI_SIZE * (dscp % AM65_CPSW_DSCP_PRI_PER_REG);
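+	/* e.g. DSCP 46 (EF): reg_ofs = (46 / 8) * 4 = 20, bit_ofs = 6 * 4 = 24 */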
+
+ val = readl(slave->port_base + AM65_CPSW_PORTN_REG_DSCP_MAP + reg_ofs);
+ val &= ~(AM65_CPSW_PRI_MAX << bit_ofs); /* clear */
+ val |= pri << bit_ofs; /* set */
+ writel(val, slave->port_base + AM65_CPSW_PORTN_REG_DSCP_MAP + reg_ofs);
+
+ return 0;
+}
+
+static void am65_cpsw_port_enable_dscp_map(struct am65_cpsw_port *slave)
+{
+ int dscp, pri;
+ u32 val;
+
+ /* Default DSCP to User Priority mapping as per:
+ * https://datatracker.ietf.org/doc/html/rfc8325#section-4.3
+ * and
+ * https://datatracker.ietf.org/doc/html/rfc8622#section-11
+ */
+ for (dscp = 0; dscp <= AM65_CPSW_DSCP_MAX; dscp++) {
+ switch (dscp) {
+ case 56: /* CS7 */
+ case 48: /* CS6 */
+ pri = 7;
+ break;
+ case 46: /* EF */
+ case 44: /* VA */
+ pri = 6;
+ break;
+ case 40: /* CS5 */
+ pri = 5;
+ break;
+ case 34: /* AF41 */
+ case 36: /* AF42 */
+ case 38: /* AF43 */
+ case 32: /* CS4 */
+ case 26: /* AF31 */
+ case 28: /* AF32 */
+ case 30: /* AF33 */
+ case 24: /* CS3 */
+ pri = 4;
+ break;
+ case 18: /* AF21 */
+ case 20: /* AF22 */
+ case 22: /* AF23 */
+ pri = 3;
+ break;
+ case 16: /* CS2 */
+ case 10: /* AF11 */
+ case 12: /* AF12 */
+ case 14: /* AF13 */
+ case 0: /* DF */
+ pri = 0;
+ break;
+ case 8: /* CS1 */
+ case 1: /* LE */
+ pri = 1;
+ break;
+ default:
+ pri = 0;
+ break;
+ }
+
+ am65_cpsw_port_set_dscp_map(slave, dscp, pri);
+ }
+
+	/* enable IPv4 and IPv6 DSCP mapping for this port */
+ val = readl(slave->port_base + AM65_CPSW_PORTN_REG_CTL);
+ val |= AM65_CPSW_PN_REG_CTL_DSCP_IPV4_EN |
+ AM65_CPSW_PN_REG_CTL_DSCP_IPV6_EN;
+ writel(val, slave->port_base + AM65_CPSW_PORTN_REG_CTL);
+}
+
static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port)
{
cpsw_sl_reset(port->slave.mac_sl, 100);
@@ -916,6 +1015,7 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
common->usage_count++;
am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
+ am65_cpsw_port_enable_dscp_map(port);
if (common->is_emac_mode)
am65_cpsw_init_port_emac_ale(port);
@@ -1026,9 +1126,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
int cpu, int *len)
{
struct am65_cpsw_common *common = flow->common;
- struct am65_cpsw_ndev_priv *ndev_priv;
struct net_device *ndev = port->ndev;
- struct am65_cpsw_ndev_stats *stats;
int ret = AM65_CPSW_XDP_CONSUMED;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
@@ -1046,9 +1144,6 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
/* XDP prog might have changed packet data and boundaries */
*len = xdp->data_end - xdp->data;
- ndev_priv = netdev_priv(ndev);
- stats = this_cpu_ptr(ndev_priv->stats);
-
switch (act) {
case XDP_PASS:
ret = AM65_CPSW_XDP_PASS;
@@ -1068,20 +1163,14 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
if (err)
goto drop;
- u64_stats_update_begin(&stats->syncp);
- stats->rx_bytes += *len;
- stats->rx_packets++;
- u64_stats_update_end(&stats->syncp);
+ dev_sw_netstats_tx_add(ndev, 1, *len);
ret = AM65_CPSW_XDP_CONSUMED;
goto out;
case XDP_REDIRECT:
if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
goto drop;
- u64_stats_update_begin(&stats->syncp);
- stats->rx_bytes += *len;
- stats->rx_packets++;
- u64_stats_update_end(&stats->syncp);
+ dev_sw_netstats_rx_add(ndev, *len);
ret = AM65_CPSW_XDP_REDIRECT;
goto out;
default:
@@ -1142,7 +1231,6 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
struct am65_cpsw_common *common = flow->common;
struct am65_cpsw_ndev_priv *ndev_priv;
- struct am65_cpsw_ndev_stats *stats;
struct cppi5_host_desc_t *desc_rx;
struct device *dev = common->dev;
struct am65_cpsw_swdata *swdata;
@@ -1225,12 +1313,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
am65_cpsw_nuss_rx_csum(skb, csum_info);
napi_gro_receive(&flow->napi_rx, skb);
- stats = this_cpu_ptr(ndev_priv->stats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += pkt_len;
- u64_stats_update_end(&stats->syncp);
+ dev_sw_netstats_rx_add(ndev, pkt_len);
allocate:
new_page = page_pool_dev_alloc_pages(flow->page_pool);
@@ -1311,10 +1394,7 @@ static struct sk_buff *
am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn,
dma_addr_t desc_dma)
{
- struct am65_cpsw_ndev_priv *ndev_priv;
- struct am65_cpsw_ndev_stats *stats;
struct cppi5_host_desc_t *desc_tx;
- struct net_device *ndev;
struct sk_buff *skb;
void **swdata;
@@ -1324,16 +1404,9 @@ am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn,
skb = *(swdata);
am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
- ndev = skb->dev;
-
am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
- ndev_priv = netdev_priv(ndev);
- stats = this_cpu_ptr(ndev_priv->stats);
- u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
+ dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
return skb;
}
@@ -1344,8 +1417,6 @@ am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common,
dma_addr_t desc_dma,
struct net_device **ndev)
{
- struct am65_cpsw_ndev_priv *ndev_priv;
- struct am65_cpsw_ndev_stats *stats;
struct cppi5_host_desc_t *desc_tx;
struct am65_cpsw_port *port;
struct xdp_frame *xdpf;
@@ -1359,15 +1430,9 @@ am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common,
am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
port = am65_common_get_port(common, port_id);
+ dev_sw_netstats_tx_add(port->ndev, 1, xdpf->len);
*ndev = port->ndev;
- ndev_priv = netdev_priv(*ndev);
- stats = this_cpu_ptr(ndev_priv->stats);
- u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += xdpf->len;
- u64_stats_update_end(&stats->syncp);
-
return xdpf;
}
@@ -1889,31 +1954,7 @@ static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
- struct am65_cpsw_ndev_priv *ndev_priv = netdev_priv(dev);
- unsigned int start;
- int cpu;
-
- for_each_possible_cpu(cpu) {
- struct am65_cpsw_ndev_stats *cpu_stats;
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
-
- cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu);
- do {
- start = u64_stats_fetch_begin(&cpu_stats->syncp);
- rx_packets = cpu_stats->rx_packets;
- rx_bytes = cpu_stats->rx_bytes;
- tx_packets = cpu_stats->tx_packets;
- tx_bytes = cpu_stats->tx_bytes;
- } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
-
- stats->rx_packets += rx_packets;
- stats->rx_bytes += rx_bytes;
- stats->tx_packets += tx_packets;
- stats->tx_bytes += tx_bytes;
- }
+ dev_fetch_sw_netstats(stats, dev->tstats);
stats->rx_errors = dev->stats.rx_errors;
stats->rx_dropped = dev->stats.rx_dropped;
@@ -2699,13 +2740,6 @@ of_node_put:
return ret;
}
-static void am65_cpsw_pcpu_stats_free(void *data)
-{
- struct am65_cpsw_ndev_stats __percpu *stats = data;
-
- free_percpu(stats);
-}
-
static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common)
{
struct am65_cpsw_port *port;
@@ -2725,7 +2759,6 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
struct device *dev = common->dev;
struct am65_cpsw_port *port;
struct phylink *phylink;
- int ret;
port = &common->ports[port_idx];
@@ -2818,21 +2851,13 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
port->ndev->features &= ~NETIF_F_HW_CSUM;
- ndev_priv->stats = netdev_alloc_pcpu_stats(struct am65_cpsw_ndev_stats);
- if (!ndev_priv->stats)
- return -ENOMEM;
-
- ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free,
- ndev_priv->stats);
- if (ret)
- dev_err(dev, "failed to add percpu stat free action %d\n", ret);
-
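+	/* NETDEV_PCPU_STAT_TSTATS makes the core allocate and free the
+	 * per-CPU tstats that the dev_sw_netstats_*() helpers update.
+	 */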
+ port->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
port->xdp_prog = NULL;
if (!common->dma_ndev)
common->dma_ndev = port->ndev;
- return ret;
+ return 0;
}
static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
@@ -3437,7 +3462,8 @@ static const struct am65_cpsw_pdata j7200_cpswxg_pdata = {
.quirks = 0,
.ale_dev_id = "am64-cpswxg",
.fdqring_mode = K3_RINGACC_RING_MODE_RING,
- .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII),
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) |
+ BIT(PHY_INTERFACE_MODE_USXGMII),
};
static const struct am65_cpsw_pdata j721e_cpswxg_pdata = {
@@ -3489,7 +3515,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
struct resource *res;
struct clk *clk;
int ale_entries;
- u64 id_temp;
+ __be64 id_temp;
int ret, i;
common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
@@ -3762,7 +3788,7 @@ static struct platform_driver am65_cpsw_nuss_driver = {
.pm = &am65_cpsw_nuss_dev_pm_ops,
},
.probe = am65_cpsw_nuss_probe,
- .remove_new = am65_cpsw_nuss_remove,
+ .remove = am65_cpsw_nuss_remove,
};
module_platform_driver(am65_cpsw_nuss_driver);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 92a27ba4c601..e7832a5cf3cc 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -184,18 +184,9 @@ struct am65_cpsw_common {
u32 *ale_context;
};
-struct am65_cpsw_ndev_stats {
- u64 tx_packets;
- u64 tx_bytes;
- u64 rx_packets;
- u64 rx_bytes;
- struct u64_stats_sync syncp;
-};
-
struct am65_cpsw_ndev_priv {
u32 msg_enable;
struct am65_cpsw_port *port;
- struct am65_cpsw_ndev_stats __percpu *stats;
bool offload_fwd_mark;
/* Serialize access to MAC Merge state between ethtool requests
* and link state updates
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c0a5abd8d9a8..4ef8cf6ea135 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1802,7 +1802,7 @@ static struct platform_driver cpsw_driver = {
.of_match_table = cpsw_of_mtable,
},
.probe = cpsw_probe,
- .remove_new = cpsw_remove,
+ .remove = cpsw_remove,
};
module_platform_driver(cpsw_driver);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 8d02d2b21429..52e4e350b734 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -162,27 +162,39 @@ static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
ale_entry[idx] |= (value << start);
}
-#define DEFINE_ALE_FIELD(name, start, bits) \
+#define DEFINE_ALE_FIELD_GET(name, start, bits) \
static inline int cpsw_ale_get_##name(u32 *ale_entry) \
{ \
return cpsw_ale_get_field(ale_entry, start, bits); \
-} \
+}
+
+#define DEFINE_ALE_FIELD_SET(name, start, bits) \
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
{ \
cpsw_ale_set_field(ale_entry, start, bits, value); \
}
-#define DEFINE_ALE_FIELD1(name, start) \
+#define DEFINE_ALE_FIELD(name, start, bits) \
+DEFINE_ALE_FIELD_GET(name, start, bits) \
+DEFINE_ALE_FIELD_SET(name, start, bits)
+
+#define DEFINE_ALE_FIELD1_GET(name, start) \
static inline int cpsw_ale_get_##name(u32 *ale_entry, u32 bits) \
{ \
return cpsw_ale_get_field(ale_entry, start, bits); \
-} \
+}
+
+#define DEFINE_ALE_FIELD1_SET(name, start) \
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value, \
u32 bits) \
{ \
cpsw_ale_set_field(ale_entry, start, bits, value); \
}
+#define DEFINE_ALE_FIELD1(name, start) \
+DEFINE_ALE_FIELD1_GET(name, start) \
+DEFINE_ALE_FIELD1_SET(name, start)
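+
+/* The split GET/SET variants let fields that are only read (or only
+ * written) define just the accessor they need, avoiding unused-function
+ * warnings for the other half.
+ */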
+
enum {
ALE_ENT_VID_MEMBER_LIST = 0,
ALE_ENT_VID_UNREG_MCAST_MSK,
@@ -238,14 +250,14 @@ static const struct ale_entry_fld vlan_entry_k3_cpswxg[] = {
DEFINE_ALE_FIELD(entry_type, 60, 2)
DEFINE_ALE_FIELD(vlan_id, 48, 12)
-DEFINE_ALE_FIELD(mcast_state, 62, 2)
+DEFINE_ALE_FIELD_SET(mcast_state, 62, 2)
DEFINE_ALE_FIELD1(port_mask, 66)
DEFINE_ALE_FIELD(super, 65, 1)
DEFINE_ALE_FIELD(ucast_type, 62, 2)
-DEFINE_ALE_FIELD1(port_num, 66)
-DEFINE_ALE_FIELD(blocked, 65, 1)
-DEFINE_ALE_FIELD(secure, 64, 1)
-DEFINE_ALE_FIELD(mcast, 40, 1)
+DEFINE_ALE_FIELD1_SET(port_num, 66)
+DEFINE_ALE_FIELD_SET(blocked, 65, 1)
+DEFINE_ALE_FIELD_SET(secure, 64, 1)
+DEFINE_ALE_FIELD_GET(mcast, 40, 1)
#define NU_VLAN_UNREG_MCAST_IDX 1
@@ -1692,26 +1704,34 @@ static void cpsw_ale_policer_reset(struct cpsw_ale *ale)
void cpsw_ale_classifier_setup_default(struct cpsw_ale *ale, int num_rx_ch)
{
int pri, idx;
- /* IEEE802.1D-2004, Standard for Local and metropolitan area networks
- * Table G-2 - Traffic type acronyms
- * Table G-3 - Defining traffic types
- * User priority values 1 and 2 effectively communicate a lower
- * priority than 0. In the below table 0 is assigned to higher priority
- * thread than 1 and 2 wherever possible.
- * The below table maps which thread the user priority needs to be
+
+ /* Reference:
+ * IEEE802.1Q-2014, Standard for Local and metropolitan area networks
+ * Table I-2 - Traffic type acronyms
+ * Table I-3 - Defining traffic types
+ * Section I.4 Traffic types and priority values, states:
+ * "0 is thus used both for default priority and for Best Effort, and
+ * Background is associated with a priority value of 1. This means
+ * that the value 1 effectively communicates a lower priority than 0."
+ *
+ * In the table below, Priority Code Point (PCP) 0 is assigned
+ * to a higher priority thread than PCP 1 wherever possible.
+ * The table maps which thread the PCP traffic needs to be
* sent to for a given number of threads (RX channels). Upper threads
* have higher priority.
* e.g. if number of threads is 8 then user priority 0 will map to
- * pri_thread_map[8-1][0] i.e. thread 2
+ * pri_thread_map[8-1][0] i.e. thread 1
*/
- int pri_thread_map[8][8] = { { 0, 0, 0, 0, 0, 0, 0, 0, },
+
+ int pri_thread_map[8][8] = { /* BK,BE,EE,CA,VI,VO,IC,NC */
+ { 0, 0, 0, 0, 0, 0, 0, 0, },
{ 0, 0, 0, 0, 1, 1, 1, 1, },
{ 0, 0, 0, 0, 1, 1, 2, 2, },
- { 1, 0, 0, 1, 2, 2, 3, 3, },
- { 1, 0, 0, 1, 2, 3, 4, 4, },
- { 1, 0, 0, 2, 3, 4, 5, 5, },
- { 1, 0, 0, 2, 3, 4, 5, 6, },
- { 2, 0, 1, 3, 4, 5, 6, 7, } };
+ { 0, 0, 1, 1, 2, 2, 3, 3, },
+ { 0, 0, 1, 1, 2, 2, 3, 4, },
+ { 1, 0, 2, 2, 3, 3, 4, 5, },
+ { 1, 0, 2, 3, 4, 4, 5, 6, },
+ { 1, 0, 2, 3, 4, 5, 6, 7 } };
cpsw_ale_policer_reset(ale);
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 557cc71b9dd2..a98bcc5eb566 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -2127,7 +2127,7 @@ static struct platform_driver cpsw_driver = {
.of_match_table = cpsw_of_mtable,
},
.probe = cpsw_probe,
- .remove_new = cpsw_remove,
+ .remove = cpsw_remove,
};
module_platform_driver(cpsw_driver);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index b0950a318c42..ed8116fb05e9 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -2070,7 +2070,7 @@ static struct platform_driver davinci_emac_driver = {
.of_match_table = davinci_emac_of_match,
},
.probe = davinci_emac_probe,
- .remove_new = davinci_emac_remove,
+ .remove = davinci_emac_remove,
};
/**
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 8e07d4a1b6ba..68507126be8e 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -760,7 +760,7 @@ static struct platform_driver davinci_mdio_driver = {
.of_match_table = of_match_ptr(davinci_mdio_of_mtable),
},
.probe = davinci_mdio_probe,
- .remove_new = davinci_mdio_remove,
+ .remove = davinci_mdio_remove,
};
static int __init davinci_mdio_init(void)
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index fe2fd1bfc904..c568c84a032b 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -817,6 +817,47 @@ static netdev_features_t emac_ndo_fix_features(struct net_device *ndev,
return features;
}
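+
+/* 8021q VID add/del is only offloaded when HSR offload mode is active;
+ * otherwise these callbacks are intentional no-ops.
+ */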
+static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int untag_mask = 0;
+ int port_mask;
+
+ if (prueth->is_hsr_offload_mode) {
+ port_mask = BIT(PRUETH_PORT_HOST) | BIT(emac->port_id);
+ untag_mask = 0;
+
+ netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
+ vid, port_mask, untag_mask);
+
+ icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
+ icssg_set_pvid(emac->prueth, vid, emac->port_id);
+ }
+ return 0;
+}
+
+static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int untag_mask = 0;
+ int port_mask;
+
+ if (prueth->is_hsr_offload_mode) {
+ port_mask = BIT(PRUETH_PORT_HOST);
+ untag_mask = 0;
+
+ netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n",
+ vid, port_mask, untag_mask);
+
+ icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
+ }
+ return 0;
+}
+
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop,
@@ -829,6 +870,8 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_get_stats64 = icssg_ndo_get_stats64,
.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
.ndo_fix_features = emac_ndo_fix_features,
+ .ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
};
static int prueth_netdev_init(struct prueth *prueth,
@@ -956,7 +999,7 @@ static int prueth_netdev_init(struct prueth *prueth,
ndev->netdev_ops = &emac_netdev_ops;
ndev->ethtool_ops = &icssg_ethtool_ops;
ndev->hw_features = NETIF_F_SG;
- ndev->features = ndev->hw_features;
+ ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
@@ -1655,7 +1698,7 @@ MODULE_DEVICE_TABLE(of, prueth_dt_match);
static struct platform_driver prueth_driver = {
.probe = prueth_probe,
- .remove_new = prueth_remove,
+ .remove = prueth_remove,
.driver = {
.name = "icssg-prueth",
.of_match_table = prueth_dt_match,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index 292f04d29f4f..5024f0647a0d 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -1215,7 +1215,7 @@ MODULE_DEVICE_TABLE(of, prueth_dt_match);
static struct platform_driver prueth_driver = {
.probe = prueth_probe,
- .remove_new = prueth_remove,
+ .remove = prueth_remove,
.driver = {
.name = "icssg-prueth-sr1",
.of_match_table = prueth_dt_match,
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 11b90e1da0c6..857820657bac 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -2270,7 +2270,7 @@ static struct platform_driver netcp_driver = {
.of_match_table = of_match,
},
.probe = netcp_probe,
- .remove_new = netcp_remove,
+ .remove = netcp_remove,
};
module_platform_driver(netcp_driver);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 44488c153ea2..4fbe4b7cd12a 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -2566,7 +2566,6 @@ static void gelic_wl_setup_netdev_ops(struct net_device *netdev)
netdev->ethtool_ops = &gelic_wl_ethtool_ops;
netdev->netdev_ops = &gelic_wl_netdevice_ops;
- netdev->wireless_data = &wl->wireless_data;
netdev->wireless_handlers = &gelic_wl_wext_handler_def;
}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
index 1f203d1ae8db..dbabf538e10a 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
@@ -276,7 +276,6 @@ struct gelic_wl_info {
u8 active_bssid[ETH_ALEN]; /* associated bssid */
unsigned int essid_len;
- struct iw_public_data wireless_data;
struct iw_statistics iwstat;
};
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 554aff7c8f3b..c6957e3b7f0f 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1676,7 +1676,7 @@ static void tsi108_ether_remove(struct platform_device *pdev)
static struct platform_driver tsi_eth_driver = {
.probe = tsi108_init_one,
- .remove_new = tsi108_ether_remove,
+ .remove = tsi108_ether_remove,
.driver = {
.name = "tsi-ethernet",
},
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index e80c02948801..894911f3d560 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2570,7 +2570,7 @@ static struct pci_driver rhine_driver_pci = {
static struct platform_driver rhine_driver_platform = {
.probe = rhine_init_one_platform,
- .remove_new = rhine_remove_one_platform,
+ .remove = rhine_remove_one_platform,
.driver = {
.name = DRV_NAME,
.of_match_table = rhine_of_tbl,
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 55fff4d0d380..dd4a07c97eee 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -3247,7 +3247,7 @@ static struct pci_driver velocity_pci_driver = {
static struct platform_driver velocity_platform_driver = {
.probe = velocity_platform_probe,
- .remove_new = velocity_platform_remove,
+ .remove = velocity_platform_remove,
.driver = {
.name = "via-velocity",
.of_match_table = velocity_of_ids,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
index a4cf682dca65..0ee73a265545 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -72,14 +72,6 @@ free_queue_irqs:
return err;
}
-static int txgbe_request_gpio_irq(struct txgbe *txgbe)
-{
- txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
- return request_threaded_irq(txgbe->gpio_irq, NULL,
- txgbe_gpio_irq_handler,
- IRQF_ONESHOT, "txgbe-gpio-irq", txgbe);
-}
-
static int txgbe_request_link_irq(struct txgbe *txgbe)
{
txgbe->link_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
@@ -149,11 +141,6 @@ static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data)
u32 eicr;
eicr = wx_misc_isb(wx, WX_ISB_MISC);
- if (eicr & TXGBE_PX_MISC_GPIO) {
- sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
- handle_nested_irq(sub_irq);
- nhandled++;
- }
if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
TXGBE_PX_MISC_ETH_AN)) {
sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
@@ -179,7 +166,6 @@ static void txgbe_del_irq_domain(struct txgbe *txgbe)
void txgbe_free_misc_irq(struct txgbe *txgbe)
{
- free_irq(txgbe->gpio_irq, txgbe);
free_irq(txgbe->link_irq, txgbe);
free_irq(txgbe->misc.irq, txgbe);
txgbe_del_irq_domain(txgbe);
@@ -191,7 +177,7 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
struct wx *wx = txgbe->wx;
int hwirq, err;
- txgbe->misc.nirqs = 2;
+ txgbe->misc.nirqs = 1;
txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0,
&txgbe_misc_irq_domain_ops, txgbe);
if (!txgbe->misc.domain)
@@ -216,20 +202,14 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
if (err)
goto del_misc_irq;
- err = txgbe_request_gpio_irq(txgbe);
- if (err)
- goto free_msic_irq;
-
err = txgbe_request_link_irq(txgbe);
if (err)
- goto free_gpio_irq;
+ goto free_msic_irq;
wx->misc_irq_domain = true;
return 0;
-free_gpio_irq:
- free_irq(txgbe->gpio_irq, txgbe);
free_msic_irq:
free_irq(txgbe->misc.irq, txgbe);
del_misc_irq:
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 93180225a6f1..f77450268036 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -82,7 +82,6 @@ static void txgbe_up_complete(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
- txgbe_reinit_gpio_intr(wx);
wx_control_hw(wx, true);
wx_configure_vectors(wx);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 67b61afdde96..1ae68f94dd49 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -122,7 +122,7 @@ static int txgbe_pcs_write(struct mii_bus *bus, int addr, int devnum, int regnum
static int txgbe_mdio_pcs_init(struct txgbe *txgbe)
{
struct mii_bus *mii_bus;
- struct dw_xpcs *xpcs;
+ struct phylink_pcs *pcs;
struct pci_dev *pdev;
struct wx *wx;
int ret = 0;
@@ -147,11 +147,11 @@ static int txgbe_mdio_pcs_init(struct txgbe *txgbe)
if (ret)
return ret;
- xpcs = xpcs_create_mdiodev(mii_bus, 0, PHY_INTERFACE_MODE_10GBASER);
- if (IS_ERR(xpcs))
- return PTR_ERR(xpcs);
+ pcs = xpcs_create_pcs_mdiodev(mii_bus, 0);
+ if (IS_ERR(pcs))
+ return PTR_ERR(pcs);
- txgbe->xpcs = xpcs;
+ txgbe->pcs = pcs;
return 0;
}
@@ -162,8 +162,8 @@ static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *confi
struct wx *wx = phylink_to_wx(config);
struct txgbe *txgbe = wx->priv;
- if (interface == PHY_INTERFACE_MODE_10GBASER)
- return &txgbe->xpcs->pcs;
+ if (wx->media_type != sp_media_copper)
+ return txgbe->pcs;
return NULL;
}
@@ -302,7 +302,7 @@ irqreturn_t txgbe_link_irq_handler(int irq, void *data)
status = rd32(wx, TXGBE_CFG_PORT_ST);
up = !!(status & TXGBE_CFG_PORT_ST_LINK_UP);
- phylink_pcs_change(&txgbe->xpcs->pcs, up);
+ phylink_pcs_change(txgbe->pcs, up);
return IRQ_HANDLED;
}
@@ -358,169 +358,8 @@ static int txgbe_gpio_direction_out(struct gpio_chip *chip, unsigned int offset,
return 0;
}
-static void txgbe_gpio_irq_ack(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- irq_hw_number_t hwirq = irqd_to_hwirq(d);
- struct wx *wx = gpiochip_get_data(gc);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&wx->gpio_lock, flags);
- wr32(wx, WX_GPIO_EOI, BIT(hwirq));
- raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
-}
-
-static void txgbe_gpio_irq_mask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- irq_hw_number_t hwirq = irqd_to_hwirq(d);
- struct wx *wx = gpiochip_get_data(gc);
- unsigned long flags;
-
- gpiochip_disable_irq(gc, hwirq);
-
- raw_spin_lock_irqsave(&wx->gpio_lock, flags);
- wr32m(wx, WX_GPIO_INTMASK, BIT(hwirq), BIT(hwirq));
- raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
-}
-
-static void txgbe_gpio_irq_unmask(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- irq_hw_number_t hwirq = irqd_to_hwirq(d);
- struct wx *wx = gpiochip_get_data(gc);
- unsigned long flags;
-
- gpiochip_enable_irq(gc, hwirq);
-
- raw_spin_lock_irqsave(&wx->gpio_lock, flags);
- wr32m(wx, WX_GPIO_INTMASK, BIT(hwirq), 0);
- raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
-}
-
-static void txgbe_toggle_trigger(struct gpio_chip *gc, unsigned int offset)
-{
- struct wx *wx = gpiochip_get_data(gc);
- u32 pol, val;
-
- pol = rd32(wx, WX_GPIO_POLARITY);
- val = rd32(wx, WX_GPIO_EXT);
-
- if (val & BIT(offset))
- pol &= ~BIT(offset);
- else
- pol |= BIT(offset);
-
- wr32(wx, WX_GPIO_POLARITY, pol);
-}
-
-static int txgbe_gpio_set_type(struct irq_data *d, unsigned int type)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- irq_hw_number_t hwirq = irqd_to_hwirq(d);
- struct wx *wx = gpiochip_get_data(gc);
- u32 level, polarity, mask;
- unsigned long flags;
-
- mask = BIT(hwirq);
-
- if (type & IRQ_TYPE_LEVEL_MASK) {
- level = 0;
- irq_set_handler_locked(d, handle_level_irq);
- } else {
- level = mask;
- irq_set_handler_locked(d, handle_edge_irq);
- }
-
- if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH)
- polarity = mask;
- else
- polarity = 0;
-
- raw_spin_lock_irqsave(&wx->gpio_lock, flags);
-
- wr32m(wx, WX_GPIO_INTEN, mask, mask);
- wr32m(wx, WX_GPIO_INTTYPE_LEVEL, mask, level);
- if (type == IRQ_TYPE_EDGE_BOTH)
- txgbe_toggle_trigger(gc, hwirq);
- else
- wr32m(wx, WX_GPIO_POLARITY, mask, polarity);
-
- raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
-
- return 0;
-}
-
-static const struct irq_chip txgbe_gpio_irq_chip = {
- .name = "txgbe-gpio-irq",
- .irq_ack = txgbe_gpio_irq_ack,
- .irq_mask = txgbe_gpio_irq_mask,
- .irq_unmask = txgbe_gpio_irq_unmask,
- .irq_set_type = txgbe_gpio_set_type,
- .flags = IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
-};
-
-irqreturn_t txgbe_gpio_irq_handler(int irq, void *data)
-{
- struct txgbe *txgbe = data;
- struct wx *wx = txgbe->wx;
- irq_hw_number_t hwirq;
- unsigned long gpioirq;
- struct gpio_chip *gc;
- unsigned long flags;
-
- gpioirq = rd32(wx, WX_GPIO_INTSTATUS);
-
- gc = txgbe->gpio;
- for_each_set_bit(hwirq, &gpioirq, gc->ngpio) {
- int gpio = irq_find_mapping(gc->irq.domain, hwirq);
- struct irq_data *d = irq_get_irq_data(gpio);
- u32 irq_type = irq_get_trigger_type(gpio);
-
- txgbe_gpio_irq_ack(d);
- handle_nested_irq(gpio);
-
- if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
- raw_spin_lock_irqsave(&wx->gpio_lock, flags);
- txgbe_toggle_trigger(gc, hwirq);
- raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
- }
- }
-
- return IRQ_HANDLED;
-}
-
-void txgbe_reinit_gpio_intr(struct wx *wx)
-{
- struct txgbe *txgbe = wx->priv;
- irq_hw_number_t hwirq;
- unsigned long gpioirq;
- struct gpio_chip *gc;
- unsigned long flags;
-
- /* for gpio interrupt pending before irq enable */
- gpioirq = rd32(wx, WX_GPIO_INTSTATUS);
-
- gc = txgbe->gpio;
- for_each_set_bit(hwirq, &gpioirq, gc->ngpio) {
- int gpio = irq_find_mapping(gc->irq.domain, hwirq);
- struct irq_data *d = irq_get_irq_data(gpio);
- u32 irq_type = irq_get_trigger_type(gpio);
-
- txgbe_gpio_irq_ack(d);
-
- if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
- raw_spin_lock_irqsave(&wx->gpio_lock, flags);
- txgbe_toggle_trigger(gc, hwirq);
- raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
- }
- }
-}
-
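/*
 * Note on the block removed above: the whole GPIO irq_chip layer
 * (ack/mask/unmask/set_type, the nested handler, and the re-init
 * hook) is dropped, while txgbe_gpio_init() below still registers the
 * lines as plain inputs and outputs. The SFP module signals these
 * interrupts served are presumably picked up by polling in the SFP
 * layer instead; that rationale is inferred, not stated in this diff.
 */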
static int txgbe_gpio_init(struct txgbe *txgbe)
{
- struct gpio_irq_chip *girq;
struct gpio_chip *gc;
struct device *dev;
struct wx *wx;
@@ -550,11 +389,6 @@ static int txgbe_gpio_init(struct txgbe *txgbe)
gc->direction_input = txgbe_gpio_direction_in;
gc->direction_output = txgbe_gpio_direction_out;
- girq = &gc->irq;
- gpio_irq_chip_set_chip(girq, &txgbe_gpio_irq_chip);
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_bad_irq;
-
ret = devm_gpiochip_add_data(dev, gc, wx);
if (ret)
return ret;
@@ -578,7 +412,7 @@ static int txgbe_clock_register(struct txgbe *txgbe)
if (IS_ERR(clk))
return PTR_ERR(clk);
- clock = clkdev_create(clk, NULL, clk_name);
+ clock = clkdev_create(clk, NULL, "%s", clk_name);
if (!clock) {
clk_unregister(clk);
return -ENOMEM;
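/*
 * The clkdev_create() change above is a format-string fix: the third
 * parameter is a printf()-style format (the function is declared
 * __printf(3, 4)), so a runtime-built name such as clk_name must be
 * funnelled through "%s":
 *
 *	clkdev_create(clk, NULL, clk_name);        // '%' in clk_name misparsed
 *	clkdev_create(clk, NULL, "%s", clk_name);  // safe
 */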
@@ -778,7 +612,7 @@ err_unregister_clk:
err_destroy_phylink:
phylink_destroy(wx->phylink);
err_destroy_xpcs:
- xpcs_destroy(txgbe->xpcs);
+ xpcs_destroy_pcs(txgbe->pcs);
err_unregister_swnode:
software_node_unregister_node_group(txgbe->nodes.group);
@@ -798,6 +632,6 @@ void txgbe_remove_phy(struct txgbe *txgbe)
clkdev_drop(txgbe->clock);
clk_unregister(txgbe->clk);
phylink_destroy(txgbe->wx->phylink);
- xpcs_destroy(txgbe->xpcs);
+ xpcs_destroy_pcs(txgbe->pcs);
software_node_unregister_node_group(txgbe->nodes.group);
}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
index 8a026d804fe2..3938985355ed 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
@@ -4,8 +4,6 @@
#ifndef _TXGBE_PHY_H_
#define _TXGBE_PHY_H_
-irqreturn_t txgbe_gpio_irq_handler(int irq, void *data);
-void txgbe_reinit_gpio_intr(struct wx *wx);
irqreturn_t txgbe_link_irq_handler(int irq, void *data);
int txgbe_init_phy(struct txgbe *txgbe);
void txgbe_remove_phy(struct txgbe *txgbe);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 959102c4c379..629a13e96b85 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -75,8 +75,7 @@
#define TXGBE_PX_MISC_IEN_MASK \
(TXGBE_PX_MISC_ETH_LKDN | TXGBE_PX_MISC_DEV_RST | \
TXGBE_PX_MISC_ETH_EVENT | TXGBE_PX_MISC_ETH_LK | \
- TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR | \
- TXGBE_PX_MISC_GPIO)
+ TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR)
/* Port cfg registers */
#define TXGBE_CFG_PORT_ST 0x14404
@@ -313,8 +312,7 @@ struct txgbe_nodes {
};
enum txgbe_misc_irqs {
- TXGBE_IRQ_GPIO = 0,
- TXGBE_IRQ_LINK,
+ TXGBE_IRQ_LINK = 0,
TXGBE_IRQ_MAX
};
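/*
 * With the GPIO irq_chip gone, TXGBE_IRQ_LINK becomes the only (and
 * first) miscellaneous vector, and TXGBE_PX_MISC_GPIO drops out of the
 * interrupt-enable mask above; both hunks are the interrupt-side
 * counterpart of the handler removal in txgbe_phy.c.
 */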
@@ -329,13 +327,12 @@ struct txgbe {
struct wx *wx;
struct txgbe_nodes nodes;
struct txgbe_irq misc;
- struct dw_xpcs *xpcs;
+ struct phylink_pcs *pcs;
struct platform_device *sfp_dev;
struct platform_device *i2c_dev;
struct clk_lookup *clock;
struct clk *clk;
struct gpio_chip *gpio;
- unsigned int gpio_irq;
unsigned int link_irq;
/* flow director */
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index b26fd15c25ae..b77f096eaf99 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -1271,6 +1271,6 @@ static struct platform_driver w5100_mmio_driver = {
.pm = &w5100_pm_ops,
},
.probe = w5100_mmio_probe,
- .remove_new = w5100_mmio_remove,
+ .remove = w5100_mmio_remove,
};
module_platform_driver(w5100_mmio_driver);
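/*
 * Background for this and the identical w5300/temac/axienet/emaclite/
 * ixp4xx hunks below: .remove_new was the transitional name for the
 * void-returning platform driver teardown callback while the old
 * int-returning .remove was being phased out. With .remove itself now
 * returning void, drivers move back to the canonical member; the
 * callback body is untouched:
 *
 *	static void w5100_mmio_remove(struct platform_device *pdev)
 *	{
 *		... unchanged teardown, no return value ...
 *	}
 *	...
 *	.remove = w5100_mmio_remove,
 */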
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index f165616f36fe..3e711dea3b2c 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -681,7 +681,7 @@ static struct platform_driver w5300_driver = {
.pm = &w5300_pm_ops,
},
.probe = w5300_probe,
- .remove_new = w5300_remove,
+ .remove = w5300_remove,
};
module_platform_driver(w5300_driver);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 1072e2210aed..edb36ff07a0c 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1649,7 +1649,7 @@ MODULE_DEVICE_TABLE(of, temac_of_match);
static struct platform_driver temac_driver = {
.probe = temac_probe,
- .remove_new = temac_remove,
+ .remove = temac_remove,
.driver = {
.name = "xilinx_temac",
.of_match_table = temac_of_match,
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 1fcbcaa85ebd..0f4b02fe6f85 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2999,7 +2999,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
static struct platform_driver axienet_driver = {
.probe = axienet_probe,
- .remove_new = axienet_remove,
+ .remove = axienet_remove,
.shutdown = axienet_shutdown,
.driver = {
.name = "xilinx_axienet",
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 940452d0a4d2..ecf47107146d 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -7,6 +7,7 @@
* Copyright (c) 2007 - 2013 Xilinx, Inc.
*/
+#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
@@ -1091,13 +1092,14 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
struct net_device *ndev = NULL;
struct net_local *lp = NULL;
struct device *dev = &ofdev->dev;
+ struct clk *clkin;
int rc = 0;
dev_info(dev, "Device Tree Probing\n");
/* Create an ethernet device instance */
- ndev = alloc_etherdev(sizeof(struct net_local));
+ ndev = devm_alloc_etherdev(dev, sizeof(struct net_local));
if (!ndev)
return -ENOMEM;
@@ -1110,15 +1112,13 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
/* Get IRQ for the device */
rc = platform_get_irq(ofdev, 0);
if (rc < 0)
- goto error;
+ return rc;
ndev->irq = rc;
lp->base_addr = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
- if (IS_ERR(lp->base_addr)) {
- rc = PTR_ERR(lp->base_addr);
- goto error;
- }
+ if (IS_ERR(lp->base_addr))
+ return PTR_ERR(lp->base_addr);
ndev->mem_start = res->start;
ndev->mem_end = res->end;
@@ -1129,6 +1129,11 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong");
lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
+ clkin = devm_clk_get_optional_enabled(&ofdev->dev, NULL);
+ if (IS_ERR(clkin))
+ return dev_err_probe(&ofdev->dev, PTR_ERR(clkin),
+ "Failed to get and enable clock from Device Tree\n");
+
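/*
 * devm_clk_get_optional_enabled() combines devm_clk_get_optional()
 * with clk_prepare_enable() and registers the matching disable and
 * put for driver detach, so the new clock needs no explicit unwind
 * here. It returns NULL rather than an error when the DT node names
 * no clock, which is what keeps the clock optional, and
 * dev_err_probe() keeps -EPROBE_DEFER quiet.
 */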
rc = of_get_ethdev_address(ofdev->dev.of_node, ndev);
if (rc) {
dev_warn(dev, "No MAC address found, using random\n");
@@ -1167,8 +1172,6 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
put_node:
of_node_put(lp->phy_node);
-error:
- free_netdev(ndev);
return rc;
}
@@ -1197,8 +1200,6 @@ static void xemaclite_of_remove(struct platform_device *of_dev)
of_node_put(lp->phy_node);
lp->phy_node = NULL;
-
- free_netdev(ndev);
}
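/*
 * The probe error label and both free_netdev() calls above can go
 * because of the earlier switch to devm_alloc_etherdev(): the
 * net_device is now device-managed and freed automatically on detach.
 * The resulting pattern, in sketch form:
 *
 *	ndev = devm_alloc_etherdev(dev, sizeof(struct net_local));
 *	if (!ndev)
 *		return -ENOMEM;
 *	...
 *	return rc;	// no unwind needed for ndev
 *
 * The unregister step in remove is unaffected; only the final free is
 * delegated to devres.
 */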
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1257,7 +1258,7 @@ static struct platform_driver xemaclite_of_driver = {
.of_match_table = xemaclite_of_match,
},
.probe = xemaclite_of_probe,
- .remove_new = xemaclite_of_remove,
+ .remove = xemaclite_of_remove,
};
module_platform_driver(xemaclite_of_driver);
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index aef316278eb4..a2ab1c150822 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1619,7 +1619,7 @@ static struct platform_driver ixp4xx_eth_driver = {
.of_match_table = of_match_ptr(ixp4xx_eth_of_match),
},
.probe = ixp4xx_eth_probe,
- .remove_new = ixp4xx_eth_remove,
+ .remove = ixp4xx_eth_remove,
};
module_platform_driver(ixp4xx_eth_driver);