-rw-r--r--Documentation/bpf/btf.rst13
-rw-r--r--Documentation/devicetree/bindings/net/engleder,tsnep.yaml79
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml2
-rw-r--r--Documentation/networking/devlink/devlink-params.rst3
-rw-r--r--Documentation/networking/ethtool-netlink.rst10
-rw-r--r--Documentation/networking/net_failover.rst111
-rw-r--r--Documentation/networking/phy.rst5
-rw-r--r--arch/arm64/include/asm/extable.h9
-rw-r--r--arch/arm64/include/asm/memory.h5
-rw-r--r--arch/arm64/kernel/traps.c2
-rw-r--r--arch/arm64/mm/ptdump.c2
-rw-r--r--arch/arm64/net/bpf_jit_comp.c7
-rw-r--r--arch/um/drivers/vector_kern.c4
-rw-r--r--drivers/base/regmap/regmap.c1
-rw-r--r--drivers/bluetooth/Kconfig6
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/bfusb.c3
-rw-r--r--drivers/bluetooth/btintel.c22
-rw-r--r--drivers/bluetooth/btmrvl_main.c2
-rw-r--r--drivers/bluetooth/btmtk.c289
-rw-r--r--drivers/bluetooth/btmtk.h111
-rw-r--r--drivers/bluetooth/btmtksdio.c496
-rw-r--r--drivers/bluetooth/btsdio.c2
-rw-r--r--drivers/bluetooth/btusb.c390
-rw-r--r--drivers/bluetooth/hci_bcm.c1
-rw-r--r--drivers/bluetooth/hci_h4.c4
-rw-r--r--drivers/bluetooth/hci_vhci.c120
-rw-r--r--drivers/bluetooth/virtio_bt.c3
-rw-r--r--drivers/infiniband/hw/irdma/main.c3
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/can/c_can/c_can_ethtool.c4
-rw-r--r--drivers/net/dsa/ocelot/felix.c16
-rw-r--r--drivers/net/dsa/ocelot/felix.h4
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c893
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c8
-rw-r--r--drivers/net/dsa/qca8k.c653
-rw-r--r--drivers/net/dsa/qca8k.h198
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c4
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx.h2
-rw-r--r--drivers/net/ethernet/3com/typhoon.c4
-rw-r--r--drivers/net/ethernet/8390/hydra.c4
-rw-r--r--drivers/net/ethernet/8390/mac8390.c4
-rw-r--r--drivers/net/ethernet/8390/smc-ultra.c4
-rw-r--r--drivers/net/ethernet/8390/wd.c4
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c8
-rw-r--r--drivers/net/ethernet/amd/a2065.c18
-rw-r--r--drivers/net/ethernet/amd/ariadne.c20
-rw-r--r--drivers/net/ethernet/amd/atarilance.c7
-rw-r--r--drivers/net/ethernet/amd/hplance.c4
-rw-r--r--drivers/net/ethernet/amd/lance.c4
-rw-r--r--drivers/net/ethernet/amd/mvme147.c14
-rw-r--r--drivers/net/ethernet/amd/ni65.c8
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c8
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c11
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c2
-rw-r--r--drivers/net/ethernet/apple/macmace.c14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c8
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.c18
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c98
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c8
-rw-r--r--drivers/net/ethernet/broadcom/b44.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c25
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c8
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c10
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c8
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c107
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c11
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c19
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c7
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c7
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c8
-rw-r--r--drivers/net/ethernet/cortina/gemini.c8
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c4
-rw-r--r--drivers/net/ethernet/engleder/Kconfig38
-rw-r--r--drivers/net/ethernet/engleder/Makefile10
-rw-r--r--drivers/net/ethernet/engleder/tsnep.h189
-rw-r--r--drivers/net/ethernet/engleder/tsnep_ethtool.c293
-rw-r--r--drivers/net/ethernet/engleder/tsnep_hw.h230
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c1272
-rw-r--r--drivers/net/ethernet/engleder/tsnep_ptp.c221
-rw-r--r--drivers/net/ethernet/engleder/tsnep_selftests.c811
-rw-r--r--drivers/net/ethernet/engleder/tsnep_tc.c443
-rw-r--r--drivers/net/ethernet/ethoc.c8
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c14
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c4
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c109
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c53
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c8
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c8
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c50
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c116
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c77
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c7
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c40
-rw-r--r--drivers/net/ethernet/i825xx/82596.c3
-rw-r--r--drivers/net/ethernet/i825xx/lasi_82596.c6
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c7
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c10
-rw-r--r--drivers/net/ethernet/intel/e100.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c144
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c9
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c4
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c14
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c323
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h19
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c62
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h5
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c10
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c50
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c114
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c2
-rw-r--r--drivers/net/ethernet/marvell/skge.c8
-rw-r--r--drivers/net/ethernet/marvell/sky2.c92
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c111
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c110
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c220
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c3
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c6
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c27
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c75
-rw-r--r--drivers/net/ethernet/microsoft/mana/Makefile2
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana.h13
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c162
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c69
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c173
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h13
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c84
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c41
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c103
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c7
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c27
-rw-r--r--drivers/net/ethernet/neterion/s2io.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c20
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c6
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c10
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c12
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c8
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c24
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c8
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c8
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-ethtool.c8
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c8
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c53
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c14
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c8
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c12
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c7
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c6
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c14
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c14
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c156
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c4
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c12
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c7
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c2
-rw-r--r--drivers/net/ethernet/ti/cpmac.c8
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c8
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c22
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h8
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c14
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c85
-rw-r--r--drivers/net/fddi/skfp/smt.c14
-rw-r--r--drivers/net/geneve.c9
-rw-r--r--drivers/net/hyperv/netvsc.c10
-rw-r--r--drivers/net/hyperv/netvsc_drv.c8
-rw-r--r--drivers/net/ipa/gsi.c114
-rw-r--r--drivers/net/ipa/gsi.h21
-rw-r--r--drivers/net/ipa/gsi_reg.h4
-rw-r--r--drivers/net/ipa/ipa_endpoint.c93
-rw-r--r--drivers/net/ipa/ipa_main.c6
-rw-r--r--drivers/net/ipa/ipa_mem.c4
-rw-r--r--drivers/net/ipa/ipa_modem.c10
-rw-r--r--drivers/net/ipa/ipa_modem.h3
-rw-r--r--drivers/net/ipa/ipa_table.c48
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c8
-rw-r--r--drivers/net/macvlan.c11
-rw-r--r--drivers/net/mctp/Kconfig18
-rw-r--r--drivers/net/mctp/Makefile1
-rw-r--r--drivers/net/mctp/mctp-serial.c515
-rw-r--r--drivers/net/netdevsim/ethtool.c8
-rw-r--r--drivers/net/phy/dp83869.c42
-rw-r--r--drivers/net/phy/mdio_bus.c16
-rw-r--r--drivers/net/phy/phylink.c366
-rw-r--r--drivers/net/usb/ax88179_178a.c17
-rw-r--r--drivers/net/usb/lan78xx.c1214
-rw-r--r--drivers/net/usb/r8152.c8
-rw-r--r--drivers/net/veth.c31
-rw-r--r--drivers/net/virtio_net.c6
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c10
-rw-r--r--drivers/net/vxlan.c8
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c62
-rw-r--r--drivers/net/wireguard/queueing.h4
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c6
-rw-r--r--drivers/net/wwan/iosm/Makefile3
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c13
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.h2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.c31
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.h9
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_port.c2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_trace.c173
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_trace.h51
-rw-r--r--drivers/net/wwan/wwan_core.c30
-rw-r--r--drivers/nfc/fdp/i2c.c4
-rw-r--r--drivers/pcmcia/pcmcia_cis.c3
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c9
-rw-r--r--drivers/s390/net/ctcm_dbug.h1
-rw-r--r--drivers/s390/net/ctcm_fsms.c2
-rw-r--r--drivers/s390/net/lcs.c11
-rw-r--r--drivers/s390/net/qeth_core_main.c35
-rw-r--r--drivers/s390/net/qeth_ethtool.c4
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c2
-rw-r--r--include/asm-generic/sections.h14
-rw-r--r--include/linux/bpf.h1
-rw-r--r--include/linux/btf_ids.h20
-rw-r--r--include/linux/ethtool.h26
-rw-r--r--include/linux/filter.h7
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/net/intel/iidc.h7
-rw-r--r--include/linux/netdevice.h451
-rw-r--r--include/linux/once.h2
-rw-r--r--include/linux/phy.h2
-rw-r--r--include/linux/phylink.h38
-rw-r--r--include/linux/regmap.h7
-rw-r--r--include/linux/siphash.h2
-rw-r--r--include/linux/skbuff.h55
-rw-r--r--include/linux/skmsg.h6
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/wwan.h2
-rw-r--r--include/net/arp.h8
-rw-r--r--include/net/bluetooth/bluetooth.h2
-rw-r--r--include/net/bluetooth/hci.h1
-rw-r--r--include/net/bluetooth/hci_core.h27
-rw-r--r--include/net/bluetooth/hci_sync.h102
-rw-r--r--include/net/devlink.h4
-rw-r--r--include/net/gro.h421
-rw-r--r--include/net/inet_connection_sock.h2
-rw-r--r--include/net/inet_sock.h12
-rw-r--r--include/net/ip.h9
-rw-r--r--include/net/ip6_checksum.h20
-rw-r--r--include/net/ip6_route.h18
-rw-r--r--include/net/ipv6.h2
-rw-r--r--include/net/iucv/af_iucv.h10
-rw-r--r--include/net/ndisc.h16
-rw-r--r--include/net/neighbour.h11
-rw-r--r--include/net/netns/core.h1
-rw-r--r--include/net/sctp/structs.h7
-rw-r--r--include/net/sock.h84
-rw-r--r--include/net/tcp.h18
-rw-r--r--include/net/udp.h24
-rw-r--r--include/soc/mscc/ocelot.h59
-rw-r--r--include/soc/mscc/ocelot_ana.h10
-rw-r--r--include/soc/mscc/ocelot_vcap.h1
-rw-r--r--include/uapi/linux/bpf.h21
-rw-r--r--include/uapi/linux/btf.h3
-rw-r--r--include/uapi/linux/ethtool.h1
-rw-r--r--include/uapi/linux/ethtool_netlink.h1
-rw-r--r--include/uapi/linux/tty.h1
-rw-r--r--kernel/bpf/bpf_task_storage.c4
-rw-r--r--kernel/bpf/btf.c19
-rw-r--r--kernel/bpf/mmap_unlock_work.h65
-rw-r--r--kernel/bpf/stackmap.c82
-rw-r--r--kernel/bpf/task_iter.c82
-rw-r--r--kernel/bpf/verifier.c34
-rw-r--r--kernel/trace/bpf_trace.c6
-rw-r--r--net/802/hippi.c2
-rw-r--r--net/8021q/vlan.c4
-rw-r--r--net/8021q/vlan_core.c7
-rw-r--r--net/8021q/vlan_dev.c4
-rw-r--r--net/8021q/vlanproc.c2
-rw-r--r--net/Kconfig5
-rw-r--r--net/bluetooth/Makefile2
-rw-r--r--net/bluetooth/aosp.c168
-rw-r--r--net/bluetooth/aosp.h13
-rw-r--r--net/bluetooth/cmtp/core.c4
-rw-r--r--net/bluetooth/hci_codec.c18
-rw-r--r--net/bluetooth/hci_conn.c20
-rw-r--r--net/bluetooth/hci_core.c1334
-rw-r--r--net/bluetooth/hci_event.c211
-rw-r--r--net/bluetooth/hci_request.c500
-rw-r--r--net/bluetooth/hci_request.h15
-rw-r--r--net/bluetooth/hci_sock.c11
-rw-r--r--net/bluetooth/hci_sync.c4922
-rw-r--r--net/bluetooth/hci_sysfs.c2
-rw-r--r--net/bluetooth/l2cap_sock.c19
-rw-r--r--net/bluetooth/mgmt.c2153
-rw-r--r--net/bluetooth/mgmt_util.c15
-rw-r--r--net/bluetooth/mgmt_util.h4
-rw-r--r--net/bluetooth/msft.c511
-rw-r--r--net/bluetooth/msft.h15
-rw-r--r--net/bridge/br_if.c12
-rw-r--r--net/bridge/br_sysfs_br.c7
-rw-r--r--net/core/Makefile4
-rw-r--r--net/core/dev.c697
-rw-r--r--net/core/dev_addr_lists.c93
-rw-r--r--net/core/dev_addr_lists_test.c236
-rw-r--r--net/core/devlink.c5
-rw-r--r--net/core/filter.c23
-rw-r--r--net/core/flow_dissector.c2
-rw-r--r--net/core/gro.c766
-rw-r--r--net/core/net-sysfs.c16
-rw-r--r--net/core/rtnetlink.c7
-rw-r--r--net/core/secure_seq.c4
-rw-r--r--net/core/skbuff.c162
-rw-r--r--net/core/sock.c61
-rw-r--r--net/dccp/proto.c27
-rw-r--r--net/dccp/trace.h4
-rw-r--r--net/ethernet/eth.c7
-rw-r--r--net/ethtool/common.c1
-rw-r--r--net/ethtool/ioctl.c11
-rw-r--r--net/ethtool/netlink.h2
-rw-r--r--net/ethtool/rings.c32
-rw-r--r--net/ethtool/stats.c15
-rw-r--r--net/ieee802154/socket.c4
-rw-r--r--net/ipv4/af_inet.c29
-rw-r--r--net/ipv4/arp.c33
-rw-r--r--net/ipv4/esp4_offload.c1
-rw-r--r--net/ipv4/fou.c26
-rw-r--r--net/ipv4/gre_offload.c13
-rw-r--r--net/ipv4/igmp.c1
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/inet_hashtables.c8
-rw-r--r--net/ipv4/ip_output.c1
-rw-r--r--net/ipv4/ip_sockglue.c2
-rw-r--r--net/ipv4/ping.c14
-rw-r--r--net/ipv4/raw.c15
-rw-r--r--net/ipv4/route.c2
-rw-r--r--net/ipv4/syncookies.c2
-rw-r--r--net/ipv4/tcp.c91
-rw-r--r--net/ipv4/tcp_input.c8
-rw-r--r--net/ipv4/tcp_ipv4.c10
-rw-r--r--net/ipv4/tcp_offload.c1
-rw-r--r--net/ipv4/tcp_output.c2
-rw-r--r--net/ipv4/udp.c10
-rw-r--r--net/ipv4/udp_offload.c32
-rw-r--r--net/ipv6/af_inet6.c7
-rw-r--r--net/ipv6/ah6.c5
-rw-r--r--net/ipv6/esp6_offload.c1
-rw-r--r--net/ipv6/exthdrs.c1
-rw-r--r--net/ipv6/inet6_hashtables.c8
-rw-r--r--net/ipv6/ip6_offload.c14
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/ipv6_sockglue.c17
-rw-r--r--net/ipv6/route.c6
-rw-r--r--net/ipv6/syncookies.c2
-rw-r--r--net/ipv6/tcp_ipv6.c10
-rw-r--r--net/ipv6/tcpv6_offload.c1
-rw-r--r--net/ipv6/udp.c8
-rw-r--r--net/ipv6/udp_offload.c3
-rw-r--r--net/iucv/af_iucv.c40
-rw-r--r--net/iucv/iucv.c124
-rw-r--r--net/mac80211/ethtool.c8
-rw-r--r--net/mctp/test/route-test.c5
-rw-r--r--net/mptcp/protocol.c2
-rw-r--r--net/mptcp/sockopt.c106
-rw-r--r--net/mptcp/subflow.c7
-rw-r--r--net/netfilter/nf_conntrack_core.c4
-rw-r--r--net/netfilter/nf_conntrack_expect.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c2
-rw-r--r--net/netfilter/nf_nat_core.c2
-rw-r--r--net/netlink/af_netlink.c4
-rw-r--r--net/packet/af_packet.c4
-rw-r--r--net/rds/send.c2
-rw-r--r--net/sched/sch_generic.c69
-rw-r--r--net/sched/sch_netem.c18
-rw-r--r--net/sctp/output.c2
-rw-r--r--net/sctp/outqueue.c3
-rw-r--r--net/sctp/sm_statefuns.c11
-rw-r--r--net/sctp/socket.c5
-rw-r--r--net/sctp/transport.c26
-rw-r--r--net/smc/af_smc.c2
-rw-r--r--net/tipc/crypto.c19
-rw-r--r--net/unix/af_unix.c4
-rw-r--r--net/xdp/xsk.c4
-rw-r--r--tools/bpf/bpftool/.gitignore2
-rw-r--r--tools/bpf/bpftool/Documentation/Makefile3
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-btf.rst2
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst12
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-gen.rst2
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-link.rst2
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst8
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-net.rst66
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst8
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool.rst6
-rw-r--r--tools/bpf/bpftool/Documentation/common_options.rst9
-rw-r--r--tools/bpf/bpftool/Makefile19
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool5
-rw-r--r--tools/bpf/bpftool/btf.c13
-rw-r--r--tools/bpf/bpftool/btf_dumper.c42
-rw-r--r--tools/bpf/bpftool/common.c1
-rw-r--r--tools/bpf/bpftool/feature.c2
-rw-r--r--tools/bpf/bpftool/gen.c12
-rw-r--r--tools/bpf/bpftool/iter.c7
-rw-r--r--tools/bpf/bpftool/main.c13
-rw-r--r--tools/bpf/bpftool/main.h3
-rw-r--r--tools/bpf/bpftool/map.c13
-rw-r--r--tools/bpf/bpftool/map_perf_ring.c9
-rw-r--r--tools/bpf/bpftool/prog.c214
-rw-r--r--tools/bpf/bpftool/struct_ops.c16
-rw-r--r--tools/bpf/runqslower/runqslower.c6
-rw-r--r--tools/include/uapi/linux/bpf.h21
-rw-r--r--tools/include/uapi/linux/btf.h3
-rw-r--r--tools/lib/bpf/Makefile1
-rw-r--r--tools/lib/bpf/bpf.c166
-rw-r--r--tools/lib/bpf/bpf.h74
-rw-r--r--tools/lib/bpf/bpf_gen_internal.h8
-rw-r--r--tools/lib/bpf/btf.c69
-rw-r--r--tools/lib/bpf/btf.h80
-rw-r--r--tools/lib/bpf/btf_dump.c40
-rw-r--r--tools/lib/bpf/gen_loader.c33
-rw-r--r--tools/lib/bpf/libbpf.c376
-rw-r--r--tools/lib/bpf/libbpf.h102
-rw-r--r--tools/lib/bpf/libbpf.map13
-rw-r--r--tools/lib/bpf/libbpf_common.h14
-rw-r--r--tools/lib/bpf/libbpf_internal.h33
-rw-r--r--tools/lib/bpf/libbpf_legacy.h1
-rw-r--r--tools/lib/bpf/libbpf_probes.c20
-rw-r--r--tools/lib/bpf/linker.c4
-rw-r--r--tools/lib/bpf/xsk.c34
-rw-r--r--tools/testing/selftests/bpf/Makefile71
-rw-r--r--tools/testing/selftests/bpf/README.rst9
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c17
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_ringbufs.c8
-rw-r--r--tools/testing/selftests/bpf/btf_helpers.c17
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.h3
-rw-r--r--tools/testing/selftests/bpf/get_cgroup_id_user.c5
-rw-r--r--tools/testing/selftests/bpf/prog_tests/align.c11
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_iter.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf.c207
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_dump.c41
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_split.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_tag.c44
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_write.c67
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_reloc.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/exhandler.c43
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_stress.c33
-rw-r--r--tools/testing/selftests/bpf/prog_tests/find_vma.c117
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/global_data.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/global_func_args.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfree_skb.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/l4lb_all.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_lock.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/perf_buffer.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pkt_access.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pkt_md_access.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/queue_stack_map.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c14
-rw-r--r--tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c29
-rw-r--r--tools/testing/selftests/bpf/prog_tests/signal_pending.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_lookup.c31
-rw-r--r--tools/testing/selftests/bpf/prog_tests/skb_ctx.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/skb_helpers.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt.c19
-rw-r--r--tools/testing/selftests/bpf/prog_tests/spinlock.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_map.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tailcalls.c18
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_estats.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tp_attach_query.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_attach.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_info.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_perf.c2
-rw-r--r--tools/testing/selftests/bpf/progs/btf_decl_tag.c (renamed from tools/testing/selftests/bpf/progs/tag.c)4
-rw-r--r--tools/testing/selftests/bpf/progs/btf_type_tag.c25
-rw-r--r--tools/testing/selftests/bpf/progs/exhandler_kern.c43
-rw-r--r--tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c2
-rw-r--r--tools/testing/selftests/bpf/progs/find_vma.c69
-rw-r--r--tools/testing/selftests/bpf/progs/find_vma_fail1.c29
-rw-r--r--tools/testing/selftests/bpf/progs/find_vma_fail2.c29
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb_noinline.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_lock.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_queue_stack_map.h2
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_lookup.c8
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_ctx.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_spin_lock.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_estats.c2
-rw-r--r--tools/testing/selftests/bpf/test_btf.h3
-rw-r--r--tools/testing/selftests/bpf/test_cgroup_storage.c3
-rw-r--r--tools/testing/selftests/bpf/test_dev_cgroup.c3
-rw-r--r--tools/testing/selftests/bpf/test_lirc_mode2_user.c6
-rw-r--r--tools/testing/selftests/bpf/test_lru_map.c9
-rw-r--r--tools/testing/selftests/bpf/test_maps.c7
-rw-r--r--tools/testing/selftests/bpf/test_sock.c23
-rw-r--r--tools/testing/selftests/bpf/test_sock_addr.c13
-rw-r--r--tools/testing/selftests/bpf/test_stub.c44
-rw-r--r--tools/testing/selftests/bpf/test_sysctl.c23
-rw-r--r--tools/testing/selftests/bpf/test_tag.c3
-rw-r--r--tools/testing/selftests/bpf/test_tcpnotify_user.c7
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c38
-rw-r--r--tools/testing/selftests/bpf/testing_helpers.c60
-rw-r--r--tools/testing/selftests/bpf/testing_helpers.h6
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c32
-rw-r--r--tools/testing/selftests/bpf/xdping.c3
-rw-r--r--tools/testing/selftests/bpf/xdpxceiver.c2
-rwxr-xr-xtools/testing/selftests/net/fcnal-test.sh40
-rw-r--r--tools/testing/selftests/net/mptcp/config8
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_connect.c51
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh80
-rw-r--r--tools/testing/selftests/net/nettest.c33
607 files changed, 23152 insertions(+), 9598 deletions(-)
diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst
index 9ad4218a751f..d0ec40d00c28 100644
--- a/Documentation/bpf/btf.rst
+++ b/Documentation/bpf/btf.rst
@@ -86,6 +86,7 @@ sequentially and type id is assigned to each recognized type starting from id
#define BTF_KIND_DATASEC 15 /* Section */
#define BTF_KIND_FLOAT 16 /* Floating point */
#define BTF_KIND_DECL_TAG 17 /* Decl Tag */
+ #define BTF_KIND_TYPE_TAG 18 /* Type Tag */
Note that the type section encodes debug info, not just pure types.
``BTF_KIND_FUNC`` is not a type, and it represents a defined subprogram.
@@ -107,7 +108,7 @@ Each type contains the following common data::
* "size" tells the size of the type it is describing.
*
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
- * FUNC, FUNC_PROTO and DECL_TAG.
+ * FUNC, FUNC_PROTO, DECL_TAG and TYPE_TAG.
* "type" is a type_id referring to another type.
*/
union {
@@ -492,6 +493,16 @@ the attribute is applied to a ``struct``/``union`` member or
a ``func`` argument, and ``btf_decl_tag.component_idx`` should be a
valid index (starting from 0) pointing to a member or an argument.
+2.2.17 BTF_KIND_TYPE_TAG
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: offset to a non-empty string
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_TYPE_TAG
+ * ``info.vlen``: 0
+ * ``type``: the type with ``btf_type_tag`` attribute
+
3. BTF Kernel API
*****************
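
The new 2.2.17 section above describes only the binary encoding. As a rough
illustration of where such records come from (not part of this patch): clang's
``btf_type_tag`` attribute annotates a pointee type, and the emitted BTF chains
``PTR -> TYPE_TAG -> tagged type``. The tag name "user" and the function below
are invented for the example; a clang build with ``btf_type_tag`` support is
assumed::

    #define __tag_user __attribute__((btf_type_tag("user")))

    struct task_ctx;

    /* BTF for "ctx": PTR -> TYPE_TAG 'user' -> STRUCT 'task_ctx' */
    int read_ctx(struct task_ctx __tag_user *ctx, int len);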
diff --git a/Documentation/devicetree/bindings/net/engleder,tsnep.yaml b/Documentation/devicetree/bindings/net/engleder,tsnep.yaml
new file mode 100644
index 000000000000..d0e1476e15b5
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/engleder,tsnep.yaml
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/engleder,tsnep.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TSN endpoint Ethernet MAC binding
+
+maintainers:
+ - Gerhard Engleder <gerhard@engleder-embedded.com>
+
+allOf:
+ - $ref: ethernet-controller.yaml#
+
+properties:
+ compatible:
+ const: engleder,tsnep
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ local-mac-address: true
+
+ mac-address: true
+
+ nvmem-cells: true
+
+ nvmem-cell-names: true
+
+ phy-connection-type:
+ enum:
+ - mii
+ - gmii
+ - rgmii
+ - rgmii-id
+
+ phy-mode: true
+
+ phy-handle: true
+
+ mdio:
+ type: object
+ $ref: "mdio.yaml#"
+ description: optional node for embedded MDIO controller
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ axi {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ tsnep0: ethernet@a0000000 {
+ compatible = "engleder,tsnep";
+ reg = <0x0 0xa0000000 0x0 0x10000>;
+ interrupts = <0 89 1>;
+ interrupt-parent = <&gic>;
+ local-mac-address = [00 00 00 00 00 00];
+ phy-mode = "rgmii";
+ phy-handle = <&phy0>;
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ suppress-preamble;
+ phy0: ethernet-phy@1 {
+ reg = <1>;
+ rxc-skew-ps = <1080>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 66d6432fd781..5f4ca46bfb13 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -379,6 +379,8 @@ patternProperties:
description: Silicon Laboratories (formerly Energy Micro AS)
"^engicam,.*":
description: Engicam S.r.l.
+ "^engleder,.*":
+ description: Engleder
"^epcos,.*":
description: EPCOS AG
"^epfl,.*":
diff --git a/Documentation/networking/devlink/devlink-params.rst b/Documentation/networking/devlink/devlink-params.rst
index 4878907e9232..b7dfe693a332 100644
--- a/Documentation/networking/devlink/devlink-params.rst
+++ b/Documentation/networking/devlink/devlink-params.rst
@@ -109,6 +109,9 @@ own name.
- Boolean
- When enabled, the device driver will instantiate VDPA networking
specific auxiliary device of the devlink device.
+ * - ``enable_iwarp``
+ - Boolean
+ - Enable handling of iWARP traffic in the device.
* - ``internal_err_reset``
- Boolean
- When enabled, the device driver will reset the device on internal
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index 7b598c7e3912..9d98e0511249 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -849,7 +849,7 @@ Request contents:
Kernel response contents:
- ==================================== ====== ==========================
+ ==================================== ====== ===========================
``ETHTOOL_A_RINGS_HEADER`` nested reply header
``ETHTOOL_A_RINGS_RX_MAX`` u32 max size of RX ring
``ETHTOOL_A_RINGS_RX_MINI_MAX`` u32 max size of RX mini ring
@@ -859,7 +859,8 @@ Kernel response contents:
``ETHTOOL_A_RINGS_RX_MINI`` u32 size of RX mini ring
``ETHTOOL_A_RINGS_RX_JUMBO`` u32 size of RX jumbo ring
``ETHTOOL_A_RINGS_TX`` u32 size of TX ring
- ==================================== ====== ==========================
+ ``ETHTOOL_A_RINGS_RX_BUF_LEN`` u32 size of buffers on the ring
+ ==================================== ====== ===========================
RINGS_SET
@@ -869,13 +870,14 @@ Sets ring sizes like ``ETHTOOL_SRINGPARAM`` ioctl request.
Request contents:
- ==================================== ====== ==========================
+ ==================================== ====== ===========================
``ETHTOOL_A_RINGS_HEADER`` nested reply header
``ETHTOOL_A_RINGS_RX`` u32 size of RX ring
``ETHTOOL_A_RINGS_RX_MINI`` u32 size of RX mini ring
``ETHTOOL_A_RINGS_RX_JUMBO`` u32 size of RX jumbo ring
``ETHTOOL_A_RINGS_TX`` u32 size of TX ring
- ==================================== ====== ==========================
+ ``ETHTOOL_A_RINGS_RX_BUF_LEN`` u32 size of buffers on the ring
+ ==================================== ====== ===========================
Kernel checks that requested ring sizes do not exceed limits reported by
driver. Driver may impose additional constraints and may not support all
diff --git a/Documentation/networking/net_failover.rst b/Documentation/networking/net_failover.rst
index e143ab79a960..3a662f2b4d6e 100644
--- a/Documentation/networking/net_failover.rst
+++ b/Documentation/networking/net_failover.rst
@@ -35,7 +35,7 @@ To support this, the hypervisor needs to enable VIRTIO_NET_F_STANDBY
feature on the virtio-net interface and assign the same MAC address to both
virtio-net and VF interfaces.
-Here is an example XML snippet that shows such configuration.
+Here is an example libvirt XML snippet that shows such a configuration:
::
<interface type='network'>
@@ -45,18 +45,32 @@ Here is an example XML snippet that shows such configuration.
<model type='virtio'/>
<driver name='vhost' queues='4'/>
<link state='down'/>
- <address type='pci' domain='0x0000' bus='0x00' slot='0x0a' function='0x0'/>
+ <teaming type='persistent'/>
+ <alias name='ua-backup0'/>
</interface>
<interface type='hostdev' managed='yes'>
<mac address='52:54:00:00:12:53'/>
<source>
<address type='pci' domain='0x0000' bus='0x42' slot='0x02' function='0x5'/>
</source>
- <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' function='0x0'/>
+ <teaming type='transient' persistent='ua-backup0'/>
</interface>
+In this configuration, the first device definition is for the virtio-net
+interface and acts as the 'persistent' device, indicating that this
+interface will always be plugged in. This is specified by the 'teaming' tag,
+whose required 'type' attribute has the value 'persistent'. The link state for the
+virtio-net device is set to 'down' to ensure that the 'failover' netdev prefers
+the VF passthrough device for normal communication. The virtio-net device will
+be brought UP during live migration to allow uninterrupted communication.
+
+The second device definition is for the VF passthrough interface. Here the
+'teaming' tag is provided with type 'transient', indicating that this device
+may periodically be unplugged. A second attribute, 'persistent', is provided
+and points to the alias name declared for the virtio-net device.
+
Booting a VM with the above configuration will result in the following 3
-netdevs created in the VM.
+interfaces created in the VM:
::
4: ens10: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
@@ -65,13 +79,36 @@ netdevs created in the VM.
valid_lft 42482sec preferred_lft 42482sec
inet6 fe80::97d8:db2:8c10:b6d6/64 scope link
valid_lft forever preferred_lft forever
- 5: ens10nsby: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel master ens10 state UP group default qlen 1000
+ 5: ens10nsby: <BROADCAST,MULTICAST> mtu 1500 qdisc fq_codel master ens10 state DOWN group default qlen 1000
link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
7: ens11: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master ens10 state UP group default qlen 1000
link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
-ens10 is the 'failover' master netdev, ens10nsby and ens11 are the slave
-'standby' and 'primary' netdevs respectively.
+Here, ens10 is the 'failover' master interface, ens10nsby is the slave 'standby'
+virtio-net interface, and ens11 is the slave 'primary' VF passthrough interface.
+
+One point to note here is that some user space network configuration daemons
+like systemd-networkd, ifupdown, etc., do not understand the 'net_failover'
+device; on the first boot, the VM might end up with both the 'failover' device
+and the VF acquiring IP addresses (either same or different) from the DHCP
+server. This will result in a lack of connectivity to the VM. So some tweaks
+might be needed in these network configuration daemons to make sure that an IP
+is received only on the 'failover' device.
+
+Below is the patch snippet used with 'cloud-ifupdown-helper' script found on
+Debian cloud images:
+
+::
+ @@ -27,6 +27,8 @@ do_setup() {
+ local working="$cfgdir/.$INTERFACE"
+ local final="$cfgdir/$INTERFACE"
+
+ + if [ -d "/sys/class/net/${INTERFACE}/master" ]; then exit 0; fi
+ +
+ if ifup --no-act "$INTERFACE" > /dev/null 2>&1; then
+ # interface is already known to ifupdown, no need to generate cfg
+ log "Skipping configuration generation for $INTERFACE"
+
Live Migration of a VM with SR-IOV VF & virtio-net in STANDBY mode
==================================================================
@@ -80,40 +117,68 @@ net_failover also enables hypervisor controlled live migration to be supported
with VMs that have direct attached SR-IOV VF devices by automatic failover to
the paravirtual datapath when the VF is unplugged.
-Here is a sample script that shows the steps to initiate live migration on
-the source hypervisor.
+Here is a sample script that shows the steps to initiate live migration from
+the source hypervisor. Note: it is assumed that the VM is connected to a
+software bridge 'br0' that has a single VF attached to it along with the
+VM's vnet device. This is not the VF that was passed through to the VM (seen
+in the vf.xml file).
::
- # cat vf_xml
+ # cat vf.xml
<interface type='hostdev' managed='yes'>
<mac address='52:54:00:00:12:53'/>
<source>
<address type='pci' domain='0x0000' bus='0x42' slot='0x02' function='0x5'/>
</source>
- <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' function='0x0'/>
+ <teaming type='transient' persistent='ua-backup0'/>
</interface>
- # Source Hypervisor
+ # Source Hypervisor migrate.sh
#!/bin/bash
- DOMAIN=fedora27-tap01
- PF=enp66s0f0
- VF_NUM=5
- TAP_IF=tap01
- VF_XML=
+ DOMAIN=vm-01
+ PF=ens6np0
+ VF=ens6v1 # VF attached to the bridge.
+ VF_NUM=1
+ TAP_IF=vmtap01 # virtio-net interface in the VM.
+ VF_XML=vf.xml
MAC=52:54:00:00:12:53
ZERO_MAC=00:00:00:00:00:00
+ # Set the virtio-net interface up.
virsh domif-setlink $DOMAIN $TAP_IF up
- bridge fdb del $MAC dev $PF master
- virsh detach-device $DOMAIN $VF_XML
+
+ # Remove the VF that was passed through to the VM.
+ virsh detach-device --live --config $DOMAIN $VF_XML
+
ip link set $PF vf $VF_NUM mac $ZERO_MAC
- virsh migrate --live $DOMAIN qemu+ssh://$REMOTE_HOST/system
+ # Add FDB entry for traffic to continue going to the VM via
+ # the VF -> br0 -> vnet interface path.
+ bridge fdb add $MAC dev $VF
+ bridge fdb add $MAC dev $TAP_IF master
+
+ # Migrate the VM
+ virsh migrate --live --persistent $DOMAIN qemu+ssh://$REMOTE_HOST/system
+
+ # Clean up FDB entries after migration completes.
+ bridge fdb del $MAC dev $VF
+ bridge fdb del $MAC dev $TAP_IF master
- # Destination Hypervisor
+On the destination hypervisor, a shared bridge 'br0' is created before migration
+starts, and a VF from the destination PF is added to the bridge. Similarly, an
+appropriate FDB entry is added.
+
+The following script is executed on the destination hypervisor once migration
+completes, and it reattaches the VF to the VM and brings down the virtio-net
+interface.
+
+::
+ # reattach-vf.sh
#!/bin/bash
- virsh attach-device $DOMAIN $VF_XML
- virsh domif-setlink $DOMAIN $TAP_IF down
+ bridge fdb del 52:54:00:00:12:53 dev ens36v0
+ bridge fdb del 52:54:00:00:12:53 dev vmtap01 master
+ virsh attach-device --config --live vm01 vf.xml
+ virsh domif-setlink vm01 vmtap01 down
diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst
index 571ba08386e7..d43da709bf40 100644
--- a/Documentation/networking/phy.rst
+++ b/Documentation/networking/phy.rst
@@ -237,6 +237,11 @@ negotiation results.
Some of the interface modes are described below:
+``PHY_INTERFACE_MODE_SMII``
+ This is serial MII, clocked at 125MHz, supporting 100M and 10M speeds.
+ Some details can be found in
+ https://opencores.org/ocsvn/smii/smii/trunk/doc/SMII.pdf
+
``PHY_INTERFACE_MODE_1000BASEX``
This defines the 1000BASE-X single-lane serdes link as defined by the
802.3 standard section 36. The link operates at a fixed bit rate of
diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h
index 8b300dd28def..72b0e71cc3de 100644
--- a/arch/arm64/include/asm/extable.h
+++ b/arch/arm64/include/asm/extable.h
@@ -33,15 +33,6 @@ do { \
(b)->data = (tmp).data; \
} while (0)
-static inline bool in_bpf_jit(struct pt_regs *regs)
-{
- if (!IS_ENABLED(CONFIG_BPF_JIT))
- return false;
-
- return regs->pc >= BPF_JIT_REGION_START &&
- regs->pc < BPF_JIT_REGION_END;
-}
-
#ifdef CONFIG_BPF_JIT
bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 1b9a1e242612..0af70d9abede 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -44,11 +44,8 @@
#define _PAGE_OFFSET(va) (-(UL(1) << (va)))
#define PAGE_OFFSET (_PAGE_OFFSET(VA_BITS))
#define KIMAGE_VADDR (MODULES_END)
-#define BPF_JIT_REGION_START (_PAGE_END(VA_BITS_MIN))
-#define BPF_JIT_REGION_SIZE (SZ_128M)
-#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
-#define MODULES_VADDR (BPF_JIT_REGION_END)
+#define MODULES_VADDR (_PAGE_END(VA_BITS_MIN))
#define MODULES_VSIZE (SZ_128M)
#define VMEMMAP_START (-(UL(1) << (VA_BITS - VMEMMAP_SHIFT)))
#define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE)
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 7b21213a570f..e8986e6067a9 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -994,7 +994,7 @@ static struct break_hook bug_break_hook = {
static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr)
{
pr_err("%s generated an invalid instruction at %pS!\n",
- in_bpf_jit(regs) ? "BPF JIT" : "Kernel text patching",
+ "Kernel text patching",
(void *)instruction_pointer(regs));
/* We cannot handle this */
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index 1c403536c9bb..9bc4066c5bf3 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -41,8 +41,6 @@ static struct addr_marker address_markers[] = {
{ 0 /* KASAN_SHADOW_START */, "Kasan shadow start" },
{ KASAN_SHADOW_END, "Kasan shadow end" },
#endif
- { BPF_JIT_REGION_START, "BPF start" },
- { BPF_JIT_REGION_END, "BPF end" },
{ MODULES_VADDR, "Modules start" },
{ MODULES_END, "Modules end" },
{ VMALLOC_START, "vmalloc() area" },
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 3a8a7140a9bf..86c9dc0681cc 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1141,15 +1141,12 @@ out:
u64 bpf_jit_alloc_exec_limit(void)
{
- return BPF_JIT_REGION_SIZE;
+ return VMALLOC_END - VMALLOC_START;
}
void *bpf_jit_alloc_exec(unsigned long size)
{
- return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
- BPF_JIT_REGION_END, GFP_KERNEL,
- PAGE_KERNEL, 0, NUMA_NO_NODE,
- __builtin_return_address(0));
+ return vmalloc(size);
}
void bpf_jit_free_exec(void *addr)
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index cde6db184c26..4fc1a5d70dcf 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -1441,7 +1441,9 @@ flash_fail:
}
static void vector_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct vector_private *vp = netdev_priv(netdev);
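
For context, the ethtool ->get_ringparam()/->set_ringparam() callbacks gained
two parameters in this series so drivers can report values that do not fit in
struct ethtool_ringparam, such as the new ETHTOOL_A_RINGS_RX_BUF_LEN attribute.
A minimal sketch of a driver filling it in; mydev_priv and the MYDEV_* limits
are hypothetical, only the signature and kernel_ethtool_ringparam::rx_buf_len
come from this series::

    static void mydev_get_ringparam(struct net_device *dev,
                                    struct ethtool_ringparam *ring,
                                    struct kernel_ethtool_ringparam *kernel_ring,
                                    struct netlink_ext_ack *extack)
    {
            struct mydev_priv *priv = netdev_priv(dev);

            ring->rx_max_pending = MYDEV_RX_RING_MAX;
            ring->tx_max_pending = MYDEV_TX_RING_MAX;
            ring->rx_pending = priv->rx_ring_len;
            ring->tx_pending = priv->tx_ring_len;
            /* served to user space as ETHTOOL_A_RINGS_RX_BUF_LEN */
            kernel_ring->rx_buf_len = priv->rx_buf_len;
    }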
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 21a0c2562ec0..2d74f9f82aa9 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -876,6 +876,7 @@ struct regmap *__regmap_init(struct device *dev,
if (!bus) {
map->reg_read = config->reg_read;
map->reg_write = config->reg_write;
+ map->reg_update_bits = config->reg_update_bits;
map->defer_caching = false;
goto skip_format_initialization;
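
With this one-line fix, bus-less regmaps (created with a NULL bus, e.g. via
devm_regmap_init(dev, NULL, ctx, &config)) honour a caller-supplied
reg_update_bits() alongside reg_read()/reg_write(). A hedged sketch of such a
config; the mychip_* callbacks are placeholders, the field names match
include/linux/regmap.h::

    static int mychip_reg_read(void *context, unsigned int reg,
                               unsigned int *val);
    static int mychip_reg_write(void *context, unsigned int reg,
                                unsigned int val);
    static int mychip_reg_update_bits(void *context, unsigned int reg,
                                      unsigned int mask, unsigned int val);

    static const struct regmap_config mychip_regmap_config = {
            .reg_bits        = 8,
            .val_bits        = 8,
            .reg_read        = mychip_reg_read,
            .reg_write       = mychip_reg_write,
            /* was silently ignored for bus-less maps before this fix */
            .reg_update_bits = mychip_reg_update_bits,
    };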
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 851842372c9b..36380e618ba4 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -19,6 +19,10 @@ config BT_QCA
tristate
select FW_LOADER
+config BT_MTK
+ tristate
+ select FW_LOADER
+
config BT_HCIBTUSB
tristate "HCI USB driver"
depends on USB
@@ -55,6 +59,7 @@ config BT_HCIBTUSB_BCM
config BT_HCIBTUSB_MTK
bool "MediaTek protocol support"
depends on BT_HCIBTUSB
+ select BT_MTK
default n
help
The MediaTek protocol support enables firmware download
@@ -383,6 +388,7 @@ config BT_ATH3K
config BT_MTKSDIO
tristate "MediaTek HCI SDIO driver"
depends on MMC
+ select BT_MTK
help
MediaTek Bluetooth HCI SDIO driver.
This driver is required if you want to use MediaTek Bluetooth
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 16286ea2655d..3321a8aea4a0 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_BT_QCOMSMD) += btqcomsmd.o
obj-$(CONFIG_BT_BCM) += btbcm.o
obj-$(CONFIG_BT_RTL) += btrtl.o
obj-$(CONFIG_BT_QCA) += btqca.o
+obj-$(CONFIG_BT_MTK) += btmtk.o
obj-$(CONFIG_BT_VIRTIO) += virtio_bt.o
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 5a321b4076aa..cab93935cc7f 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -628,6 +628,9 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
data->bulk_out_ep = bulk_out_ep->desc.bEndpointAddress;
data->bulk_pkt_size = le16_to_cpu(bulk_out_ep->desc.wMaxPacketSize);
+ if (!data->bulk_pkt_size)
+ goto done;
+
rwlock_init(&data->lock);
data->reassembly = NULL;
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 9359bff47296..8f9109b40961 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -2081,14 +2081,16 @@ static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev,
if (ver->img_type == 0x03) {
btintel_clear_flag(hdev, INTEL_BOOTLOADER);
btintel_check_bdaddr(hdev);
- }
-
- /* If the OTP has no valid Bluetooth device address, then there will
- * also be no valid address for the operational firmware.
- */
- if (!bacmp(&ver->otp_bd_addr, BDADDR_ANY)) {
- bt_dev_info(hdev, "No device address configured");
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ } else {
+ /*
+ * Check for a valid bd address in bootloader mode. The
+ * device will be marked as unconfigured if an empty bd
+ * address is found.
+ */
+ if (!bacmp(&ver->otp_bd_addr, BDADDR_ANY)) {
+ bt_dev_info(hdev, "No device address configured");
+ set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ }
}
btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi");
@@ -2466,6 +2468,10 @@ static int btintel_setup_combined(struct hci_dev *hdev)
goto exit_error;
}
+ /* memset ver_tlv to start with a clean state as a few fields are exclusive
+ * to bootloader mode and are not populated in operational mode
+ */
+ memset(&ver_tlv, 0, sizeof(ver_tlv));
/* For TLV type device, parse the tlv data */
err = btintel_parse_version_tlv(hdev, &ver_tlv, skb);
if (err) {
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 5ccbe4d459d0..181338f60530 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -1,4 +1,4 @@
-/**
+/*
* Marvell Bluetooth driver
*
* Copyright (C) 2009, Marvell International Ltd.
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
new file mode 100644
index 000000000000..c2ee5c4b975a
--- /dev/null
+++ b/drivers/bluetooth/btmtk.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2021 MediaTek Inc.
+ *
+ */
+#include <linux/module.h>
+#include <linux/firmware.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btmtk.h"
+
+#define VERSION "0.1"
+
+/* Definitions for the mt79xx ROM patch download */
+#define MTK_FW_ROM_PATCH_HEADER_SIZE 32
+#define MTK_FW_ROM_PATCH_GD_SIZE 64
+#define MTK_FW_ROM_PATCH_SEC_MAP_SIZE 64
+#define MTK_SEC_MAP_COMMON_SIZE 12
+#define MTK_SEC_MAP_NEED_SEND_SIZE 52
+
+struct btmtk_patch_header {
+ u8 datetime[16];
+ u8 platform[4];
+ __le16 hwver;
+ __le16 swver;
+ __le32 magicnum;
+} __packed;
+
+struct btmtk_global_desc {
+ __le32 patch_ver;
+ __le32 sub_sys;
+ __le32 feature_opt;
+ __le32 section_num;
+} __packed;
+
+struct btmtk_section_map {
+ __le32 sectype;
+ __le32 secoffset;
+ __le32 secsize;
+ union {
+ __le32 u4SecSpec[13];
+ struct {
+ __le32 dlAddr;
+ __le32 dlsize;
+ __le32 seckeyidx;
+ __le32 alignlen;
+ __le32 sectype;
+ __le32 dlmodecrctype;
+ __le32 crc;
+ __le32 reserved[6];
+ } bin_info_spec;
+ };
+} __packed;
+
+int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname,
+ wmt_cmd_sync_func_t wmt_cmd_sync)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ struct btmtk_global_desc *globaldesc = NULL;
+ struct btmtk_section_map *sectionmap;
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ const u8 *fw_bin_ptr;
+ int err, dlen, i, status;
+ u8 flag, first_block, retry;
+ u32 section_num, dl_size, section_offset;
+ u8 cmd[64];
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
+ return err;
+ }
+
+ fw_ptr = fw->data;
+ fw_bin_ptr = fw_ptr;
+ globaldesc = (struct btmtk_global_desc *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE);
+ section_num = le32_to_cpu(globaldesc->section_num);
+
+ for (i = 0; i < section_num; i++) {
+ first_block = 1;
+ fw_ptr = fw_bin_ptr;
+ sectionmap = (struct btmtk_section_map *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE +
+ MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i);
+
+ section_offset = le32_to_cpu(sectionmap->secoffset);
+ dl_size = le32_to_cpu(sectionmap->bin_info_spec.dlsize);
+
+ if (dl_size > 0) {
+ retry = 20;
+ while (retry > 0) {
+ cmd[0] = 0; /* 0 means legacy dl mode. */
+ memcpy(cmd + 1,
+ fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE +
+ MTK_FW_ROM_PATCH_GD_SIZE +
+ MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i +
+ MTK_SEC_MAP_COMMON_SIZE,
+ MTK_SEC_MAP_NEED_SEND_SIZE + 1);
+
+ wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
+ wmt_params.status = &status;
+ wmt_params.flag = 0;
+ wmt_params.dlen = MTK_SEC_MAP_NEED_SEND_SIZE + 1;
+ wmt_params.data = &cmd;
+
+ err = wmt_cmd_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
+ err);
+ goto err_release_fw;
+ }
+
+ if (status == BTMTK_WMT_PATCH_UNDONE) {
+ break;
+ } else if (status == BTMTK_WMT_PATCH_PROGRESS) {
+ msleep(100);
+ retry--;
+ } else if (status == BTMTK_WMT_PATCH_DONE) {
+ goto next_section;
+ } else {
+ bt_dev_err(hdev, "Failed wmt patch dwnld status (%d)",
+ status);
+ goto err_release_fw;
+ }
+ }
+
+ fw_ptr += section_offset;
+ wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
+ wmt_params.status = NULL;
+
+ while (dl_size > 0) {
+ dlen = min_t(int, 250, dl_size);
+ if (first_block == 1) {
+ flag = 1;
+ first_block = 0;
+ } else if (dl_size - dlen <= 0) {
+ flag = 3;
+ } else {
+ flag = 2;
+ }
+
+ wmt_params.flag = flag;
+ wmt_params.dlen = dlen;
+ wmt_params.data = fw_ptr;
+
+ err = wmt_cmd_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
+ err);
+ goto err_release_fw;
+ }
+
+ dl_size -= dlen;
+ fw_ptr += dlen;
+ }
+ }
+next_section:
+ continue;
+ }
+ /* Wait a few moments for the firmware activation to complete */
+ usleep_range(100000, 120000);
+
+err_release_fw:
+ release_firmware(fw);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(btmtk_setup_firmware_79xx);
+
+int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
+ wmt_cmd_sync_func_t wmt_cmd_sync)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ size_t fw_size;
+ int err, dlen;
+ u8 flag, param;
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
+ return err;
+ }
+
+ /* Power on data RAM the firmware relies on. */
+ param = 1;
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 3;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = wmt_cmd_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
+ goto err_release_fw;
+ }
+
+ fw_ptr = fw->data;
+ fw_size = fw->size;
+
+ /* The patch header is 30 bytes and should be skipped */
+ if (fw_size < 30) {
+ err = -EINVAL;
+ goto err_release_fw;
+ }
+
+ fw_size -= 30;
+ fw_ptr += 30;
+ flag = 1;
+
+ wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
+ wmt_params.status = NULL;
+
+ while (fw_size > 0) {
+ dlen = min_t(int, 250, fw_size);
+
+ /* Tell device the position in sequence */
+ if (fw_size - dlen <= 0)
+ flag = 3;
+ else if (fw_size < fw->size - 30)
+ flag = 2;
+
+ wmt_params.flag = flag;
+ wmt_params.dlen = dlen;
+ wmt_params.data = fw_ptr;
+
+ err = wmt_cmd_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
+ err);
+ goto err_release_fw;
+ }
+
+ fw_size -= dlen;
+ fw_ptr += dlen;
+ }
+
+ wmt_params.op = BTMTK_WMT_RST;
+ wmt_params.flag = 4;
+ wmt_params.dlen = 0;
+ wmt_params.data = NULL;
+ wmt_params.status = NULL;
+
+ /* Activate the function the firmware provides */
+ err = wmt_cmd_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
+ goto err_release_fw;
+ }
+
+ /* Wait a few moments for the firmware activation to complete */
+ usleep_range(10000, 12000);
+
+err_release_fw:
+ release_firmware(fw);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(btmtk_setup_firmware);
+
+int btmtk_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ struct sk_buff *skb;
+ long ret;
+
+ skb = __hci_cmd_sync(hdev, 0xfc1a, 6, bdaddr, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ bt_dev_err(hdev, "changing Mediatek device address failed (%ld)",
+ ret);
+ return ret;
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btmtk_set_bdaddr);
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_AUTHOR("Mark Chen <mark-yw.chen@mediatek.com>");
+MODULE_DESCRIPTION("Bluetooth support for MediaTek devices ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE_MT7663);
+MODULE_FIRMWARE(FIRMWARE_MT7668);
+MODULE_FIRMWARE(FIRMWARE_MT7961);
diff --git a/drivers/bluetooth/btmtk.h b/drivers/bluetooth/btmtk.h
new file mode 100644
index 000000000000..6e7b0c7567c0
--- /dev/null
+++ b/drivers/bluetooth/btmtk.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2021 MediaTek Inc. */
+
+#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
+#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
+#define FIRMWARE_MT7961 "mediatek/BT_RAM_CODE_MT7961_1_2_hdr.bin"
+
+#define HCI_WMT_MAX_EVENT_SIZE 64
+
+#define BTMTK_WMT_REG_READ 0x2
+
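+/* WMT opcodes used in the MediaTek vendor command channel */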
+enum {
+ BTMTK_WMT_PATCH_DWNLD = 0x1,
+ BTMTK_WMT_TEST = 0x2,
+ BTMTK_WMT_WAKEUP = 0x3,
+ BTMTK_WMT_HIF = 0x4,
+ BTMTK_WMT_FUNC_CTRL = 0x6,
+ BTMTK_WMT_RST = 0x7,
+ BTMTK_WMT_REGISTER = 0x8,
+ BTMTK_WMT_SEMAPHORE = 0x17,
+};
+
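+/* Status codes derived from the WMT event replies */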
+enum {
+ BTMTK_WMT_INVALID,
+ BTMTK_WMT_PATCH_UNDONE,
+ BTMTK_WMT_PATCH_PROGRESS,
+ BTMTK_WMT_PATCH_DONE,
+ BTMTK_WMT_ON_UNDONE,
+ BTMTK_WMT_ON_DONE,
+ BTMTK_WMT_ON_PROGRESS,
+};
+
+struct btmtk_wmt_hdr {
+ u8 dir;
+ u8 op;
+ __le16 dlen;
+ u8 flag;
+} __packed;
+
+struct btmtk_hci_wmt_cmd {
+ struct btmtk_wmt_hdr hdr;
+ u8 data[];
+} __packed;
+
+struct btmtk_hci_wmt_evt {
+ struct hci_event_hdr hhdr;
+ struct btmtk_wmt_hdr whdr;
+} __packed;
+
+struct btmtk_hci_wmt_evt_funcc {
+ struct btmtk_hci_wmt_evt hwhdr;
+ __be16 status;
+} __packed;
+
+struct btmtk_hci_wmt_evt_reg {
+ struct btmtk_hci_wmt_evt hwhdr;
+ u8 rsv[2];
+ u8 num;
+ __le32 addr;
+ __le32 val;
+} __packed;
+
+struct btmtk_tci_sleep {
+ u8 mode;
+ __le16 duration;
+ __le16 host_duration;
+ u8 host_wakeup_pin;
+ u8 time_compensation;
+} __packed;
+
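+/* Parameters for a single WMT transaction; status is an optional output */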
+struct btmtk_hci_wmt_params {
+ u8 op;
+ u8 flag;
+ u16 dlen;
+ const void *data;
+ u32 *status;
+};
+
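+/* Transport-specific hook that sends a WMT command and waits for its event */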
+typedef int (*wmt_cmd_sync_func_t)(struct hci_dev *,
+ struct btmtk_hci_wmt_params *);
+
+#if IS_ENABLED(CONFIG_BT_MTK)
+
+int btmtk_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+
+int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname,
+ wmt_cmd_sync_func_t wmt_cmd_sync);
+
+int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
+ wmt_cmd_sync_func_t wmt_cmd_sync);
+#else
+
+static inline int btmtk_set_bdaddr(struct hci_dev *hdev,
+ const bdaddr_t *bdaddr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname,
+					    wmt_cmd_sync_func_t wmt_cmd_sync)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
+				       wmt_cmd_sync_func_t wmt_cmd_sync)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 9872ef18f9fe..ce6a6c00ff98 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -12,7 +12,6 @@
#include <asm/unaligned.h>
#include <linux/atomic.h>
-#include <linux/firmware.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
@@ -28,26 +27,32 @@
#include <net/bluetooth/hci_core.h>
#include "h4_recv.h"
+#include "btmtk.h"
#define VERSION "0.1"
-#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
-#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
-
#define MTKBTSDIO_AUTOSUSPEND_DELAY 8000
static bool enable_autosuspend;
struct btmtksdio_data {
const char *fwname;
+ u16 chipid;
};
static const struct btmtksdio_data mt7663_data = {
.fwname = FIRMWARE_MT7663,
+ .chipid = 0x7663,
};
static const struct btmtksdio_data mt7668_data = {
.fwname = FIRMWARE_MT7668,
+ .chipid = 0x7668,
+};
+
+static const struct btmtksdio_data mt7921_data = {
+ .fwname = FIRMWARE_MT7961,
+ .chipid = 0x7921,
};
static const struct sdio_device_id btmtksdio_table[] = {
@@ -55,6 +60,8 @@ static const struct sdio_device_id btmtksdio_table[] = {
.driver_data = (kernel_ulong_t)&mt7663_data },
{SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7668),
.driver_data = (kernel_ulong_t)&mt7668_data },
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7961),
+ .driver_data = (kernel_ulong_t)&mt7921_data },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(sdio, btmtksdio_table);
@@ -86,81 +93,27 @@ MODULE_DEVICE_TABLE(sdio, btmtksdio_table);
#define MTK_REG_CRDR 0x1c
+#define MTK_REG_CRPLR 0x24
+
#define MTK_SDIO_BLOCK_SIZE 256
#define BTMTKSDIO_TX_WAIT_VND_EVT 1
-enum {
- MTK_WMT_PATCH_DWNLD = 0x1,
- MTK_WMT_TEST = 0x2,
- MTK_WMT_WAKEUP = 0x3,
- MTK_WMT_HIF = 0x4,
- MTK_WMT_FUNC_CTRL = 0x6,
- MTK_WMT_RST = 0x7,
- MTK_WMT_SEMAPHORE = 0x17,
-};
-
-enum {
- BTMTK_WMT_INVALID,
- BTMTK_WMT_PATCH_UNDONE,
- BTMTK_WMT_PATCH_DONE,
- BTMTK_WMT_ON_UNDONE,
- BTMTK_WMT_ON_DONE,
- BTMTK_WMT_ON_PROGRESS,
-};
-
struct mtkbtsdio_hdr {
__le16 len;
__le16 reserved;
u8 bt_type;
} __packed;
-struct mtk_wmt_hdr {
- u8 dir;
- u8 op;
- __le16 dlen;
- u8 flag;
-} __packed;
-
-struct mtk_hci_wmt_cmd {
- struct mtk_wmt_hdr hdr;
- u8 data[256];
-} __packed;
-
-struct btmtk_hci_wmt_evt {
- struct hci_event_hdr hhdr;
- struct mtk_wmt_hdr whdr;
-} __packed;
-
-struct btmtk_hci_wmt_evt_funcc {
- struct btmtk_hci_wmt_evt hwhdr;
- __be16 status;
-} __packed;
-
-struct btmtk_tci_sleep {
- u8 mode;
- __le16 duration;
- __le16 host_duration;
- u8 host_wakeup_pin;
- u8 time_compensation;
-} __packed;
-
-struct btmtk_hci_wmt_params {
- u8 op;
- u8 flag;
- u16 dlen;
- const void *data;
- u32 *status;
-};
-
struct btmtksdio_dev {
struct hci_dev *hdev;
struct sdio_func *func;
struct device *dev;
- struct work_struct tx_work;
+ struct work_struct txrx_work;
unsigned long tx_state;
struct sk_buff_head txq;
+ bool hw_tx_ready;
struct sk_buff *evt_skb;
@@ -172,29 +125,35 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
+ struct btmtk_hci_wmt_evt_reg *wmt_evt_reg;
u32 hlen, status = BTMTK_WMT_INVALID;
struct btmtk_hci_wmt_evt *wmt_evt;
- struct mtk_hci_wmt_cmd wc;
- struct mtk_wmt_hdr *hdr;
+ struct btmtk_hci_wmt_cmd *wc;
+ struct btmtk_wmt_hdr *hdr;
int err;
+	/* Send the WMT command and wait until the matching WMT event arrives */
hlen = sizeof(*hdr) + wmt_params->dlen;
if (hlen > 255)
return -EINVAL;
- hdr = (struct mtk_wmt_hdr *)&wc;
+ wc = kzalloc(hlen, GFP_KERNEL);
+ if (!wc)
+ return -ENOMEM;
+
+ hdr = &wc->hdr;
hdr->dir = 1;
hdr->op = wmt_params->op;
hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
hdr->flag = wmt_params->flag;
- memcpy(wc.data, wmt_params->data, wmt_params->dlen);
+ memcpy(wc->data, wmt_params->data, wmt_params->dlen);
set_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
- err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
+ err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
if (err < 0) {
clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
- return err;
+ goto err_free_wc;
}
/* The vendor specific WMT commands are all answered by a vendor
@@ -211,13 +170,14 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
if (err == -EINTR) {
bt_dev_err(hdev, "Execution of wmt command interrupted");
clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
- return err;
+ goto err_free_wc;
}
if (err) {
bt_dev_err(hdev, "Execution of wmt command timed out");
clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
- return -ETIMEDOUT;
+ err = -ETIMEDOUT;
+ goto err_free_wc;
}
/* Parse and handle the return WMT event */
@@ -230,13 +190,13 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
}
switch (wmt_evt->whdr.op) {
- case MTK_WMT_SEMAPHORE:
+ case BTMTK_WMT_SEMAPHORE:
if (wmt_evt->whdr.flag == 2)
status = BTMTK_WMT_PATCH_UNDONE;
else
status = BTMTK_WMT_PATCH_DONE;
break;
- case MTK_WMT_FUNC_CTRL:
+ case BTMTK_WMT_FUNC_CTRL:
wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
status = BTMTK_WMT_ON_DONE;
@@ -245,6 +205,19 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
else
status = BTMTK_WMT_ON_UNDONE;
break;
+ case BTMTK_WMT_PATCH_DWNLD:
+ if (wmt_evt->whdr.flag == 2)
+ status = BTMTK_WMT_PATCH_DONE;
+ else if (wmt_evt->whdr.flag == 1)
+ status = BTMTK_WMT_PATCH_PROGRESS;
+ else
+ status = BTMTK_WMT_PATCH_UNDONE;
+ break;
+ case BTMTK_WMT_REGISTER:
+ wmt_evt_reg = (struct btmtk_hci_wmt_evt_reg *)wmt_evt;
+ if (le16_to_cpu(wmt_evt->whdr.dlen) == 12)
+ status = le32_to_cpu(wmt_evt_reg->val);
+ break;
}
if (wmt_params->status)
@@ -253,6 +226,8 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
err_free_skb:
kfree_skb(bdev->evt_skb);
bdev->evt_skb = NULL;
+err_free_wc:
+ kfree(wc);
return err;
}
@@ -279,6 +254,7 @@ static int btmtksdio_tx_packet(struct btmtksdio_dev *bdev,
sdio_hdr->reserved = cpu_to_le16(0);
sdio_hdr->bt_type = hci_skb_pkt_type(skb);
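+	/* Mark the TX path busy; the TX_EMPTY interrupt sets hw_tx_ready again */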
+ bdev->hw_tx_ready = false;
err = sdio_writesb(bdev->func, MTK_REG_CTDR, skb->data,
round_up(skb->len, MTK_SDIO_BLOCK_SIZE));
if (err < 0)
@@ -301,32 +277,6 @@ static u32 btmtksdio_drv_own_query(struct btmtksdio_dev *bdev)
return sdio_readl(bdev->func, MTK_REG_CHLPCR, NULL);
}
-static void btmtksdio_tx_work(struct work_struct *work)
-{
- struct btmtksdio_dev *bdev = container_of(work, struct btmtksdio_dev,
- tx_work);
- struct sk_buff *skb;
- int err;
-
- pm_runtime_get_sync(bdev->dev);
-
- sdio_claim_host(bdev->func);
-
- while ((skb = skb_dequeue(&bdev->txq))) {
- err = btmtksdio_tx_packet(bdev, skb);
- if (err < 0) {
- bdev->hdev->stat.err_tx++;
- skb_queue_head(&bdev->txq, skb);
- break;
- }
- }
-
- sdio_release_host(bdev->func);
-
- pm_runtime_mark_last_busy(bdev->dev);
- pm_runtime_put_autosuspend(bdev->dev);
-}
-
static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
@@ -477,70 +427,89 @@ err_kfree_skb:
return err;
}
-static void btmtksdio_interrupt(struct sdio_func *func)
+static void btmtksdio_txrx_work(struct work_struct *work)
{
- struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
- u32 int_status;
- u16 rx_size;
-
- /* It is required that the host gets ownership from the device before
- * accessing any register, however, if SDIO host is not being released,
- * a potential deadlock probably happens in a circular wait between SDIO
- * IRQ work and PM runtime work. So, we have to explicitly release SDIO
- * host here and claim again after the PM runtime work is all done.
- */
- sdio_release_host(bdev->func);
+ struct btmtksdio_dev *bdev = container_of(work, struct btmtksdio_dev,
+ txrx_work);
+ unsigned long txrx_timeout;
+ u32 int_status, rx_size;
+ struct sk_buff *skb;
+ int err;
pm_runtime_get_sync(bdev->dev);
sdio_claim_host(bdev->func);
/* Disable interrupt */
- sdio_writel(func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
-
- int_status = sdio_readl(func, MTK_REG_CHISR, NULL);
-
- /* Ack an interrupt as soon as possible before any operation on
- * hardware.
- *
- * Note that we don't ack any status during operations to avoid race
- * condition between the host and the device such as it's possible to
- * mistakenly ack RX_DONE for the next packet and then cause interrupts
- * not be raised again but there is still pending data in the hardware
- * FIFO.
- */
- sdio_writel(func, int_status, MTK_REG_CHISR, NULL);
+ sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0);
+
+ txrx_timeout = jiffies + 5 * HZ;
+
+ do {
+ int_status = sdio_readl(bdev->func, MTK_REG_CHISR, NULL);
+
+ /* Ack an interrupt as soon as possible before any operation on
+ * hardware.
+ *
+		 * Note that we don't ack any status during operations to avoid a race
+		 * condition between the host and the device, such as mistakenly acking
+		 * RX_DONE for the next packet, which would keep interrupts from being
+		 * raised again while data is still pending in the hardware
+		 * FIFO.
+ */
+ sdio_writel(bdev->func, int_status, MTK_REG_CHISR, NULL);
+
+ if (int_status & FW_OWN_BACK_INT)
+ bt_dev_dbg(bdev->hdev, "Get fw own back");
+
+ if (int_status & TX_EMPTY)
+ bdev->hw_tx_ready = true;
+ else if (unlikely(int_status & TX_FIFO_OVERFLOW))
+ bt_dev_warn(bdev->hdev, "Tx fifo overflow");
+
+ if (bdev->hw_tx_ready) {
+ skb = skb_dequeue(&bdev->txq);
+ if (skb) {
+ err = btmtksdio_tx_packet(bdev, skb);
+ if (err < 0) {
+ bdev->hdev->stat.err_tx++;
+ skb_queue_head(&bdev->txq, skb);
+ }
+ }
+ }
- if (unlikely(!int_status))
- bt_dev_err(bdev->hdev, "CHISR is 0");
+ if (int_status & RX_DONE_INT) {
+ rx_size = sdio_readl(bdev->func, MTK_REG_CRPLR, NULL);
+ rx_size = (rx_size & RX_PKT_LEN) >> 16;
+ if (btmtksdio_rx_packet(bdev, rx_size) < 0)
+ bdev->hdev->stat.err_rx++;
+ }
+ } while (int_status || time_is_before_jiffies(txrx_timeout));
- if (int_status & FW_OWN_BACK_INT)
- bt_dev_dbg(bdev->hdev, "Get fw own back");
+ /* Enable interrupt */
+ sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, 0);
- if (int_status & TX_EMPTY)
- schedule_work(&bdev->tx_work);
- else if (unlikely(int_status & TX_FIFO_OVERFLOW))
- bt_dev_warn(bdev->hdev, "Tx fifo overflow");
+ sdio_release_host(bdev->func);
- if (int_status & RX_DONE_INT) {
- rx_size = (int_status & RX_PKT_LEN) >> 16;
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
+}
- if (btmtksdio_rx_packet(bdev, rx_size) < 0)
- bdev->hdev->stat.err_rx++;
- }
+static void btmtksdio_interrupt(struct sdio_func *func)
+{
+ struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
- /* Enable interrupt */
- sdio_writel(func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);
+ /* Disable interrupt */
+ sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0);
- pm_runtime_mark_last_busy(bdev->dev);
- pm_runtime_put_autosuspend(bdev->dev);
+ schedule_work(&bdev->txrx_work);
}
static int btmtksdio_open(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ u32 status, val;
int err;
- u32 status;
sdio_claim_host(bdev->func);
@@ -580,13 +549,22 @@ static int btmtksdio_open(struct hci_dev *hdev)
/* SDIO CMD 5 allows the SDIO device back to idle state an
* synchronous interrupt is supported in SDIO 4-bit mode
*/
- sdio_writel(bdev->func, SDIO_INT_CTL | SDIO_RE_INIT_EN,
- MTK_REG_CSDIOCSR, &err);
+ val = sdio_readl(bdev->func, MTK_REG_CSDIOCSR, &err);
+ if (err < 0)
+ goto err_release_irq;
+
+ val |= SDIO_INT_CTL;
+ sdio_writel(bdev->func, val, MTK_REG_CSDIOCSR, &err);
+ if (err < 0)
+ goto err_release_irq;
+
+	/* Explicitly select the write-1-clear method for CHISR */
+ val = sdio_readl(bdev->func, MTK_REG_CHCR, &err);
if (err < 0)
goto err_release_irq;
- /* Setup write-1-clear for CHISR register */
- sdio_writel(bdev->func, C_INT_CLR_CTRL, MTK_REG_CHCR, &err);
+ val |= C_INT_CLR_CTRL;
+ sdio_writel(bdev->func, val, MTK_REG_CHCR, &err);
if (err < 0)
goto err_release_irq;
@@ -630,6 +608,8 @@ static int btmtksdio_close(struct hci_dev *hdev)
sdio_release_irq(bdev->func);
+ cancel_work_sync(&bdev->txrx_work);
+
/* Return ownership to the device */
sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, NULL);
@@ -651,7 +631,7 @@ static int btmtksdio_flush(struct hci_dev *hdev)
skb_queue_purge(&bdev->txq);
- cancel_work_sync(&bdev->tx_work);
+ cancel_work_sync(&bdev->txrx_work);
return 0;
}
@@ -663,7 +643,7 @@ static int btmtksdio_func_query(struct hci_dev *hdev)
u8 param = 0;
/* Query whether the function is enabled */
- wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 4;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
@@ -678,111 +658,16 @@ static int btmtksdio_func_query(struct hci_dev *hdev)
return status;
}
-static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
+static int mt76xx_setup(struct hci_dev *hdev, const char *fwname)
{
struct btmtk_hci_wmt_params wmt_params;
- const struct firmware *fw;
- const u8 *fw_ptr;
- size_t fw_size;
- int err, dlen;
- u8 flag, param;
-
- err = request_firmware(&fw, fwname, &hdev->dev);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
- return err;
- }
-
- /* Power on data RAM the firmware relies on. */
- param = 1;
- wmt_params.op = MTK_WMT_FUNC_CTRL;
- wmt_params.flag = 3;
- wmt_params.dlen = sizeof(param);
- wmt_params.data = &param;
- wmt_params.status = NULL;
-
- err = mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
- goto free_fw;
- }
-
- fw_ptr = fw->data;
- fw_size = fw->size;
-
- /* The size of patch header is 30 bytes, should be skip */
- if (fw_size < 30) {
- err = -EINVAL;
- goto free_fw;
- }
-
- fw_size -= 30;
- fw_ptr += 30;
- flag = 1;
-
- wmt_params.op = MTK_WMT_PATCH_DWNLD;
- wmt_params.status = NULL;
-
- while (fw_size > 0) {
- dlen = min_t(int, 250, fw_size);
-
- /* Tell device the position in sequence */
- if (fw_size - dlen <= 0)
- flag = 3;
- else if (fw_size < fw->size - 30)
- flag = 2;
-
- wmt_params.flag = flag;
- wmt_params.dlen = dlen;
- wmt_params.data = fw_ptr;
-
- err = mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
- err);
- goto free_fw;
- }
-
- fw_size -= dlen;
- fw_ptr += dlen;
- }
-
- wmt_params.op = MTK_WMT_RST;
- wmt_params.flag = 4;
- wmt_params.dlen = 0;
- wmt_params.data = NULL;
- wmt_params.status = NULL;
-
- /* Activate funciton the firmware providing to */
- err = mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
- goto free_fw;
- }
-
- /* Wait a few moments for firmware activation done */
- usleep_range(10000, 12000);
-
-free_fw:
- release_firmware(fw);
- return err;
-}
-
-static int btmtksdio_setup(struct hci_dev *hdev)
-{
- struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
- struct btmtk_hci_wmt_params wmt_params;
- ktime_t calltime, delta, rettime;
struct btmtk_tci_sleep tci_sleep;
- unsigned long long duration;
struct sk_buff *skb;
int err, status;
u8 param = 0x1;
- calltime = ktime_get();
-
/* Query whether the firmware is already download */
- wmt_params.op = MTK_WMT_SEMAPHORE;
+ wmt_params.op = BTMTK_WMT_SEMAPHORE;
wmt_params.flag = 1;
wmt_params.dlen = 0;
wmt_params.data = NULL;
@@ -800,7 +685,7 @@ static int btmtksdio_setup(struct hci_dev *hdev)
}
/* Setup a firmware which the device definitely requires */
- err = mtk_setup_firmware(hdev, bdev->data->fwname);
+ err = btmtk_setup_firmware(hdev, fwname, mtk_hci_wmt_sync);
if (err < 0)
return err;
@@ -823,7 +708,7 @@ ignore_setup_fw:
}
/* Enable Bluetooth protocol */
- wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 0;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
@@ -852,6 +737,113 @@ ignore_func_on:
}
kfree_skb(skb);
+ return 0;
+}
+
+static int mt79xx_setup(struct hci_dev *hdev, const char *fwname)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ u8 param = 0x1;
+ int err;
+
+ err = btmtk_setup_firmware_79xx(hdev, fwname, mtk_hci_wmt_sync);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to setup 79xx firmware (%d)", err);
+ return err;
+ }
+
+ /* Enable Bluetooth protocol */
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ return err;
+}
+
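+/* Read a 32-bit chip register through a WMT REGISTER command */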
+static int btsdio_mtk_reg_read(struct hci_dev *hdev, u32 reg, u32 *val)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ struct reg_read_cmd {
+ u8 type;
+ u8 rsv;
+ u8 num;
+ __le32 addr;
+ } __packed reg_read = {
+ .type = 1,
+ .num = 1,
+ };
+ u32 status;
+ int err;
+
+ reg_read.addr = cpu_to_le32(reg);
+ wmt_params.op = BTMTK_WMT_REGISTER;
+ wmt_params.flag = BTMTK_WMT_REG_READ;
+ wmt_params.dlen = sizeof(reg_read);
+ wmt_params.data = &reg_read;
+ wmt_params.status = &status;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+		bt_dev_err(hdev, "Failed to read reg (%d)", err);
+ return err;
+ }
+
+ *val = status;
+
+ return err;
+}
+
+static int btmtksdio_setup(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ ktime_t calltime, delta, rettime;
+ unsigned long long duration;
+ char fwname[64];
+	int err;
+	u32 dev_id, fw_version = 0;
+
+ calltime = ktime_get();
+ bdev->hw_tx_ready = true;
+
+ switch (bdev->data->chipid) {
+ case 0x7921:
+ err = btsdio_mtk_reg_read(hdev, 0x70010200, &dev_id);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get device id (%d)", err);
+ return err;
+ }
+
+ err = btsdio_mtk_reg_read(hdev, 0x80021004, &fw_version);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get fw version (%d)", err);
+ return err;
+ }
+
+ snprintf(fwname, sizeof(fwname),
+ "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
+ dev_id & 0xffff, (fw_version & 0xff) + 1);
+ err = mt79xx_setup(hdev, fwname);
+ if (err < 0)
+ return err;
+ break;
+ case 0x7663:
+ case 0x7668:
+ err = mt76xx_setup(hdev, bdev->data->fwname);
+ if (err < 0)
+ return err;
+ break;
+ default:
+ return -ENODEV;
+ }
+
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
duration = (unsigned long long)ktime_to_ns(delta) >> 10;
@@ -891,7 +883,7 @@ static int btmtksdio_shutdown(struct hci_dev *hdev)
pm_runtime_get_sync(bdev->dev);
/* Disable the device */
- wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 0;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
@@ -932,7 +924,7 @@ static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
skb_queue_tail(&bdev->txq, skb);
- schedule_work(&bdev->tx_work);
+ schedule_work(&bdev->txrx_work);
return 0;
}
@@ -955,7 +947,7 @@ static int btmtksdio_probe(struct sdio_func *func,
bdev->dev = &func->dev;
bdev->func = func;
- INIT_WORK(&bdev->tx_work, btmtksdio_tx_work);
+ INIT_WORK(&bdev->txrx_work, btmtksdio_txrx_work);
skb_queue_head_init(&bdev->txq);
/* Initialize and register HCI device */
@@ -976,6 +968,8 @@ static int btmtksdio_probe(struct sdio_func *func,
hdev->setup = btmtksdio_setup;
hdev->shutdown = btmtksdio_shutdown;
hdev->send = btmtksdio_send_frame;
+ hdev->set_bdaddr = btmtk_set_bdaddr;
+
SET_HCIDEV_DEV(hdev, &func->dev);
hdev->manufacturer = 70;
@@ -1112,5 +1106,3 @@ MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek Bluetooth SDIO driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(FIRMWARE_MT7663);
-MODULE_FIRMWARE(FIRMWARE_MT7668);
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 199e8f7d426d..795be33f2892 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -295,6 +295,8 @@ static int btsdio_probe(struct sdio_func *func,
switch (func->device) {
case SDIO_DEVICE_ID_BROADCOM_43341:
case SDIO_DEVICE_ID_BROADCOM_43430:
+ case SDIO_DEVICE_ID_BROADCOM_4345:
+ case SDIO_DEVICE_ID_BROADCOM_43455:
case SDIO_DEVICE_ID_BROADCOM_4356:
return -ENODEV;
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 75c83768c257..30a057e1d4e3 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -24,6 +24,7 @@
#include "btintel.h"
#include "btbcm.h"
#include "btrtl.h"
+#include "btmtk.h"
#define VERSION "0.8"
@@ -2131,122 +2132,6 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
#define MTK_BT_RST_DONE 0x00000100
#define MTK_BT_RESET_WAIT_MS 100
#define MTK_BT_RESET_NUM_TRIES 10
-#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
-#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
-
-#define HCI_WMT_MAX_EVENT_SIZE 64
-/* It is for mt79xx download rom patch*/
-#define MTK_FW_ROM_PATCH_HEADER_SIZE 32
-#define MTK_FW_ROM_PATCH_GD_SIZE 64
-#define MTK_FW_ROM_PATCH_SEC_MAP_SIZE 64
-#define MTK_SEC_MAP_COMMON_SIZE 12
-#define MTK_SEC_MAP_NEED_SEND_SIZE 52
-
-enum {
- BTMTK_WMT_PATCH_DWNLD = 0x1,
- BTMTK_WMT_FUNC_CTRL = 0x6,
- BTMTK_WMT_RST = 0x7,
- BTMTK_WMT_SEMAPHORE = 0x17,
-};
-
-enum {
- BTMTK_WMT_INVALID,
- BTMTK_WMT_PATCH_UNDONE,
- BTMTK_WMT_PATCH_PROGRESS,
- BTMTK_WMT_PATCH_DONE,
- BTMTK_WMT_ON_UNDONE,
- BTMTK_WMT_ON_DONE,
- BTMTK_WMT_ON_PROGRESS,
-};
-
-struct btmtk_wmt_hdr {
- u8 dir;
- u8 op;
- __le16 dlen;
- u8 flag;
-} __packed;
-
-struct btmtk_hci_wmt_cmd {
- struct btmtk_wmt_hdr hdr;
- u8 data[];
-} __packed;
-
-struct btmtk_hci_wmt_evt {
- struct hci_event_hdr hhdr;
- struct btmtk_wmt_hdr whdr;
-} __packed;
-
-struct btmtk_hci_wmt_evt_funcc {
- struct btmtk_hci_wmt_evt hwhdr;
- __be16 status;
-} __packed;
-
-struct btmtk_tci_sleep {
- u8 mode;
- __le16 duration;
- __le16 host_duration;
- u8 host_wakeup_pin;
- u8 time_compensation;
-} __packed;
-
-struct btmtk_hci_wmt_params {
- u8 op;
- u8 flag;
- u16 dlen;
- const void *data;
- u32 *status;
-};
-
-struct btmtk_patch_header {
- u8 datetime[16];
- u8 platform[4];
- __le16 hwver;
- __le16 swver;
- __le32 magicnum;
-} __packed;
-
-struct btmtk_global_desc {
- __le32 patch_ver;
- __le32 sub_sys;
- __le32 feature_opt;
- __le32 section_num;
-} __packed;
-
-struct btmtk_section_map {
- __le32 sectype;
- __le32 secoffset;
- __le32 secsize;
- union {
- __le32 u4SecSpec[13];
- struct {
- __le32 dlAddr;
- __le32 dlsize;
- __le32 seckeyidx;
- __le32 alignlen;
- __le32 sectype;
- __le32 dlmodecrctype;
- __le32 crc;
- __le32 reserved[6];
- } bin_info_spec;
- };
-} __packed;
-
-static int btusb_set_bdaddr_mtk(struct hci_dev *hdev, const bdaddr_t *bdaddr)
-{
- struct sk_buff *skb;
- long ret;
-
- skb = __hci_cmd_sync(hdev, 0xfc1a, sizeof(bdaddr), bdaddr, HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- ret = PTR_ERR(skb);
- bt_dev_err(hdev, "changing Mediatek device address failed (%ld)",
- ret);
- return ret;
- }
- kfree_skb(skb);
-
- return 0;
-}
static void btusb_mtk_wmt_recv(struct urb *urb)
{
@@ -2265,6 +2150,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
if (!skb) {
hdev->stat.err_rx++;
+ kfree(urb->setup_packet);
return;
}
@@ -2285,6 +2171,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
data->evt_skb = skb_clone(skb, GFP_ATOMIC);
if (!data->evt_skb) {
kfree_skb(skb);
+ kfree(urb->setup_packet);
return;
}
}
@@ -2293,6 +2180,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
if (err < 0) {
kfree_skb(data->evt_skb);
data->evt_skb = NULL;
+ kfree(urb->setup_packet);
return;
}
@@ -2303,6 +2191,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
wake_up_bit(&data->flags,
BTUSB_TX_WAIT_VND_EVT);
}
+ kfree(urb->setup_packet);
return;
} else if (urb->status == -ENOENT) {
/* Avoid suspend failed when usb_kill_urb */
@@ -2323,6 +2212,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
usb_anchor_urb(urb, &data->ctrl_anchor);
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
+ kfree(urb->setup_packet);
/* -EPERM: urb is being killed;
* -ENODEV: device got disconnected
*/
@@ -2497,209 +2387,6 @@ err_free_wc:
return err;
}
-static int btusb_mtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname)
-{
- struct btmtk_hci_wmt_params wmt_params;
- struct btmtk_global_desc *globaldesc = NULL;
- struct btmtk_section_map *sectionmap;
- const struct firmware *fw;
- const u8 *fw_ptr;
- const u8 *fw_bin_ptr;
- int err, dlen, i, status;
- u8 flag, first_block, retry;
- u32 section_num, dl_size, section_offset;
- u8 cmd[64];
-
- err = request_firmware(&fw, fwname, &hdev->dev);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
- return err;
- }
-
- fw_ptr = fw->data;
- fw_bin_ptr = fw_ptr;
- globaldesc = (struct btmtk_global_desc *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE);
- section_num = le32_to_cpu(globaldesc->section_num);
-
- for (i = 0; i < section_num; i++) {
- first_block = 1;
- fw_ptr = fw_bin_ptr;
- sectionmap = (struct btmtk_section_map *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE +
- MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i);
-
- section_offset = le32_to_cpu(sectionmap->secoffset);
- dl_size = le32_to_cpu(sectionmap->bin_info_spec.dlsize);
-
- if (dl_size > 0) {
- retry = 20;
- while (retry > 0) {
- cmd[0] = 0; /* 0 means legacy dl mode. */
- memcpy(cmd + 1,
- fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE +
- MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i +
- MTK_SEC_MAP_COMMON_SIZE,
- MTK_SEC_MAP_NEED_SEND_SIZE + 1);
-
- wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
- wmt_params.status = &status;
- wmt_params.flag = 0;
- wmt_params.dlen = MTK_SEC_MAP_NEED_SEND_SIZE + 1;
- wmt_params.data = &cmd;
-
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
- err);
- goto err_release_fw;
- }
-
- if (status == BTMTK_WMT_PATCH_UNDONE) {
- break;
- } else if (status == BTMTK_WMT_PATCH_PROGRESS) {
- msleep(100);
- retry--;
- } else if (status == BTMTK_WMT_PATCH_DONE) {
- goto next_section;
- } else {
- bt_dev_err(hdev, "Failed wmt patch dwnld status (%d)",
- status);
- goto err_release_fw;
- }
- }
-
- fw_ptr += section_offset;
- wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
- wmt_params.status = NULL;
-
- while (dl_size > 0) {
- dlen = min_t(int, 250, dl_size);
- if (first_block == 1) {
- flag = 1;
- first_block = 0;
- } else if (dl_size - dlen <= 0) {
- flag = 3;
- } else {
- flag = 2;
- }
-
- wmt_params.flag = flag;
- wmt_params.dlen = dlen;
- wmt_params.data = fw_ptr;
-
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
- err);
- goto err_release_fw;
- }
-
- dl_size -= dlen;
- fw_ptr += dlen;
- }
- }
-next_section:
- continue;
- }
- /* Wait a few moments for firmware activation done */
- usleep_range(100000, 120000);
-
-err_release_fw:
- release_firmware(fw);
-
- return err;
-}
-
-static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
-{
- struct btmtk_hci_wmt_params wmt_params;
- const struct firmware *fw;
- const u8 *fw_ptr;
- size_t fw_size;
- int err, dlen;
- u8 flag, param;
-
- err = request_firmware(&fw, fwname, &hdev->dev);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
- return err;
- }
-
- /* Power on data RAM the firmware relies on. */
- param = 1;
- wmt_params.op = BTMTK_WMT_FUNC_CTRL;
- wmt_params.flag = 3;
- wmt_params.dlen = sizeof(param);
- wmt_params.data = &param;
- wmt_params.status = NULL;
-
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
- goto err_release_fw;
- }
-
- fw_ptr = fw->data;
- fw_size = fw->size;
-
- /* The size of patch header is 30 bytes, should be skip */
- if (fw_size < 30) {
- err = -EINVAL;
- goto err_release_fw;
- }
-
- fw_size -= 30;
- fw_ptr += 30;
- flag = 1;
-
- wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
- wmt_params.status = NULL;
-
- while (fw_size > 0) {
- dlen = min_t(int, 250, fw_size);
-
- /* Tell device the position in sequence */
- if (fw_size - dlen <= 0)
- flag = 3;
- else if (fw_size < fw->size - 30)
- flag = 2;
-
- wmt_params.flag = flag;
- wmt_params.dlen = dlen;
- wmt_params.data = fw_ptr;
-
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
- err);
- goto err_release_fw;
- }
-
- fw_size -= dlen;
- fw_ptr += dlen;
- }
-
- wmt_params.op = BTMTK_WMT_RST;
- wmt_params.flag = 4;
- wmt_params.dlen = 0;
- wmt_params.data = NULL;
- wmt_params.status = NULL;
-
- /* Activate funciton the firmware providing to */
- err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
- if (err < 0) {
- bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
- goto err_release_fw;
- }
-
- /* Wait a few moments for firmware activation done */
- usleep_range(10000, 12000);
-
-err_release_fw:
- release_firmware(fw);
-
- return err;
-}
-
static int btusb_mtk_func_query(struct hci_dev *hdev)
{
struct btmtk_hci_wmt_params wmt_params;
@@ -2857,7 +2544,8 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
snprintf(fw_bin_name, sizeof(fw_bin_name),
"mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
dev_id & 0xffff, (fw_version & 0xff) + 1);
- err = btusb_mtk_setup_firmware_79xx(hdev, fw_bin_name);
+ err = btmtk_setup_firmware_79xx(hdev, fw_bin_name,
+ btusb_mtk_hci_wmt_sync);
/* It's Device EndPoint Reset Option Register */
btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT);
@@ -2877,6 +2565,7 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
}
hci_set_msft_opcode(hdev, 0xFD30);
+ hci_set_aosp_capable(hdev);
goto done;
default:
bt_dev_err(hdev, "Unsupported hardware variant (%08x)",
@@ -2903,7 +2592,8 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
}
/* Setup a firmware which the device definitely requires */
- err = btusb_mtk_setup_firmware(hdev, fwname);
+ err = btmtk_setup_firmware(hdev, fwname,
+ btusb_mtk_hci_wmt_sync);
if (err < 0)
return err;
@@ -3064,9 +2754,6 @@ static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb)
return hci_recv_frame(hdev, skb);
}
-MODULE_FIRMWARE(FIRMWARE_MT7663);
-MODULE_FIRMWARE(FIRMWARE_MT7668);
-
#ifdef CONFIG_PM
/* Configure an out-of-band gpio as wake-up pin, if specified in device tree */
static int marvell_config_oob_wake(struct hci_dev *hdev)
@@ -3190,6 +2877,9 @@ static int btusb_set_bdaddr_wcn6855(struct hci_dev *hdev,
#define QCA_DFU_TIMEOUT 3000
#define QCA_FLAG_MULTI_NVM 0x80
+#define WCN6855_2_0_RAM_VERSION_GF 0x400c1200
+#define WCN6855_2_1_RAM_VERSION_GF 0x400c1211
+
struct qca_version {
__le32 rom_version;
__le32 patch_version;
@@ -3221,6 +2911,7 @@ static const struct qca_device_info qca_devices_table[] = {
{ 0x00000302, 28, 4, 16 }, /* Rome 3.2 */
{ 0x00130100, 40, 4, 16 }, /* WCN6855 1.0 */
{ 0x00130200, 40, 4, 16 }, /* WCN6855 2.0 */
+ { 0x00130201, 40, 4, 16 }, /* WCN6855 2.1 */
};
static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request,
@@ -3375,6 +3066,40 @@ done:
return err;
}
+static void btusb_generate_qca_nvm_name(char *fwname, size_t max_size,
+ const struct qca_version *ver)
+{
+ u32 rom_version = le32_to_cpu(ver->rom_version);
+ u16 flag = le16_to_cpu(ver->flag);
+
+ if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
+ u16 board_id = le16_to_cpu(ver->board_id);
+ const char *variant;
+
+ switch (le32_to_cpu(ver->ram_version)) {
+ case WCN6855_2_0_RAM_VERSION_GF:
+ case WCN6855_2_1_RAM_VERSION_GF:
+ variant = "_gf";
+ break;
+ default:
+ variant = "";
+ break;
+ }
+
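+		/* A board_id of 0 selects the default NVM file without a board suffix */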
+ if (board_id == 0) {
+ snprintf(fwname, max_size, "qca/nvm_usb_%08x%s.bin",
+ rom_version, variant);
+ } else {
+ snprintf(fwname, max_size, "qca/nvm_usb_%08x%s_%04x.bin",
+ rom_version, variant, board_id);
+ }
+ } else {
+ snprintf(fwname, max_size, "qca/nvm_usb_%08x.bin",
+ rom_version);
+ }
+}
+
static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
struct qca_version *ver,
const struct qca_device_info *info)
@@ -3383,20 +3108,7 @@ static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
char fwname[64];
int err;
- if (((ver->flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
- /* if boardid equal 0, use default nvm without surfix */
- if (le16_to_cpu(ver->board_id) == 0x0) {
- snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x.bin",
- le32_to_cpu(ver->rom_version));
- } else {
- snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x_%04x.bin",
- le32_to_cpu(ver->rom_version),
- le16_to_cpu(ver->board_id));
- }
- } else {
- snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x.bin",
- le32_to_cpu(ver->rom_version));
- }
+ btusb_generate_qca_nvm_name(fwname, sizeof(fwname), ver);
err = request_firmware(&fw, fwname, &hdev->dev);
if (err) {
@@ -3868,7 +3580,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->shutdown = btusb_mtk_shutdown;
hdev->manufacturer = 70;
hdev->cmd_timeout = btusb_mtk_cmd_timeout;
- hdev->set_bdaddr = btusb_set_bdaddr_mtk;
+ hdev->set_bdaddr = btmtk_set_bdaddr;
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
data->recv_acl = btusb_recv_acl_mtk;
}
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index ef54afa29357..7852abf15ddf 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -1508,7 +1508,6 @@ static const struct of_device_id bcm_bluetooth_of_match[] = {
{ .compatible = "brcm,bcm4330-bt" },
{ .compatible = "brcm,bcm4334-bt" },
{ .compatible = "brcm,bcm4345c5" },
- { .compatible = "brcm,bcm4330-bt" },
{ .compatible = "brcm,bcm43438-bt", .data = &bcm43438_device_data },
{ .compatible = "brcm,bcm43540-bt", .data = &bcm4354_device_data },
{ .compatible = "brcm,bcm4335a0" },
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 4b3b14a34794..1d0cdf023243 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -252,7 +252,7 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
}
if (!dlen) {
- hu->padding = (skb->len - 1) % alignment;
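+		/* Include the packet type byte when computing the padding */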
+ hu->padding = (skb->len + 1) % alignment;
hu->padding = (alignment - hu->padding) % alignment;
/* No more data, complete frame */
@@ -260,7 +260,7 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
skb = NULL;
}
} else {
- hu->padding = (skb->len - 1) % alignment;
+ hu->padding = (skb->len + 1) % alignment;
hu->padding = (alignment - hu->padding) % alignment;
/* Complete frame */
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index b45db0db347c..49ac884d996e 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -38,9 +38,12 @@ struct vhci_data {
struct mutex open_mutex;
struct delayed_work open_timeout;
+ struct work_struct suspend_work;
bool suspended;
bool wakeup;
+ __u16 msft_opcode;
+ bool aosp_capable;
};
static int vhci_open_dev(struct hci_dev *hdev)
@@ -114,6 +117,17 @@ static ssize_t force_suspend_read(struct file *file, char __user *user_buf,
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
+static void vhci_suspend_work(struct work_struct *work)
+{
+ struct vhci_data *data = container_of(work, struct vhci_data,
+ suspend_work);
+
+ if (data->suspended)
+ hci_suspend_dev(data->hdev);
+ else
+ hci_resume_dev(data->hdev);
+}
+
static ssize_t force_suspend_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -129,16 +143,10 @@ static ssize_t force_suspend_write(struct file *file,
if (data->suspended == enable)
return -EALREADY;
- if (enable)
- err = hci_suspend_dev(data->hdev);
- else
- err = hci_resume_dev(data->hdev);
-
- if (err)
- return err;
-
data->suspended = enable;
+ schedule_work(&data->suspend_work);
+
return count;
}
@@ -176,6 +184,8 @@ static ssize_t force_wakeup_write(struct file *file,
if (data->wakeup == enable)
return -EALREADY;
+ data->wakeup = enable;
+
return count;
}
@@ -186,6 +196,88 @@ static const struct file_operations force_wakeup_fops = {
.llseek = default_llseek,
};
+static int msft_opcode_set(void *data, u64 val)
+{
+ struct vhci_data *vhci = data;
+
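+	/* Only vendor-specific opcodes (OGF 0x3f) are valid MSFT opcodes */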
+ if (val > 0xffff || hci_opcode_ogf(val) != 0x3f)
+ return -EINVAL;
+
+ if (vhci->msft_opcode)
+ return -EALREADY;
+
+ vhci->msft_opcode = val;
+
+ return 0;
+}
+
+static int msft_opcode_get(void *data, u64 *val)
+{
+ struct vhci_data *vhci = data;
+
+ *val = vhci->msft_opcode;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(msft_opcode_fops, msft_opcode_get, msft_opcode_set,
+ "%llu\n");
+
+static ssize_t aosp_capable_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct vhci_data *vhci = file->private_data;
+ char buf[3];
+
+ buf[0] = vhci->aosp_capable ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t aosp_capable_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct vhci_data *vhci = file->private_data;
+ bool enable;
+ int err;
+
+ err = kstrtobool_from_user(user_buf, count, &enable);
+ if (err)
+ return err;
+
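+	/* The capability can only be switched on; clearing it is not supported */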
+ if (!enable)
+ return -EINVAL;
+
+ if (vhci->aosp_capable)
+ return -EALREADY;
+
+ vhci->aosp_capable = enable;
+
+ return count;
+}
+
+static const struct file_operations aosp_capable_fops = {
+ .open = simple_open,
+ .read = aosp_capable_read,
+ .write = aosp_capable_write,
+ .llseek = default_llseek,
+};
+
+static int vhci_setup(struct hci_dev *hdev)
+{
+ struct vhci_data *vhci = hci_get_drvdata(hdev);
+
+ if (vhci->msft_opcode)
+ hci_set_msft_opcode(hdev, vhci->msft_opcode);
+
+ if (vhci->aosp_capable)
+ hci_set_aosp_capable(hdev);
+
+ return 0;
+}
+
static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
{
struct hci_dev *hdev;
@@ -228,6 +320,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
hdev->get_data_path_id = vhci_get_data_path_id;
hdev->get_codec_config_data = vhci_get_codec_config_data;
hdev->wakeup = vhci_wakeup;
+ hdev->setup = vhci_setup;
+ set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
/* bit 6 is for external configuration */
if (opcode & 0x40)
@@ -251,6 +345,14 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
&force_wakeup_fops);
+ if (IS_ENABLED(CONFIG_BT_MSFTEXT))
+ debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data,
+ &msft_opcode_fops);
+
+ if (IS_ENABLED(CONFIG_BT_AOSPEXT))
+ debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data,
+ &aosp_capable_fops);
+
hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;
skb_put_u8(skb, 0xff);
@@ -440,6 +542,7 @@ static int vhci_open(struct inode *inode, struct file *file)
mutex_init(&data->open_mutex);
INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
+ INIT_WORK(&data->suspend_work, vhci_suspend_work);
file->private_data = data;
nonseekable_open(inode, file);
@@ -455,6 +558,7 @@ static int vhci_release(struct inode *inode, struct file *file)
struct hci_dev *hdev;
cancel_delayed_work_sync(&data->open_timeout);
+ flush_work(&data->suspend_work);
hdev = data->hdev;
diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c
index 57908ce4fae8..076e4942a3f0 100644
--- a/drivers/bluetooth/virtio_bt.c
+++ b/drivers/bluetooth/virtio_bt.c
@@ -202,6 +202,9 @@ static void virtbt_rx_handle(struct virtio_bluetooth *vbt, struct sk_buff *skb)
hci_skb_pkt_type(skb) = pkt_type;
hci_recv_frame(vbt->hdev, skb);
break;
+ default:
+ kfree_skb(skb);
+ break;
}
}
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
index 51a41359e0b4..3fda7b78a9af 100644
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -228,7 +228,8 @@ static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf
rf->msix_count = pf->num_rdma_msix;
rf->msix_entries = &pf->msix_entries[pf->rdma_base_vector];
rf->default_vsi.vsi_idx = vsi->vsi_num;
- rf->protocol_used = IRDMA_ROCE_PROTOCOL_ONLY;
+ rf->protocol_used = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ?
+ IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
rf->rdma_ver = IRDMA_GEN_2;
rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ff8da720a33a..cf73eacdda91 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1460,7 +1460,7 @@ done:
bond_dev->hw_enc_features |= xfrm_features;
#endif /* CONFIG_XFRM_OFFLOAD */
bond_dev->mpls_features = mpls_features;
- bond_dev->gso_max_segs = gso_max_segs;
+ netif_set_gso_max_segs(bond_dev, gso_max_segs);
netif_set_gso_max_size(bond_dev, gso_max_size);
bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
diff --git a/drivers/net/can/c_can/c_can_ethtool.c b/drivers/net/can/c_can/c_can_ethtool.c
index 377c7d2e7612..6655146294fc 100644
--- a/drivers/net/can/c_can/c_can_ethtool.c
+++ b/drivers/net/can/c_can/c_can_ethtool.c
@@ -20,7 +20,9 @@ static void c_can_get_drvinfo(struct net_device *netdev,
}
static void c_can_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct c_can_priv *priv = netdev_priv(netdev);
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 327cc4654806..0e102caddb73 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -240,24 +240,32 @@ static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
*/
static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
{
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot->ports[port]->is_dsa_8021q_cpu = true;
ocelot->npi = -1;
/* Overwrite PGID_CPU with the non-tagging port */
ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, true);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
{
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot->ports[port]->is_dsa_8021q_cpu = false;
/* Restore PGID_CPU */
ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
PGID_CPU);
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, true);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
@@ -989,6 +997,10 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot->num_stats = felix->info->num_stats;
ocelot->num_mact_rows = felix->info->num_mact_rows;
ocelot->vcap = felix->info->vcap;
+ ocelot->vcap_pol.base = felix->info->vcap_pol_base;
+ ocelot->vcap_pol.max = felix->info->vcap_pol_max;
+ ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2;
+ ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2;
ocelot->ops = felix->info->ops;
ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index be3e42e135c0..dfe08dddd262 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -21,6 +21,10 @@ struct felix_info {
int num_ports;
int num_tx_queues;
struct vcap_props *vcap;
+ u16 vcap_pol_base;
+ u16 vcap_pol_max;
+ u16 vcap_pol_base2;
+ u16 vcap_pol_max2;
int switch_pci_bar;
int imdio_pci_bar;
const struct ptp_clock_info *ptp_caps;
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 45c5ec7a83ea..36f9c2e0e063 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -5,8 +5,10 @@
#include <linux/fsl/enetc_mdio.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_vcap.h>
+#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot_sys.h>
+#include <net/tc_act/tc_gate.h>
#include <soc/mscc/ocelot.h>
#include <linux/dsa/ocelot.h>
#include <linux/pcs-lynx.h>
@@ -17,6 +19,8 @@
#include "felix.h"
#define VSC9959_TAS_GCL_ENTRY_MAX 63
+#define VSC9959_VCAP_POLICER_BASE 63
+#define VSC9959_VCAP_POLICER_MAX 383
static const u32 vsc9959_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x0089a0),
@@ -292,7 +296,7 @@ static const u32 vsc9959_sys_regmap[] = {
REG_RESERVED(SYS_MMGT_FAST),
REG_RESERVED(SYS_EVENTS_DIF),
REG_RESERVED(SYS_EVENTS_CORE),
- REG_RESERVED(SYS_CNT),
+ REG(SYS_CNT, 0x000000),
REG(SYS_PTP_STATUS, 0x000f14),
REG(SYS_PTP_TXSTAMP, 0x000f18),
REG(SYS_PTP_NXT, 0x000f1c),
@@ -1020,15 +1024,6 @@ static void vsc9959_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
*maxuse = val & GENMASK(11, 0);
}
-static const struct ocelot_ops vsc9959_ops = {
- .reset = vsc9959_reset,
- .wm_enc = vsc9959_wm_enc,
- .wm_dec = vsc9959_wm_dec,
- .wm_stat = vsc9959_wm_stat,
- .port_to_netdev = felix_port_to_netdev,
- .netdev_to_port = felix_netdev_to_port,
-};
-
static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
@@ -1344,6 +1339,880 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
}
}
+#define VSC9959_PSFP_SFID_MAX 175
+#define VSC9959_PSFP_GATE_ID_MAX 183
+#define VSC9959_PSFP_POLICER_BASE 63
+#define VSC9959_PSFP_POLICER_MAX 383
+#define VSC9959_PSFP_GATE_LIST_NUM 4
+#define VSC9959_PSFP_GATE_CYCLETIME_MIN 5000
+
+struct felix_stream {
+ struct list_head list;
+ unsigned long id;
+ bool dummy;
+ int ports;
+ int port;
+ u8 dmac[ETH_ALEN];
+ u16 vid;
+ s8 prio;
+ u8 sfid_valid;
+ u8 ssid_valid;
+ u32 sfid;
+ u32 ssid;
+};
+
+struct felix_stream_filter {
+ struct list_head list;
+ refcount_t refcount;
+ u32 index;
+ u8 enable;
+ int portmask;
+ u8 sg_valid;
+ u32 sgid;
+ u8 fm_valid;
+ u32 fmid;
+ u8 prio_valid;
+ u8 prio;
+ u32 maxsdu;
+};
+
+struct felix_stream_filter_counters {
+ u32 match;
+ u32 not_pass_gate;
+ u32 not_pass_sdu;
+ u32 red;
+};
+
+struct felix_stream_gate {
+ u32 index;
+ u8 enable;
+ u8 ipv_valid;
+ u8 init_ipv;
+ u64 basetime;
+ u64 cycletime;
+ u64 cycletime_ext;
+ u32 num_entries;
+	struct action_gate_entry entries[];
+};
+
+struct felix_stream_gate_entry {
+ struct list_head list;
+ refcount_t refcount;
+ u32 index;
+};
+
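+/* Extract the stream identity (DMAC, VLAN ID, optional priority) from a
+ * tc flower match
+ */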
+static int vsc9959_stream_identify(struct flow_cls_offload *f,
+ struct felix_stream *stream)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS)))
+ return -EOPNOTSUPP;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ ether_addr_copy(stream->dmac, match.key->dst);
+ if (!is_zero_ether_addr(match.mask->src))
+ return -EOPNOTSUPP;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_priority)
+ stream->prio = match.key->vlan_priority;
+ else
+ stream->prio = -1;
+
+ if (!match.mask->vlan_id)
+ return -EOPNOTSUPP;
+ stream->vid = match.key->vlan_id;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ stream->id = f->cookie;
+
+ return 0;
+}
+
+static int vsc9959_mact_stream_set(struct ocelot *ocelot,
+ struct felix_stream *stream,
+ struct netlink_ext_ack *extack)
+{
+ enum macaccess_entry_type type;
+ int ret, sfid, ssid;
+ u32 vid, dst_idx;
+ u8 mac[ETH_ALEN];
+
+ ether_addr_copy(mac, stream->dmac);
+ vid = stream->vid;
+
+	/* Stream identification doesn't support adding a stream whose MAC
+	 * entry hasn't been learned into the MAC table.
+	 */
+ ret = ocelot_mact_lookup(ocelot, &dst_idx, mac, vid, &type);
+ if (ret) {
+ if (extack)
+ NL_SET_ERR_MSG_MOD(extack, "Stream is not learned in MAC table");
+ return -EOPNOTSUPP;
+ }
+
+ if ((stream->sfid_valid || stream->ssid_valid) &&
+ type == ENTRYTYPE_NORMAL)
+ type = ENTRYTYPE_LOCKED;
+
+ sfid = stream->sfid_valid ? stream->sfid : -1;
+ ssid = stream->ssid_valid ? stream->ssid : -1;
+
+ ret = ocelot_mact_learn_streamdata(ocelot, dst_idx, mac, vid, type,
+ sfid, ssid);
+
+ return ret;
+}
+
+static struct felix_stream *
+vsc9959_stream_table_lookup(struct list_head *stream_list,
+ struct felix_stream *stream)
+{
+ struct felix_stream *tmp;
+
+ list_for_each_entry(tmp, stream_list, list)
+ if (ether_addr_equal(tmp->dmac, stream->dmac) &&
+ tmp->vid == stream->vid)
+ return tmp;
+
+ return NULL;
+}
+
+static int vsc9959_stream_table_add(struct ocelot *ocelot,
+ struct list_head *stream_list,
+ struct felix_stream *stream,
+ struct netlink_ext_ack *extack)
+{
+ struct felix_stream *stream_entry;
+ int ret;
+
+ stream_entry = kzalloc(sizeof(*stream_entry), GFP_KERNEL);
+ if (!stream_entry)
+ return -ENOMEM;
+
+ memcpy(stream_entry, stream, sizeof(*stream_entry));
+
+ if (!stream->dummy) {
+ ret = vsc9959_mact_stream_set(ocelot, stream_entry, extack);
+ if (ret) {
+ kfree(stream_entry);
+ return ret;
+ }
+ }
+
+ list_add_tail(&stream_entry->list, stream_list);
+
+ return 0;
+}
+
+static struct felix_stream *
+vsc9959_stream_table_get(struct list_head *stream_list, unsigned long id)
+{
+ struct felix_stream *tmp;
+
+ list_for_each_entry(tmp, stream_list, list)
+ if (tmp->id == id)
+ return tmp;
+
+ return NULL;
+}
+
+static void vsc9959_stream_table_del(struct ocelot *ocelot,
+ struct felix_stream *stream)
+{
+ if (!stream->dummy)
+ vsc9959_mact_stream_set(ocelot, stream, NULL);
+
+ list_del(&stream->list);
+ kfree(stream);
+}
+
+static u32 vsc9959_sfi_access_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, ANA_TABLES_SFIDACCESS);
+}
+
+static int vsc9959_psfp_sfi_set(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi)
+{
+ u32 val;
+
+ if (sfi->index > VSC9959_PSFP_SFID_MAX)
+ return -EINVAL;
+
+ if (!sfi->enable) {
+ ocelot_write(ocelot, ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index),
+ ANA_TABLES_SFIDTIDX);
+
+ val = ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE);
+ ocelot_write(ocelot, val, ANA_TABLES_SFIDACCESS);
+
+ return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
+ (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
+ 10, 100000);
+ }
+
+ if (sfi->sgid > VSC9959_PSFP_GATE_ID_MAX ||
+ sfi->fmid > VSC9959_PSFP_POLICER_MAX)
+ return -EINVAL;
+
+ ocelot_write(ocelot,
+ (sfi->sg_valid ? ANA_TABLES_SFIDTIDX_SGID_VALID : 0) |
+ ANA_TABLES_SFIDTIDX_SGID(sfi->sgid) |
+ (sfi->fm_valid ? ANA_TABLES_SFIDTIDX_POL_ENA : 0) |
+ ANA_TABLES_SFIDTIDX_POL_IDX(sfi->fmid) |
+ ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index),
+ ANA_TABLES_SFIDTIDX);
+
+ ocelot_write(ocelot,
+ (sfi->prio_valid ? ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA : 0) |
+ ANA_TABLES_SFIDACCESS_IGR_PRIO(sfi->prio) |
+ ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(sfi->maxsdu) |
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE),
+ ANA_TABLES_SFIDACCESS);
+
+ return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
+ (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
+ 10, 100000);
+}
+
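+/* Restrict matching on this SFID to the given ingress port mask */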
+static int vsc9959_psfp_sfidmask_set(struct ocelot *ocelot, u32 sfid, int ports)
+{
+ u32 val;
+
+ ocelot_rmw(ocelot,
+ ANA_TABLES_SFIDTIDX_SFID_INDEX(sfid),
+ ANA_TABLES_SFIDTIDX_SFID_INDEX_M,
+ ANA_TABLES_SFIDTIDX);
+
+ ocelot_write(ocelot,
+ ANA_TABLES_SFID_MASK_IGR_PORT_MASK(ports) |
+ ANA_TABLES_SFID_MASK_IGR_SRCPORT_MATCH_ENA,
+ ANA_TABLES_SFID_MASK);
+
+ ocelot_rmw(ocelot,
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE),
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M,
+ ANA_TABLES_SFIDACCESS);
+
+ return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
+ (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
+ 10, 100000);
+}
+
+static int vsc9959_psfp_sfi_list_add(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi,
+ struct list_head *pos)
+{
+ struct felix_stream_filter *sfi_entry;
+ int ret;
+
+ sfi_entry = kzalloc(sizeof(*sfi_entry), GFP_KERNEL);
+ if (!sfi_entry)
+ return -ENOMEM;
+
+ memcpy(sfi_entry, sfi, sizeof(*sfi_entry));
+ refcount_set(&sfi_entry->refcount, 1);
+
+ ret = vsc9959_psfp_sfi_set(ocelot, sfi_entry);
+ if (ret) {
+ kfree(sfi_entry);
+ return ret;
+ }
+
+ vsc9959_psfp_sfidmask_set(ocelot, sfi->index, sfi->portmask);
+
+ list_add(&sfi_entry->list, pos);
+
+ return 0;
+}
+
+static int vsc9959_psfp_sfi_table_add(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi)
+{
+ struct list_head *pos, *q, *last;
+ struct felix_stream_filter *tmp;
+ struct ocelot_psfp_list *psfp;
+ u32 insert = 0;
+
+ psfp = &ocelot->psfp;
+ last = &psfp->sfi_list;
+
+ list_for_each_safe(pos, q, &psfp->sfi_list) {
+ tmp = list_entry(pos, struct felix_stream_filter, list);
+ if (sfi->sg_valid == tmp->sg_valid &&
+ sfi->fm_valid == tmp->fm_valid &&
+ sfi->portmask == tmp->portmask &&
+ tmp->sgid == sfi->sgid &&
+ tmp->fmid == sfi->fmid) {
+ sfi->index = tmp->index;
+ refcount_inc(&tmp->refcount);
+ return 0;
+ }
+		/* Keep the list sorted by increasing index. */
+ if (tmp->index == insert) {
+ last = pos;
+ insert++;
+ }
+ }
+ sfi->index = insert;
+
+ return vsc9959_psfp_sfi_list_add(ocelot, sfi, last);
+}
+
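+/* Add two stream filters at back-to-back free indices in the sorted SFI list */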
+static int vsc9959_psfp_sfi_table_add2(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi,
+ struct felix_stream_filter *sfi2)
+{
+ struct felix_stream_filter *tmp;
+ struct list_head *pos, *q, *last;
+ struct ocelot_psfp_list *psfp;
+ u32 insert = 0;
+ int ret;
+
+ psfp = &ocelot->psfp;
+ last = &psfp->sfi_list;
+
+ list_for_each_safe(pos, q, &psfp->sfi_list) {
+ tmp = list_entry(pos, struct felix_stream_filter, list);
+		/* Keep the list sorted by increasing index. */
+ if (tmp->index >= insert + 2)
+ break;
+
+ insert = tmp->index + 1;
+ last = pos;
+ }
+ sfi->index = insert;
+
+ ret = vsc9959_psfp_sfi_list_add(ocelot, sfi, last);
+ if (ret)
+ return ret;
+
+ sfi2->index = insert + 1;
+
+ return vsc9959_psfp_sfi_list_add(ocelot, sfi2, last->next);
+}
+
+static struct felix_stream_filter *
+vsc9959_psfp_sfi_table_get(struct list_head *sfi_list, u32 index)
+{
+ struct felix_stream_filter *tmp;
+
+ list_for_each_entry(tmp, sfi_list, list)
+ if (tmp->index == index)
+ return tmp;
+
+ return NULL;
+}
+
+static void vsc9959_psfp_sfi_table_del(struct ocelot *ocelot, u32 index)
+{
+ struct felix_stream_filter *tmp, *n;
+ struct ocelot_psfp_list *psfp;
+ u8 z;
+
+ psfp = &ocelot->psfp;
+
+ list_for_each_entry_safe(tmp, n, &psfp->sfi_list, list)
+ if (tmp->index == index) {
+ z = refcount_dec_and_test(&tmp->refcount);
+ if (z) {
+ tmp->enable = 0;
+ vsc9959_psfp_sfi_set(ocelot, tmp);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ break;
+ }
+}
+
+static void vsc9959_psfp_parse_gate(const struct flow_action_entry *entry,
+ struct felix_stream_gate *sgi)
+{
+ sgi->index = entry->gate.index;
+ sgi->ipv_valid = (entry->gate.prio < 0) ? 0 : 1;
+ sgi->init_ipv = (sgi->ipv_valid) ? entry->gate.prio : 0;
+ sgi->basetime = entry->gate.basetime;
+ sgi->cycletime = entry->gate.cycletime;
+ sgi->num_entries = entry->gate.num_entries;
+ sgi->enable = 1;
+
+ memcpy(sgi->entries, entry->gate.entries,
+ entry->gate.num_entries * sizeof(struct action_gate_entry));
+}
+
+static u32 vsc9959_sgi_cfg_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, ANA_SG_ACCESS_CTRL);
+}
+
+static int vsc9959_psfp_sgi_set(struct ocelot *ocelot,
+ struct felix_stream_gate *sgi)
+{
+ struct action_gate_entry *e;
+ struct timespec64 base_ts;
+ u32 interval_sum = 0;
+ u32 val;
+ int i;
+
+ if (sgi->index > VSC9959_PSFP_GATE_ID_MAX)
+ return -EINVAL;
+
+ ocelot_write(ocelot, ANA_SG_ACCESS_CTRL_SGID(sgi->index),
+ ANA_SG_ACCESS_CTRL);
+
+ if (!sgi->enable) {
+ ocelot_rmw(ocelot, ANA_SG_CONFIG_REG_3_INIT_GATE_STATE,
+ ANA_SG_CONFIG_REG_3_INIT_GATE_STATE |
+ ANA_SG_CONFIG_REG_3_GATE_ENABLE,
+ ANA_SG_CONFIG_REG_3);
+
+ return 0;
+ }
+
+ if (sgi->cycletime < VSC9959_PSFP_GATE_CYCLETIME_MIN ||
+ sgi->cycletime > NSEC_PER_SEC)
+ return -EINVAL;
+
+ if (sgi->num_entries > VSC9959_PSFP_GATE_LIST_NUM)
+ return -EINVAL;
+
+ vsc9959_new_base_time(ocelot, sgi->basetime, sgi->cycletime, &base_ts);
+ ocelot_write(ocelot, base_ts.tv_nsec, ANA_SG_CONFIG_REG_1);
+ val = lower_32_bits(base_ts.tv_sec);
+ ocelot_write(ocelot, val, ANA_SG_CONFIG_REG_2);
+
+ val = upper_32_bits(base_ts.tv_sec);
+ ocelot_write(ocelot,
+ (sgi->ipv_valid ? ANA_SG_CONFIG_REG_3_IPV_VALID : 0) |
+ ANA_SG_CONFIG_REG_3_INIT_IPV(sgi->init_ipv) |
+ ANA_SG_CONFIG_REG_3_GATE_ENABLE |
+ ANA_SG_CONFIG_REG_3_LIST_LENGTH(sgi->num_entries) |
+ ANA_SG_CONFIG_REG_3_INIT_GATE_STATE |
+ ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(val),
+ ANA_SG_CONFIG_REG_3);
+
+ ocelot_write(ocelot, sgi->cycletime, ANA_SG_CONFIG_REG_4);
+
+ e = sgi->entries;
+ for (i = 0; i < sgi->num_entries; i++) {
+ u32 ips = (e[i].ipv < 0) ? 0 : (e[i].ipv + 8);
+
+ ocelot_write_rix(ocelot, ANA_SG_GCL_GS_CONFIG_IPS(ips) |
+ (e[i].gate_state ?
+ ANA_SG_GCL_GS_CONFIG_GATE_STATE : 0),
+ ANA_SG_GCL_GS_CONFIG, i);
+
+ interval_sum += e[i].interval;
+ ocelot_write_rix(ocelot, interval_sum, ANA_SG_GCL_TI_CONFIG, i);
+ }
+
+ ocelot_rmw(ocelot, ANA_SG_ACCESS_CTRL_CONFIG_CHANGE,
+ ANA_SG_ACCESS_CTRL_CONFIG_CHANGE,
+ ANA_SG_ACCESS_CTRL);
+
+ return readx_poll_timeout(vsc9959_sgi_cfg_status, ocelot, val,
+ (!(ANA_SG_ACCESS_CTRL_CONFIG_CHANGE & val)),
+ 10, 100000);
+}
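+
+/* Illustrative example: for a gate control list of three entries with
+ * intervals 20000, 30000 and 50000 ns, the loop above programs
+ * ANA_SG_GCL_TI_CONFIG with the running sums 20000, 50000 and 100000,
+ * i.e. each entry stores the absolute offset within the cycle at which
+ * its interval ends.
+ */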
+
+static int vsc9959_psfp_sgi_table_add(struct ocelot *ocelot,
+ struct felix_stream_gate *sgi)
+{
+ struct felix_stream_gate_entry *tmp;
+ struct ocelot_psfp_list *psfp;
+ int ret;
+
+ psfp = &ocelot->psfp;
+
+ list_for_each_entry(tmp, &psfp->sgi_list, list)
+ if (tmp->index == sgi->index) {
+ refcount_inc(&tmp->refcount);
+ return 0;
+ }
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = vsc9959_psfp_sgi_set(ocelot, sgi);
+ if (ret) {
+ kfree(tmp);
+ return ret;
+ }
+
+ tmp->index = sgi->index;
+ refcount_set(&tmp->refcount, 1);
+ list_add_tail(&tmp->list, &psfp->sgi_list);
+
+ return 0;
+}
+
+static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
+ u32 index)
+{
+ struct felix_stream_gate_entry *tmp, *n;
+ struct felix_stream_gate sgi = {0};
+ struct ocelot_psfp_list *psfp;
+ u8 z;
+
+ psfp = &ocelot->psfp;
+
+ list_for_each_entry_safe(tmp, n, &psfp->sgi_list, list)
+ if (tmp->index == index) {
+ z = refcount_dec_and_test(&tmp->refcount);
+ if (z) {
+ sgi.index = index;
+ sgi.enable = 0;
+ vsc9959_psfp_sgi_set(ocelot, &sgi);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ break;
+ }
+}
+
+static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
+ struct felix_stream_filter_counters *counters)
+{
+ ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index),
+ SYS_STAT_CFG_STAT_VIEW_M,
+ SYS_STAT_CFG);
+
+ counters->match = ocelot_read_gix(ocelot, SYS_CNT, 0x200);
+ counters->not_pass_gate = ocelot_read_gix(ocelot, SYS_CNT, 0x201);
+ counters->not_pass_sdu = ocelot_read_gix(ocelot, SYS_CNT, 0x202);
+ counters->red = ocelot_read_gix(ocelot, SYS_CNT, 0x203);
+
+ /* Clear the PSFP counters. */
+ ocelot_write(ocelot,
+ SYS_STAT_CFG_STAT_VIEW(index) |
+ SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
+ SYS_STAT_CFG);
+}
+
+static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct felix_stream_filter old_sfi, *sfi_entry;
+ struct felix_stream_filter sfi = {0};
+ const struct flow_action_entry *a;
+ struct felix_stream *stream_entry;
+ struct felix_stream stream = {0};
+ struct felix_stream_gate *sgi;
+ struct ocelot_psfp_list *psfp;
+ struct ocelot_policer pol;
+ int ret, i, size;
+ u64 rate, burst;
+ u32 index;
+
+ psfp = &ocelot->psfp;
+
+ ret = vsc9959_stream_identify(f, &stream);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack, "Only can match on VID, PCP, and dest MAC");
+ return ret;
+ }
+
+ flow_action_for_each(i, a, &f->rule->action) {
+ switch (a->id) {
+ case FLOW_ACTION_GATE:
+ size = struct_size(sgi, entries, a->gate.num_entries);
+ sgi = kzalloc(size, GFP_KERNEL);
+ if (!sgi) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ vsc9959_psfp_parse_gate(a, sgi);
+ ret = vsc9959_psfp_sgi_table_add(ocelot, sgi);
+ if (ret) {
+ kfree(sgi);
+ goto err;
+ }
+ sfi.sg_valid = 1;
+ sfi.sgid = sgi->index;
+ kfree(sgi);
+ break;
+ case FLOW_ACTION_POLICE:
+ index = a->police.index + VSC9959_PSFP_POLICER_BASE;
+ if (index > VSC9959_PSFP_POLICER_MAX) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ rate = a->police.rate_bytes_ps;
+ burst = rate * PSCHED_NS2TICKS(a->police.burst);
+ pol = (struct ocelot_policer) {
+ .burst = div_u64(burst, PSCHED_TICKS_PER_SEC),
+ .rate = div_u64(rate, 1000) * 8,
+ };
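+
+ /* Worked example (illustrative): a police rate of
+ * 12500000 bytes/s (100 Mbit/s) is programmed as
+ * .rate = 12500000 / 1000 * 8 = 100000, i.e. in kbps.
+ */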
+ ret = ocelot_vcap_policer_add(ocelot, index, &pol);
+ if (ret)
+ goto err;
+
+ sfi.fm_valid = 1;
+ sfi.fmid = index;
+ sfi.maxsdu = a->police.mtu;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+ }
+
+ stream.ports = BIT(port);
+ stream.port = port;
+
+ sfi.portmask = stream.ports;
+ sfi.prio_valid = (stream.prio < 0 ? 0 : 1);
+ sfi.prio = (sfi.prio_valid ? stream.prio : 0);
+ sfi.enable = 1;
+
+ /* Check if the stream already exists. */
+ stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &stream);
+ if (stream_entry) {
+ if (stream_entry->ports & BIT(port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "The stream is added on this port");
+ ret = -EEXIST;
+ goto err;
+ }
+
+ if (stream_entry->ports != BIT(stream_entry->port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "The stream is added on two ports");
+ ret = -EEXIST;
+ goto err;
+ }
+
+ stream_entry->ports |= BIT(port);
+ stream.ports = stream_entry->ports;
+
+ sfi_entry = vsc9959_psfp_sfi_table_get(&psfp->sfi_list,
+ stream_entry->sfid);
+ memcpy(&old_sfi, sfi_entry, sizeof(old_sfi));
+
+ vsc9959_psfp_sfi_table_del(ocelot, stream_entry->sfid);
+
+ old_sfi.portmask = stream_entry->ports;
+ sfi.portmask = stream.ports;
+
+ if (stream_entry->port > port) {
+ ret = vsc9959_psfp_sfi_table_add2(ocelot, &sfi,
+ &old_sfi);
+ stream_entry->dummy = true;
+ } else {
+ ret = vsc9959_psfp_sfi_table_add2(ocelot, &old_sfi,
+ &sfi);
+ stream.dummy = true;
+ }
+ if (ret)
+ goto err;
+
+ stream_entry->sfid = old_sfi.index;
+ } else {
+ ret = vsc9959_psfp_sfi_table_add(ocelot, &sfi);
+ if (ret)
+ goto err;
+ }
+
+ stream.sfid = sfi.index;
+ stream.sfid_valid = 1;
+ ret = vsc9959_stream_table_add(ocelot, &psfp->stream_list,
+ &stream, extack);
+ if (ret) {
+ vsc9959_psfp_sfi_table_del(ocelot, stream.sfid);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (sfi.sg_valid)
+ vsc9959_psfp_sgi_table_del(ocelot, sfi.sgid);
+
+ if (sfi.fm_valid)
+ ocelot_vcap_policer_del(ocelot, sfi.fmid);
+
+ return ret;
+}
+
+static int vsc9959_psfp_filter_del(struct ocelot *ocelot,
+ struct flow_cls_offload *f)
+{
+ struct felix_stream *stream, tmp, *stream_entry;
+ struct felix_stream_filter *sfi;
+ struct ocelot_psfp_list *psfp;
+
+ psfp = &ocelot->psfp;
+
+ stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
+ if (!stream)
+ return -ENOENT;
+
+ sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
+ if (!sfi)
+ return -ENOENT;
+
+ if (sfi->sg_valid)
+ vsc9959_psfp_sgi_table_del(ocelot, sfi->sgid);
+
+ if (sfi->fm_valid)
+ ocelot_vcap_policer_del(ocelot, sfi->fmid);
+
+ vsc9959_psfp_sfi_table_del(ocelot, stream->sfid);
+
+ memcpy(&tmp, stream, sizeof(tmp));
+
+ stream->sfid_valid = 0;
+ vsc9959_stream_table_del(ocelot, stream);
+
+ stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &tmp);
+ if (stream_entry) {
+ stream_entry->ports = BIT(stream_entry->port);
+ if (stream_entry->dummy) {
+ stream_entry->dummy = false;
+ vsc9959_mact_stream_set(ocelot, stream_entry, NULL);
+ }
+ vsc9959_psfp_sfidmask_set(ocelot, stream_entry->sfid,
+ stream_entry->ports);
+ }
+
+ return 0;
+}
+
+static int vsc9959_psfp_stats_get(struct ocelot *ocelot,
+ struct flow_cls_offload *f,
+ struct flow_stats *stats)
+{
+ struct felix_stream_filter_counters counters;
+ struct ocelot_psfp_list *psfp;
+ struct felix_stream *stream;
+
+ psfp = &ocelot->psfp;
+ stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
+ if (!stream)
+ return -ENOENT;
+
+ vsc9959_psfp_counters_get(ocelot, stream->sfid, &counters);
+
+ stats->pkts = counters.match;
+ stats->drops = counters.not_pass_gate + counters.not_pass_sdu +
+ counters.red;
+
+ return 0;
+}
+
+static void vsc9959_psfp_init(struct ocelot *ocelot)
+{
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+
+ INIT_LIST_HEAD(&psfp->stream_list);
+ INIT_LIST_HEAD(&psfp->sfi_list);
+ INIT_LIST_HEAD(&psfp->sgi_list);
+}
+
+/* When using cut-through forwarding and the egress port runs at a higher data
+ * rate than the ingress port, the packet currently under transmission would
+ * suffer an underrun since it would be transmitted faster than it is received.
+ * The Felix switch implementation of cut-through forwarding does not check in
+ * hardware whether this condition is satisfied or not, so we must restrict the
+ * list of ports that have cut-through forwarding enabled on egress to only be
+ * the ports operating at the lowest link speed within their respective
+ * forwarding domain.
+ */
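+/* For example (illustrative): with a CPU port at 2500 Mbps and user ports
+ * at 1000 and 100 Mbps in the same forwarding domain, only the 100 Mbps
+ * port matches the minimum speed of its domain, so only frames ingressing
+ * there remain eligible for cut-through; the faster ports fall back to
+ * store-and-forward.
+ */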
+static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_switch *ds = felix->ds;
+ int port, other_port;
+
+ lockdep_assert_held(&ocelot->fwd_domain_lock);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int min_speed = ocelot_port->speed;
+ unsigned long mask = 0;
+ u32 tmp, val = 0;
+
+ /* Disable cut-through on ports that are down */
+ if (ocelot_port->speed <= 0)
+ goto set;
+
+ if (dsa_is_cpu_port(ds, port)) {
+ /* Ocelot switches forward from the NPI port towards
+ * any port, regardless of it being in the NPI port's
+ * forwarding domain or not.
+ */
+ mask = dsa_user_ports(ds);
+ } else {
+ mask = ocelot_get_bridge_fwd_mask(ocelot, port);
+ mask &= ~BIT(port);
+ if (ocelot->npi >= 0)
+ mask |= BIT(ocelot->npi);
+ else
+ mask |= ocelot_get_dsa_8021q_cpu_mask(ocelot);
+ }
+
+ /* Calculate the minimum link speed, among the ports that are
+ * up, of this source port's forwarding domain.
+ */
+ for_each_set_bit(other_port, &mask, ocelot->num_phys_ports) {
+ struct ocelot_port *other_ocelot_port;
+
+ other_ocelot_port = ocelot->ports[other_port];
+ if (other_ocelot_port->speed <= 0)
+ continue;
+
+ if (min_speed > other_ocelot_port->speed)
+ min_speed = other_ocelot_port->speed;
+ }
+
+ /* Enable cut-through forwarding for all traffic classes. */
+ if (ocelot_port->speed == min_speed)
+ val = GENMASK(7, 0);
+
+set:
+ tmp = ocelot_read_rix(ocelot, ANA_CUT_THRU_CFG, port);
+ if (tmp == val)
+ continue;
+
+ dev_dbg(ocelot->dev,
+ "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding\n",
+ port, mask, ocelot_port->speed, min_speed,
+ val ? "enabling" : "disabling");
+
+ ocelot_write_rix(ocelot, val, ANA_CUT_THRU_CFG, port);
+ }
+}
+
+static const struct ocelot_ops vsc9959_ops = {
+ .reset = vsc9959_reset,
+ .wm_enc = vsc9959_wm_enc,
+ .wm_dec = vsc9959_wm_dec,
+ .wm_stat = vsc9959_wm_stat,
+ .port_to_netdev = felix_port_to_netdev,
+ .netdev_to_port = felix_netdev_to_port,
+ .psfp_init = vsc9959_psfp_init,
+ .psfp_filter_add = vsc9959_psfp_filter_add,
+ .psfp_filter_del = vsc9959_psfp_filter_del,
+ .psfp_stats_get = vsc9959_psfp_stats_get,
+ .cut_through_fwd = vsc9959_cut_through_fwd,
+};
+
static const struct felix_info felix_info_vsc9959 = {
.target_io_res = vsc9959_target_io_res,
.port_io_res = vsc9959_port_io_res,
@@ -1354,6 +2223,10 @@ static const struct felix_info felix_info_vsc9959 = {
.stats_layout = vsc9959_stats_layout,
.num_stats = ARRAY_SIZE(vsc9959_stats_layout),
.vcap = vsc9959_vcap_props,
+ .vcap_pol_base = VSC9959_VCAP_POLICER_BASE,
+ .vcap_pol_max = VSC9959_VCAP_POLICER_MAX,
+ .vcap_pol_base2 = 0,
+ .vcap_pol_max2 = 0,
.num_mact_rows = 2048,
.num_ports = 6,
.num_tx_queues = OCELOT_NUM_TC,
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 92eae63150ea..899b98193b4a 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -18,6 +18,10 @@
#define MSCC_MIIM_CMD_REGAD_SHIFT 20
#define MSCC_MIIM_CMD_PHYAD_SHIFT 25
#define MSCC_MIIM_CMD_VLD BIT(31)
+#define VSC9953_VCAP_POLICER_BASE 11
+#define VSC9953_VCAP_POLICER_MAX 31
+#define VSC9953_VCAP_POLICER_BASE2 120
+#define VSC9953_VCAP_POLICER_MAX2 161
static const u32 vsc9953_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x00b500),
@@ -1172,6 +1176,10 @@ static const struct felix_info seville_info_vsc9953 = {
.stats_layout = vsc9953_stats_layout,
.num_stats = ARRAY_SIZE(vsc9953_stats_layout),
.vcap = vsc9953_vcap_props,
+ .vcap_pol_base = VSC9953_VCAP_POLICER_BASE,
+ .vcap_pol_max = VSC9953_VCAP_POLICER_MAX,
+ .vcap_pol_base2 = VSC9953_VCAP_POLICER_BASE2,
+ .vcap_pol_max2 = VSC9953_VCAP_POLICER_MAX2,
.num_mact_rows = 2048,
.num_ports = 10,
.num_tx_queues = OCELOT_NUM_TC,
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 147ca39531a3..96a7fbf8700c 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -9,6 +9,8 @@
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
#include <net/dsa.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
@@ -68,6 +70,8 @@ static const struct qca8k_mib_desc ar8327_mib[] = {
MIB_DESC(1, 0x9c, "TxExcDefer"),
MIB_DESC(1, 0xa0, "TxDefer"),
MIB_DESC(1, 0xa4, "TxLateCol"),
+ MIB_DESC(1, 0xa8, "RXUnicast"),
+ MIB_DESC(1, 0xac, "TXUnicast"),
};
/* The 32bit switch registers are accessed indirectly. To achieve this we need
@@ -151,6 +155,25 @@ qca8k_set_page(struct mii_bus *bus, u16 page)
static int
qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
{
+ return regmap_read(priv->regmap, reg, val);
+}
+
+static int
+qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+ return regmap_write(priv->regmap, reg, val);
+}
+
+static int
+qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+{
+ return regmap_update_bits(priv->regmap, reg, mask, write_val);
+}
+
+static int
+qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
int ret;
@@ -171,8 +194,9 @@ exit:
}
static int
-qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
+qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
int ret;
@@ -193,8 +217,9 @@ exit:
}
static int
-qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
@@ -222,34 +247,6 @@ exit:
return ret;
}
-static int
-qca8k_reg_set(struct qca8k_priv *priv, u32 reg, u32 val)
-{
- return qca8k_rmw(priv, reg, 0, val);
-}
-
-static int
-qca8k_reg_clear(struct qca8k_priv *priv, u32 reg, u32 val)
-{
- return qca8k_rmw(priv, reg, val, 0);
-}
-
-static int
-qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
-
- return qca8k_read(priv, reg, val);
-}
-
-static int
-qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
-
- return qca8k_write(priv, reg, val);
-}
-
static const struct regmap_range qca8k_readable_ranges[] = {
regmap_reg_range(0x0000, 0x00e4), /* Global control */
regmap_reg_range(0x0100, 0x0168), /* EEE control */
@@ -281,26 +278,19 @@ static struct regmap_config qca8k_regmap_config = {
.max_register = 0x16ac, /* end MIB - Port6 range */
.reg_read = qca8k_regmap_read,
.reg_write = qca8k_regmap_write,
+ .reg_update_bits = qca8k_regmap_update_bits,
.rd_table = &qca8k_readable_table,
+ .disable_locking = true, /* Locking is handled by qca8k read/write */
+ .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
};
static int
qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
{
- int ret, ret1;
u32 val;
- ret = read_poll_timeout(qca8k_read, ret1, !(val & mask),
- 0, QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
- priv, reg, &val);
-
- /* Check if qca8k_read has failed for a different reason
- * before returning -ETIMEDOUT
- */
- if (ret < 0 && ret1 < 0)
- return ret1;
-
- return ret;
+ return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
}
static int
@@ -319,18 +309,18 @@ qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
}
/* vid - 83:72 */
- fdb->vid = (reg[2] >> QCA8K_ATU_VID_S) & QCA8K_ATU_VID_M;
+ fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
/* aging - 67:64 */
- fdb->aging = reg[2] & QCA8K_ATU_STATUS_M;
+ fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
/* portmask - 54:48 */
- fdb->port_mask = (reg[1] >> QCA8K_ATU_PORT_S) & QCA8K_ATU_PORT_M;
+ fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
/* mac - 47:0 */
- fdb->mac[0] = (reg[1] >> QCA8K_ATU_ADDR0_S) & 0xff;
- fdb->mac[1] = reg[1] & 0xff;
- fdb->mac[2] = (reg[0] >> QCA8K_ATU_ADDR2_S) & 0xff;
- fdb->mac[3] = (reg[0] >> QCA8K_ATU_ADDR3_S) & 0xff;
- fdb->mac[4] = (reg[0] >> QCA8K_ATU_ADDR4_S) & 0xff;
- fdb->mac[5] = reg[0] & 0xff;
+ fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
+ fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
+ fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
+ fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
+ fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
+ fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
return 0;
}
@@ -343,18 +333,18 @@ qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
int i;
/* vid - 83:72 */
- reg[2] = (vid & QCA8K_ATU_VID_M) << QCA8K_ATU_VID_S;
+ reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
/* aging - 67:64 */
- reg[2] |= aging & QCA8K_ATU_STATUS_M;
+ reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
/* portmask - 54:48 */
- reg[1] = (port_mask & QCA8K_ATU_PORT_M) << QCA8K_ATU_PORT_S;
+ reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
/* mac - 47:0 */
- reg[1] |= mac[0] << QCA8K_ATU_ADDR0_S;
- reg[1] |= mac[1];
- reg[0] |= mac[2] << QCA8K_ATU_ADDR2_S;
- reg[0] |= mac[3] << QCA8K_ATU_ADDR3_S;
- reg[0] |= mac[4] << QCA8K_ATU_ADDR4_S;
- reg[0] |= mac[5];
+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
/* load the array into the ARL table */
for (i = 0; i < 3; i++)
@@ -372,7 +362,7 @@ qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
reg |= cmd;
if (port >= 0) {
reg |= QCA8K_ATU_FUNC_PORT_EN;
- reg |= (port & QCA8K_ATU_FUNC_PORT_M) << QCA8K_ATU_FUNC_PORT_S;
+ reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
}
/* Write the function register triggering the table access */
@@ -446,6 +436,81 @@ qca8k_fdb_flush(struct qca8k_priv *priv)
}
static int
+qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
+ const u8 *mac, u16 vid)
+{
+ struct qca8k_fdb fdb = { 0 };
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ qca8k_fdb_write(priv, vid, 0, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_fdb_read(priv, &fdb);
+ if (ret < 0)
+ goto exit;
+
+ /* Rule exists. Delete it first */
+ if (!fdb.aging) {
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ if (ret)
+ goto exit;
+ }
+
+ /* Add port to fdb portmask */
+ fdb.port_mask |= port_mask;
+
+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+static int
+qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
+ const u8 *mac, u16 vid)
+{
+ struct qca8k_fdb fdb = { 0 };
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ qca8k_fdb_write(priv, vid, 0, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_fdb_read(priv, &fdb);
+ if (ret < 0)
+ goto exit;
+
+ /* Rule doesn't exist. Nothing to delete */
+ if (!fdb.aging) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ if (ret)
+ goto exit;
+
+ /* The only port in the rule is this one. Don't re-insert */
+ if (fdb.port_mask == port_mask)
+ goto exit;
+
+ /* Remove port from port mask */
+ fdb.port_mask &= ~port_mask;
+
+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+static int
qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
{
u32 reg;
@@ -454,7 +519,7 @@ qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
/* Set the command and VLAN index */
reg = QCA8K_VTU_FUNC1_BUSY;
reg |= cmd;
- reg |= vid << QCA8K_VTU_FUNC1_VID_S;
+ reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
/* Write the function register triggering the table access */
ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
@@ -500,13 +565,11 @@ qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
if (ret < 0)
goto out;
reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
- reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port));
+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
if (untagged)
- reg |= QCA8K_VTU_FUNC0_EG_MODE_UNTAG <<
- QCA8K_VTU_FUNC0_EG_MODE_S(port);
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
else
- reg |= QCA8K_VTU_FUNC0_EG_MODE_TAG <<
- QCA8K_VTU_FUNC0_EG_MODE_S(port);
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
if (ret)
@@ -534,15 +597,13 @@ qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
if (ret < 0)
goto out;
- reg &= ~(3 << QCA8K_VTU_FUNC0_EG_MODE_S(port));
- reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT <<
- QCA8K_VTU_FUNC0_EG_MODE_S(port);
+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
/* Check if we're the last member to be removed */
del = true;
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- mask = QCA8K_VTU_FUNC0_EG_MODE_NOT;
- mask <<= QCA8K_VTU_FUNC0_EG_MODE_S(i);
+ mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
if ((reg & mask) != mask) {
del = false;
@@ -571,7 +632,7 @@ qca8k_mib_init(struct qca8k_priv *priv)
int ret;
mutex_lock(&priv->reg_mutex);
- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
if (ret)
goto exit;
@@ -579,7 +640,7 @@ qca8k_mib_init(struct qca8k_priv *priv)
if (ret)
goto exit;
- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
if (ret)
goto exit;
@@ -600,9 +661,9 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
mask |= QCA8K_PORT_STATUS_LINK_AUTO;
if (enable)
- qca8k_reg_set(priv, QCA8K_REG_PORT_STATUS(port), mask);
+ regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
else
- qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
+ regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
}
static u32
@@ -864,8 +925,8 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
* a dt-overlay and driver reload changed the configuration
*/
- return qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL,
- QCA8K_MDIO_MASTER_EN);
+ return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_EN);
}
/* Check if the devicetree declare the port:phy mapping */
@@ -983,7 +1044,7 @@ qca8k_parse_port_config(struct qca8k_priv *priv)
u32 delay;
/* We have 2 CPU ports. Check them both */
- for (port = 0; port < QCA8K_NUM_PORTS && cpu_port_index < QCA8K_NUM_CPU_PORTS; port++) {
+ for (port = 0; port < QCA8K_NUM_PORTS; port++) {
/* Skip every other port */
if (port != 0 && port != 6)
continue;
@@ -1014,7 +1075,7 @@ qca8k_parse_port_config(struct qca8k_priv *priv)
mode == PHY_INTERFACE_MODE_RGMII_TXID)
delay = 1;
- if (delay > QCA8K_MAX_DELAY) {
+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
delay = 3;
}
@@ -1030,7 +1091,7 @@ qca8k_parse_port_config(struct qca8k_priv *priv)
mode == PHY_INTERFACE_MODE_RGMII_RXID)
delay = 2;
- if (delay > QCA8K_MAX_DELAY) {
+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
delay = 3;
}
@@ -1089,14 +1150,6 @@ qca8k_setup(struct dsa_switch *ds)
if (ret)
return ret;
- mutex_init(&priv->reg_mutex);
-
- /* Start by setting up the register mapping */
- priv->regmap = devm_regmap_init(ds->dev, NULL, priv,
- &qca8k_regmap_config);
- if (IS_ERR(priv->regmap))
- dev_warn(priv->dev, "regmap initialization failed");
-
ret = qca8k_setup_mdio_bus(priv);
if (ret)
return ret;
@@ -1110,16 +1163,16 @@ qca8k_setup(struct dsa_switch *ds)
return ret;
/* Make sure MAC06 is disabled */
- ret = qca8k_reg_clear(priv, QCA8K_REG_PORT0_PAD_CTRL,
- QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
+ ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
+ QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
if (ret) {
dev_err(priv->dev, "failed disabling MAC06 exchange");
return ret;
}
/* Enable CPU Port */
- ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
- QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
if (ret) {
dev_err(priv->dev, "failed enabling CPU port");
return ret;
@@ -1141,8 +1194,8 @@ qca8k_setup(struct dsa_switch *ds)
/* Enable QCA header mode on all cpu ports */
if (dsa_is_cpu_port(ds, i)) {
ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
if (ret) {
dev_err(priv->dev, "failed enabling QCA header mode");
return ret;
@@ -1159,10 +1212,10 @@ qca8k_setup(struct dsa_switch *ds)
* for igmp, unknown, multicast and broadcast packet
*/
ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
if (ret)
return ret;
@@ -1180,8 +1233,6 @@ qca8k_setup(struct dsa_switch *ds)
/* Individual user ports get connected to CPU port only */
if (dsa_is_user_port(ds, i)) {
- int shift = 16 * (i % 2);
-
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
QCA8K_PORT_LOOKUP_MEMBER,
BIT(cpu_port));
@@ -1189,8 +1240,8 @@ qca8k_setup(struct dsa_switch *ds)
return ret;
/* Enable ARP Auto-learning by default */
- ret = qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_LEARN);
+ ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_LEARN);
if (ret)
return ret;
@@ -1198,8 +1249,8 @@ qca8k_setup(struct dsa_switch *ds)
* default egress vid
*/
ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
- 0xfff << shift,
- QCA8K_PORT_VID_DEF << shift);
+ QCA8K_EGREES_VLAN_PORT_MASK(i),
+ QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
if (ret)
return ret;
@@ -1246,7 +1297,7 @@ qca8k_setup(struct dsa_switch *ds)
QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
QCA8K_PORT_HOL_CTRL1_WRED_EN;
qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
- QCA8K_PORT_HOL_CTRL1_ING_BUF |
+ QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
QCA8K_PORT_HOL_CTRL1_WRED_EN,
@@ -1269,8 +1320,8 @@ qca8k_setup(struct dsa_switch *ds)
mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
- QCA8K_GLOBAL_FC_GOL_XON_THRES_S |
- QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S,
+ QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
mask);
}
@@ -1285,6 +1336,13 @@ qca8k_setup(struct dsa_switch *ds)
/* We don't have interrupts for link changes, so we need to poll */
ds->pcs_poll = true;
+ /* Set min and max ageing time supported */
+ ds->ageing_time_min = 7000;
+ ds->ageing_time_max = 458745000;
+
+ /* Set max number of LAGs supported */
+ ds->num_lag_ids = QCA8K_NUM_LAGS;
+
return 0;
}
@@ -1631,12 +1689,16 @@ qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
static void
qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
{
+ const struct qca8k_match_data *match_data;
+ struct qca8k_priv *priv = ds->priv;
int i;
if (stringset != ETH_SS_STATS)
return;
- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++)
+ match_data = of_device_get_match_data(priv->dev);
+
+ for (i = 0; i < match_data->mib_count; i++)
strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
ETH_GSTRING_LEN);
}
@@ -1646,12 +1708,15 @@ qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ const struct qca8k_match_data *match_data;
const struct qca8k_mib_desc *mib;
u32 reg, i, val;
u32 hi = 0;
int ret;
- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) {
+ match_data = of_device_get_match_data(priv->dev);
+
+ for (i = 0; i < match_data->mib_count; i++) {
mib = &ar8327_mib[i];
reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
@@ -1674,10 +1739,15 @@ qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
static int
qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
+ const struct qca8k_match_data *match_data;
+ struct qca8k_priv *priv = ds->priv;
+
if (sset != ETH_SS_STATS)
return 0;
- return ARRAY_SIZE(ar8327_mib);
+ match_data = of_device_get_match_data(priv->dev);
+
+ return match_data->mib_count;
}
static int
@@ -1758,9 +1828,9 @@ qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
/* Add this port to the portvlan mask of the other ports
* in the bridge
*/
- ret = qca8k_reg_set(priv,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
+ ret = regmap_set_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
if (ret)
return ret;
if (i != port)
@@ -1790,9 +1860,9 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
/* Remove this port from the portvlan mask of the other ports
* in the bridge
*/
- qca8k_reg_clear(priv,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
+ regmap_clear_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
}
/* Set the cpu port to be the only one in the portvlan mask of
@@ -1802,6 +1872,36 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
}
+static void
+qca8k_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int
+qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct qca8k_priv *priv = ds->priv;
+ unsigned int secs = msecs / 1000;
+ u32 val;
+
+ /* AGE_TIME reg is set in 7s step */
+ val = secs / 7;
+
+ /* A value of 0 would disable learning entirely,
+ * so clamp to the minimum step instead
+ */
+ if (!val)
+ val = 1;
+
+ return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
+ QCA8K_ATU_AGE_TIME(val));
+}
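+
+/* Worked example (illustrative): msecs = 300000 gives secs = 300 and
+ * val = 300 / 7 = 42, so the effective ageing time is 42 * 7 = 294 s.
+ * A request below 7 s would give val = 0, which is clamped to 1 above
+ * so that learning is never switched off.
+ */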
+
static int
qca8k_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
@@ -1908,6 +2008,121 @@ qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
}
static int
+qca8k_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+
+ return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
+}
+
+static int
+qca8k_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+
+ return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
+}
+
+static int
+qca8k_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int monitor_port, ret;
+ u32 reg, val;
+
+ /* Check for an existing entry */
+ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
+ return -EEXIST;
+
+ ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
+ if (ret)
+ return ret;
+
+ /* QCA83xx can have only one port set to mirror mode.
+ * Check that the correct port is requested and return error otherwise.
+ * When no mirror port is set, the value is set to 0xF
+ */
+ monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
+ return -EEXIST;
+
+ /* Set the monitor port */
+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
+ mirror->to_local_port);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (ret)
+ return ret;
+
+ if (ingress) {
+ reg = QCA8K_PORT_LOOKUP_CTRL(port);
+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
+ } else {
+ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
+ }
+
+ ret = regmap_update_bits(priv->regmap, reg, val, val);
+ if (ret)
+ return ret;
+
+ /* Track mirror port for tx and rx to decide when the
+ * mirror port has to be disabled.
+ */
+ if (ingress)
+ priv->mirror_rx |= BIT(port);
+ else
+ priv->mirror_tx |= BIT(port);
+
+ return 0;
+}
+
+static void
+qca8k_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct qca8k_priv *priv = ds->priv;
+ u32 reg, val;
+ int ret;
+
+ if (mirror->ingress) {
+ reg = QCA8K_PORT_LOOKUP_CTRL(port);
+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
+ } else {
+ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
+ }
+
+ ret = regmap_clear_bits(priv->regmap, reg, val);
+ if (ret)
+ goto err;
+
+ if (mirror->ingress)
+ priv->mirror_rx &= ~BIT(port);
+ else
+ priv->mirror_tx &= ~BIT(port);
+
+ /* No port is mirrored anymore. Disable the mirror port */
+ if (!priv->mirror_rx && !priv->mirror_tx) {
+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (ret)
+ goto err;
+ }
+
+ return;
+err:
+ dev_err(priv->dev, "Failed to del mirror port from %d", port);
+}
+
+static int
qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
struct netlink_ext_ack *extack)
{
@@ -1916,11 +2131,11 @@ qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
if (vlan_filtering) {
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_VLAN_MODE,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
} else {
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_VLAN_MODE,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
}
@@ -1944,10 +2159,9 @@ qca8k_port_vlan_add(struct dsa_switch *ds, int port,
}
if (pvid) {
- int shift = 16 * (port % 2);
-
ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
- 0xfff << shift, vlan->vid << shift);
+ QCA8K_EGREES_VLAN_PORT_MASK(port),
+ QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
if (ret)
return ret;
@@ -1996,12 +2210,185 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
return DSA_TAG_PROTO_QCA;
}
+static bool
+qca8k_lag_can_offload(struct dsa_switch *ds,
+ struct net_device *lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct dsa_port *dp;
+ int id, members = 0;
+
+ id = dsa_lag_id(ds->dst, lag);
+ if (id < 0 || id >= ds->num_lag_ids)
+ return false;
+
+ dsa_lag_foreach_port(dp, ds->dst, lag)
+ /* Includes the port joining the LAG */
+ members++;
+
+ if (members > QCA8K_NUM_PORTS_FOR_LAG)
+ return false;
+
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ return false;
+
+ if (info->hash_type != NETDEV_LAG_HASH_L2 &&
+ info->hash_type != NETDEV_LAG_HASH_L23)
+ return false;
+
+ return true;
+}
+
+static int
+qca8k_lag_setup_hash(struct dsa_switch *ds,
+ struct net_device *lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct qca8k_priv *priv = ds->priv;
+ bool unique_lag = true;
+ u32 hash = 0;
+ int i, id;
+
+ id = dsa_lag_id(ds->dst, lag);
+
+ switch (info->hash_type) {
+ case NETDEV_LAG_HASH_L23:
+ hash |= QCA8K_TRUNK_HASH_SIP_EN;
+ hash |= QCA8K_TRUNK_HASH_DIP_EN;
+ fallthrough;
+ case NETDEV_LAG_HASH_L2:
+ hash |= QCA8K_TRUNK_HASH_SA_EN;
+ hash |= QCA8K_TRUNK_HASH_DA_EN;
+ break;
+ default: /* We should NEVER reach this */
+ return -EOPNOTSUPP;
+ }
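+
+ /* e.g. (illustrative): NETDEV_LAG_HASH_L23 falls through and sets
+ * hash = SIP_EN | DIP_EN | SA_EN | DA_EN = 0xf, while plain L2
+ * hashing sets only SA_EN | DA_EN = 0x3.
+ */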
+
+ /* Check if we are the unique configured LAG */
+ dsa_lags_foreach_id(i, ds->dst)
+ if (i != id && dsa_lag_dev(ds->dst, i)) {
+ unique_lag = false;
+ break;
+ }
+
+ /* The hash mode is global. Make sure the same hash mode
+ * is used by all 4 possible LAGs.
+ * If we are the only configured LAG, we can set whatever
+ * hash mode we want.
+ * To change the hash mode, all LAGs must first be removed
+ * and then re-created with the new mode.
+ */
+ if (unique_lag) {
+ priv->lag_hash_mode = hash;
+ } else if (priv->lag_hash_mode != hash) {
+ netdev_err(lag, "Error: Mismatched Hash Mode across different lag is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
+ QCA8K_TRUNK_HASH_MASK, hash);
+}
+
+static int
+qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
+ struct net_device *lag, bool delete)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret, id, i;
+ u32 val;
+
+ id = dsa_lag_id(ds->dst, lag);
+
+ /* Read current port member */
+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
+ if (ret)
+ return ret;
+
+ /* Shift val to the correct trunk */
+ val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
+ val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
+ if (delete)
+ val &= ~BIT(port);
+ else
+ val |= BIT(port);
+
+ /* Update the port members. Disable the trunk if the portmap is empty */
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
+ QCA8K_REG_GOL_TRUNK_MEMBER(id) |
+ QCA8K_REG_GOL_TRUNK_EN(id),
+ !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
+ val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
+
+ /* Search for an empty member slot when adding, or for the port when deleting */
+ for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
+ if (ret)
+ return ret;
+
+ val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
+
+ if (delete) {
+ /* If the member is flagged as disabled, assume this
+ * slot is empty
+ */
+ if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
+ continue;
+
+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
+ if (val != port)
+ continue;
+ } else {
+ /* If the member is flagged as enabled, this slot is
+ * already taken
+ */
+ if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
+ continue;
+ }
+
+ /* We have found the member to add/remove */
+ break;
+ }
+
+ /* Set the port in the matching member slot, or disable the slot in delete mode */
+ return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
+ !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
+ port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
+}
+
+static int
+qca8k_port_lag_join(struct dsa_switch *ds, int port,
+ struct net_device *lag,
+ struct netdev_lag_upper_info *info)
+{
+ int ret;
+
+ if (!qca8k_lag_can_offload(ds, lag, info))
+ return -EOPNOTSUPP;
+
+ ret = qca8k_lag_setup_hash(ds, lag, info);
+ if (ret)
+ return ret;
+
+ return qca8k_lag_refresh_portmap(ds, port, lag, false);
+}
+
+static int
+qca8k_port_lag_leave(struct dsa_switch *ds, int port,
+ struct net_device *lag)
+{
+ return qca8k_lag_refresh_portmap(ds, port, lag, true);
+}
+
static const struct dsa_switch_ops qca8k_switch_ops = {
.get_tag_protocol = qca8k_get_tag_protocol,
.setup = qca8k_setup,
.get_strings = qca8k_get_strings,
.get_ethtool_stats = qca8k_get_ethtool_stats,
.get_sset_count = qca8k_get_sset_count,
+ .set_ageing_time = qca8k_set_ageing_time,
.get_mac_eee = qca8k_get_mac_eee,
.set_mac_eee = qca8k_set_mac_eee,
.port_enable = qca8k_port_enable,
@@ -2011,9 +2398,14 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.port_stp_state_set = qca8k_port_stp_state_set,
.port_bridge_join = qca8k_port_bridge_join,
.port_bridge_leave = qca8k_port_bridge_leave,
+ .port_fast_age = qca8k_port_fast_age,
.port_fdb_add = qca8k_port_fdb_add,
.port_fdb_del = qca8k_port_fdb_del,
.port_fdb_dump = qca8k_port_fdb_dump,
+ .port_mdb_add = qca8k_port_mdb_add,
+ .port_mdb_del = qca8k_port_mdb_del,
+ .port_mirror_add = qca8k_port_mirror_add,
+ .port_mirror_del = qca8k_port_mirror_del,
.port_vlan_filtering = qca8k_port_vlan_filtering,
.port_vlan_add = qca8k_port_vlan_add,
.port_vlan_del = qca8k_port_vlan_del,
@@ -2023,6 +2415,8 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.phylink_mac_link_down = qca8k_phylink_mac_link_down,
.phylink_mac_link_up = qca8k_phylink_mac_link_up,
.get_phy_flags = qca8k_get_phy_flags,
+ .port_lag_join = qca8k_port_lag_join,
+ .port_lag_leave = qca8k_port_lag_leave,
};
static int qca8k_read_switch_id(struct qca8k_priv *priv)
@@ -2041,7 +2435,7 @@ static int qca8k_read_switch_id(struct qca8k_priv *priv)
if (ret < 0)
return -ENODEV;
- id = QCA8K_MASK_CTRL_DEVICE_ID(val & QCA8K_MASK_CTRL_DEVICE_ID_MASK);
+ id = QCA8K_MASK_CTRL_DEVICE_ID(val);
if (id != data->id) {
dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id);
return -ENODEV;
@@ -2050,7 +2444,7 @@ static int qca8k_read_switch_id(struct qca8k_priv *priv)
priv->switch_id = id;
/* Save revision to communicate to the internal PHY driver */
- priv->switch_revision = (val & QCA8K_MASK_CTRL_REV_ID_MASK);
+ priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
return 0;
}
@@ -2085,6 +2479,14 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
gpiod_set_value_cansleep(priv->reset_gpio, 0);
}
+ /* Start by setting up the register mapping */
+ priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
+ &qca8k_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(priv->dev, "regmap initialization failed");
+ return PTR_ERR(priv->regmap);
+ }
+
/* Check the detected switch id */
ret = qca8k_read_switch_id(priv);
if (ret)
@@ -2173,14 +2575,17 @@ static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
static const struct qca8k_match_data qca8327 = {
.id = QCA8K_ID_QCA8327,
.reduced_package = true,
+ .mib_count = QCA8K_QCA832X_MIB_COUNT,
};
static const struct qca8k_match_data qca8328 = {
.id = QCA8K_ID_QCA8327,
+ .mib_count = QCA8K_QCA832X_MIB_COUNT,
};
static const struct qca8k_match_data qca833x = {
.id = QCA8K_ID_QCA8337,
+ .mib_count = QCA8K_QCA833X_MIB_COUNT,
};
static const struct of_device_id qca8k_of_match[] = {
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index 128b8cf85e08..ab4a417b25a9 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -15,12 +15,17 @@
#define QCA8K_NUM_PORTS 7
#define QCA8K_NUM_CPU_PORTS 2
#define QCA8K_MAX_MTU 9000
+#define QCA8K_NUM_LAGS 4
+#define QCA8K_NUM_PORTS_FOR_LAG 4
#define PHY_ID_QCA8327 0x004dd034
#define QCA8K_ID_QCA8327 0x12
#define PHY_ID_QCA8337 0x004dd036
#define QCA8K_ID_QCA8337 0x13
+#define QCA8K_QCA832X_MIB_COUNT 39
+#define QCA8K_QCA833X_MIB_COUNT 41
+
#define QCA8K_BUSY_WAIT_TIMEOUT 2000
#define QCA8K_NUM_FDB_RECORDS 2048
@@ -30,9 +35,9 @@
/* Global control registers */
#define QCA8K_REG_MASK_CTRL 0x000
#define QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0)
-#define QCA8K_MASK_CTRL_REV_ID(x) ((x) >> 0)
+#define QCA8K_MASK_CTRL_REV_ID(x) FIELD_GET(QCA8K_MASK_CTRL_REV_ID_MASK, x)
#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8)
-#define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8)
+#define QCA8K_MASK_CTRL_DEVICE_ID(x) FIELD_GET(QCA8K_MASK_CTRL_DEVICE_ID_MASK, x)
#define QCA8K_REG_PORT0_PAD_CTRL 0x004
#define QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN BIT(31)
#define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19)
@@ -41,12 +46,11 @@
#define QCA8K_REG_PORT6_PAD_CTRL 0x00c
#define QCA8K_PORT_PAD_RGMII_EN BIT(26)
#define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22)
-#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) ((x) << 22)
+#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, x)
#define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20)
-#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) ((x) << 20)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, x)
#define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25)
#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
-#define QCA8K_MAX_DELAY 3
#define QCA8K_PORT_PAD_SGMII_EN BIT(7)
#define QCA8K_REG_PWS 0x010
#define QCA8K_PWS_POWER_ON_SEL BIT(31)
@@ -68,10 +72,12 @@
#define QCA8K_MDIO_MASTER_READ BIT(27)
#define QCA8K_MDIO_MASTER_WRITE 0
#define QCA8K_MDIO_MASTER_SUP_PRE BIT(26)
-#define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21)
-#define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16)
-#define QCA8K_MDIO_MASTER_DATA(x) (x)
+#define QCA8K_MDIO_MASTER_PHY_ADDR_MASK GENMASK(25, 21)
+#define QCA8K_MDIO_MASTER_PHY_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_PHY_ADDR_MASK, x)
+#define QCA8K_MDIO_MASTER_REG_ADDR_MASK GENMASK(20, 16)
+#define QCA8K_MDIO_MASTER_REG_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_REG_ADDR_MASK, x)
#define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0)
+#define QCA8K_MDIO_MASTER_DATA(x) FIELD_PREP(QCA8K_MDIO_MASTER_DATA_MASK, x)
#define QCA8K_MDIO_MASTER_MAX_PORTS 5
#define QCA8K_MDIO_MASTER_MAX_REG 32
#define QCA8K_GOL_MAC_ADDR0 0x60
@@ -93,9 +99,7 @@
#define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12)
#define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4))
#define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2)
-#define QCA8K_PORT_HDR_CTRL_RX_S 2
#define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0)
-#define QCA8K_PORT_HDR_CTRL_TX_S 0
#define QCA8K_PORT_HDR_CTRL_ALL 2
#define QCA8K_PORT_HDR_CTRL_MGMT 1
#define QCA8K_PORT_HDR_CTRL_NONE 0
@@ -105,10 +109,11 @@
#define QCA8K_SGMII_EN_TX BIT(3)
#define QCA8K_SGMII_EN_SD BIT(4)
#define QCA8K_SGMII_CLK125M_DELAY BIT(7)
-#define QCA8K_SGMII_MODE_CTRL_MASK (BIT(22) | BIT(23))
-#define QCA8K_SGMII_MODE_CTRL_BASEX (0 << 22)
-#define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22)
-#define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22)
+#define QCA8K_SGMII_MODE_CTRL_MASK GENMASK(23, 22)
+#define QCA8K_SGMII_MODE_CTRL(x) FIELD_PREP(QCA8K_SGMII_MODE_CTRL_MASK, x)
+#define QCA8K_SGMII_MODE_CTRL_BASEX QCA8K_SGMII_MODE_CTRL(0x0)
+#define QCA8K_SGMII_MODE_CTRL_PHY QCA8K_SGMII_MODE_CTRL(0x1)
+#define QCA8K_SGMII_MODE_CTRL_MAC QCA8K_SGMII_MODE_CTRL(0x2)
/* MAC_PWR_SEL registers */
#define QCA8K_REG_MAC_PWR_SEL 0x0e4
@@ -119,102 +124,152 @@
#define QCA8K_REG_EEE_CTRL 0x100
#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
+/* TRUNK_HASH_EN registers */
+#define QCA8K_TRUNK_HASH_EN_CTRL 0x270
+#define QCA8K_TRUNK_HASH_SIP_EN BIT(3)
+#define QCA8K_TRUNK_HASH_DIP_EN BIT(2)
+#define QCA8K_TRUNK_HASH_SA_EN BIT(1)
+#define QCA8K_TRUNK_HASH_DA_EN BIT(0)
+#define QCA8K_TRUNK_HASH_MASK GENMASK(3, 0)
+
/* ACL registers */
#define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8))
-#define QCA8K_PORT_VLAN_CVID(x) (x << 16)
-#define QCA8K_PORT_VLAN_SVID(x) x
+#define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16)
+#define QCA8K_PORT_VLAN_CVID(x) FIELD_PREP(QCA8K_PORT_VLAN_CVID_MASK, x)
+#define QCA8K_PORT_VLAN_SVID_MASK GENMASK(11, 0)
+#define QCA8K_PORT_VLAN_SVID(x) FIELD_PREP(QCA8K_PORT_VLAN_SVID_MASK, x)
#define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8))
#define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470
#define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474
/* Lookup registers */
#define QCA8K_REG_ATU_DATA0 0x600
-#define QCA8K_ATU_ADDR2_S 24
-#define QCA8K_ATU_ADDR3_S 16
-#define QCA8K_ATU_ADDR4_S 8
+#define QCA8K_ATU_ADDR2_MASK GENMASK(31, 24)
+#define QCA8K_ATU_ADDR3_MASK GENMASK(23, 16)
+#define QCA8K_ATU_ADDR4_MASK GENMASK(15, 8)
+#define QCA8K_ATU_ADDR5_MASK GENMASK(7, 0)
#define QCA8K_REG_ATU_DATA1 0x604
-#define QCA8K_ATU_PORT_M 0x7f
-#define QCA8K_ATU_PORT_S 16
-#define QCA8K_ATU_ADDR0_S 8
+#define QCA8K_ATU_PORT_MASK GENMASK(22, 16)
+#define QCA8K_ATU_ADDR0_MASK GENMASK(15, 8)
+#define QCA8K_ATU_ADDR1_MASK GENMASK(7, 0)
#define QCA8K_REG_ATU_DATA2 0x608
-#define QCA8K_ATU_VID_M 0xfff
-#define QCA8K_ATU_VID_S 8
-#define QCA8K_ATU_STATUS_M 0xf
+#define QCA8K_ATU_VID_MASK GENMASK(19, 8)
+#define QCA8K_ATU_STATUS_MASK GENMASK(3, 0)
#define QCA8K_ATU_STATUS_STATIC 0xf
#define QCA8K_REG_ATU_FUNC 0x60c
#define QCA8K_ATU_FUNC_BUSY BIT(31)
#define QCA8K_ATU_FUNC_PORT_EN BIT(14)
#define QCA8K_ATU_FUNC_MULTI_EN BIT(13)
#define QCA8K_ATU_FUNC_FULL BIT(12)
-#define QCA8K_ATU_FUNC_PORT_M 0xf
-#define QCA8K_ATU_FUNC_PORT_S 8
+#define QCA8K_ATU_FUNC_PORT_MASK GENMASK(11, 8)
#define QCA8K_REG_VTU_FUNC0 0x610
#define QCA8K_VTU_FUNC0_VALID BIT(20)
#define QCA8K_VTU_FUNC0_IVL_EN BIT(19)
-#define QCA8K_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2)
-#define QCA8K_VTU_FUNC0_EG_MODE_MASK 3
-#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD 0
-#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG 1
-#define QCA8K_VTU_FUNC0_EG_MODE_TAG 2
-#define QCA8K_VTU_FUNC0_EG_MODE_NOT 3
+/* The full EG_MODE field is GENMASK(17, 4) and contains the VLAN
+ * egress mode of each port: bits [5:4] for port 0, [7:6] for port 1
+ * ... [17:16] for port 6. Use the per-port defines below to handle
+ * this.
+ */
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i) (4 + (_i) * 2)
+#define QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(1, 0)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(_i) (GENMASK(1, 0) << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x0)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNMOD(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNMOD << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x1)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNTAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_TAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x2)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_TAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_NOT FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x3)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(_i) (QCA8K_VTU_FUNC0_EG_MODE_NOT << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
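+/* Illustrative expansion: QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(1) places the
+ * UNTAG value 0x1 at port 1's field, bits [7:6]: (0x1 << (4 + 1 * 2)) = BIT(6).
+ */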
#define QCA8K_REG_VTU_FUNC1 0x614
#define QCA8K_VTU_FUNC1_BUSY BIT(31)
-#define QCA8K_VTU_FUNC1_VID_S 16
+#define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16)
#define QCA8K_VTU_FUNC1_FULL BIT(4)
+#define QCA8K_REG_ATU_CTRL 0x618
+#define QCA8K_ATU_AGE_TIME_MASK GENMASK(15, 0)
+#define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x))
#define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
#define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
+#define QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM GENMASK(7, 4)
#define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
-#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S 24
-#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_S 16
-#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_S 8
-#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_S 0
+#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24)
+#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16)
+#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK GENMASK(14, 8)
+#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK GENMASK(6, 0)
#define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc)
#define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE GENMASK(9, 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE (0 << 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK (1 << 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK (2 << 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE (3 << 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_MASK GENMASK(9, 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, x)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE QCA8K_PORT_LOOKUP_VLAN_MODE(0x0)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK QCA8K_PORT_LOOKUP_VLAN_MODE(0x1)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK QCA8K_PORT_LOOKUP_VLAN_MODE(0x2)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE QCA8K_PORT_LOOKUP_VLAN_MODE(0x3)
#define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16)
-#define QCA8K_PORT_LOOKUP_STATE_DISABLED (0 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_BLOCKING (1 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_LISTENING (2 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_LEARNING (3 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_FORWARD (4 << 16)
-#define QCA8K_PORT_LOOKUP_STATE GENMASK(18, 16)
+#define QCA8K_PORT_LOOKUP_STATE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_STATE_MASK, x)
+#define QCA8K_PORT_LOOKUP_STATE_DISABLED QCA8K_PORT_LOOKUP_STATE(0x0)
+#define QCA8K_PORT_LOOKUP_STATE_BLOCKING QCA8K_PORT_LOOKUP_STATE(0x1)
+#define QCA8K_PORT_LOOKUP_STATE_LISTENING QCA8K_PORT_LOOKUP_STATE(0x2)
+#define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3)
+#define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4)
#define QCA8K_PORT_LOOKUP_LEARN BIT(20)
+#define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25)
+
+#define QCA8K_REG_GOL_TRUNK_CTRL0 0x700
+/* 4 trunks max. Each trunk uses an 8 bit field:
+ * bits [6:0] are the member bitmap,
+ * bit 7 enables the trunk
+ */
+#define QCA8K_REG_GOL_TRUNK_SHIFT(_i) ((_i) * 8)
+#define QCA8K_REG_GOL_TRUNK_EN_MASK BIT(7)
+#define QCA8K_REG_GOL_TRUNK_EN(_i) (QCA8K_REG_GOL_TRUNK_EN_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
+#define QCA8K_REG_GOL_TRUNK_MEMBER_MASK GENMASK(6, 0)
+#define QCA8K_REG_GOL_TRUNK_MEMBER(_i) (QCA8K_REG_GOL_TRUNK_MEMBER_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
+/* 0x704 for TRUNK 0-1 --- 0x708 for TRUNK 2-3 */
+#define QCA8K_REG_GOL_TRUNK_CTRL(_i) (0x704 + (((_i) / 2) * 4))
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK GENMASK(3, 0)
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK BIT(3)
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK GENMASK(2, 0)
+#define QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i) (((_i) % 2) * 16)
+#define QCA8K_REG_GOL_MEM_ID_SHIFT(_i) ((_i) * 4)
+/* Complex shift: shift for the member slot FIRST, THEN for the trunk */
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j) (QCA8K_REG_GOL_MEM_ID_SHIFT(_j) + QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i))
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
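+/* Illustrative expansion: trunk 0, member slot 1 lives at bits [7:4] of
+ * QCA8K_REG_GOL_TRUNK_CTRL(0) (0x704): the port number occupies bits [6:4]
+ * and the member enable flag is bit 7.
+ */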
#define QCA8K_REG_GLOBAL_FC_THRESH 0x800
-#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) ((x) << 16)
-#define QCA8K_GLOBAL_FC_GOL_XON_THRES_S GENMASK(24, 16)
-#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) ((x) << 0)
-#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S GENMASK(8, 0)
+#define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16)
+#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x)
+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK GENMASK(8, 0)
+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, x)
#define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF GENMASK(3, 0)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) ((x) << 0)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF GENMASK(7, 4)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) ((x) << 4)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF GENMASK(11, 8)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) ((x) << 8)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF GENMASK(15, 12)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) ((x) << 12)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF GENMASK(19, 16)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) ((x) << 16)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF GENMASK(23, 20)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) ((x) << 20)
-#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF GENMASK(29, 24)
-#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) ((x) << 24)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK GENMASK(3, 0)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK GENMASK(7, 4)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK GENMASK(11, 8)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK GENMASK(15, 12)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK GENMASK(19, 16)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK GENMASK(23, 20)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK GENMASK(29, 24)
+#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK, x)
#define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8)
-#define QCA8K_PORT_HOL_CTRL1_ING_BUF GENMASK(3, 0)
-#define QCA8K_PORT_HOL_CTRL1_ING(x) ((x) << 0)
+#define QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK GENMASK(3, 0)
+#define QCA8K_PORT_HOL_CTRL1_ING(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK, x)
#define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6)
#define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7)
#define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8)
#define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16)
/* Pkt edit registers */
+#define QCA8K_EGREES_VLAN_PORT_SHIFT(_i) (16 * ((_i) % 2))
+#define QCA8K_EGREES_VLAN_PORT_MASK(_i) (GENMASK(11, 0) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
+#define QCA8K_EGREES_VLAN_PORT(_i, x) ((x) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
#define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2)))
/* L3 registers */
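The register macros above all follow one pattern after this change: a GENMASK()-based *_MASK define plus a value macro built on FIELD_PREP(), so each field's position is encoded exactly once instead of in a matching mask/shift pair. A sketch of the write side, with illustrative threshold values:

	u32 val = QCA8K_GLOBAL_FC_GOL_XON_THRES(308) |
		  QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);

	qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
		  QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
		  QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, val);

FIELD_GET() with the same mask recovers a field on the read side.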
@@ -244,6 +299,7 @@ enum qca8k_fdb_cmd {
QCA8K_FDB_FLUSH = 1,
QCA8K_FDB_LOAD = 2,
QCA8K_FDB_PURGE = 3,
+ QCA8K_FDB_FLUSH_PORT = 5,
QCA8K_FDB_NEXT = 6,
QCA8K_FDB_SEARCH = 7,
};
@@ -264,6 +320,7 @@ struct ar8xxx_port_status {
struct qca8k_match_data {
u8 id;
bool reduced_package;
+ u8 mib_count;
};
enum {
@@ -282,6 +339,9 @@ struct qca8k_ports_config {
struct qca8k_priv {
u8 switch_id;
u8 switch_revision;
+ u8 mirror_rx;
+ u8 mirror_tx;
+ u8 lag_hash_mode;
bool legacy_phy_port_mapping;
struct qca8k_ports_config ports_config;
struct regmap *regmap;
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index a4b1447ff055..4c18f619ec02 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -1216,12 +1216,10 @@ int vsc73xx_probe(struct vsc73xx *vsc)
}
EXPORT_SYMBOL(vsc73xx_probe);
-int vsc73xx_remove(struct vsc73xx *vsc)
+void vsc73xx_remove(struct vsc73xx *vsc)
{
dsa_unregister_switch(vsc->ds);
gpiod_set_value(vsc->reset, 1);
-
- return 0;
}
EXPORT_SYMBOL(vsc73xx_remove);
diff --git a/drivers/net/dsa/vitesse-vsc73xx.h b/drivers/net/dsa/vitesse-vsc73xx.h
index 30b951504e65..30b1f0a36566 100644
--- a/drivers/net/dsa/vitesse-vsc73xx.h
+++ b/drivers/net/dsa/vitesse-vsc73xx.h
@@ -26,5 +26,5 @@ struct vsc73xx_ops {
int vsc73xx_is_addr_valid(u8 block, u8 subblock);
int vsc73xx_probe(struct vsc73xx *vsc);
-int vsc73xx_remove(struct vsc73xx *vsc);
+void vsc73xx_remove(struct vsc73xx *vsc);
void vsc73xx_shutdown(struct vsc73xx *vsc);
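vsc73xx_remove() could only ever return 0, so its return type becomes void; the bus glue drivers that call it still return 0 from their own remove callbacks. A sketch of what the SPI glue (not shown in this hunk) would now look like, assuming a drvdata layout like the in-tree driver's:

	static int vsc73xx_spi_remove(struct spi_device *spi)
	{
		struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);

		vsc73xx_remove(&vsc_spi->vsc);

		return 0;
	}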
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 05e15b6e5e2c..481f1df3106c 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -1138,7 +1138,9 @@ typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
}
static void
-typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
ering->rx_max_pending = RXENT_ENTRIES;
ering->tx_max_pending = TXLO_ENTRIES - 1;
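This is the first of many mechanical conversions in this diff: the ethtool .get_ringparam()/.set_ringparam() ops grow a struct kernel_ethtool_ringparam, for ring attributes that exist only in the netlink interface such as rx_buf_len, and a struct netlink_ext_ack for extended error reporting. Drivers that need neither, like typhoon here, simply ignore the new arguments; a driver that uses both might look roughly like this (hypothetical driver and helpers):

	static int foo_set_ringparam(struct net_device *dev,
				     struct ethtool_ringparam *ering,
				     struct kernel_ethtool_ringparam *kernel_ering,
				     struct netlink_ext_ack *extack)
	{
		if (kernel_ering->rx_buf_len != FOO_RX_BUF_LEN) {
			NL_SET_ERR_MSG_MOD(extack,
					   "rx-buf-len is fixed on this NIC");
			return -EOPNOTSUPP;
		}

		return foo_resize_rings(dev, ering->rx_pending,
					ering->tx_pending);
	}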
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index 941754ea78ec..1df7601af86a 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -116,6 +116,7 @@ static int hydra_init(struct zorro_dev *z)
unsigned long ioaddr = board+HYDRA_NIC_BASE;
const char name[] = "NE2000";
int start_page, stop_page;
+ u8 macaddr[ETH_ALEN];
int j;
int err;
@@ -129,7 +130,8 @@ static int hydra_init(struct zorro_dev *z)
return -ENOMEM;
for (j = 0; j < ETH_ALEN; j++)
- dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
+ macaddr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
+ eth_hw_addr_set(dev, macaddr);
/* We must set the 8390 for word mode. */
z_writeb(0x4b, ioaddr + NE_EN0_DCFG);
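Another recurring conversion: netdev->dev_addr is becoming const, so drivers may no longer write it byte by byte. The pattern, as in hydra above and many drivers below, is to assemble the address in a local u8 buffer and commit it once. The helper itself is tiny; in include/linux/etherdevice.h it is essentially:

	static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
	{
		__dev_addr_set(dev, addr, ETH_ALEN);
	}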
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 91b04abfd687..7fb819b9b89a 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -292,6 +292,7 @@ static bool mac8390_rsrc_init(struct net_device *dev,
struct nubus_dirent ent;
int offset;
volatile unsigned short *i;
+ u8 addr[ETH_ALEN];
dev->irq = SLOT2IRQ(board->slot);
/* This is getting to be a habit */
@@ -314,7 +315,8 @@ static bool mac8390_rsrc_init(struct net_device *dev,
return false;
}
- nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
+ nubus_get_rsrc_mem(addr, &ent, 6);
+ eth_hw_addr_set(dev, addr);
if (useresources[cardtype] == 1) {
nubus_rewinddir(&dir);
diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c
index 0890fa493f70..6e62c37c9400 100644
--- a/drivers/net/ethernet/8390/smc-ultra.c
+++ b/drivers/net/ethernet/8390/smc-ultra.c
@@ -204,6 +204,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
{
int i, retval;
int checksum = 0;
+ u8 macaddr[ETH_ALEN];
const char *model_name;
unsigned char eeprom_irq = 0;
static unsigned version_printed;
@@ -239,7 +240,8 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ";
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + 8 + i);
+ macaddr[i] = inb(ioaddr + 8 + i);
+ eth_hw_addr_set(dev, macaddr);
netdev_info(dev, "%s at %#3x, %pM", model_name,
ioaddr, dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c
index 263a942d81fa..5b00c452bede 100644
--- a/drivers/net/ethernet/8390/wd.c
+++ b/drivers/net/ethernet/8390/wd.c
@@ -168,6 +168,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
int checksum = 0;
int ancient = 0; /* An old card without config registers. */
int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */
+ u8 addr[ETH_ALEN];
const char *model_name;
static unsigned version_printed;
struct ei_device *ei_local = netdev_priv(dev);
@@ -191,7 +192,8 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
netdev_info(dev, version);
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + 8 + i);
+ addr[i] = inb(ioaddr + 8 + i);
+ eth_hw_addr_set(dev, addr);
netdev_info(dev, "WD80x3 at %#3x, %pM", ioaddr, dev->dev_addr);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 4601b38f532a..027cbacca1c9 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -73,6 +73,7 @@ config DNET
source "drivers/net/ethernet/dec/Kconfig"
source "drivers/net/ethernet/dlink/Kconfig"
source "drivers/net/ethernet/emulex/Kconfig"
+source "drivers/net/ethernet/engleder/Kconfig"
source "drivers/net/ethernet/ezchip/Kconfig"
source "drivers/net/ethernet/faraday/Kconfig"
source "drivers/net/ethernet/freescale/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index fdd8c6c17451..33d30b619e00 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_DNET) += dnet.o
obj-$(CONFIG_NET_VENDOR_DEC) += dec/
obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/
obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
+obj-$(CONFIG_NET_VENDOR_ENGLEDER) += engleder/
obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/
obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 13e745cf3781..6b9b43e422c1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -465,7 +465,9 @@ static void ena_get_drvinfo(struct net_device *dev,
}
static void ena_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ena_adapter *adapter = netdev_priv(netdev);
@@ -476,7 +478,9 @@ static void ena_get_ringparam(struct net_device *netdev,
}
static int ena_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ena_adapter *adapter = netdev_priv(netdev);
u32 new_tx_size, new_rx_size;
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 2f808dbc8b0e..3a351d3396bf 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -680,6 +680,7 @@ static int a2065_init_one(struct zorro_dev *z,
unsigned long base_addr = board + A2065_LANCE;
unsigned long mem_start = board + A2065_RAM;
struct resource *r1, *r2;
+ u8 addr[ETH_ALEN];
u32 serial;
int err;
@@ -706,17 +707,18 @@ static int a2065_init_one(struct zorro_dev *z,
r2->name = dev->name;
serial = be32_to_cpu(z->rom.er_SerialNumber);
- dev->dev_addr[0] = 0x00;
+ addr[0] = 0x00;
if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */
- dev->dev_addr[1] = 0x80;
- dev->dev_addr[2] = 0x10;
+ addr[1] = 0x80;
+ addr[2] = 0x10;
} else { /* Ameristar */
- dev->dev_addr[1] = 0x00;
- dev->dev_addr[2] = 0x9f;
+ addr[1] = 0x00;
+ addr[2] = 0x9f;
}
- dev->dev_addr[3] = (serial >> 16) & 0xff;
- dev->dev_addr[4] = (serial >> 8) & 0xff;
- dev->dev_addr[5] = serial & 0xff;
+ addr[3] = (serial >> 16) & 0xff;
+ addr[4] = (serial >> 8) & 0xff;
+ addr[5] = serial & 0xff;
+ eth_hw_addr_set(dev, addr);
dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
dev->mem_end = dev->mem_start + A2065_RAM_SIZE;
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 5e0f645f5bde..4ea7b9f3c424 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -441,11 +441,11 @@ static int ariadne_open(struct net_device *dev)
/* Set the Ethernet Hardware Address */
lance->RAP = CSR12; /* Physical Address Register, PADR[15:0] */
- lance->RDP = ((u_short *)&dev->dev_addr[0])[0];
+ lance->RDP = ((const u_short *)&dev->dev_addr[0])[0];
lance->RAP = CSR13; /* Physical Address Register, PADR[31:16] */
- lance->RDP = ((u_short *)&dev->dev_addr[0])[1];
+ lance->RDP = ((const u_short *)&dev->dev_addr[0])[1];
lance->RAP = CSR14; /* Physical Address Register, PADR[47:32] */
- lance->RDP = ((u_short *)&dev->dev_addr[0])[2];
+ lance->RDP = ((const u_short *)&dev->dev_addr[0])[2];
/* Set the Init Block Mode */
lance->RAP = CSR15; /* Mode Register */
@@ -717,6 +717,7 @@ static int ariadne_init_one(struct zorro_dev *z,
unsigned long mem_start = board + ARIADNE_RAM;
struct resource *r1, *r2;
struct net_device *dev;
+ u8 addr[ETH_ALEN];
u32 serial;
int err;
@@ -740,12 +741,13 @@ static int ariadne_init_one(struct zorro_dev *z,
r2->name = dev->name;
serial = be32_to_cpu(z->rom.er_SerialNumber);
- dev->dev_addr[0] = 0x00;
- dev->dev_addr[1] = 0x60;
- dev->dev_addr[2] = 0x30;
- dev->dev_addr[3] = (serial >> 16) & 0xff;
- dev->dev_addr[4] = (serial >> 8) & 0xff;
- dev->dev_addr[5] = serial & 0xff;
+ addr[0] = 0x00;
+ addr[1] = 0x60;
+ addr[2] = 0x30;
+ addr[3] = (serial >> 16) & 0xff;
+ addr[4] = (serial >> 8) & 0xff;
+ addr[5] = serial & 0xff;
+ eth_hw_addr_set(dev, addr);
dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE;
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 9c7d9690d00c..27869164c6e6 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -471,6 +471,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
int i;
static int did_version;
unsigned short save1, save2;
+ u8 addr[ETH_ALEN];
PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n",
(long)memaddr, (long)ioaddr ));
@@ -585,14 +586,16 @@ static unsigned long __init lance_probe1( struct net_device *dev,
eth_hw_addr_set(dev, OldRieblDefHwaddr);
break;
case NEW_RIEBL:
- lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
+ lp->memcpy_f(addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
break;
case PAM_CARD:
i = IO->eeprom;
for( i = 0; i < 6; ++i )
- dev->dev_addr[i] =
+ addr[i] =
((((unsigned short *)MEM)[i*2] & 0x0f) << 4) |
((((unsigned short *)MEM)[i*2+1] & 0x0f));
+ eth_hw_addr_set(dev, addr);
i = IO->mem;
break;
}
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 6784f8748638..055fda11c572 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -129,6 +129,7 @@ static void hplance_init(struct net_device *dev, struct dio_dev *d)
{
unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
struct hplance_private *lp;
+ u8 addr[ETH_ALEN];
int i;
/* reset the board */
@@ -144,9 +145,10 @@ static void hplance_init(struct net_device *dev, struct dio_dev *d)
/* The NVRAM holds our ethernet address, one nibble per byte,
* at bytes NVRAMOFF+1,3,5,7,9...
*/
- dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
+ addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
| (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
}
+ eth_hw_addr_set(dev, addr);
lp = netdev_priv(dev);
lp->lance.name = d->name;
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 945bf1d87507..462016666752 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -480,6 +480,7 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
unsigned long flags;
int err = -ENOMEM;
void __iomem *bios;
+ u8 addr[ETH_ALEN];
/* First we look for special cases.
Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
@@ -541,7 +542,8 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
/* There is a 16 byte station address PROM at the base address.
The first six bytes are the station address. */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + i);
+ addr[i] = inb(ioaddr + i);
+ eth_hw_addr_set(dev, addr);
printk("%pM", dev->dev_addr);
dev->base_addr = ioaddr;
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index da97fccea9ea..410c7b67eba4 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -74,6 +74,7 @@ static struct net_device * __init mvme147lance_probe(void)
static int called;
static const char name[] = "MVME147 LANCE";
struct m147lance_private *lp;
+ u8 macaddr[ETH_ALEN];
u_long *addr;
u_long address;
int err;
@@ -93,15 +94,16 @@ static struct net_device * __init mvme147lance_probe(void)
addr = (u_long *)ETHERNET_ADDRESS;
address = *addr;
- dev->dev_addr[0] = 0x08;
- dev->dev_addr[1] = 0x00;
- dev->dev_addr[2] = 0x3e;
+ macaddr[0] = 0x08;
+ macaddr[1] = 0x00;
+ macaddr[2] = 0x3e;
address = address >> 8;
- dev->dev_addr[5] = address&0xff;
+ macaddr[5] = address&0xff;
address = address >> 8;
- dev->dev_addr[4] = address&0xff;
+ macaddr[4] = address&0xff;
address = address >> 8;
- dev->dev_addr[3] = address&0xff;
+ macaddr[3] = address&0xff;
+ eth_hw_addr_set(dev, macaddr);
printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
dev->name, dev->base_addr, MVME147_LANCE_IRQ,
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 032e8922b482..8ba579b89b75 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -251,7 +251,7 @@ static void ni65_recv_intr(struct net_device *dev,int);
static void ni65_xmit_intr(struct net_device *dev,int);
static int ni65_open(struct net_device *dev);
static int ni65_lance_reinit(struct net_device *dev);
-static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
+static void ni65_init_lance(struct priv *p,const unsigned char*,int,int);
static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
struct net_device *dev);
static void ni65_timeout(struct net_device *dev, unsigned int txqueue);
@@ -418,6 +418,7 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr)
{
int i,j;
struct priv *p;
+ u8 addr[ETH_ALEN];
unsigned long flags;
dev->irq = irq;
@@ -444,7 +445,8 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr)
return -ENODEV;
for(j=0;j<6;j++)
- dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);
+ addr[j] = inb(ioaddr+cards[i].addr_offset+j);
+ eth_hw_addr_set(dev, addr);
if( (j=ni65_alloc_buffer(dev)) < 0) {
release_region(ioaddr, cards[i].total_size);
@@ -566,7 +568,7 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr)
/*
* set lance register and trigger init
*/
-static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
+static void ni65_init_lance(struct priv *p,const unsigned char *daddr,int filter,int mode)
{
int i;
u32 pib;
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index f5c50ff377ff..c20c369c7eb8 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -860,7 +860,9 @@ static int pcnet32_nway_reset(struct net_device *dev)
}
static void pcnet32_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct pcnet32_private *lp = netdev_priv(dev);
@@ -871,7 +873,9 @@ static void pcnet32_get_ringparam(struct net_device *dev,
}
static int pcnet32_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long flags;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 94879cf8b420..6ceb1cdf6eba 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -619,8 +619,11 @@ static int xgbe_get_module_eeprom(struct net_device *netdev,
return pdata->phy_if.module_eeprom(pdata, eeprom, data);
}
-static void xgbe_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+static void
+xgbe_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
@@ -631,7 +634,9 @@ static void xgbe_get_ringparam(struct net_device *netdev,
}
static int xgbe_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
unsigned int rx, tx;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 220dc42af31a..ff2d099aab21 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -869,7 +869,7 @@ static void xgene_enet_timeout(struct net_device *ndev, unsigned int txqueue)
for (i = 0; i < pdata->txq_cnt; i++) {
txq = netdev_get_tx_queue(ndev, i);
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
netif_tx_start_queue(txq);
}
}
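txq_trans_cond_update() replaces the direct store to txq->trans_start: it goes through READ_ONCE()/WRITE_ONCE() and skips the write when the stamp already equals jiffies, avoiding needless dirtying of a hot cache line now that the field is read locklessly (see the READ_ONCE() added in ag71xx further down). Roughly:

	static inline void txq_trans_cond_update(struct netdev_queue *txq)
	{
		unsigned long now = jiffies;

		if (READ_ONCE(txq->trans_start) != now)
			WRITE_ONCE(txq->trans_start, now);
	}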
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 95d3061c61be..8fcaf1639920 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -92,7 +92,7 @@ static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue);
-static void __mace_set_address(struct net_device *dev, void *addr);
+static void __mace_set_address(struct net_device *dev, const void *addr);
/*
* Load a receive DMA channel with a base address and ring length
@@ -197,6 +197,7 @@ static int mace_probe(struct platform_device *pdev)
unsigned char *addr;
struct net_device *dev;
unsigned char checksum = 0;
+ u8 macaddr[ETH_ALEN];
int err;
dev = alloc_etherdev(PRIV_BYTES);
@@ -229,8 +230,9 @@ static int mace_probe(struct platform_device *pdev)
for (j = 0; j < 6; ++j) {
u8 v = bitrev8(addr[j<<4]);
checksum ^= v;
- dev->dev_addr[j] = v;
+ macaddr[j] = v;
}
+ eth_hw_addr_set(dev, macaddr);
for (; j < 8; ++j) {
checksum ^= bitrev8(addr[j<<4]);
}
@@ -315,11 +317,12 @@ static void mace_reset(struct net_device *dev)
* Load the address on a mace controller.
*/
-static void __mace_set_address(struct net_device *dev, void *addr)
+static void __mace_set_address(struct net_device *dev, const void *addr)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace *mb = mp->mace;
- unsigned char *p = addr;
+ const unsigned char *p = addr;
+ u8 macaddr[ETH_ALEN];
int i;
/* load up the hardware address */
@@ -331,7 +334,8 @@ static void __mace_set_address(struct net_device *dev, void *addr)
;
}
for (i = 0; i < 6; ++i)
- mb->padr = dev->dev_addr[i] = p[i];
+ mb->padr = macaddr[i] = p[i];
+ eth_hw_addr_set(dev, macaddr);
if (mp->chipid != BROKEN_ADDRCHG_REV)
mb->iac = 0;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a9ef0544e30f..a418238f6309 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -812,7 +812,9 @@ static int aq_ethtool_set_pauseparam(struct net_device *ndev,
}
static void aq_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg;
@@ -827,7 +829,9 @@ static void aq_get_ringparam(struct net_device *ndev,
}
static int aq_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
const struct aq_hw_caps_s *hw_caps;
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
index e230d8d0ff73..e7a9f9863258 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.c
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -144,12 +144,13 @@ static void ax88796c_set_mac_addr(struct net_device *ndev)
static void ax88796c_load_mac_addr(struct net_device *ndev)
{
struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ u8 addr[ETH_ALEN];
u16 temp;
lockdep_assert_held(&ax_local->spi_lock);
/* Try the device tree first */
- if (!eth_platform_get_mac_address(&ax_local->spi->dev, ndev->dev_addr) &&
+ if (!platform_get_ethdev_address(&ax_local->spi->dev, ndev) &&
is_valid_ether_addr(ndev->dev_addr)) {
if (netif_msg_probe(ax_local))
dev_info(&ax_local->spi->dev,
@@ -159,18 +160,19 @@ static void ax88796c_load_mac_addr(struct net_device *ndev)
/* Read the MAC address from AX88796C */
temp = AX_READ(&ax_local->ax_spi, P3_MACASR0);
- ndev->dev_addr[5] = (u8)temp;
- ndev->dev_addr[4] = (u8)(temp >> 8);
+ addr[5] = (u8)temp;
+ addr[4] = (u8)(temp >> 8);
temp = AX_READ(&ax_local->ax_spi, P3_MACASR1);
- ndev->dev_addr[3] = (u8)temp;
- ndev->dev_addr[2] = (u8)(temp >> 8);
+ addr[3] = (u8)temp;
+ addr[2] = (u8)(temp >> 8);
temp = AX_READ(&ax_local->ax_spi, P3_MACASR2);
- ndev->dev_addr[1] = (u8)temp;
- ndev->dev_addr[0] = (u8)(temp >> 8);
+ addr[1] = (u8)temp;
+ addr[0] = (u8)(temp >> 8);
- if (is_valid_ether_addr(ndev->dev_addr)) {
+ if (is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(ndev, addr);
if (netif_msg_probe(ax_local))
dev_info(&ax_local->spi->dev,
"MAC address read from ASIX chip\n");
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 88d2ab748399..ff924f06581e 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -766,7 +766,7 @@ static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
unsigned long timestamp;
u32 rx_sm, tx_sm, rx_fd;
- timestamp = netdev_get_tx_queue(ag->ndev, 0)->trans_start;
+ timestamp = READ_ONCE(netdev_get_tx_queue(ag->ndev, 0)->trans_start);
if (likely(time_before(jiffies, timestamp + HZ / 10)))
return false;
@@ -1024,72 +1024,6 @@ static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode,
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
}
-static void ag71xx_mac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_NA:
- break;
- case PHY_INTERFACE_MODE_MII:
- if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
- ag71xx_is(ag, AR9340) ||
- ag71xx_is(ag, QCA9530) ||
- (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_GMII:
- if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
- (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
- (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_SGMII:
- if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_RMII:
- if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_RGMII:
- if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
- (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
- break;
- goto unsupported;
- default:
- goto unsupported;
- }
-
- phylink_set(mask, MII);
-
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, Autoneg);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- if (state->interface == PHY_INTERFACE_MODE_NA ||
- state->interface == PHY_INTERFACE_MODE_SGMII ||
- state->interface == PHY_INTERFACE_MODE_RGMII ||
- state->interface == PHY_INTERFACE_MODE_GMII) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-unsupported:
- linkmode_zero(supported);
-}
-
static void ag71xx_mac_pcs_get_state(struct phylink_config *config,
struct phylink_link_state *state)
{
@@ -1163,7 +1097,7 @@ static void ag71xx_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
- .validate = ag71xx_mac_validate,
+ .validate = phylink_generic_validate,
.mac_pcs_get_state = ag71xx_mac_pcs_get_state,
.mac_an_restart = ag71xx_mac_an_restart,
.mac_config = ag71xx_mac_config,
@@ -1177,6 +1111,34 @@ static int ag71xx_phylink_setup(struct ag71xx *ag)
ag->phylink_config.dev = &ag->ndev->dev;
ag->phylink_config.type = PHYLINK_NETDEV;
+ ag->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
+
+ if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
+ ag71xx_is(ag, AR9340) ||
+ ag71xx_is(ag, QCA9530) ||
+ (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ ag->phylink_config.supported_interfaces);
+
+ if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
+ (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
+ (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ ag->phylink_config.supported_interfaces);
+
+ if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ ag->phylink_config.supported_interfaces);
+
+ if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ ag->phylink_config.supported_interfaces);
+
+ if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
+ (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
+ __set_bit(PHY_INTERFACE_MODE_RGMII,
+ ag->phylink_config.supported_interfaces);
phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
ag->phy_if_mode, &ag71xx_phylink_mac_ops);
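Instead of hand-rolled .validate callbacks such as the deleted ag71xx_mac_validate(), drivers now describe themselves declaratively: mac_capabilities lists speed/duplex/pause support, supported_interfaces marks the PHY interface modes the MAC can drive, and phylink_generic_validate() derives the ethtool link-mode masks from the two. The same conversion appears again for macb below. The minimal shape for a new adopter (hypothetical MAC driver):

	static void foo_fill_phylink_config(struct phylink_config *config)
	{
		config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
					   MAC_10 | MAC_100 | MAC_1000FD;
		__set_bit(PHY_INTERFACE_MODE_RGMII,
			  config->supported_interfaces);
	}

	static const struct phylink_mac_ops foo_phylink_mac_ops = {
		.validate = phylink_generic_validate,
		/* .mac_config and friends unchanged */
	};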
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index b4c9e805e981..6a969969d221 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3438,7 +3438,9 @@ static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
}
static void atl1_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
@@ -3451,7 +3453,9 @@ static void atl1_get_ringparam(struct net_device *netdev,
}
static int atl1_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 969591bbc066..e5857e88c207 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1961,7 +1961,9 @@ static int b44_set_link_ksettings(struct net_device *dev,
}
static void b44_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct b44 *bp = netdev_priv(dev);
@@ -1972,7 +1974,9 @@ static void b44_get_ringparam(struct net_device *dev,
}
static int b44_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct b44 *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a568994a03a6..b04e423c446a 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1497,8 +1497,11 @@ static int bcm_enet_set_link_ksettings(struct net_device *dev,
}
}
-static void bcm_enet_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+static void
+bcm_enet_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
@@ -1512,7 +1515,9 @@ static void bcm_enet_get_ringparam(struct net_device *dev,
}
static int bcm_enet_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
int was_running;
@@ -2579,8 +2584,11 @@ static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
}
}
-static void bcm_enetsw_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+static void
+bcm_enetsw_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
@@ -2595,8 +2603,11 @@ static void bcm_enetsw_get_ringparam(struct net_device *dev,
ering->tx_pending = priv->tx_ring_size;
}
-static int bcm_enetsw_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+static int
+bcm_enetsw_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
int was_running;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index babc955ba64e..e20aafeb4ca9 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -7318,7 +7318,9 @@ static int bnx2_set_coalesce(struct net_device *dev,
}
static void
-bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2 *bp = netdev_priv(dev);
@@ -7389,7 +7391,9 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
}
static int
-bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2 *bp = netdev_priv(dev);
int rc;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e8e8c2d593c5..54a2334dee56 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -25,6 +25,7 @@
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
+#include <net/gro.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 472a3a478038..0e319ac7799f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1914,7 +1914,9 @@ static int bnx2x_set_coalesce(struct net_device *dev,
}
static void bnx2x_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1938,7 +1940,9 @@ static void bnx2x_get_ringparam(struct net_device *dev,
}
static int bnx2x_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2x *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 8c2cf5519787..2dac704dc346 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -586,7 +586,7 @@ static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
-static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
+static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr,
u8 vf_qid, bool set) {return 0; }
static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
struct bnx2x_config_rss_params *params) {return 0; }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 0b193edb73b8..2bb133ae61c3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -849,7 +849,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
memcpy(old, new, sizeof(struct nig_stats));
- memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
+ BUILD_BUG_ON(sizeof(estats->shared) != sizeof(pstats->mac_stx[1]));
+ memcpy(&(estats->shared), &(pstats->mac_stx[1]),
sizeof(struct mac_stx));
estats->brb_drop_hi = pstats->brb_drop_hi;
estats->brb_drop_lo = pstats->brb_drop_lo;
@@ -1634,9 +1635,9 @@ void bnx2x_stats_init(struct bnx2x *bp)
REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
if (!CHIP_IS_E3(bp)) {
REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
- &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
+ &(bp->port.old_nig_stats.egress_mac_pkt0), 2);
REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
- &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
+ &(bp->port.old_nig_stats.egress_mac_pkt1), 2);
}
/* Prepare statistics ramrod data */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index d55e63692cf3..ae93c078707b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -36,10 +36,14 @@ struct nig_stats {
u32 pbf_octets;
u32 pbf_packet;
u32 safc_inp;
- u32 egress_mac_pkt0_lo;
- u32 egress_mac_pkt0_hi;
- u32 egress_mac_pkt1_lo;
- u32 egress_mac_pkt1_hi;
+ struct_group(egress_mac_pkt0,
+ u32 egress_mac_pkt0_lo;
+ u32 egress_mac_pkt0_hi;
+ );
+ struct_group(egress_mac_pkt1,
+ u32 egress_mac_pkt1_lo;
+ u32 egress_mac_pkt1_hi;
+ );
};
enum bnx2x_stats_event {
@@ -83,6 +87,7 @@ struct bnx2x_eth_stats {
u32 no_buff_discard_hi;
u32 no_buff_discard_lo;
+ struct_group(shared,
u32 rx_stat_ifhcinbadoctets_hi;
u32 rx_stat_ifhcinbadoctets_lo;
u32 tx_stat_ifhcoutbadoctets_hi;
@@ -159,6 +164,7 @@ struct bnx2x_eth_stats {
u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
u32 tx_stat_bmac_ufl_hi;
u32 tx_stat_bmac_ufl_lo;
+ );
u32 pause_frames_received_hi;
u32 pause_frames_received_lo;
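struct_group() (from include/linux/stddef.h) wraps consecutive members in an anonymous union so they stay addressable individually while also forming a named sub-struct; a memcpy() on the group then has an exact, compiler-checkable size, which is what the new BUILD_BUG_ON() in bnx2x_hw_stats_update() asserts and what the cxgb3/cxgb4 hunks below rely on for their immediate-data and firmware-header copies. Schematically, with a hypothetical struct:

	struct counters {
		u32 other;
		struct_group(pair,
			u32 lo;
			u32 hi;
		);
	};

	/* Copies exactly lo and hi; FORTIFY_SOURCE can verify the bounds. */
	memcpy(&dst.pair, &src.pair, sizeof(dst.pair));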
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c04ea83188e2..c057b1df86a9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -37,6 +37,7 @@
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
+#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8188d55722e4..15253396096a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -775,7 +775,9 @@ skip_tpa_stats:
}
static void bnxt_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
@@ -794,7 +796,9 @@ static void bnxt_get_ringparam(struct net_device *dev,
}
static int bnxt_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 85ca3909859d..283f3c1f1195 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12390,7 +12390,10 @@ static int tg3_nway_reset(struct net_device *dev)
return r;
}
-static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+static void tg3_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
@@ -12411,7 +12414,10 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
ering->tx_pending = tp->napi[0].tx_pending;
}
-static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+static int tg3_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
int i, irq_sync = 0, err = 0;
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 391b85f25141..4b4ad731897f 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -405,7 +405,9 @@ static int bnad_set_coalesce(struct net_device *netdev,
static void
bnad_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
struct bnad *bnad = netdev_priv(netdev);
@@ -418,7 +420,9 @@ bnad_get_ringparam(struct net_device *netdev,
static int
bnad_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
int i, current_err, err = 0;
struct bnad *bnad = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index ffce528aa00e..d4da9adf6777 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -506,74 +506,6 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
netdev_err(bp->dev, "adjusting tx_clk failed.\n");
}
-static void macb_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct net_device *ndev = to_net_dev(config->dev);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- struct macb *bp = netdev_priv(ndev);
-
- /* We only support MII, RMII, GMII, RGMII & SGMII. */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_RMII &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- state->interface != PHY_INTERFACE_MODE_10GBASER &&
- !phy_interface_mode_is_rgmii(state->interface)) {
- linkmode_zero(supported);
- return;
- }
-
- if (!macb_is_gem(bp) &&
- (state->interface == PHY_INTERFACE_MODE_GMII ||
- phy_interface_mode_is_rgmii(state->interface))) {
- linkmode_zero(supported);
- return;
- }
-
- if (state->interface == PHY_INTERFACE_MODE_10GBASER &&
- !(bp->caps & MACB_CAPS_HIGH_SPEED &&
- bp->caps & MACB_CAPS_PCS)) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Asym_Pause);
-
- if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
- (state->interface == PHY_INTERFACE_MODE_NA ||
- state->interface == PHY_INTERFACE_MODE_10GBASER)) {
- phylink_set_10g_modes(mask);
- phylink_set(mask, 10000baseKR_Full);
- if (state->interface != PHY_INTERFACE_MODE_NA)
- goto out;
- }
-
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
- (state->interface == PHY_INTERFACE_MODE_NA ||
- state->interface == PHY_INTERFACE_MODE_GMII ||
- state->interface == PHY_INTERFACE_MODE_SGMII ||
- phy_interface_mode_is_rgmii(state->interface))) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
-
- if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
- phylink_set(mask, 1000baseT_Half);
- }
-out:
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
phy_interface_t interface, int speed,
int duplex)
@@ -815,7 +747,7 @@ static int macb_mac_prepare(struct phylink_config *config, unsigned int mode,
}
static const struct phylink_mac_ops macb_phylink_ops = {
- .validate = macb_validate,
+ .validate = phylink_generic_validate,
.mac_prepare = macb_mac_prepare,
.mac_config = macb_mac_config,
.mac_link_down = macb_mac_link_down,
@@ -882,6 +814,35 @@ static int macb_mii_probe(struct net_device *dev)
bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state;
}
+ bp->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ bp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ bp->phylink_config.supported_interfaces);
+
+ /* Determine what modes are supported */
+ if (macb_is_gem(bp) && (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)) {
+ bp->phylink_config.mac_capabilities |= MAC_1000FD;
+ if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
+ bp->phylink_config.mac_capabilities |= MAC_1000HD;
+
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ bp->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(bp->phylink_config.supported_interfaces);
+
+ if (bp->caps & MACB_CAPS_PCS)
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ bp->phylink_config.supported_interfaces);
+
+ if (bp->caps & MACB_CAPS_HIGH_SPEED) {
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ bp->phylink_config.supported_interfaces);
+ bp->phylink_config.mac_capabilities |= MAC_10000FD;
+ }
+ }
+
bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
bp->phy_interface, &macb_phylink_ops);
if (IS_ERR(bp->phylink)) {
@@ -3101,7 +3062,9 @@ static int macb_set_link_ksettings(struct net_device *netdev,
}
static void macb_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct macb *bp = netdev_priv(netdev);
@@ -3113,7 +3076,9 @@ static void macb_get_ringparam(struct net_device *netdev,
}
static int macb_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct macb *bp = netdev_priv(netdev);
u32 new_rx_size, new_tx_size;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 2b9747867d4c..2c10ae3f7fc1 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -947,7 +947,9 @@ static int lio_set_phys_id(struct net_device *netdev,
static void
lio_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
@@ -1252,8 +1254,11 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
return 0;
}
-static int lio_ethtool_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+static int
+lio_ethtool_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
u32 rx_count, tx_count, rx_count_old, tx_count_old;
struct lio *lio = GET_LIO(netdev);
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 4e39d712e121..4b4ffdd1044d 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -548,7 +548,7 @@ struct octeon_mgmt_cam_state {
};
static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
- unsigned char *addr)
+ const unsigned char *addr)
{
int i;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 7f2882109b16..5a9fad61e9ea 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -467,7 +467,9 @@ static int nicvf_get_coalesce(struct net_device *netdev,
}
static void nicvf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
@@ -479,7 +481,9 @@ static void nicvf_get_ringparam(struct net_device *netdev,
}
static int nicvf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 609820e214a3..18acd7cf3d6d 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -710,7 +710,9 @@ static int set_pauseparam(struct net_device *dev,
return 0;
}
-static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct adapter *adapter = dev->ml_priv;
int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
@@ -724,7 +726,9 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e->tx_pending = adapter->params.sge.cmdQ_size[0];
}
-static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct adapter *adapter = dev->ml_priv;
int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index bfffcaeee624..e2637bd2f423 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1948,7 +1948,9 @@ static int set_pauseparam(struct net_device *dev,
return 0;
}
-static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -1964,7 +1966,9 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e->tx_pending = q->txq_size[0];
}
-static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index c3afec1041f8..70f528a9c727 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -126,8 +126,10 @@ struct rsp_desc { /* response queue descriptor */
struct rss_header rss_hdr;
__be32 flags;
__be32 len_cq;
- u8 imm_data[47];
- u8 intr_gen;
+ struct_group(immediate,
+ u8 imm_data[47];
+ u8 intr_gen;
+ );
};
/*
@@ -925,7 +927,8 @@ static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
if (skb) {
__skb_put(skb, IMMED_PKT_SIZE);
- skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
+ BUILD_BUG_ON(IMMED_PKT_SIZE != sizeof(resp->immediate));
+ skb_copy_to_linear_data(skb, &resp->immediate, IMMED_PKT_SIZE);
}
return skb;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 129352bbe114..6c790af92170 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -890,7 +890,9 @@ static int set_pauseparam(struct net_device *dev,
return 0;
}
-static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
const struct port_info *pi = netdev_priv(dev);
const struct sge *s = &pi->adapter->sge;
@@ -906,7 +908,9 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}
-static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
int i;
const struct port_info *pi = netdev_priv(dev);
@@ -1989,6 +1993,15 @@ static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
return 0;
}
+static bool cxgb4_fw_mod_type_info_available(unsigned int fw_mod_type)
+{
+	/* Read port module EEPROM as long as it is plugged in and
+ * safe to read.
+ */
+ return (fw_mod_type != FW_PORT_MOD_TYPE_NONE &&
+ fw_mod_type != FW_PORT_MOD_TYPE_ERROR);
+}
+
static int cxgb4_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
@@ -1997,7 +2010,7 @@ static int cxgb4_get_module_info(struct net_device *dev,
struct adapter *adapter = pi->adapter;
int ret;
- if (!t4_is_inserted_mod_type(pi->mod_type))
+ if (!cxgb4_fw_mod_type_info_available(pi->mod_type))
return -EINVAL;
switch (pi->port_type) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index fa5b596ff23a..f889f404305c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1842,8 +1842,10 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
* (including the VLAN tag) into the header so we reject anything
* smaller than that ...
*/
- fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
- sizeof(wr->ethtype) + sizeof(wr->vlantci);
+ BUILD_BUG_ON(sizeof(wr->firmware) !=
+ (sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) + sizeof(wr->vlantci)));
+ fw_hdr_copy_len = sizeof(wr->firmware);
ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len);
if (ret)
goto out_free;
@@ -1924,7 +1926,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
wr->equiq_to_len16 = cpu_to_be32(wr_mid);
wr->r3[0] = cpu_to_be32(0);
wr->r3[1] = cpu_to_be32(0);
- skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+ skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len);
end = (u64 *)wr + flits;
/* If this is a Large Send Offload packet we'll put in an LSO CPL
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 0a326c054707..2419459a0b85 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -794,10 +794,12 @@ struct fw_eth_tx_pkt_vm_wr {
__be32 op_immdlen;
__be32 equiq_to_len16;
__be32 r3[2];
- u8 ethmacdst[6];
- u8 ethmacsrc[6];
- __be16 ethtype;
- __be16 vlantci;
+ struct_group(firmware,
+ u8 ethmacdst[ETH_ALEN];
+ u8 ethmacsrc[ETH_ALEN];
+ __be16 ethtype;
+ __be16 vlantci;
+ );
};
#define FW_CMD_MAX_TIMEOUT 10000
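
struct_group(), from <linux/stddef.h>, wraps the four header fields in a mirrored anonymous union, so they stay addressable individually while also forming one named region; the BUILD_BUG_ON() in the sge.c hunk above pins the group size to the sum of its members. A minimal standalone sketch of the pattern (the pkt_hdr layout is illustrative only):

#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

struct pkt_hdr {
	__be32 flags;
	struct_group(addrs,		/* one named region over both fields */
		u8 dst[6];
		u8 src[6];
	);
};

static void pkt_hdr_copy_addrs(struct pkt_hdr *hdr, const void *data)
{
	/* FORTIFY_SOURCE now sees a 12-byte destination instead of a
	 * 6-byte field being deliberately overrun.
	 */
	memcpy(&hdr->addrs, data, sizeof(hdr->addrs));
}
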
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index ae9cca768d74..acac2be0e3f0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1591,7 +1591,9 @@ static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
* first Queue Set.
*/
static void cxgb4vf_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
const struct port_info *pi = netdev_priv(dev);
const struct sge *s = &pi->adapter->sge;
@@ -1614,7 +1616,9 @@ static void cxgb4vf_get_ringparam(struct net_device *dev,
* device -- after vetting them of course!
*/
static int cxgb4vf_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
const struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 0295b2406646..43b2ceb6aa32 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1167,10 +1167,7 @@ netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
struct cpl_tx_pkt_core *cpl;
const struct skb_shared_info *ssi;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
- const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
- sizeof(wr->ethmacsrc) +
- sizeof(wr->ethtype) +
- sizeof(wr->vlantci));
+ const size_t fw_hdr_copy_len = sizeof(wr->firmware);
/*
* The chip minimum packet length is 10 octets but the firmware
@@ -1267,7 +1264,7 @@ netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
wr->equiq_to_len16 = cpu_to_be32(wr_mid);
wr->r3[0] = cpu_to_be32(0);
wr->r3[1] = cpu_to_be32(0);
- skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+ skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len);
end = (u64 *)wr + flits;
/*
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 84251b85fc93..21a70b1f0ac5 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -242,12 +242,15 @@ static int mac89x0_device_probe(struct platform_device *pdev)
pr_info("No EEPROM, giving up now.\n");
goto out1;
} else {
+ u8 addr[ETH_ALEN];
+
for (i = 0; i < ETH_ALEN; i += 2) {
/* Big-endian (why??!) */
unsigned short s = readreg(dev, PP_IA + i);
- dev->dev_addr[i] = s >> 8;
- dev->dev_addr[i+1] = s & 0xff;
+ addr[i] = s >> 8;
+ addr[i+1] = s & 0xff;
}
+ eth_hw_addr_set(dev, addr);
}
dev->irq = SLOT2IRQ(slot);
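
The mac89x0 hunk above is part of the conversion away from writing dev->dev_addr in place: netdev->dev_addr is on its way to becoming const, so drivers assemble the address in a local buffer and publish it through eth_hw_addr_set(). A sketch of that pattern under the same assumption; read_word stands in for the device-specific register access:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void example_set_mac(struct net_device *dev, u16 (*read_word)(int))
{
	u8 addr[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN; i += 2) {
		u16 w = read_word(i);	/* device returns big-endian words */

		addr[i] = w >> 8;
		addr[i + 1] = w & 0xff;
	}
	eth_hw_addr_set(dev, addr);	/* single point of publication */
}
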
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 6ded4d9fa32a..6c11f9d62526 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -177,7 +177,9 @@ static void enic_get_strings(struct net_device *netdev, u32 stringset,
}
static void enic_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_enet_config *c = &enic->config;
@@ -189,7 +191,9 @@ static void enic_get_ringparam(struct net_device *netdev,
}
static int enic_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_enet_config *c = &enic->config;
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 941f175fb911..07add311f65d 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2105,7 +2105,9 @@ static void gmac_get_pauseparam(struct net_device *netdev,
}
static void gmac_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
@@ -2123,7 +2125,9 @@ static void gmac_get_ringparam(struct net_device *netdev,
}
static int gmac_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
int err = 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index f9955308b93d..dfa784339781 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -683,7 +683,9 @@ static int be_get_link_ksettings(struct net_device *netdev,
}
static void be_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct be_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/engleder/Kconfig b/drivers/net/ethernet/engleder/Kconfig
new file mode 100644
index 000000000000..614dcc65c634
--- /dev/null
+++ b/drivers/net/ethernet/engleder/Kconfig
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Engleder network device configuration
+#
+
+config NET_VENDOR_ENGLEDER
+ bool "Engleder devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Engleder devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_ENGLEDER
+
+config TSNEP
+ tristate "TSN endpoint support"
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select PHYLIB
+ help
+ Support for the Engleder TSN endpoint Ethernet MAC IP Core.
+
+ To compile this driver as a module, choose M here. The module will be
+ called tsnep.
+
+config TSNEP_SELFTESTS
+ bool "TSN endpoint self test support"
+ default n
+ depends on TSNEP
+ help
+ This enables self test support within the TSN endpoint driver.
+
+ If unsure, say N.
+
+endif # NET_VENDOR_ENGLEDER
diff --git a/drivers/net/ethernet/engleder/Makefile b/drivers/net/ethernet/engleder/Makefile
new file mode 100644
index 000000000000..cce2191cb889
--- /dev/null
+++ b/drivers/net/ethernet/engleder/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Engleder Ethernet drivers
+#
+
+obj-$(CONFIG_TSNEP) += tsnep.o
+
+tsnep-y := tsnep_main.o tsnep_ethtool.o tsnep_ptp.o \
+	    tsnep_tc.o
+tsnep-$(CONFIG_TSNEP_SELFTESTS) += tsnep_selftests.o
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
new file mode 100644
index 000000000000..23bbece6b7de
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#ifndef _TSNEP_H
+#define _TSNEP_H
+
+#include "tsnep_hw.h"
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/ethtool.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/miscdevice.h>
+
+#define TSNEP "tsnep"
+
+#define TSNEP_RING_SIZE 256
+#define TSNEP_RING_ENTRIES_PER_PAGE (PAGE_SIZE / TSNEP_DESC_SIZE)
+#define TSNEP_RING_PAGE_COUNT (TSNEP_RING_SIZE / TSNEP_RING_ENTRIES_PER_PAGE)
+
+#define TSNEP_QUEUES 1
+
+struct tsnep_gcl {
+ void __iomem *addr;
+
+ u64 base_time;
+ u64 cycle_time;
+ u64 cycle_time_extension;
+
+ struct tsnep_gcl_operation operation[TSNEP_GCL_COUNT];
+ int count;
+
+ u64 change_limit;
+
+ u64 start_time;
+ bool change;
+};
+
+struct tsnep_tx_entry {
+ struct tsnep_tx_desc *desc;
+ struct tsnep_tx_desc_wb *desc_wb;
+ dma_addr_t desc_dma;
+ bool owner_user_flag;
+
+ u32 properties;
+
+ struct sk_buff *skb;
+ size_t len;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+};
+
+struct tsnep_tx {
+ struct tsnep_adapter *adapter;
+ void __iomem *addr;
+
+ void *page[TSNEP_RING_PAGE_COUNT];
+ dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
+
+ /* TX ring lock */
+ spinlock_t lock;
+ struct tsnep_tx_entry entry[TSNEP_RING_SIZE];
+ int write;
+ int read;
+ u32 owner_counter;
+ int increment_owner_counter;
+
+ u32 packets;
+ u32 bytes;
+ u32 dropped;
+};
+
+struct tsnep_rx_entry {
+ struct tsnep_rx_desc *desc;
+ struct tsnep_rx_desc_wb *desc_wb;
+ dma_addr_t desc_dma;
+
+ u32 properties;
+
+ struct sk_buff *skb;
+ size_t len;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+};
+
+struct tsnep_rx {
+ struct tsnep_adapter *adapter;
+ void __iomem *addr;
+
+ void *page[TSNEP_RING_PAGE_COUNT];
+ dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
+
+ struct tsnep_rx_entry entry[TSNEP_RING_SIZE];
+ int read;
+ u32 owner_counter;
+ int increment_owner_counter;
+
+ u32 packets;
+ u32 bytes;
+ u32 dropped;
+ u32 multicast;
+};
+
+struct tsnep_queue {
+ struct tsnep_adapter *adapter;
+
+ struct tsnep_tx *tx;
+ struct tsnep_rx *rx;
+
+ struct napi_struct napi;
+
+ u32 irq_mask;
+};
+
+struct tsnep_adapter {
+ struct net_device *netdev;
+ u8 mac_address[ETH_ALEN];
+ struct mii_bus *mdiobus;
+ bool suppress_preamble;
+ phy_interface_t phy_mode;
+ struct phy_device *phydev;
+ int msg_enable;
+
+ struct platform_device *pdev;
+ struct device *dmadev;
+ void __iomem *addr;
+ int irq;
+
+ bool gate_control;
+ /* gate control lock */
+ struct mutex gate_control_lock;
+ bool gate_control_active;
+ struct tsnep_gcl gcl[2];
+ int next_gcl;
+
+ struct hwtstamp_config hwtstamp_config;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ /* ptp clock lock */
+ spinlock_t ptp_lock;
+
+ int num_tx_queues;
+ struct tsnep_tx tx[TSNEP_MAX_QUEUES];
+ int num_rx_queues;
+ struct tsnep_rx rx[TSNEP_MAX_QUEUES];
+
+ int num_queues;
+ struct tsnep_queue queue[TSNEP_MAX_QUEUES];
+};
+
+extern const struct ethtool_ops tsnep_ethtool_ops;
+
+int tsnep_ptp_init(struct tsnep_adapter *adapter);
+void tsnep_ptp_cleanup(struct tsnep_adapter *adapter);
+int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+
+int tsnep_tc_init(struct tsnep_adapter *adapter);
+void tsnep_tc_cleanup(struct tsnep_adapter *adapter);
+int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data);
+
+#if IS_ENABLED(CONFIG_TSNEP_SELFTESTS)
+int tsnep_ethtool_get_test_count(void);
+void tsnep_ethtool_get_test_strings(u8 *data);
+void tsnep_ethtool_self_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data);
+#else
+static inline int tsnep_ethtool_get_test_count(void)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void tsnep_ethtool_get_test_strings(u8 *data)
+{
+ /* not enabled */
+}
+
+static inline void tsnep_ethtool_self_test(struct net_device *dev,
+ struct ethtool_test *eth_test,
+ u64 *data)
+{
+ /* not enabled */
+}
+#endif /* CONFIG_TSNEP_SELFTESTS */
+
+void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time);
+
+#endif /* _TSNEP_H */
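
The selftest hooks above use the common IS_ENABLED() stub pattern: when CONFIG_TSNEP_SELFTESTS is off, static inline no-ops replace the real functions, so call sites need no #ifdef and the compiler still type-checks them. A generic sketch of the idiom (CONFIG_FOO_FEATURE and the foo_* names are illustrative):

#include <linux/errno.h>
#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_FOO_FEATURE)
int foo_feature_count(void);		/* real implementation elsewhere */
#else
static inline int foo_feature_count(void)
{
	return -EOPNOTSUPP;		/* compiled out; callers cope */
}
#endif
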
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
new file mode 100644
index 000000000000..e6760dc68ddd
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+static const char tsnep_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "rx_bytes",
+ "rx_dropped",
+ "rx_multicast",
+ "rx_phy_errors",
+ "rx_forwarded_phy_errors",
+ "rx_invalid_frame_errors",
+ "tx_packets",
+ "tx_bytes",
+ "tx_dropped",
+};
+
+struct tsnep_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_multicast;
+ u64 rx_phy_errors;
+ u64 rx_forwarded_phy_errors;
+ u64 rx_invalid_frame_errors;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_dropped;
+};
+
+#define TSNEP_STATS_COUNT (sizeof(struct tsnep_stats) / sizeof(u64))
+
+static const char tsnep_rx_queue_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_%d_packets",
+ "rx_%d_bytes",
+ "rx_%d_dropped",
+ "rx_%d_multicast",
+ "rx_%d_no_descriptor_errors",
+ "rx_%d_buffer_too_small_errors",
+ "rx_%d_fifo_overflow_errors",
+ "rx_%d_invalid_frame_errors",
+};
+
+struct tsnep_rx_queue_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_multicast;
+ u64 rx_no_descriptor_errors;
+ u64 rx_buffer_too_small_errors;
+ u64 rx_fifo_overflow_errors;
+ u64 rx_invalid_frame_errors;
+};
+
+#define TSNEP_RX_QUEUE_STATS_COUNT (sizeof(struct tsnep_rx_queue_stats) / \
+ sizeof(u64))
+
+static const char tsnep_tx_queue_stats_strings[][ETH_GSTRING_LEN] = {
+ "tx_%d_packets",
+ "tx_%d_bytes",
+ "tx_%d_dropped",
+};
+
+struct tsnep_tx_queue_stats {
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_dropped;
+};
+
+#define TSNEP_TX_QUEUE_STATS_COUNT (sizeof(struct tsnep_tx_queue_stats) / \
+ sizeof(u64))
+
+static void tsnep_ethtool_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ strscpy(drvinfo->driver, TSNEP, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(&adapter->pdev->dev),
+ sizeof(drvinfo->bus_info));
+}
+
+static int tsnep_ethtool_get_regs_len(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int len;
+ int num_additional_queues;
+
+ len = TSNEP_MAC_SIZE;
+
+	/* the first queue pair is within TSNEP_MAC_SIZE; only queues beyond
+	 * the first pair extend the register length by TSNEP_QUEUE_SIZE each
+	 */
+ num_additional_queues =
+ max(adapter->num_tx_queues, adapter->num_rx_queues) - 1;
+ len += TSNEP_QUEUE_SIZE * num_additional_queues;
+
+ return len;
+}
+
+static void tsnep_ethtool_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs,
+ void *p)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ regs->version = 1;
+
+ memcpy_fromio(p, adapter->addr, regs->len);
+}
+
+static u32 tsnep_ethtool_get_msglevel(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void tsnep_ethtool_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = data;
+}
+
+static void tsnep_ethtool_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int rx_count = adapter->num_rx_queues;
+ int tx_count = adapter->num_tx_queues;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, tsnep_stats_strings, sizeof(tsnep_stats_strings));
+ data += sizeof(tsnep_stats_strings);
+
+ for (i = 0; i < rx_count; i++) {
+ for (j = 0; j < TSNEP_RX_QUEUE_STATS_COUNT; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ tsnep_rx_queue_stats_strings[j], i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < tx_count; i++) {
+ for (j = 0; j < TSNEP_TX_QUEUE_STATS_COUNT; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ tsnep_tx_queue_stats_strings[j], i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ case ETH_SS_TEST:
+ tsnep_ethtool_get_test_strings(data);
+ break;
+ }
+}
+
+static void tsnep_ethtool_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int rx_count = adapter->num_rx_queues;
+ int tx_count = adapter->num_tx_queues;
+ struct tsnep_stats tsnep_stats;
+ struct tsnep_rx_queue_stats tsnep_rx_queue_stats;
+ struct tsnep_tx_queue_stats tsnep_tx_queue_stats;
+ u32 reg;
+ int i;
+
+ memset(&tsnep_stats, 0, sizeof(tsnep_stats));
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ tsnep_stats.rx_packets += adapter->rx[i].packets;
+ tsnep_stats.rx_bytes += adapter->rx[i].bytes;
+ tsnep_stats.rx_dropped += adapter->rx[i].dropped;
+ tsnep_stats.rx_multicast += adapter->rx[i].multicast;
+ }
+ reg = ioread32(adapter->addr + ECM_STAT);
+ tsnep_stats.rx_phy_errors =
+ (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
+ tsnep_stats.rx_forwarded_phy_errors =
+ (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
+ tsnep_stats.rx_invalid_frame_errors =
+ (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tsnep_stats.tx_packets += adapter->tx[i].packets;
+ tsnep_stats.tx_bytes += adapter->tx[i].bytes;
+ tsnep_stats.tx_dropped += adapter->tx[i].dropped;
+ }
+ memcpy(data, &tsnep_stats, sizeof(tsnep_stats));
+ data += TSNEP_STATS_COUNT;
+
+ for (i = 0; i < rx_count; i++) {
+ memset(&tsnep_rx_queue_stats, 0, sizeof(tsnep_rx_queue_stats));
+ tsnep_rx_queue_stats.rx_packets = adapter->rx[i].packets;
+ tsnep_rx_queue_stats.rx_bytes = adapter->rx[i].bytes;
+ tsnep_rx_queue_stats.rx_dropped = adapter->rx[i].dropped;
+ tsnep_rx_queue_stats.rx_multicast = adapter->rx[i].multicast;
+ reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
+ TSNEP_RX_STATISTIC);
+ tsnep_rx_queue_stats.rx_no_descriptor_errors =
+ (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
+ TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
+ tsnep_rx_queue_stats.rx_buffer_too_small_errors =
+ (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
+ TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
+ tsnep_rx_queue_stats.rx_fifo_overflow_errors =
+ (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
+ TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
+ tsnep_rx_queue_stats.rx_invalid_frame_errors =
+ (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
+ TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
+ memcpy(data, &tsnep_rx_queue_stats,
+ sizeof(tsnep_rx_queue_stats));
+ data += TSNEP_RX_QUEUE_STATS_COUNT;
+ }
+
+ for (i = 0; i < tx_count; i++) {
+ memset(&tsnep_tx_queue_stats, 0, sizeof(tsnep_tx_queue_stats));
+ tsnep_tx_queue_stats.tx_packets += adapter->tx[i].packets;
+ tsnep_tx_queue_stats.tx_bytes += adapter->tx[i].bytes;
+ tsnep_tx_queue_stats.tx_dropped += adapter->tx[i].dropped;
+ memcpy(data, &tsnep_tx_queue_stats,
+ sizeof(tsnep_tx_queue_stats));
+ data += TSNEP_TX_QUEUE_STATS_COUNT;
+ }
+}
+
+static int tsnep_ethtool_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int rx_count;
+ int tx_count;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ rx_count = adapter->num_rx_queues;
+ tx_count = adapter->num_tx_queues;
+ return TSNEP_STATS_COUNT +
+ TSNEP_RX_QUEUE_STATS_COUNT * rx_count +
+ TSNEP_TX_QUEUE_STATS_COUNT * tx_count;
+ case ETH_SS_TEST:
+ return tsnep_ethtool_get_test_count();
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tsnep_ethtool_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+const struct ethtool_ops tsnep_ethtool_ops = {
+ .get_drvinfo = tsnep_ethtool_get_drvinfo,
+ .get_regs_len = tsnep_ethtool_get_regs_len,
+ .get_regs = tsnep_ethtool_get_regs,
+ .get_msglevel = tsnep_ethtool_get_msglevel,
+ .set_msglevel = tsnep_ethtool_set_msglevel,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .self_test = tsnep_ethtool_self_test,
+ .get_strings = tsnep_ethtool_get_strings,
+ .get_ethtool_stats = tsnep_ethtool_get_ethtool_stats,
+ .get_sset_count = tsnep_ethtool_get_sset_count,
+ .get_ts_info = tsnep_ethtool_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
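
The stats plumbing above relies on three things staying in lockstep: the string tables, the u64-only stats structs, and the count that get_sset_count() reports. Because every counter is a u64, the sizeof-struct trick makes the count fall out of the struct size automatically; an assertion tying it to the string table is a cheap extra guard. A sketch with illustrative names:

#include <linux/build_bug.h>
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct foo_stats {			/* must contain only u64 counters */
	u64 rx_packets;
	u64 rx_bytes;
};

#define FOO_STATS_COUNT (sizeof(struct foo_stats) / sizeof(u64))

static const char foo_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"rx_bytes",
};

/* ethtool prints misaligned garbage if these ever diverge, so assert it */
static_assert(ARRAY_SIZE(foo_stats_strings) == FOO_STATS_COUNT);
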
diff --git a/drivers/net/ethernet/engleder/tsnep_hw.h b/drivers/net/ethernet/engleder/tsnep_hw.h
new file mode 100644
index 000000000000..71cc8577d640
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_hw.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+/* Hardware definition of TSNEP and EtherCAT MAC device */
+
+#ifndef _TSNEP_HW_H
+#define _TSNEP_HW_H
+
+#include <linux/types.h>
+
+/* type */
+#define ECM_TYPE 0x0000
+#define ECM_REVISION_MASK 0x000000FF
+#define ECM_REVISION_SHIFT 0
+#define ECM_VERSION_MASK 0x0000FF00
+#define ECM_VERSION_SHIFT 8
+#define ECM_QUEUE_COUNT_MASK 0x00070000
+#define ECM_QUEUE_COUNT_SHIFT 16
+#define ECM_GATE_CONTROL 0x02000000
+
+/* system time */
+#define ECM_SYSTEM_TIME_LOW 0x0008
+#define ECM_SYSTEM_TIME_HIGH 0x000C
+
+/* clock */
+#define ECM_CLOCK_RATE 0x0010
+#define ECM_CLOCK_RATE_OFFSET_MASK 0x7FFFFFFF
+#define ECM_CLOCK_RATE_OFFSET_SIGN 0x80000000
+
+/* interrupt */
+#define ECM_INT_ENABLE 0x0018
+#define ECM_INT_ACTIVE 0x001C
+#define ECM_INT_ACKNOWLEDGE 0x001C
+#define ECM_INT_LINK 0x00000020
+#define ECM_INT_TX_0 0x00000100
+#define ECM_INT_RX_0 0x00000200
+#define ECM_INT_ALL 0x7FFFFFFF
+#define ECM_INT_DISABLE 0x80000000
+
+/* reset */
+#define ECM_RESET 0x0020
+#define ECM_RESET_COMMON 0x00000001
+#define ECM_RESET_CHANNEL 0x00000100
+#define ECM_RESET_TXRX 0x00010000
+
+/* control and status */
+#define ECM_STATUS 0x0080
+#define ECM_LINK_MODE_OFF 0x01000000
+#define ECM_LINK_MODE_100 0x02000000
+#define ECM_LINK_MODE_1000 0x04000000
+#define ECM_NO_LINK 0x01000000
+#define ECM_LINK_MODE_MASK 0x06000000
+
+/* management data */
+#define ECM_MD_CONTROL 0x0084
+#define ECM_MD_STATUS 0x0084
+#define ECM_MD_PREAMBLE 0x00000001
+#define ECM_MD_READ 0x00000004
+#define ECM_MD_WRITE 0x00000002
+#define ECM_MD_ADDR_MASK 0x000000F8
+#define ECM_MD_ADDR_SHIFT 3
+#define ECM_MD_PHY_ADDR_MASK 0x00001F00
+#define ECM_MD_PHY_ADDR_SHIFT 8
+#define ECM_MD_BUSY 0x00000001
+#define ECM_MD_DATA_MASK 0xFFFF0000
+#define ECM_MD_DATA_SHIFT 16
+
+/* statistic */
+#define ECM_STAT 0x00B0
+#define ECM_STAT_RX_ERR_MASK 0x000000FF
+#define ECM_STAT_RX_ERR_SHIFT 0
+#define ECM_STAT_INV_FRM_MASK 0x0000FF00
+#define ECM_STAT_INV_FRM_SHIFT 8
+#define ECM_STAT_FWD_RX_ERR_MASK 0x00FF0000
+#define ECM_STAT_FWD_RX_ERR_SHIFT 16
+
+/* tsnep */
+#define TSNEP_MAC_SIZE 0x4000
+#define TSNEP_QUEUE_SIZE 0x1000
+#define TSNEP_QUEUE(n) ({ typeof(n) __n = (n); \
+ (__n) == 0 ? \
+ 0 : \
+ TSNEP_MAC_SIZE + TSNEP_QUEUE_SIZE * ((__n) - 1); })
+#define TSNEP_MAX_QUEUES 8
+#define TSNEP_MAX_FRAME_SIZE (2 * 1024) /* hardware actually supports 16k */
+#define TSNEP_DESC_SIZE 256
+#define TSNEP_DESC_OFFSET 128
+
+/* tsnep register */
+#define TSNEP_INFO 0x0100
+#define TSNEP_INFO_RX_ASSIGN 0x00010000
+#define TSNEP_INFO_TX_TIME 0x00020000
+#define TSNEP_CONTROL 0x0108
+#define TSNEP_CONTROL_TX_RESET 0x00000001
+#define TSNEP_CONTROL_TX_ENABLE 0x00000002
+#define TSNEP_CONTROL_TX_DMA_ERROR 0x00000010
+#define TSNEP_CONTROL_TX_DESC_ERROR 0x00000020
+#define TSNEP_CONTROL_RX_RESET 0x00000100
+#define TSNEP_CONTROL_RX_ENABLE 0x00000200
+#define TSNEP_CONTROL_RX_DISABLE 0x00000400
+#define TSNEP_CONTROL_RX_DMA_ERROR 0x00001000
+#define TSNEP_CONTROL_RX_DESC_ERROR 0x00002000
+#define TSNEP_TX_DESC_ADDR_LOW 0x0140
+#define TSNEP_TX_DESC_ADDR_HIGH 0x0144
+#define TSNEP_RX_DESC_ADDR_LOW 0x0180
+#define TSNEP_RX_DESC_ADDR_HIGH 0x0184
+#define TSNEP_RESET_OWNER_COUNTER 0x01
+#define TSNEP_RX_STATISTIC 0x0190
+#define TSNEP_RX_STATISTIC_NO_DESC_MASK 0x000000FF
+#define TSNEP_RX_STATISTIC_NO_DESC_SHIFT 0
+#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK 0x0000FF00
+#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT 8
+#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK 0x00FF0000
+#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT 16
+#define TSNEP_RX_STATISTIC_INVALID_FRAME_MASK 0xFF000000
+#define TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT 24
+#define TSNEP_RX_STATISTIC_NO_DESC 0x0190
+#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL 0x0191
+#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW 0x0192
+#define TSNEP_RX_STATISTIC_INVALID_FRAME 0x0193
+#define TSNEP_RX_ASSIGN 0x01A0
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_ACTIVE 0x00000001
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_MASK 0xFFFF0000
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_SHIFT 16
+#define TSNEP_MAC_ADDRESS_LOW 0x0800
+#define TSNEP_MAC_ADDRESS_HIGH 0x0804
+#define TSNEP_RX_FILTER 0x0806
+#define TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS 0x0001
+#define TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS 0x0002
+#define TSNEP_GC 0x0808
+#define TSNEP_GC_ENABLE_A 0x00000002
+#define TSNEP_GC_ENABLE_B 0x00000004
+#define TSNEP_GC_DISABLE 0x00000008
+#define TSNEP_GC_ENABLE_TIMEOUT 0x00000010
+#define TSNEP_GC_ACTIVE_A 0x00000002
+#define TSNEP_GC_ACTIVE_B 0x00000004
+#define TSNEP_GC_CHANGE_AB 0x00000008
+#define TSNEP_GC_TIMEOUT_ACTIVE 0x00000010
+#define TSNEP_GC_TIMEOUT_SIGNAL 0x00000020
+#define TSNEP_GC_LIST_ERROR 0x00000080
+#define TSNEP_GC_OPEN 0x00FF0000
+#define TSNEP_GC_OPEN_SHIFT 16
+#define TSNEP_GC_NEXT_OPEN 0xFF000000
+#define TSNEP_GC_NEXT_OPEN_SHIFT 24
+#define TSNEP_GC_TIMEOUT 131072
+#define TSNEP_GC_TIME 0x080C
+#define TSNEP_GC_CHANGE 0x0810
+#define TSNEP_GCL_A 0x2000
+#define TSNEP_GCL_B 0x2800
+#define TSNEP_GCL_SIZE SZ_2K
+
+/* tsnep gate control list operation */
+struct tsnep_gcl_operation {
+ u32 properties;
+ u32 interval;
+};
+
+#define TSNEP_GCL_COUNT (TSNEP_GCL_SIZE / sizeof(struct tsnep_gcl_operation))
+#define TSNEP_GCL_MASK 0x000000FF
+#define TSNEP_GCL_INSERT 0x20000000
+#define TSNEP_GCL_CHANGE 0x40000000
+#define TSNEP_GCL_LAST 0x80000000
+#define TSNEP_GCL_MIN_INTERVAL 32
+
+/* tsnep TX/RX descriptor */
+#define TSNEP_DESC_SIZE_DATA_AFTER 2048
+#define TSNEP_DESC_OWNER_COUNTER_MASK 0xC0000000
+#define TSNEP_DESC_OWNER_COUNTER_SHIFT 30
+#define TSNEP_DESC_LENGTH_MASK 0x00003FFF
+#define TSNEP_DESC_INTERRUPT_FLAG 0x00040000
+#define TSNEP_DESC_EXTENDED_WRITEBACK_FLAG 0x00080000
+#define TSNEP_DESC_NO_LINK_FLAG 0x01000000
+
+/* tsnep TX descriptor */
+struct tsnep_tx_desc {
+ __le32 properties;
+ __le32 more_properties;
+ __le32 reserved[2];
+ __le64 next;
+ __le64 tx;
+};
+
+#define TSNEP_TX_DESC_OWNER_MASK 0xE0000000
+#define TSNEP_TX_DESC_OWNER_USER_FLAG 0x20000000
+#define TSNEP_TX_DESC_LAST_FRAGMENT_FLAG 0x00010000
+#define TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG 0x00020000
+
+/* tsnep TX descriptor writeback */
+struct tsnep_tx_desc_wb {
+ __le32 properties;
+ __le32 reserved1[3];
+ __le64 timestamp;
+ __le32 dma_delay;
+ __le32 reserved2;
+};
+
+#define TSNEP_TX_DESC_UNDERRUN_ERROR_FLAG 0x00010000
+#define TSNEP_TX_DESC_DMA_DELAY_FIRST_DATA_MASK 0x0000FFFC
+#define TSNEP_TX_DESC_DMA_DELAY_FIRST_DATA_SHIFT 2
+#define TSNEP_TX_DESC_DMA_DELAY_LAST_DATA_MASK 0xFFFC0000
+#define TSNEP_TX_DESC_DMA_DELAY_LAST_DATA_SHIFT 18
+#define TSNEP_TX_DESC_DMA_DELAY_NS 64
+
+/* tsnep RX descriptor */
+struct tsnep_rx_desc {
+ __le32 properties;
+ __le32 reserved[3];
+ __le64 next;
+ __le64 rx;
+};
+
+#define TSNEP_RX_DESC_BUFFER_SIZE_MASK 0x00003FFC
+
+/* tsnep RX descriptor writeback */
+struct tsnep_rx_desc_wb {
+ __le32 properties;
+ __le32 reserved[7];
+};
+
+/* tsnep RX inline meta */
+struct tsnep_rx_inline {
+ __le64 reserved;
+ __le64 timestamp;
+};
+
+#define TSNEP_RX_INLINE_METADATA_SIZE (sizeof(struct tsnep_rx_inline))
+
+#endif /* _TSNEP_HW_H */
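
The TSNEP_QUEUE() macro above encodes the register map: queue pair 0 lives inside the 16 KiB MAC window, and every additional queue pair appends its own 4 KiB block after it. A standalone sketch mirroring the macro, with the first offsets worked out (not driver code):

/* Mirrors TSNEP_QUEUE() with TSNEP_MAC_SIZE = 0x4000 and
 * TSNEP_QUEUE_SIZE = 0x1000, so:
 *	offset(0) == 0x0000, offset(1) == 0x4000, offset(2) == 0x5000
 */
static unsigned long tsnep_queue_offset(unsigned int n)
{
	if (n == 0)
		return 0;		/* first pair shares the MAC window */
	return 0x4000UL + 0x1000UL * (n - 1);
}
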
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
new file mode 100644
index 000000000000..904f3304727e
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -0,0 +1,1272 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+/* TSN endpoint Ethernet MAC driver
+ *
+ * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
+ * communication. It is designed for endpoints within TSN (Time Sensitive
+ * Networking) networks, e.g. for PLCs in industrial automation.
+ *
+ * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
+ * by the driver.
+ *
+ * More information can be found here:
+ * - www.embedded-experts.at/tsn
+ * - www.engleder-embedded.com
+ */
+
+#include "tsnep.h"
+#include "tsnep_hw.h"
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/iopoll.h>
+
+#define RX_SKB_LENGTH (round_up(TSNEP_RX_INLINE_METADATA_SIZE + ETH_HLEN + \
+ TSNEP_MAX_FRAME_SIZE + ETH_FCS_LEN, 4))
+#define RX_SKB_RESERVE ((16 - TSNEP_RX_INLINE_METADATA_SIZE) + NET_IP_ALIGN)
+#define RX_SKB_ALLOC_LENGTH (RX_SKB_RESERVE + RX_SKB_LENGTH)
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
+#else
+#define DMA_ADDR_HIGH(dma_addr) ((u32)(0))
+#endif
+#define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))
+
+static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
+{
+ iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
+}
+
+static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask)
+{
+ mask |= ECM_INT_DISABLE;
+ iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
+}
+
+static irqreturn_t tsnep_irq(int irq, void *arg)
+{
+ struct tsnep_adapter *adapter = arg;
+ u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);
+
+ /* acknowledge interrupt */
+ if (active != 0)
+ iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);
+
+ /* handle link interrupt */
+ if ((active & ECM_INT_LINK) != 0) {
+ if (adapter->netdev->phydev)
+ phy_mac_interrupt(adapter->netdev->phydev);
+ }
+
+ /* handle TX/RX queue 0 interrupt */
+ if ((active & adapter->queue[0].irq_mask) != 0) {
+ if (adapter->netdev) {
+ tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
+ napi_schedule(&adapter->queue[0].napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct tsnep_adapter *adapter = bus->priv;
+ u32 md;
+ int retval;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ md = ECM_MD_READ;
+ if (!adapter->suppress_preamble)
+ md |= ECM_MD_PREAMBLE;
+ md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
+ md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
+ iowrite32(md, adapter->addr + ECM_MD_CONTROL);
+ retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
+ !(md & ECM_MD_BUSY), 16, 1000);
+ if (retval != 0)
+ return retval;
+
+ return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT;
+}
+
+static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct tsnep_adapter *adapter = bus->priv;
+ u32 md;
+ int retval;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ md = ECM_MD_WRITE;
+ if (!adapter->suppress_preamble)
+ md |= ECM_MD_PREAMBLE;
+ md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
+ md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
+ md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK;
+ iowrite32(md, adapter->addr + ECM_MD_CONTROL);
+ retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
+ !(md & ECM_MD_BUSY), 16, 1000);
+ if (retval != 0)
+ return retval;
+
+ return 0;
+}
+
+static void tsnep_phy_link_status_change(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ u32 mode;
+
+ if (phydev->link) {
+ switch (phydev->speed) {
+ case SPEED_100:
+ mode = ECM_LINK_MODE_100;
+ break;
+ case SPEED_1000:
+ mode = ECM_LINK_MODE_1000;
+ break;
+ default:
+ mode = ECM_LINK_MODE_OFF;
+ break;
+ }
+ iowrite32(mode, adapter->addr + ECM_STATUS);
+ }
+
+ phy_print_status(netdev->phydev);
+}
+
+static int tsnep_phy_open(struct tsnep_adapter *adapter)
+{
+ struct phy_device *phydev;
+ struct ethtool_eee ethtool_eee;
+ int retval;
+
+ retval = phy_connect_direct(adapter->netdev, adapter->phydev,
+ tsnep_phy_link_status_change,
+ adapter->phy_mode);
+ if (retval)
+ return retval;
+ phydev = adapter->netdev->phydev;
+
+	/* The MAC supports only 100 Mbps and 1000 Mbps full duplex. SPE
+	 * (Single Pair Ethernet) is also an option, but not implemented yet.
+	 */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
+ /* disable EEE autoneg, EEE not supported by TSNEP */
+ memset(&ethtool_eee, 0, sizeof(ethtool_eee));
+ phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);
+
+ adapter->phydev->irq = PHY_MAC_INTERRUPT;
+ phy_start(adapter->phydev);
+
+ return 0;
+}
+
+static void tsnep_phy_close(struct tsnep_adapter *adapter)
+{
+ phy_stop(adapter->netdev->phydev);
+ phy_disconnect(adapter->netdev->phydev);
+ adapter->netdev->phydev = NULL;
+}
+
+static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ int i;
+
+ memset(tx->entry, 0, sizeof(tx->entry));
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ if (tx->page[i]) {
+ dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
+ tx->page_dma[i]);
+ tx->page[i] = NULL;
+ tx->page_dma[i] = 0;
+ }
+ }
+}
+
+static int tsnep_tx_ring_init(struct tsnep_tx *tx)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ struct tsnep_tx_entry *next_entry;
+ int i, j;
+ int retval;
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ tx->page[i] =
+ dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
+ GFP_KERNEL);
+ if (!tx->page[i]) {
+ retval = -ENOMEM;
+ goto alloc_failed;
+ }
+ for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
+ entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
+ entry->desc_wb = (struct tsnep_tx_desc_wb *)
+ (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
+ entry->desc = (struct tsnep_tx_desc *)
+ (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
+ entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
+ }
+ }
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ entry = &tx->entry[i];
+ next_entry = &tx->entry[(i + 1) % TSNEP_RING_SIZE];
+ entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
+ }
+
+ return 0;
+
+alloc_failed:
+ tsnep_tx_ring_cleanup(tx);
+ return retval;
+}
+
+static void tsnep_tx_activate(struct tsnep_tx *tx, int index, bool last)
+{
+ struct tsnep_tx_entry *entry = &tx->entry[index];
+
+ entry->properties = 0;
+ if (entry->skb) {
+ entry->properties =
+ skb_pagelen(entry->skb) & TSNEP_DESC_LENGTH_MASK;
+ entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
+ if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
+ entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
+
+ /* toggle user flag to prevent false acknowledge
+ *
+ * Only the first fragment is acknowledged. For all other
+ * fragments no acknowledge is done and the last written owner
+ * counter stays in the writeback descriptor. Therefore, it is
+ * possible that the last written owner counter is identical to
+ * the new incremented owner counter and a false acknowledge is
+ * detected before the real acknowledge has been done by
+ * hardware.
+ *
+ * The user flag is used to prevent this situation. The user
+ * flag is copied to the writeback descriptor by the hardware
+	 * and is used as additional acknowledge data. By toggling the
+ * user flag only for the first fragment (which is
+ * acknowledged), it is guaranteed that the last acknowledge
+ * done for this descriptor has used a different user flag and
+ * cannot be detected as false acknowledge.
+ */
+ entry->owner_user_flag = !entry->owner_user_flag;
+ }
+ if (last)
+ entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
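+	/* The owner counter is 2 bits wide and cycles through 1..3; it is
+	 * bumped once per pass through the ring, so descriptors written on
+	 * the previous pass are never mistaken for freshly written ones.
+	 */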
+ if (index == tx->increment_owner_counter) {
+ tx->owner_counter++;
+ if (tx->owner_counter == 4)
+ tx->owner_counter = 1;
+ tx->increment_owner_counter--;
+ if (tx->increment_owner_counter < 0)
+ tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+ }
+ entry->properties |=
+ (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
+ TSNEP_DESC_OWNER_COUNTER_MASK;
+ if (entry->owner_user_flag)
+ entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
+ entry->desc->more_properties =
+ __cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);
+
+ /* descriptor properties shall be written last, because valid data is
+ * signaled there
+ */
+ dma_wmb();
+
+ entry->desc->properties = __cpu_to_le32(entry->properties);
+}
+
+static int tsnep_tx_desc_available(struct tsnep_tx *tx)
+{
+ if (tx->read <= tx->write)
+ return TSNEP_RING_SIZE - tx->write + tx->read - 1;
+ else
+ return tx->read - tx->write - 1;
+}
+
+static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ unsigned int len;
+ dma_addr_t dma;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];
+
+ if (i == 0) {
+ len = skb_headlen(skb);
+ dma = dma_map_single(dmadev, skb->data, len,
+ DMA_TO_DEVICE);
+ } else {
+ len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
+ dma = skb_frag_dma_map(dmadev,
+ &skb_shinfo(skb)->frags[i - 1],
+ 0, len, DMA_TO_DEVICE);
+ }
+ if (dma_mapping_error(dmadev, dma))
+ return -ENOMEM;
+
+ entry->len = len;
+ dma_unmap_addr_set(entry, dma, dma);
+
+ entry->desc->tx = __cpu_to_le64(dma);
+ }
+
+ return 0;
+}
+
+static void tsnep_tx_unmap(struct tsnep_tx *tx, int count)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ entry = &tx->entry[(tx->read + i) % TSNEP_RING_SIZE];
+
+ if (entry->len) {
+ if (i == 0)
+ dma_unmap_single(dmadev,
+ dma_unmap_addr(entry, dma),
+ dma_unmap_len(entry, len),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dmadev,
+ dma_unmap_addr(entry, dma),
+ dma_unmap_len(entry, len),
+ DMA_TO_DEVICE);
+ entry->len = 0;
+ }
+ }
+}
+
+static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
+ struct tsnep_tx *tx)
+{
+ unsigned long flags;
+ int count = 1;
+ struct tsnep_tx_entry *entry;
+ int i;
+ int retval;
+
+ if (skb_shinfo(skb)->nr_frags > 0)
+ count += skb_shinfo(skb)->nr_frags;
+
+ spin_lock_irqsave(&tx->lock, flags);
+
+ if (tsnep_tx_desc_available(tx) < count) {
+		/* ring is full; this should not happen because the queue is
+		 * stopped below whenever it fills up
+		 */
+ netif_stop_queue(tx->adapter->netdev);
+
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = &tx->entry[tx->write];
+ entry->skb = skb;
+
+ retval = tsnep_tx_map(skb, tx, count);
+ if (retval != 0) {
+ tsnep_tx_unmap(tx, count);
+ dev_kfree_skb_any(entry->skb);
+ entry->skb = NULL;
+
+ tx->dropped++;
+
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ netdev_err(tx->adapter->netdev, "TX DMA map failed\n");
+
+ return NETDEV_TX_OK;
+ }
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ for (i = 0; i < count; i++)
+ tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE,
+ i == (count - 1));
+ tx->write = (tx->write + count) % TSNEP_RING_SIZE;
+
+ skb_tx_timestamp(skb);
+
+ /* descriptor properties shall be valid before hardware is notified */
+ dma_wmb();
+
+ iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
+
+ if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
+ /* ring can get full with next frame */
+ netif_stop_queue(tx->adapter->netdev);
+ }
+
+ tx->packets++;
+ tx->bytes += skb_pagelen(entry->skb) + ETH_FCS_LEN;
+
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
+{
+ unsigned long flags;
+ int budget = 128;
+ struct tsnep_tx_entry *entry;
+ int count;
+
+ spin_lock_irqsave(&tx->lock, flags);
+
+ do {
+ if (tx->read == tx->write)
+ break;
+
+ entry = &tx->entry[tx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_TX_DESC_OWNER_MASK) !=
+ (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
+ break;
+
+ /* descriptor properties shall be read first, because valid data
+ * is signaled there
+ */
+ dma_rmb();
+
+ count = 1;
+ if (skb_shinfo(entry->skb)->nr_frags > 0)
+ count += skb_shinfo(entry->skb)->nr_frags;
+
+ tsnep_tx_unmap(tx, count);
+
+ if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ (__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
+ struct skb_shared_hwtstamps hwtstamps;
+ u64 timestamp =
+ __le64_to_cpu(entry->desc_wb->timestamp);
+
+ memset(&hwtstamps, 0, sizeof(hwtstamps));
+ hwtstamps.hwtstamp = ns_to_ktime(timestamp);
+
+ skb_tstamp_tx(entry->skb, &hwtstamps);
+ }
+
+ napi_consume_skb(entry->skb, budget);
+ entry->skb = NULL;
+
+ tx->read = (tx->read + count) % TSNEP_RING_SIZE;
+
+ budget--;
+ } while (likely(budget));
+
+ if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
+ netif_queue_stopped(tx->adapter->netdev)) {
+ netif_wake_queue(tx->adapter->netdev);
+ }
+
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ return (budget != 0);
+}
+
+static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
+ struct tsnep_tx *tx)
+{
+ dma_addr_t dma;
+ int retval;
+
+ memset(tx, 0, sizeof(*tx));
+ tx->adapter = adapter;
+ tx->addr = addr;
+
+ retval = tsnep_tx_ring_init(tx);
+ if (retval)
+ return retval;
+
+ dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
+ iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
+ iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
+ tx->owner_counter = 1;
+ tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+
+ spin_lock_init(&tx->lock);
+
+ return 0;
+}
+
+static void tsnep_tx_close(struct tsnep_tx *tx)
+{
+ u32 val;
+
+ readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
+ ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
+ 1000000);
+
+ tsnep_tx_ring_cleanup(tx);
+}
+
+static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ struct tsnep_rx_entry *entry;
+ int i;
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ entry = &rx->entry[i];
+ if (dma_unmap_addr(entry, dma))
+ dma_unmap_single(dmadev, dma_unmap_addr(entry, dma),
+ dma_unmap_len(entry, len),
+ DMA_FROM_DEVICE);
+ if (entry->skb)
+ dev_kfree_skb(entry->skb);
+ }
+
+ memset(rx->entry, 0, sizeof(rx->entry));
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ if (rx->page[i]) {
+ dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
+ rx->page_dma[i]);
+ rx->page[i] = NULL;
+ rx->page_dma[i] = 0;
+ }
+ }
+}
+
+static int tsnep_rx_alloc_and_map_skb(struct tsnep_rx *rx,
+ struct tsnep_rx_entry *entry)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ struct sk_buff *skb;
+ dma_addr_t dma;
+
+ skb = __netdev_alloc_skb(rx->adapter->netdev, RX_SKB_ALLOC_LENGTH,
+ GFP_ATOMIC | GFP_DMA);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, RX_SKB_RESERVE);
+
+ dma = dma_map_single(dmadev, skb->data, RX_SKB_LENGTH,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dmadev, dma)) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ entry->skb = skb;
+ entry->len = RX_SKB_LENGTH;
+ dma_unmap_addr_set(entry, dma, dma);
+ entry->desc->rx = __cpu_to_le64(dma);
+
+ return 0;
+}
+
+static int tsnep_rx_ring_init(struct tsnep_rx *rx)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ struct tsnep_rx_entry *entry;
+ struct tsnep_rx_entry *next_entry;
+ int i, j;
+ int retval;
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ rx->page[i] =
+ dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
+ GFP_KERNEL);
+ if (!rx->page[i]) {
+ retval = -ENOMEM;
+ goto failed;
+ }
+ for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
+ entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
+ entry->desc_wb = (struct tsnep_rx_desc_wb *)
+ (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
+ entry->desc = (struct tsnep_rx_desc *)
+ (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
+ entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
+ }
+ }
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ entry = &rx->entry[i];
+ next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
+ entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
+
+ retval = tsnep_rx_alloc_and_map_skb(rx, entry);
+ if (retval)
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ tsnep_rx_ring_cleanup(rx);
+ return retval;
+}
+
+static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
+{
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+
+ /* RX_SKB_LENGTH is a multiple of 4 */
+ entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
+ entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
+ if (index == rx->increment_owner_counter) {
+ rx->owner_counter++;
+ if (rx->owner_counter == 4)
+ rx->owner_counter = 1;
+ rx->increment_owner_counter--;
+ if (rx->increment_owner_counter < 0)
+ rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+ }
+ entry->properties |=
+ (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
+ TSNEP_DESC_OWNER_COUNTER_MASK;
+
+ /* descriptor properties shall be written last, because valid data is
+ * signaled there
+ */
+ dma_wmb();
+
+ entry->desc->properties = __cpu_to_le32(entry->properties);
+}
+
+static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
+ int budget)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ int done = 0;
+ struct tsnep_rx_entry *entry;
+ struct sk_buff *skb;
+ size_t len;
+ dma_addr_t dma;
+ int length;
+ bool enable = false;
+ int retval;
+
+ while (likely(done < budget)) {
+ entry = &rx->entry[rx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_OWNER_COUNTER_MASK) !=
+ (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
+ break;
+
+ /* descriptor properties shall be read first, because valid data
+ * is signaled there
+ */
+ dma_rmb();
+
+ skb = entry->skb;
+ len = dma_unmap_len(entry, len);
+ dma = dma_unmap_addr(entry, dma);
+
+ /* forward skb only if allocation is successful, otherwise
+ * skb is reused and frame dropped
+ */
+ retval = tsnep_rx_alloc_and_map_skb(rx, entry);
+ if (!retval) {
+ dma_unmap_single(dmadev, dma, len, DMA_FROM_DEVICE);
+
+ length = __le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_LENGTH_MASK;
+ skb_put(skb, length - ETH_FCS_LEN);
+ if (rx->adapter->hwtstamp_config.rx_filter ==
+ HWTSTAMP_FILTER_ALL) {
+ struct skb_shared_hwtstamps *hwtstamps =
+ skb_hwtstamps(skb);
+ struct tsnep_rx_inline *rx_inline =
+ (struct tsnep_rx_inline *)skb->data;
+ u64 timestamp =
+ __le64_to_cpu(rx_inline->timestamp);
+
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(timestamp);
+ }
+ skb_pull(skb, TSNEP_RX_INLINE_METADATA_SIZE);
+ skb->protocol = eth_type_trans(skb,
+ rx->adapter->netdev);
+
+ rx->packets++;
+ rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ rx->multicast++;
+
+ napi_gro_receive(napi, skb);
+ done++;
+ } else {
+ rx->dropped++;
+ }
+
+ tsnep_rx_activate(rx, rx->read);
+
+ enable = true;
+
+ rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
+ }
+
+ if (enable) {
+ /* descriptor properties shall be valid before hardware is
+ * notified
+ */
+ dma_wmb();
+
+ iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
+ }
+
+ return done;
+}
+
+static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
+ struct tsnep_rx *rx)
+{
+ dma_addr_t dma;
+ int i;
+ int retval;
+
+ memset(rx, 0, sizeof(*rx));
+ rx->adapter = adapter;
+ rx->addr = addr;
+
+ retval = tsnep_rx_ring_init(rx);
+ if (retval)
+ return retval;
+
+ dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
+ iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
+ iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
+ rx->owner_counter = 1;
+ rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++)
+ tsnep_rx_activate(rx, i);
+
+ /* descriptor properties shall be valid before hardware is notified */
+ dma_wmb();
+
+ iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
+
+ return 0;
+}
+
+static void tsnep_rx_close(struct tsnep_rx *rx)
+{
+ u32 val;
+
+ iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
+ readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
+ ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
+ 1000000);
+
+ tsnep_rx_ring_cleanup(rx);
+}
+
+static int tsnep_poll(struct napi_struct *napi, int budget)
+{
+ struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
+ napi);
+ bool complete = true;
+ int done = 0;
+
+ if (queue->tx)
+ complete = tsnep_tx_poll(queue->tx, budget);
+
+ if (queue->rx) {
+ done = tsnep_rx_poll(queue->rx, napi, budget);
+ if (done >= budget)
+ complete = false;
+ }
+
+ /* if all work not completed, return budget and keep polling */
+ if (!complete)
+ return budget;
+
+ if (likely(napi_complete_done(napi, done)))
+ tsnep_enable_irq(queue->adapter, queue->irq_mask);
+
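+	/* NAPI contract: after napi_complete_done() the poll function must
+	 * return strictly less than the full budget.
+	 */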
+ return min(done, budget - 1);
+}
+
+static int tsnep_netdev_open(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int i;
+ void __iomem *addr;
+ int tx_queue_index = 0;
+ int rx_queue_index = 0;
+ int retval;
+
+ retval = tsnep_phy_open(adapter);
+ if (retval)
+ return retval;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ adapter->queue[i].adapter = adapter;
+ if (adapter->queue[i].tx) {
+ addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
+ retval = tsnep_tx_open(adapter, addr,
+ adapter->queue[i].tx);
+ if (retval)
+ goto failed;
+ tx_queue_index++;
+ }
+ if (adapter->queue[i].rx) {
+ addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
+ retval = tsnep_rx_open(adapter, addr,
+ adapter->queue[i].rx);
+ if (retval)
+ goto failed;
+ rx_queue_index++;
+ }
+ }
+
+ retval = netif_set_real_num_tx_queues(adapter->netdev,
+ adapter->num_tx_queues);
+ if (retval)
+ goto failed;
+ retval = netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_queues);
+ if (retval)
+ goto failed;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
+ tsnep_poll, 64);
+ napi_enable(&adapter->queue[i].napi);
+
+ tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
+ }
+
+ return 0;
+
+failed:
+ for (i = 0; i < adapter->num_queues; i++) {
+ if (adapter->queue[i].rx)
+ tsnep_rx_close(adapter->queue[i].rx);
+ if (adapter->queue[i].tx)
+ tsnep_tx_close(adapter->queue[i].tx);
+ }
+ tsnep_phy_close(adapter);
+ return retval;
+}
+
+static int tsnep_netdev_close(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
+
+ napi_disable(&adapter->queue[i].napi);
+ netif_napi_del(&adapter->queue[i].napi);
+
+ if (adapter->queue[i].rx)
+ tsnep_rx_close(adapter->queue[i].rx);
+ if (adapter->queue[i].tx)
+ tsnep_tx_close(adapter->queue[i].tx);
+ }
+
+ tsnep_phy_close(adapter);
+
+ return 0;
+}
+
+static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ u16 queue_mapping = skb_get_queue_mapping(skb);
+
+ if (queue_mapping >= adapter->num_tx_queues)
+ queue_mapping = 0;
+
+ return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
+}
+
+static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd)
+{
+ if (!netif_running(netdev))
+ return -EINVAL;
+ if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
+ return tsnep_ptp_ioctl(netdev, ifr, cmd);
+ return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+}
+
+static void tsnep_netdev_set_multicast(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ u16 rx_filter = 0;
+
+ /* configured MAC address and broadcasts are never filtered */
+ if (netdev->flags & IFF_PROMISC) {
+ rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
+ rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS;
+ } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
+ rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
+ }
+ iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
+}
+
+static void tsnep_netdev_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ u32 reg;
+ u32 val;
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ stats->tx_packets += adapter->tx[i].packets;
+ stats->tx_bytes += adapter->tx[i].bytes;
+ stats->tx_dropped += adapter->tx[i].dropped;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ stats->rx_packets += adapter->rx[i].packets;
+ stats->rx_bytes += adapter->rx[i].bytes;
+ stats->rx_dropped += adapter->rx[i].dropped;
+ stats->multicast += adapter->rx[i].multicast;
+
+ reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
+ TSNEP_RX_STATISTIC);
+ val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
+ TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
+ stats->rx_dropped += val;
+ val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
+ TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
+ stats->rx_dropped += val;
+ val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
+ TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
+ stats->rx_errors += val;
+ stats->rx_fifo_errors += val;
+ val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
+ TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
+ stats->rx_errors += val;
+ stats->rx_frame_errors += val;
+ }
+
+ reg = ioread32(adapter->addr + ECM_STAT);
+ val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
+ stats->rx_errors += val;
+ val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
+ stats->rx_errors += val;
+ stats->rx_crc_errors += val;
+ val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
+ stats->rx_errors += val;
+}
+
+static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr)
+{
+ iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
+ iowrite16(*(u16 *)(addr + sizeof(u32)),
+ adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
+
+ ether_addr_copy(adapter->mac_address, addr);
+ netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
+ addr);
+}
+
+static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *sock_addr = addr;
+ int retval;
+
+ retval = eth_prepare_mac_addr_change(netdev, sock_addr);
+ if (retval)
+ return retval;
+ eth_hw_addr_set(netdev, sock_addr->sa_data);
+ tsnep_mac_set_address(adapter, sock_addr->sa_data);
+
+ return 0;
+}
+
+static const struct net_device_ops tsnep_netdev_ops = {
+ .ndo_open = tsnep_netdev_open,
+ .ndo_stop = tsnep_netdev_close,
+ .ndo_start_xmit = tsnep_netdev_xmit_frame,
+ .ndo_eth_ioctl = tsnep_netdev_ioctl,
+ .ndo_set_rx_mode = tsnep_netdev_set_multicast,
+
+ .ndo_get_stats64 = tsnep_netdev_get_stats64,
+ .ndo_set_mac_address = tsnep_netdev_set_mac_address,
+ .ndo_setup_tc = tsnep_tc_setup,
+};
+
+static int tsnep_mac_init(struct tsnep_adapter *adapter)
+{
+ int retval;
+
+	/* initialize RX filtering; at minimum the configured MAC address
+	 * and broadcasts are never filtered
+	 */
+ iowrite16(0, adapter->addr + TSNEP_RX_FILTER);
+
+ /* try to get MAC address in the following order:
+ * - device tree
+ * - valid MAC address already set
+ * - MAC address register if valid
+ * - random MAC address
+ */
+ retval = of_get_mac_address(adapter->pdev->dev.of_node,
+ adapter->mac_address);
+ if (retval == -EPROBE_DEFER)
+ return retval;
+ if (retval && !is_valid_ether_addr(adapter->mac_address)) {
+ *(u32 *)adapter->mac_address =
+ ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
+ *(u16 *)(adapter->mac_address + sizeof(u32)) =
+ ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
+ if (!is_valid_ether_addr(adapter->mac_address))
+ eth_random_addr(adapter->mac_address);
+ }
+
+ tsnep_mac_set_address(adapter, adapter->mac_address);
+ eth_hw_addr_set(adapter->netdev, adapter->mac_address);
+
+ return 0;
+}
+
+static int tsnep_mdio_init(struct tsnep_adapter *adapter)
+{
+ struct device_node *np = adapter->pdev->dev.of_node;
+ int retval;
+
+ if (np) {
+ np = of_get_child_by_name(np, "mdio");
+ if (!np)
+ return 0;
+
+ adapter->suppress_preamble =
+ of_property_read_bool(np, "suppress-preamble");
+ }
+
+ adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
+ if (!adapter->mdiobus) {
+ retval = -ENOMEM;
+
+ goto out;
+ }
+
+ adapter->mdiobus->priv = (void *)adapter;
+ adapter->mdiobus->parent = &adapter->pdev->dev;
+ adapter->mdiobus->read = tsnep_mdiobus_read;
+ adapter->mdiobus->write = tsnep_mdiobus_write;
+ adapter->mdiobus->name = TSNEP "-mdiobus";
+ snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
+ adapter->pdev->name);
+
+ /* do not scan broadcast address */
+ adapter->mdiobus->phy_mask = 0x0000001;
+
+ retval = of_mdiobus_register(adapter->mdiobus, np);
+
+out:
+ if (np)
+ of_node_put(np);
+
+ return retval;
+}
+
+static int tsnep_phy_init(struct tsnep_adapter *adapter)
+{
+ struct device_node *phy_node;
+ int retval;
+
+ retval = of_get_phy_mode(adapter->pdev->dev.of_node,
+ &adapter->phy_mode);
+ if (retval)
+ adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
+
+ phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
+ 0);
+ adapter->phydev = of_phy_find_device(phy_node);
+ of_node_put(phy_node);
+ if (!adapter->phydev && adapter->mdiobus)
+ adapter->phydev = phy_find_first(adapter->mdiobus);
+ if (!adapter->phydev)
+ return -EIO;
+
+ return 0;
+}
+
+static int tsnep_probe(struct platform_device *pdev)
+{
+ struct tsnep_adapter *adapter;
+ struct net_device *netdev;
+ struct resource *io;
+ u32 type;
+ int revision;
+ int version;
+ int retval;
+
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct tsnep_adapter),
+ TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES);
+ if (!netdev)
+ return -ENODEV;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ adapter = netdev_priv(netdev);
+ platform_set_drvdata(pdev, adapter);
+ adapter->pdev = pdev;
+ adapter->dmadev = &pdev->dev;
+ adapter->netdev = netdev;
+ adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
+
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;
+
+ mutex_init(&adapter->gate_control_lock);
+
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ adapter->addr = devm_ioremap_resource(&pdev->dev, io);
+ if (IS_ERR(adapter->addr))
+ return PTR_ERR(adapter->addr);
+ adapter->irq = platform_get_irq(pdev, 0);
+ netdev->mem_start = io->start;
+ netdev->mem_end = io->end;
+ netdev->irq = adapter->irq;
+
+ type = ioread32(adapter->addr + ECM_TYPE);
+ revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
+ version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
+ adapter->gate_control = type & ECM_GATE_CONTROL;
+
+ adapter->num_tx_queues = TSNEP_QUEUES;
+ adapter->num_rx_queues = TSNEP_QUEUES;
+ adapter->num_queues = TSNEP_QUEUES;
+ adapter->queue[0].tx = &adapter->tx[0];
+ adapter->queue[0].rx = &adapter->rx[0];
+ adapter->queue[0].irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
+
+ tsnep_disable_irq(adapter, ECM_INT_ALL);
+ retval = devm_request_irq(&adapter->pdev->dev, adapter->irq, tsnep_irq,
+ 0, TSNEP, adapter);
+ if (retval != 0) {
+ dev_err(&adapter->pdev->dev, "can't get assigned irq %d.\n",
+ adapter->irq);
+ return retval;
+ }
+ tsnep_enable_irq(adapter, ECM_INT_LINK);
+
+ retval = tsnep_mac_init(adapter);
+ if (retval)
+ goto mac_init_failed;
+
+ retval = tsnep_mdio_init(adapter);
+ if (retval)
+ goto mdio_init_failed;
+
+ retval = tsnep_phy_init(adapter);
+ if (retval)
+ goto phy_init_failed;
+
+ retval = tsnep_ptp_init(adapter);
+ if (retval)
+ goto ptp_init_failed;
+
+ retval = tsnep_tc_init(adapter);
+ if (retval)
+ goto tc_init_failed;
+
+ netdev->netdev_ops = &tsnep_netdev_ops;
+ netdev->ethtool_ops = &tsnep_ethtool_ops;
+ netdev->features = NETIF_F_SG;
+ netdev->hw_features = netdev->features;
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ retval = register_netdev(netdev);
+ if (retval)
+ goto register_failed;
+
+ dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
+ revision);
+ if (adapter->gate_control)
+ dev_info(&adapter->pdev->dev, "gate control detected\n");
+
+ return 0;
+
+register_failed:
+ tsnep_tc_cleanup(adapter);
+tc_init_failed:
+ tsnep_ptp_cleanup(adapter);
+ptp_init_failed:
+phy_init_failed:
+ if (adapter->mdiobus)
+ mdiobus_unregister(adapter->mdiobus);
+mdio_init_failed:
+mac_init_failed:
+ tsnep_disable_irq(adapter, ECM_INT_ALL);
+ return retval;
+}
+
+static int tsnep_remove(struct platform_device *pdev)
+{
+ struct tsnep_adapter *adapter = platform_get_drvdata(pdev);
+
+ unregister_netdev(adapter->netdev);
+
+ tsnep_tc_cleanup(adapter);
+
+ tsnep_ptp_cleanup(adapter);
+
+ if (adapter->mdiobus)
+ mdiobus_unregister(adapter->mdiobus);
+
+ tsnep_disable_irq(adapter, ECM_INT_ALL);
+
+ return 0;
+}
+
+static const struct of_device_id tsnep_of_match[] = {
+ { .compatible = "engleder,tsnep", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tsnep_of_match);
+
+static struct platform_driver tsnep_driver = {
+ .driver = {
+ .name = TSNEP,
+ .of_match_table = of_match_ptr(tsnep_of_match),
+ },
+ .probe = tsnep_probe,
+ .remove = tsnep_remove,
+};
+module_platform_driver(tsnep_driver);
+
+MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");
+MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/engleder/tsnep_ptp.c b/drivers/net/ethernet/engleder/tsnep_ptp.c
new file mode 100644
index 000000000000..4bfb4d8508f5
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_ptp.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time)
+{
+ u32 high_before;
+ u32 low;
+ u32 high;
+
+ /* read high dword twice to detect overrun */
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ do {
+ low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ high_before = high;
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ } while (high != high_before);
+ *time = (((u64)high) << 32) | ((u64)low);
+}
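
The loop above is the usual hi-lo-hi sequence for sampling a 64-bit counter through two 32-bit registers: if the high dword changes between the two reads, the low dword may have wrapped in between, so the sample is discarded and retried. A worked trace (editorial sketch, hypothetical values):

    high = 0x00000001                      /* first read of high dword */
    low  = 0x00000005                      /* counter wrapped just before this read */
    high = 0x00000002 != high_before       /* mismatch detected, loop retries */
    low  = 0x00000010, high = 0x00000002   /* stable high dword, consistent sample */
    *time = 0x0000000200000010
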
+
+int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+
+ if (!ifr)
+ return -EINVAL;
+
+ if (cmd == SIOCSHWTSTAMP) {
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ memcpy(&adapter->hwtstamp_config, &config,
+ sizeof(adapter->hwtstamp_config));
+ }
+
+ if (copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
+ sizeof(adapter->hwtstamp_config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int tsnep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ bool negative = false;
+ u64 rate_offset;
+
+ if (scaled_ppm < 0) {
+ scaled_ppm = -scaled_ppm;
+ negative = true;
+ }
+
+	/* convert from 16 bit to 32 bit binary fractional; divide by 1000000 to
+	 * eliminate the ppm scaling and multiply by 8 to compensate for the 8ns
+	 * clock cycle time; the calculation simplifies because 15625 * 8 = 1000000 / 8
+	 */
+ rate_offset = scaled_ppm;
+ rate_offset <<= 16 - 3;
+ rate_offset = div_u64(rate_offset, 15625);
+
+ rate_offset &= ECM_CLOCK_RATE_OFFSET_MASK;
+ if (negative)
+ rate_offset |= ECM_CLOCK_RATE_OFFSET_SIGN;
+ iowrite32(rate_offset & 0xFFFFFFFF, adapter->addr + ECM_CLOCK_RATE);
+
+ return 0;
+}
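
As a quick check of the conversion comment above (editorial sketch, not part of the driver): for scaled_ppm = 65536, i.e. exactly 1 ppm in the 16-bit binary fractional format,

    rate_offset = (65536 << 13) / 15625 = 536870912 / 15625 = 34359

which matches the independently computed per-cycle offset of 8 ns * 10^-6 expressed as a 32-bit binary fraction: 8e-6 * 2^32 = 34359.74, truncated to 34359.
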
+
+static int tsnep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u64 system_time;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->ptp_lock, flags);
+
+ tsnep_get_system_time(adapter, &system_time);
+
+ system_time += delta;
+
+ /* high dword is buffered in hardware and synchronously written to
+ * system time when low dword is written
+ */
+ iowrite32(system_time >> 32, adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ iowrite32(system_time & 0xFFFFFFFF,
+ adapter->addr + ECM_SYSTEM_TIME_LOW);
+
+ spin_unlock_irqrestore(&adapter->ptp_lock, flags);
+
+ return 0;
+}
+
+static int tsnep_ptp_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u32 high_before;
+ u32 low;
+ u32 high;
+ u64 system_time;
+
+ /* read high dword twice to detect overrun */
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ do {
+ ptp_read_system_prets(sts);
+ low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ ptp_read_system_postts(sts);
+ high_before = high;
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ } while (high != high_before);
+ system_time = (((u64)high) << 32) | ((u64)low);
+
+ *ts = ns_to_timespec64(system_time);
+
+ return 0;
+}
+
+static int tsnep_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u64 system_time = timespec64_to_ns(ts);
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->ptp_lock, flags);
+
+ /* high dword is buffered in hardware and synchronously written to
+ * system time when low dword is written
+ */
+ iowrite32(system_time >> 32, adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ iowrite32(system_time & 0xFFFFFFFF,
+ adapter->addr + ECM_SYSTEM_TIME_LOW);
+
+ spin_unlock_irqrestore(&adapter->ptp_lock, flags);
+
+ return 0;
+}
+
+int tsnep_ptp_init(struct tsnep_adapter *adapter)
+{
+ int retval = 0;
+
+ adapter->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ snprintf(adapter->ptp_clock_info.name, 16, "%s", TSNEP);
+ adapter->ptp_clock_info.owner = THIS_MODULE;
+	/* at most 2^-1 ns of adjustment per clock cycle with an 8 ns clock cycle
+	 * time; stay slightly below that limit because only fractional bits below
+	 * 2^-1 ns are supported
+	 */
+ adapter->ptp_clock_info.max_adj = (500000000 / 8 - 1);
+ adapter->ptp_clock_info.adjfine = tsnep_ptp_adjfine;
+ adapter->ptp_clock_info.adjtime = tsnep_ptp_adjtime;
+ adapter->ptp_clock_info.gettimex64 = tsnep_ptp_gettimex64;
+ adapter->ptp_clock_info.settime64 = tsnep_ptp_settime64;
+
+ spin_lock_init(&adapter->ptp_lock);
+
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
+ &adapter->pdev->dev);
+ if (IS_ERR(adapter->ptp_clock)) {
+ netdev_err(adapter->netdev, "ptp_clock_register failed\n");
+
+ retval = PTR_ERR(adapter->ptp_clock);
+ adapter->ptp_clock = NULL;
+ } else if (adapter->ptp_clock) {
+ netdev_info(adapter->netdev, "PHC added\n");
+ }
+
+ return retval;
+}
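
The max_adj value follows from the comment above (editorial check): 2^-1 ns of adjustment per 8 ns cycle is a frequency offset of 0.5 / 8 = 62,500,000 ppb, and ptp_clock_info.max_adj is given in ppb, so

    max_adj = 500000000 / 8 - 1 = 62499999

stays one below that hardware limit.
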
+
+void tsnep_ptp_cleanup(struct tsnep_adapter *adapter)
+{
+ if (adapter->ptp_clock) {
+ ptp_clock_unregister(adapter->ptp_clock);
+ netdev_info(adapter->netdev, "PHC removed\n");
+ }
+}
diff --git a/drivers/net/ethernet/engleder/tsnep_selftests.c b/drivers/net/ethernet/engleder/tsnep_selftests.c
new file mode 100644
index 000000000000..1581d6b22232
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_selftests.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+#include <net/pkt_sched.h>
+
+enum tsnep_test {
+ TSNEP_TEST_ENABLE = 0,
+ TSNEP_TEST_TAPRIO,
+ TSNEP_TEST_TAPRIO_CHANGE,
+ TSNEP_TEST_TAPRIO_EXTENSION,
+};
+
+static const char tsnep_test_strings[][ETH_GSTRING_LEN] = {
+ "Enable timeout (offline)",
+ "TAPRIO (offline)",
+ "TAPRIO change (offline)",
+ "TAPRIO extension (offline)",
+};
+
+#define TSNEP_TEST_COUNT (sizeof(tsnep_test_strings) / ETH_GSTRING_LEN)
+
+static bool enable_gc_timeout(struct tsnep_adapter *adapter)
+{
+ iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
+ if (!(ioread32(adapter->addr + TSNEP_GC) & TSNEP_GC_TIMEOUT_ACTIVE))
+ return false;
+
+ return true;
+}
+
+static bool gc_timeout_signaled(struct tsnep_adapter *adapter)
+{
+ if (ioread32(adapter->addr + TSNEP_GC) & TSNEP_GC_TIMEOUT_SIGNAL)
+ return true;
+
+ return false;
+}
+
+static bool ack_gc_timeout(struct tsnep_adapter *adapter)
+{
+ iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
+ if (ioread32(adapter->addr + TSNEP_GC) &
+ (TSNEP_GC_TIMEOUT_ACTIVE | TSNEP_GC_TIMEOUT_SIGNAL))
+ return false;
+ return true;
+}
+
+static bool enable_gc(struct tsnep_adapter *adapter, bool a)
+{
+ u8 enable;
+ u8 active;
+
+ if (a) {
+ enable = TSNEP_GC_ENABLE_A;
+ active = TSNEP_GC_ACTIVE_A;
+ } else {
+ enable = TSNEP_GC_ENABLE_B;
+ active = TSNEP_GC_ACTIVE_B;
+ }
+
+ iowrite8(enable, adapter->addr + TSNEP_GC);
+ if (!(ioread32(adapter->addr + TSNEP_GC) & active))
+ return false;
+
+ return true;
+}
+
+static bool disable_gc(struct tsnep_adapter *adapter)
+{
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ if (ioread32(adapter->addr + TSNEP_GC) &
+ (TSNEP_GC_ACTIVE_A | TSNEP_GC_ACTIVE_B))
+ return false;
+
+ return true;
+}
+
+static bool gc_delayed_enable(struct tsnep_adapter *adapter, bool a, int delay)
+{
+ u64 before, after;
+ u32 time;
+ bool enabled;
+
+ if (!disable_gc(adapter))
+ return false;
+
+ before = ktime_get_ns();
+
+ if (!enable_gc_timeout(adapter))
+ return false;
+
+	/* for a start time after the timeout, the timeout guarantees that
+	 * enable is blocked if it happens too late
+	 */
+ time = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ time += TSNEP_GC_TIMEOUT;
+ iowrite32(time, adapter->addr + TSNEP_GC_TIME);
+
+ ndelay(delay);
+
+ enabled = enable_gc(adapter, a);
+ after = ktime_get_ns();
+
+ if (delay > TSNEP_GC_TIMEOUT) {
+ /* timeout must have blocked enable */
+ if (enabled)
+ return false;
+ } else if ((after - before) < TSNEP_GC_TIMEOUT * 14 / 16) {
+ /* timeout must not have blocked enable */
+ if (!enabled)
+ return false;
+ }
+
+ if (enabled) {
+ if (gc_timeout_signaled(adapter))
+ return false;
+ } else {
+ if (!gc_timeout_signaled(adapter))
+ return false;
+ if (!ack_gc_timeout(adapter))
+ return false;
+ }
+
+ if (!disable_gc(adapter))
+ return false;
+
+ return true;
+}
+
+static bool tsnep_test_gc_enable(struct tsnep_adapter *adapter)
+{
+ int i;
+
+ iowrite32(0x80000001, adapter->addr + TSNEP_GCL_A + 0);
+ iowrite32(100000, adapter->addr + TSNEP_GCL_A + 4);
+
+ for (i = 0; i < 200000; i += 100) {
+ if (!gc_delayed_enable(adapter, true, i))
+ return false;
+ }
+
+ iowrite32(0x80000001, adapter->addr + TSNEP_GCL_B + 0);
+ iowrite32(100000, adapter->addr + TSNEP_GCL_B + 4);
+
+ for (i = 0; i < 200000; i += 100) {
+ if (!gc_delayed_enable(adapter, false, i))
+ return false;
+ }
+
+ return true;
+}
+
+static void delay_base_time(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt, s64 ms)
+{
+ u64 system_time;
+ u64 base_time = ktime_to_ns(qopt->base_time);
+ u64 n;
+
+ tsnep_get_system_time(adapter, &system_time);
+ system_time += ms * 1000000;
+ n = div64_u64(system_time - base_time, qopt->cycle_time);
+
+ qopt->base_time = ktime_add_ns(qopt->base_time,
+ (n + 1) * qopt->cycle_time);
+}
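
delay_base_time() moves base_time forward to the first cycle boundary that lies at least ms milliseconds in the future. A worked example (editorial sketch, hypothetical values): with base_time = 0, cycle_time = 100000 ns and system_time + ms * 1000000 = 1250000 ns, n = 12 and the new base_time becomes 13 * 100000 = 1300000 ns, the first cycle start strictly after the target time.
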
+
+static void get_gate_state(struct tsnep_adapter *adapter, u32 *gc, u32 *gc_time,
+ u64 *system_time)
+{
+ u32 time_high_before;
+ u32 time_low;
+ u32 time_high;
+ u32 gc_time_before;
+
+ time_high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ *gc_time = ioread32(adapter->addr + TSNEP_GC_TIME);
+ do {
+ time_low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ *gc = ioread32(adapter->addr + TSNEP_GC);
+
+ gc_time_before = *gc_time;
+ *gc_time = ioread32(adapter->addr + TSNEP_GC_TIME);
+ time_high_before = time_high;
+ time_high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ } while ((time_high != time_high_before) ||
+ (*gc_time != gc_time_before));
+
+ *system_time = (((u64)time_high) << 32) | ((u64)time_low);
+}
+
+static int get_operation(struct tsnep_gcl *gcl, u64 system_time, u64 *next)
+{
+ u64 n = div64_u64(system_time - gcl->base_time, gcl->cycle_time);
+ u64 cycle_start = gcl->base_time + gcl->cycle_time * n;
+ int i;
+
+ *next = cycle_start;
+ for (i = 0; i < gcl->count; i++) {
+ *next += gcl->operation[i].interval;
+ if (*next > system_time)
+ break;
+ }
+
+ return i;
+}
+
+static bool check_gate(struct tsnep_adapter *adapter)
+{
+ u32 gc_time;
+ u32 gc;
+ u64 system_time;
+ struct tsnep_gcl *curr;
+ struct tsnep_gcl *prev;
+ u64 next_time;
+ u8 gate_open;
+ u8 next_gate_open;
+
+ get_gate_state(adapter, &gc, &gc_time, &system_time);
+
+ if (gc & TSNEP_GC_ACTIVE_A) {
+ curr = &adapter->gcl[0];
+ prev = &adapter->gcl[1];
+ } else if (gc & TSNEP_GC_ACTIVE_B) {
+ curr = &adapter->gcl[1];
+ prev = &adapter->gcl[0];
+ } else {
+ return false;
+ }
+ if (curr->start_time <= system_time) {
+ /* GCL is already active */
+ int index;
+
+ index = get_operation(curr, system_time, &next_time);
+ gate_open = curr->operation[index].properties & TSNEP_GCL_MASK;
+ if (index == curr->count - 1)
+ index = 0;
+ else
+ index++;
+ next_gate_open =
+ curr->operation[index].properties & TSNEP_GCL_MASK;
+ } else if (curr->change) {
+ /* operation of previous GCL is active */
+ int index;
+ u64 start_before;
+ u64 n;
+
+ index = get_operation(prev, system_time, &next_time);
+ next_time = curr->start_time;
+ start_before = prev->base_time;
+ n = div64_u64(curr->start_time - start_before,
+ prev->cycle_time);
+ start_before += n * prev->cycle_time;
+ if (curr->start_time == start_before)
+ start_before -= prev->cycle_time;
+ if (((start_before + prev->cycle_time_extension) >=
+ curr->start_time) &&
+ (curr->start_time - prev->cycle_time_extension <=
+ system_time)) {
+ /* extend */
+ index = prev->count - 1;
+ }
+ gate_open = prev->operation[index].properties & TSNEP_GCL_MASK;
+ next_gate_open =
+ curr->operation[0].properties & TSNEP_GCL_MASK;
+ } else {
+ /* GCL is waiting for start */
+ next_time = curr->start_time;
+ gate_open = 0xFF;
+ next_gate_open = curr->operation[0].properties & TSNEP_GCL_MASK;
+ }
+
+ if (gc_time != (next_time & 0xFFFFFFFF)) {
+ dev_err(&adapter->pdev->dev, "gate control time 0x%x!=0x%llx\n",
+ gc_time, next_time);
+ return false;
+ }
+ if (((gc & TSNEP_GC_OPEN) >> TSNEP_GC_OPEN_SHIFT) != gate_open) {
+ dev_err(&adapter->pdev->dev,
+ "gate control open 0x%02x!=0x%02x\n",
+ ((gc & TSNEP_GC_OPEN) >> TSNEP_GC_OPEN_SHIFT),
+ gate_open);
+ return false;
+ }
+ if (((gc & TSNEP_GC_NEXT_OPEN) >> TSNEP_GC_NEXT_OPEN_SHIFT) !=
+ next_gate_open) {
+ dev_err(&adapter->pdev->dev,
+ "gate control next open 0x%02x!=0x%02x\n",
+ ((gc & TSNEP_GC_NEXT_OPEN) >> TSNEP_GC_NEXT_OPEN_SHIFT),
+ next_gate_open);
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_gate_duration(struct tsnep_adapter *adapter, s64 ms)
+{
+ ktime_t start = ktime_get();
+
+ do {
+ if (!check_gate(adapter))
+ return false;
+ } while (ktime_ms_delta(ktime_get(), start) < ms);
+
+ return true;
+}
+
+static bool enable_check_taprio(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt, s64 ms)
+{
+ int retval;
+
+ retval = tsnep_tc_setup(adapter->netdev, TC_SETUP_QDISC_TAPRIO, qopt);
+ if (retval)
+ return false;
+
+ if (!check_gate_duration(adapter, ms))
+ return false;
+
+ return true;
+}
+
+static bool disable_taprio(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload qopt;
+ int retval;
+
+ memset(&qopt, 0, sizeof(qopt));
+ qopt.enable = 0;
+ retval = tsnep_tc_setup(adapter->netdev, TC_SETUP_QDISC_TAPRIO, &qopt);
+ if (retval)
+ return false;
+
+ return true;
+}
+
+static bool run_taprio(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt, s64 ms)
+{
+ if (!enable_check_taprio(adapter, qopt, ms))
+ return false;
+
+ if (!disable_taprio(adapter))
+ return false;
+
+ return true;
+}
+
+static bool tsnep_test_taprio(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload *qopt;
+ int i;
+
+ qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
+ if (!qopt)
+ return false;
+ for (i = 0; i < 255; i++)
+ qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1500000;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x02;
+ qopt->entries[0].interval = 200000;
+ qopt->entries[1].gate_mask = 0x03;
+ qopt->entries[1].interval = 800000;
+ qopt->entries[2].gate_mask = 0x07;
+ qopt->entries[2].interval = 240000;
+ qopt->entries[3].gate_mask = 0x01;
+ qopt->entries[3].interval = 80000;
+ qopt->entries[4].gate_mask = 0x04;
+ qopt->entries[4].interval = 70000;
+ qopt->entries[5].gate_mask = 0x06;
+ qopt->entries[5].interval = 60000;
+ qopt->entries[6].gate_mask = 0x0F;
+ qopt->entries[6].interval = 50000;
+ qopt->num_entries = 7;
+ if (!run_taprio(adapter, qopt, 100))
+ goto failed;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 411854;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x17;
+ qopt->entries[0].interval = 23842;
+ qopt->entries[1].gate_mask = 0x16;
+ qopt->entries[1].interval = 13482;
+ qopt->entries[2].gate_mask = 0x15;
+ qopt->entries[2].interval = 49428;
+ qopt->entries[3].gate_mask = 0x14;
+ qopt->entries[3].interval = 38189;
+ qopt->entries[4].gate_mask = 0x13;
+ qopt->entries[4].interval = 92321;
+ qopt->entries[5].gate_mask = 0x12;
+ qopt->entries[5].interval = 71239;
+ qopt->entries[6].gate_mask = 0x11;
+ qopt->entries[6].interval = 69932;
+ qopt->entries[7].gate_mask = 0x10;
+ qopt->entries[7].interval = 53421;
+ qopt->num_entries = 8;
+ if (!run_taprio(adapter, qopt, 100))
+ goto failed;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ delay_base_time(adapter, qopt, 12);
+ qopt->cycle_time = 125000;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x27;
+ qopt->entries[0].interval = 15000;
+ qopt->entries[1].gate_mask = 0x26;
+ qopt->entries[1].interval = 15000;
+ qopt->entries[2].gate_mask = 0x25;
+ qopt->entries[2].interval = 12500;
+ qopt->entries[3].gate_mask = 0x24;
+ qopt->entries[3].interval = 17500;
+ qopt->entries[4].gate_mask = 0x23;
+ qopt->entries[4].interval = 10000;
+ qopt->entries[5].gate_mask = 0x22;
+ qopt->entries[5].interval = 11000;
+ qopt->entries[6].gate_mask = 0x21;
+ qopt->entries[6].interval = 9000;
+ qopt->entries[7].gate_mask = 0x20;
+ qopt->entries[7].interval = 10000;
+ qopt->entries[8].gate_mask = 0x20;
+ qopt->entries[8].interval = 12500;
+ qopt->entries[9].gate_mask = 0x20;
+ qopt->entries[9].interval = 12500;
+ qopt->num_entries = 10;
+ if (!run_taprio(adapter, qopt, 100))
+ goto failed;
+
+ kfree(qopt);
+
+ return true;
+
+failed:
+ disable_taprio(adapter);
+ kfree(qopt);
+
+ return false;
+}
+
+static bool tsnep_test_taprio_change(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload *qopt;
+ int i;
+
+ qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
+ if (!qopt)
+ return false;
+ for (i = 0; i < 255; i++)
+ qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 100000;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x30;
+ qopt->entries[0].interval = 20000;
+ qopt->entries[1].gate_mask = 0x31;
+ qopt->entries[1].interval = 80000;
+ qopt->num_entries = 2;
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to identical */
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ delay_base_time(adapter, qopt, 17);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to same cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->entries[0].gate_mask = 0x42;
+ qopt->entries[1].gate_mask = 0x43;
+ delay_base_time(adapter, qopt, 2);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->entries[0].gate_mask = 0x54;
+ qopt->entries[0].interval = 33333;
+ qopt->entries[1].gate_mask = 0x55;
+ qopt->entries[1].interval = 66667;
+ delay_base_time(adapter, qopt, 23);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->entries[0].gate_mask = 0x66;
+ qopt->entries[0].interval = 50000;
+ qopt->entries[1].gate_mask = 0x67;
+ qopt->entries[1].interval = 25000;
+ qopt->entries[2].gate_mask = 0x68;
+ qopt->entries[2].interval = 25000;
+ qopt->num_entries = 3;
+ delay_base_time(adapter, qopt, 11);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to multiple of cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 200000;
+ qopt->entries[0].gate_mask = 0x79;
+ qopt->entries[0].interval = 50000;
+ qopt->entries[1].gate_mask = 0x7A;
+ qopt->entries[1].interval = 150000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 11);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1000000;
+ qopt->entries[0].gate_mask = 0x7B;
+ qopt->entries[0].interval = 125000;
+ qopt->entries[1].gate_mask = 0x7C;
+ qopt->entries[1].interval = 250000;
+ qopt->entries[2].gate_mask = 0x7D;
+ qopt->entries[2].interval = 375000;
+ qopt->entries[3].gate_mask = 0x7E;
+ qopt->entries[3].interval = 250000;
+ qopt->num_entries = 4;
+ delay_base_time(adapter, qopt, 3);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to shorter cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 333333;
+ qopt->entries[0].gate_mask = 0x8F;
+ qopt->entries[0].interval = 166666;
+ qopt->entries[1].gate_mask = 0x80;
+ qopt->entries[1].interval = 166667;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 11);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 62500;
+ qopt->entries[0].gate_mask = 0x81;
+ qopt->entries[0].interval = 31250;
+ qopt->entries[1].gate_mask = 0x82;
+ qopt->entries[1].interval = 15625;
+ qopt->entries[2].gate_mask = 0x83;
+ qopt->entries[2].interval = 15625;
+ qopt->num_entries = 3;
+ delay_base_time(adapter, qopt, 1);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to longer cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 400000;
+ qopt->entries[0].gate_mask = 0x84;
+ qopt->entries[0].interval = 100000;
+ qopt->entries[1].gate_mask = 0x85;
+ qopt->entries[1].interval = 100000;
+ qopt->entries[2].gate_mask = 0x86;
+ qopt->entries[2].interval = 100000;
+ qopt->entries[3].gate_mask = 0x87;
+ qopt->entries[3].interval = 100000;
+ qopt->num_entries = 4;
+ delay_base_time(adapter, qopt, 7);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1700000;
+ qopt->entries[0].gate_mask = 0x88;
+ qopt->entries[0].interval = 200000;
+ qopt->entries[1].gate_mask = 0x89;
+ qopt->entries[1].interval = 300000;
+ qopt->entries[2].gate_mask = 0x8A;
+ qopt->entries[2].interval = 600000;
+ qopt->entries[3].gate_mask = 0x8B;
+ qopt->entries[3].interval = 100000;
+ qopt->entries[4].gate_mask = 0x8C;
+ qopt->entries[4].interval = 500000;
+ qopt->num_entries = 5;
+ delay_base_time(adapter, qopt, 6);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ if (!disable_taprio(adapter))
+ goto failed;
+
+ kfree(qopt);
+
+ return true;
+
+failed:
+ disable_taprio(adapter);
+ kfree(qopt);
+
+ return false;
+}
+
+static bool tsnep_test_taprio_extension(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload *qopt;
+ int i;
+
+ qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
+ if (!qopt)
+ return false;
+ for (i = 0; i < 255; i++)
+ qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 100000;
+ qopt->cycle_time_extension = 50000;
+ qopt->entries[0].gate_mask = 0x90;
+ qopt->entries[0].interval = 20000;
+ qopt->entries[1].gate_mask = 0x91;
+ qopt->entries[1].interval = 80000;
+ qopt->num_entries = 2;
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different phase */
+ qopt->base_time = ktime_set(0, 50000);
+ qopt->entries[0].gate_mask = 0x92;
+ qopt->entries[0].interval = 33000;
+ qopt->entries[1].gate_mask = 0x93;
+ qopt->entries[1].interval = 67000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 2);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different phase and longer cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1000000;
+ qopt->cycle_time_extension = 700000;
+ qopt->entries[0].gate_mask = 0x94;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0x95;
+ qopt->entries[1].interval = 600000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 7);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 700000);
+ qopt->cycle_time = 2000000;
+ qopt->cycle_time_extension = 1900000;
+ qopt->entries[0].gate_mask = 0x96;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0x97;
+ qopt->entries[1].interval = 1600000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 9);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different phase and shorter cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1500000;
+ qopt->cycle_time_extension = 700000;
+ qopt->entries[0].gate_mask = 0x98;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0x99;
+ qopt->entries[1].interval = 600000;
+ qopt->entries[2].gate_mask = 0x9A;
+ qopt->entries[2].interval = 500000;
+ qopt->num_entries = 3;
+ delay_base_time(adapter, qopt, 3);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 100000);
+ qopt->cycle_time = 500000;
+ qopt->cycle_time_extension = 300000;
+ qopt->entries[0].gate_mask = 0x9B;
+ qopt->entries[0].interval = 150000;
+ qopt->entries[1].gate_mask = 0x9C;
+ qopt->entries[1].interval = 350000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 9);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1000000;
+ qopt->cycle_time_extension = 700000;
+ qopt->entries[0].gate_mask = 0xAD;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0xAE;
+ qopt->entries[1].interval = 300000;
+ qopt->entries[2].gate_mask = 0xAF;
+ qopt->entries[2].interval = 300000;
+ qopt->num_entries = 3;
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 400000;
+ qopt->cycle_time_extension = 100000;
+ qopt->entries[0].gate_mask = 0xA0;
+ qopt->entries[0].interval = 200000;
+ qopt->entries[1].gate_mask = 0xA1;
+ qopt->entries[1].interval = 200000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 19);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 500000;
+ qopt->cycle_time_extension = 499999;
+ qopt->entries[0].gate_mask = 0xB2;
+ qopt->entries[0].interval = 100000;
+ qopt->entries[1].gate_mask = 0xB3;
+ qopt->entries[1].interval = 100000;
+ qopt->entries[2].gate_mask = 0xB4;
+ qopt->entries[2].interval = 100000;
+ qopt->entries[3].gate_mask = 0xB5;
+ qopt->entries[3].interval = 200000;
+ qopt->num_entries = 4;
+ delay_base_time(adapter, qopt, 19);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 6000000;
+ qopt->cycle_time_extension = 5999999;
+ qopt->entries[0].gate_mask = 0xC6;
+ qopt->entries[0].interval = 1000000;
+ qopt->entries[1].gate_mask = 0xC7;
+ qopt->entries[1].interval = 1000000;
+ qopt->entries[2].gate_mask = 0xC8;
+ qopt->entries[2].interval = 1000000;
+ qopt->entries[3].gate_mask = 0xC9;
+ qopt->entries[3].interval = 1500000;
+ qopt->entries[4].gate_mask = 0xCA;
+ qopt->entries[4].interval = 1500000;
+ qopt->num_entries = 5;
+ delay_base_time(adapter, qopt, 1);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ if (!disable_taprio(adapter))
+ goto failed;
+
+ kfree(qopt);
+
+ return true;
+
+failed:
+ disable_taprio(adapter);
+ kfree(qopt);
+
+ return false;
+}
+
+int tsnep_ethtool_get_test_count(void)
+{
+ return TSNEP_TEST_COUNT;
+}
+
+void tsnep_ethtool_get_test_strings(u8 *data)
+{
+ memcpy(data, tsnep_test_strings, sizeof(tsnep_test_strings));
+}
+
+void tsnep_ethtool_self_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ eth_test->len = TSNEP_TEST_COUNT;
+
+ if (eth_test->flags != ETH_TEST_FL_OFFLINE) {
+ /* no tests are done online */
+ data[TSNEP_TEST_ENABLE] = 0;
+ data[TSNEP_TEST_TAPRIO] = 0;
+ data[TSNEP_TEST_TAPRIO_CHANGE] = 0;
+ data[TSNEP_TEST_TAPRIO_EXTENSION] = 0;
+
+ return;
+ }
+
+ if (tsnep_test_gc_enable(adapter)) {
+ data[TSNEP_TEST_ENABLE] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_ENABLE] = 1;
+ }
+
+ if (tsnep_test_taprio(adapter)) {
+ data[TSNEP_TEST_TAPRIO] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_TAPRIO] = 1;
+ }
+
+ if (tsnep_test_taprio_change(adapter)) {
+ data[TSNEP_TEST_TAPRIO_CHANGE] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_TAPRIO_CHANGE] = 1;
+ }
+
+ if (tsnep_test_taprio_extension(adapter)) {
+ data[TSNEP_TEST_TAPRIO_EXTENSION] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_TAPRIO_EXTENSION] = 1;
+ }
+}
diff --git a/drivers/net/ethernet/engleder/tsnep_tc.c b/drivers/net/ethernet/engleder/tsnep_tc.c
new file mode 100644
index 000000000000..c4c6e1357317
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_tc.c
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+#include <net/pkt_sched.h>
+
+/* reserve the last operation for the additional operation at list change */
+#define TSNEP_MAX_GCL_NUM (TSNEP_GCL_COUNT - 1)
+
+static int tsnep_validate_gcl(struct tc_taprio_qopt_offload *qopt)
+{
+ int i;
+ u64 cycle_time;
+
+ if (!qopt->cycle_time)
+ return -ERANGE;
+ if (qopt->num_entries > TSNEP_MAX_GCL_NUM)
+ return -EINVAL;
+ cycle_time = 0;
+ for (i = 0; i < qopt->num_entries; i++) {
+ if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
+ return -EINVAL;
+ if (qopt->entries[i].gate_mask & ~TSNEP_GCL_MASK)
+ return -EINVAL;
+ if (qopt->entries[i].interval < TSNEP_GCL_MIN_INTERVAL)
+ return -EINVAL;
+ cycle_time += qopt->entries[i].interval;
+ }
+ if (qopt->cycle_time != cycle_time)
+ return -EINVAL;
+ if (qopt->cycle_time_extension >= qopt->cycle_time)
+ return -EINVAL;
+
+ return 0;
+}
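
The selftests earlier in this patch exercise exactly these rules; a minimal configuration that passes validation looks like this (editorial sketch, hypothetical values):

    qopt->cycle_time = 100000;           /* must equal the sum of all intervals */
    qopt->cycle_time_extension = 50000;  /* must stay below cycle_time */
    qopt->entries[0].command = TC_TAPRIO_CMD_SET_GATES;
    qopt->entries[0].gate_mask = 0x01;   /* must fit within TSNEP_GCL_MASK */
    qopt->entries[0].interval = 40000;   /* must be >= TSNEP_GCL_MIN_INTERVAL */
    qopt->entries[1].command = TC_TAPRIO_CMD_SET_GATES;
    qopt->entries[1].gate_mask = 0x02;
    qopt->entries[1].interval = 60000;
    qopt->num_entries = 2;               /* must be <= TSNEP_MAX_GCL_NUM */
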
+
+static void tsnep_write_gcl_operation(struct tsnep_gcl *gcl, int index,
+ u32 properties, u32 interval, bool flush)
+{
+ void __iomem *addr = gcl->addr +
+ sizeof(struct tsnep_gcl_operation) * index;
+
+ gcl->operation[index].properties = properties;
+ gcl->operation[index].interval = interval;
+
+ iowrite32(properties, addr);
+ iowrite32(interval, addr + sizeof(u32));
+
+ if (flush) {
+ /* flush write with read access */
+ ioread32(addr);
+ }
+}
+
+static u64 tsnep_change_duration(struct tsnep_gcl *gcl, int index)
+{
+ u64 duration;
+ int count;
+
+	/* the change needs to be triggered one or two operations before the
+	 * start of the new gate control list
+	 * - the change is triggered at the start of an operation (minimum one
+	 *   operation)
+	 * - an operation with an adjusted interval is inserted on demand to
+	 *   exactly meet the start of the new gate control list (optional)
+	 *
+	 * additionally, properties are read directly after the start of the
+	 * previous operation
+	 *
+	 * therefore, three operations need to be considered for the limit
+	 */
+ duration = 0;
+ count = 3;
+ while (count) {
+ duration += gcl->operation[index].interval;
+
+ index--;
+ if (index < 0)
+ index = gcl->count - 1;
+
+ count--;
+ }
+
+ return duration;
+}
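
For example (editorial sketch, hypothetical values): with a three-operation list of intervals [100000, 200000, 300000] ns, tsnep_change_duration(gcl, 1) sums the interval at index 1 and the two operations before it, wrapping around the list: 200000 + 100000 + 300000 = 600000 ns.
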
+
+static void tsnep_write_gcl(struct tsnep_gcl *gcl,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ int i;
+ u32 properties;
+ u64 extend;
+ u64 cut;
+
+ gcl->base_time = ktime_to_ns(qopt->base_time);
+ gcl->cycle_time = qopt->cycle_time;
+ gcl->cycle_time_extension = qopt->cycle_time_extension;
+
+ for (i = 0; i < qopt->num_entries; i++) {
+ properties = qopt->entries[i].gate_mask;
+ if (i == (qopt->num_entries - 1))
+ properties |= TSNEP_GCL_LAST;
+
+ tsnep_write_gcl_operation(gcl, i, properties,
+ qopt->entries[i].interval, true);
+ }
+ gcl->count = qopt->num_entries;
+
+ /* calculate change limit; i.e., the time needed between enable and
+ * start of new gate control list
+ */
+
+ /* case 1: extend cycle time for change
+ * - change duration of last operation
+ * - cycle time extension
+ */
+ extend = tsnep_change_duration(gcl, gcl->count - 1);
+ extend += gcl->cycle_time_extension;
+
+ /* case 2: cut cycle time for change
+ * - maximum change duration
+ */
+ cut = 0;
+ for (i = 0; i < gcl->count; i++)
+ cut = max(cut, tsnep_change_duration(gcl, i));
+
+	/* use the maximum, because the actual case (extend or cut) can be
+	 * determined only after the limit is known (chicken-and-egg problem)
+	 */
+ gcl->change_limit = max(extend, cut);
+}
+
+static u64 tsnep_gcl_start_after(struct tsnep_gcl *gcl, u64 limit)
+{
+ u64 start = gcl->base_time;
+ u64 n;
+
+ if (start <= limit) {
+ n = div64_u64(limit - start, gcl->cycle_time);
+ start += (n + 1) * gcl->cycle_time;
+ }
+
+ return start;
+}
+
+static u64 tsnep_gcl_start_before(struct tsnep_gcl *gcl, u64 limit)
+{
+ u64 start = gcl->base_time;
+ u64 n;
+
+ n = div64_u64(limit - start, gcl->cycle_time);
+ start += n * gcl->cycle_time;
+ if (start == limit)
+ start -= gcl->cycle_time;
+
+ return start;
+}
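
Both helpers round the base time to cycle boundaries relative to a limit (editorial sketch, hypothetical values): with base_time = 0 and cycle_time = 100000 ns, tsnep_gcl_start_after(gcl, 250000) returns 300000, the first cycle start strictly after the limit, while tsnep_gcl_start_before(gcl, 250000) returns 200000, the last cycle start strictly before it; for limit = 200000 exactly, start_before steps back one further cycle to 100000.
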
+
+static u64 tsnep_set_gcl_change(struct tsnep_gcl *gcl, int index, u64 change,
+ bool insert)
+{
+ /* previous operation triggers change and properties are evaluated at
+ * start of operation
+ */
+ if (index == 0)
+ index = gcl->count - 1;
+ else
+ index = index - 1;
+ change -= gcl->operation[index].interval;
+
+	/* optionally change to the new list with an operation inserted in between */
+ if (insert) {
+ void __iomem *addr = gcl->addr +
+ sizeof(struct tsnep_gcl_operation) * index;
+
+ gcl->operation[index].properties |= TSNEP_GCL_INSERT;
+ iowrite32(gcl->operation[index].properties, addr);
+ }
+
+ return change;
+}
+
+static void tsnep_clean_gcl(struct tsnep_gcl *gcl)
+{
+ int i;
+ u32 mask = TSNEP_GCL_LAST | TSNEP_GCL_MASK;
+ void __iomem *addr;
+
+ /* search for insert operation and reset properties */
+ for (i = 0; i < gcl->count; i++) {
+ if (gcl->operation[i].properties & ~mask) {
+ addr = gcl->addr +
+ sizeof(struct tsnep_gcl_operation) * i;
+
+ gcl->operation[i].properties &= mask;
+ iowrite32(gcl->operation[i].properties, addr);
+
+ break;
+ }
+ }
+}
+
+static u64 tsnep_insert_gcl_operation(struct tsnep_gcl *gcl, int ref,
+ u64 change, u32 interval)
+{
+ u32 properties;
+
+ properties = gcl->operation[ref].properties & TSNEP_GCL_MASK;
+ /* change to new list directly after inserted operation */
+ properties |= TSNEP_GCL_CHANGE;
+
+	/* the last operation of the list is reserved for the insert operation */
+ tsnep_write_gcl_operation(gcl, TSNEP_GCL_COUNT - 1, properties,
+ interval, false);
+
+ return tsnep_set_gcl_change(gcl, ref, change, true);
+}
+
+static u64 tsnep_extend_gcl(struct tsnep_gcl *gcl, u64 start, u32 extension)
+{
+ int ref = gcl->count - 1;
+ u32 interval = gcl->operation[ref].interval + extension;
+
+ start -= gcl->operation[ref].interval;
+
+ return tsnep_insert_gcl_operation(gcl, ref, start, interval);
+}
+
+static u64 tsnep_cut_gcl(struct tsnep_gcl *gcl, u64 start, u64 cycle_time)
+{
+ u64 sum = 0;
+ int i;
+
+	/* find the operation which shall be cut */
+ for (i = 0; i < gcl->count; i++) {
+ u64 sum_tmp = sum + gcl->operation[i].interval;
+ u64 interval;
+
+ /* sum up operations as long as cycle time is not exceeded */
+ if (sum_tmp > cycle_time)
+ break;
+
+ /* remaining interval must be big enough for hardware */
+ interval = cycle_time - sum_tmp;
+ if (interval > 0 && interval < TSNEP_GCL_MIN_INTERVAL)
+ break;
+
+ sum = sum_tmp;
+ }
+ if (sum == cycle_time) {
+ /* no need to cut operation itself or whole cycle
+ * => change exactly at operation
+ */
+ return tsnep_set_gcl_change(gcl, i, start + sum, false);
+ }
+ return tsnep_insert_gcl_operation(gcl, i, start + sum,
+ cycle_time - sum);
+}
+
+static int tsnep_enable_gcl(struct tsnep_adapter *adapter,
+ struct tsnep_gcl *gcl, struct tsnep_gcl *curr)
+{
+ u64 system_time;
+ u64 timeout;
+ u64 limit;
+
+	/* estimate the timeout limit after the timeout is enabled; the actual
+	 * timeout limit in hardware will be earlier than the estimate, so we
+	 * are on the safe side
+	 */
+ tsnep_get_system_time(adapter, &system_time);
+ timeout = system_time + TSNEP_GC_TIMEOUT;
+
+ if (curr)
+ limit = timeout + curr->change_limit;
+ else
+ limit = timeout;
+
+ gcl->start_time = tsnep_gcl_start_after(gcl, limit);
+
+	/* the gate control time register is only 32 bits wide, so the time must
+	 * be in the near future (the driver does not support far-future times)
+	 */
+ if ((gcl->start_time - system_time) >= U32_MAX)
+ return -EAGAIN;
+
+ if (curr) {
+ /* change gate control list */
+ u64 last;
+ u64 change;
+
+ last = tsnep_gcl_start_before(curr, gcl->start_time);
+ if ((last + curr->cycle_time) == gcl->start_time)
+ change = tsnep_cut_gcl(curr, last,
+ gcl->start_time - last);
+ else if (((gcl->start_time - last) <=
+ curr->cycle_time_extension) ||
+ ((gcl->start_time - last) <= TSNEP_GCL_MIN_INTERVAL))
+ change = tsnep_extend_gcl(curr, last,
+ gcl->start_time - last);
+ else
+ change = tsnep_cut_gcl(curr, last,
+ gcl->start_time - last);
+
+ WARN_ON(change <= timeout);
+ gcl->change = true;
+ iowrite32(change & 0xFFFFFFFF, adapter->addr + TSNEP_GC_CHANGE);
+ } else {
+ /* start gate control list */
+ WARN_ON(gcl->start_time <= timeout);
+ gcl->change = false;
+ iowrite32(gcl->start_time & 0xFFFFFFFF,
+ adapter->addr + TSNEP_GC_TIME);
+ }
+
+ return 0;
+}
+
+static int tsnep_taprio(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct tsnep_gcl *gcl;
+ struct tsnep_gcl *curr;
+ int retval;
+
+ if (!adapter->gate_control)
+ return -EOPNOTSUPP;
+
+ if (!qopt->enable) {
+ /* disable gate control if active */
+ mutex_lock(&adapter->gate_control_lock);
+
+ if (adapter->gate_control_active) {
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ adapter->gate_control_active = false;
+ }
+
+ mutex_unlock(&adapter->gate_control_lock);
+
+ return 0;
+ }
+
+ retval = tsnep_validate_gcl(qopt);
+ if (retval)
+ return retval;
+
+ mutex_lock(&adapter->gate_control_lock);
+
+ gcl = &adapter->gcl[adapter->next_gcl];
+ tsnep_write_gcl(gcl, qopt);
+
+ /* select current gate control list if active */
+ if (adapter->gate_control_active) {
+ if (adapter->next_gcl == 0)
+ curr = &adapter->gcl[1];
+ else
+ curr = &adapter->gcl[0];
+ } else {
+ curr = NULL;
+ }
+
+ for (;;) {
+		/* start the timeout which discards a late enable; this helps
+		 * ensure that the start/change time is still in the future at
+		 * enable
+		 */
+ iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
+
+ retval = tsnep_enable_gcl(adapter, gcl, curr);
+ if (retval) {
+ mutex_unlock(&adapter->gate_control_lock);
+
+ return retval;
+ }
+
+ /* enable gate control list */
+ if (adapter->next_gcl == 0)
+ iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);
+ else
+ iowrite8(TSNEP_GC_ENABLE_B, adapter->addr + TSNEP_GC);
+
+ /* done if timeout did not happen */
+ if (!(ioread32(adapter->addr + TSNEP_GC) &
+ TSNEP_GC_TIMEOUT_SIGNAL))
+ break;
+
+ /* timeout is acknowledged with any enable */
+ iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);
+
+ if (curr)
+ tsnep_clean_gcl(curr);
+
+ /* retry because of timeout */
+ }
+
+ adapter->gate_control_active = true;
+
+ if (adapter->next_gcl == 0)
+ adapter->next_gcl = 1;
+ else
+ adapter->next_gcl = 0;
+
+ mutex_unlock(&adapter->gate_control_lock);
+
+ return 0;
+}
+
+int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return tsnep_taprio(adapter, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int tsnep_tc_init(struct tsnep_adapter *adapter)
+{
+ if (!adapter->gate_control)
+ return 0;
+
+ /* open all gates */
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ iowrite32(TSNEP_GC_OPEN | TSNEP_GC_NEXT_OPEN, adapter->addr + TSNEP_GC);
+
+ adapter->gcl[0].addr = adapter->addr + TSNEP_GCL_A;
+ adapter->gcl[1].addr = adapter->addr + TSNEP_GCL_B;
+
+ return 0;
+}
+
+void tsnep_tc_cleanup(struct tsnep_adapter *adapter)
+{
+ if (!adapter->gate_control)
+ return;
+
+ if (adapter->gate_control_active) {
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ adapter->gate_control_active = false;
+ }
+}
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index b1c8ffea6ad2..d618a8b785b0 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -945,7 +945,9 @@ static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs,
}
static void ethoc_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ethoc *priv = netdev_priv(dev);
@@ -961,7 +963,9 @@ static void ethoc_get_ringparam(struct net_device *dev,
}
static int ethoc_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ethoc *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 97c5d70de76e..691605c15265 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1178,8 +1178,11 @@ static void ftgmac100_get_drvinfo(struct net_device *netdev,
strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
-static void ftgmac100_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+static void
+ftgmac100_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct ftgmac100 *priv = netdev_priv(netdev);
@@ -1190,8 +1193,11 @@ static void ftgmac100_get_ringparam(struct net_device *netdev,
ering->tx_pending = priv->tx_q_entries;
}
-static int ftgmac100_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+static int
+ftgmac100_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct ftgmac100 *priv = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 6b2927d863e2..d6871437d951 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2325,7 +2325,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
txq = netdev_get_tx_queue(net_dev, queue_mapping);
/* LLTX requires to do our own update of trans_start */
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
@@ -2531,7 +2531,7 @@ static int dpaa_xdp_xmit_frame(struct net_device *net_dev,
/* Bump the trans_start */
txq = netdev_get_tx_queue(net_dev, smp_processor_id());
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
err = dpaa_xmit(priv, percpu_stats, smp_processor_id(), &fd);
if (err) {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index ef8f0a055024..34b2a73c347f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -90,88 +90,6 @@ static int dpaa2_mac_get_if_mode(struct fwnode_handle *dpmac_node,
return err;
}
-static bool dpaa2_mac_phy_mode_mismatch(struct dpaa2_mac *mac,
- phy_interface_t interface)
-{
- switch (interface) {
- /* We can switch between SGMII and 1000BASE-X at runtime with
- * pcs-lynx
- */
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- if (mac->pcs &&
- (mac->if_mode == PHY_INTERFACE_MODE_SGMII ||
- mac->if_mode == PHY_INTERFACE_MODE_1000BASEX))
- return false;
- return interface != mac->if_mode;
-
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_USXGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- return (interface != mac->if_mode);
- default:
- return true;
- }
-}
-
-static void dpaa2_mac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- dpaa2_mac_phy_mode_mismatch(mac, state->interface)) {
- goto empty_set;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_NA:
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_USXGMII:
- phylink_set_10g_modes(mask);
- if (state->interface == PHY_INTERFACE_MODE_10GBASER)
- break;
- phylink_set(mask, 5000baseT_Full);
- phylink_set(mask, 2500baseT_Full);
- fallthrough;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 1000baseT_Full);
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
- break;
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 10baseT_Full);
- break;
- default:
- goto empty_set;
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-
-empty_set:
- linkmode_zero(supported);
-}
-
static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -243,7 +161,7 @@ static void dpaa2_mac_link_down(struct phylink_config *config,
}
static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
- .validate = dpaa2_mac_validate,
+ .validate = phylink_generic_validate,
.mac_config = dpaa2_mac_config,
.mac_link_up = dpaa2_mac_link_up,
.mac_link_down = dpaa2_mac_link_down,
@@ -336,9 +254,34 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
return err;
}
+ memset(&mac->phylink_config, 0, sizeof(mac->phylink_config));
mac->phylink_config.dev = &net_dev->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
+ mac->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10FD | MAC_100FD | MAC_1000FD | MAC_2500FD | MAC_5000FD |
+ MAC_10000FD;
+
+ /* We support the current interface mode, and if we have a PCS
+ * similar interface modes that do not require the PLLs to be
+ * reconfigured.
+ */
+ __set_bit(mac->if_mode, mac->phylink_config.supported_interfaces);
+ if (mac->pcs) {
+ switch (mac->if_mode) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ mac->phylink_config.supported_interfaces);
+ break;
+
+ default:
+ break;
+ }
+ }
+
phylink = phylink_create(&mac->phylink_config,
dpmac_node, mac->if_mode,
&dpaa2_mac_phylink_ops);
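
Both this hunk and the enetc one below follow the same conversion pattern: the driver-specific .validate callback is dropped in favour of phylink_generic_validate(), and the driver instead declares its abilities up front. A minimal sketch of the pattern (capability bits hypothetical):

    static const struct phylink_mac_ops example_ops = {
        .validate = phylink_generic_validate,
        /* remaining mac ops unchanged */
    };

    /* declared once at phylink setup instead of computed per validate call */
    config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_1000FD;
    __set_bit(PHY_INTERFACE_MODE_SGMII, config->supported_interfaces);

phylink then derives the supported link modes from mac_capabilities and supported_interfaces, which is what the removed dpaa2_mac_validate() used to compute by hand.
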
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 910b9f722504..fa5b4f885b17 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -562,7 +562,9 @@ static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir,
}
static void enetc_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 0e87c7043b77..fe6a544f37f0 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -930,45 +930,6 @@ static void enetc_mdiobus_destroy(struct enetc_pf *pf)
enetc_imdio_remove(pf);
}
-static void enetc_pl_mac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- state->interface != PHY_INTERFACE_MODE_2500BASEX &&
- state->interface != PHY_INTERFACE_MODE_USXGMII &&
- !phy_interface_mode_is_rgmii(state->interface)) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 1000baseT_Half);
- phylink_set(mask, 1000baseT_Full);
-
- if (state->interface == PHY_INTERFACE_MODE_INTERNAL ||
- state->interface == PHY_INTERFACE_MODE_2500BASEX ||
- state->interface == PHY_INTERFACE_MODE_USXGMII) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
static void enetc_pl_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
@@ -1096,7 +1057,7 @@ static void enetc_pl_mac_link_down(struct phylink_config *config,
}
static const struct phylink_mac_ops enetc_mac_phylink_ops = {
- .validate = enetc_pl_mac_validate,
+ .validate = phylink_generic_validate,
.mac_config = enetc_pl_mac_config,
.mac_link_up = enetc_pl_mac_link_up,
.mac_link_down = enetc_pl_mac_link_down,
@@ -1111,6 +1072,18 @@ static int enetc_phylink_create(struct enetc_ndev_priv *priv,
pf->phylink_config.dev = &priv->ndev->dev;
pf->phylink_config.type = PHYLINK_NETDEV;
+ pf->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_USXGMII,
+ pf->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(pf->phylink_config.supported_interfaces);
phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node),
pf->if_mode, &enetc_mac_phylink_ops);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index bc418b910999..f38e70692514 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3558,7 +3558,7 @@ static int fec_enet_init(struct net_device *ndev)
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
- ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
+ netif_set_gso_max_segs(ndev, FEC_MAX_TSO_SEGS);
/* enable hw accelerator */
ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 7b32ed29bf4c..ff756265d58f 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -372,7 +372,9 @@ static int gfar_scoalesce(struct net_device *dev,
* rx, rx_mini, and rx_jumbo rings are the same size, as mini and
* jumbo are ignored by the driver */
static void gfar_gringparam(struct net_device *dev,
- struct ethtool_ringparam *rvals)
+ struct ethtool_ringparam *rvals,
+ struct kernel_ethtool_ringparam *kernel_rvals,
+ struct netlink_ext_ack *extack)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -399,7 +401,9 @@ static void gfar_gringparam(struct net_device *dev,
* necessary so that we don't mess things up while we're in motion.
*/
static int gfar_sringparam(struct net_device *dev,
- struct ethtool_ringparam *rvals)
+ struct ethtool_ringparam *rvals,
+ struct kernel_ethtool_ringparam *kernel_rvals,
+ struct netlink_ext_ack *extack)
{
struct gfar_private *priv = netdev_priv(dev);
int err = 0, i;
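Note: the gianfar hunks above are one instance of a tree-wide ethtool API change that runs through most of this diff: .get_ringparam and .set_ringparam gain a struct kernel_ethtool_ringparam (carrying netlink-only extensions such as rx_buf_len) and a struct netlink_ext_ack for error reporting. Converted drivers that have no extended parameters simply ignore the new arguments; a hypothetical minimal implementation:

static void example_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
{
	/* legacy fields as before; kernel_ring and extack may go unused */
	ring->rx_max_pending = 256;
	ring->rx_pending = 128;
	ring->tx_max_pending = 256;
	ring->tx_pending = 128;
}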
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 14c08a868190..69b2b98b1525 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -207,7 +207,9 @@ uec_get_regs(struct net_device *netdev,
static void
uec_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct ucc_geth_info *ug_info = ugeth->ug_info;
@@ -226,7 +228,9 @@ uec_get_ringparam(struct net_device *netdev,
static int
uec_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct ucc_geth_info *ug_info = ugeth->ug_info;
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index c8df47a97fa4..fd2d2c705391 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -419,7 +419,9 @@ static int gve_set_channels(struct net_device *netdev,
}
static void gve_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *cmd)
+ struct ethtool_ringparam *cmd,
+ struct kernel_ethtool_ringparam *kernel_cmd,
+ struct netlink_ext_ack *extack)
{
struct gve_priv *priv = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index ab7390225942..d7a27c244d48 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -663,9 +663,13 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
* hns_get_ringparam - get ring parameter
* @net_dev: net device
* @param: ethtool parameter
+ * @kernel_param: ethtool external parameter
+ * @extack: netlink extended ACK report struct
*/
static void hns_get_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index c2bd2584201f..b668df6193be 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -80,6 +80,9 @@ enum hclge_mbx_tbl_cfg_subcode {
#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U
#define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM 4
+#define HCLGE_RESET_SCHED_TIMEOUT (3 * HZ)
+#define HCLGE_MBX_SCHED_TIMEOUT (HZ / 2)
+
struct hclge_ring_chain_param {
u8 ring_type;
u8 tqp_index;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 3f7a9a4c59d5..54caf0dd1204 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -859,6 +859,20 @@ struct hnae3_handle {
#define hnae3_get_bit(origin, shift) \
hnae3_get_field(origin, 0x1 << (shift), shift)
+#define HNAE3_FORMAT_MAC_ADDR_LEN 18
+#define HNAE3_FORMAT_MAC_ADDR_OFFSET_0 0
+#define HNAE3_FORMAT_MAC_ADDR_OFFSET_4 4
+#define HNAE3_FORMAT_MAC_ADDR_OFFSET_5 5
+
+static inline void hnae3_format_mac_addr(char *format_mac_addr,
+ const u8 *mac_addr)
+{
+ snprintf(format_mac_addr, HNAE3_FORMAT_MAC_ADDR_LEN, "%02x:**:**:**:%02x:%02x",
+ mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_0],
+ mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_4],
+ mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_5]);
+}
+
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
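Note: hnae3_format_mac_addr() keeps full MAC addresses out of the kernel log: bytes 1-3 are masked, so a4:12:34:56:78:9a is printed as a4:**:**:**:78:9a. Every hns3 call site converted in the rest of this diff follows the same two-step pattern:

	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];

	hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr);
	netdev_info(netdev, "MAC address: %s\n", format_mac_addr);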
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 9ccebbaa0d69..496ddf397bd4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -17,6 +17,7 @@
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <net/gre.h>
+#include <net/gro.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
@@ -53,10 +54,6 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Network interface message level setting");
-static unsigned int tx_spare_buf_size;
-module_param(tx_spare_buf_size, uint, 0400);
-MODULE_PARM_DESC(tx_spare_buf_size, "Size used to allocate tx spare buffer");
-
static unsigned int tx_sgl = 1;
module_param(tx_sgl, uint, 0600);
MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");
@@ -1041,8 +1038,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
dma_addr_t dma;
int order;
- alloc_size = tx_spare_buf_size ? tx_spare_buf_size :
- ring->tqp->handle->kinfo.tx_spare_buf_size;
+ alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
if (!alloc_size)
return;
@@ -2255,6 +2251,8 @@ out_err_tx_ok:
static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
+ char format_mac_addr_perm[HNAE3_FORMAT_MAC_ADDR_LEN];
+ char format_mac_addr_sa[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hnae3_handle *h = hns3_get_handle(netdev);
struct sockaddr *mac_addr = p;
int ret;
@@ -2263,8 +2261,9 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
- netdev_info(netdev, "already using mac address %pM\n",
- mac_addr->sa_data);
+ hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data);
+ netdev_info(netdev, "already using mac address %s\n",
+ format_mac_addr_sa);
return 0;
}
@@ -2273,8 +2272,10 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
*/
if (!hns3_is_phys_func(h->pdev) &&
!is_zero_ether_addr(netdev->perm_addr)) {
- netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allow\n",
- netdev->perm_addr, mac_addr->sa_data);
+ hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr);
+ hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data);
+ netdev_err(netdev, "has permanent MAC %s, user MAC %s not allow\n",
+ format_mac_addr_perm, format_mac_addr_sa);
return -EPERM;
}
@@ -2678,10 +2679,17 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
unsigned long trans_start;
q = netdev_get_tx_queue(ndev, i);
- trans_start = q->trans_start;
+ trans_start = READ_ONCE(q->trans_start);
if (netif_xmit_stopped(q) &&
time_after(jiffies,
(trans_start + ndev->watchdog_timeo))) {
+#ifdef CONFIG_BQL
+ struct dql *dql = &q->dql;
+
+ netdev_info(ndev, "DQL info last_cnt: %u, queued: %u, adj_limit: %u, completed: %u\n",
+ dql->last_obj_cnt, dql->num_queued,
+ dql->adj_limit, dql->num_completed);
+#endif
timeout_queue = i;
netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
q->state,
@@ -2836,14 +2844,16 @@ static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf,
static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
if (!h->ae_algo->ops->set_vf_mac)
return -EOPNOTSUPP;
if (is_multicast_ether_addr(mac)) {
+ hnae3_format_mac_addr(format_mac_addr, mac);
netdev_err(netdev,
- "Invalid MAC:%pM specified. Could not set MAC\n",
- mac);
+ "Invalid MAC:%s specified. Could not set MAC\n",
+ format_mac_addr);
return -EINVAL;
}
@@ -4934,6 +4944,7 @@ static void hns3_uninit_all_ring(struct hns3_nic_priv *priv)
static int hns3_init_mac_addr(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hnae3_handle *h = priv->ae_handle;
u8 mac_addr_temp[ETH_ALEN];
int ret = 0;
@@ -4944,8 +4955,9 @@ static int hns3_init_mac_addr(struct net_device *netdev)
/* Check if the MAC address is valid, if not get a random one */
if (!is_valid_ether_addr(mac_addr_temp)) {
eth_hw_addr_random(netdev);
- dev_warn(priv->dev, "using random MAC address %pM\n",
- netdev->dev_addr);
+ hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr);
+ dev_warn(priv->dev, "using random MAC address %s\n",
+ format_mac_addr);
} else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
eth_hw_addr_set(netdev, mac_addr_temp);
ether_addr_copy(netdev->perm_addr, mac_addr_temp);
@@ -4997,8 +5009,10 @@ static void hns3_client_stop(struct hnae3_handle *handle)
static void hns3_info_show(struct hns3_nic_priv *priv)
{
struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
- dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
+ hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr);
+ dev_info(priv->dev, "MAC address: %s\n", format_mac_addr);
dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
@@ -5531,8 +5545,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
return 0;
}
-static int hns3_reset_notify(struct hnae3_handle *handle,
- enum hnae3_reset_notify_type type)
+int hns3_reset_notify(struct hnae3_handle *handle,
+ enum hnae3_reset_notify_type type)
{
int ret = 0;
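Note: the q->trans_start read above becomes READ_ONCE() because the watchdog samples the field while xmit paths update it with no common lock. The matching writer is txq_trans_cond_update(), which ibmvnic and igb switch to later in this diff; it is roughly:

static inline void txq_trans_cond_update(struct netdev_queue *txq)
{
	unsigned long now = jiffies;

	/* only dirty the shared cacheline when the stamp changes */
	if (READ_ONCE(txq->trans_start) != now)
		WRITE_ONCE(txq->trans_start, now);
}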
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 1715c98d906d..361a6390e159 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -705,6 +705,8 @@ void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
u32 ql_value);
void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
+int hns3_reset_notify(struct hnae3_handle *handle,
+ enum hnae3_reset_notify_type type);
#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index c9b4568d7a8d..c06c39ece80d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -643,11 +643,13 @@ static u32 hns3_get_link(struct net_device *netdev)
}
static void hns3_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
- int queue_num = h->kinfo.num_tqps;
+ int rx_queue_index = h->kinfo.num_tqps;
if (hns3_nic_resetting(netdev)) {
netdev_err(netdev, "dev resetting!");
@@ -658,7 +660,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
param->rx_max_pending = HNS3_RING_MAX_PENDING;
param->tx_pending = priv->ring[0].desc_num;
- param->rx_pending = priv->ring[queue_num].desc_num;
+ param->rx_pending = priv->ring[rx_queue_index].desc_num;
+ kernel_param->rx_buf_len = priv->ring[rx_queue_index].buf_size;
}
static void hns3_get_pauseparam(struct net_device *netdev,
@@ -1064,14 +1067,23 @@ static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv)
}
static int hns3_check_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param)
{
+#define RX_BUF_LEN_2K 2048
+#define RX_BUF_LEN_4K 4096
if (hns3_nic_resetting(ndev))
return -EBUSY;
if (param->rx_mini_pending || param->rx_jumbo_pending)
return -EINVAL;
+ if (kernel_param->rx_buf_len != RX_BUF_LEN_2K &&
+ kernel_param->rx_buf_len != RX_BUF_LEN_4K) {
+ netdev_err(ndev, "Rx buf len only support 2048 and 4096\n");
+ return -EINVAL;
+ }
+
if (param->tx_pending > HNS3_RING_MAX_PENDING ||
param->tx_pending < HNS3_RING_MIN_PENDING ||
param->rx_pending > HNS3_RING_MAX_PENDING ||
@@ -1084,8 +1096,26 @@ static int hns3_check_ringparam(struct net_device *ndev,
return 0;
}
+static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ h->kinfo.rx_buf_len = rx_buf_len;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ h->kinfo.tqp[i]->buf_size = rx_buf_len;
+ priv->ring[i + h->kinfo.num_tqps].buf_size = rx_buf_len;
+ }
+
+ return 0;
+}
+
static int hns3_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
@@ -1094,9 +1124,10 @@ static int hns3_set_ringparam(struct net_device *ndev,
u32 old_tx_desc_num, new_tx_desc_num;
u32 old_rx_desc_num, new_rx_desc_num;
u16 queue_num = h->kinfo.num_tqps;
+ u32 old_rx_buf_len;
int ret, i;
- ret = hns3_check_ringparam(ndev, param);
+ ret = hns3_check_ringparam(ndev, param, kernel_param);
if (ret)
return ret;
@@ -1105,8 +1136,10 @@ static int hns3_set_ringparam(struct net_device *ndev,
new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
old_tx_desc_num = priv->ring[0].desc_num;
old_rx_desc_num = priv->ring[queue_num].desc_num;
+ old_rx_buf_len = priv->ring[queue_num].buf_size;
if (old_tx_desc_num == new_tx_desc_num &&
- old_rx_desc_num == new_rx_desc_num)
+ old_rx_desc_num == new_rx_desc_num &&
+ kernel_param->rx_buf_len == old_rx_buf_len)
return 0;
tmp_rings = hns3_backup_ringparam(priv);
@@ -1117,19 +1150,22 @@ static int hns3_set_ringparam(struct net_device *ndev,
}
netdev_info(ndev,
- "Changing Tx/Rx ring depth from %u/%u to %u/%u\n",
+ "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %d to %d\n",
old_tx_desc_num, old_rx_desc_num,
- new_tx_desc_num, new_rx_desc_num);
+ new_tx_desc_num, new_rx_desc_num,
+ old_rx_buf_len, kernel_param->rx_buf_len);
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num);
+ hns3_change_rx_buf_len(ndev, kernel_param->rx_buf_len);
ret = hns3_init_all_ring(priv);
if (ret) {
- netdev_err(ndev, "Change bd num fail, revert to old value(%d)\n",
+ netdev_err(ndev, "set ringparam fail, revert to old value(%d)\n",
ret);
+ hns3_change_rx_buf_len(ndev, old_rx_buf_len);
hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
old_rx_desc_num);
for (i = 0; i < h->kinfo.num_tqps * 2; i++)
@@ -1699,6 +1735,7 @@ static int hns3_get_tunable(struct net_device *netdev,
void *data)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
int ret = 0;
switch (tuna->id) {
@@ -1709,6 +1746,9 @@ static int hns3_get_tunable(struct net_device *netdev,
case ETHTOOL_RX_COPYBREAK:
*(u32 *)data = priv->rx_copybreak;
break;
+ case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
+ *(u32 *)data = h->kinfo.tx_spare_buf_size;
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -1717,11 +1757,43 @@ static int hns3_get_tunable(struct net_device *netdev,
return ret;
}
+static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
+ u32 data)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int ret;
+
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
+ h->kinfo.tx_spare_buf_size = data;
+
+ ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hns3_reset_notify(h, HNAE3_UP_CLIENT);
+ if (ret)
+ hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
+
+ return ret;
+}
+
static int hns3_set_tunable(struct net_device *netdev,
const struct ethtool_tunable *tuna,
const void *data)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ u32 old_tx_spare_buf_size, new_tx_spare_buf_size;
struct hnae3_handle *h = priv->ae_handle;
int i, ret = 0;
@@ -1740,6 +1812,26 @@ static int hns3_set_tunable(struct net_device *netdev,
priv->ring[i].rx_copybreak = priv->rx_copybreak;
break;
+ case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
+ old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
+ new_tx_spare_buf_size = *(u32 *)data;
+ ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size);
+ if (ret) {
+ int ret1;
+
+ netdev_warn(netdev,
+ "change tx spare buf size fail, revert to old value\n");
+ ret1 = hns3_set_tx_spare_buf_size(netdev,
+ old_tx_spare_buf_size);
+ if (ret1) {
+ netdev_err(netdev,
+ "revert to old tx spare buf size fail\n");
+ return ret1;
+ }
+
+ return ret;
+ }
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -1755,6 +1847,8 @@ static int hns3_set_tunable(struct net_device *netdev,
ETHTOOL_COALESCE_MAX_FRAMES | \
ETHTOOL_COALESCE_USE_CQE)
+#define HNS3_ETHTOOL_RING ETHTOOL_RING_USE_RX_BUF_LEN
+
static int hns3_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *info)
{
@@ -1833,6 +1927,7 @@ static int hns3_get_link_ext_state(struct net_device *netdev,
static const struct ethtool_ops hns3vf_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
+ .supported_ring_params = HNS3_ETHTOOL_RING,
.get_drvinfo = hns3_get_drvinfo,
.get_ringparam = hns3_get_ringparam,
.set_ringparam = hns3_set_ringparam,
@@ -1864,6 +1959,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
static const struct ethtool_ops hns3_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
+ .supported_ring_params = HNS3_ETHTOOL_RING,
.self_test = hns3_self_test,
.get_drvinfo = hns3_get_drvinfo,
.get_link = hns3_get_link,
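Note: setting .supported_ring_params to ETHTOOL_RING_USE_RX_BUF_LEN is how hns3 opts in to the extended ring parameter: the ethtool netlink core then forwards a userspace-supplied rx buffer length in kernel_param->rx_buf_len, while drivers that do not advertise the bit never see it. Assuming an ethtool build with rx-buf-len support, the knob would be driven with something like:

	ethtool -G eth0 rx-buf-len 4096

and the driver-side opt-in is just the flag plus the two callbacks:

static const struct ethtool_ops example_ethtool_ops = {
	.supported_ring_params	= ETHTOOL_RING_USE_RX_BUF_LEN,
	.get_ringparam		= example_get_ringparam,
	.set_ringparam		= example_set_ringparam,
};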
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index c526591a7240..5b6018204f92 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -321,10 +321,10 @@ static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
{true, "IGU_RX_OUT_UDP0_PKT"},
{true, "IGU_RX_IN_UDP0_PKT"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
+ {true, "IGU_MC_CAR_DROP_PKT_L"},
+ {true, "IGU_MC_CAR_DROP_PKT_H"},
+ {true, "IGU_BC_CAR_DROP_PKT_L"},
+ {true, "IGU_BC_CAR_DROP_PKT_H"},
{false, "Reserved"},
{true, "IGU_RX_OVERSIZE_PKT_L"},
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index c2a58101144e..a0628d139149 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -2933,16 +2933,20 @@ static int hclge_mac_init(struct hclge_dev *hdev)
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
+ hdev->last_mbx_scheduled = jiffies;
mod_delayed_work(hclge_wq, &hdev->service_task, 0);
+ }
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+ !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
+ hdev->last_rst_scheduled = jiffies;
mod_delayed_work(hclge_wq, &hdev->service_task, 0);
+ }
}
static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
@@ -3831,6 +3835,13 @@ static void hclge_mailbox_service_task(struct hclge_dev *hdev)
test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
return;
+ if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+ HCLGE_MBX_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "mbx service task is scheduled after %ums on cpu%u!\n",
+ jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled),
+ smp_processor_id());
+
hclge_mbx_handler(hdev);
clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
@@ -4480,6 +4491,13 @@ static void hclge_reset_service_task(struct hclge_dev *hdev)
if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
return;
+ if (time_is_before_jiffies(hdev->last_rst_scheduled +
+ HCLGE_RESET_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "reset service task is scheduled after %ums on cpu%u!\n",
+ jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
+ smp_processor_id());
+
down(&hdev->reset_sem);
set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
@@ -8743,6 +8761,7 @@ int hclge_update_mac_list(struct hclge_vport *vport,
enum HCLGE_MAC_ADDR_TYPE mac_type,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_node *mac_node;
struct list_head *list;
@@ -8767,9 +8786,10 @@ int hclge_update_mac_list(struct hclge_vport *vport,
/* if this address is never added, unnecessary to delete */
if (state == HCLGE_MAC_TO_DEL) {
spin_unlock_bh(&vport->mac_list_lock);
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_err(&hdev->pdev->dev,
- "failed to delete address %pM from mac list\n",
- addr);
+ "failed to delete address %s from mac list\n",
+ format_mac_addr);
return -ENOENT;
}
@@ -8802,6 +8822,7 @@ static int hclge_add_uc_addr(struct hnae3_handle *handle,
int hclge_add_uc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc;
@@ -8812,9 +8833,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
if (is_zero_ether_addr(addr) ||
is_broadcast_ether_addr(addr) ||
is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_err(&hdev->pdev->dev,
- "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
- addr, is_zero_ether_addr(addr),
+ "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n",
+ format_mac_addr, is_zero_ether_addr(addr),
is_broadcast_ether_addr(addr),
is_multicast_ether_addr(addr));
return -EINVAL;
@@ -8871,6 +8893,7 @@ static int hclge_rm_uc_addr(struct hnae3_handle *handle,
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
int ret;
@@ -8879,8 +8902,9 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
if (is_zero_ether_addr(addr) ||
is_broadcast_ether_addr(addr) ||
is_multicast_ether_addr(addr)) {
- dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
- addr);
+ hnae3_format_mac_addr(format_mac_addr, addr);
+ dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n",
+ format_mac_addr);
return -EINVAL;
}
@@ -8911,6 +8935,7 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle,
int hclge_add_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc[3];
@@ -8919,9 +8944,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
/* mac addr check */
if (!is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_err(&hdev->pdev->dev,
- "Add mc mac err! invalid mac:%pM.\n",
- addr);
+ "Add mc mac err! invalid mac:%s.\n",
+ format_mac_addr);
return -EINVAL;
}
memset(&req, 0, sizeof(req));
@@ -8973,6 +8999,7 @@ static int hclge_rm_mc_addr(struct hnae3_handle *handle,
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
enum hclge_cmd_status status;
@@ -8980,9 +9007,10 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
/* mac addr check */
if (!is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_dbg(&hdev->pdev->dev,
- "Remove mc mac err! invalid mac:%pM.\n",
- addr);
+ "Remove mc mac err! invalid mac:%s.\n",
+ format_mac_addr);
return -EINVAL;
}
@@ -9422,16 +9450,18 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
u8 *mac_addr)
{
struct hclge_vport *vport = hclge_get_vport(handle);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
vport = hclge_get_vf_vport(hdev, vf);
if (!vport)
return -EINVAL;
+ hnae3_format_mac_addr(format_mac_addr, mac_addr);
if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
dev_info(&hdev->pdev->dev,
- "Specified MAC(=%pM) is same as before, no change committed!\n",
- mac_addr);
+ "Specified MAC(=%s) is same as before, no change committed!\n",
+ format_mac_addr);
return 0;
}
@@ -9439,13 +9469,13 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
dev_info(&hdev->pdev->dev,
- "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
- vf, mac_addr);
+ "MAC of VF %d has been set to %s, and it will be reinitialized!\n",
+ vf, format_mac_addr);
return hclge_inform_reset_assert_to_vf(vport);
}
- dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
- vf, mac_addr);
+ dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n",
+ vf, format_mac_addr);
return 0;
}
@@ -9549,6 +9579,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
{
const unsigned char *new_addr = (const unsigned char *)p;
struct hclge_vport *vport = hclge_get_vport(handle);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
unsigned char *old_addr = NULL;
int ret;
@@ -9557,9 +9588,10 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
if (is_zero_ether_addr(new_addr) ||
is_broadcast_ether_addr(new_addr) ||
is_multicast_ether_addr(new_addr)) {
+ hnae3_format_mac_addr(format_mac_addr, new_addr);
dev_err(&hdev->pdev->dev,
- "change uc mac err! invalid mac: %pM.\n",
- new_addr);
+ "change uc mac err! invalid mac: %s.\n",
+ format_mac_addr);
return -EINVAL;
}
@@ -9577,9 +9609,10 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
spin_lock_bh(&vport->mac_list_lock);
ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
if (ret) {
+ hnae3_format_mac_addr(format_mac_addr, new_addr);
dev_err(&hdev->pdev->dev,
- "failed to change the mac addr:%pM, ret = %d\n",
- new_addr, ret);
+ "failed to change the mac addr:%s, ret = %d\n",
+ format_mac_addr, ret);
spin_unlock_bh(&vport->mac_list_lock);
if (!is_first)
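Note: the scheduling-delay warnings added to hclge_main.c and hclge_mbx.c use one pattern: stamp jiffies when the service task is queued, then warn in the handler if the deadline has already passed. time_is_before_jiffies(x) is true when jiffies has moved past x, i.e. the deadline was missed. Condensed, with hypothetical names (last_scheduled, SCHED_TIMEOUT):

	hdev->last_scheduled = jiffies;		/* at queueing time */

	/* ... later, in the delayed-work handler ... */
	if (time_is_before_jiffies(hdev->last_scheduled + SCHED_TIMEOUT))
		dev_warn(dev, "task delayed by %ums\n",
			 jiffies_to_msecs(jiffies - hdev->last_scheduled));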
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index ebba603483a0..42ce1eee33c4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -955,6 +955,8 @@ struct hclge_dev {
u16 hclge_fd_rule_num;
unsigned long serv_processed_cnt;
unsigned long last_serv_processed;
+ unsigned long last_rst_scheduled;
+ unsigned long last_mbx_scheduled;
unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
u8 fd_en;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 65d78ee4d65a..c495df2e5953 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -848,6 +848,14 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
resp_msg.status = ret;
+ if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+ HCLGE_MBX_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "resp vport%u mbx(%u,%u) late\n",
+ req->mbx_src_vfid,
+ req->msg.code,
+ req->msg.subcode);
+
hclge_gen_resp_to_vf(vport, req, &resp_msg);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 41afaeea881b..3f29062eaf2e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1514,15 +1514,18 @@ static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
struct list_head *list,
enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclgevf_mac_addr_node *mac_node, *tmp;
int ret;
list_for_each_entry_safe(mac_node, tmp, list, node) {
ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
if (ret) {
+ hnae3_format_mac_addr(format_mac_addr,
+ mac_node->mac_addr);
dev_err(&hdev->pdev->dev,
- "failed to configure mac %pM, state = %d, ret = %d\n",
- mac_node->mac_addr, mac_node->state, ret);
+ "failed to configure mac %s, state = %d, ret = %d\n",
+ format_mac_addr, mac_node->state, ret);
return;
}
if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index a85667078b72..93192f58ac88 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -547,7 +547,9 @@ static void hinic_get_drvinfo(struct net_device *netdev,
}
static void hinic_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
@@ -580,7 +582,9 @@ static int check_ringparam_valid(struct hinic_dev *nic_dev,
}
static int hinic_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
u16 new_sq_depth, new_rq_depth;
@@ -1205,8 +1209,6 @@ static u32 hinic_get_rxfh_indir_size(struct net_device *netdev)
return HINIC_RSS_INDIR_SIZE;
}
-#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof(arr[0])))
-
#define HINIC_FUNC_STAT(_stat_item) { \
.name = #_stat_item, \
.size = sizeof_field(struct hinic_vport_stats, _stat_item), \
@@ -1374,7 +1376,7 @@ static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data)
break;
hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats);
- for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++, i++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++, i++) {
p = (char *)&txq_stats +
hinic_tx_queue_stats[j].offset;
data[i] = (hinic_tx_queue_stats[j].size ==
@@ -1387,7 +1389,7 @@ static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data)
break;
hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats);
- for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++, i++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++, i++) {
p = (char *)&rxq_stats +
hinic_rx_queue_stats[j].offset;
data[i] = (hinic_rx_queue_stats[j].size ==
@@ -1411,7 +1413,7 @@ static void hinic_get_ethtool_stats(struct net_device *netdev,
netif_err(nic_dev, drv, netdev,
"Failed to get vport stats from firmware\n");
- for (j = 0; j < ARRAY_LEN(hinic_function_stats); j++, i++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_function_stats); j++, i++) {
p = (char *)&vport_stats + hinic_function_stats[j].offset;
data[i] = (hinic_function_stats[j].size ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
@@ -1420,8 +1422,8 @@ static void hinic_get_ethtool_stats(struct net_device *netdev,
port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
if (!port_stats) {
memset(&data[i], 0,
- ARRAY_LEN(hinic_port_stats) * sizeof(*data));
- i += ARRAY_LEN(hinic_port_stats);
+ ARRAY_SIZE(hinic_port_stats) * sizeof(*data));
+ i += ARRAY_SIZE(hinic_port_stats);
goto get_drv_stats;
}
@@ -1430,7 +1432,7 @@ static void hinic_get_ethtool_stats(struct net_device *netdev,
netif_err(nic_dev, drv, netdev,
"Failed to get port stats from firmware\n");
- for (j = 0; j < ARRAY_LEN(hinic_port_stats); j++, i++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_port_stats); j++, i++) {
p = (char *)port_stats + hinic_port_stats[j].offset;
data[i] = (hinic_port_stats[j].size ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
@@ -1449,14 +1451,14 @@ static int hinic_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_TEST:
- return ARRAY_LEN(hinic_test_strings);
+ return ARRAY_SIZE(hinic_test_strings);
case ETH_SS_STATS:
q_num = nic_dev->num_qps;
- count = ARRAY_LEN(hinic_function_stats) +
- (ARRAY_LEN(hinic_tx_queue_stats) +
- ARRAY_LEN(hinic_rx_queue_stats)) * q_num;
+ count = ARRAY_SIZE(hinic_function_stats) +
+ (ARRAY_SIZE(hinic_tx_queue_stats) +
+ ARRAY_SIZE(hinic_rx_queue_stats)) * q_num;
- count += ARRAY_LEN(hinic_port_stats);
+ count += ARRAY_SIZE(hinic_port_stats);
return count;
default:
@@ -1476,27 +1478,27 @@ static void hinic_get_strings(struct net_device *netdev,
memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
return;
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_LEN(hinic_function_stats); i++) {
+ for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++) {
memcpy(p, hinic_function_stats[i].name,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < ARRAY_LEN(hinic_port_stats); i++) {
+ for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++) {
memcpy(p, hinic_port_stats[i].name,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < nic_dev->num_qps; i++) {
- for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++) {
sprintf(p, hinic_tx_queue_stats[j].name, i);
p += ETH_GSTRING_LEN;
}
}
for (i = 0; i < nic_dev->num_qps; i++) {
- for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++) {
sprintf(p, hinic_rx_queue_stats[j].name, i);
p += ETH_GSTRING_LEN;
}
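Note: beyond removing duplication, switching hinic from its private ARRAY_LEN to the kernel's ARRAY_SIZE adds type safety. ARRAY_SIZE (include/linux/kernel.h) is defined roughly as:

#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

where __must_be_array() turns passing a pointer instead of an array into a build error; the open-coded ARRAY_LEN would silently compute a bogus length in that case.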
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index b482f6f633bd..3ee89ae496d0 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -1178,7 +1178,8 @@ found:
DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));
for (i = 0; i < 6; i++)
- DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));
+ DEB(DEB_PROBE,printk(" %2.2X", eth_addr[i]));
+ eth_hw_addr_set(dev, eth_addr);
DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
index 48e001881c75..0af70094aba3 100644
--- a/drivers/net/ethernet/i825xx/lasi_82596.c
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -147,6 +147,7 @@ lan_init_chip(struct parisc_device *dev)
struct net_device *netdevice;
struct i596_private *lp;
int retval = -ENOMEM;
+ u8 addr[ETH_ALEN];
int i;
if (!dev->irq) {
@@ -167,13 +168,14 @@ lan_init_chip(struct parisc_device *dev)
netdevice->base_addr = dev->hpa.start;
netdevice->irq = dev->irq;
- if (pdc_lan_station_id(netdevice->dev_addr, netdevice->base_addr)) {
+ if (pdc_lan_station_id(addr, netdevice->base_addr)) {
for (i = 0; i < 6; i++) {
- netdevice->dev_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
+ addr[i] = gsc_readb(LAN_PROM_ADDR + i);
}
printk(KERN_INFO
"%s: MAC of HP700 LAN read from EEPROM\n", __FILE__);
}
+ eth_hw_addr_set(netdevice, addr);
lp = netdev_priv(netdevice);
lp->options = dev->id.sversion == 0x72 ? OPT_SWAP_PORT : 0;
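Note: the i825xx hunks are part of the conversion to a const netdev->dev_addr: drivers may no longer poke the address bytes in place, so they assemble the address in a local buffer and commit it once with eth_hw_addr_set(). The shape of the conversion, with a hypothetical read_prom_byte() helper:

	u8 addr[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = read_prom_byte(i);	/* hypothetical PROM read */
	eth_hw_addr_set(netdev, addr);		/* single const-safe commit */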
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 6b3fc8823c54..fbea9f7efe8c 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2137,8 +2137,11 @@ emac_ethtool_set_link_ksettings(struct net_device *ndev,
return 0;
}
-static void emac_ethtool_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *rp)
+static void
+emac_ethtool_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 3cca51735421..ad9a2819e202 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2058,7 +2058,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_packets++;
tx_bytes += skb->len;
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
ret = NETDEV_TX_OK;
goto out;
@@ -3088,7 +3088,9 @@ static u32 ibmvnic_get_link(struct net_device *netdev)
}
static void ibmvnic_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
@@ -3108,7 +3110,9 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
}
static int ibmvnic_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int ret;
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 0bf3d47bb90d..4a8013f20152 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2557,7 +2557,9 @@ static int e100_set_eeprom(struct net_device *netdev,
}
static void e100_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nic *nic = netdev_priv(netdev);
struct param_range *rfds = &nic->params.rfds;
@@ -2570,7 +2572,9 @@ static void e100_get_ringparam(struct net_device *netdev,
}
static int e100_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nic *nic = netdev_priv(netdev);
struct param_range *rfds = &nic->params.rfds;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 0a57172dfcbc..32803b0cf1e8 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -539,7 +539,9 @@ static void e1000_get_drvinfo(struct net_device *netdev,
}
static void e1000_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -556,7 +558,9 @@ static void e1000_get_ringparam(struct net_device *netdev,
}
static int e1000_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 8515e00d1b40..b80ae9a82224 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -655,7 +655,9 @@ static void e1000_get_drvinfo(struct net_device *netdev,
}
static void e1000_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -666,7 +668,9 @@ static void e1000_get_ringparam(struct net_device *netdev,
}
static int e1000_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_ring *temp_tx = NULL, *temp_rx = NULL;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 0d37f011d0ce..d53369e30040 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -502,7 +502,9 @@ static void fm10k_set_msglevel(struct net_device *netdev, u32 data)
}
static void fm10k_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
@@ -517,7 +519,9 @@ static void fm10k_get_ringparam(struct net_device *netdev,
}
static int fm10k_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_ring *temp_ring;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 513ba6974355..091f36adbbe1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1916,7 +1916,9 @@ static void i40e_get_drvinfo(struct net_device *netdev,
}
static void i40e_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
@@ -1944,7 +1946,9 @@ static bool i40e_active_tx_ring_index(struct i40e_vsi *vsi, u16 index)
}
static int i40e_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
struct i40e_netdev_priv *np = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 0cecaff38d04..27c7b36427d2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -583,12 +583,16 @@ static void iavf_get_drvinfo(struct net_device *netdev,
* iavf_get_ringparam - Get ring parameters
* @netdev: network interface device structure
* @ring: ethtool ringparam structure
+ * @kernel_ring: ethtool external ringparam structure
+ * @extack: netlink extended ACK report struct
*
* Returns current ring parameters. TX and RX rings are reported separately,
* but the number of rings is not reported.
**/
static void iavf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
@@ -602,12 +606,16 @@ static void iavf_get_ringparam(struct net_device *netdev,
* iavf_set_ringparam - Set ring parameters
* @netdev: network interface device structure
* @ring: ethtool ringparam structure
+ * @kernel_ring: ethtool external ringparam structure
+ * @extack: netlink extended ACK report struct
*
* Sets ring parameters. TX and RX rings are controlled separately, but the
* number of rings is not specified, so all rings get the same settings.
**/
static int iavf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index b2db39ee5f85..b67ad51cbcc9 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -576,6 +576,7 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded:1; /* has previous stats been loaded */
+ u8 rdma_mode;
u16 dcbx_cap;
u32 tx_timeout_count;
unsigned long tx_timeout_last_recovery;
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index b9bd9f9472f6..478412b28a76 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -430,6 +430,120 @@ static const struct devlink_ops ice_devlink_ops = {
.flash_update = ice_devlink_flash_update,
};
+static int
+ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2;
+
+ return 0;
+}
+
+static int
+ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ bool roce_ena = ctx->val.vbool;
+ int ret;
+
+ if (!roce_ena) {
+ ice_unplug_aux_dev(pf);
+ pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+ return 0;
+ }
+
+ pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ ret = ice_plug_aux_dev(pf);
+ if (ret)
+ pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+
+ return ret;
+}
+
+static int
+ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) {
+ NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP;
+
+ return 0;
+}
+
+static int
+ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ bool iw_ena = ctx->val.vbool;
+ int ret;
+
+ if (!iw_ena) {
+ ice_unplug_aux_dev(pf);
+ pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+ return 0;
+ }
+
+ pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP;
+ ret = ice_plug_aux_dev(pf);
+ if (ret)
+ pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+
+ return ret;
+}
+
+static int
+ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) {
+ NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param ice_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ ice_devlink_enable_roce_get,
+ ice_devlink_enable_roce_set,
+ ice_devlink_enable_roce_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_IWARP, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ ice_devlink_enable_iw_get,
+ ice_devlink_enable_iw_set,
+ ice_devlink_enable_iw_validate),
+
+};
+
static void ice_devlink_free(void *devlink_ptr)
{
devlink_free((struct devlink *)devlink_ptr);
@@ -484,6 +598,36 @@ void ice_devlink_unregister(struct ice_pf *pf)
devlink_unregister(priv_to_devlink(pf));
}
+int ice_devlink_register_params(struct ice_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ union devlink_param_value value;
+ int err;
+
+ err = devlink_params_register(devlink, ice_devlink_params,
+ ARRAY_SIZE(ice_devlink_params));
+ if (err)
+ return err;
+
+ value.vbool = false;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
+ value);
+
+ value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags) ? true : false;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ value);
+
+ return 0;
+}
+
+void ice_devlink_unregister_params(struct ice_pf *pf)
+{
+ devlink_params_unregister(priv_to_devlink(pf), ice_devlink_params,
+ ARRAY_SIZE(ice_devlink_params));
+}
+
/**
* ice_devlink_create_pf_port - Create a devlink port for this PF
* @pf: the PF to create a devlink port for
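Note: the new ice parameters are the generic enable_roce/enable_iwarp devlink knobs, exposed runtime-only, with validate callbacks enforcing that the two RDMA protocols are mutually exclusive on this device. Assuming an iproute2 devlink with generic-parameter support and a hypothetical PCI address, toggling them looks like:

	devlink dev param set pci/0000:3b:00.0 name enable_roce value true cmode runtime
	devlink dev param set pci/0000:3b:00.0 name enable_iwarp value true cmode runtime

(the second call should fail with the extack message quoted above while RoCEv2 is still enabled).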
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h
index b7f9551e4fc4..faea757fcf5d 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.h
@@ -4,10 +4,16 @@
#ifndef _ICE_DEVLINK_H_
#define _ICE_DEVLINK_H_
+enum ice_devlink_param_id {
+ ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+};
+
struct ice_pf *ice_allocate_pf(struct device *dev);
void ice_devlink_register(struct ice_pf *pf);
void ice_devlink_unregister(struct ice_pf *pf);
+int ice_devlink_register_params(struct ice_pf *pf);
+void ice_devlink_unregister_params(struct ice_pf *pf);
int ice_devlink_create_pf_port(struct ice_pf *pf);
void ice_devlink_destroy_pf_port(struct ice_pf *pf);
int ice_devlink_create_vf_port(struct ice_vf *vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 572519e402f4..5af2faaa21e1 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -2686,7 +2686,9 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
}
static void
-ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -2704,7 +2706,9 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
}
static int
-ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_tx_ring *xdp_rings = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index adcc9a251595..fc3580167e7b 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -288,7 +288,7 @@ int ice_plug_aux_dev(struct ice_pf *pf)
adev->id = pf->aux_idx;
adev->dev.release = ice_adev_release;
adev->dev.parent = &pf->pdev->dev;
- adev->name = IIDC_RDMA_ROCE_NAME;
+ adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp";
ret = auxiliary_device_init(adev);
if (ret) {
@@ -335,6 +335,6 @@ int ice_init_rdma(struct ice_pf *pf)
dev_err(dev, "failed to reserve vectors for RDMA\n");
return ret;
}
-
+ pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
return ice_plug_aux_dev(pf);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 4d1fc48c9744..515ac8c4e891 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -4721,6 +4721,10 @@ probe_done:
if (err)
goto err_netdev_reg;
+ err = ice_devlink_register_params(pf);
+ if (err)
+ goto err_netdev_reg;
+
/* ready to go, so clear down state bit */
clear_bit(ICE_DOWN, pf->state);
if (ice_is_aux_ena(pf)) {
@@ -4728,7 +4732,7 @@ probe_done:
if (pf->aux_idx < 0) {
dev_err(dev, "Failed to allocate device ID for AUX driver\n");
err = -ENOMEM;
- goto err_netdev_reg;
+ goto err_devlink_reg_param;
}
err = ice_init_rdma(pf);
@@ -4747,6 +4751,8 @@ probe_done:
err_init_aux_unroll:
pf->adev = NULL;
ida_free(&ice_aux_ida, pf->aux_idx);
+err_devlink_reg_param:
+ ice_devlink_unregister_params(pf);
err_netdev_reg:
err_send_version_unroll:
ice_vsi_release_all(pf);
@@ -4861,6 +4867,7 @@ static void ice_remove(struct pci_dev *pdev)
ice_unplug_aux_dev(pf);
if (pf->aux_idx >= 0)
ida_free(&ice_aux_ida, pf->aux_idx);
+ ice_devlink_unregister_params(pf);
set_bit(ICE_DOWN, pf->state);
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index fb1029352c3e..51a2dcaf553d 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -864,7 +864,9 @@ static void igb_get_drvinfo(struct net_device *netdev,
}
static void igb_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -875,7 +877,9 @@ static void igb_get_ringparam(struct net_device *netdev,
}
static int igb_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct igb_ring *temp_ring;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fd54d3ef890b..dd208930fbe4 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2927,7 +2927,7 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
/* Avoid transmit queue timeout since we share it with the slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
__netif_tx_unlock(nq);
@@ -2961,7 +2961,7 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
__netif_tx_lock(nq, cpu);
/* Avoid transmit queue timeout since we share it with the slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 06e5bd646a0e..9d4322b74163 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -175,7 +175,9 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
}
static void igbvf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct igbvf_ring *tx_ring = adapter->tx_ring;
@@ -188,7 +190,9 @@ static void igbvf_get_ringparam(struct net_device *netdev,
}
static int igbvf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct igbvf_ring *temp_ring;
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index e0a76ac1bbbc..8cc077b712ad 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -567,8 +567,11 @@ static int igc_ethtool_set_eeprom(struct net_device *netdev,
return ret_val;
}
-static void igc_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+static void
+igc_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -578,8 +581,11 @@ static void igc_ethtool_get_ringparam(struct net_device *netdev,
ring->tx_pending = adapter->tx_ring_count;
}
-static int igc_ethtool_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+static int
+igc_ethtool_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_ring *temp_ring;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index 582099a5ad41..46efcfab7234 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -464,7 +464,9 @@ ixgb_get_drvinfo(struct net_device *netdev,
static void
ixgb_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_desc_ring *txdr = &adapter->tx_ring;
@@ -478,7 +480,9 @@ ixgb_get_ringparam(struct net_device *netdev,
static int
ixgb_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_desc_ring *txdr = &adapter->tx_ring;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 8362822316a9..f70967c32116 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1118,7 +1118,9 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
}
static void ixgbe_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
@@ -1131,7 +1133,9 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
}
static int ixgbe_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *temp_ring;
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 6bace746eaac..5f08779c0e4e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -281,6 +281,10 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ERR_INVALID_MAC_ADDR -1
#define IXGBE_ERR_RESET_FAILED -2
#define IXGBE_ERR_INVALID_ARGUMENT -3
+#define IXGBE_ERR_CONFIG -4
+#define IXGBE_ERR_MBX -5
+#define IXGBE_ERR_TIMEOUT -6
+#define IXGBE_ERR_PARAM -7
/* Transmit Config masks */
#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 8380f905e708..3b41f83c8dff 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -225,7 +225,9 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
}
static void ixgbevf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -236,7 +238,9 @@ static void ixgbevf_get_ringparam(struct net_device *netdev,
}
static int ixgbevf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index e3e4676af9e4..e763cee0695e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -40,16 +40,16 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
spin_lock_bh(&adapter->mbx_lock);
- ret = hw->mbx.ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
+ ret = ixgbevf_write_mbx(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
if (ret)
goto out;
- ret = hw->mbx.ops.read_posted(hw, msgbuf, 2);
+ ret = ixgbevf_poll_mbx(hw, msgbuf, 2);
if (ret)
goto out;
ret = (int)msgbuf[1];
- if (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK && ret >= 0)
+ if (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE && ret >= 0)
ret = -1;
out:
@@ -77,11 +77,11 @@ static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa)
spin_lock_bh(&adapter->mbx_lock);
- err = hw->mbx.ops.write_posted(hw, msgbuf, 2);
+ err = ixgbevf_write_mbx(hw, msgbuf, 2);
if (err)
goto out;
- err = hw->mbx.ops.read_posted(hw, msgbuf, 2);
+ err = ixgbevf_poll_mbx(hw, msgbuf, 2);
if (err)
goto out;
@@ -623,6 +623,7 @@ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
switch (adapter->hw.api_version) {
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
break;
default:
return;
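
The ipsec.c hunks above show the new calling convention: mailbox users no longer reach into hw->mbx.ops for read_posted/write_posted but go through ixgbevf_write_mbx() and ixgbevf_poll_mbx() (added in mbx.c further down), which validate the ops pointers and timeout before touching hardware. The request/reply pattern condenses to the helper below — a sketch mirroring ixgbevf_write_msg_read_ack() in vf.c; the vf_mbx_request name is illustrative:

/* msg[] carries the request on entry and the PF's reply on return */
static s32 vf_mbx_request(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
	s32 err = ixgbevf_write_mbx(hw, msg, size);	/* send, wait for ACK */

	if (err)
		return err;
	return ixgbevf_poll_mbx(hw, msg, size);		/* wait for, read reply */
}
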
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index a0e325774819..e257390a4f6a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -430,6 +430,7 @@ extern const struct ixgbevf_info ixgbevf_X540_vf_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
+extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy;
extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info;
extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
@@ -491,4 +492,8 @@ void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
#define hw_dbg(hw, format, arg...) \
netdev_dbg(ixgbevf_hw_to_netdev(hw), format, ## arg)
+
+s32 ixgbevf_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size);
+s32 ixgbevf_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size);
+
#endif /* _IXGBEVF_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d81811ab4ec4..b1dfbaff8b31 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2266,6 +2266,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
static const int api[] = {
+ ixgbe_mbox_api_15,
ixgbe_mbox_api_14,
ixgbe_mbox_api_13,
ixgbe_mbox_api_12,
@@ -2284,6 +2285,12 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
idx++;
}
+ if (hw->api_version >= ixgbe_mbox_api_15) {
+ hw->mbx.ops.init_params(hw);
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+ sizeof(struct ixgbe_mbx_operations));
+ }
+
spin_unlock_bh(&adapter->mbx_lock);
}
@@ -2627,6 +2634,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
if (adapter->xdp_prog &&
hw->mac.max_tx_queues == rss)
rss = rss > 3 ? 2 : 1;
@@ -4565,7 +4573,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
hw->mac.type = ii->mac;
- memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,
sizeof(struct ixgbe_mbx_operations));
/* setup the private structure */
@@ -4625,6 +4633,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN);
break;
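
Probe now installs the legacy, lock-less ops table (ixgbevf_mbx_ops_legacy) unconditionally and upgrades to the locking ops only once mailbox API 1.5 has been negotiated with the PF; ixgbevf_reset_hw_vf() in vf.c performs the reverse switch on device reset. Condensed into one hypothetical helper, the selection logic is:

/* vf_select_mbx_ops is an illustrative condensation, not code from this patch */
static void vf_select_mbx_ops(struct ixgbe_hw *hw)
{
	const struct ixgbe_mbx_operations *ops =
		hw->api_version >= ixgbe_mbox_api_15 ?
			&ixgbevf_mbx_ops : &ixgbevf_mbx_ops_legacy;

	hw->mbx.ops.init_params(hw);
	memcpy(&hw->mbx.ops, ops, sizeof(hw->mbx.ops));
}
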
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index 6bc1953263b9..a55dd978f7ca 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -15,16 +15,15 @@ static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
+ if (!countdown || !mbx->ops.check_for_msg)
+ return IXGBE_ERR_CONFIG;
+
while (countdown && mbx->ops.check_for_msg(hw)) {
countdown--;
udelay(mbx->udelay);
}
- /* if we failed, all future posted messages fail until reset */
- if (!countdown)
- mbx->timeout = 0;
-
- return countdown ? 0 : IXGBE_ERR_MBX;
+ return countdown ? 0 : IXGBE_ERR_TIMEOUT;
}
/**
@@ -38,87 +37,82 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
+ if (!countdown || !mbx->ops.check_for_ack)
+ return IXGBE_ERR_CONFIG;
+
while (countdown && mbx->ops.check_for_ack(hw)) {
countdown--;
udelay(mbx->udelay);
}
- /* if we failed, all future posted messages fail until reset */
- if (!countdown)
- mbx->timeout = 0;
-
- return countdown ? 0 : IXGBE_ERR_MBX;
+ return countdown ? 0 : IXGBE_ERR_TIMEOUT;
}
/**
- * ixgbevf_read_posted_mbx - Wait for message notification and receive message
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
+ * ixgbevf_read_mailbox_vf - read VF's mailbox register
+ * @hw: pointer to the HW structure
*
- * returns 0 if it successfully received a message notification and
- * copied it into the receive buffer.
+ * This function is used to read the mailbox register dedicated to the VF
+ * without losing the read-to-clear status bits.
**/
-static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+static u32 ixgbevf_read_mailbox_vf(struct ixgbe_hw *hw)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
+ u32 vf_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
- if (!mbx->ops.read)
- goto out;
+ vf_mailbox |= hw->mbx.vf_mailbox;
+ hw->mbx.vf_mailbox |= vf_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
- ret_val = ixgbevf_poll_for_msg(hw);
-
- /* if ack received read message, otherwise we timed out */
- if (!ret_val)
- ret_val = mbx->ops.read(hw, msg, size);
-out:
- return ret_val;
+ return vf_mailbox;
}
/**
- * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
+ * ixgbevf_clear_msg_vf - clear PF status bit
+ * @hw: pointer to the HW structure
*
- * returns 0 if it successfully copied message into the buffer and
- * received an ack to that message within delay * timeout period
+ * This function is used to clear the PFSTS bit in the VFMAILBOX register
**/
-static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+static void ixgbevf_clear_msg_vf(struct ixgbe_hw *hw)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
+ u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);
- /* exit if either we can't write or there isn't a defined timeout */
- if (!mbx->ops.write || !mbx->timeout)
- goto out;
+ if (vf_mailbox & IXGBE_VFMAILBOX_PFSTS) {
+ hw->mbx.stats.reqs++;
+ hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFSTS;
+ }
+}
- /* send msg */
- ret_val = mbx->ops.write(hw, msg, size);
+/**
+ * ixgbevf_clear_ack_vf - clear PF ACK bit
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to clear the PFACK bit in the VFMAILBOX register
+ **/
+static void ixgbevf_clear_ack_vf(struct ixgbe_hw *hw)
+{
+ u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);
- /* if msg sent wait until we receive an ack */
- if (!ret_val)
- ret_val = ixgbevf_poll_for_ack(hw);
-out:
- return ret_val;
+ if (vf_mailbox & IXGBE_VFMAILBOX_PFACK) {
+ hw->mbx.stats.acks++;
+ hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFACK;
+ }
}
/**
- * ixgbevf_read_v2p_mailbox - read v2p mailbox
- * @hw: pointer to the HW structure
+ * ixgbevf_clear_rst_vf - clear PF reset bit
+ * @hw: pointer to the HW structure
*
- * This function is used to read the v2p mailbox without losing the read to
- * clear status bits.
+ * This function is used to clear the reset indication and reset done bits in
+ * the VFMAILBOX register once the shared-resource reset sequence completes.
**/
-static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
+static void ixgbevf_clear_rst_vf(struct ixgbe_hw *hw)
{
- u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
-
- v2p_mailbox |= hw->mbx.v2p_mailbox;
- hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+ u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);
- return v2p_mailbox;
+ if (vf_mailbox & (IXGBE_VFMAILBOX_RSTI | IXGBE_VFMAILBOX_RSTD)) {
+ hw->mbx.stats.rsts++;
+ hw->mbx.vf_mailbox &= ~(IXGBE_VFMAILBOX_RSTI |
+ IXGBE_VFMAILBOX_RSTD);
+ }
}
/**
@@ -131,14 +125,12 @@ static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
**/
static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
{
- u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw);
+ u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);
s32 ret_val = IXGBE_ERR_MBX;
- if (v2p_mailbox & mask)
+ if (vf_mailbox & mask)
ret_val = 0;
- hw->mbx.v2p_mailbox &= ~mask;
-
return ret_val;
}
@@ -172,6 +164,7 @@ static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
ret_val = 0;
+ ixgbevf_clear_ack_vf(hw);
hw->mbx.stats.acks++;
}
@@ -191,6 +184,7 @@ static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
IXGBE_VFMAILBOX_RSTI))) {
ret_val = 0;
+ ixgbevf_clear_rst_vf(hw);
hw->mbx.stats.rsts++;
}
@@ -205,19 +199,59 @@ static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
**/
static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
{
- s32 ret_val = IXGBE_ERR_MBX;
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+ int countdown = mbx->timeout;
+ u32 vf_mailbox;
- /* Take ownership of the buffer */
- IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+ if (!mbx->timeout)
+ return ret_val;
- /* reserve mailbox for VF use */
- if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
- ret_val = 0;
+ while (countdown--) {
+ /* Reserve mailbox for VF use */
+ vf_mailbox = ixgbevf_read_mailbox_vf(hw);
+ vf_mailbox |= IXGBE_VFMAILBOX_VFU;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+ /* Verify that VF is the owner of the lock */
+ if (ixgbevf_read_mailbox_vf(hw) & IXGBE_VFMAILBOX_VFU) {
+ ret_val = 0;
+ break;
+ }
+
+ /* Wait a bit before trying again */
+ udelay(mbx->udelay);
+ }
+
+ if (ret_val)
+ ret_val = IXGBE_ERR_TIMEOUT;
return ret_val;
}
/**
+ * ixgbevf_release_mbx_lock_vf - release mailbox lock
+ * @hw: pointer to the HW structure
+ **/
+static void ixgbevf_release_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+ u32 vf_mailbox;
+
+ /* Return ownership of the buffer */
+ vf_mailbox = ixgbevf_read_mailbox_vf(hw);
+ vf_mailbox &= ~IXGBE_VFMAILBOX_VFU;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+}
+
+/**
+ * ixgbevf_release_mbx_lock_vf_legacy - release mailbox lock
+ * @hw: pointer to the HW structure
+ **/
+static void ixgbevf_release_mbx_lock_vf_legacy(struct ixgbe_hw *__always_unused hw)
+{
+}
+
+/**
* ixgbevf_write_mbx_vf - Write a message to the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
@@ -227,6 +261,50 @@ static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
**/
static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
+ u32 vf_mailbox;
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent PF/VF race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbevf_clear_msg_vf(hw);
+ ixgbevf_clear_ack_vf(hw);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* interrupt the PF to tell it a message has been sent */
+ vf_mailbox = ixgbevf_read_mailbox_vf(hw);
+ vf_mailbox |= IXGBE_VFMAILBOX_REQ;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+ /* if msg sent wait until we receive an ack */
+ ret_val = ixgbevf_poll_for_ack(hw);
+
+out_no_write:
+ hw->mbx.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_mbx_vf_legacy - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied the message into the mailbox buffer
+ **/
+static s32 ixgbevf_write_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
s32 ret_val;
u16 i;
@@ -237,7 +315,9 @@ static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
/* flush msg and acks as we are overwriting the message buffer */
ixgbevf_check_for_msg_vf(hw);
+ ixgbevf_clear_msg_vf(hw);
ixgbevf_check_for_ack_vf(hw);
+ ixgbevf_clear_ack_vf(hw);
/* copy the caller specified message to the mailbox memory buffer */
for (i = 0; i < size; i++)
@@ -263,6 +343,42 @@ out_no_write:
**/
static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
+ u32 vf_mailbox;
+ s32 ret_val;
+ u16 i;
+
+ /* check if there is a message from PF */
+ ret_val = ixgbevf_check_for_msg_vf(hw);
+ if (ret_val)
+ return ret_val;
+
+ ixgbevf_clear_msg_vf(hw);
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+ /* Acknowledge receipt */
+ vf_mailbox = ixgbevf_read_mailbox_vf(hw);
+ vf_mailbox |= IXGBE_VFMAILBOX_ACK;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_read_mbx_vf_legacy - Reads a message from the inbox intended for the VF
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully read the message from the buffer
+ **/
+static s32 ixgbevf_read_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
s32 ret_val = 0;
u16 i;
@@ -298,7 +414,7 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
/* start mailbox as timed out and let the reset_hw call set the timeout
* value to begin communications
*/
- mbx->timeout = 0;
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
mbx->size = IXGBE_VFMAILBOX_SIZE;
@@ -312,12 +428,79 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
return 0;
}
+/**
+ * ixgbevf_poll_mbx - Wait for a message and read it from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully read the message from the buffer
+ **/
+s32 ixgbevf_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+ if (!mbx->ops.read || !mbx->ops.check_for_msg || !mbx->timeout)
+ return ret_val;
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ ret_val = ixgbevf_poll_for_msg(hw);
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_mbx - Write a message to the mailbox and wait for ACK
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied the message into the buffer and
+ * received an ACK to that message within the specified period
+ **/
+s32 ixgbevf_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+ /*
+ * exit if we can't write or release,
+ * or there is no timeout defined
+ */
+ if (!mbx->ops.write || !mbx->ops.check_for_ack || !mbx->ops.release ||
+ !mbx->timeout)
+ return ret_val;
+
+ if (size > mbx->size)
+ ret_val = IXGBE_ERR_PARAM;
+ else
+ ret_val = mbx->ops.write(hw, msg, size);
+
+ return ret_val;
+}
+
const struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
.init_params = ixgbevf_init_mbx_params_vf,
+ .release = ixgbevf_release_mbx_lock_vf,
.read = ixgbevf_read_mbx_vf,
.write = ixgbevf_write_mbx_vf,
- .read_posted = ixgbevf_read_posted_mbx,
- .write_posted = ixgbevf_write_posted_mbx,
+ .check_for_msg = ixgbevf_check_for_msg_vf,
+ .check_for_ack = ixgbevf_check_for_ack_vf,
+ .check_for_rst = ixgbevf_check_for_rst_vf,
+};
+
+const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy = {
+ .init_params = ixgbevf_init_mbx_params_vf,
+ .release = ixgbevf_release_mbx_lock_vf_legacy,
+ .read = ixgbevf_read_mbx_vf_legacy,
+ .write = ixgbevf_write_mbx_vf_legacy,
.check_for_msg = ixgbevf_check_for_msg_vf,
.check_for_ack = ixgbevf_check_for_ack_vf,
.check_for_rst = ixgbevf_check_for_rst_vf,
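
The behavioural core of the new (non-legacy) ops is real mutual exclusion: ixgbevf_obtain_mbx_lock_vf() now retries the VFU (VF-owns-buffer) bit under mbx->timeout instead of making a single opportunistic write, and every write path pairs it with the new .release op. The write-then-read-back-to-verify pattern generalizes to any shared-register lock; a minimal sketch with illustrative bounds:

/* Hypothetical condensation of the try-lock loop added above */
static s32 vf_try_lock_mailbox(struct ixgbe_hw *hw)
{
	int tries = 100;		/* illustrative; the driver uses mbx->timeout */
	u32 reg;

	while (tries--) {
		reg = ixgbevf_read_mailbox_vf(hw) | IXGBE_VFMAILBOX_VFU;
		IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, reg);

		/* read back: only the actual owner observes VFU set */
		if (ixgbevf_read_mailbox_vf(hw) & IXGBE_VFMAILBOX_VFU)
			return 0;

		udelay(10);		/* illustrative; the driver uses mbx->udelay */
	}
	return IXGBE_ERR_TIMEOUT;
}
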
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 853796c8ef0e..7346ccf014a5 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -7,7 +7,6 @@
#include "vf.h"
#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
-#define IXGBE_ERR_MBX -100
#define IXGBE_VFMAILBOX 0x002FC
#define IXGBE_VFMBMEM 0x00200
@@ -39,14 +38,17 @@
/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
* PF. The reverse is true if it is IXGBE_PF_*.
- * Message ACK's are the value or'd with 0xF0000000
+ * Message results are the value or'd with 0xF0000000
*/
-/* Messages below or'd with this are the ACK */
-#define IXGBE_VT_MSGTYPE_ACK 0x80000000
-/* Messages below or'd with this are the NACK */
-#define IXGBE_VT_MSGTYPE_NACK 0x40000000
-/* Indicates that VF is still clear to send requests */
-#define IXGBE_VT_MSGTYPE_CTS 0x20000000
+#define IXGBE_VT_MSGTYPE_SUCCESS 0x80000000 /* Messages or'd with this
+ * have succeeded
+ */
+#define IXGBE_VT_MSGTYPE_FAILURE 0x40000000 /* Messages or'd with this
+ * have failed
+ */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ * clear to send requests
+ */
#define IXGBE_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
@@ -63,6 +65,7 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */
+ ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
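
With the ACK/NACK to SUCCESS/FAILURE rename, a PF reply is the request opcode OR'd with exactly one result bit, possibly alongside CTS. Callers therefore mask CTS off first and compare against both outcomes — the pattern every vf.c conversion below follows:

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_FAILURE))
		return -EPERM;		/* PF refused the request */
	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_SUCCESS))
		return IXGBE_ERR_MBX;	/* unexpected or garbled reply */
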
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index d459f5c8e98f..61d8970c6d1d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -13,13 +13,12 @@
static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
u32 *retmsg, u16 size)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 retval = mbx->ops.write_posted(hw, msg, size);
+ s32 retval = ixgbevf_write_mbx(hw, msg, size);
if (retval)
return retval;
- return mbx->ops.read_posted(hw, retmsg, size);
+ return ixgbevf_poll_mbx(hw, retmsg, size);
}
/**
@@ -75,6 +74,9 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
/* reset the api version */
hw->api_version = ixgbe_mbox_api_10;
+ hw->mbx.ops.init_params(hw);
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,
+ sizeof(struct ixgbe_mbx_operations));
IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
IXGBE_WRITE_FLUSH(hw);
@@ -92,7 +94,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
msgbuf[0] = IXGBE_VF_RESET;
- mbx->ops.write_posted(hw, msgbuf, 1);
+ ixgbevf_write_mbx(hw, msgbuf, 1);
mdelay(10);
@@ -100,7 +102,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
* also set up the mc_filter_type which is piggy backed
* on the mac address in word 3
*/
- ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
+ ret_val = ixgbevf_poll_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
if (ret_val)
return ret_val;
@@ -108,11 +110,11 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
* to indicate that no MAC address has yet been assigned for
* the VF.
*/
- if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
- msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS) &&
+ msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_FAILURE))
return IXGBE_ERR_INVALID_MAC_ADDR;
- if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
+ if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS))
ether_addr_copy(hw->mac.perm_addr, addr);
hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
@@ -269,7 +271,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
if (!ret_val) {
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_FAILURE))
return -ENOMEM;
}
@@ -311,6 +313,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
* is not supported for this device type.
*/
switch (hw->api_version) {
+ case ixgbe_mbox_api_15:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
@@ -323,12 +326,12 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
msgbuf[0] = IXGBE_VF_GET_RETA;
- err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
+ err = ixgbevf_write_mbx(hw, msgbuf, 1);
if (err)
return err;
- err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);
+ err = ixgbevf_poll_mbx(hw, msgbuf, dwords + 1);
if (err)
return err;
@@ -336,14 +339,14 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* If the operation has been refused by a PF return -EPERM */
- if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_FAILURE))
return -EPERM;
/* If we didn't get an ACK there must have been
* some sort of mailbox error so we should treat it
* as such.
*/
- if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
+ if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_SUCCESS))
return IXGBE_ERR_MBX;
/* ixgbevf doesn't support more than 2 queues at the moment */
@@ -379,6 +382,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
* or if the operation is not supported for this device type.
*/
switch (hw->api_version) {
+ case ixgbe_mbox_api_15:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
@@ -390,12 +394,12 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
}
msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
- err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
+ err = ixgbevf_write_mbx(hw, msgbuf, 1);
if (err)
return err;
- err = hw->mbx.ops.read_posted(hw, msgbuf, 11);
+ err = ixgbevf_poll_mbx(hw, msgbuf, 11);
if (err)
return err;
@@ -403,14 +407,14 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* If the operation has been refused by a PF return -EPERM */
- if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_FAILURE))
return -EPERM;
/* If we didn't get an ACK there must have been
* some sort of mailbox error so we should treat it
* as such.
*/
- if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
+ if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_SUCCESS))
return IXGBE_ERR_MBX;
memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);
@@ -442,7 +446,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
/* if nacked the address was rejected, use "perm_addr" */
if (!ret_val &&
- (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_FAILURE))) {
ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
return IXGBE_ERR_MBX;
}
@@ -545,8 +549,9 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
return -EOPNOTSUPP;
fallthrough;
- case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
break;
default:
return -EOPNOTSUPP;
@@ -561,7 +566,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
return err;
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_FAILURE))
return -EPERM;
return 0;
@@ -606,7 +611,7 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
- if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
+ if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_SUCCESS))
err = IXGBE_ERR_INVALID_ARGUMENT;
mbx_err:
@@ -705,12 +710,15 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
/* if the read failed it could just be a mailbox collision, best wait
* until we are called again and don't report an error
*/
- if (mbx->ops.read(hw, &in_msg, 1))
+ if (mbx->ops.read(hw, &in_msg, 1)) {
+ if (hw->api_version >= ixgbe_mbox_api_15)
+ mac->get_link_status = false;
goto out;
+ }
if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
/* msg is not CTS and is a FAILURE; we must have lost CTS status */
- if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+ if (in_msg & IXGBE_VT_MSGTYPE_FAILURE)
ret_val = -1;
goto out;
}
@@ -816,7 +824,7 @@ static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
if (ret_val)
return ret_val;
if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
- (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
+ (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE))
return IXGBE_ERR_MBX;
return 0;
@@ -863,7 +871,8 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* Store value and return 0 on success */
- if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
+ if (msg[0] == (IXGBE_VF_API_NEGOTIATE |
+ IXGBE_VT_MSGTYPE_SUCCESS)) {
hw->api_version = api;
return 0;
}
@@ -901,6 +910,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
break;
default:
return 0;
@@ -918,7 +928,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
* some sort of mailbox error so we should treat it
* as such
*/
- if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
+ if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_SUCCESS))
return IXGBE_ERR_MBX;
/* record and validate values from message */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 1d8209df4162..54158dac8707 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -73,10 +73,9 @@ struct ixgbe_mac_info {
struct ixgbe_mbx_operations {
s32 (*init_params)(struct ixgbe_hw *hw);
+ void (*release)(struct ixgbe_hw *hw);
s32 (*read)(struct ixgbe_hw *, u32 *, u16);
s32 (*write)(struct ixgbe_hw *, u32 *, u16);
- s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16);
- s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16);
s32 (*check_for_msg)(struct ixgbe_hw *);
s32 (*check_for_ack)(struct ixgbe_hw *);
s32 (*check_for_rst)(struct ixgbe_hw *);
@@ -96,7 +95,7 @@ struct ixgbe_mbx_info {
struct ixgbe_mbx_stats stats;
u32 timeout;
u32 udelay;
- u32 v2p_mailbox;
+ u32 vf_mailbox;
u16 size;
};
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index bb14fa2241a3..105247582684 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1638,7 +1638,9 @@ static int mv643xx_eth_set_coalesce(struct net_device *dev,
}
static void
-mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
+mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er,
+ struct kernel_ethtool_ringparam *kernel_er,
+ struct netlink_ext_ack *extack)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
@@ -1650,7 +1652,9 @@ mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
}
static int
-mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
+mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er,
+ struct kernel_ethtool_ringparam *kernel_er,
+ struct netlink_ext_ack *extack)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
@@ -3197,7 +3201,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
dev->hw_features = dev->features;
dev->priv_flags |= IFF_UNICAST_FLT;
- dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
+ netif_set_gso_max_segs(dev, MV643XX_MAX_TSO_SEGS);
/* MTU range: 64 - 9500 */
dev->min_mtu = 64;
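
The dev->gso_max_segs assignments throughout this patch become netif_set_gso_max_segs() calls. The setter annotates the store with WRITE_ONCE() because the field is read locklessly from the TX path; its shape is essentially:

static inline void netif_set_gso_max_segs(struct net_device *dev,
					  unsigned int segs)
{
	/* dev->gso_max_segs is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_max_segs, segs);
}
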
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 5a7bdca22a63..80e4b500695e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3823,8 +3823,6 @@ static void mvneta_validate(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
/* We only support QSGMII, SGMII, 802.3z and RGMII modes.
* When in 802.3z mode, we must have AN enabled:
* "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
@@ -3836,34 +3834,7 @@ static void mvneta_validate(struct phylink_config *config,
return;
}
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
-
- /* Asymmetric pause is unsupported */
- phylink_set(mask, Pause);
-
- /* Half-duplex at speeds higher than 100Mbit is unsupported */
- if (state->interface != PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- }
-
- if (state->interface == PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
-
- if (!phy_interface_mode_is_8023z(state->interface)) {
- /* 10M and 100M are only supported in non-802.3z mode */
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ phylink_generic_validate(config, supported, state);
}
static void mvneta_mac_pcs_get_state(struct phylink_config *config,
@@ -4539,8 +4510,11 @@ static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
}
-static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+static void
+mvneta_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct mvneta_port *pp = netdev_priv(netdev);
@@ -4550,8 +4524,11 @@ static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
ring->tx_pending = pp->tx_ring_size;
}
-static int mvneta_ethtool_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+static int
+mvneta_ethtool_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct mvneta_port *pp = netdev_priv(dev);
@@ -5166,6 +5143,9 @@ static int mvneta_probe(struct platform_device *pdev)
pp->phylink_config.dev = &dev->dev;
pp->phylink_config.type = PHYLINK_NETDEV;
+ pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
+ MAC_100 | MAC_1000FD | MAC_2500FD;
+
phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_QSGMII,
pp->phylink_config.supported_interfaces);
@@ -5355,7 +5335,7 @@ static int mvneta_probe(struct platform_device *pdev)
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
- dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
+ netif_set_gso_max_segs(dev, MVNETA_MAX_TSO_SEGS);
/* MTU range: 68 - 9676 */
dev->min_mtu = ETH_MIN_MTU;
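
mvneta — like mvpp2 and mtk_eth_soc later in this patch — drops its hand-rolled link-mode masking: once a driver describes itself via phylink_config.mac_capabilities and the supported_interfaces bitmap, phylink_generic_validate() can compute the supported and advertising masks by itself. The conversion recipe, using mvneta's values from the hunks above:

	/* 1) declare what the MAC can do */
	pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
					      MAC_100 | MAC_1000FD | MAC_2500FD;
	phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_QSGMII,
		  pp->phylink_config.supported_interfaces);

	/* 2) delegate validation: point .validate at phylink_generic_validate
	 * directly (as mtk_eth_soc does below), or call it from a thin wrapper
	 * that keeps driver-specific vetoes (mvneta's 802.3z-requires-autoneg
	 * check above).
	 */
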
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index ce486e16489c..a48e804c46f2 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1488,6 +1488,7 @@ static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
static bool mvpp2_is_xlg(phy_interface_t interface)
{
return interface == PHY_INTERFACE_MODE_10GBASER ||
+ interface == PHY_INTERFACE_MODE_5GBASER ||
interface == PHY_INTERFACE_MODE_XAUI;
}
@@ -1627,6 +1628,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface)
case PHY_INTERFACE_MODE_2500BASEX:
mvpp22_gop_init_sgmii(port);
break;
+ case PHY_INTERFACE_MODE_5GBASER:
case PHY_INTERFACE_MODE_10GBASER:
if (!mvpp2_port_supports_xlg(port))
goto invalid_conf;
@@ -2186,6 +2188,7 @@ static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port,
xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
switch (interface) {
+ case PHY_INTERFACE_MODE_5GBASER:
case PHY_INTERFACE_MODE_10GBASER:
val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
@@ -5433,8 +5436,11 @@ static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
sizeof(drvinfo->bus_info));
}
-static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+static void
+mvpp2_ethtool_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct mvpp2_port *port = netdev_priv(dev);
@@ -5444,8 +5450,11 @@ static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
ring->tx_pending = port->tx_ring_size;
}
-static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+static int
+mvpp2_ethtool_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct mvpp2_port *port = netdev_priv(dev);
u16 prev_rx_ring_size = port->rx_ring_size;
@@ -6120,7 +6129,10 @@ static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
u32 val;
- state->speed = SPEED_10000;
+ if (port->phy_interface == PHY_INTERFACE_MODE_5GBASER)
+ state->speed = SPEED_5000;
+ else
+ state->speed = SPEED_10000;
state->duplex = 1;
state->an_complete = 1;
@@ -6262,9 +6274,6 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state)
{
- struct mvpp2_port *port = mvpp2_phylink_to_port(config);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
/* When in 802.3z mode, we must have AN enabled:
* Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
* When <PortType> = 1 (1000BASE-X) this field must be set to 1.
@@ -6273,52 +6282,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
!phylink_test(state->advertising, Autoneg))
goto empty_set;
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
-
- if (port->priv->global_tx_fc) {
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- }
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_XAUI:
- if (mvpp2_port_supports_xlg(port)) {
- phylink_set_10g_modes(mask);
- phylink_set(mask, 10000baseKR_Full);
- }
- break;
-
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_SGMII:
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- break;
-
- case PHY_INTERFACE_MODE_1000BASEX:
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- break;
-
- case PHY_INTERFACE_MODE_2500BASEX:
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- break;
-
- default:
- goto empty_set;
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ phylink_generic_validate(config, supported, state);
return;
empty_set:
@@ -6901,7 +6865,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
mvpp2_set_hw_csum(port, port->pool_long->id);
dev->vlan_features |= features;
- dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
+ netif_set_gso_max_segs(dev, MVPP2_MAX_TSO_SEGS);
dev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 68 - 9704 */
@@ -6913,12 +6877,44 @@ static int mvpp2_port_probe(struct platform_device *pdev,
if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
port->phylink_config.dev = &dev->dev;
port->phylink_config.type = PHYLINK_NETDEV;
+ port->phylink_config.mac_capabilities =
+ MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10;
+
+ if (port->priv->global_tx_fc)
+ port->phylink_config.mac_capabilities |=
+ MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
if (mvpp2_port_supports_xlg(port)) {
- __set_bit(PHY_INTERFACE_MODE_10GBASER,
- port->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_XAUI,
- port->phylink_config.supported_interfaces);
+ /* If a COMPHY is present, we can support any of
+ * the serdes modes and switch between them.
+ */
+ if (comphy) {
+ __set_bit(PHY_INTERFACE_MODE_5GBASER,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_XAUI,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) {
+ __set_bit(PHY_INTERFACE_MODE_5GBASER,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_10GBASER) {
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_XAUI) {
+ __set_bit(PHY_INTERFACE_MODE_XAUI,
+ port->phylink_config.supported_interfaces);
+ }
+
+ if (comphy)
+ port->phylink_config.mac_capabilities |=
+ MAC_10000FD | MAC_5000FD;
+ else if (phy_mode == PHY_INTERFACE_MODE_5GBASER)
+ port->phylink_config.mac_capabilities |=
+ MAC_5000FD;
+ else
+ port->phylink_config.mac_capabilities |=
+ MAC_10000FD;
}
if (mvpp2_port_supports_rgmii(port))
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 80d4ce61f442..d85db90632d6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -360,7 +360,9 @@ static int otx2_set_pauseparam(struct net_device *netdev,
}
static void otx2_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct otx2_qset *qs = &pfvf->qset;
@@ -372,7 +374,9 @@ static void otx2_get_ringparam(struct net_device *netdev,
}
static int otx2_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
bool if_up = netif_running(netdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 1e0d0c9c1dac..1333edf1c361 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -2741,7 +2741,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
- netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
+ netif_set_gso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2_netdev_ops;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 78944ad3492f..254bebffe8c1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -663,7 +663,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features |= NETIF_F_NTUPLE;
netdev->hw_features |= NETIF_F_RXALL;
- netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
+ netif_set_gso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2vf_netdev_ops;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 0c864e5bf0a6..cf03c67fbf40 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -492,7 +492,9 @@ static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
static void skge_get_ring_param(struct net_device *dev,
- struct ethtool_ringparam *p)
+ struct ethtool_ringparam *p,
+ struct kernel_ethtool_ringparam *kernel_p,
+ struct netlink_ext_ack *extack)
{
struct skge_port *skge = netdev_priv(dev);
@@ -504,7 +506,9 @@ static void skge_get_ring_param(struct net_device *dev,
}
static int skge_set_ring_param(struct net_device *dev,
- struct ethtool_ringparam *p)
+ struct ethtool_ringparam *p,
+ struct kernel_ethtool_ringparam *kernel_p,
+ struct netlink_ext_ack *extack)
{
struct skge_port *skge = netdev_priv(dev);
int err = 0;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 28b5b9341145..ea16b1dd6a98 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4149,7 +4149,9 @@ static unsigned long roundup_ring_size(unsigned long pending)
}
static void sky2_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct sky2_port *sky2 = netdev_priv(dev);
@@ -4161,7 +4163,9 @@ static void sky2_get_ringparam(struct net_device *dev,
}
static int sky2_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct sky2_port *sky2 = netdev_priv(dev);
@@ -4266,96 +4270,36 @@ static int sky2_get_eeprom_len(struct net_device *dev)
return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
}
-static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
-{
- unsigned long start = jiffies;
-
- while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) {
- /* Can take up to 10.6 ms for write */
- if (time_after(jiffies, start + HZ/4)) {
- dev_err(&hw->pdev->dev, "VPD cycle timed out\n");
- return -ETIMEDOUT;
- }
- msleep(1);
- }
-
- return 0;
-}
-
-static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data,
- u16 offset, size_t length)
-{
- int rc = 0;
-
- while (length > 0) {
- u32 val;
-
- sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset);
- rc = sky2_vpd_wait(hw, cap, 0);
- if (rc)
- break;
-
- val = sky2_pci_read32(hw, cap + PCI_VPD_DATA);
-
- memcpy(data, &val, min(sizeof(val), length));
- offset += sizeof(u32);
- data += sizeof(u32);
- length -= sizeof(u32);
- }
-
- return rc;
-}
-
-static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data,
- u16 offset, unsigned int length)
-{
- unsigned int i;
- int rc = 0;
-
- for (i = 0; i < length; i += sizeof(u32)) {
- u32 val = *(u32 *)(data + i);
-
- sky2_pci_write32(hw, cap + PCI_VPD_DATA, val);
- sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F);
-
- rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F);
- if (rc)
- break;
- }
- return rc;
-}
-
static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u8 *data)
{
struct sky2_port *sky2 = netdev_priv(dev);
- int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
-
- if (!cap)
- return -EINVAL;
+ int rc;
eeprom->magic = SKY2_EEPROM_MAGIC;
+ rc = pci_read_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len,
+ data);
+ if (rc < 0)
+ return rc;
+
+ eeprom->len = rc;
- return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len);
+ return 0;
}
static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u8 *data)
{
struct sky2_port *sky2 = netdev_priv(dev);
- int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
-
- if (!cap)
- return -EINVAL;
+ int rc;
if (eeprom->magic != SKY2_EEPROM_MAGIC)
return -EINVAL;
- /* Partial writes not supported */
- if ((eeprom->offset & 3) || (eeprom->len & 3))
- return -EINVAL;
+ rc = pci_write_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len,
+ data);
- return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
+ return rc < 0 ? rc : 0;
}
static netdev_features_t sky2_fix_features(struct net_device *dev,
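
sky2's ~90 lines of hand-rolled VPD register banging give way to the PCI core's pci_read_vpd_any()/pci_write_vpd_any(), which centralize the PCI_VPD_ADDR/PCI_VPD_DATA handshake, flag polling and timeouts, and return the number of bytes transferred or a negative errno. The calling pattern, as in the converted sky2_get_eeprom() above:

	ssize_t rc = pci_read_vpd_any(pdev, offset, len, buf);

	if (rc < 0)
		return rc;	/* e.g. -ETIMEDOUT on a stuck VPD cycle */
	len = rc;		/* may be shorter than requested */
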
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 75d67d1b5f6b..de4152e2e3e4 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -463,94 +463,8 @@ static void mtk_mac_link_up(struct phylink_config *config,
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
-static void mtk_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct mtk_mac *mac = container_of(config, struct mtk_mac,
- phylink_config);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
- phy_interface_mode_is_rgmii(state->interface)) &&
- !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
- !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
- !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
- (state->interface == PHY_INTERFACE_MODE_SGMII ||
- phy_interface_mode_is_8023z(state->interface)))) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_TRGMII:
- phylink_set(mask, 1000baseT_Full);
- break;
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 2500baseX_Full);
- break;
- case PHY_INTERFACE_MODE_GMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- phylink_set(mask, 1000baseT_Half);
- fallthrough;
- case PHY_INTERFACE_MODE_SGMII:
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- fallthrough;
- case PHY_INTERFACE_MODE_MII:
- case PHY_INTERFACE_MODE_RMII:
- case PHY_INTERFACE_MODE_REVMII:
- case PHY_INTERFACE_MODE_NA:
- default:
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- break;
- }
-
- if (state->interface == PHY_INTERFACE_MODE_NA) {
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 2500baseX_Full);
- }
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- phylink_set(mask, 1000baseX_Full);
- }
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- }
- }
-
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- /* We can only operate at 2500BaseX or 1000BaseX. If requested
- * to advertise both, only report advertising at 2500BaseX.
- */
- phylink_helper_basex_speed(state);
-}
-
static const struct phylink_mac_ops mtk_phylink_ops = {
- .validate = mtk_validate,
+ .validate = phylink_generic_validate,
.mac_pcs_get_state = mtk_mac_pcs_get_state,
.mac_an_restart = mtk_mac_an_restart,
.mac_config = mtk_mac_config,
@@ -3009,6 +2923,29 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink_config.dev = &eth->netdev[id]->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
+ mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ mac->phylink_config.supported_interfaces);
+
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
+ phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
+
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
+ __set_bit(PHY_INTERFACE_MODE_TRGMII,
+ mac->phylink_config.supported_interfaces);
+
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ mac->phylink_config.supported_interfaces);
+ }
phylink = phylink_create(&mac->phylink_config,
of_fwnode_handle(mac->of_node),
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 066d79e4ecfc..e97d97e94231 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1141,7 +1141,9 @@ static void mlx4_en_get_pauseparam(struct net_device *dev,
}
static int mlx4_en_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -1208,7 +1210,9 @@ out:
}
static void mlx4_en_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index f0ac6b0d9653..48b12ee44b8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -1148,9 +1148,12 @@ void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
- struct ethtool_coalesce *coal);
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
- struct ethtool_coalesce *coal);
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack);
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c
index 7edde4d536fd..17325c5d6516 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c
@@ -155,3 +155,61 @@ struct mlx5_modify_hdr *mlx5e_mod_hdr_get(struct mlx5e_mod_hdr_handle *mh)
return mh->modify_hdr;
}
+char *
+mlx5e_mod_hdr_alloc(struct mlx5_core_dev *mdev, int namespace,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+{
+ int new_num_actions, max_hw_actions;
+ size_t new_sz, old_sz;
+ void *ret;
+
+ if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
+ goto out;
+
+ max_hw_actions = mlx5e_mod_hdr_max_actions(mdev, namespace);
+ new_num_actions = min(max_hw_actions,
+ mod_hdr_acts->actions ?
+ mod_hdr_acts->max_actions * 2 : 1);
+ if (mod_hdr_acts->max_actions == new_num_actions)
+ return ERR_PTR(-ENOSPC);
+
+ new_sz = MLX5_MH_ACT_SZ * new_num_actions;
+ old_sz = mod_hdr_acts->max_actions * MLX5_MH_ACT_SZ;
+
+ if (mod_hdr_acts->is_static) {
+ ret = kzalloc(new_sz, GFP_KERNEL);
+ if (ret) {
+ memcpy(ret, mod_hdr_acts->actions, old_sz);
+ mod_hdr_acts->is_static = false;
+ }
+ } else {
+ ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
+ if (ret)
+ memset(ret + old_sz, 0, new_sz - old_sz);
+ }
+ if (!ret)
+ return ERR_PTR(-ENOMEM);
+
+ mod_hdr_acts->actions = ret;
+ mod_hdr_acts->max_actions = new_num_actions;
+
+out:
+ return mod_hdr_acts->actions + (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);
+}
+
+void
+mlx5e_mod_hdr_dealloc(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+{
+ if (!mod_hdr_acts->is_static)
+ kfree(mod_hdr_acts->actions);
+
+ mod_hdr_acts->actions = NULL;
+ mod_hdr_acts->num_actions = 0;
+ mod_hdr_acts->max_actions = 0;
+}
+
+char *
+mlx5e_mod_hdr_get_item(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, int pos)
+{
+ return mod_hdr_acts->actions + (pos * MLX5_MH_ACT_SZ);
+}
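
For reference, a hedged sketch of how a caller is expected to use the new allocator (the helper below is hypothetical; the pattern mirrors the tc_ct.c and en_tc.c call sites changed later in this series): mlx5e_mod_hdr_alloc() returns a pointer to the next free action slot, doubling the backing array up to the firmware's max_modify_header_actions cap, and the caller commits the slot by bumping num_actions.

/* Hypothetical caller, for illustration only. */
static int example_append_set_action(struct mlx5_core_dev *mdev, int ns,
				     struct mlx5e_tc_mod_hdr_acts *acts)
{
	char *modact;

	modact = mlx5e_mod_hdr_alloc(mdev, ns, acts);
	if (IS_ERR(modact))
		return PTR_ERR(modact);	/* -ENOSPC at the HW cap, -ENOMEM otherwise */

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	acts->num_actions++;		/* commit the reserved slot */
	return 0;
}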
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h
index 33b23d8f9182..b8dac418d0a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h
@@ -7,14 +7,32 @@
#include <linux/hashtable.h>
#include <linux/mlx5/fs.h>
+#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
+
struct mlx5e_mod_hdr_handle;
struct mlx5e_tc_mod_hdr_acts {
int num_actions;
int max_actions;
+ bool is_static;
void *actions;
};
+#define DECLARE_MOD_HDR_ACTS_ACTIONS(name, len) \
+ u8 name[len][MLX5_MH_ACT_SZ] = {}
+
+#define DECLARE_MOD_HDR_ACTS(name, acts_arr) \
+ struct mlx5e_tc_mod_hdr_acts name = { \
+ .max_actions = ARRAY_SIZE(acts_arr), \
+ .is_static = true, \
+ .actions = acts_arr, \
+ }
+
+char *mlx5e_mod_hdr_alloc(struct mlx5_core_dev *mdev, int namespace,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
+void mlx5e_mod_hdr_dealloc(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
+char *mlx5e_mod_hdr_get_item(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, int pos);
+
struct mlx5e_mod_hdr_handle *
mlx5e_mod_hdr_attach(struct mlx5_core_dev *mdev,
struct mod_hdr_tbl *tbl,
@@ -28,4 +46,12 @@ struct mlx5_modify_hdr *mlx5e_mod_hdr_get(struct mlx5e_mod_hdr_handle *mh);
void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl);
void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl);
+static inline int mlx5e_mod_hdr_max_actions(struct mlx5_core_dev *mdev, int namespace)
+{
+ if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
+ return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
+ else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
+ return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
+}
+
#endif /* __MLX5E_EN_MOD_HDR_H__ */
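
A short usage sketch of the new macros, assuming ten inline slots cover the common case (the wrapper function is illustrative): the actions array starts on the stack, is_static makes the allocator copy into a heap buffer on first growth instead of krealloc()ing a stack pointer, and mlx5e_mod_hdr_dealloc() only kfree()s once the buffer has left the stack.

static void example_static_acts(struct mlx5_core_dev *mdev, int ns)
{
	DECLARE_MOD_HDR_ACTS_ACTIONS(actions_arr, 10);
	DECLARE_MOD_HDR_ACTS(mod_acts, actions_arr);
	char *modact;

	modact = mlx5e_mod_hdr_alloc(mdev, ns, &mod_acts);	/* no heap alloc yet */
	if (!IS_ERR(modact))
		mod_acts.num_actions++;

	mlx5e_mod_hdr_dealloc(&mod_acts);	/* kfree() skipped while still static */
}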
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 4f4bc8726ec4..860605133287 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -565,7 +565,7 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
snprintf(err_str, sizeof(err_str),
"TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
- jiffies_to_usecs(jiffies - sq->txq->trans_start));
+ jiffies_to_usecs(jiffies - READ_ONCE(sq->txq->trans_start)));
mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
return to_ctx.status;
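
The trans_start field is updated concurrently by the transmit path, so the reporter must use READ_ONCE() to get a single, untorn load (presumably paired with WRITE_ONCE() on the writer side). The general pattern, for illustration:

/* Reader side of a lockless, concurrently-written field. */
unsigned long start = READ_ONCE(txq->trans_start);
unsigned long age   = jiffies_to_usecs(jiffies - start);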
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
index df6888c4793c..ff4b4f8a5a9d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
@@ -5,6 +5,7 @@
#include <net/psample.h>
#include "en/mapping.h"
#include "en/tc/post_act.h"
+#include "en/mod_hdr.h"
#include "sample.h"
#include "eswitch.h"
#include "en_tc.h"
@@ -255,12 +256,12 @@ sample_modify_hdr_get(struct mlx5_core_dev *mdev, u32 obj_id,
goto err_modify_hdr;
}
- dealloc_mod_hdr_actions(&mod_acts);
+ mlx5e_mod_hdr_dealloc(&mod_acts);
return modify_hdr;
err_modify_hdr:
err_post_act:
- dealloc_mod_hdr_actions(&mod_acts);
+ mlx5e_mod_hdr_dealloc(&mod_acts);
err_set_regc0:
return ERR_PTR(err);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 2445e2ae3324..9f33729e7fc4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -36,6 +36,12 @@
#define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen)
#define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0)
+/* Statically allocate modify actions for
+ * IPv6 and port NAT (5) + tuple fields (4) + NIC mode zone restore (1) = 10.
+ * The buffer is grown dynamically if more are needed (e.g. IPv6 SNAT + DNAT).
+ */
+#define MLX5_CT_MIN_MOD_ACTS 10
+
#define ct_dbg(fmt, args...)\
netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args)
@@ -609,22 +615,15 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
struct flow_action *flow_action = &flow_rule->action;
struct mlx5_core_dev *mdev = ct_priv->dev;
struct flow_action_entry *act;
- size_t action_size;
char *modact;
int err, i;
- action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
-
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_MANGLE: {
- err = alloc_mod_hdr_actions(mdev, ct_priv->ns_type,
- mod_acts);
- if (err)
- return err;
-
- modact = mod_acts->actions +
- mod_acts->num_actions * action_size;
+ modact = mlx5e_mod_hdr_alloc(mdev, ct_priv->ns_type, mod_acts);
+ if (IS_ERR(modact))
+ return PTR_ERR(modact);
err = mlx5_tc_ct_parse_mangle_to_mod_act(act, modact);
if (err)
@@ -652,7 +651,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5e_mod_hdr_handle **mh,
u8 zone_restore_id, bool nat)
{
- struct mlx5e_tc_mod_hdr_acts mod_acts = {};
+ DECLARE_MOD_HDR_ACTS_ACTIONS(actions_arr, MLX5_CT_MIN_MOD_ACTS);
+ DECLARE_MOD_HDR_ACTS(mod_acts, actions_arr);
struct flow_action_entry *meta;
u16 ct_state = 0;
int err;
@@ -706,11 +706,11 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
attr->modify_hdr = mlx5e_mod_hdr_get(*mh);
}
- dealloc_mod_hdr_actions(&mod_acts);
+ mlx5e_mod_hdr_dealloc(&mod_acts);
return 0;
err_mapping:
- dealloc_mod_hdr_actions(&mod_acts);
+ mlx5e_mod_hdr_dealloc(&mod_acts);
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
return err;
}
@@ -907,12 +907,9 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_tuple rev_tuple = entry->tuple;
struct mlx5_ct_counter *shared_counter;
struct mlx5_ct_entry *rev_entry;
- __be16 tmp_port;
/* get the reversed tuple */
- tmp_port = rev_tuple.port.src;
- rev_tuple.port.src = rev_tuple.port.dst;
- rev_tuple.port.dst = tmp_port;
+ swap(rev_tuple.port.src, rev_tuple.port.dst);
if (rev_tuple.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
__be32 tmp_addr = rev_tuple.ip.src_v4;
@@ -1460,7 +1457,7 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
}
pre_ct->miss_rule = rule;
- dealloc_mod_hdr_actions(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(&pre_mod_acts);
kvfree(spec);
return 0;
@@ -1469,7 +1466,7 @@ err_miss_rule:
err_flow_rule:
mlx5_modify_header_dealloc(dev, pre_ct->modify_hdr);
err_mapping:
- dealloc_mod_hdr_actions(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(&pre_mod_acts);
kvfree(spec);
return err;
}
@@ -1865,14 +1862,14 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
attr->ct_attr.ct_flow = ct_flow;
- dealloc_mod_hdr_actions(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(&pre_mod_acts);
return ct_flow->pre_ct_rule;
err_insert_orig:
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
err_mapping:
- dealloc_mod_hdr_actions(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(&pre_mod_acts);
mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
err_get_chain:
kfree(ct_flow->pre_ct_attr);
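
The open-coded temporary in mlx5_tc_ct_shared_counter_get() above is replaced with the kernel's swap() helper, a type-checked macro that exchanges two lvalues. In miniature:

__be16 a = htons(80), b = htons(443);

swap(a, b);	/* expands to: typeof(a) tmp = a; a = b; b = tmp; */
/* now a == htons(443), b == htons(80) */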
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index a5e450973225..33815246fead 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -103,7 +103,7 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
}
static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
+ struct net_device *dev,
struct mlx5e_tc_tun_route_attr *attr)
{
struct net_device *route_dev;
@@ -122,13 +122,13 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
attr->fl.fl4.flowi4_oif = uplink_dev->ifindex;
} else {
- struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev);
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(dev);
if (tunnel && tunnel->get_remote_ifindex)
- attr->fl.fl4.flowi4_oif = tunnel->get_remote_ifindex(mirred_dev);
+ attr->fl.fl4.flowi4_oif = tunnel->get_remote_ifindex(dev);
}
- rt = ip_route_output_key(dev_net(mirred_dev), &attr->fl.fl4);
+ rt = ip_route_output_key(dev_net(dev), &attr->fl.fl4);
if (IS_ERR(rt))
return PTR_ERR(rt);
@@ -440,10 +440,10 @@ release_neigh:
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
+ struct net_device *dev,
struct mlx5e_tc_tun_route_attr *attr)
{
- struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev);
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(dev);
struct net_device *route_dev;
struct net_device *out_dev;
struct dst_entry *dst;
@@ -451,8 +451,8 @@ static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
int ret;
if (tunnel && tunnel->get_remote_ifindex)
- attr->fl.fl6.flowi6_oif = tunnel->get_remote_ifindex(mirred_dev);
- dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, &attr->fl.fl6,
+ attr->fl.fl6.flowi6_oif = tunnel->get_remote_ifindex(dev);
+ dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(dev), NULL, &attr->fl.fl6,
NULL);
if (IS_ERR(dst))
return PTR_ERR(dst);
@@ -708,7 +708,8 @@ release_neigh:
int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
- struct mlx5_flow_attr *flow_attr)
+ struct mlx5_flow_attr *flow_attr,
+ struct net_device *filter_dev)
{
struct mlx5_esw_flow_attr *esw_attr = flow_attr->esw_attr;
struct mlx5e_tc_int_port *int_port;
@@ -720,14 +721,14 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
/* Addresses are swapped for decap */
attr.fl.fl4.saddr = esw_attr->rx_tun_attr->dst_ip.v4;
attr.fl.fl4.daddr = esw_attr->rx_tun_attr->src_ip.v4;
- err = mlx5e_route_lookup_ipv4_get(priv, priv->netdev, &attr);
+ err = mlx5e_route_lookup_ipv4_get(priv, filter_dev, &attr);
}
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
else if (flow_attr->tun_ip_version == 6) {
/* Addresses are swapped for decap */
attr.fl.fl6.saddr = esw_attr->rx_tun_attr->dst_ip.v6;
attr.fl.fl6.daddr = esw_attr->rx_tun_attr->src_ip.v6;
- err = mlx5e_route_lookup_ipv6_get(priv, priv->netdev, &attr);
+ err = mlx5e_route_lookup_ipv6_get(priv, filter_dev, &attr);
}
#endif
else
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index aa092eaeaec3..b38f693bbb52 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -94,7 +94,8 @@ mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
#endif
int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
- struct mlx5_flow_attr *attr);
+ struct mlx5_flow_attr *attr,
+ struct net_device *filter_dev);
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 042b1abe1437..3c8851ee8172 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -1159,7 +1159,7 @@ int mlx5e_attach_decap_route(struct mlx5e_priv *priv,
tbl_time_before = mlx5e_route_tbl_get_last_update(priv);
tbl_time_after = tbl_time_before;
- err = mlx5e_tc_tun_route_lookup(priv, &parse_attr->spec, attr);
+ err = mlx5e_tc_tun_route_lookup(priv, &parse_attr->spec, attr, parse_attr->filter_dev);
if (err || !esw_attr->rx_tun_attr->decap_vport)
goto out;
@@ -1480,7 +1480,7 @@ static void mlx5e_reoffload_decap(struct mlx5e_priv *priv,
parse_attr = attr->parse_attr;
spec = &parse_attr->spec;
- err = mlx5e_tc_tun_route_lookup(priv, spec, attr);
+ err = mlx5e_tc_tun_route_lookup(priv, spec, attr, parse_attr->filter_dev);
if (err) {
mlx5_core_warn(priv->mdev, "Failed to lookup route for flow, %d\n",
err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c2ea5fad48dd..c8757c5a812b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -314,7 +314,9 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
}
static void mlx5e_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -380,7 +382,9 @@ unlock:
}
static int mlx5e_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -511,7 +515,8 @@ static int mlx5e_set_channels(struct net_device *dev,
}
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
- struct ethtool_coalesce *coal)
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal)
{
struct dim_cq_moder *rx_moder, *tx_moder;
@@ -528,6 +533,11 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
coal->tx_max_coalesced_frames = tx_moder->pkts;
coal->use_adaptive_tx_coalesce = priv->channels.params.tx_dim_enabled;
+ kernel_coal->use_cqe_mode_rx =
+ MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_BASED_MODER);
+ kernel_coal->use_cqe_mode_tx =
+ MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_CQE_BASED_MODER);
+
return 0;
}
@@ -538,7 +548,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
}
#define MLX5E_MAX_COAL_TIME MLX5_MAX_CQ_PERIOD
@@ -578,14 +588,26 @@ mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coal
}
}
+/* Convert a boolean cq_mode value to the mlx5 CQ period mode:
+ * true  : MLX5_CQ_PERIOD_MODE_START_FROM_CQE
+ * false : MLX5_CQ_PERIOD_MODE_START_FROM_EQE
+ */
+static int cqe_mode_to_period_mode(bool val)
+{
+ return val ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+}
+
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
- struct ethtool_coalesce *coal)
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
struct dim_cq_moder *rx_moder, *tx_moder;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params new_params;
bool reset_rx, reset_tx;
bool reset = true;
+ u8 cq_period_mode;
int err = 0;
if (!MLX5_CAP_GEN(mdev, cq_moderation))
@@ -605,6 +627,12 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
return -ERANGE;
}
+ if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
+ !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) {
+ NL_SET_ERR_MSG_MOD(extack, "cqe_mode_rx/tx is not supported on this device");
+ return -EOPNOTSUPP;
+ }
+
mutex_lock(&priv->state_lock);
new_params = priv->channels.params;
@@ -621,6 +649,18 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
+ cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_rx);
+ if (cq_period_mode != rx_moder->cq_period_mode) {
+ mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode);
+ reset_rx = true;
+ }
+
+ cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_tx);
+ if (cq_period_mode != tx_moder->cq_period_mode) {
+ mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode);
+ reset_tx = true;
+ }
+
if (reset_rx) {
u8 mode = MLX5E_GET_PFLAG(&new_params,
MLX5E_PFLAG_RX_CQE_BASED_MODER);
@@ -656,9 +696,9 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_set_coalesce(priv, coal);
+ return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
}
static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
@@ -2358,7 +2398,8 @@ static void mlx5e_get_rmon_stats(struct net_device *netdev,
const struct ethtool_ops mlx5e_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
- ETHTOOL_COALESCE_USE_ADAPTIVE,
+ ETHTOOL_COALESCE_USE_ADAPTIVE |
+ ETHTOOL_COALESCE_USE_CQE,
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ext_state = mlx5e_get_link_ext_state,
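
With ETHTOOL_COALESCE_USE_CQE advertised, the netlink ethtool interface lets userspace flip the moderation timer base per direction (e.g. `ethtool -C eth0 cqe-mode-rx on`, assuming an ethtool build with CQE-mode support). A minimal sketch of the validation pattern used above; example_supports_cqe_mode() is a hypothetical stand-in for the device capability check:

static int example_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coal,
				struct kernel_ethtool_coalesce *kc,
				struct netlink_ext_ack *extack)
{
	if ((kc->use_cqe_mode_rx || kc->use_cqe_mode_tx) &&
	    !example_supports_cqe_mode(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "CQE based moderation is not supported");
		return -EOPNOTSUPP;
	}
	return 0;
}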
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index e58a9ec42553..87c762066adb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -219,16 +219,22 @@ static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
}
}
-static void mlx5e_rep_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+static void
+mlx5e_rep_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
mlx5e_ethtool_get_ringparam(priv, param);
}
-static int mlx5e_rep_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+static int
+mlx5e_rep_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -258,7 +264,7 @@ static int mlx5e_rep_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
}
static int mlx5e_rep_set_coalesce(struct net_device *netdev,
@@ -268,7 +274,7 @@ static int mlx5e_rep_set_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_set_coalesce(priv, coal);
+ return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
}
static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 96967b0a2441..e384f6458c06 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -37,6 +37,7 @@
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
+#include <net/gro.h>
#include <net/udp.h>
#include <net/tcp.h>
#include "en.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 2a9bfc3ffa2e..3c91a11e27ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -2076,7 +2076,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
for (i = 0; i < NUM_PTP_CH_STATS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ptp_ch_stats_desc[i].format);
+ "%s", ptp_ch_stats_desc[i].format);
if (priv->tx_ptp_opened) {
for (tc = 0; tc < priv->max_opened_tc; tc++)
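
This hunk (and the matching one in pci_irq.c further down) closes a classic format-string bug: a string that may contain '%' must never be passed as the format argument itself. The safe idiom:

/* Unsafe: fmt is interpreted, so a stray '%' reads garbage varargs. */
/* sprintf(buf, fmt); */

/* Safe: fmt is treated purely as data. */
sprintf(buf, "%s", fmt);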
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3d45f4ae80c0..cb66c08783c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -71,7 +71,6 @@
#include "lag/mp.h"
#define nic_chains(priv) ((priv)->fs.tc.chains)
-#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
@@ -209,12 +208,9 @@ mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
char *modact;
int err;
- err = alloc_mod_hdr_actions(mdev, ns, mod_hdr_acts);
- if (err)
- return err;
-
- modact = mod_hdr_acts->actions +
- (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);
+ modact = mlx5e_mod_hdr_alloc(mdev, ns, mod_hdr_acts);
+ if (IS_ERR(modact))
+ return PTR_ERR(modact);
/* Firmware has 5bit length field and 0 means 32bits */
if (mlen == 32)
@@ -333,7 +329,7 @@ void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
char *modact;
- modact = mod_hdr_acts->actions + (act_id * MLX5_MH_ACT_SZ);
+ modact = mlx5e_mod_hdr_get_item(mod_hdr_acts, act_id);
/* Firmware has 5bit length field and 0 means 32bits */
if (mlen == 32)
@@ -1076,7 +1072,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+ mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
if (err)
return err;
}
@@ -1132,16 +1128,16 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
}
mutex_unlock(&priv->fs.tc.t_lock);
- kvfree(attr->parse_attr);
-
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
- mlx5_fc_destroy(priv->mdev, attr->counter);
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+ mlx5_fc_destroy(priv->mdev, attr->counter);
if (flow_flag_test(flow, HAIRPIN))
mlx5e_hairpin_flow_del(priv, flow);
+ kvfree(attr->parse_attr);
kfree(flow->attr);
}
@@ -1624,15 +1620,12 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- dealloc_mod_hdr_actions(&attr->parse_attr->mod_hdr_acts);
+ mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
if (vf_tun && attr->modify_hdr)
mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
else
mlx5e_detach_mod_hdr(priv, flow);
}
- kfree(attr->sample_attr);
- kvfree(attr->parse_attr);
- kvfree(attr->esw_attr->rx_tun_attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);
@@ -1646,6 +1639,9 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
+ kfree(attr->sample_attr);
+ kvfree(attr->esw_attr->rx_tun_attr);
+ kvfree(attr->parse_attr);
kfree(flow->attr);
}
@@ -2767,13 +2763,12 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
- int i, action_size, first, last, next_z;
void *headers_c, *headers_v, *action, *vals_p;
u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
struct mlx5e_tc_mod_hdr_acts *mod_acts;
- struct mlx5_fields *f;
unsigned long mask, field_mask;
- int err;
+ int i, first, last, next_z;
+ struct mlx5_fields *f;
u8 cmd;
mod_acts = &parse_attr->mod_hdr_acts;
@@ -2785,8 +2780,6 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
set_vals = &hdrs[0].vals;
add_vals = &hdrs[1].vals;
- action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
-
for (i = 0; i < ARRAY_SIZE(fields); i++) {
bool skip;
@@ -2854,18 +2847,16 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
- if (err) {
+ action = mlx5e_mod_hdr_alloc(priv->mdev, namespace, mod_acts);
+ if (IS_ERR(action)) {
NL_SET_ERR_MSG_MOD(extack,
"too many pedit actions, can't offload");
mlx5_core_warn(priv->mdev,
"mlx5: parsed %d pedit actions, can't do more\n",
mod_acts->num_actions);
- return err;
+ return PTR_ERR(action);
}
- action = mod_acts->actions +
- (mod_acts->num_actions * action_size);
MLX5_SET(set_action_in, action, action_type, cmd);
MLX5_SET(set_action_in, action, field, f->field);
@@ -2895,57 +2886,6 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
return 0;
}
-static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
- int namespace)
-{
- if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
- return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
- else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
- return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
-}
-
-int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
- int namespace,
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
-{
- int action_size, new_num_actions, max_hw_actions;
- size_t new_sz, old_sz;
- void *ret;
-
- if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
- return 0;
-
- action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
-
- max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
- namespace);
- new_num_actions = min(max_hw_actions,
- mod_hdr_acts->actions ?
- mod_hdr_acts->max_actions * 2 : 1);
- if (mod_hdr_acts->max_actions == new_num_actions)
- return -ENOSPC;
-
- new_sz = action_size * new_num_actions;
- old_sz = mod_hdr_acts->max_actions * action_size;
- ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
- if (!ret)
- return -ENOMEM;
-
- memset(ret + old_sz, 0, new_sz - old_sz);
- mod_hdr_acts->actions = ret;
- mod_hdr_acts->max_actions = new_num_actions;
-
- return 0;
-}
-
-void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
-{
- kfree(mod_hdr_acts->actions);
- mod_hdr_acts->actions = NULL;
- mod_hdr_acts->num_actions = 0;
- mod_hdr_acts->max_actions = 0;
-}
-
static const struct pedit_headers zero_masks = {};
static int
@@ -2968,7 +2908,7 @@ parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
goto out_err;
}
- if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
+ if (!mlx5e_mod_hdr_max_actions(priv->mdev, namespace)) {
NL_SET_ERR_MSG_MOD(extack,
"The pedit offload action is not supported");
goto out_err;
@@ -3061,7 +3001,7 @@ static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
return 0;
out_dealloc_parsed_actions:
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+ mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
return err;
}
@@ -3485,12 +3425,12 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
if (err)
return err;
- /* In case all pedit actions are skipped, remove the MOD_HDR flag. */
if (parse_attr->mod_hdr_acts.num_actions > 0)
return 0;
+ /* In case all pedit actions are skipped, remove the MOD_HDR flag. */
attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+ mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
if (ns_type != MLX5_FLOW_NAMESPACE_FDB)
return 0;
@@ -4714,7 +4654,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
err_free:
flow_flag_set(flow, FAILED);
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+ mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
mlx5e_flow_put(priv, flow);
out:
return err;
@@ -5014,14 +4954,8 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *ma)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct netlink_ext_ack *extack = ma->common.extack;
- if (!mlx5_esw_qos_enabled(esw)) {
- NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
- return -EOPNOTSUPP;
- }
-
if (ma->common.prio != 1) {
NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index fdb222793027..eb042f0f5a41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -247,11 +247,6 @@ int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow);
-int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
- int namespace,
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
-void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
-
struct mlx5e_tc_flow;
u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
index 425c91814b34..c275fe028b6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
@@ -14,6 +14,7 @@
#include "fs_core.h"
#include "esw/indir_table.h"
#include "lib/fs_chains.h"
+#include "en/mod_hdr.h"
#define MLX5_ESW_INDIR_TABLE_SIZE 128
#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
@@ -226,7 +227,7 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
goto err_handle;
}
- dealloc_mod_hdr_actions(&mod_acts);
+ mlx5e_mod_hdr_dealloc(&mod_acts);
rule->handle = handle;
rule->vni = esw_attr->rx_tun_attr->vni;
rule->mh = flow_act.modify_hdr;
@@ -243,7 +244,7 @@ err_table:
mlx5_modify_header_dealloc(esw->dev, flow_act.modify_hdr);
err_mod_hdr_alloc:
err_mod_hdr_regc1:
- dealloc_mod_hdr_actions(&mod_acts);
+ mlx5e_mod_hdr_dealloc(&mod_acts);
err_mod_hdr_regc0:
err_ethertype:
kfree(rule);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index df277a6cddc0..2b52f7c09152 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -522,9 +522,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
return PTR_ERR(evport);
mutex_lock(&esw->state_lock);
- err = mlx5_esw_qos_set_vport_min_rate(esw, evport, min_rate, NULL);
- if (!err)
- err = mlx5_esw_qos_set_vport_max_rate(esw, evport, max_rate, NULL);
+ err = mlx5_esw_qos_set_vport_rate(esw, evport, max_rate, min_rate);
mutex_unlock(&esw->state_lock);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index c6cc67cb4f6a..ff0a07a91992 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -204,10 +204,8 @@ static int esw_qos_normalize_groups_min_rate(struct mlx5_eswitch *esw, u32 divid
return 0;
}
-int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw,
- struct mlx5_vport *evport,
- u32 min_rate,
- struct netlink_ext_ack *extack)
+static int esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
+ u32 min_rate, struct netlink_ext_ack *extack)
{
u32 fw_max_bw_share, previous_min_rate;
bool min_rate_supported;
@@ -231,10 +229,8 @@ int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw,
return err;
}
-int mlx5_esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw,
- struct mlx5_vport *evport,
- u32 max_rate,
- struct netlink_ext_ack *extack)
+static int esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
+ u32 max_rate, struct netlink_ext_ack *extack)
{
u32 act_max_rate = max_rate;
bool max_rate_supported;
@@ -432,16 +428,13 @@ static int esw_qos_vport_update_group(struct mlx5_eswitch *esw,
}
static struct mlx5_esw_rate_group *
-esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+__esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_esw_rate_group *group;
u32 divider;
int err;
- if (!MLX5_CAP_QOS(esw->dev, log_esw_max_sched_depth))
- return ERR_PTR(-EOPNOTSUPP);
-
group = kzalloc(sizeof(*group), GFP_KERNEL);
if (!group)
return ERR_PTR(-ENOMEM);
@@ -482,9 +475,32 @@ err_sched_elem:
return ERR_PTR(err);
}
-static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
- struct mlx5_esw_rate_group *group,
- struct netlink_ext_ack *extack)
+static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack);
+static void esw_qos_put(struct mlx5_eswitch *esw);
+
+static struct mlx5_esw_rate_group *
+esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_rate_group *group;
+ int err;
+
+ if (!MLX5_CAP_QOS(esw->dev, log_esw_max_sched_depth))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ err = esw_qos_get(esw, extack);
+ if (err)
+ return ERR_PTR(err);
+
+ group = __esw_qos_create_rate_group(esw, extack);
+ if (IS_ERR(group))
+ esw_qos_put(esw);
+
+ return group;
+}
+
+static int __esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
+ struct mlx5_esw_rate_group *group,
+ struct netlink_ext_ack *extack)
{
u32 divider;
int err;
@@ -503,7 +519,21 @@ static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed");
trace_mlx5_esw_group_qos_destroy(esw->dev, group, group->tsar_ix);
+
kfree(group);
+
+ return err;
+}
+
+static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
+ struct mlx5_esw_rate_group *group,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = __esw_qos_destroy_rate_group(esw, group, extack);
+ esw_qos_put(esw);
+
return err;
}
@@ -526,7 +556,7 @@ static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
return false;
}
-void mlx5_esw_qos_create(struct mlx5_eswitch *esw)
+static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_core_dev *dev = esw->dev;
@@ -534,14 +564,10 @@ void mlx5_esw_qos_create(struct mlx5_eswitch *esw)
int err;
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
- return;
+ return -EOPNOTSUPP;
if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
- return;
-
- mutex_lock(&esw->state_lock);
- if (esw->qos.enabled)
- goto unlock;
+ return -EOPNOTSUPP;
MLX5_SET(scheduling_context, tsar_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
@@ -555,75 +581,93 @@ void mlx5_esw_qos_create(struct mlx5_eswitch *esw)
&esw->qos.root_tsar_ix);
if (err) {
esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err);
- goto unlock;
+ return err;
}
INIT_LIST_HEAD(&esw->qos.groups);
if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) {
- esw->qos.group0 = esw_qos_create_rate_group(esw, NULL);
+ esw->qos.group0 = __esw_qos_create_rate_group(esw, extack);
if (IS_ERR(esw->qos.group0)) {
esw_warn(dev, "E-Switch create rate group 0 failed (%ld)\n",
PTR_ERR(esw->qos.group0));
goto err_group0;
}
}
- esw->qos.enabled = true;
-unlock:
- mutex_unlock(&esw->state_lock);
- return;
+ refcount_set(&esw->qos.refcnt, 1);
+
+ return 0;
err_group0:
- err = mlx5_destroy_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- esw->qos.root_tsar_ix);
- if (err)
- esw_warn(esw->dev, "E-Switch destroy root TSAR failed (%d)\n", err);
- mutex_unlock(&esw->state_lock);
+ if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH,
+ esw->qos.root_tsar_ix))
+ esw_warn(esw->dev, "E-Switch destroy root TSAR failed.\n");
+
+ return err;
}
-void mlx5_esw_qos_destroy(struct mlx5_eswitch *esw)
+static void esw_qos_destroy(struct mlx5_eswitch *esw)
{
- struct devlink *devlink = priv_to_devlink(esw->dev);
int err;
- devlink_rate_nodes_destroy(devlink);
- mutex_lock(&esw->state_lock);
- if (!esw->qos.enabled)
- goto unlock;
-
if (esw->qos.group0)
- esw_qos_destroy_rate_group(esw, esw->qos.group0, NULL);
+ __esw_qos_destroy_rate_group(esw, esw->qos.group0, NULL);
err = mlx5_destroy_scheduling_element_cmd(esw->dev,
SCHEDULING_HIERARCHY_E_SWITCH,
esw->qos.root_tsar_ix);
if (err)
esw_warn(esw->dev, "E-Switch destroy root TSAR failed (%d)\n", err);
+}
- esw->qos.enabled = false;
-unlock:
- mutex_unlock(&esw->state_lock);
+static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+{
+ int err = 0;
+
+ lockdep_assert_held(&esw->state_lock);
+
+ if (!refcount_inc_not_zero(&esw->qos.refcnt)) {
+ /* esw_qos_create() sets the refcount to 1 only on success,
+ * so there is no need to decrement on failure.
+ */
+ err = esw_qos_create(esw, extack);
+ }
+
+ return err;
}
-int mlx5_esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- u32 max_rate, u32 bw_share)
+static void esw_qos_put(struct mlx5_eswitch *esw)
+{
+ lockdep_assert_held(&esw->state_lock);
+ if (refcount_dec_and_test(&esw->qos.refcnt))
+ esw_qos_destroy(esw);
+}
+
+static int esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack)
{
int err;
lockdep_assert_held(&esw->state_lock);
- if (!esw->qos.enabled)
+ if (vport->qos.enabled)
return 0;
- if (vport->qos.enabled)
- return -EEXIST;
+ err = esw_qos_get(esw, extack);
+ if (err)
+ return err;
vport->qos.group = esw->qos.group0;
err = esw_qos_vport_create_sched_element(esw, vport, max_rate, bw_share);
- if (!err) {
- vport->qos.enabled = true;
- trace_mlx5_esw_vport_qos_create(vport, bw_share, max_rate);
- }
+ if (err)
+ goto err_out;
+
+ vport->qos.enabled = true;
+ trace_mlx5_esw_vport_qos_create(vport, bw_share, max_rate);
+
+ return 0;
+
+err_out:
+ esw_qos_put(esw);
return err;
}
@@ -633,7 +677,7 @@ void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
int err;
lockdep_assert_held(&esw->state_lock);
- if (!esw->qos.enabled || !vport->qos.enabled)
+ if (!vport->qos.enabled)
return;
WARN(vport->qos.group && vport->qos.group != esw->qos.group0,
"Disabling QoS on port before detaching it from group");
@@ -645,8 +689,27 @@ void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
vport->vport, err);
- vport->qos.enabled = false;
+ memset(&vport->qos, 0, sizeof(vport->qos));
trace_mlx5_esw_vport_qos_destroy(vport);
+
+ esw_qos_put(esw);
+}
+
+int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ u32 min_rate, u32 max_rate)
+{
+ int err;
+
+ lockdep_assert_held(&esw->state_lock);
+ err = esw_qos_vport_enable(esw, vport, 0, 0, NULL);
+ if (err)
+ return err;
+
+ err = esw_qos_set_vport_min_rate(esw, vport, min_rate, NULL);
+ if (!err)
+ err = esw_qos_set_vport_max_rate(esw, vport, max_rate, NULL);
+
+ return err;
}
int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps)
@@ -654,22 +717,29 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_vport *vport;
u32 bitmask;
+ int err;
vport = mlx5_eswitch_get_vport(esw, vport_num);
if (IS_ERR(vport))
return PTR_ERR(vport);
- if (!vport->qos.enabled)
- return -EOPNOTSUPP;
-
- MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
- bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+ mutex_lock(&esw->state_lock);
+ if (!vport->qos.enabled) {
+ /* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. */
+ err = esw_qos_vport_enable(esw, vport, rate_mbps, vport->qos.bw_share, NULL);
+ } else {
+ MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
+
+ bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+ err = mlx5_modify_scheduling_element_cmd(esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ ctx,
+ vport->qos.esw_tsar_ix,
+ bitmask);
+ }
+ mutex_unlock(&esw->state_lock);
- return mlx5_modify_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- ctx,
- vport->qos.esw_tsar_ix,
- bitmask);
+ return err;
}
#define MLX5_LINKSPEED_UNIT 125000 /* 1Mbps in Bps */
@@ -728,7 +798,12 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void
return err;
mutex_lock(&esw->state_lock);
- err = mlx5_esw_qos_set_vport_min_rate(esw, vport, tx_share, extack);
+ err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
+ if (err)
+ goto unlock;
+
+ err = esw_qos_set_vport_min_rate(esw, vport, tx_share, extack);
+unlock:
mutex_unlock(&esw->state_lock);
return err;
}
@@ -749,7 +824,12 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *
return err;
mutex_lock(&esw->state_lock);
- err = mlx5_esw_qos_set_vport_max_rate(esw, vport, tx_max, extack);
+ err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
+ if (err)
+ goto unlock;
+
+ err = esw_qos_set_vport_max_rate(esw, vport, tx_max, extack);
+unlock:
mutex_unlock(&esw->state_lock);
return err;
}
@@ -846,7 +926,9 @@ int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
int err;
mutex_lock(&esw->state_lock);
- err = esw_qos_vport_update_group(esw, vport, group, extack);
+ err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
+ if (!err)
+ err = esw_qos_vport_update_group(esw, vport, group, extack);
mutex_unlock(&esw->state_lock);
return err;
}
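
The series replaces the esw->qos.enabled flag with lazy, reference-counted initialization: the first user creates the scheduling hierarchy, later users just take a reference, and the last put tears it down. The pattern in miniature, with hypothetical names, all under one lock to match the lockdep_assert_held() calls above:

static int obj_get(struct obj *o)
{
	lockdep_assert_held(&o->lock);
	if (refcount_inc_not_zero(&o->refcnt))
		return 0;		/* already initialized; reference taken */
	return obj_create(o);		/* first user: sets refcnt to 1 on success */
}

static void obj_put(struct obj *o)
{
	lockdep_assert_held(&o->lock);
	if (refcount_dec_and_test(&o->refcnt))
		obj_destroy(o);		/* last user: tear down */
}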
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
index 28451abe2d2f..0141e9d52037 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
@@ -6,18 +6,8 @@
#ifdef CONFIG_MLX5_ESWITCH
-int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw,
- struct mlx5_vport *evport,
- u32 min_rate,
- struct netlink_ext_ack *extack);
-int mlx5_esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw,
- struct mlx5_vport *evport,
- u32 max_rate,
- struct netlink_ext_ack *extack);
-void mlx5_esw_qos_create(struct mlx5_eswitch *esw);
-void mlx5_esw_qos_destroy(struct mlx5_eswitch *esw);
-int mlx5_esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- u32 max_rate, u32 bw_share);
+int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
+ u32 max_rate, u32 min_rate);
void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 51a8cecc4a7c..458ec0bca1b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -781,9 +781,6 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
if (err)
return err;
- /* Attach vport to the eswitch rate limiter */
- mlx5_esw_qos_vport_enable(esw, vport, vport->qos.max_rate, vport->qos.bw_share);
-
if (mlx5_esw_is_manager_vport(esw, vport_num))
return 0;
@@ -1260,8 +1257,6 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
- mlx5_esw_qos_create(esw);
-
esw->mode = mode;
if (mode == MLX5_ESWITCH_LEGACY) {
@@ -1290,7 +1285,6 @@ abort:
if (mode == MLX5_ESWITCH_OFFLOADS)
mlx5_rescan_drivers(esw->dev);
- mlx5_esw_qos_destroy(esw);
mlx5_esw_acls_ns_cleanup(esw);
return err;
}
@@ -1338,6 +1332,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
{
+ struct devlink *devlink = priv_to_devlink(esw->dev);
int old_mode;
lockdep_assert_held_write(&esw->mode_lock);
@@ -1367,7 +1362,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
if (old_mode == MLX5_ESWITCH_OFFLOADS)
mlx5_rescan_drivers(esw->dev);
- mlx5_esw_qos_destroy(esw);
+ devlink_rate_nodes_destroy(devlink);
+
mlx5_esw_acls_ns_cleanup(esw);
if (clear_vf)
@@ -1576,6 +1572,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
lockdep_register_key(&esw->mode_lock_key);
init_rwsem(&esw->mode_lock);
lockdep_set_class(&esw->mode_lock, &esw->mode_lock_key);
+ refcount_set(&esw->qos.refcnt, 0);
esw->enabled_vports = 0;
esw->mode = MLX5_ESWITCH_NONE;
@@ -1614,6 +1611,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
+ WARN_ON(refcount_read(&esw->qos.refcnt));
lockdep_unregister_key(&esw->mode_lock_key);
mutex_destroy(&esw->state_lock);
WARN_ON(!xa_empty(&esw->offloads.vhca_map));
@@ -1703,82 +1701,6 @@ bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);
}
-static bool
-is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
-{
- return vport_num == MLX5_VPORT_PF ||
- mlx5_eswitch_is_vf_vport(esw, vport_num) ||
- mlx5_esw_is_sf_vport(esw, vport_num);
-}
-
-int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
- u8 *hw_addr, int *hw_addr_len,
- struct netlink_ext_ack *extack)
-{
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- int err = -EOPNOTSUPP;
- u16 vport_num;
-
- esw = mlx5_devlink_eswitch_get(port->devlink);
- if (IS_ERR(esw))
- return PTR_ERR(esw);
-
- vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
- if (!is_port_function_supported(esw, vport_num))
- return -EOPNOTSUPP;
-
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport)) {
- NL_SET_ERR_MSG_MOD(extack, "Invalid port");
- return PTR_ERR(vport);
- }
-
- mutex_lock(&esw->state_lock);
- if (vport->enabled) {
- ether_addr_copy(hw_addr, vport->info.mac);
- *hw_addr_len = ETH_ALEN;
- err = 0;
- }
- mutex_unlock(&esw->state_lock);
- return err;
-}
-
-int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
- const u8 *hw_addr, int hw_addr_len,
- struct netlink_ext_ack *extack)
-{
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- int err = -EOPNOTSUPP;
- u16 vport_num;
-
- esw = mlx5_devlink_eswitch_get(port->devlink);
- if (IS_ERR(esw)) {
- NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
- return PTR_ERR(esw);
- }
-
- vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
- if (!is_port_function_supported(esw, vport_num)) {
- NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
- return -EINVAL;
- }
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport)) {
- NL_SET_ERR_MSG_MOD(extack, "Invalid port");
- return PTR_ERR(vport);
- }
-
- mutex_lock(&esw->state_lock);
- if (vport->enabled)
- err = mlx5_esw_set_vport_mac_locked(esw, vport, hw_addr);
- else
- NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
- mutex_unlock(&esw->state_lock);
- return err;
-}
-
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
u16 vport, int link_state)
{
@@ -1835,8 +1757,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
ivi->qos = evport->info.qos;
ivi->spoofchk = evport->info.spoofchk;
ivi->trusted = evport->info.trusted;
- ivi->min_tx_rate = evport->qos.min_rate;
- ivi->max_tx_rate = evport->qos.max_rate;
+ if (evport->qos.enabled) {
+ ivi->min_tx_rate = evport->qos.min_rate;
+ ivi->max_tx_rate = evport->qos.max_rate;
+ }
mutex_unlock(&esw->state_lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 42f8ee2e5d9f..513f741d16c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -308,10 +308,14 @@ struct mlx5_eswitch {
atomic64_t user_count;
struct {
- bool enabled;
u32 root_tsar_ix;
struct mlx5_esw_rate_group *group0;
struct list_head groups; /* Protected by esw->state_lock */
+
+ /* Protected by esw->state_lock.
+ * Initially 0, meaning no QoS users and QoS is disabled.
+ */
+ refcount_t refcnt;
} qos;
struct mlx5_esw_bridge_offloads *br_offloads;
@@ -516,11 +520,6 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u16 vport, u16 vlan, u8 qos, u8 set_flags);
-static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
-{
- return esw->qos.enabled;
-}
-
static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
u8 vlan_depth)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index a46455694f7a..98de91fc3d99 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3855,3 +3855,62 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
return vport->metadata;
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
+
+static bool
+is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return vport_num == MLX5_VPORT_PF ||
+ mlx5_eswitch_is_vf_vport(esw, vport_num) ||
+ mlx5_esw_is_sf_vport(esw, vport_num);
+}
+
+int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
+ u8 *hw_addr, int *hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ u16 vport_num;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+ if (!is_port_function_supported(esw, vport_num))
+ return -EOPNOTSUPP;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid port");
+ return PTR_ERR(vport);
+ }
+
+ mutex_lock(&esw->state_lock);
+ ether_addr_copy(hw_addr, vport->info.mac);
+ *hw_addr_len = ETH_ALEN;
+ mutex_unlock(&esw->state_lock);
+ return 0;
+}
+
+int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
+ const u8 *hw_addr, int hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ u16 vport_num;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw)) {
+ NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
+ return PTR_ERR(esw);
+ }
+
+ vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+ if (!is_port_function_supported(esw, vport_num)) {
+ NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
+ return -EINVAL;
+ }
+
+ return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 64f1abc4dc36..75121bc1eaa5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -420,6 +420,11 @@ static void print_health_info(struct mlx5_core_dev *dev)
if (!ioread8(&h->synd))
return;
+ if (ioread32be(&h->fw_ver) == 0xFFFFFFFF) {
+ mlx5_log(dev, LOGLEVEL_ERR, "PCI slot is unavailable\n");
+ return;
+ }
+
rfr_severity = ioread8(&h->rfr_severity);
severity = mlx5_health_get_severity(rfr_severity);
mlx5_log(dev, severity, "Health issue observed, %s, severity(%d) %s:\n",
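
A surprise-removed or powered-off PCI device reads back all-ones from its BARs, so the health buffer would decode as garbage; checking a register that can never legitimately be 0xFFFFFFFF catches this early. Generic sketch:

/* Treat an all-ones MMIO read as "device is gone". */
static bool example_pci_device_present(void __iomem *reg)
{
	return ioread32be(reg) != 0xFFFFFFFF;
}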
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 962d41418ce7..f4f7eaf16446 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -67,7 +67,9 @@ static void mlx5i_get_ethtool_stats(struct net_device *dev,
}
static int mlx5i_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -75,7 +77,9 @@ static int mlx5i_set_ringparam(struct net_device *dev,
}
static void mlx5i_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -105,7 +109,7 @@ static int mlx5i_set_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- return mlx5e_ethtool_set_coalesce(priv, coal);
+ return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
}
static int mlx5i_get_coalesce(struct net_device *netdev,
@@ -115,7 +119,7 @@ static int mlx5i_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
}
static int mlx5i_get_ts_info(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 830444f927d4..19bf2b66707d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -479,7 +479,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
pool->xa_num_irqs.max = start + size - 1;
if (name)
snprintf(pool->name, MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS,
- name);
+ "%s", name);
pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
index 92b798f8e73a..ceeb7f4c3f6c 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
@@ -33,8 +33,11 @@ static void mlxbf_gige_get_regs(struct net_device *netdev,
memcpy_fromio(p, priv->base, MLXBF_GIGE_MMIO_REG_SZ);
}
-static void mlxbf_gige_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+static void
+mlxbf_gige_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct mlxbf_gige *priv = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index a15c95a10bae..cd3331a077bb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1973,6 +1973,7 @@ int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
pci_driver->probe = mlxsw_pci_probe;
pci_driver->remove = mlxsw_pci_remove;
+ pci_driver->shutdown = mlxsw_pci_remove;
return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);
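
pci_driver's ->remove and ->shutdown callbacks share the signature void (*)(struct pci_dev *), so a driver that must quiesce DMA and interrupts before reboot or kexec can simply reuse its remove handler, as done above. Illustrative registration (names hypothetical):

static struct pci_driver example_pci_driver = {
	.name     = "example",
	.probe    = example_probe,
	.remove   = example_remove,
	.shutdown = example_remove,	/* full teardown on shutdown/kexec too */
};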
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 03e5bad4e405..24157bb59881 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -303,7 +303,7 @@ int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
- unsigned char *addr)
+ const unsigned char *addr)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char ppad_pl[MLXSW_REG_PPAD_LEN];
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 217e3b351dfe..98df6e8fa45f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -8369,9 +8369,6 @@ mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
int id;
idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
- if (!profile)
- continue;
-
if (ether_addr_equal_masked(profile->mac_prefix, mac,
mlxsw_sp->mac_mask))
return profile;
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 99c0c1491af2..d024983815da 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -6317,11 +6317,15 @@ static int netdev_set_pauseparam(struct net_device *dev,
* netdev_get_ringparam - get tx/rx ring parameters
* @dev: Network device.
* @ring: Ethtool RING settings data structure.
+ * @kernel_ring: Ethtool external RING settings data structure.
+ * @extack: Netlink extended ACK handle.
*
* This procedure returns the TX/RX ring settings.
*/
static void netdev_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 4625d4fb4cde..16266275dd36 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -292,6 +292,33 @@ static int sparx5_create_port(struct sparx5 *sparx5,
spx5_port->phylink_config.dev = &spx5_port->ndev->dev;
spx5_port->phylink_config.type = PHYLINK_NETDEV;
spx5_port->phylink_config.pcs_poll = true;
+ spx5_port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD |
+ MAC_2500FD | MAC_5000FD | MAC_10000FD | MAC_25000FD;
+
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ spx5_port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_QSGMII,
+ spx5_port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ spx5_port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ spx5_port->phylink_config.supported_interfaces);
+
+ if (spx5_port->conf.bandwidth == SPEED_5000 ||
+ spx5_port->conf.bandwidth == SPEED_10000 ||
+ spx5_port->conf.bandwidth == SPEED_25000)
+ __set_bit(PHY_INTERFACE_MODE_5GBASER,
+ spx5_port->phylink_config.supported_interfaces);
+
+ if (spx5_port->conf.bandwidth == SPEED_10000 ||
+ spx5_port->conf.bandwidth == SPEED_25000)
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ spx5_port->phylink_config.supported_interfaces);
+
+ if (spx5_port->conf.bandwidth == SPEED_25000)
+ __set_bit(PHY_INTERFACE_MODE_25GBASER,
+ spx5_port->phylink_config.supported_interfaces);
phylink = phylink_create(&spx5_port->phylink_config,
of_fwnode_handle(config->node),
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
index fb74752de0ca..8ba33bc1a001 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
@@ -26,79 +26,6 @@ static bool port_conf_has_changed(struct sparx5_port_config *a, struct sparx5_po
return false;
}
-static void sparx5_phylink_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_5GBASER:
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_25GBASER:
- case PHY_INTERFACE_MODE_NA:
- if (port->conf.bandwidth == SPEED_5000)
- phylink_set(mask, 5000baseT_Full);
- if (port->conf.bandwidth == SPEED_10000) {
- phylink_set(mask, 5000baseT_Full);
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
- }
- if (port->conf.bandwidth == SPEED_25000) {
- phylink_set(mask, 5000baseT_Full);
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
- phylink_set(mask, 25000baseCR_Full);
- phylink_set(mask, 25000baseSR_Full);
- }
- if (state->interface != PHY_INTERFACE_MODE_NA)
- break;
- fallthrough;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- if (state->interface != PHY_INTERFACE_MODE_NA)
- break;
- fallthrough;
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- if (state->interface != PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- }
- if (state->interface == PHY_INTERFACE_MODE_2500BASEX ||
- state->interface == PHY_INTERFACE_MODE_NA) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
- break;
- default:
- linkmode_zero(supported);
- return;
- }
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
static void sparx5_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
@@ -202,7 +129,7 @@ const struct phylink_pcs_ops sparx5_phylink_pcs_ops = {
};
const struct phylink_mac_ops sparx5_phylink_mac_ops = {
- .validate = sparx5_phylink_validate,
+ .validate = phylink_generic_validate,
.mac_config = sparx5_phylink_mac_config,
.mac_link_down = sparx5_phylink_mac_link_down,
.mac_link_up = sparx5_phylink_mac_link_up,
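[Editor's note: the two sparx5 hunks above follow one pattern: delete the hand-rolled .validate, and instead declare mac_capabilities plus a supported_interfaces bitmap at port-creation time so phylink can derive the link modes itself. A sketch of the setup side; the function name is hypothetical, the fields are the real ones populated in sparx5_create_port() above.

static void foo_setup_phylink_caps(struct phylink_config *cfg)
{
	/* What the MAC can do... */
	cfg->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
				MAC_10 | MAC_100 | MAC_1000FD;

	/* ...and over which interface modes it can do it. */
	__set_bit(PHY_INTERFACE_MODE_SGMII, cfg->supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_1000BASEX, cfg->supported_interfaces);
}

With both populated, .validate can point at phylink_generic_validate, as the mac_ops hunk above does.]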
diff --git a/drivers/net/ethernet/microsoft/mana/Makefile b/drivers/net/ethernet/microsoft/mana/Makefile
index 0edd5bb685f3..e16a4221f571 100644
--- a/drivers/net/ethernet/microsoft/mana/Makefile
+++ b/drivers/net/ethernet/microsoft/mana/Makefile
@@ -3,4 +3,4 @@
# Makefile for the Microsoft Azure Network Adapter driver
obj-$(CONFIG_MICROSOFT_MANA) += mana.o
-mana-objs := gdma_main.o shm_channel.o hw_channel.o mana_en.o mana_ethtool.o
+mana-objs := gdma_main.o shm_channel.o hw_channel.o mana_en.o mana_ethtool.o mana_bpf.o
diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h
index d047ee876f12..0c5553887b75 100644
--- a/drivers/net/ethernet/microsoft/mana/mana.h
+++ b/drivers/net/ethernet/microsoft/mana/mana.h
@@ -298,6 +298,9 @@ struct mana_rxq {
struct mana_stats stats;
+ struct bpf_prog __rcu *bpf_prog;
+ struct xdp_rxq_info xdp_rxq;
+
/* MUST BE THE LAST MEMBER:
* Each receive buffer has an associated mana_recv_buf_oob.
*/
@@ -353,6 +356,8 @@ struct mana_port_context {
/* This points to an array of num_queues of RQ pointers. */
struct mana_rxq **rxqs;
+ struct bpf_prog *bpf_prog;
+
/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
unsigned int max_queues;
unsigned int num_queues;
@@ -367,6 +372,7 @@ struct mana_port_context {
struct mana_ethtool_stats eth_stats;
};
+int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
bool update_hash, bool update_tab);
@@ -377,6 +383,13 @@ int mana_detach(struct net_device *ndev, bool from_close);
int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);
+void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
+u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
+ struct xdp_buff *xdp, void *buf_va, uint pkt_len);
+struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
+void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
+int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
+
extern const struct ethtool_ops mana_ethtool_ops;
struct mana_obj_spec {
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
new file mode 100644
index 000000000000..1bc8ff388341
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mm.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <net/xdp.h>
+
+#include "mana.h"
+
+void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+ u16 txq_idx = skb_get_queue_mapping(skb);
+ struct netdev_queue *ndevtxq;
+ int rc;
+
+ __skb_push(skb, ETH_HLEN);
+
+ ndevtxq = netdev_get_tx_queue(ndev, txq_idx);
+ __netif_tx_lock(ndevtxq, smp_processor_id());
+
+ rc = mana_start_xmit(skb, ndev);
+
+ __netif_tx_unlock(ndevtxq);
+
+ if (dev_xmit_complete(rc))
+ return;
+
+ dev_kfree_skb_any(skb);
+ ndev->stats.tx_dropped++;
+}
+
+u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
+ struct xdp_buff *xdp, void *buf_va, uint pkt_len)
+{
+ struct bpf_prog *prog;
+ u32 act = XDP_PASS;
+
+ rcu_read_lock();
+ prog = rcu_dereference(rxq->bpf_prog);
+
+ if (!prog)
+ goto out;
+
+ xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
+ xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false);
+
+ act = bpf_prog_run_xdp(prog, xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ case XDP_TX:
+ case XDP_DROP:
+ break;
+
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ break;
+
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ }
+
+out:
+ rcu_read_unlock();
+
+ return act;
+}
+
+static unsigned int mana_xdp_fraglen(unsigned int len)
+{
+ return SKB_DATA_ALIGN(len) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
+struct bpf_prog *mana_xdp_get(struct mana_port_context *apc)
+{
+ ASSERT_RTNL();
+
+ return apc->bpf_prog;
+}
+
+static struct bpf_prog *mana_chn_xdp_get(struct mana_port_context *apc)
+{
+ return rtnl_dereference(apc->rxqs[0]->bpf_prog);
+}
+
+/* Set the XDP program on all channels */
+void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog)
+{
+ struct bpf_prog *old_prog = mana_chn_xdp_get(apc);
+ unsigned int num_queues = apc->num_queues;
+ int i;
+
+ ASSERT_RTNL();
+
+ if (old_prog == prog)
+ return;
+
+ if (prog)
+ bpf_prog_add(prog, num_queues);
+
+ for (i = 0; i < num_queues; i++)
+ rcu_assign_pointer(apc->rxqs[i]->bpf_prog, prog);
+
+ if (old_prog)
+ for (i = 0; i < num_queues; i++)
+ bpf_prog_put(old_prog);
+}
+
+static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ struct bpf_prog *old_prog;
+ int buf_max;
+
+ old_prog = mana_xdp_get(apc);
+
+ if (!old_prog && !prog)
+ return 0;
+
+ buf_max = XDP_PACKET_HEADROOM + mana_xdp_fraglen(ndev->mtu + ETH_HLEN);
+ if (prog && buf_max > PAGE_SIZE) {
+ netdev_err(ndev, "XDP: mtu:%u too large, buf_max:%u\n",
+ ndev->mtu, buf_max);
+ NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");
+
+ return -EOPNOTSUPP;
+ }
+
+ /* One refcnt of the prog is held by the caller already, so
+ * don't increase refcnt for this one.
+ */
+ apc->bpf_prog = prog;
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (apc->port_is_up)
+ mana_chn_setxdp(apc, prog);
+
+ return 0;
+}
+
+int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ struct netlink_ext_ack *extack = bpf->extack;
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return mana_xdp_set(ndev, bpf->prog, extack);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
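[Editor's note: the buf_max test in mana_xdp_set() above enforces that headroom, the worst-case frame and the trailing skb_shared_info all fit in one page. A standalone back-of-the-envelope version of that arithmetic; the constants are assumptions for a typical x86-64 build (256-byte XDP headroom, 64-byte cache lines, skb_shared_info around 320 bytes), and the kernel uses its real SKB_DATA_ALIGN and struct sizes.

#include <stdio.h>

#define XDP_PACKET_HEADROOM	256U
#define CACHE_ALIGN		64U
#define SKB_SHINFO_SIZE		320U	/* assumed */
#define ETH_HLEN		14U
#define PAGE_SIZE		4096U

static unsigned int align_up(unsigned int len)
{
	return (len + CACHE_ALIGN - 1) & ~(CACHE_ALIGN - 1);
}

static unsigned int xdp_fraglen(unsigned int len)
{
	/* mirrors mana_xdp_fraglen(): data plus shared info, aligned */
	return align_up(len) + align_up(SKB_SHINFO_SIZE);
}

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int buf_max = XDP_PACKET_HEADROOM +
			       xdp_fraglen(mtu + ETH_HLEN);

	/* 1500 -> 256 + 1536 + 320 = 2112, which fits in a 4K page */
	printf("mtu %u -> buf_max %u (%s PAGE_SIZE)\n", mtu, buf_max,
	       buf_max > PAGE_SIZE ? "exceeds" : "fits in");
	return 0;
}
]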
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 72cbf45c42d8..c1d5a374b967 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -125,7 +125,7 @@ frag_err:
return -ENOMEM;
}
-static int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
struct mana_port_context *apc = netdev_priv(ndev);
@@ -378,6 +378,7 @@ static const struct net_device_ops mana_devops = {
.ndo_start_xmit = mana_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats64 = mana_get_stats64,
+ .ndo_bpf = mana_bpf,
};
static void mana_cleanup_port_context(struct mana_port_context *apc)
@@ -906,6 +907,25 @@ static void mana_post_pkt_rxq(struct mana_rxq *rxq)
WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
}
+static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
+ struct xdp_buff *xdp)
+{
+ struct sk_buff *skb = build_skb(buf_va, PAGE_SIZE);
+
+ if (!skb)
+ return NULL;
+
+ if (xdp->data_hard_start) {
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ skb_put(skb, xdp->data_end - xdp->data);
+ } else {
+ skb_reserve(skb, XDP_PACKET_HEADROOM);
+ skb_put(skb, pkt_len);
+ }
+
+ return skb;
+}
+
static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
struct mana_rxq *rxq)
{
@@ -914,8 +934,10 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
uint pkt_len = cqe->ppi[0].pkt_len;
u16 rxq_idx = rxq->rxq_idx;
struct napi_struct *napi;
+ struct xdp_buff xdp = {};
struct sk_buff *skb;
u32 hash_value;
+ u32 act;
rxq->rx_cq.work_done++;
napi = &rxq->rx_cq.napi;
@@ -925,15 +947,16 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
return;
}
- skb = build_skb(buf_va, PAGE_SIZE);
+ act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
- if (!skb) {
- free_page((unsigned long)buf_va);
- ++ndev->stats.rx_dropped;
- return;
- }
+ if (act != XDP_PASS && act != XDP_TX)
+ goto drop;
+
+ skb = mana_build_skb(buf_va, pkt_len, &xdp);
+
+ if (!skb)
+ goto drop;
- skb_put(skb, pkt_len);
skb->dev = napi->dev;
skb->protocol = eth_type_trans(skb, ndev);
@@ -954,12 +977,24 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
}
+ if (act == XDP_TX) {
+ skb_set_queue_mapping(skb, rxq_idx);
+ mana_xdp_tx(skb, ndev);
+ return;
+ }
+
napi_gro_receive(napi, skb);
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->packets++;
rx_stats->bytes += pkt_len;
u64_stats_update_end(&rx_stats->syncp);
+ return;
+
+drop:
+ free_page((unsigned long)buf_va);
+ ++ndev->stats.rx_dropped;
+ return;
}
static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
@@ -1016,7 +1051,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
new_page = alloc_page(GFP_ATOMIC);
if (new_page) {
- da = dma_map_page(dev, new_page, 0, rxq->datasize,
+ da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, da)) {
@@ -1291,6 +1326,9 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
napi_synchronize(napi);
napi_disable(napi);
+
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
netif_napi_del(napi);
mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
@@ -1342,7 +1380,8 @@ static int mana_alloc_rx_wqe(struct mana_port_context *apc,
if (!page)
return -ENOMEM;
- da = dma_map_page(dev, page, 0, rxq->datasize, DMA_FROM_DEVICE);
+ da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize,
+ DMA_FROM_DEVICE);
if (dma_mapping_error(dev, da)) {
__free_page(page);
@@ -1485,6 +1524,12 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
netif_napi_add(ndev, &cq->napi, mana_poll, 1);
+
+ WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
+ cq->napi.napi_id));
+ WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL));
+
napi_enable(&cq->napi);
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
@@ -1650,6 +1695,8 @@ int mana_alloc_queues(struct net_device *ndev)
if (err)
goto destroy_vport;
+ mana_chn_setxdp(apc, mana_xdp_get(apc));
+
return 0;
destroy_vport:
@@ -1698,6 +1745,8 @@ static int mana_dealloc_queues(struct net_device *ndev)
if (apc->port_is_up)
return -EINVAL;
+ mana_chn_setxdp(apc, NULL);
+
/* No packet can be transmitted now since apc->port_is_up is false.
* There is still a tiny chance that mana_poll_tx_cq() can re-enable
* a txq because it may not promptly see apc->port_is_up being cleared
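[Editor's note: the dma_map_page() changes above shift the device's write target by XDP_PACKET_HEADROOM, and mana_build_skb() reserves the same amount when no XDP program adjusted the head. The implied receive-page layout, as a sketch:

/* XDP_PACKET_HEADROOM is 256 in current kernels:
 *
 *   +---------------------+-----------------------------+----------+
 *   | XDP_PACKET_HEADROOM | packet data (rxq->datasize) | tailroom |
 *   +---------------------+-----------------------------+----------+
 *   ^ buf_va / xdp data_hard_start
 *
 * The device only DMAs into the region after the headroom, so an XDP
 * program can push headers into the headroom without copying, and
 * mana_build_skb() reserves the same headroom when no program ran.
 */
]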
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 409cde1e59c6..fe8abb30f185 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -61,9 +61,9 @@ static void ocelot_mact_select(struct ocelot *ocelot,
}
-int ocelot_mact_learn(struct ocelot *ocelot, int port,
- const unsigned char mac[ETH_ALEN],
- unsigned int vid, enum macaccess_entry_type type)
+static int __ocelot_mact_learn(struct ocelot *ocelot, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type type)
{
u32 cmd = ANA_TABLES_MACACCESS_VALID |
ANA_TABLES_MACACCESS_DEST_IDX(port) |
@@ -83,8 +83,6 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
if (mc_ports & BIT(ocelot->num_phys_ports))
cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
- mutex_lock(&ocelot->mact_lock);
-
ocelot_mact_select(ocelot, mac, vid);
/* Issue a write command */
@@ -92,9 +90,20 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
err = ocelot_mact_wait_for_completion(ocelot);
+ return err;
+}
+
+int ocelot_mact_learn(struct ocelot *ocelot, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type type)
+{
+ int ret;
+
+ mutex_lock(&ocelot->mact_lock);
+ ret = __ocelot_mact_learn(ocelot, port, mac, vid, type);
mutex_unlock(&ocelot->mact_lock);
- return err;
+ return ret;
}
EXPORT_SYMBOL(ocelot_mact_learn);
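[Editor's note: splitting ocelot_mact_learn() into a locked wrapper around an unlocked __ helper is the stock kernel pattern for letting another caller (here ocelot_mact_learn_streamdata(), below) hold mact_lock across a larger critical section. The generic shape, with hypothetical names:

/* __foo_op() assumes the caller holds the lock; foo_op() is the
 * public, self-locking entry point.
 */
static int __foo_op(struct foo *f, int arg)
{
	lockdep_assert_held(&f->lock);	/* document the contract */
	/* ... touch hardware / shared state ... */
	return 0;
}

int foo_op(struct foo *f, int arg)
{
	int ret;

	mutex_lock(&f->lock);
	ret = __foo_op(f, arg);
	mutex_unlock(&f->lock);

	return ret;
}

int foo_op_with_extra(struct foo *f, int arg)
{
	int ret;

	mutex_lock(&f->lock);
	/* extra setup that must stay atomic with the op itself */
	ret = __foo_op(f, arg);
	mutex_unlock(&f->lock);

	return ret;
}
]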
@@ -120,6 +129,66 @@ int ocelot_mact_forget(struct ocelot *ocelot,
}
EXPORT_SYMBOL(ocelot_mact_forget);
+int ocelot_mact_lookup(struct ocelot *ocelot, int *dst_idx,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type *type)
+{
+ int val;
+
+ mutex_lock(&ocelot->mact_lock);
+
+ ocelot_mact_select(ocelot, mac, vid);
+
+ /* Issue a read command with MACACCESS_VALID=1. */
+ ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID |
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ),
+ ANA_TABLES_MACACCESS);
+
+ if (ocelot_mact_wait_for_completion(ocelot)) {
+ mutex_unlock(&ocelot->mact_lock);
+ return -ETIMEDOUT;
+ }
+
+ /* Read back the entry flags */
+ val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
+
+ mutex_unlock(&ocelot->mact_lock);
+
+ if (!(val & ANA_TABLES_MACACCESS_VALID))
+ return -ENOENT;
+
+ *dst_idx = ANA_TABLES_MACACCESS_DEST_IDX_X(val);
+ *type = ANA_TABLES_MACACCESS_ENTRYTYPE_X(val);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_mact_lookup);
+
+int ocelot_mact_learn_streamdata(struct ocelot *ocelot, int dst_idx,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type,
+ int sfid, int ssid)
+{
+ int ret;
+
+ mutex_lock(&ocelot->mact_lock);
+
+ ocelot_write(ocelot,
+ (sfid < 0 ? 0 : ANA_TABLES_STREAMDATA_SFID_VALID) |
+ ANA_TABLES_STREAMDATA_SFID(sfid) |
+ (ssid < 0 ? 0 : ANA_TABLES_STREAMDATA_SSID_VALID) |
+ ANA_TABLES_STREAMDATA_SSID(ssid),
+ ANA_TABLES_STREAMDATA);
+
+ ret = __ocelot_mact_learn(ocelot, dst_idx, mac, vid, type);
+
+ mutex_unlock(&ocelot->mact_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ocelot_mact_learn_streamdata);
+
static void ocelot_mact_init(struct ocelot *ocelot)
{
/* Configure the learning mode entries attributes:
@@ -594,9 +663,17 @@ void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port,
struct ocelot_port *ocelot_port = ocelot->ports[port];
int err;
+ ocelot_port->speed = SPEED_UNKNOWN;
+
ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA,
DEV_MAC_ENA_CFG);
+ if (ocelot->ops->cut_through_fwd) {
+ mutex_lock(&ocelot->fwd_domain_lock);
+ ocelot->ops->cut_through_fwd(ocelot);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+ }
+
ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);
err = ocelot_port_flush(ocelot, port);
@@ -628,6 +705,8 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
int mac_speed, mode = 0;
u32 mac_fc_cfg;
+ ocelot_port->speed = speed;
+
/* The MAC might be integrated in systems where the MAC speed is fixed
* and it's the PCS who is performing the rate adaptation, so we have
* to write "1000Mbps" into the LINK_SPEED field of DEV_CLOCK_CFG
@@ -700,6 +779,15 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
+ /* If the port supports cut-through forwarding, update the masks before
+ * enabling forwarding on the port.
+ */
+ if (ocelot->ops->cut_through_fwd) {
+ mutex_lock(&ocelot->fwd_domain_lock);
+ ocelot->ops->cut_through_fwd(ocelot);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+ }
+
/* Core: Enable port for frame transfer */
ocelot_fields_write(ocelot, port,
QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
@@ -1709,15 +1797,18 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond,
return mask;
}
-static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port,
- struct net_device *bridge)
+u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port)
{
struct ocelot_port *ocelot_port = ocelot->ports[src_port];
+ const struct net_device *bridge;
u32 mask = 0;
int port;
- if (!ocelot_port || ocelot_port->bridge != bridge ||
- ocelot_port->stp_state != BR_STATE_FORWARDING)
+ if (!ocelot_port || ocelot_port->stp_state != BR_STATE_FORWARDING)
+ return 0;
+
+ bridge = ocelot_port->bridge;
+ if (!bridge)
return 0;
for (port = 0; port < ocelot->num_phys_ports; port++) {
@@ -1733,8 +1824,9 @@ static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port,
return mask;
}
+EXPORT_SYMBOL_GPL(ocelot_get_bridge_fwd_mask);
-static u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot)
+u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot)
{
u32 mask = 0;
int port;
@@ -1751,12 +1843,22 @@ static u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot)
return mask;
}
+EXPORT_SYMBOL_GPL(ocelot_get_dsa_8021q_cpu_mask);
-void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
+void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
{
unsigned long cpu_fwd_mask;
int port;
+ lockdep_assert_held(&ocelot->fwd_domain_lock);
+
+ /* If cut-through forwarding is supported, update the masks before a
+ * port joins the forwarding domain, to avoid potential underruns if it
+ * has the highest speed from the new domain.
+ */
+ if (joining && ocelot->ops->cut_through_fwd)
+ ocelot->ops->cut_through_fwd(ocelot);
+
/* If a DSA tag_8021q CPU exists, it needs to be included in the
* regular forwarding path of the front ports regardless of whether
* those are bridged or standalone.
@@ -1784,10 +1886,9 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
mask = GENMASK(ocelot->num_phys_ports - 1, 0);
mask &= ~cpu_fwd_mask;
} else if (ocelot_port->bridge) {
- struct net_device *bridge = ocelot_port->bridge;
struct net_device *bond = ocelot_port->bond;
- mask = ocelot_get_bridge_fwd_mask(ocelot, port, bridge);
+ mask = ocelot_get_bridge_fwd_mask(ocelot, port);
mask |= cpu_fwd_mask;
mask &= ~BIT(port);
if (bond) {
@@ -1804,6 +1905,16 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port);
}
+
+ /* If cut-through forwarding is supported and a port is leaving, there
+ * is a chance that cut-through was disabled on the other ports because
+ * of the leaving port (which has a higher link speed). We need to
+ * update the cut-through masks of the remaining ports no earlier than
+ * after the port has left, to prevent underruns from happening between
+ * the cut-through update and the forwarding domain update.
+ */
+ if (!joining && ocelot->ops->cut_through_fwd)
+ ocelot->ops->cut_through_fwd(ocelot);
}
EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask);
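[Editor's note: the joining flag encodes an ordering rule around the PGID update: reconfigure cut-through before a fast port becomes reachable, but only after a departing fast port is out of the domain. As a fragment, where update_pgid_masks() is a hypothetical stand-in for the PGID_SRC loop above:

if (joining && ocelot->ops->cut_through_fwd)
	ocelot->ops->cut_through_fwd(ocelot);	/* fast port about to join */

update_pgid_masks(ocelot);			/* forwarding domain changes here */

if (!joining && ocelot->ops->cut_through_fwd)
	ocelot->ops->cut_through_fwd(ocelot);	/* fast port is now out */
]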
@@ -1812,6 +1923,8 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
struct ocelot_port *ocelot_port = ocelot->ports[port];
u32 learn_ena = 0;
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot_port->stp_state = state;
if ((state == BR_STATE_LEARNING || state == BR_STATE_FORWARDING) &&
@@ -1821,7 +1934,9 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
ocelot_rmw_gix(ocelot, learn_ena, ANA_PORT_PORT_CFG_LEARN_ENA,
ANA_PORT_PORT_CFG, port);
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, state == BR_STATE_FORWARDING);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_bridge_stp_state_set);
@@ -2051,9 +2166,13 @@ void ocelot_port_bridge_join(struct ocelot *ocelot, int port,
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot_port->bridge = bridge;
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, true);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_bridge_join);
@@ -2062,11 +2181,15 @@ void ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot_port->bridge = NULL;
ocelot_port_set_pvid(ocelot, port, NULL);
ocelot_port_manage_port_tag(ocelot, port);
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, false);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_bridge_leave);
@@ -2188,12 +2311,16 @@ int ocelot_port_lag_join(struct ocelot *ocelot, int port,
if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
return -EOPNOTSUPP;
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot->ports[port]->bond = bond;
ocelot_setup_logical_port_ids(ocelot);
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, true);
ocelot_set_aggr_pgids(ocelot);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
return 0;
}
EXPORT_SYMBOL(ocelot_port_lag_join);
@@ -2201,11 +2328,15 @@ EXPORT_SYMBOL(ocelot_port_lag_join);
void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
struct net_device *bond)
{
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot->ports[port]->bond = NULL;
ocelot_setup_logical_port_ids(ocelot);
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, false);
ocelot_set_aggr_pgids(ocelot);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_lag_leave);
@@ -2496,6 +2627,7 @@ int ocelot_init(struct ocelot *ocelot)
mutex_init(&ocelot->stats_lock);
mutex_init(&ocelot->ptp_lock);
mutex_init(&ocelot->mact_lock);
+ mutex_init(&ocelot->fwd_domain_lock);
spin_lock_init(&ocelot->ptp_clock_lock);
spin_lock_init(&ocelot->ts_id_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
@@ -2519,6 +2651,9 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_vcap_init(ocelot);
ocelot_cpu_port_init(ocelot);
+ if (ocelot->ops->psfp_init)
+ ocelot->ops->psfp_init(ocelot);
+
for (port = 0; port < ocelot->num_phys_ports; port++) {
/* Clear all counters (5 groups) */
ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port) |
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index e43da09b8f91..1eb0b5ad51e9 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -55,19 +55,6 @@ struct ocelot_dump_ctx {
int idx;
};
-/* MAC table entry types.
- * ENTRYTYPE_NORMAL is subject to aging.
- * ENTRYTYPE_LOCKED is not subject to aging.
- * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast.
- * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast.
- */
-enum macaccess_entry_type {
- ENTRYTYPE_NORMAL = 0,
- ENTRYTYPE_LOCKED,
- ENTRYTYPE_MACv4,
- ENTRYTYPE_MACv6,
-};
-
/* A (PGID) port mask structure, encoding the 2^ocelot->num_phys_ports
* possibilities of egress port masks for L2 multicast traffic.
* For a switch with 9 user ports, there are 512 possible port masks, but the
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 769a8159373e..58fce173f95b 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -20,6 +20,9 @@
(1 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP)
#define VCAP_IS2_CHAIN(lookup, pag) \
(2 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP + (pag))
+/* PSFP chain and block ID */
+#define PSFP_BLOCK_ID OCELOT_NUM_VCAP_BLOCKS
+#define OCELOT_PSFP_CHAIN (3 * VCAP_BLOCK)
static int ocelot_chain_to_block(int chain, bool ingress)
{
@@ -46,6 +49,9 @@ static int ocelot_chain_to_block(int chain, bool ingress)
if (chain == VCAP_IS2_CHAIN(lookup, pag))
return VCAP_IS2;
+ if (chain == OCELOT_PSFP_CHAIN)
+ return PSFP_BLOCK_ID;
+
return -EOPNOTSUPP;
}
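[Editor's note: OCELOT_PSFP_CHAIN extends the existing chain-number encoding by one more block. Assuming the VCAP_BLOCK/VCAP_LOOKUP constants defined earlier in this file keep their current values (10000 and 1000), the mapping works out as in this runnable check:

#include <stdio.h>

/* Assumed values of the encoding constants from ocelot_flower.c. */
#define VCAP_BLOCK			10000
#define VCAP_LOOKUP			1000

#define VCAP_IS1_CHAIN(lookup)		(1 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP)
#define VCAP_IS2_CHAIN(lookup, pag)	(2 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP + (pag))
#define OCELOT_PSFP_CHAIN		(3 * VCAP_BLOCK)

int main(void)
{
	/* ES0 is chain 0, IS1 lookups start at 10000, IS2 at 20000,
	 * and PSFP claims the next free "block" at 30000.
	 */
	printf("IS1 lookup 2: %d\n", VCAP_IS1_CHAIN(2));	/* 12000 */
	printf("IS2 l1 pag 5: %d\n", VCAP_IS2_CHAIN(1, 5));	/* 21005 */
	printf("PSFP:         %d\n", OCELOT_PSFP_CHAIN);	/* 30000 */
	return 0;
}
]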
@@ -84,7 +90,8 @@ static bool ocelot_is_goto_target_valid(int goto_target, int chain,
goto_target == VCAP_IS1_CHAIN(1) ||
goto_target == VCAP_IS1_CHAIN(2) ||
goto_target == VCAP_IS2_CHAIN(0, 0) ||
- goto_target == VCAP_IS2_CHAIN(1, 0));
+ goto_target == VCAP_IS2_CHAIN(1, 0) ||
+ goto_target == OCELOT_PSFP_CHAIN);
if (chain == VCAP_IS1_CHAIN(0))
return (goto_target == VCAP_IS1_CHAIN(1));
@@ -111,7 +118,11 @@ static bool ocelot_is_goto_target_valid(int goto_target, int chain,
if (chain == VCAP_IS2_CHAIN(0, pag))
return (goto_target == VCAP_IS2_CHAIN(1, pag));
- /* VCAP IS2 lookup 1 cannot jump anywhere */
+ /* VCAP IS2 lookup 1 can go to the PSFP block if the hardware supports it */
+ for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++)
+ if (chain == VCAP_IS2_CHAIN(1, pag))
+ return (goto_target == OCELOT_PSFP_CHAIN);
+
return false;
}
@@ -211,6 +222,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
const struct flow_action_entry *a;
enum ocelot_tag_tpid_sel tpid;
int i, chain, egress_port;
+ u32 pol_ix, pol_max;
u64 rate;
int err;
@@ -269,10 +281,14 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_POLICE:
+ if (filter->block_id == PSFP_BLOCK_ID) {
+ filter->type = OCELOT_PSFP_FILTER_OFFLOAD;
+ break;
+ }
if (filter->block_id != VCAP_IS2 ||
filter->lookup != 0) {
NL_SET_ERR_MSG_MOD(extack,
- "Police action can only be offloaded to VCAP IS2 lookup 0");
+ "Police action can only be offloaded to VCAP IS2 lookup 0 or PSFP");
return -EOPNOTSUPP;
}
if (filter->goto_target != -1) {
@@ -286,6 +302,20 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
return -EOPNOTSUPP;
}
filter->action.police_ena = true;
+
+ pol_ix = a->police.index + ocelot->vcap_pol.base;
+ pol_max = ocelot->vcap_pol.max;
+
+ if (ocelot->vcap_pol.max2 && pol_ix > pol_max) {
+ pol_ix += ocelot->vcap_pol.base2 - pol_max - 1;
+ pol_max = ocelot->vcap_pol.max2;
+ }
+
+ if (pol_ix >= pol_max)
+ return -EINVAL;
+
+ filter->action.pol_ix = pol_ix;
+
rate = a->police.rate_bytes_ps;
filter->action.pol.rate = div_u64(rate, 1000) * 8;
filter->action.pol.burst = a->police.burst;
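[Editor's note: the pol_ix computation above maps the flower police index into one hardware policer range, spilling into an optional second range when the first is exhausted. A runnable sketch of the remap; the range bounds here are invented for illustration (VSC7514, later in this series, uses base 128 and max 191 with no second range):

#include <stdio.h>

struct pol_ranges { unsigned int base, max, base2, max2; };

static int remap_pol_ix(const struct pol_ranges *r, unsigned int index,
			unsigned int *pol_ix)
{
	unsigned int ix = index + r->base;
	unsigned int max = r->max;

	if (r->max2 && ix > max) {
		/* Spill indexes past the first range into the second. */
		ix += r->base2 - max - 1;
		max = r->max2;
	}
	if (ix >= max)
		return -1;	/* -EINVAL in the kernel */

	*pol_ix = ix;
	return 0;
}

int main(void)
{
	struct pol_ranges r = { .base = 100, .max = 110,
				.base2 = 200, .max2 = 220 };
	unsigned int ix;

	if (!remap_pol_ix(&r, 15, &ix))		/* 100+15=115 > 110 ... */
		printf("index 15 -> pol_ix %u\n", ix);	/* ... -> 204 */
	return 0;
}
]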
@@ -399,6 +429,14 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->action.pcp_a_val = a->vlan.prio;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
+ case FLOW_ACTION_GATE:
+ if (filter->block_id != PSFP_BLOCK_ID) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Gate action can only be offloaded to PSFP chain");
+ return -EOPNOTSUPP;
+ }
+ filter->type = OCELOT_PSFP_FILTER_OFFLOAD;
+ break;
default:
NL_SET_ERR_MSG_MOD(extack, "Cannot offload action");
return -EOPNOTSUPP;
@@ -407,7 +445,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
if (filter->goto_target == -1) {
if ((filter->block_id == VCAP_IS2 && filter->lookup == 1) ||
- chain == 0) {
+ chain == 0 || filter->block_id == PSFP_BLOCK_ID) {
allow_missing_goto_target = true;
} else {
NL_SET_ERR_MSG_MOD(extack, "Missing GOTO action");
@@ -689,6 +727,10 @@ static int ocelot_flower_parse(struct ocelot *ocelot, int port, bool ingress,
if (ret)
return ret;
+ /* PSFP filters have their key parsed by the stream identification function. */
+ if (filter->type == OCELOT_PSFP_FILTER_OFFLOAD)
+ return 0;
+
return ocelot_flower_parse_key(ocelot, port, ingress, f, filter);
}
@@ -792,6 +834,15 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
if (filter->type == OCELOT_VCAP_FILTER_DUMMY)
return ocelot_vcap_dummy_filter_add(ocelot, filter);
+ if (filter->type == OCELOT_PSFP_FILTER_OFFLOAD) {
+ kfree(filter);
+ if (ocelot->ops->psfp_filter_add)
+ return ocelot->ops->psfp_filter_add(ocelot, port, f);
+
+ NL_SET_ERR_MSG_MOD(extack, "PSFP chain is not supported in HW");
+ return -EOPNOTSUPP;
+ }
+
return ocelot_vcap_filter_add(ocelot, filter, f->common.extack);
}
EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace);
@@ -807,6 +858,13 @@ int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
if (block_id < 0)
return 0;
+ if (block_id == PSFP_BLOCK_ID) {
+ if (ocelot->ops->psfp_filter_del)
+ return ocelot->ops->psfp_filter_del(ocelot, f);
+
+ return -EOPNOTSUPP;
+ }
+
block = &ocelot->block[block_id];
filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie, true);
@@ -825,12 +883,25 @@ int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
{
struct ocelot_vcap_filter *filter;
struct ocelot_vcap_block *block;
+ struct flow_stats stats = {0};
int block_id, ret;
block_id = ocelot_chain_to_block(f->common.chain_index, ingress);
if (block_id < 0)
return 0;
+ if (block_id == PSFP_BLOCK_ID) {
+ if (ocelot->ops->psfp_stats_get) {
+ ret = ocelot->ops->psfp_stats_get(ocelot, f, &stats);
+ if (ret)
+ return ret;
+
+ goto stats_update;
+ }
+
+ return -EOPNOTSUPP;
+ }
+
block = &ocelot->block[block_id];
filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie, true);
@@ -841,7 +912,10 @@ int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
if (ret)
return ret;
- flow_stats_update(&f->stats, 0x0, filter->stats.pkts, 0, 0x0,
+ stats.pkts = filter->stats.pkts;
+
+stats_update:
+ flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops, 0x0,
FLOW_ACTION_HW_STATS_IMMEDIATE);
return 0;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index eaeba60b1bba..0fcf359a6975 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -1498,40 +1498,6 @@ struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = {
.notifier_call = ocelot_switchdev_blocking_event,
};
-static void vsc7514_phylink_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct net_device *ndev = to_net_dev(config->dev);
- struct ocelot_port_private *priv = netdev_priv(ndev);
- struct ocelot_port *ocelot_port = &priv->port;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = {};
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != ocelot_port->phy_mode) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
-
- phylink_set(mask, Pause);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
static void vsc7514_phylink_mac_config(struct phylink_config *config,
unsigned int link_an_mode,
const struct phylink_link_state *state)
@@ -1590,7 +1556,7 @@ static void vsc7514_phylink_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops ocelot_phylink_ops = {
- .validate = vsc7514_phylink_validate,
+ .validate = phylink_generic_validate,
.mac_config = vsc7514_phylink_mac_config,
.mac_link_down = vsc7514_phylink_mac_link_down,
.mac_link_up = vsc7514_phylink_mac_link_up,
@@ -1654,6 +1620,11 @@ static int ocelot_port_phylink_create(struct ocelot *ocelot, int port,
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
+ priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
+
+ __set_bit(ocelot_port->phy_mode,
+ priv->phylink_config.supported_interfaces);
phylink = phylink_create(&priv->phylink_config,
of_fwnode_handle(portnp),
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index 337cd08b1a54..d3544413a8a4 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -887,10 +887,18 @@ static void vcap_entry_set(struct ocelot *ocelot, int ix,
return es0_entry_set(ocelot, ix, filter);
}
-static int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
- struct ocelot_policer *pol)
+struct vcap_policer_entry {
+ struct list_head list;
+ refcount_t refcount;
+ u32 pol_ix;
+};
+
+int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
+ struct ocelot_policer *pol)
{
struct qos_policer_conf pp = { 0 };
+ struct vcap_policer_entry *tmp;
+ int ret;
if (!pol)
return -EINVAL;
@@ -899,57 +907,74 @@ static int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
pp.pir = pol->rate;
pp.pbs = pol->burst;
- return qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+ list_for_each_entry(tmp, &ocelot->vcap_pol.pol_list, list)
+ if (tmp->pol_ix == pol_ix) {
+ refcount_inc(&tmp->refcount);
+ return 0;
+ }
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+ if (ret) {
+ kfree(tmp);
+ return ret;
+ }
+
+ tmp->pol_ix = pol_ix;
+ refcount_set(&tmp->refcount, 1);
+ list_add_tail(&tmp->list, &ocelot->vcap_pol.pol_list);
+
+ return 0;
}
+EXPORT_SYMBOL(ocelot_vcap_policer_add);
-static void ocelot_vcap_policer_del(struct ocelot *ocelot,
- struct ocelot_vcap_block *block,
- u32 pol_ix)
+int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix)
{
- struct ocelot_vcap_filter *filter;
struct qos_policer_conf pp = {0};
- int index = -1;
-
- if (pol_ix < block->pol_lpr)
- return;
-
- list_for_each_entry(filter, &block->rules, list) {
- index++;
- if (filter->block_id == VCAP_IS2 &&
- filter->action.police_ena &&
- filter->action.pol_ix < pol_ix) {
- filter->action.pol_ix += 1;
- ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
- &filter->action.pol);
- is2_entry_set(ocelot, index, filter);
+ struct vcap_policer_entry *tmp, *n;
+ u8 z = 0;
+
+ list_for_each_entry_safe(tmp, n, &ocelot->vcap_pol.pol_list, list)
+ if (tmp->pol_ix == pol_ix) {
+ z = refcount_dec_and_test(&tmp->refcount);
+ if (z) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
}
- }
- pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
- qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+ if (z) {
+ pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
+ return qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+ }
- block->pol_lpr++;
+ return 0;
}
+EXPORT_SYMBOL(ocelot_vcap_policer_del);
-static void ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
- struct ocelot_vcap_block *block,
- struct ocelot_vcap_filter *filter)
+static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
+ struct ocelot_vcap_block *block,
+ struct ocelot_vcap_filter *filter)
{
struct ocelot_vcap_filter *tmp;
struct list_head *pos, *n;
+ int ret;
if (filter->block_id == VCAP_IS2 && filter->action.police_ena) {
- block->pol_lpr--;
- filter->action.pol_ix = block->pol_lpr;
- ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
- &filter->action.pol);
+ ret = ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
+ &filter->action.pol);
+ if (ret)
+ return ret;
}
block->count++;
if (list_empty(&block->rules)) {
list_add(&filter->list, &block->rules);
- return;
+ return 0;
}
list_for_each_safe(pos, n, &block->rules) {
@@ -958,6 +983,8 @@ static void ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
break;
}
list_add(&filter->list, pos->prev);
+
+ return 0;
}
static bool ocelot_vcap_filter_equal(const struct ocelot_vcap_filter *a,
@@ -1132,7 +1159,7 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
struct netlink_ext_ack *extack)
{
struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
- int i, index;
+ int i, index, ret;
if (!ocelot_exclusive_mac_etype_filter_rules(ocelot, filter)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -1141,7 +1168,9 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
}
/* Add filter to the linked list */
- ocelot_vcap_filter_add_to_block(ocelot, block, filter);
+ ret = ocelot_vcap_filter_add_to_block(ocelot, block, filter);
+ if (ret)
+ return ret;
/* Get the index of the inserted filter */
index = ocelot_vcap_block_get_filter_index(block, filter);
@@ -1174,7 +1203,7 @@ static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot,
if (ocelot_vcap_filter_equal(filter, tmp)) {
if (tmp->block_id == VCAP_IS2 &&
tmp->action.police_ena)
- ocelot_vcap_policer_del(ocelot, block,
+ ocelot_vcap_policer_del(ocelot,
tmp->action.pol_ix);
list_del(pos);
@@ -1366,13 +1395,13 @@ int ocelot_vcap_init(struct ocelot *ocelot)
struct vcap_props *vcap = &ocelot->vcap[i];
INIT_LIST_HEAD(&block->rules);
- block->pol_lpr = OCELOT_POLICER_DISCARD - 1;
ocelot_vcap_detect_constants(ocelot, vcap);
ocelot_vcap_init_one(ocelot, vcap);
}
INIT_LIST_HEAD(&ocelot->dummy_rules);
+ INIT_LIST_HEAD(&ocelot->vcap_pol.pol_list);
return 0;
}
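[Editor's note: policer slots are now shared and refcounted instead of being carved off a moving pol_lpr watermark: add takes a reference when the index is already programmed, del releases the hardware slot only when the last user is gone. The generic shape of that pattern, illustrative and with error handling trimmed:

struct res_entry {
	struct list_head list;
	refcount_t refcount;
	u32 id;
};

static int res_get(struct list_head *pool, u32 id)
{
	struct res_entry *e;

	list_for_each_entry(e, pool, list)
		if (e->id == id) {
			refcount_inc(&e->refcount);
			return 0;		/* already programmed */
		}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	/* program the hardware slot here, then: */
	e->id = id;
	refcount_set(&e->refcount, 1);
	list_add_tail(&e->list, pool);
	return 0;
}

static void res_put(struct list_head *pool, u32 id)
{
	struct res_entry *e, *n;

	list_for_each_entry_safe(e, n, pool, list)
		if (e->id == id && refcount_dec_and_test(&e->refcount)) {
			list_del(&e->list);
			kfree(e);
			/* disable the hardware slot here */
		}
}
]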
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 38103b0255b0..cd3eb101f159 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -20,6 +20,9 @@
#include <soc/mscc/ocelot_hsio.h>
#include "ocelot.h"
+#define VSC7514_VCAP_POLICER_BASE 128
+#define VSC7514_VCAP_POLICER_MAX 191
+
static const u32 ocelot_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x009000),
REG(ANA_VLANMASK, 0x009004),
@@ -1129,6 +1132,10 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->num_flooding_pgids = 1;
ocelot->vcap = vsc7514_vcap_props;
+
+ ocelot->vcap_pol.base = VSC7514_VCAP_POLICER_BASE;
+ ocelot->vcap_pol.max = VSC7514_VCAP_POLICER_MAX;
+
ocelot->npi = -1;
err = ocelot_init(ocelot);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 5736fcdafd7a..83a5e29c836a 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1704,7 +1704,9 @@ myri10ge_set_pauseparam(struct net_device *netdev,
static void
myri10ge_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 8709d700e15a..b16f7c830f9b 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -203,6 +203,7 @@ static void mac_onboard_sonic_ethernet_addr(struct net_device *dev)
struct sonic_local *lp = netdev_priv(dev);
const int prom_addr = ONBOARD_SONIC_PROM_BASE;
unsigned short val;
+ u8 addr[ETH_ALEN];
/*
* On NuBus boards we can sometimes look in the ROM resources.
@@ -213,7 +214,8 @@ static void mac_onboard_sonic_ethernet_addr(struct net_device *dev)
int i;
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = SONIC_READ_PROM(i);
+ addr[i] = SONIC_READ_PROM(i);
+ eth_hw_addr_set(dev, addr);
if (!INVALID_MAC(dev->dev_addr))
return;
@@ -222,7 +224,8 @@ static void mac_onboard_sonic_ethernet_addr(struct net_device *dev)
* source has a rather long and detailed historical account of
* why this is so.
*/
- bit_reverse_addr(dev->dev_addr);
+ bit_reverse_addr(addr);
+ eth_hw_addr_set(dev, addr);
if (!INVALID_MAC(dev->dev_addr))
return;
@@ -243,14 +246,15 @@ static void mac_onboard_sonic_ethernet_addr(struct net_device *dev)
SONIC_WRITE(SONIC_CEP, 15);
val = SONIC_READ(SONIC_CAP2);
- dev->dev_addr[5] = val >> 8;
- dev->dev_addr[4] = val & 0xff;
+ addr[5] = val >> 8;
+ addr[4] = val & 0xff;
val = SONIC_READ(SONIC_CAP1);
- dev->dev_addr[3] = val >> 8;
- dev->dev_addr[2] = val & 0xff;
+ addr[3] = val >> 8;
+ addr[2] = val & 0xff;
val = SONIC_READ(SONIC_CAP0);
- dev->dev_addr[1] = val >> 8;
- dev->dev_addr[0] = val & 0xff;
+ addr[1] = val >> 8;
+ addr[0] = val & 0xff;
+ eth_hw_addr_set(dev, addr);
if (!INVALID_MAC(dev->dev_addr))
return;
@@ -355,13 +359,16 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
static int mac_sonic_nubus_ethernet_addr(struct net_device *dev,
unsigned long prom_addr, int id)
{
+ u8 addr[ETH_ALEN];
int i;
+
for(i = 0; i < 6; i++)
- dev->dev_addr[i] = SONIC_READ_PROM(i);
+ addr[i] = SONIC_READ_PROM(i);
/* Some of the addresses are bit-reversed */
if (id != MACSONIC_DAYNA)
- bit_reverse_addr(dev->dev_addr);
+ bit_reverse_addr(addr);
+ eth_hw_addr_set(dev, addr);
return 0;
}
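[Editor's note: the macsonic conversion follows the tree-wide rule that netdev->dev_addr is const and may only be written through helpers: assemble the address in a local buffer, then commit it once. A minimal sketch, where read_prom_byte() is a hypothetical stand-in for SONIC_READ_PROM():

static void foo_set_mac_from_prom(struct net_device *dev)
{
	u8 addr[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = read_prom_byte(i);

	eth_hw_addr_set(dev, addr);	/* the only sanctioned writer */
}
]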
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index d1c32c65db05..d6ae44f164a9 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5461,8 +5461,11 @@ static int s2io_ethtool_set_led(struct net_device *dev,
return 0;
}
-static void s2io_ethtool_gringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+static void
+s2io_ethtool_gringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct s2io_nic *sp = netdev_priv(dev);
int i, tx_desc_count = 0, rx_desc_count = 0;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 2af9faee96c5..0c60a436a8f2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -338,11 +338,6 @@ int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
nfp_flow->meta.mask_len,
&nfp_flow->meta.flags, &new_mask_id)) {
NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
- if (nfp_release_stats_entry(app, stats_cxt)) {
- NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
- err = -EINVAL;
- goto err_remove_rhash;
- }
err = -ENOENT;
goto err_remove_rhash;
}
@@ -359,21 +354,6 @@ int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
check_entry = nfp_flower_search_fl_table(app, cookie, netdev);
if (check_entry) {
NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry");
- if (nfp_release_stats_entry(app, stats_cxt)) {
- NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
- err = -EINVAL;
- goto err_remove_mask;
- }
-
- if (!nfp_flow->pre_tun_rule.dev &&
- !nfp_check_mask_remove(app, nfp_flow->mask_data,
- nfp_flow->meta.mask_len,
- NULL, &new_mask_id)) {
- NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id");
- err = -EINVAL;
- goto err_remove_mask;
- }
-
err = -EEXIST;
goto err_remove_mask;
}
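[Editor's note: the cleanup the deleted blocks did inline is handled once in the shared unwind labels, so each failure site now only selects an error code and jumps. The goto-unwind idiom the remaining code relies on, sketched with illustrative names:

static int setup(struct ctx *c)
{
	int err;

	err = acquire_a(c);
	if (err)
		return err;

	err = acquire_b(c);
	if (err)
		goto err_release_a;

	if (!validate(c)) {
		err = -EEXIST;
		goto err_release_b;
	}
	return 0;

err_release_b:
	release_b(c);	/* teardown happens once, in reverse order */
err_release_a:
	release_a(c);
	return err;
}
]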
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 850bfdf83d0a..6e38da4ad554 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -4097,7 +4097,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = nn->max_mtu;
- netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
+ netif_set_gso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);
netif_carrier_off(netdev);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index cf7882933993..e0c27471bcdb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -381,7 +381,9 @@ err_bad_set:
}
static void nfp_net_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nfp_net *nn = netdev_priv(netdev);
@@ -406,7 +408,9 @@ static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
}
static int nfp_net_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nfp_net *nn = netdev_priv(netdev);
u32 rxd_cnt, txd_cnt;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 369f6ae700c7..181ac8e789a3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -286,8 +286,8 @@ nfp_repr_transfer_features(struct net_device *netdev, struct net_device *lower)
if (repr->dst->u.port_info.lower_dev != lower)
return;
- netdev->gso_max_size = lower->gso_max_size;
- netdev->gso_max_segs = lower->gso_max_segs;
+ netif_set_gso_max_size(netdev, lower->gso_max_size);
+ netif_set_gso_max_segs(netdev, lower->gso_max_segs);
netdev_update_features(netdev);
}
@@ -381,7 +381,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
/* Advertise but disable TSO by default. */
netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
- netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
+ netif_set_gso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);
netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
netdev->features |= NETIF_F_LLTX;
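[Editor's note: the nfp hunks stop writing netdev->gso_max_size/gso_max_segs directly and go through the netif_set_gso_max_*() setters, which in this era of the tree annotate the store (WRITE_ONCE) for lockless readers. The representor mirroring pattern, as a sketch:

static void mirror_gso_limits(struct net_device *upper,
			      struct net_device *lower)
{
	/* Use the setters rather than assigning the fields directly. */
	netif_set_gso_max_size(upper, lower->gso_max_size);
	netif_set_gso_max_segs(upper, lower->gso_max_segs);
}
]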
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 9b530d7509a4..660013f716d4 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -4651,7 +4651,10 @@ static int nv_nway_reset(struct net_device *dev)
return ret;
}
-static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
+static void nv_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct fe_priv *np = netdev_priv(dev);
@@ -4662,7 +4665,10 @@ static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* r
ring->tx_pending = np->tx_ring_size;
}
-static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
+static int nv_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 660b07cb5b92..84cc79e928c8 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -270,9 +270,13 @@ static int pch_gbe_nway_reset(struct net_device *netdev)
* pch_gbe_get_ringparam - Report ring sizes
* @netdev: Network interface device structure
* @ring: Ring param structure
+ * @kernel_ring: Ring external param structure
+ * @extack: Netlink extended ACK handle
*/
static void pch_gbe_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
struct pch_gbe_tx_ring *txdr = adapter->tx_ring;
@@ -288,12 +292,16 @@ static void pch_gbe_get_ringparam(struct net_device *netdev,
* pch_gbe_set_ringparam - Set ring sizes
* @netdev: Network interface device structure
* @ring: Ring param structure
+ * @kernel_ring: Ring external param structure
+ * @extack: Netlink extended ACK handle
* Returns
* 0: Successful.
* Negative value: Failed.
*/
static int pch_gbe_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
struct pch_gbe_tx_ring *txdr, *tx_old;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
index e1a304886a3c..4c7e0c991105 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
@@ -69,7 +69,9 @@ pasemi_mac_ethtool_set_msglevel(struct net_device *netdev,
static void
pasemi_mac_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct pasemi_mac *mac = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index c54d735b9e2e..386a5cf1e224 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -512,7 +512,9 @@ static int ionic_set_coalesce(struct net_device *netdev,
}
static void ionic_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ionic_lif *lif = netdev_priv(netdev);
@@ -523,7 +525,9 @@ static void ionic_get_ringparam(struct net_device *netdev,
}
static int ionic_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic_queue_params qparam;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index a075643f5826..3c4a84ea6321 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -392,7 +392,9 @@ netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
static void
netxen_nic_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct netxen_adapter *adapter = netdev_priv(dev);
@@ -430,7 +432,9 @@ netxen_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
static int
netxen_nic_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct netxen_adapter *adapter = netdev_priv(dev);
u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 452494f8c298..65e20693c549 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -1036,12 +1036,12 @@ static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
u32 type, vf;
for (type = 0; type < MAX_CONN_TYPES; type++) {
- kfree(p_mngr->acquired[type].cid_map);
+ bitmap_free(p_mngr->acquired[type].cid_map);
p_mngr->acquired[type].max_count = 0;
p_mngr->acquired[type].start_cid = 0;
for (vf = 0; vf < MAX_NUM_VFS; vf++) {
- kfree(p_mngr->acquired_vf[type][vf].cid_map);
+ bitmap_free(p_mngr->acquired_vf[type][vf].cid_map);
p_mngr->acquired_vf[type][vf].max_count = 0;
p_mngr->acquired_vf[type][vf].start_cid = 0;
}
@@ -1054,15 +1054,10 @@ qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn,
u32 cid_start,
u32 cid_count, struct qed_cid_acquired_map *p_map)
{
- u32 size;
-
if (!cid_count)
return 0;
- size = DIV_ROUND_UP(cid_count,
- sizeof(unsigned long) * BITS_PER_BYTE) *
- sizeof(unsigned long);
- p_map->cid_map = kzalloc(size, GFP_KERNEL);
+ p_map->cid_map = bitmap_zalloc(cid_count, GFP_KERNEL);
if (!p_map->cid_map)
return -ENOMEM;
@@ -1216,7 +1211,6 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
struct qed_cid_acquired_map *p_map;
struct qed_conn_type_cfg *p_cfg;
int type;
- u32 len;
/* Reset acquired cids */
for (type = 0; type < MAX_CONN_TYPES; type++) {
@@ -1225,11 +1219,7 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
p_cfg = &p_mngr->conn_cfg[type];
if (p_cfg->cid_count) {
p_map = &p_mngr->acquired[type];
- len = DIV_ROUND_UP(p_map->max_count,
- sizeof(unsigned long) *
- BITS_PER_BYTE) *
- sizeof(unsigned long);
- memset(p_map->cid_map, 0, len);
+ bitmap_zero(p_map->cid_map, p_map->max_count);
}
if (!p_cfg->cids_per_vf)
@@ -1237,11 +1227,7 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
for (vf = 0; vf < MAX_NUM_VFS; vf++) {
p_map = &p_mngr->acquired_vf[type][vf];
- len = DIV_ROUND_UP(p_map->max_count,
- sizeof(unsigned long) *
- BITS_PER_BYTE) *
- sizeof(unsigned long);
- memset(p_map->cid_map, 0, len);
+ bitmap_zero(p_map->cid_map, p_map->max_count);
}
}
}
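[Editor's note: the qed conversion swaps open-coded bitmap sizing (DIV_ROUND_UP over unsigned-long granules) for bitmap_zalloc()/bitmap_zero()/bitmap_free(), which take a bit count directly. A runnable userspace analogue of exactly the arithmetic being deleted:

#include <stdio.h>

#define BITS_PER_BYTE 8

static unsigned long bitmap_bytes(unsigned long nbits)
{
	unsigned long per_word = sizeof(unsigned long) * BITS_PER_BYTE;

	/* DIV_ROUND_UP(nbits, BITS_PER_LONG) words, converted to bytes;
	 * this is what the bitmap_*() helpers hide behind a bit count.
	 */
	return (nbits + per_word - 1) / per_word * sizeof(unsigned long);
}

int main(void)
{
	/* e.g. 100 CIDs -> 2 x 8-byte words = 16 bytes on a 64-bit host */
	printf("100 bits -> %lu bytes\n", bitmap_bytes(100));
	return 0;
}
]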
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 8284c4c1528f..100c9c52c20b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -888,7 +888,9 @@ int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal,
}
static void qede_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct qede_dev *edev = netdev_priv(dev);
@@ -899,7 +901,9 @@ static void qede_get_ringparam(struct net_device *dev,
}
static int qede_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct qede_dev *edev = netdev_priv(dev);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 065e9004598e..e113fbd56e86 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -10,6 +10,7 @@
#include <linux/bpf_trace.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
+#include <net/gro.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index fc364b4ab6eb..e10fe071a40f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -632,7 +632,9 @@ qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
static void
qlcnic_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
@@ -663,7 +665,9 @@ qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
static int
qlcnic_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u16 num_rxd, num_jumbo_rxd, num_txd;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
index f72e13b83869..f502db9cdea9 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
@@ -133,7 +133,9 @@ static int emac_nway_reset(struct net_device *netdev)
}
static void emac_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct emac_adapter *adpt = netdev_priv(netdev);
@@ -144,7 +146,9 @@ static void emac_get_ringparam(struct net_device *netdev,
}
static int emac_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct emac_adapter *adpt = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index d59fff2fbcc6..792ce9a323cd 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -246,7 +246,9 @@ qcaspi_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
}
static void
-qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
+qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct qcaspi *qca = netdev_priv(dev);
@@ -257,7 +259,9 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
}
static int
-qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
+qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
const struct net_device_ops *ops = dev->netdev_ops;
struct qcaspi *qca = netdev_priv(dev);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 4f39f843bb3a..ad7b9e9d7f95 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1388,7 +1388,9 @@ static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info
}
static void cp_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
ring->rx_max_pending = CP_RX_RING_SIZE;
ring->tx_max_pending = CP_TX_RING_SIZE;
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 86c44bc5f73f..3d6843332c77 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -1873,7 +1873,9 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
}
static void rtl8169_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *data)
+ struct ethtool_ringparam *data,
+ struct kernel_ethtool_ringparam *kernel_data,
+ struct netlink_ext_ack *extack)
{
data->rx_max_pending = NUM_RX_DESC;
data->rx_pending = NUM_RX_DESC;
@@ -1969,8 +1971,11 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 },
/* 8125A family. */
- { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
- { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
+ { 0x7cf, 0x609, RTL_GIGA_MAC_VER_61 },
+ /* It seems only XID 609 made it to the mass market.
+ * { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
+ * { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
+ */
/* RTL8117 */
{ 0x7cf, 0x54b, RTL_GIGA_MAC_VER_53 },
@@ -1978,17 +1983,26 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
/* 8168EP family. */
{ 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 },
- { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
- { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 },
+ /* It seems this chip version never made it to
+ * the wild. Let's disable detection.
+ * { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
+ * { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 },
+ */
/* 8168H family. */
{ 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 },
- { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 },
+ /* It seems this chip version never made it to
+ * the wild. Let's disable detection.
+ * { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 },
+ */
/* 8168G family. */
{ 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 },
{ 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 },
- { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 },
+ /* It seems this chip version never made it to
+ * the wild. Let's disable detection.
+ * { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 },
+ */
{ 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 },
/* 8168F family. */
@@ -5272,12 +5286,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- /* Disable ASPM L1 as that cause random device stop working
- * problems as well as full system hangs for some PCIe devices users.
- */
- rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
- tp->aspm_manageable = !rc;
-
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
if (rc < 0) {
@@ -5320,6 +5328,17 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->mac_version = chipset;
+ /* Disable ASPM L1 as it causes random devices to stop working and
+ * full system hangs for some PCIe device users. Chips from the
+ * RTL8168h family partly have issues with L1.2, but seem to work
+ * fine with L1 and L1.1.
+ */
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_45)
+ rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+ else
+ rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
+ tp->aspm_manageable = !rc;
+
tp->dash_type = rtl_check_dash(tp);
tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
@@ -5375,12 +5394,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if (rtl_chip_supports_csum_v2(tp)) {
dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
- dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
- dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
+ netif_set_gso_max_size(dev, RTL_GSO_MAX_SIZE_V2);
+ netif_set_gso_max_segs(dev, RTL_GSO_MAX_SEGS_V2);
} else {
dev->hw_features |= NETIF_F_SG | NETIF_F_TSO;
- dev->gso_max_size = RTL_GSO_MAX_SIZE_V1;
- dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
+ netif_set_gso_max_size(dev, RTL_GSO_MAX_SIZE_V1);
+ netif_set_gso_max_segs(dev, RTL_GSO_MAX_SEGS_V1);
}
dev->hw_features |= NETIF_F_RXALL;
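
The gso_max_* conversions above (and the sfc ones later in this section) exist because these fields are read locklessly elsewhere, e.g. from sk_setup_caps(). As a sketch of what the helper amounts to (assumed from the contemporaneous netdevice.h):

        static inline void netif_set_gso_max_size(struct net_device *dev,
                                                  unsigned int size)
        {
                /* dev->gso_max_size is read locklessly from sk_setup_caps() */
                WRITE_ONCE(dev->gso_max_size, size);
        }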
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index b4c597f4040c..ce09bd45527e 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -30,8 +30,7 @@
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/reset.h>
-
-#include <asm/div64.h>
+#include <linux/math64.h>
#include "ravb.h"
@@ -1605,7 +1604,9 @@ static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
}
static void ravb_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -1616,7 +1617,9 @@ static void ravb_get_ringparam(struct net_device *ndev,
}
static int ravb_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
@@ -2488,8 +2491,7 @@ static int ravb_set_gti(struct net_device *ndev)
if (!rate)
return -EINVAL;
- inc = 1000000000ULL << 20;
- do_div(inc, rate);
+ inc = div64_ul(1000000000ULL << 20, rate);
if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
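
The do_div() to div64_ul() change is behavior-preserving: do_div() divides its first argument in place and returns the remainder, while div64_ul() returns the quotient as a plain expression. A worked example, assuming a 125 MHz clock:

        u64 inc = div64_ul(1000000000ULL << 20, 125000000);
        /* 1000000000 / 125000000 = 8, so inc = 8 << 20 = 0x800000:
         * an 8 ns clock period expressed with 20 fractional bits,
         * which is then range-checked against GTI_TIV_MIN/GTI_TIV_MAX
         * as above. */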
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a3fbb2221c9a..223626290ce0 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2296,7 +2296,9 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
}
static void sh_eth_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2307,7 +2309,9 @@ static void sh_eth_get_ringparam(struct net_device *ndev,
}
static int sh_eth_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int ret;
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index ba4062881eed..b620470c7905 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1995,17 +1995,6 @@ static int rocker_port_get_phys_port_name(struct net_device *dev,
return err ? -EOPNOTSUPP : 0;
}
-static int rocker_port_change_proto_down(struct net_device *dev,
- bool proto_down)
-{
- struct rocker_port *rocker_port = netdev_priv(dev);
-
- if (rocker_port->dev->flags & IFF_UP)
- rocker_port_set_enable(rocker_port, !proto_down);
- rocker_port->dev->proto_down = proto_down;
- return 0;
-}
-
static void rocker_port_neigh_destroy(struct net_device *dev,
struct neighbour *n)
{
@@ -2037,7 +2026,6 @@ static const struct net_device_ops rocker_port_netdev_ops = {
.ndo_set_mac_address = rocker_port_set_mac_address,
.ndo_change_mtu = rocker_port_change_mtu,
.ndo_get_phys_port_name = rocker_port_get_phys_port_name,
- .ndo_change_proto_down = rocker_port_change_proto_down,
.ndo_neigh_destroy = rocker_port_neigh_destroy,
.ndo_get_port_parent_id = rocker_port_get_port_parent_id,
};
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 835c838b7dfa..5dba4125d953 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -20,8 +20,11 @@
/* This is the maximum number of descriptor rings supported by the QDMA */
#define EFX_EF100_MAX_DMAQ_SIZE 16384UL
-static void ef100_ethtool_get_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *ring)
+static void
+ef100_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct efx_nic *efx = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 6aa81229b68a..5a6fed33a1d7 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -993,11 +993,11 @@ static int ef100_process_design_param(struct efx_nic *efx,
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN:
nic_data->tso_max_payload_len = min_t(u64, reader->value, GSO_MAX_SIZE);
- efx->net_dev->gso_max_size = nic_data->tso_max_payload_len;
+ netif_set_gso_max_size(efx->net_dev, nic_data->tso_max_payload_len);
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS:
nic_data->tso_max_payload_num_segs = min_t(u64, reader->value, 0xffff);
- efx->net_dev->gso_max_segs = nic_data->tso_max_payload_num_segs;
+ netif_set_gso_max_segs(efx->net_dev, nic_data->tso_max_payload_num_segs);
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES:
nic_data->tso_max_frames = min_t(u64, reader->value, 0xffff);
@@ -1122,7 +1122,7 @@ static int ef100_probe_main(struct efx_nic *efx)
nic_data->tso_max_frames = ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT;
nic_data->tso_max_payload_num_segs = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT;
nic_data->tso_max_payload_len = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT;
- net_dev->gso_max_segs = ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT;
+ netif_set_gso_max_segs(net_dev, ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
/* Read design parameters */
rc = ef100_check_design_params(efx);
if (rc) {
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 6960a2fe2b53..a8c252e2b252 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -709,7 +709,7 @@ static int efx_register_netdev(struct efx_nic *efx)
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
net_dev->priv_flags |= IFF_UNICAST_FLT;
net_dev->ethtool_ops = &efx_ethtool_ops;
- net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
+ netif_set_gso_max_segs(net_dev, EFX_TSO_MAX_SEGS);
net_dev->min_mtu = EFX_MIN_MTU;
net_dev->max_mtu = EFX_MAX_MTU;
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index e002ce21788d..48506373721a 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -157,8 +157,11 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
return 0;
}
-static void efx_ethtool_get_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *ring)
+static void
+efx_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -168,8 +171,11 @@ static void efx_ethtool_get_ringparam(struct net_device *net_dev,
ring->tx_pending = efx->txq_entries;
}
-static int efx_ethtool_set_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *ring)
+static int
+efx_ethtool_set_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct efx_nic *efx = netdev_priv(net_dev);
u32 txq_entries;
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 314c9c69eb0e..60c595ef7589 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2267,7 +2267,7 @@ static int ef4_register_netdev(struct ef4_nic *efx)
net_dev->irq = efx->pci_dev->irq;
net_dev->netdev_ops = &ef4_netdev_ops;
net_dev->ethtool_ops = &ef4_ethtool_ops;
- net_dev->gso_max_segs = EF4_TSO_MAX_SEGS;
+ netif_set_gso_max_segs(net_dev, EF4_TSO_MAX_SEGS);
net_dev->min_mtu = EF4_MIN_MTU;
net_dev->max_mtu = EF4_MAX_MTU;
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 137e8a7aeaa1..907254b36663 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -637,8 +637,11 @@ static int ef4_ethtool_set_coalesce(struct net_device *net_dev,
return 0;
}
-static void ef4_ethtool_get_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *ring)
+static void
+ef4_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ef4_nic *efx = netdev_priv(net_dev);
@@ -648,8 +651,11 @@ static void ef4_ethtool_get_ringparam(struct net_device *net_dev,
ring->tx_pending = efx->txq_entries;
}
-static int ef4_ethtool_set_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *ring)
+static int
+ef4_ethtool_set_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ef4_nic *efx = netdev_priv(net_dev);
u32 txq_entries;
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 0ce403fa5f1a..af661c65ffe2 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -856,6 +856,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
word configuration_register;
word memory_info_register;
word memory_cfg_register;
+ u8 addr[ETH_ALEN];
/* Grab the region so that no one else tries to probe our ioports. */
if (!request_region(ioaddr, SMC_IO_EXTENT, DRV_NAME))
@@ -924,9 +925,10 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
word address;
address = inw( ioaddr + ADDR0 + i );
- dev->dev_addr[ i + 1] = address >> 8;
- dev->dev_addr[ i ] = address & 0xFF;
+ addr[i + 1] = address >> 8;
+ addr[i] = address & 0xFF;
}
+ eth_hw_addr_set(dev, addr);
/* get the memory information */
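
The smc9194 change follows the rule that drivers may no longer write dev->dev_addr directly (the field is on its way to becoming const): the address is assembled in a local buffer and installed in one call. The helper is roughly:

        static inline void eth_hw_addr_set(struct net_device *dev,
                                           const u8 *addr)
        {
                /* copies ETH_ALEN bytes into dev->dev_addr via the core */
                __dev_addr_set(dev, addr, ETH_ALEN);
        }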
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 5c74b6279d69..8fea48e477e6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -447,6 +447,24 @@ static void ethqos_fix_mac_speed(void *priv, unsigned int speed)
ethqos_configure(ethqos);
}
+static int ethqos_clks_config(void *priv, bool enabled)
+{
+ struct qcom_ethqos *ethqos = priv;
+ int ret = 0;
+
+ if (enabled) {
+ ret = clk_prepare_enable(ethqos->rgmii_clk);
+ if (ret) {
+ dev_err(&ethqos->pdev->dev, "rgmii_clk enable failed\n");
+ return ret;
+ }
+ } else {
+ clk_disable_unprepare(ethqos->rgmii_clk);
+ }
+
+ return ret;
+}
+
static int qcom_ethqos_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -466,6 +484,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
return PTR_ERR(plat_dat);
}
+ plat_dat->clks_config = ethqos_clks_config;
+
ethqos = devm_kzalloc(&pdev->dev, sizeof(*ethqos), GFP_KERNEL);
if (!ethqos) {
ret = -ENOMEM;
@@ -489,7 +509,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
goto err_mem;
}
- ret = clk_prepare_enable(ethqos->rgmii_clk);
+ ret = ethqos_clks_config(ethqos, true);
if (ret)
goto err_mem;
@@ -512,7 +532,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
return ret;
err_clk:
- clk_disable_unprepare(ethqos->rgmii_clk);
+ ethqos_clks_config(ethqos, false);
err_mem:
stmmac_remove_config_dt(pdev, plat_dat);
@@ -530,7 +550,7 @@ static int qcom_ethqos_remove(struct platform_device *pdev)
return -ENODEV;
ret = stmmac_pltfr_remove(pdev);
- clk_disable_unprepare(ethqos->rgmii_clk);
+ ethqos_clks_config(ethqos, false);
return ret;
}
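
Routing all rgmii_clk handling through ethqos_clks_config() and registering it as plat_dat->clks_config lets the stmmac core toggle the platform's extra clock together with its own, e.g. around suspend/resume. A hedged sketch of the caller side in the core:

        if (priv->plat->clks_config) {
                ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
                if (ret)
                        return ret;
        }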
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 5f129733aabd..689f3cdb2458 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -317,8 +317,8 @@ void stmmac_set_ethtool_ops(struct net_device *netdev);
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags);
void stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
-int stmmac_open(struct net_device *dev);
-int stmmac_release(struct net_device *dev);
+int stmmac_xdp_open(struct net_device *dev);
+void stmmac_xdp_release(struct net_device *dev);
int stmmac_resume(struct device *dev);
int stmmac_suspend(struct device *dev);
int stmmac_dvr_remove(struct device *dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index d89455803bed..eead45369eb9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -463,7 +463,9 @@ static int stmmac_nway_reset(struct net_device *dev)
}
static void stmmac_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(netdev);
@@ -474,7 +476,9 @@ static void stmmac_get_ringparam(struct net_device *netdev,
}
static int stmmac_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
ring->rx_pending < DMA_MIN_RX_SIZE ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 748195697e5a..89a6c35e2546 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -518,14 +518,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
return true;
}
-static inline u32 stmmac_cdc_adjust(struct stmmac_priv *priv)
-{
- /* Correct the clk domain crossing(CDC) error */
- if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
- return (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
- return 0;
-}
-
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
* @priv: driver private structure
* @p : descriptor pointer
@@ -557,7 +549,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
}
if (found) {
- ns -= stmmac_cdc_adjust(priv);
+ ns -= priv->plat->cdc_error_adj;
memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
shhwtstamp.hwtstamp = ns_to_ktime(ns);
@@ -594,7 +586,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
- ns -= stmmac_cdc_adjust(priv);
+ ns -= priv->plat->cdc_error_adj;
netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
shhwtstamp = skb_hwtstamps(skb);
@@ -2390,7 +2382,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
bool work_done = true;
/* Avoids TX time-out as we are sharing with slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
budget = min(budget, stmmac_tx_avail(priv, queue));
@@ -3673,7 +3665,7 @@ static int stmmac_request_irq(struct net_device *dev)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-int stmmac_open(struct net_device *dev)
+static int stmmac_open(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
int mode = priv->plat->phy_interface;
@@ -3797,7 +3789,7 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
* Description:
* This is the stop entry point of the driver.
*/
-int stmmac_release(struct net_device *dev)
+static int stmmac_release(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
@@ -4689,7 +4681,7 @@ static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
__netif_tx_lock(nq, cpu);
/* Avoids TX time-out as we are sharing with slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
if (res == STMMAC_XDP_TX)
@@ -6326,7 +6318,7 @@ static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
__netif_tx_lock(nq, cpu);
/* Avoids TX time-out as we are sharing with slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
for (i = 0; i < num_frames; i++) {
int res;
@@ -6462,6 +6454,140 @@ void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
spin_unlock_irqrestore(&ch->lock, flags);
}
+void stmmac_xdp_release(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 chan;
+
+ /* Disable NAPI process */
+ stmmac_disable_all_queues(priv);
+
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+
+ /* Free the IRQ lines */
+ stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
+
+ /* Stop TX/RX DMA channels */
+ stmmac_stop_all_dma(priv);
+
+ /* Release and free the Rx/Tx resources */
+ free_dma_desc_resources(priv);
+
+ /* Disable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, false);
+
+ /* set trans_start so we don't get spurious
+ * watchdogs during reset
+ */
+ netif_trans_update(dev);
+ netif_carrier_off(dev);
+}
+
+int stmmac_xdp_open(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 dma_csr_ch = max(rx_cnt, tx_cnt);
+ struct stmmac_rx_queue *rx_q;
+ struct stmmac_tx_queue *tx_q;
+ u32 buf_size;
+ bool sph_en;
+ u32 chan;
+ int ret;
+
+ ret = alloc_dma_desc_resources(priv);
+ if (ret < 0) {
+ netdev_err(dev, "%s: DMA descriptors allocation failed\n",
+ __func__);
+ goto dma_desc_error;
+ }
+
+ ret = init_dma_desc_rings(dev, GFP_KERNEL);
+ if (ret < 0) {
+ netdev_err(dev, "%s: DMA descriptors initialization failed\n",
+ __func__);
+ goto init_error;
+ }
+
+ /* DMA CSR Channel configuration */
+ for (chan = 0; chan < dma_csr_ch; chan++)
+ stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+
+ /* Adjust Split header */
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+
+ /* DMA RX Channel Configuration */
+ for (chan = 0; chan < rx_cnt; chan++) {
+ rx_q = &priv->rx_queue[chan];
+
+ stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, chan);
+
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (rx_q->buf_alloc_num *
+ sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr, chan);
+
+ if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
+ buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ buf_size,
+ rx_q->queue_index);
+ } else {
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ priv->dma_buf_sz,
+ rx_q->queue_index);
+ }
+
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+ }
+
+ /* DMA TX Channel Configuration */
+ for (chan = 0; chan < tx_cnt; chan++) {
+ tx_q = &priv->tx_queue[chan];
+
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+ tx_q->tx_tail_addr, chan);
+
+ hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ tx_q->txtimer.function = stmmac_tx_timer;
+ }
+
+ /* Enable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, true);
+
+ /* Start Rx & Tx DMA Channels */
+ stmmac_start_all_dma(priv);
+
+ ret = stmmac_request_irq(dev);
+ if (ret)
+ goto irq_error;
+
+ /* Enable NAPI process */
+ stmmac_enable_all_queues(priv);
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+
+ return 0;
+
+irq_error:
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+
+ stmmac_hw_teardown(dev);
+init_error:
+ free_dma_desc_resources(priv);
+dma_desc_error:
+ return ret;
+}
+
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
{
struct stmmac_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 580cc035536b..e14c97c04317 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -309,6 +309,11 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
if (priv->plat->ptp_max_adj)
stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj;
+ /* Calculate the clock domain crossing (CDC) error if necessary */
+ priv->plat->cdc_error_adj = 0;
+ if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
+ priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
+
stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n;
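
Precomputing cdc_error_adj at PTP registration replaces the per-packet stmmac_cdc_adjust() helper removed earlier in this section. A worked example of the arithmetic, assuming a 250 MHz PTP clock:

        /* cdc_error_adj = (2 * NSEC_PER_SEC) / clk_ptp_rate
         *               = 2000000000 / 250000000 = 8 ns,
         * i.e. timestamps are corrected by two PTP clock periods. */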
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
index 2a616c6f7cd0..9d4d8c3dad0a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
@@ -119,7 +119,7 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
need_update = !!priv->xdp_prog != !!prog;
if (if_running && need_update)
- stmmac_release(dev);
+ stmmac_xdp_release(dev);
old_prog = xchg(&priv->xdp_prog, prog);
if (old_prog)
@@ -129,7 +129,7 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv);
if (if_running && need_update)
- stmmac_open(dev);
+ stmmac_xdp_open(dev);
return 0;
}
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 0775a5542f2f..89bc1602661c 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2245,9 +2245,13 @@ static inline int bdx_tx_fifo_size_to_packets(int tx_size)
* bdx_get_ringparam - report ring sizes
* @netdev
* @ring
+ * @kernel_ring
+ * @extack
*/
static void
-bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct bdx_priv *priv = netdev_priv(netdev);
@@ -2262,9 +2266,13 @@ bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
* bdx_set_ringparam - set ring sizes
* @netdev
* @ring
+ * @kernel_ring
+ * @extack
*/
static int
-bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct bdx_priv *priv = netdev_priv(netdev);
int rx_size = 0;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index b05de9b61ad6..d45b6bb86f0b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -453,8 +453,11 @@ static int am65_cpsw_set_channels(struct net_device *ndev,
return am65_cpsw_nuss_update_tx_chns(common, chs->tx_count);
}
-static void am65_cpsw_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static void
+am65_cpsw_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index c092cb61416a..750cea23e9cd 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -345,7 +345,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
netif_txq = netdev_get_tx_queue(ndev, txqueue);
tx_chn = &common->tx_chns[txqueue];
- trans_start = netif_txq->trans_start;
+ trans_start = READ_ONCE(netif_txq->trans_start);
netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
txqueue,
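
The READ_ONCE() pairs with WRITE_ONCE()-based writers; the stmmac hunks earlier in this section switch their lockless writes to txq_trans_cond_update(), which this series introduces as roughly:

        static inline void txq_trans_cond_update(struct netdev_queue *txq)
        {
                unsigned long now = jiffies;

                if (READ_ONCE(txq->trans_start) != now)
                        WRITE_ONCE(txq->trans_start, now);
        }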
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 7449436fc87c..bef5e68dac31 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -817,7 +817,9 @@ static void cpmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
}
static void cpmac_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct cpmac_priv *priv = netdev_priv(dev);
@@ -833,7 +835,9 @@ static void cpmac_get_ringparam(struct net_device *dev,
}
static int cpmac_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct cpmac_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index 158c8d3793f4..aa42141be3c0 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -658,7 +658,9 @@ err:
}
void cpsw_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
@@ -671,7 +673,9 @@ void cpsw_get_ringparam(struct net_device *ndev,
}
int cpsw_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
int descs_num, ret;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index ecc2a6b7e28f..c99dd9735087 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -710,20 +710,26 @@ int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
struct cpsw_priv *priv = netdev_priv(dev);
struct cpsw_common *cpsw = priv->cpsw;
int slave_no = cpsw_slave_index(cpsw, priv);
+ struct phy_device *phy;
if (!netif_running(dev))
return -EINVAL;
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return cpsw_hwtstamp_set(dev, req);
- case SIOCGHWTSTAMP:
- return cpsw_hwtstamp_get(dev, req);
+ phy = cpsw->slaves[slave_no].phy;
+
+ if (!phy_has_hwtstamp(phy)) {
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return cpsw_hwtstamp_set(dev, req);
+ case SIOCGHWTSTAMP:
+ return cpsw_hwtstamp_get(dev, req);
+ }
}
- if (!cpsw->slaves[slave_no].phy)
- return -EOPNOTSUPP;
- return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
+ if (phy)
+ return phy_mii_ioctl(phy, req, cmd);
+
+ return -EOPNOTSUPP;
}
int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 435668ee542d..f33c882eb70e 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -491,9 +491,13 @@ int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata);
int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata);
int cpsw_nway_reset(struct net_device *ndev);
void cpsw_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering);
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack);
int cpsw_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering);
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack);
int cpsw_set_channels_common(struct net_device *ndev,
struct ethtool_channels *chs,
cpdma_handler_fn rx_handler);
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
index 54f655a68148..93110dba0bfa 100644
--- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c
+++ b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
@@ -110,7 +110,9 @@ spider_net_ethtool_nway_reset(struct net_device *netdev)
static void
spider_net_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct spider_net_card *card = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index e7065c9a8e38..b900ab5aef2a 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1276,8 +1276,11 @@ static const struct attribute_group temac_attr_group = {
* ethtool support
*/
-static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static void
+ll_temac_ethtools_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct temac_local *lp = netdev_priv(ndev);
@@ -1291,8 +1294,11 @@ static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
ering->tx_pending = lp->tx_bd_num;
}
-static int ll_temac_ethtools_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static int
+ll_temac_ethtools_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct temac_local *lp = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 9b068b81ae09..e39356364f33 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1323,8 +1323,11 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
}
-static void axienet_ethtools_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static void
+axienet_ethtools_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
@@ -1338,8 +1341,11 @@ static void axienet_ethtools_get_ringparam(struct net_device *ndev,
ering->tx_pending = lp->tx_bd_num;
}
-static int axienet_ethtools_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static int
+axienet_ethtools_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
@@ -1503,65 +1509,6 @@ static const struct ethtool_ops axienet_ethtool_ops = {
.nway_reset = axienet_ethtools_nway_reset,
};
-static void axienet_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct net_device *ndev = to_net_dev(config->dev);
- struct axienet_local *lp = netdev_priv(ndev);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- /* Only support the mode we are configured for */
- switch (state->interface) {
- case PHY_INTERFACE_MODE_NA:
- break;
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_SGMII:
- if (lp->switch_x_sgmii)
- break;
- fallthrough;
- default:
- if (state->interface != lp->phy_mode) {
- netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
- phy_modes(state->interface),
- phy_modes(lp->phy_mode));
- linkmode_zero(supported);
- return;
- }
- }
-
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
-
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, Pause);
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_NA:
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_GMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 1000baseT_Full);
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
- break;
- fallthrough;
- case PHY_INTERFACE_MODE_MII:
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 10baseT_Full);
- fallthrough;
- default:
- break;
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
static void axienet_mac_pcs_get_state(struct phylink_config *config,
struct phylink_link_state *state)
{
@@ -1687,7 +1634,7 @@ static void axienet_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops axienet_phylink_ops = {
- .validate = axienet_validate,
+ .validate = phylink_generic_validate,
.mac_pcs_get_state = axienet_mac_pcs_get_state,
.mac_an_restart = axienet_mac_an_restart,
.mac_prepare = axienet_mac_prepare,
@@ -2104,6 +2051,16 @@ static int axienet_probe(struct platform_device *pdev)
lp->phylink_config.dev = &ndev->dev;
lp->phylink_config.type = PHYLINK_NETDEV;
+ lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10FD | MAC_100FD | MAC_1000FD;
+
+ __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
+ if (lp->switch_x_sgmii) {
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ lp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ lp->phylink_config.supported_interfaces);
+ }
lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
lp->phy_mode,
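
With mac_capabilities and supported_interfaces filled in, phylink_generic_validate() can derive the same link-mode mask the removed axienet_validate() built by hand. Conceptually (a sketch of the idea, not phylink's actual code):

        /* interfaces the MAC never declared are rejected outright */
        if (!test_bit(state->interface, config->supported_interfaces))
                linkmode_zero(supported);
        /* otherwise the mask is the modes implied by MAC_10FD |
         * MAC_100FD | MAC_1000FD plus the declared pause support */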
diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c
index 6b68a53f1b38..72c31f0013ad 100644
--- a/drivers/net/fddi/skfp/smt.c
+++ b/drivers/net/fddi/skfp/smt.c
@@ -1846,10 +1846,10 @@ void smt_swap_para(struct smt_header *sm, int len, int direction)
}
}
+
static void smt_string_swap(char *data, const char *format, int len)
{
const char *open_paren = NULL ;
- int x ;
while (len > 0 && *format) {
switch (*format) {
@@ -1876,19 +1876,13 @@ static void smt_string_swap(char *data, const char *format, int len)
len-- ;
break ;
case 's' :
- x = data[0] ;
- data[0] = data[1] ;
- data[1] = x ;
+ swap(data[0], data[1]) ;
data += 2 ;
len -= 2 ;
break ;
case 'l' :
- x = data[0] ;
- data[0] = data[3] ;
- data[3] = x ;
- x = data[1] ;
- data[1] = data[2] ;
- data[2] = x ;
+ swap(data[0], data[3]) ;
+ swap(data[1], data[2]) ;
data += 4 ;
len -= 4 ;
break ;
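
swap() here is the generic kernel macro; for the 'l' case the two calls reverse a four-byte field in place (bytes {a,b,c,d} become {d,c,b,a}). Its definition is simply:

        #define swap(a, b) \
                do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)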
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 1ab94b5f9bbf..c1fdd721a730 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -17,6 +17,7 @@
#include <net/gro_cells.h>
#include <net/rtnetlink.h>
#include <net/geneve.h>
+#include <net/gro.h>
#include <net/protocol.h>
#define GENEVE_NETDEV_VER "0.6"
@@ -516,18 +517,15 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk,
type = gh->proto_type;
- rcu_read_lock();
ptype = gro_find_receive_by_type(type);
if (!ptype)
- goto out_unlock;
+ goto out;
skb_gro_pull(skb, gh_len);
skb_gro_postpull_rcsum(skb, gh, gh_len);
pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
-out_unlock:
- rcu_read_unlock();
out:
skb_gro_flush_final(skb, pp, flush);
@@ -547,13 +545,10 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
gh_len = geneve_hlen(gh);
type = gh->proto_type;
- rcu_read_lock();
ptype = gro_find_complete_by_type(type);
if (ptype)
err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
- rcu_read_unlock();
-
skb_set_inner_mac_header(skb, nhoff + gh_len);
return err;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 396bc1c204e6..5086cd07d1ed 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -155,7 +155,7 @@ static void free_netvsc_device(struct rcu_head *head)
kfree(nvdev->extension);
vfree(nvdev->recv_buf);
vfree(nvdev->send_buf);
- kfree(nvdev->send_section_map);
+ bitmap_free(nvdev->send_section_map);
for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
@@ -336,7 +336,6 @@ static int netvsc_init_buf(struct hv_device *device,
struct net_device *ndev = hv_get_drvdata(device);
struct nvsp_message *init_packet;
unsigned int buf_size;
- size_t map_words;
int i, ret = 0;
/* Get receive buffer area. */
@@ -528,10 +527,9 @@ static int netvsc_init_buf(struct hv_device *device,
net_device->send_section_size, net_device->send_section_cnt);
/* Setup state for managing the send buffer. */
- map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);
-
- net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
- if (net_device->send_section_map == NULL) {
+ net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt,
+ GFP_KERNEL);
+ if (!net_device->send_section_map) {
ret = -ENOMEM;
goto cleanup;
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 7e66ae1d2a59..efa963b7af54 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1858,7 +1858,9 @@ static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
}
static void netvsc_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
@@ -1870,7 +1872,9 @@ static void netvsc_get_ringparam(struct net_device *ndev,
}
static int netvsc_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index a2fcdb1abdb9..bc981043cc80 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -93,6 +93,7 @@
#define GSI_CHANNEL_STOP_RETRIES 10
#define GSI_CHANNEL_MODEM_HALT_RETRIES 10
+#define GSI_CHANNEL_MODEM_FLOW_RETRIES 5 /* disable flow control only */
#define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */
@@ -339,10 +340,10 @@ static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
* completion to be signaled. Returns true if the command completes
* or false if it times out.
*/
-static bool
-gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
+static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
{
unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
+ struct completion *completion = &gsi->completion;
reinit_completion(completion);
@@ -366,8 +367,6 @@ gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
enum gsi_evt_cmd_opcode opcode)
{
- struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- struct completion *completion = &evt_ring->completion;
struct device *dev = gsi->dev;
bool timeout;
u32 val;
@@ -378,7 +377,7 @@ static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
- timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val);
gsi_irq_ev_ctrl_disable(gsi);
@@ -478,7 +477,6 @@ static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
- struct completion *completion = &channel->completion;
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
struct device *dev = gsi->dev;
@@ -490,7 +488,7 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
val = u32_encode_bits(channel_id, CH_CHID_FMASK);
val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
- timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val);
gsi_irq_ch_ctrl_disable(gsi);
@@ -1074,13 +1072,10 @@ static void gsi_isr_chan_ctrl(struct gsi *gsi)
while (channel_mask) {
u32 channel_id = __ffs(channel_mask);
- struct gsi_channel *channel;
channel_mask ^= BIT(channel_id);
- channel = &gsi->channel[channel_id];
-
- complete(&channel->completion);
+ complete(&gsi->completion);
}
}
@@ -1094,13 +1089,10 @@ static void gsi_isr_evt_ctrl(struct gsi *gsi)
while (event_mask) {
u32 evt_ring_id = __ffs(event_mask);
- struct gsi_evt_ring *evt_ring;
event_mask ^= BIT(evt_ring_id);
- evt_ring = &gsi->evt_ring[evt_ring_id];
-
- complete(&evt_ring->completion);
+ complete(&gsi->completion);
}
}
@@ -1110,7 +1102,7 @@ gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
if (code == GSI_OUT_OF_RESOURCES) {
dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
- complete(&gsi->channel[channel_id].completion);
+ complete(&gsi->completion);
return;
}
@@ -1127,7 +1119,7 @@ gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
u32 channel_id = gsi_channel_id(evt_ring->channel);
- complete(&evt_ring->completion);
+ complete(&gsi->completion);
dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
channel_id);
return;
@@ -1171,18 +1163,23 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
u32 result;
u32 val;
- /* This interrupt is used to handle completions of the two GENERIC
- * GSI commands. We use these to allocate and halt channels on
- * the modem's behalf due to a hardware quirk on IPA v4.2. Once
- * allocated, the modem "owns" these channels, and as a result we
- * have no way of knowing the channel's state at any given time.
+ /* This interrupt is used to handle completions of GENERIC GSI
+ * commands. We use these to allocate and halt channels on the
+ * modem's behalf due to a hardware quirk on IPA v4.2. The modem
+ * "owns" channels even when the AP allocates them, and have no
+ * way of knowing whether a modem channel's state has been changed.
+ *
+ * We also use GENERIC commands to enable/disable channel flow
+ * control for IPA v4.2+.
*
* It is recommended that we halt the modem channels we allocated
* when shutting down, but it's possible the channel isn't running
* at the time we issue the HALT command. We'll get an error in
* that case, but it's harmless (the channel is already halted).
+ * Similarly, we could get an error back when updating flow control
+ * on a channel because it's not in the proper state.
*
- * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error
+ * In either case, we silently ignore a CHANNEL_NOT_RUNNING error
* if we receive it.
*/
val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
@@ -1648,19 +1645,25 @@ static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}
+/* We use generic commands only to operate on modem channels. We don't have
+ * the ability to determine channel state for a modem channel, so we simply
+ * issue the command and wait for it to complete.
+ */
static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
- enum gsi_generic_cmd_opcode opcode)
+ enum gsi_generic_cmd_opcode opcode,
+ u8 params)
{
- struct completion *completion = &gsi->completion;
bool timeout;
u32 val;
- /* The error global interrupt type is always enabled (until we
- * teardown), so we won't change that. A generic EE command
- * completes with a GSI global interrupt of type GP_INT1. We
- * only perform one generic command at a time (to allocate or
- * halt a modem channel) and only from this function. So we
- * enable the GP_INT1 IRQ type here while we're expecting it.
+ /* The error global interrupt type is always enabled (until we tear
+ * down), so we will keep it enabled.
+ *
+ * A generic EE command completes with a GSI global interrupt of
+ * type GP_INT1. We only perform one generic command at a time
+ * (to allocate, halt, or enable/disable flow control on a modem
+ * channel), and only from this function. So we enable the GP_INT1
+ * IRQ type here, and disable it again after the command completes.
*/
val = BIT(ERROR_INT) | BIT(GP_INT1);
iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
@@ -1674,8 +1677,9 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
+ val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);
- timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);
/* Disable the GP_INT1 IRQ type again */
iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
@@ -1692,7 +1696,7 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
return gsi_generic_command(gsi, channel_id,
- GSI_GENERIC_ALLOCATE_CHANNEL);
+ GSI_GENERIC_ALLOCATE_CHANNEL, 0);
}
static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
@@ -1702,7 +1706,7 @@ static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
do
ret = gsi_generic_command(gsi, channel_id,
- GSI_GENERIC_HALT_CHANNEL);
+ GSI_GENERIC_HALT_CHANNEL, 0);
while (ret == -EAGAIN && retries--);
if (ret)
@@ -1710,6 +1714,32 @@ static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
ret, channel_id);
}
+/* Enable or disable flow control for a modem GSI TX channel (IPA v4.2+) */
+void
+gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable)
+{
+ u32 retries = 0;
+ u32 command;
+ int ret;
+
+ command = enable ? GSI_GENERIC_ENABLE_FLOW_CONTROL
+ : GSI_GENERIC_DISABLE_FLOW_CONTROL;
+ /* Disabling flow control on IPA v4.11+ can return -EAGAIN if an
+ * enable operation is still underway; in that case we retry the command.
+ */
+ if (!enable && gsi->version >= IPA_VERSION_4_11)
+ retries = GSI_CHANNEL_MODEM_FLOW_RETRIES;
+
+ do
+ ret = gsi_generic_command(gsi, channel_id, command, 0);
+ while (ret == -EAGAIN && retries--);
+
+ if (ret)
+ dev_err(gsi->dev,
+ "error %d %sabling mode channel %u flow control\n",
+ ret, enable ? "en" : "dis", channel_id);
+}
+
/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi)
{
@@ -1975,18 +2005,6 @@ static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
gsi_evt_ring_id_free(gsi, evt_ring_id);
}
-/* Init function for event rings; there is no gsi_evt_ring_exit() */
-static void gsi_evt_ring_init(struct gsi *gsi)
-{
- u32 evt_ring_id = 0;
-
- gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
- gsi->ieob_enabled_bitmap = 0;
- do
- init_completion(&gsi->evt_ring[evt_ring_id].completion);
- while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
-}
-
static bool gsi_channel_data_valid(struct gsi *gsi,
const struct ipa_gsi_endpoint_data *data)
{
@@ -2069,7 +2087,6 @@ static int gsi_channel_init_one(struct gsi *gsi,
channel->tlv_count = data->channel.tlv_count;
channel->tre_count = tre_count;
channel->event_count = data->channel.event_count;
- init_completion(&channel->completion);
ret = gsi_channel_evt_ring_init(channel);
if (ret)
@@ -2129,7 +2146,8 @@ static int gsi_channel_init(struct gsi *gsi, u32 count,
/* IPA v4.2 requires the AP to allocate channels for the modem */
modem_alloc = gsi->version == IPA_VERSION_4_2;
- gsi_evt_ring_init(gsi); /* No matching exit required */
+ gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
+ gsi->ieob_enabled_bitmap = 0;
/* The endpoint data array is indexed by endpoint name */
for (i = 0; i < count; i++) {
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 88b80dc3db79..9cc657658811 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -101,6 +101,7 @@ enum gsi_channel_state {
GSI_CHANNEL_STATE_STARTED = 0x2,
GSI_CHANNEL_STATE_STOPPED = 0x3,
GSI_CHANNEL_STATE_STOP_IN_PROC = 0x4,
+ GSI_CHANNEL_STATE_FLOW_CONTROLLED = 0x5, /* IPA v4.2-v4.9 */
GSI_CHANNEL_STATE_ERROR = 0xf,
};
@@ -114,8 +115,6 @@ struct gsi_channel {
u16 tre_count;
u16 event_count;
- struct completion completion; /* signals channel command completion */
-
struct gsi_ring tre_ring;
u32 evt_ring_id;
@@ -141,28 +140,27 @@ enum gsi_evt_ring_state {
struct gsi_evt_ring {
struct gsi_channel *channel;
- struct completion completion; /* signals event ring state changes */
struct gsi_ring ring;
};
struct gsi {
struct device *dev; /* Same as IPA device */
enum ipa_version version;
- struct net_device dummy_dev; /* needed for NAPI */
void __iomem *virt_raw; /* I/O mapped address range */
void __iomem *virt; /* Adjusted for most registers */
u32 irq;
u32 channel_count;
u32 evt_ring_count;
- struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
- struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
u32 event_bitmap; /* allocated event rings */
u32 modem_channel_bitmap; /* modem channels to allocate */
u32 type_enabled_bitmap; /* GSI IRQ types enabled */
u32 ieob_enabled_bitmap; /* IEOB IRQ enabled (event rings) */
- struct completion completion; /* for global EE commands */
int result; /* Negative errno (generic commands) */
+ struct completion completion; /* Signals GSI command completion */
struct mutex mutex; /* protects commands, programming */
+ struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
+ struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
+ struct net_device dummy_dev; /* needed for NAPI */
};
/**
@@ -219,6 +217,15 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id);
int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
/**
+ * gsi_modem_channel_flow_control() - Set channel flow control state (IPA v4.2+)
+ * @gsi: GSI pointer returned by gsi_setup()
+ * @channel_id: Modem TX channel to control
+ * @enable: Whether to enable flow control (i.e., prevent flow)
+ */
+void gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id,
+ bool enable);
+
+/**
* gsi_channel_reset() - Reset an allocated GSI channel
* @gsi: GSI pointer
* @channel_id: Channel to be reset
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index bf9593d9eaea..8906f4381032 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -313,11 +313,15 @@ enum gsi_evt_cmd_opcode {
#define GENERIC_OPCODE_FMASK GENMASK(4, 0)
#define GENERIC_CHID_FMASK GENMASK(9, 5)
#define GENERIC_EE_FMASK GENMASK(13, 10)
+#define GENERIC_PARAMS_FMASK GENMASK(31, 24) /* IPA v4.11+ */
/** enum gsi_generic_cmd_opcode - GENERIC_OPCODE field values in GENERIC_CMD */
enum gsi_generic_cmd_opcode {
GSI_GENERIC_HALT_CHANNEL = 0x1,
GSI_GENERIC_ALLOCATE_CHANNEL = 0x2,
+ GSI_GENERIC_ENABLE_FLOW_CONTROL = 0x3, /* IPA v4.2+ */
+ GSI_GENERIC_DISABLE_FLOW_CONTROL = 0x4, /* IPA v4.2+ */
+ GSI_GENERIC_QUERY_FLOW_CONTROL = 0x5, /* IPA v4.11+ */
};
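The field masks above compose into a single GENERIC_CMD register word. A minimal sketch of that packing, assuming the u32_encode_bits() helper from <linux/bitfield.h>; the register write itself is omitted:

#include <linux/bitfield.h>

static u32 generic_cmd_word(u32 opcode, u32 channel_id, u32 ee, u32 params)
{
	u32 val;

	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
	val |= u32_encode_bits(ee, GENERIC_EE_FMASK);
	/* The params field is only interpreted on IPA v4.11+ */
	val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);

	return val;
}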
/* The next register is present for IPA v3.5.1 and above */
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 03a170993420..49d9a077d037 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -237,7 +237,8 @@ static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
}
/* suspend_delay represents suspend for RX, delay for TX endpoints.
- * Note that suspend is not supported starting with IPA v4.0.
+ * Note that suspend is not supported starting with IPA v4.0, and
+ * delay mode should not be used starting with IPA v4.2.
*/
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
@@ -248,11 +249,8 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
u32 mask;
u32 val;
- /* Suspend is not supported for IPA v4.0+. Delay doesn't work
- * correctly on IPA v4.2.
- */
if (endpoint->toward_ipa)
- WARN_ON(ipa->version == IPA_VERSION_4_2);
+ WARN_ON(ipa->version >= IPA_VERSION_4_2);
else
WARN_ON(ipa->version >= IPA_VERSION_4_0);
@@ -270,15 +268,15 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
return state;
}
-/* We currently don't care what the previous state was for delay mode */
+/* We don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
+ /* Delay mode should not be used for IPA v4.2+ */
+ WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
WARN_ON(!endpoint->toward_ipa);
- /* Delay mode doesn't work properly for IPA v4.2 */
- if (endpoint->ipa->version != IPA_VERSION_4_2)
- (void)ipa_endpoint_init_ctrl(endpoint, enable);
+ (void)ipa_endpoint_init_ctrl(endpoint, enable);
}
static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
@@ -355,26 +353,29 @@ ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
return suspended;
}
-/* Enable or disable delay or suspend mode on all modem endpoints */
+/* Put all modem RX endpoints into suspend mode, and stop transmission
+ * on all modem TX endpoints. Prior to IPA v4.2, endpoint DELAY mode is
+ * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
+ * control instead.
+ */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
u32 endpoint_id;
- /* DELAY mode doesn't work correctly on IPA v4.2 */
- if (ipa->version == IPA_VERSION_4_2)
- return;
-
for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
if (endpoint->ee_id != GSI_EE_MODEM)
continue;
- /* Set TX delay mode or RX suspend mode */
- if (endpoint->toward_ipa)
+ if (!endpoint->toward_ipa)
+ (void)ipa_endpoint_program_suspend(endpoint, enable);
+ else if (ipa->version < IPA_VERSION_4_2)
ipa_endpoint_program_delay(endpoint, enable);
else
- (void)ipa_endpoint_program_suspend(endpoint, enable);
+ gsi_modem_channel_flow_control(&ipa->gsi,
+ endpoint->channel_id,
+ enable);
}
}
@@ -860,7 +861,7 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
}
static void
-ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
+ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
u32 endpoint_id = endpoint->endpoint_id;
u32 offset;
@@ -874,6 +875,19 @@ ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
+/* Assumes HOL_BLOCK is in disabled state */
+static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
+ u32 microseconds)
+{
+ ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
+ ipa_endpoint_init_hol_block_en(endpoint, true);
+}
+
+static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
+{
+ ipa_endpoint_init_hol_block_en(endpoint, false);
+}
+
void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
u32 i;
@@ -884,9 +898,8 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
continue;
- ipa_endpoint_init_hol_block_enable(endpoint, false);
- ipa_endpoint_init_hol_block_timer(endpoint, 0);
- ipa_endpoint_init_hol_block_enable(endpoint, true);
+ ipa_endpoint_init_hol_block_disable(endpoint);
+ ipa_endpoint_init_hol_block_enable(endpoint, 0);
}
}
@@ -1141,18 +1154,19 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
{
struct sk_buff *skb;
+ if (!endpoint->netdev)
+ return;
+
skb = __dev_alloc_skb(len, GFP_ATOMIC);
- if (skb) {
- skb_put(skb, len);
- memcpy(skb->data, data, len);
- skb->truesize += extra;
- }
+ if (!skb)
+ return;
+
+ /* Copy the data into the socket buffer and receive it */
+ skb_put(skb, len);
+ memcpy(skb->data, data, len);
+ skb->truesize += extra;
- /* Now receive it, or drop it if there's no netdev */
- if (endpoint->netdev)
- ipa_modem_skb_rx(endpoint->netdev, skb);
- else if (skb)
- dev_kfree_skb_any(skb);
+ ipa_modem_skb_rx(endpoint->netdev, skb);
}
static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
@@ -1519,10 +1533,19 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
- if (endpoint->toward_ipa)
- ipa_endpoint_program_delay(endpoint, false);
- else
+ if (endpoint->toward_ipa) {
+ /* Newer versions of IPA use GSI channel flow control
+ * instead of endpoint DELAY mode to prevent sending data.
+ * Flow control is disabled for newly-allocated channels,
+ * and we can assume flow control is not (ever) enabled
+ * for AP TX channels.
+ */
+ if (endpoint->ipa->version < IPA_VERSION_4_2)
+ ipa_endpoint_program_delay(endpoint, false);
+ } else {
+ /* Ensure suspend mode is off on all AP RX endpoints */
(void)ipa_endpoint_program_suspend(endpoint, false);
+ }
ipa_endpoint_init_cfg(endpoint);
ipa_endpoint_init_nat(endpoint);
ipa_endpoint_init_hdr(endpoint);
@@ -1530,6 +1553,8 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
ipa_endpoint_init_hdr_metadata_mask(endpoint);
ipa_endpoint_init_mode(endpoint);
ipa_endpoint_init_aggr(endpoint);
+ if (!endpoint->toward_ipa)
+ ipa_endpoint_init_hol_block_disable(endpoint);
ipa_endpoint_init_deaggr(endpoint);
ipa_endpoint_init_rsrc_grp(endpoint);
ipa_endpoint_init_seq(endpoint);
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index a448ec198bee..3757ce3de2c5 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -734,7 +734,7 @@ static int ipa_probe(struct platform_device *pdev)
if (ret)
goto err_endpoint_exit;
- ret = ipa_modem_init(ipa, modem_init);
+ ret = ipa_smp2p_init(ipa, modem_init);
if (ret)
goto err_table_exit;
@@ -776,7 +776,7 @@ err_deconfig:
ipa_deconfig(ipa);
err_power_put:
pm_runtime_put_noidle(dev);
- ipa_modem_exit(ipa);
+ ipa_smp2p_exit(ipa);
err_table_exit:
ipa_table_exit(ipa);
err_endpoint_exit:
@@ -827,7 +827,7 @@ static int ipa_remove(struct platform_device *pdev)
ipa_deconfig(ipa);
out_power_put:
pm_runtime_put_noidle(dev);
- ipa_modem_exit(ipa);
+ ipa_smp2p_exit(ipa);
ipa_table_exit(ipa);
ipa_endpoint_exit(ipa);
gsi_exit(&ipa->gsi);
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 4337b0920d3d..1e9eae208e44 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -266,9 +266,7 @@ static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
}
/* Now see if any required regions are not defined */
- for (mem_id = find_first_zero_bit(regions, IPA_MEM_COUNT);
- mem_id < IPA_MEM_COUNT;
- mem_id = find_next_zero_bit(regions, IPA_MEM_COUNT, mem_id + 1)) {
+ for_each_clear_bit(mem_id, regions, IPA_MEM_COUNT) {
if (ipa_mem_id_required(ipa, mem_id))
dev_err(dev, "required memory region %u missing\n",
mem_id);
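for_each_clear_bit() replaces the open-coded find_first_zero_bit()/find_next_zero_bit() loop removed above. A self-contained sketch of the same traversal (report_missing() and its message are illustrative):

#include <linux/bitmap.h>
#include <linux/printk.h>

static void report_missing(const unsigned long *regions, unsigned int count)
{
	unsigned int id;

	/* Visits every bit index in [0, count) that is *not* set */
	for_each_clear_bit(id, regions, count)
		pr_info("region %u not defined\n", id);
}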
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index d0ab4d70c303..27d87097433f 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -442,16 +442,6 @@ static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
return NOTIFY_OK;
}
-int ipa_modem_init(struct ipa *ipa, bool modem_init)
-{
- return ipa_smp2p_init(ipa, modem_init);
-}
-
-void ipa_modem_exit(struct ipa *ipa)
-{
- ipa_smp2p_exit(ipa);
-}
-
int ipa_modem_config(struct ipa *ipa)
{
void *notifier;
diff --git a/drivers/net/ipa/ipa_modem.h b/drivers/net/ipa/ipa_modem.h
index 5e6e3d234454..e64ccc2402e9 100644
--- a/drivers/net/ipa/ipa_modem.h
+++ b/drivers/net/ipa/ipa_modem.h
@@ -18,9 +18,6 @@ void ipa_modem_skb_rx(struct net_device *netdev, struct sk_buff *skb);
void ipa_modem_suspend(struct net_device *netdev);
void ipa_modem_resume(struct net_device *netdev);
-int ipa_modem_init(struct ipa *ipa, bool modem_init);
-void ipa_modem_exit(struct ipa *ipa);
-
int ipa_modem_config(struct ipa *ipa);
void ipa_modem_deconfig(struct ipa *ipa);
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 1da334f54944..2f5a58bfc529 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -419,21 +419,26 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
dma_addr_t hash_addr;
dma_addr_t addr;
+ u32 zero_offset;
u16 hash_count;
+ u32 zero_size;
u16 hash_size;
u16 count;
u16 size;
- /* The number of filtering endpoints determines number of entries
- * in the filter table. The hashed and non-hashed filter table
- * will have the same number of entries. The size of the route
- * table region determines the number of entries it has.
- */
+ /* Compute the number of table entries to initialize */
if (filter) {
- /* Include one extra "slot" to hold the filter map itself */
+ /* The number of filtering endpoints determines the number of
+ * entries in the filter table; we also add one more "slot"
+ * to hold the bitmap itself. The size of the hashed filter
+ * table is either the same as the non-hashed one, or zero.
+ */
count = 1 + hweight32(ipa->filter_map);
hash_count = hash_mem->size ? count : 0;
} else {
+ /* The size of a route table region determines the number
+ * of entries it has.
+ */
count = mem->size / sizeof(__le64);
hash_count = hash_mem->size / sizeof(__le64);
}
@@ -445,13 +450,42 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr,
hash_size, hash_mem->offset, hash_addr);
+ if (!filter)
+ return;
+
+ /* Zero the unused space in the filter table */
+ zero_offset = mem->offset + size;
+ zero_size = mem->size - size;
+ ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size,
+ ipa->zero_addr, true);
+ if (!hash_size)
+ return;
+
+ /* Zero the unused space in the hashed filter table */
+ zero_offset = hash_mem->offset + hash_size;
+ zero_size = hash_mem->size - hash_size;
+ ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size,
+ ipa->zero_addr, true);
}
int ipa_table_setup(struct ipa *ipa)
{
struct gsi_trans *trans;
- trans = ipa_cmd_trans_alloc(ipa, 4);
+ /* We will need at most 8 TREs:
+ * - IPv4:
+ * - One for route table initialization (non-hashed and hashed)
+ * - One for filter table initialization (non-hashed and hashed)
+ * - One to zero unused entries in the non-hashed filter table
+ * - One to zero unused entries in the hashed filter table
+ * - IPv6:
+ * - One for route table initialization (non-hashed and hashed)
+ * - One for filter table initialization (non-hashed and hashed)
+ * - One to zero unused entries in the non-hashed filter table
+ * - One to zero unused entries in the hashed filter table
+ * All platforms support at least 8 TREs in a transaction.
+ */
+ trans = ipa_cmd_trans_alloc(ipa, 8);
if (!trans) {
dev_err(&ipa->pdev->dev, "no transaction for table setup\n");
return -EBUSY;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 1d2f4e7d7324..20da0b2bd246 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -140,8 +140,8 @@ static int ipvlan_init(struct net_device *dev)
dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES;
dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS;
dev->hw_enc_features |= dev->features;
- dev->gso_max_size = phy_dev->gso_max_size;
- dev->gso_max_segs = phy_dev->gso_max_segs;
+ netif_set_gso_max_size(dev, phy_dev->gso_max_size);
+ netif_set_gso_max_segs(dev, phy_dev->gso_max_segs);
dev->hard_header_len = phy_dev->hard_header_len;
netdev_lockdep_set_classes(dev);
@@ -763,8 +763,8 @@ static int ipvlan_device_event(struct notifier_block *unused,
case NETDEV_FEAT_CHANGE:
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
- ipvlan->dev->gso_max_size = dev->gso_max_size;
- ipvlan->dev->gso_max_segs = dev->gso_max_segs;
+ netif_set_gso_max_size(ipvlan->dev, dev->gso_max_size);
+ netif_set_gso_max_segs(ipvlan->dev, dev->gso_max_segs);
netdev_update_features(ipvlan->dev);
}
break;
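At this point in the tree, the new setters are thin wrappers around WRITE_ONCE(); a sketch of their shape (see include/linux/netdevice.h for the authoritative definitions; netif_set_gso_max_segs() is analogous):

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	/* dev->gso_max_size is read locklessly on the transmit path */
	WRITE_ONCE(dev->gso_max_size, size);
}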
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d2f830ec2969..6ef5f77be4d0 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -900,8 +900,8 @@ static int macvlan_init(struct net_device *dev)
dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES;
dev->vlan_features |= ALWAYS_ON_OFFLOADS;
dev->hw_enc_features |= dev->features;
- dev->gso_max_size = lowerdev->gso_max_size;
- dev->gso_max_segs = lowerdev->gso_max_segs;
+ netif_set_gso_max_size(dev, lowerdev->gso_max_size);
+ netif_set_gso_max_segs(dev, lowerdev->gso_max_segs);
dev->hard_header_len = lowerdev->hard_header_len;
macvlan_set_lockdep_class(dev);
@@ -1171,7 +1171,6 @@ static const struct net_device_ops macvlan_netdev_ops = {
#endif
.ndo_get_iflink = macvlan_dev_get_iflink,
.ndo_features_check = passthru_features_check,
- .ndo_change_proto_down = dev_change_proto_down_generic,
};
void macvlan_common_setup(struct net_device *dev)
@@ -1182,7 +1181,7 @@ void macvlan_common_setup(struct net_device *dev)
dev->max_mtu = ETH_MAX_MTU;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
netif_keep_dst(dev);
- dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->priv_flags |= IFF_UNICAST_FLT | IFF_CHANGE_PROTO_DOWN;
dev->netdev_ops = &macvlan_netdev_ops;
dev->needs_free_netdev = true;
dev->header_ops = &macvlan_hard_header_ops;
@@ -1748,8 +1747,8 @@ static int macvlan_device_event(struct notifier_block *unused,
break;
case NETDEV_FEAT_CHANGE:
list_for_each_entry(vlan, &port->vlans, list) {
- vlan->dev->gso_max_size = dev->gso_max_size;
- vlan->dev->gso_max_segs = dev->gso_max_segs;
+ netif_set_gso_max_size(vlan->dev, dev->gso_max_size);
+ netif_set_gso_max_segs(vlan->dev, dev->gso_max_segs);
netdev_update_features(vlan->dev);
}
break;
diff --git a/drivers/net/mctp/Kconfig b/drivers/net/mctp/Kconfig
index d8f966cedc89..2929471395ae 100644
--- a/drivers/net/mctp/Kconfig
+++ b/drivers/net/mctp/Kconfig
@@ -3,6 +3,24 @@ if MCTP
menu "MCTP Device Drivers"
+config MCTP_SERIAL
+ tristate "MCTP serial transport"
+ depends on TTY
+ select CRC_CCITT
+ help
+ This driver provides an MCTP-over-serial interface, through a
+ serial line-discipline, as defined by DMTF specification "DSP0253 -
+ MCTP Serial Transport Binding". By attaching the ldisc to a serial
+ device, we get a new net device to transport MCTP packets.
+
+ This allows communication with external MCTP endpoints which use
+ serial as their transport. It can also be used as an easy way to
+ provide MCTP connectivity between virtual machines, by forwarding
+ data between simple virtual serial devices.
+
+ Say y here if you need to connect to MCTP endpoints over serial. To
+ compile as a module, use m; the module will be called mctp-serial.
+
endmenu
endif
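As the help text above says, the net device appears once the line discipline is attached to a tty. A hedged userspace sketch (assumes a kernel exposing N_MCTP via <linux/tty.h>; attach_mctp_ldisc() is illustrative):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/tty.h>

int attach_mctp_ldisc(const char *tty_path)
{
	int ldisc = N_MCTP;
	int fd = open(tty_path, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;
	/* A mctpserialN interface exists while the ldisc stays attached */
	if (ioctl(fd, TIOCSETD, &ldisc) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* keep open; closing the fd detaches the ldisc */
}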
diff --git a/drivers/net/mctp/Makefile b/drivers/net/mctp/Makefile
index e69de29bb2d1..d32622613ce4 100644
--- a/drivers/net/mctp/Makefile
+++ b/drivers/net/mctp/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MCTP_SERIAL) += mctp-serial.o
diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
new file mode 100644
index 000000000000..eaa6fb3224bc
--- /dev/null
+++ b/drivers/net/mctp/mctp-serial.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Management Component Transport Protocol (MCTP) - serial transport
+ * binding. This driver is an implementation of the DMTF specification
+ * "DSP0253 - Management Component Transport Protocol (MCTP) Serial Transport
+ * Binding", available at:
+ *
+ * https://www.dmtf.org/sites/default/files/standards/documents/DSP0253_1.0.0.pdf
+ *
+ * This driver provides DSP0253-type MCTP-over-serial transport using a Linux
+ * tty device, by setting the N_MCTP line discipline on the tty.
+ *
+ * Copyright (c) 2021 Code Construct
+ */
+
+#include <linux/idr.h>
+#include <linux/if_arp.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/tty.h>
+#include <linux/workqueue.h>
+#include <linux/crc-ccitt.h>
+
+#include <linux/mctp.h>
+#include <net/mctp.h>
+#include <net/pkt_sched.h>
+
+#define MCTP_SERIAL_MTU 68 /* base mtu (64) + mctp header */
+#define MCTP_SERIAL_FRAME_MTU (MCTP_SERIAL_MTU + 6) /* + serial framing */
+
+#define MCTP_SERIAL_VERSION 0x1 /* DSP0253 defines a single version: 1 */
+
+#define BUFSIZE MCTP_SERIAL_FRAME_MTU
+
+#define BYTE_FRAME 0x7e
+#define BYTE_ESC 0x7d
+
+static DEFINE_IDA(mctp_serial_ida);
+
+enum mctp_serial_state {
+ STATE_IDLE,
+ STATE_START,
+ STATE_HEADER,
+ STATE_DATA,
+ STATE_ESCAPE,
+ STATE_TRAILER,
+ STATE_DONE,
+ STATE_ERR,
+};
+
+struct mctp_serial {
+ struct net_device *netdev;
+ struct tty_struct *tty;
+
+ int idx;
+
+ /* protects our rx & tx state machines; held during both paths */
+ spinlock_t lock;
+
+ struct work_struct tx_work;
+ enum mctp_serial_state txstate, rxstate;
+ u16 txfcs, rxfcs, rxfcs_rcvd;
+ unsigned int txlen, rxlen;
+ unsigned int txpos, rxpos;
+ unsigned char txbuf[BUFSIZE],
+ rxbuf[BUFSIZE];
+};
+
+static bool needs_escape(unsigned char c)
+{
+ return c == BYTE_ESC || c == BYTE_FRAME;
+}
+
+static int next_chunk_len(struct mctp_serial *dev)
+{
+ int i;
+
+ /* either we have no bytes to send ... */
+ if (dev->txpos == dev->txlen)
+ return 0;
+
+ /* ... or the next byte to send is an escaped byte, requiring a
+ * single-byte chunk...
+ */
+ if (needs_escape(dev->txbuf[dev->txpos]))
+ return 1;
+
+ /* ... or we have one or more bytes up to the next escape - this chunk
+ * will be those non-escaped bytes, and does not include the escaped
+ * byte.
+ */
+ for (i = 1; i + dev->txpos < dev->txlen; i++) {
+ if (needs_escape(dev->txbuf[dev->txpos + i]))
+ break;
+ }
+
+ return i;
+}
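A worked example of the chunking above, with illustrative values:

/* With txbuf = { 0x01, 0x7e, 0x02, 0x03 } and txlen = 4:
 *   txpos = 0 -> returns 1 (0x01 alone; the chunk stops before the 0x7e)
 *   txpos = 1 -> returns 1 (the 0x7e itself, sent as an escaped pair)
 *   txpos = 2 -> returns 2 (0x02 0x03, through to the end of the buffer)
 */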
+
+static int write_chunk(struct mctp_serial *dev, unsigned char *buf, int len)
+{
+ return dev->tty->ops->write(dev->tty, buf, len);
+}
+
+static void mctp_serial_tx_work(struct work_struct *work)
+{
+ struct mctp_serial *dev = container_of(work, struct mctp_serial,
+ tx_work);
+ unsigned char c, buf[3];
+ unsigned long flags;
+ int len, txlen;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* txstate represents the next thing to send */
+ switch (dev->txstate) {
+ case STATE_START:
+ dev->txpos = 0;
+ fallthrough;
+ case STATE_HEADER:
+ buf[0] = BYTE_FRAME;
+ buf[1] = MCTP_SERIAL_VERSION;
+ buf[2] = dev->txlen;
+
+ if (!dev->txpos)
+ dev->txfcs = crc_ccitt(0xffff, buf + 1, 2); /* RFC 1662 init */
+
+ txlen = write_chunk(dev, buf + dev->txpos, 3 - dev->txpos);
+ if (txlen <= 0) {
+ dev->txstate = STATE_ERR;
+ } else {
+ dev->txpos += txlen;
+ if (dev->txpos == 3) {
+ dev->txstate = STATE_DATA;
+ dev->txpos = 0;
+ }
+ }
+ break;
+
+ case STATE_ESCAPE:
+ buf[0] = dev->txbuf[dev->txpos] & ~0x20;
+ txlen = write_chunk(dev, buf, 1);
+ if (txlen <= 0) {
+ dev->txstate = STATE_ERR;
+ } else {
+ dev->txpos += txlen;
+ if (dev->txpos == dev->txlen) {
+ dev->txstate = STATE_TRAILER;
+ dev->txpos = 0;
+ }
+ }
+
+ break;
+
+ case STATE_DATA:
+ len = next_chunk_len(dev);
+ if (len) {
+ c = dev->txbuf[dev->txpos];
+ if (len == 1 && needs_escape(c)) {
+ buf[0] = BYTE_ESC;
+ buf[1] = c & ~0x20;
+ dev->txfcs = crc_ccitt_byte(dev->txfcs, c);
+ txlen = write_chunk(dev, buf, 2);
+ if (txlen == 2)
+ dev->txpos++;
+ else if (txlen == 1)
+ dev->txstate = STATE_ESCAPE;
+ else
+ dev->txstate = STATE_ERR;
+ } else {
+ txlen = write_chunk(dev,
+ dev->txbuf + dev->txpos,
+ len);
+ if (txlen <= 0) {
+ dev->txstate = STATE_ERR;
+ } else {
+ dev->txfcs = crc_ccitt(dev->txfcs,
+ dev->txbuf +
+ dev->txpos,
+ txlen);
+ dev->txpos += txlen;
+ }
+ }
+ if (dev->txstate == STATE_DATA &&
+ dev->txpos == dev->txlen) {
+ dev->txstate = STATE_TRAILER;
+ dev->txpos = 0;
+ }
+ break;
+ }
+ dev->txstate = STATE_TRAILER;
+ dev->txpos = 0;
+ fallthrough;
+
+ case STATE_TRAILER:
+ buf[0] = dev->txfcs >> 8;
+ buf[1] = dev->txfcs & 0xff;
+ buf[2] = BYTE_FRAME;
+ txlen = write_chunk(dev, buf + dev->txpos, 3 - dev->txpos);
+ if (txlen <= 0) {
+ dev->txstate = STATE_ERR;
+ } else {
+ dev->txpos += txlen;
+ if (dev->txpos == 3) {
+ dev->txstate = STATE_DONE;
+ dev->txpos = 0;
+ }
+ }
+ break;
+ default:
+ netdev_err_once(dev->netdev, "invalid tx state %d\n",
+ dev->txstate);
+ }
+
+ if (dev->txstate == STATE_DONE) {
+ dev->netdev->stats.tx_packets++;
+ dev->netdev->stats.tx_bytes += dev->txlen;
+ dev->txlen = 0;
+ dev->txpos = 0;
+ clear_bit(TTY_DO_WRITE_WAKEUP, &dev->tty->flags);
+ dev->txstate = STATE_IDLE;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ netif_wake_queue(dev->netdev);
+ } else {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ }
+}
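Summarizing the transmit state machine above, the on-wire layout it produces is (descriptive summary, not a restatement of DSP0253):

/*   0x7e | version (0x01) | len | payload bytes | FCS hi | FCS lo | 0x7e
 *
 * Payload bytes equal to 0x7e or 0x7d are sent as 0x7d followed by the
 * byte with bit 5 cleared (0x7e -> 0x7d 0x5e, 0x7d -> 0x7d 0x5d), and
 * the FCS is computed over the version, length and unescaped payload.
 */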
+
+static netdev_tx_t mctp_serial_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct mctp_serial *dev = netdev_priv(ndev);
+ unsigned long flags;
+
+ WARN_ON(dev->txstate != STATE_IDLE);
+
+ if (skb->len > MCTP_SERIAL_MTU) {
+ dev->netdev->stats.tx_dropped++;
+ goto out;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ netif_stop_queue(dev->netdev);
+ skb_copy_bits(skb, 0, dev->txbuf, skb->len);
+ dev->txpos = 0;
+ dev->txlen = skb->len;
+ dev->txstate = STATE_START;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ set_bit(TTY_DO_WRITE_WAKEUP, &dev->tty->flags);
+ schedule_work(&dev->tx_work);
+
+out:
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static void mctp_serial_tty_write_wakeup(struct tty_struct *tty)
+{
+ struct mctp_serial *dev = tty->disc_data;
+
+ schedule_work(&dev->tx_work);
+}
+
+static void mctp_serial_rx(struct mctp_serial *dev)
+{
+ struct mctp_skb_cb *cb;
+ struct sk_buff *skb;
+
+ if (dev->rxfcs != dev->rxfcs_rcvd) {
+ dev->netdev->stats.rx_dropped++;
+ dev->netdev->stats.rx_crc_errors++;
+ return;
+ }
+
+ skb = netdev_alloc_skb(dev->netdev, dev->rxlen);
+ if (!skb) {
+ dev->netdev->stats.rx_dropped++;
+ return;
+ }
+
+ skb->protocol = htons(ETH_P_MCTP);
+ skb_put_data(skb, dev->rxbuf, dev->rxlen);
+ skb_reset_network_header(skb);
+
+ cb = __mctp_cb(skb);
+ cb->halen = 0;
+
+ netif_rx_ni(skb);
+ dev->netdev->stats.rx_packets++;
+ dev->netdev->stats.rx_bytes += dev->rxlen;
+}
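The FCS comparison above covers everything between the framing bytes except the FCS itself. A sketch of the same computation in one place (frame_fcs() is illustrative, mirroring the tx and rx paths):

#include <linux/crc-ccitt.h>

static u16 frame_fcs(const u8 *payload, u8 len)
{
	/* Same initial value and coverage as the tx/rx state machines */
	u16 fcs = crc_ccitt_byte(0xffff, MCTP_SERIAL_VERSION);

	fcs = crc_ccitt_byte(fcs, len);
	return crc_ccitt(fcs, payload, len);
}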
+
+static void mctp_serial_push_header(struct mctp_serial *dev, unsigned char c)
+{
+ switch (dev->rxpos) {
+ case 0:
+ if (c == BYTE_FRAME)
+ dev->rxpos++;
+ else
+ dev->rxstate = STATE_ERR;
+ break;
+ case 1:
+ if (c == MCTP_SERIAL_VERSION) {
+ dev->rxpos++;
+ dev->rxfcs = crc_ccitt_byte(0xffff, c); /* RFC 1662 init */
+ } else {
+ dev->rxstate = STATE_ERR;
+ }
+ break;
+ case 2:
+ if (c > MCTP_SERIAL_FRAME_MTU) {
+ dev->rxstate = STATE_ERR;
+ } else {
+ dev->rxlen = c;
+ dev->rxpos = 0;
+ dev->rxstate = STATE_DATA;
+ dev->rxfcs = crc_ccitt_byte(dev->rxfcs, c);
+ }
+ break;
+ }
+}
+
+static void mctp_serial_push_trailer(struct mctp_serial *dev, unsigned char c)
+{
+ switch (dev->rxpos) {
+ case 0:
+ dev->rxfcs_rcvd = c << 8;
+ dev->rxpos++;
+ break;
+ case 1:
+ dev->rxfcs_rcvd |= c;
+ dev->rxpos++;
+ break;
+ case 2:
+ if (c != BYTE_FRAME) {
+ dev->rxstate = STATE_ERR;
+ } else {
+ mctp_serial_rx(dev);
+ dev->rxlen = 0;
+ dev->rxpos = 0;
+ dev->rxstate = STATE_IDLE;
+ }
+ break;
+ }
+}
+
+static void mctp_serial_push(struct mctp_serial *dev, unsigned char c)
+{
+ switch (dev->rxstate) {
+ case STATE_IDLE:
+ dev->rxstate = STATE_HEADER;
+ fallthrough;
+ case STATE_HEADER:
+ mctp_serial_push_header(dev, c);
+ break;
+
+ case STATE_ESCAPE:
+ c |= 0x20;
+ fallthrough;
+ case STATE_DATA:
+ if (dev->rxstate != STATE_ESCAPE && c == BYTE_ESC) {
+ dev->rxstate = STATE_ESCAPE;
+ } else {
+ dev->rxfcs = crc_ccitt_byte(dev->rxfcs, c);
+ dev->rxbuf[dev->rxpos] = c;
+ dev->rxpos++;
+ dev->rxstate = STATE_DATA;
+ if (dev->rxpos == dev->rxlen) {
+ dev->rxpos = 0;
+ dev->rxstate = STATE_TRAILER;
+ }
+ }
+ break;
+
+ case STATE_TRAILER:
+ mctp_serial_push_trailer(dev, c);
+ break;
+
+ case STATE_ERR:
+ if (c == BYTE_FRAME)
+ dev->rxstate = STATE_IDLE;
+ break;
+
+ default:
+ netdev_err_once(dev->netdev, "invalid rx state %d\n",
+ dev->rxstate);
+ }
+}
+
+static void mctp_serial_tty_receive_buf(struct tty_struct *tty,
+ const unsigned char *c,
+ const char *f, int len)
+{
+ struct mctp_serial *dev = tty->disc_data;
+ int i;
+
+ if (!netif_running(dev->netdev))
+ return;
+
+ /* we don't (currently) use the flag bytes, just data. */
+ for (i = 0; i < len; i++)
+ mctp_serial_push(dev, c[i]);
+}
+
+static const struct net_device_ops mctp_serial_netdev_ops = {
+ .ndo_start_xmit = mctp_serial_tx,
+};
+
+static void mctp_serial_setup(struct net_device *ndev)
+{
+ ndev->type = ARPHRD_MCTP;
+
+ /* we limit to the fixed MTU, which is also the MCTP-standard
+ * baseline MTU, so it is also our minimum
+ */
+ ndev->mtu = MCTP_SERIAL_MTU;
+ ndev->max_mtu = MCTP_SERIAL_MTU;
+ ndev->min_mtu = MCTP_SERIAL_MTU;
+
+ ndev->hard_header_len = 0;
+ ndev->addr_len = 0;
+ ndev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
+ ndev->flags = IFF_NOARP;
+ ndev->netdev_ops = &mctp_serial_netdev_ops;
+ ndev->needs_free_netdev = true;
+}
+
+static int mctp_serial_open(struct tty_struct *tty)
+{
+ struct mctp_serial *dev;
+ struct net_device *ndev;
+ char name[32];
+ int idx, rc;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!tty->ops->write)
+ return -EOPNOTSUPP;
+
+ idx = ida_alloc(&mctp_serial_ida, GFP_KERNEL);
+ if (idx < 0)
+ return idx;
+
+ snprintf(name, sizeof(name), "mctpserial%d", idx);
+ ndev = alloc_netdev(sizeof(*dev), name, NET_NAME_ENUM,
+ mctp_serial_setup);
+ if (!ndev) {
+ rc = -ENOMEM;
+ goto free_ida;
+ }
+
+ dev = netdev_priv(ndev);
+ dev->idx = idx;
+ dev->tty = tty;
+ dev->netdev = ndev;
+ dev->txstate = STATE_IDLE;
+ dev->rxstate = STATE_IDLE;
+ spin_lock_init(&dev->lock);
+ INIT_WORK(&dev->tx_work, mctp_serial_tx_work);
+
+ rc = register_netdev(ndev);
+ if (rc)
+ goto free_netdev;
+
+ tty->receive_room = 64 * 1024;
+ tty->disc_data = dev;
+
+ return 0;
+
+free_netdev:
+ free_netdev(ndev);
+
+free_ida:
+ ida_free(&mctp_serial_ida, idx);
+ return rc;
+}
+
+static void mctp_serial_close(struct tty_struct *tty)
+{
+ struct mctp_serial *dev = tty->disc_data;
+ int idx = dev->idx;
+
+ unregister_netdev(dev->netdev);
+ cancel_work_sync(&dev->tx_work);
+ ida_free(&mctp_serial_ida, idx);
+}
+
+static struct tty_ldisc_ops mctp_ldisc = {
+ .owner = THIS_MODULE,
+ .num = N_MCTP,
+ .name = "mctp",
+ .open = mctp_serial_open,
+ .close = mctp_serial_close,
+ .receive_buf = mctp_serial_tty_receive_buf,
+ .write_wakeup = mctp_serial_tty_write_wakeup,
+};
+
+static int __init mctp_serial_init(void)
+{
+ return tty_register_ldisc(&mctp_ldisc);
+}
+
+static void __exit mctp_serial_exit(void)
+{
+ tty_unregister_ldisc(&mctp_ldisc);
+}
+
+module_init(mctp_serial_init);
+module_exit(mctp_serial_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jeremy Kerr <jk@codeconstruct.com.au>");
+MODULE_DESCRIPTION("MCTP Serial transport");
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index 0ab6a40be611..2b84169bf3a2 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -65,7 +65,9 @@ static int nsim_set_coalesce(struct net_device *dev,
}
static void nsim_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct netdevsim *ns = netdev_priv(dev);
@@ -73,7 +75,9 @@ static void nsim_get_ringparam(struct net_device *dev,
}
static int nsim_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct netdevsim *ns = netdev_priv(dev);
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index 7113925606f7..b4ff9c5073a3 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -16,6 +16,7 @@
#include <dt-bindings/net/ti-dp83869.h>
#define DP83869_PHY_ID 0x2000a0f1
+#define DP83561_PHY_ID 0x2000a1a4
#define DP83869_DEVADDR 0x1f
#define MII_DP83869_PHYCTRL 0x10
@@ -878,34 +879,35 @@ static int dp83869_phy_reset(struct phy_device *phydev)
return dp83869_config_init(phydev);
}
-static struct phy_driver dp83869_driver[] = {
- {
- PHY_ID_MATCH_MODEL(DP83869_PHY_ID),
- .name = "TI DP83869",
-
- .probe = dp83869_probe,
- .config_init = dp83869_config_init,
- .soft_reset = dp83869_phy_reset,
-
- /* IRQ related */
- .config_intr = dp83869_config_intr,
- .handle_interrupt = dp83869_handle_interrupt,
- .read_status = dp83869_read_status,
- .get_tunable = dp83869_get_tunable,
- .set_tunable = dp83869_set_tunable,
+#define DP83869_PHY_DRIVER(_id, _name) \
+{ \
+ PHY_ID_MATCH_MODEL(_id), \
+ .name = (_name), \
+ .probe = dp83869_probe, \
+ .config_init = dp83869_config_init, \
+ .soft_reset = dp83869_phy_reset, \
+ .config_intr = dp83869_config_intr, \
+ .handle_interrupt = dp83869_handle_interrupt, \
+ .read_status = dp83869_read_status, \
+ .get_tunable = dp83869_get_tunable, \
+ .set_tunable = dp83869_set_tunable, \
+ .get_wol = dp83869_get_wol, \
+ .set_wol = dp83869_set_wol, \
+ .suspend = genphy_suspend, \
+ .resume = genphy_resume, \
+}
- .get_wol = dp83869_get_wol,
- .set_wol = dp83869_set_wol,
+static struct phy_driver dp83869_driver[] = {
+ DP83869_PHY_DRIVER(DP83869_PHY_ID, "TI DP83869"),
+ DP83869_PHY_DRIVER(DP83561_PHY_ID, "TI DP83561-SP"),
- .suspend = genphy_suspend,
- .resume = genphy_resume,
- },
};
module_phy_driver(dp83869_driver);
static struct mdio_device_id __maybe_unused dp83869_tbl[] = {
{ PHY_ID_MATCH_MODEL(DP83869_PHY_ID) },
+ { PHY_ID_MATCH_MODEL(DP83561_PHY_ID) },
{ }
};
MODULE_DEVICE_TABLE(mdio, dp83869_tbl);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index c204067f1890..9b6f2df07211 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -176,9 +176,11 @@ static void mdiobus_release(struct device *d)
{
struct mii_bus *bus = to_mii_bus(d);
- BUG_ON(bus->state != MDIOBUS_RELEASED &&
- /* for compatibility with error handling in drivers */
- bus->state != MDIOBUS_ALLOCATED);
+ WARN(bus->state != MDIOBUS_RELEASED &&
+ /* for compatibility with error handling in drivers */
+ bus->state != MDIOBUS_ALLOCATED,
+ "%s: not in RELEASED or ALLOCATED state\n",
+ bus->id);
kfree(bus);
}
@@ -529,8 +531,9 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
bus->parent->of_node->fwnode.flags |=
FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD;
- BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
- bus->state != MDIOBUS_UNREGISTERED);
+ WARN(bus->state != MDIOBUS_ALLOCATED &&
+ bus->state != MDIOBUS_UNREGISTERED,
+ "%s: not in ALLOCATED or UNREGISTERED state\n", bus->id);
bus->owner = owner;
bus->dev.parent = bus->parent;
@@ -658,7 +661,8 @@ void mdiobus_free(struct mii_bus *bus)
return;
}
- BUG_ON(bus->state != MDIOBUS_UNREGISTERED);
+ WARN(bus->state != MDIOBUS_UNREGISTERED,
+ "%s: not in UNREGISTERED state\n", bus->id);
bus->state = MDIOBUS_RELEASED;
put_device(&bus->dev);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 5904546acae6..eacbb0e6a24b 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -166,6 +166,259 @@ static const char *phylink_an_mode_str(unsigned int mode)
return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown";
}
+static void phylink_caps_to_linkmodes(unsigned long *linkmodes,
+ unsigned long caps)
+{
+ if (caps & MAC_SYM_PAUSE)
+ __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes);
+
+ if (caps & MAC_ASYM_PAUSE)
+ __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes);
+
+ if (caps & MAC_10HD)
+ __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, linkmodes);
+
+ if (caps & MAC_10FD)
+ __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, linkmodes);
+
+ if (caps & MAC_100HD) {
+ __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100baseFX_Half_BIT, linkmodes);
+ }
+
+ if (caps & MAC_100FD) {
+ __set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_1000HD)
+ __set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, linkmodes);
+
+ if (caps & MAC_1000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_2500FD) {
+ __set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_5000FD)
+ __set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, linkmodes);
+
+ if (caps & MAC_10000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_25000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_40000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_50000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_56000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_100000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_100000baseDR_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_200000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, linkmodes);
+ }
+
+ if (caps & MAC_400000FD) {
+ __set_bit(ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
+ linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, linkmodes);
+ }
+}
+
+/**
+ * phylink_get_linkmodes() - get acceptable link modes
+ * @linkmodes: ethtool linkmode mask (must be already initialised)
+ * @interface: phy interface mode defined by &typedef phy_interface_t
+ * @mac_capabilities: bitmask of MAC capabilities
+ *
+ * Set all possible pause, speed and duplex linkmodes in @linkmodes that
+ * are supported by the @interface mode and @mac_capabilities. @linkmodes
+ * must have been initialised previously.
+ */
+void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface,
+ unsigned long mac_capabilities)
+{
+ unsigned long caps = MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_USXGMII:
+ caps |= MAC_10000FD | MAC_5000FD | MAC_2500FD;
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_GMII:
+ caps |= MAC_1000HD | MAC_1000FD;
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_REVRMII:
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_SMII:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_MII:
+ caps |= MAC_10HD | MAC_10FD;
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_100BASEX:
+ caps |= MAC_100HD | MAC_100FD;
+ break;
+
+ case PHY_INTERFACE_MODE_TBI:
+ case PHY_INTERFACE_MODE_MOCA:
+ case PHY_INTERFACE_MODE_RTBI:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ caps |= MAC_1000HD;
+ fallthrough;
+ case PHY_INTERFACE_MODE_TRGMII:
+ caps |= MAC_1000FD;
+ break;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ caps |= MAC_2500FD;
+ break;
+
+ case PHY_INTERFACE_MODE_5GBASER:
+ caps |= MAC_5000FD;
+ break;
+
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_10GKR:
+ caps |= MAC_10000FD;
+ break;
+
+ case PHY_INTERFACE_MODE_25GBASER:
+ caps |= MAC_25000FD;
+ break;
+
+ case PHY_INTERFACE_MODE_XLGMII:
+ caps |= MAC_40000FD;
+ break;
+
+ case PHY_INTERFACE_MODE_INTERNAL:
+ caps |= ~0;
+ break;
+
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_MAX:
+ break;
+ }
+
+ phylink_caps_to_linkmodes(linkmodes, caps & mac_capabilities);
+}
+EXPORT_SYMBOL_GPL(phylink_get_linkmodes);
+
+/**
+ * phylink_generic_validate() - generic validate() callback implementation
+ * @config: a pointer to a &struct phylink_config.
+ * @supported: ethtool bitmask for supported link modes.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Generic implementation of the validate() callback that MAC drivers can
+ * use when they pass the range of supported interfaces and MAC capabilities.
+ * This makes use of phylink_get_linkmodes().
+ */
+void phylink_generic_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+ phylink_get_linkmodes(mask, state->interface, config->mac_capabilities);
+
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
+}
+EXPORT_SYMBOL_GPL(phylink_generic_validate);
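A sketch of a MAC driver adopting the new helper; the foo_* names are hypothetical and only the validate-related pieces are shown:

static const struct phylink_mac_ops foo_phylink_mac_ops = {
	.validate = phylink_generic_validate,
	/* .mac_config, .mac_link_up, ... as before */
};

static void foo_phylink_init(struct phylink_config *config)
{
	/* The linkmode masks are derived via phylink_get_linkmodes() */
	config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000FD;
}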
+
static int phylink_validate_any(struct phylink *pl, unsigned long *supported,
struct phylink_link_state *state)
{
@@ -1096,7 +1349,8 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
mutex_unlock(&phy->lock);
phylink_dbg(pl,
- "phy: setting supported %*pb advertising %*pb\n",
+ "phy: %s setting supported %*pb advertising %*pb\n",
+ phy_modes(interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported,
__ETHTOOL_LINK_MODE_MASK_NBITS, phy->advertising);
@@ -1214,6 +1468,12 @@ int phylink_fwnode_phy_connect(struct phylink *pl,
if (!phy_dev)
return -ENODEV;
+ /* Use PHY device/driver interface */
+ if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
+ pl->link_interface = phy_dev->interface;
+ pl->link_config.interface = pl->link_interface;
+ }
+
ret = phy_attach_direct(pl->netdev, phy_dev, flags,
pl->link_interface);
if (ret) {
@@ -2586,31 +2846,22 @@ void phylink_decode_usxgmii_word(struct phylink_link_state *state,
EXPORT_SYMBOL_GPL(phylink_decode_usxgmii_word);
/**
- * phylink_mii_c22_pcs_get_state() - read the MAC PCS state
- * @pcs: a pointer to a &struct mdio_device.
+ * phylink_mii_c22_pcs_decode_state() - Decode MAC PCS state from MII registers
* @state: a pointer to a &struct phylink_link_state.
+ * @bmsr: The value of the %MII_BMSR register
+ * @lpa: The value of the %MII_LPA register
*
* Helper for MAC PCS supporting the 802.3 clause 22 register set for
* clause 37 negotiation and/or SGMII control.
*
- * Read the MAC PCS state from the MII device configured in @config and
- * parse the Clause 37 or Cisco SGMII link partner negotiation word into
- * the phylink @state structure. This is suitable to be directly plugged
- * into the mac_pcs_get_state() member of the struct phylink_mac_ops
- * structure.
+ * Parse the Clause 37 or Cisco SGMII link partner negotiation word into
+ * the phylink @state structure. This is suitable to be used for implementing
+ * the mac_pcs_get_state() member of the struct phylink_mac_ops structure if
+ * accessing @bmsr and @lpa cannot be done with MDIO directly.
*/
-void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
- struct phylink_link_state *state)
+void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
+ u16 bmsr, u16 lpa)
{
- int bmsr, lpa;
-
- bmsr = mdiodev_read(pcs, MII_BMSR);
- lpa = mdiodev_read(pcs, MII_LPA);
- if (bmsr < 0 || lpa < 0) {
- state->link = false;
- return;
- }
-
state->link = !!(bmsr & BMSR_LSTATUS);
state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
/* If there is no link or autonegotiation is disabled, the LP advertisement
@@ -2638,28 +2889,54 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
break;
}
}
+EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_decode_state);
+
+/**
+ * phylink_mii_c22_pcs_get_state() - read the MAC PCS state
+ * @pcs: a pointer to a &struct mdio_device.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Helper for MAC PCS supporting the 802.3 clause 22 register set for
+ * clause 37 negotiation and/or SGMII control.
+ *
+ * Read the MAC PCS state from the MII device configured in @config and
+ * parse the Clause 37 or Cisco SGMII link partner negotiation word into
+ * the phylink @state structure. This is suitable to be directly plugged
+ * into the mac_pcs_get_state() member of the struct phylink_mac_ops
+ * structure.
+ */
+void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
+ struct phylink_link_state *state)
+{
+ int bmsr, lpa;
+
+ bmsr = mdiodev_read(pcs, MII_BMSR);
+ lpa = mdiodev_read(pcs, MII_LPA);
+ if (bmsr < 0 || lpa < 0) {
+ state->link = false;
+ return;
+ }
+
+ phylink_mii_c22_pcs_decode_state(state, bmsr, lpa);
+}
EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_get_state);
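The decode helper suits PCS implementations whose BMSR/LPA register images are not reachable over MDIO; a sketch (foo_pcs_read() is a hypothetical MMIO accessor):

static u16 foo_pcs_read(unsigned int reg);	/* hypothetical accessor */

static void foo_pcs_get_state(struct phylink_config *config,
			      struct phylink_link_state *state)
{
	/* Feed raw register images into the common decode logic */
	phylink_mii_c22_pcs_decode_state(state, foo_pcs_read(MII_BMSR),
					 foo_pcs_read(MII_LPA));
}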
/**
- * phylink_mii_c22_pcs_set_advertisement() - configure the clause 37 PCS
+ * phylink_mii_c22_pcs_encode_advertisement() - encode the clause 37 PCS
* advertisement
- * @pcs: a pointer to a &struct mdio_device.
* @interface: the PHY interface mode being configured
* @advertising: the ethtool advertisement mask
*
* Helper for MAC PCS supporting the 802.3 clause 22 register set for
* clause 37 negotiation and/or SGMII control.
*
- * Configure the clause 37 PCS advertisement as specified by @state. This
- * does not trigger a renegotiation; phylink will do that via the
- * mac_an_restart() method of the struct phylink_mac_ops structure.
+ * Encode the clause 37 PCS advertisement as specified by @interface and
+ * @advertising.
*
- * Returns negative error code on failure to configure the advertisement,
- * zero if no change has been made, or one if the advertisement has changed.
+ * Return: The new value for the advertisement register, or ``-EINVAL``
+ * if it should not be changed.
*/
-int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs,
- phy_interface_t interface,
- const unsigned long *advertising)
+int phylink_mii_c22_pcs_encode_advertisement(phy_interface_t interface,
+ const unsigned long *advertising)
{
u16 adv;
@@ -2673,18 +2950,15 @@ int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs,
if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
advertising))
adv |= ADVERTISE_1000XPSE_ASYM;
-
- return mdiodev_modify_changed(pcs, MII_ADVERTISE, 0xffff, adv);
-
+ return adv;
case PHY_INTERFACE_MODE_SGMII:
- return mdiodev_modify_changed(pcs, MII_ADVERTISE, 0xffff, 0x0001);
-
+ return 0x0001;
default:
/* Nothing to do for other modes */
- return 0;
+ return -EINVAL;
}
}
-EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_set_advertisement);
+EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_encode_advertisement);
/**
* phylink_mii_c22_pcs_config() - configure clause 22 PCS
@@ -2702,16 +2976,18 @@ int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
phy_interface_t interface,
const unsigned long *advertising)
{
- bool changed;
+ bool changed = false;
u16 bmcr;
- int ret;
-
- ret = phylink_mii_c22_pcs_set_advertisement(pcs, interface,
- advertising);
- if (ret < 0)
- return ret;
+ int ret, adv;
- changed = ret > 0;
+ adv = phylink_mii_c22_pcs_encode_advertisement(interface, advertising);
+ if (adv >= 0) {
+ ret = mdiobus_modify_changed(pcs->bus, pcs->addr,
+ MII_ADVERTISE, 0xffff, adv);
+ if (ret < 0)
+ return ret;
+ changed = ret;
+ }
/* Ensure ISOLATE bit is disabled */
if (mode == MLO_AN_INBAND &&
@@ -2724,7 +3000,7 @@ int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
if (ret < 0)
return ret;
- return changed ? 1 : 0;
+ return changed;
}
EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_config);
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index ea8aa8c33241..1a627ba4b850 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1377,11 +1377,12 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.phy_id = 0x03;
dev->mii.supports_gmii = 1;
- dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM;
+ dev->net->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_TSO;
- dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM;
+ dev->net->hw_features |= dev->net->features;
+
+ netif_set_gso_max_size(dev->net, 16384);
/* Enable checksum offload */
*tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
@@ -1526,17 +1527,19 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
u32 tx_hdr1, tx_hdr2;
int frame_size = dev->maxpacket;
- int mss = skb_shinfo(skb)->gso_size;
int headroom;
void *ptr;
tx_hdr1 = skb->len;
- tx_hdr2 = mss;
+ tx_hdr2 = skb_shinfo(skb)->gso_size; /* Set TSO mss */
if (((skb->len + 8) % frame_size) == 0)
tx_hdr2 |= 0x80008000; /* Enable padding */
headroom = skb_headroom(skb) - 8;
+ if ((dev->net->features & NETIF_F_SG) && skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
if ((skb_header_cloned(skb) || headroom < 0) &&
pskb_expand_head(skb, headroom < 0 ? 8 : 0, 0, GFP_ATOMIC)) {
dev_kfree_skb_any(skb);
@@ -1547,6 +1550,8 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
put_unaligned_le32(tx_hdr1, ptr);
put_unaligned_le32(tx_hdr2, ptr + 4);
+ usbnet_set_skb_tx_stats(skb, (skb_shinfo(skb)->gso_segs ?: 1), 0);
+
return skb;
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f20376c1ef3f..2bfb59ae0eaf 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -67,7 +67,7 @@
#define DEFAULT_TSO_CSUM_ENABLE (true)
#define DEFAULT_VLAN_FILTER_ENABLE (true)
#define DEFAULT_VLAN_RX_OFFLOAD (true)
-#define TX_OVERHEAD (8)
+#define TX_ALIGNMENT (4)
#define RXW_PADDING 2
#define LAN78XX_USB_VENDOR_ID (0x0424)
@@ -90,6 +90,41 @@
WAKE_MCAST | WAKE_BCAST | \
WAKE_ARP | WAKE_MAGIC)
+#define LAN78XX_NAPI_WEIGHT 64
+
+#define TX_URB_NUM 10
+#define TX_SS_URB_NUM TX_URB_NUM
+#define TX_HS_URB_NUM TX_URB_NUM
+#define TX_FS_URB_NUM TX_URB_NUM
+
+/* A single URB buffer must be large enough to hold a complete jumbo packet
+ */
+#define TX_SS_URB_SIZE (32 * 1024)
+#define TX_HS_URB_SIZE (16 * 1024)
+#define TX_FS_URB_SIZE (10 * 1024)
+
+#define RX_SS_URB_NUM 30
+#define RX_HS_URB_NUM 10
+#define RX_FS_URB_NUM 10
+#define RX_SS_URB_SIZE TX_SS_URB_SIZE
+#define RX_HS_URB_SIZE TX_HS_URB_SIZE
+#define RX_FS_URB_SIZE TX_FS_URB_SIZE
+
+#define SS_BURST_CAP_SIZE RX_SS_URB_SIZE
+#define SS_BULK_IN_DELAY 0x2000
+#define HS_BURST_CAP_SIZE RX_HS_URB_SIZE
+#define HS_BULK_IN_DELAY 0x2000
+#define FS_BURST_CAP_SIZE RX_FS_URB_SIZE
+#define FS_BULK_IN_DELAY 0x2000
+
+#define TX_CMD_LEN 8
+#define TX_SKB_MIN_LEN (TX_CMD_LEN + ETH_HLEN)
+#define LAN78XX_TSO_SIZE(dev) ((dev)->tx_urb_size - TX_SKB_MIN_LEN)
+
+#define RX_CMD_LEN 10
+#define RX_SKB_MIN_LEN (RX_CMD_LEN + ETH_HLEN)
+#define RX_MAX_FRAME_LEN(mtu) ((mtu) + ETH_HLEN + VLAN_HLEN)
+
/* USB related defines */
#define BULK_IN_PIPE 1
#define BULK_OUT_PIPE 2
@@ -385,14 +420,22 @@ struct lan78xx_net {
struct usb_interface *intf;
void *driver_priv;
- int rx_qlen;
- int tx_qlen;
+ unsigned int tx_pend_data_len;
+ size_t n_tx_urbs;
+ size_t n_rx_urbs;
+ size_t tx_urb_size;
+ size_t rx_urb_size;
+
+ struct sk_buff_head rxq_free;
struct sk_buff_head rxq;
+ struct sk_buff_head rxq_done;
+ struct sk_buff_head rxq_overflow;
+ struct sk_buff_head txq_free;
struct sk_buff_head txq;
- struct sk_buff_head done;
struct sk_buff_head txq_pend;
- struct tasklet_struct bh;
+ struct napi_struct napi;
+
struct delayed_work wq;
int msg_enable;
@@ -404,8 +447,8 @@ struct lan78xx_net {
struct mutex phy_mutex; /* for phy access */
unsigned int pipe_in, pipe_out, pipe_intr;
- u32 hard_mtu; /* count any extra framing */
- size_t rx_urb_size; /* size for rx urbs */
+ unsigned int bulk_in_delay;
+ unsigned int burst_cap;
unsigned long flags;
@@ -443,6 +486,129 @@ static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
+static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
+{
+ if (skb_queue_empty(buf_pool))
+ return NULL;
+
+ return skb_dequeue(buf_pool);
+}
+
+static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
+ struct sk_buff *buf)
+{
+ buf->data = buf->head;
+ skb_reset_tail_pointer(buf);
+
+ buf->len = 0;
+ buf->data_len = 0;
+
+ skb_queue_tail(buf_pool, buf);
+}
+
+static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
+{
+ struct skb_data *entry;
+ struct sk_buff *buf;
+
+ while (!skb_queue_empty(buf_pool)) {
+ buf = skb_dequeue(buf_pool);
+ if (buf) {
+ entry = (struct skb_data *)buf->cb;
+ usb_free_urb(entry->urb);
+ dev_kfree_skb_any(buf);
+ }
+ }
+}
+
+static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
+ size_t n_urbs, size_t urb_size,
+ struct lan78xx_net *dev)
+{
+ struct skb_data *entry;
+ struct sk_buff *buf;
+ struct urb *urb;
+ int i;
+
+ skb_queue_head_init(buf_pool);
+
+ for (i = 0; i < n_urbs; i++) {
+ buf = alloc_skb(urb_size, GFP_ATOMIC);
+ if (!buf)
+ goto error;
+
+ if (skb_linearize(buf) != 0) {
+ dev_kfree_skb_any(buf);
+ goto error;
+ }
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ dev_kfree_skb_any(buf);
+ goto error;
+ }
+
+ entry = (struct skb_data *)buf->cb;
+ entry->urb = urb;
+ entry->dev = dev;
+ entry->length = 0;
+ entry->num_of_packet = 0;
+
+ skb_queue_tail(buf_pool, buf);
+ }
+
+ return 0;
+
+error:
+ lan78xx_free_buf_pool(buf_pool);
+
+ return -ENOMEM;
+}
+
+static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
+{
+ return lan78xx_get_buf(&dev->rxq_free);
+}
+
+static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
+ struct sk_buff *rx_buf)
+{
+ lan78xx_release_buf(&dev->rxq_free, rx_buf);
+}
+
+static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
+{
+ lan78xx_free_buf_pool(&dev->rxq_free);
+}
+
+static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
+{
+ return lan78xx_alloc_buf_pool(&dev->rxq_free,
+ dev->n_rx_urbs, dev->rx_urb_size, dev);
+}
+
+static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
+{
+ return lan78xx_get_buf(&dev->txq_free);
+}
+
+static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
+ struct sk_buff *tx_buf)
+{
+ lan78xx_release_buf(&dev->txq_free, tx_buf);
+}
+
+static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
+{
+ lan78xx_free_buf_pool(&dev->txq_free);
+}
+
+static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
+{
+ return lan78xx_alloc_buf_pool(&dev->txq_free,
+ dev->n_tx_urbs, dev->tx_urb_size, dev);
+}
+
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
u32 *buf;
@@ -1200,6 +1366,8 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
return 0;
}
+static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
+
static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
unsigned long start_time = jiffies;
@@ -1331,7 +1499,9 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
jiffies + STAT_UPDATE_TIMER);
}
- tasklet_schedule(&dev->bh);
+ lan78xx_rx_urb_submit_all(dev);
+
+ napi_schedule(&dev->napi);
}
return 0;
@@ -2371,37 +2541,24 @@ found:
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
struct lan78xx_net *dev = netdev_priv(netdev);
- int ll_mtu = new_mtu + netdev->hard_header_len;
- int old_hard_mtu = dev->hard_mtu;
- int old_rx_urb_size = dev->rx_urb_size;
+ int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
int ret;
/* no second zero-length packet read wanted after mtu-sized packets */
- if ((ll_mtu % dev->maxpacket) == 0)
+ if ((max_frame_len % dev->maxpacket) == 0)
return -EDOM;
ret = usb_autopm_get_interface(dev->intf);
if (ret < 0)
return ret;
- lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
-
- netdev->mtu = new_mtu;
-
- dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
- if (dev->rx_urb_size == old_hard_mtu) {
- dev->rx_urb_size = dev->hard_mtu;
- if (dev->rx_urb_size > old_rx_urb_size) {
- if (netif_running(dev->net)) {
- unlink_urbs(dev, &dev->rxq);
- tasklet_schedule(&dev->bh);
- }
- }
- }
+ ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
+ if (!ret)
+ netdev->mtu = new_mtu;
usb_autopm_put_interface(dev->intf);
- return 0;
+ return ret;
}
static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
@@ -2557,6 +2714,44 @@ static void lan78xx_init_ltm(struct lan78xx_net *dev)
lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
+static int lan78xx_urb_config_init(struct lan78xx_net *dev)
+{
+ int result = 0;
+
+ switch (dev->udev->speed) {
+ case USB_SPEED_SUPER:
+ dev->rx_urb_size = RX_SS_URB_SIZE;
+ dev->tx_urb_size = TX_SS_URB_SIZE;
+ dev->n_rx_urbs = RX_SS_URB_NUM;
+ dev->n_tx_urbs = TX_SS_URB_NUM;
+ dev->bulk_in_delay = SS_BULK_IN_DELAY;
+ dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
+ break;
+ case USB_SPEED_HIGH:
+ dev->rx_urb_size = RX_HS_URB_SIZE;
+ dev->tx_urb_size = TX_HS_URB_SIZE;
+ dev->n_rx_urbs = RX_HS_URB_NUM;
+ dev->n_tx_urbs = TX_HS_URB_NUM;
+ dev->bulk_in_delay = HS_BULK_IN_DELAY;
+ dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
+ break;
+ case USB_SPEED_FULL:
+ dev->rx_urb_size = RX_FS_URB_SIZE;
+ dev->tx_urb_size = TX_FS_URB_SIZE;
+ dev->n_rx_urbs = RX_FS_URB_NUM;
+ dev->n_tx_urbs = TX_FS_URB_NUM;
+ dev->bulk_in_delay = FS_BULK_IN_DELAY;
+ dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
+ break;
+ default:
+ netdev_warn(dev->net, "USB bus speed not supported\n");
+ result = -EIO;
+ break;
+ }
+
+ return result;
+}
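
The values cached here are consumed at two later points in this patch:
lan78xx_reset() writes burst_cap and bulk_in_delay into the BURST_CAP
and BULK_IN_DLY registers, and lan78xx_probe() sizes the URB pools from
n_rx_urbs/n_tx_urbs. The probe-time ordering, copied from the
lan78xx_probe() hunk further down:

/* Speed-dependent sizing must run before the buffer pools are
 * allocated, since the pool dimensions depend on it.
 */
ret = lan78xx_urb_config_init(dev);
if (ret < 0)
        goto out2;

ret = lan78xx_alloc_tx_resources(dev);
if (ret < 0)
        goto out2;

ret = lan78xx_alloc_rx_resources(dev);
if (ret < 0)
        goto out3;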
+
static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
@@ -2764,28 +2959,11 @@ static int lan78xx_reset(struct lan78xx_net *dev)
/* Init LTM */
lan78xx_init_ltm(dev);
- if (dev->udev->speed == USB_SPEED_SUPER) {
- buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
- dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
- dev->rx_qlen = 4;
- dev->tx_qlen = 4;
- } else if (dev->udev->speed == USB_SPEED_HIGH) {
- buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
- dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
- dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
- dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
- } else {
- buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
- dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
- dev->rx_qlen = 4;
- dev->tx_qlen = 4;
- }
-
- ret = lan78xx_write_reg(dev, BURST_CAP, buf);
+ ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
if (ret < 0)
return ret;
- ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
+ ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
if (ret < 0)
return ret;
@@ -2898,7 +3076,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
return ret;
ret = lan78xx_set_rx_max_frame_length(dev,
- dev->net->mtu + VLAN_ETH_HLEN);
+ RX_MAX_FRAME_LEN(dev->net->mtu));
return ret;
}
@@ -2978,6 +3156,8 @@ static int lan78xx_open(struct net_device *net)
dev->link_on = false;
+ napi_enable(&dev->napi);
+
lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
mutex_unlock(&dev->dev_mutex);
@@ -3011,15 +3191,16 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
dev->wait = NULL;
remove_wait_queue(&unlink_wakeup, &wait);
- while (!skb_queue_empty(&dev->done)) {
- struct skb_data *entry;
- struct sk_buff *skb;
+ /* empty Rx done, Rx overflow and Tx pend queues
+ */
+ while (!skb_queue_empty(&dev->rxq_done)) {
+ struct sk_buff *skb = skb_dequeue(&dev->rxq_done);
- skb = skb_dequeue(&dev->done);
- entry = (struct skb_data *)(skb->cb);
- usb_free_urb(entry->urb);
- dev_kfree_skb(skb);
+ lan78xx_release_rx_buf(dev, skb);
}
+
+ skb_queue_purge(&dev->rxq_overflow);
+ skb_queue_purge(&dev->txq_pend);
}
static int lan78xx_stop(struct net_device *net)
@@ -3035,7 +3216,7 @@ static int lan78xx_stop(struct net_device *net)
clear_bit(EVENT_DEV_OPEN, &dev->flags);
netif_stop_queue(net);
- tasklet_kill(&dev->bh);
+ napi_disable(&dev->napi);
lan78xx_terminate_urbs(dev);
@@ -3071,48 +3252,6 @@ static int lan78xx_stop(struct net_device *net)
return 0;
}
-static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
- struct sk_buff *skb, gfp_t flags)
-{
- u32 tx_cmd_a, tx_cmd_b;
- void *ptr;
-
- if (skb_cow_head(skb, TX_OVERHEAD)) {
- dev_kfree_skb_any(skb);
- return NULL;
- }
-
- if (skb_linearize(skb)) {
- dev_kfree_skb_any(skb);
- return NULL;
- }
-
- tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
-
- tx_cmd_b = 0;
- if (skb_is_gso(skb)) {
- u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
-
- tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
-
- tx_cmd_a |= TX_CMD_A_LSO_;
- }
-
- if (skb_vlan_tag_present(skb)) {
- tx_cmd_a |= TX_CMD_A_IVTG_;
- tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
- }
-
- ptr = skb_push(skb, 8);
- put_unaligned_le32(tx_cmd_a, ptr);
- put_unaligned_le32(tx_cmd_b, ptr + 4);
-
- return skb;
-}
-
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
struct sk_buff_head *list, enum skb_state state)
{
@@ -3126,12 +3265,13 @@ static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
__skb_unlink(skb, list);
spin_unlock(&list->lock);
- spin_lock(&dev->done.lock);
+ spin_lock(&dev->rxq_done.lock);
+
+ __skb_queue_tail(&dev->rxq_done, skb);
+ if (skb_queue_len(&dev->rxq_done) == 1)
+ napi_schedule(&dev->napi);
- __skb_queue_tail(&dev->done, skb);
- if (skb_queue_len(&dev->done) == 1)
- tasklet_schedule(&dev->bh);
- spin_unlock_irqrestore(&dev->done.lock, flags);
+ spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
return old_state;
}
@@ -3146,7 +3286,7 @@ static void tx_complete(struct urb *urb)
dev->net->stats.tx_packets += entry->num_of_packet;
dev->net->stats.tx_bytes += entry->length;
} else {
- dev->net->stats.tx_errors++;
+ dev->net->stats.tx_errors += entry->num_of_packet;
switch (urb->status) {
case -EPIPE:
@@ -3179,7 +3319,15 @@ static void tx_complete(struct urb *urb)
usb_autopm_put_interface_async(dev->intf);
- defer_bh(dev, skb, &dev->txq, tx_done);
+ skb_unlink(skb, &dev->txq);
+
+ lan78xx_release_tx_buf(dev, skb);
+
+ /* Re-schedule NAPI if Tx data is pending but no URBs are in progress.
+ */
+ if (skb_queue_empty(&dev->txq) &&
+ !skb_queue_empty(&dev->txq_pend))
+ napi_schedule(&dev->napi);
}
static void lan78xx_queue_skb(struct sk_buff_head *list,
@@ -3191,35 +3339,96 @@ static void lan78xx_queue_skb(struct sk_buff_head *list,
entry->state = state;
}
+static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
+{
+ return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
+}
+
+static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
+{
+ return dev->tx_pend_data_len;
+}
+
+static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
+ struct sk_buff *skb,
+ unsigned int *tx_pend_data_len)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->txq_pend.lock, flags);
+
+ __skb_queue_tail(&dev->txq_pend, skb);
+
+ dev->tx_pend_data_len += skb->len;
+ *tx_pend_data_len = dev->tx_pend_data_len;
+
+ spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
+}
+
+static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
+ struct sk_buff *skb,
+ unsigned int *tx_pend_data_len)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->txq_pend.lock, flags);
+
+ __skb_queue_head(&dev->txq_pend, skb);
+
+ dev->tx_pend_data_len += skb->len;
+ *tx_pend_data_len = dev->tx_pend_data_len;
+
+ spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
+}
+
+static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
+ struct sk_buff **skb,
+ unsigned int *tx_pend_data_len)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->txq_pend.lock, flags);
+
+ *skb = __skb_dequeue(&dev->txq_pend);
+ if (*skb)
+ dev->tx_pend_data_len -= (*skb)->len;
+ *tx_pend_data_len = dev->tx_pend_data_len;
+
+ spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
+}
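
These three helpers keep dev->tx_pend_data_len equal to the byte total
of dev->txq_pend under the queue lock; the count returned through
*tx_pend_data_len gives callers a consistent snapshot without
re-taking the lock. A hypothetical debug assertion (not in the patch)
that states the invariant:

/* Hypothetical self-check: the cached byte count must match the
 * queue contents whenever the lock is held.
 */
static void lan78xx_assert_pend_len(struct lan78xx_net *dev)
{
        unsigned int len = 0;
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&dev->txq_pend.lock, flags);

        skb_queue_walk(&dev->txq_pend, skb)
                len += skb->len;

        WARN_ON(len != dev->tx_pend_data_len);

        spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
}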
+
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
struct lan78xx_net *dev = netdev_priv(net);
- struct sk_buff *skb2 = NULL;
+ unsigned int tx_pend_data_len;
if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
schedule_delayed_work(&dev->wq, 0);
- if (skb) {
- skb_tx_timestamp(skb);
- skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
- }
+ skb_tx_timestamp(skb);
- if (skb2) {
- skb_queue_tail(&dev->txq_pend, skb2);
+ lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);
- /* throttle TX patch at slower than SUPER SPEED USB */
- if ((dev->udev->speed < USB_SPEED_SUPER) &&
- (skb_queue_len(&dev->txq_pend) > 10))
- netif_stop_queue(net);
- } else {
- netif_dbg(dev, tx_err, dev->net,
- "lan78xx_tx_prep return NULL\n");
- dev->net->stats.tx_errors++;
- dev->net->stats.tx_dropped++;
- }
+ /* Set up a Tx URB if none is in progress */
+
+ if (skb_queue_empty(&dev->txq))
+ napi_schedule(&dev->napi);
+
+ /* Stop stack Tx queue if we have enough data to fill
+ * all the free Tx URBs.
+ */
+ if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
+ netif_stop_queue(net);
+
+ netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
+ tx_pend_data_len, lan78xx_tx_urb_space(dev));
- tasklet_schedule(&dev->bh);
+ /* Kick off transmission of pending data */
+
+ if (!skb_queue_empty(&dev->txq_free))
+ napi_schedule(&dev->napi);
+ }
return NETDEV_TX_OK;
}
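
The stop condition above is paired with a wake-up check in
lan78xx_tx_bh() further down. The same watermark (pending bytes versus
free-URB capacity) is used in both directions, so the stack queue
restarts as soon as URB completions free enough pool space:

/* Wake-up counterpart, from the lan78xx_tx_bh() hunk in this patch */
netif_tx_lock(dev->net);
if (netif_queue_stopped(dev->net) &&
    lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
        netif_wake_queue(dev->net);
netif_tx_unlock(dev->net);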
@@ -3276,9 +3485,6 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
goto out1;
}
- dev->net->hard_header_len += TX_OVERHEAD;
- dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
-
/* Init all registers */
ret = lan78xx_reset(dev);
if (ret) {
@@ -3357,8 +3563,6 @@ static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
- int status;
-
dev->net->stats.rx_packets++;
dev->net->stats.rx_bytes += skb->len;
@@ -3371,21 +3575,21 @@ static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
if (skb_defer_rx_timestamp(skb))
return;
- status = netif_rx(skb);
- if (status != NET_RX_SUCCESS)
- netif_dbg(dev, rx_err, dev->net,
- "netif_rx status %d\n", status);
+ napi_gro_receive(&dev->napi, skb);
}
-static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
+static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
+ int budget, int *work_done)
{
- if (skb->len < dev->net->hard_header_len)
+ if (skb->len < RX_SKB_MIN_LEN)
return 0;
+ /* Extract frames from the URB buffer and pass each one to
+ * the stack in a new NAPI SKB.
+ */
while (skb->len > 0) {
u32 rx_cmd_a, rx_cmd_b, align_count, size;
u16 rx_cmd_c;
- struct sk_buff *skb2;
unsigned char *packet;
rx_cmd_a = get_unaligned_le32(skb->data);
@@ -3407,41 +3611,36 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
netif_dbg(dev, rx_err, dev->net,
"Error rx_cmd_a=0x%08x", rx_cmd_a);
} else {
- /* last frame in this batch */
- if (skb->len == size) {
- lan78xx_rx_csum_offload(dev, skb,
- rx_cmd_a, rx_cmd_b);
- lan78xx_rx_vlan_offload(dev, skb,
- rx_cmd_a, rx_cmd_b);
-
- skb_trim(skb, skb->len - 4); /* remove fcs */
- skb->truesize = size + sizeof(struct sk_buff);
-
- return 1;
- }
+ u32 frame_len = size - ETH_FCS_LEN;
+ struct sk_buff *skb2;
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (unlikely(!skb2)) {
- netdev_warn(dev->net, "Error allocating skb");
+ skb2 = napi_alloc_skb(&dev->napi, frame_len);
+ if (!skb2)
return 0;
- }
- skb2->len = size;
- skb2->data = packet;
- skb_set_tail_pointer(skb2, size);
+ memcpy(skb2->data, packet, frame_len);
+
+ skb_put(skb2, frame_len);
lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
- skb_trim(skb2, skb2->len - 4); /* remove fcs */
- skb2->truesize = size + sizeof(struct sk_buff);
-
- lan78xx_skb_return(dev, skb2);
+ /* Processing of the URB buffer must complete once
+ * it has started. If the NAPI work budget is exhausted
+ * while frames remain they are added to the overflow
+ * queue for delivery in the next NAPI polling cycle.
+ */
+ if (*work_done < budget) {
+ lan78xx_skb_return(dev, skb2);
+ ++(*work_done);
+ } else {
+ skb_queue_tail(&dev->rxq_overflow, skb2);
+ }
}
skb_pull(skb, size);
- /* padding bytes before the next frame starts */
+ /* skip padding bytes before the next frame starts */
if (skb->len)
skb_pull(skb, align_count);
}
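
The per-frame metadata parse sits in unchanged context that this hunk
elides. Its approximate shape is sketched below; the field mask and the
RXW_PADDING constant come from the driver header and are an assumption
here, not part of the diff:

/* Approximate shape of the elided metadata parse (assumed) */
rx_cmd_a = get_unaligned_le32(skb->data);
skb_pull(skb, sizeof(rx_cmd_a));

rx_cmd_b = get_unaligned_le32(skb->data);
skb_pull(skb, sizeof(rx_cmd_b));

rx_cmd_c = get_unaligned_le16(skb->data);
skb_pull(skb, sizeof(rx_cmd_c));

packet = skb->data;

/* frame length, then padding to the next 4-byte boundary */
size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;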
@@ -3449,85 +3648,13 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
return 1;
}
-static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
+static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
+ int budget, int *work_done)
{
- if (!lan78xx_rx(dev, skb)) {
+ if (!lan78xx_rx(dev, skb, budget, work_done)) {
+ netif_dbg(dev, rx_err, dev->net, "drop\n");
dev->net->stats.rx_errors++;
- goto done;
- }
-
- if (skb->len) {
- lan78xx_skb_return(dev, skb);
- return;
}
-
- netif_dbg(dev, rx_err, dev->net, "drop\n");
- dev->net->stats.rx_errors++;
-done:
- skb_queue_tail(&dev->done, skb);
-}
-
-static void rx_complete(struct urb *urb);
-
-static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
-{
- struct sk_buff *skb;
- struct skb_data *entry;
- unsigned long lockflags;
- size_t size = dev->rx_urb_size;
- int ret = 0;
-
- skb = netdev_alloc_skb_ip_align(dev->net, size);
- if (!skb) {
- usb_free_urb(urb);
- return -ENOMEM;
- }
-
- entry = (struct skb_data *)skb->cb;
- entry->urb = urb;
- entry->dev = dev;
- entry->length = 0;
-
- usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
- skb->data, size, rx_complete, skb);
-
- spin_lock_irqsave(&dev->rxq.lock, lockflags);
-
- if (netif_device_present(dev->net) &&
- netif_running(dev->net) &&
- !test_bit(EVENT_RX_HALT, &dev->flags) &&
- !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- switch (ret) {
- case 0:
- lan78xx_queue_skb(&dev->rxq, skb, rx_start);
- break;
- case -EPIPE:
- lan78xx_defer_kevent(dev, EVENT_RX_HALT);
- break;
- case -ENODEV:
- case -ENOENT:
- netif_dbg(dev, ifdown, dev->net, "device gone\n");
- netif_device_detach(dev->net);
- break;
- case -EHOSTUNREACH:
- ret = -ENOLINK;
- break;
- default:
- netif_dbg(dev, rx_err, dev->net,
- "rx submit, %d\n", ret);
- tasklet_schedule(&dev->bh);
- }
- } else {
- netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
- ret = -ENOLINK;
- }
- spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
- if (ret) {
- dev_kfree_skb_any(skb);
- usb_free_urb(urb);
- }
- return ret;
}
static void rx_complete(struct urb *urb)
@@ -3538,13 +3665,18 @@ static void rx_complete(struct urb *urb)
int urb_status = urb->status;
enum skb_state state;
+ netif_dbg(dev, rx_status, dev->net,
+ "rx done: status %d", urb->status);
+
skb_put(skb, urb->actual_length);
state = rx_done;
- entry->urb = NULL;
+
+ if (urb != entry->urb)
+ netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");
switch (urb_status) {
case 0:
- if (skb->len < dev->net->hard_header_len) {
+ if (skb->len < RX_SKB_MIN_LEN) {
state = rx_cleanup;
dev->net->stats.rx_errors++;
dev->net->stats.rx_length_errors++;
@@ -3562,16 +3694,12 @@ static void rx_complete(struct urb *urb)
netif_dbg(dev, ifdown, dev->net,
"rx shutdown, code %d\n", urb_status);
state = rx_cleanup;
- entry->urb = urb;
- urb = NULL;
break;
case -EPROTO:
case -ETIME:
case -EILSEQ:
dev->net->stats.rx_errors++;
state = rx_cleanup;
- entry->urb = urb;
- urb = NULL;
break;
/* data overrun ... flush fifo? */
@@ -3587,203 +3715,327 @@ static void rx_complete(struct urb *urb)
}
state = defer_bh(dev, skb, &dev->rxq, state);
+}
- if (urb) {
- if (netif_running(dev->net) &&
- !test_bit(EVENT_RX_HALT, &dev->flags) &&
- state != unlink_start) {
- rx_submit(dev, urb, GFP_ATOMIC);
- return;
+static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
+{
+ struct skb_data *entry = (struct skb_data *)skb->cb;
+ size_t size = dev->rx_urb_size;
+ struct urb *urb = entry->urb;
+ unsigned long lockflags;
+ int ret = 0;
+
+ usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
+ skb->data, size, rx_complete, skb);
+
+ spin_lock_irqsave(&dev->rxq.lock, lockflags);
+
+ if (netif_device_present(dev->net) &&
+ netif_running(dev->net) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags) &&
+ !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+ ret = usb_submit_urb(urb, flags);
+ switch (ret) {
+ case 0:
+ lan78xx_queue_skb(&dev->rxq, skb, rx_start);
+ break;
+ case -EPIPE:
+ lan78xx_defer_kevent(dev, EVENT_RX_HALT);
+ break;
+ case -ENODEV:
+ case -ENOENT:
+ netif_dbg(dev, ifdown, dev->net, "device gone\n");
+ netif_device_detach(dev->net);
+ break;
+ case -EHOSTUNREACH:
+ ret = -ENOLINK;
+ napi_schedule(&dev->napi);
+ break;
+ default:
+ netif_dbg(dev, rx_err, dev->net,
+ "rx submit, %d\n", ret);
+ napi_schedule(&dev->napi);
+ break;
}
- usb_free_urb(urb);
+ } else {
+ netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
+ ret = -ENOLINK;
}
- netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
+ spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
+
+ if (ret)
+ lan78xx_release_rx_buf(dev, skb);
+
+ return ret;
}
-static void lan78xx_tx_bh(struct lan78xx_net *dev)
+static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
{
- int length;
- struct urb *urb = NULL;
- struct skb_data *entry;
- unsigned long flags;
- struct sk_buff_head *tqp = &dev->txq_pend;
- struct sk_buff *skb, *skb2;
- int ret;
- int count, pos;
- int skb_totallen, pkt_cnt;
-
- skb_totallen = 0;
- pkt_cnt = 0;
- count = 0;
- length = 0;
- spin_lock_irqsave(&tqp->lock, flags);
- skb_queue_walk(tqp, skb) {
- if (skb_is_gso(skb)) {
- if (!skb_queue_is_first(tqp, skb)) {
- /* handle previous packets first */
- break;
- }
- count = 1;
- length = skb->len - TX_OVERHEAD;
- __skb_unlink(skb, tqp);
- spin_unlock_irqrestore(&tqp->lock, flags);
- goto gso_skb;
- }
+ struct sk_buff *rx_buf;
- if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
+ /* Ensure the maximum number of Rx URBs is submitted
+ */
+ while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
+ if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
break;
- skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
- pkt_cnt++;
- }
- spin_unlock_irqrestore(&tqp->lock, flags);
-
- /* copy to a single skb */
- skb = alloc_skb(skb_totallen, GFP_ATOMIC);
- if (!skb)
- goto drop;
-
- skb_put(skb, skb_totallen);
-
- for (count = pos = 0; count < pkt_cnt; count++) {
- skb2 = skb_dequeue(tqp);
- if (skb2) {
- length += (skb2->len - TX_OVERHEAD);
- memcpy(skb->data + pos, skb2->data, skb2->len);
- pos += roundup(skb2->len, sizeof(u32));
- dev_kfree_skb(skb2);
- }
}
+}
-gso_skb:
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb)
- goto drop;
+static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
+ struct sk_buff *rx_buf)
+{
+ /* reset SKB data pointers */
- entry = (struct skb_data *)skb->cb;
- entry->urb = urb;
- entry->dev = dev;
- entry->length = length;
- entry->num_of_packet = count;
+ rx_buf->data = rx_buf->head;
+ skb_reset_tail_pointer(rx_buf);
+ rx_buf->len = 0;
+ rx_buf->data_len = 0;
- spin_lock_irqsave(&dev->txq.lock, flags);
- ret = usb_autopm_get_interface_async(dev->intf);
- if (ret < 0) {
- spin_unlock_irqrestore(&dev->txq.lock, flags);
- goto drop;
- }
+ rx_submit(dev, rx_buf, GFP_ATOMIC);
+}
- usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
- skb->data, skb->len, tx_complete, skb);
+static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
+{
+ u32 tx_cmd_a;
+ u32 tx_cmd_b;
- if (length % dev->maxpacket == 0) {
- /* send USB_ZERO_PACKET */
- urb->transfer_flags |= URB_ZERO_PACKET;
- }
+ tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
-#ifdef CONFIG_PM
- /* if this triggers the device is still a sleep */
- if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
- /* transmission will be done in resume */
- usb_anchor_urb(urb, &dev->deferred);
- /* no use to process more packets */
- netif_stop_queue(dev->net);
- usb_put_urb(urb);
- spin_unlock_irqrestore(&dev->txq.lock, flags);
- netdev_dbg(dev->net, "Delaying transmission for resumption\n");
- return;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
+
+ tx_cmd_b = 0;
+ if (skb_is_gso(skb)) {
+ u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
+
+ tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
+
+ tx_cmd_a |= TX_CMD_A_LSO_;
}
-#endif
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- switch (ret) {
- case 0:
- netif_trans_update(dev->net);
- lan78xx_queue_skb(&dev->txq, skb, tx_start);
- if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
- netif_stop_queue(dev->net);
- break;
- case -EPIPE:
- netif_stop_queue(dev->net);
- lan78xx_defer_kevent(dev, EVENT_TX_HALT);
- usb_autopm_put_interface_async(dev->intf);
- break;
- case -ENODEV:
- case -ENOENT:
- netif_dbg(dev, tx_err, dev->net,
- "tx: submit urb err %d (disconnected?)", ret);
- netif_device_detach(dev->net);
- break;
- default:
- usb_autopm_put_interface_async(dev->intf);
- netif_dbg(dev, tx_err, dev->net,
- "tx: submit urb err %d\n", ret);
- break;
+ if (skb_vlan_tag_present(skb)) {
+ tx_cmd_a |= TX_CMD_A_IVTG_;
+ tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
}
- spin_unlock_irqrestore(&dev->txq.lock, flags);
+ put_unaligned_le32(tx_cmd_a, buffer);
+ put_unaligned_le32(tx_cmd_b, buffer + 4);
+}
- if (ret) {
- netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
-drop:
- dev->net->stats.tx_dropped++;
- if (skb)
+static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
+ struct sk_buff *tx_buf)
+{
+ struct skb_data *entry = (struct skb_data *)tx_buf->cb;
+ int remain = dev->tx_urb_size;
+ u8 *tx_data = tx_buf->data;
+ u32 urb_len = 0;
+
+ entry->num_of_packet = 0;
+ entry->length = 0;
+
+ /* Work through the pending SKBs and copy the data of each SKB into
+ * the URB buffer if there is room for all the SKB data.
+ *
+ * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
+ */
+ while (remain >= TX_SKB_MIN_LEN) {
+ unsigned int pending_bytes;
+ unsigned int align_bytes;
+ struct sk_buff *skb;
+ unsigned int len;
+
+ lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);
+
+ if (!skb)
+ break;
+
+ align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
+ TX_ALIGNMENT;
+ len = align_bytes + TX_CMD_LEN + skb->len;
+ if (len > remain) {
+ lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
+ break;
+ }
+
+ tx_data += align_bytes;
+
+ lan78xx_fill_tx_cmd_words(skb, tx_data);
+ tx_data += TX_CMD_LEN;
+
+ len = skb->len;
+ if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
+ struct net_device_stats *stats = &dev->net->stats;
+
+ stats->tx_dropped++;
dev_kfree_skb_any(skb);
- usb_free_urb(urb);
- } else {
- netif_dbg(dev, tx_queued, dev->net,
- "> tx, len %d, type 0x%x\n", length, skb->protocol);
+ tx_data -= TX_CMD_LEN;
+ continue;
+ }
+
+ tx_data += len;
+ entry->length += len;
+ entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;
+
+ dev_kfree_skb_any(skb);
+
+ urb_len = (u32)(tx_data - (u8 *)tx_buf->data);
+
+ remain = dev->tx_urb_size - urb_len;
}
+
+ skb_put(tx_buf, urb_len);
+
+ return entry;
}
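
Each SKB copied into the URB is preceded by an 8-byte command header
(TX_CMD_LEN) and padded out to TX_ALIGNMENT. A quick worked example of
the padding formula, assuming TX_ALIGNMENT is 4 bytes:

/* With urb_len == 68 (60-byte frame + 8-byte header): 68 % 4 == 0,
 * so align_bytes == 0. With urb_len == 69: 69 % 4 == 1, so
 * align_bytes == (4 - 1) % 4 == 3 pad bytes before the next frame.
 */
align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) % TX_ALIGNMENT;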
-static void lan78xx_rx_bh(struct lan78xx_net *dev)
+static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
- struct urb *urb;
- int i;
+ int ret;
- if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
- for (i = 0; i < 10; i++) {
- if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
- break;
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (urb)
- if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
- return;
+ /* Start the stack Tx queue if it was stopped
+ */
+ netif_tx_lock(dev->net);
+ if (netif_queue_stopped(dev->net)) {
+ if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
+ netif_wake_queue(dev->net);
+ }
+ netif_tx_unlock(dev->net);
+
+ /* Go through the Tx pending queue and set up URBs to transfer
+ * the data to the device. Stop if no more pending data or URBs,
+ * or if an error occurs when a URB is submitted.
+ */
+ do {
+ struct skb_data *entry;
+ struct sk_buff *tx_buf;
+ unsigned long flags;
+
+ if (skb_queue_empty(&dev->txq_pend))
+ break;
+
+ tx_buf = lan78xx_get_tx_buf(dev);
+ if (!tx_buf)
+ break;
+
+ entry = lan78xx_tx_buf_fill(dev, tx_buf);
+
+ spin_lock_irqsave(&dev->txq.lock, flags);
+ ret = usb_autopm_get_interface_async(dev->intf);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&dev->txq.lock, flags);
+ goto out;
}
- if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
- tasklet_schedule(&dev->bh);
- }
- if (skb_queue_len(&dev->txq) < dev->tx_qlen)
- netif_wake_queue(dev->net);
+ usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
+ tx_buf->data, tx_buf->len, tx_complete,
+ tx_buf);
+
+ if (tx_buf->len % dev->maxpacket == 0) {
+ /* send USB_ZERO_PACKET */
+ entry->urb->transfer_flags |= URB_ZERO_PACKET;
+ }
+
+#ifdef CONFIG_PM
+ /* if the device is asleep, stop outgoing packet processing */
+ if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+ usb_anchor_urb(entry->urb, &dev->deferred);
+ netif_stop_queue(dev->net);
+ spin_unlock_irqrestore(&dev->txq.lock, flags);
+ netdev_dbg(dev->net,
+ "Delaying transmission for resumption\n");
+ return;
+ }
+#endif
+ ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
+ switch (ret) {
+ case 0:
+ netif_trans_update(dev->net);
+ lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
+ break;
+ case -EPIPE:
+ netif_stop_queue(dev->net);
+ lan78xx_defer_kevent(dev, EVENT_TX_HALT);
+ usb_autopm_put_interface_async(dev->intf);
+ break;
+ case -ENODEV:
+ case -ENOENT:
+ netif_dbg(dev, tx_err, dev->net,
+ "tx submit urb err %d (disconnected?)", ret);
+ netif_device_detach(dev->net);
+ break;
+ default:
+ usb_autopm_put_interface_async(dev->intf);
+ netif_dbg(dev, tx_err, dev->net,
+ "tx submit urb err %d\n", ret);
+ break;
+ }
+
+ spin_unlock_irqrestore(&dev->txq.lock, flags);
+
+ if (ret) {
+ netdev_warn(dev->net, "failed to tx urb %d\n", ret);
+out:
+ dev->net->stats.tx_dropped += entry->num_of_packet;
+ lan78xx_release_tx_buf(dev, tx_buf);
+ }
+ } while (ret == 0);
}
-static void lan78xx_bh(struct tasklet_struct *t)
+static int lan78xx_bh(struct lan78xx_net *dev, int budget)
{
- struct lan78xx_net *dev = from_tasklet(dev, t, bh);
- struct sk_buff *skb;
+ struct sk_buff_head done;
+ struct sk_buff *rx_buf;
struct skb_data *entry;
+ unsigned long flags;
+ int work_done = 0;
+
+ /* Pass frames received in the last NAPI cycle before
+ * working on newly completed URBs.
+ */
+ while (!skb_queue_empty(&dev->rxq_overflow)) {
+ lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
+ ++work_done;
+ }
+
+ /* Take a snapshot of the done queue and move items to a
+ * temporary queue. Rx URB completions will continue to add
+ * to the done queue.
+ */
+ __skb_queue_head_init(&done);
- while ((skb = skb_dequeue(&dev->done))) {
- entry = (struct skb_data *)(skb->cb);
+ spin_lock_irqsave(&dev->rxq_done.lock, flags);
+ skb_queue_splice_init(&dev->rxq_done, &done);
+ spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
+
+ /* Extract receive frames from completed URBs and
+ * pass them to the stack. Re-submit each completed URB.
+ */
+ while ((work_done < budget) &&
+ (rx_buf = __skb_dequeue(&done))) {
+ entry = (struct skb_data *)(rx_buf->cb);
switch (entry->state) {
case rx_done:
- entry->state = rx_cleanup;
- rx_process(dev, skb);
- continue;
- case tx_done:
- usb_free_urb(entry->urb);
- dev_kfree_skb(skb);
- continue;
+ rx_process(dev, rx_buf, budget, &work_done);
+ break;
case rx_cleanup:
- usb_free_urb(entry->urb);
- dev_kfree_skb(skb);
- continue;
+ break;
default:
- netdev_dbg(dev->net, "skb state %d\n", entry->state);
- return;
+ netdev_dbg(dev->net, "rx buf state %d\n",
+ entry->state);
+ break;
}
+
+ lan78xx_rx_urb_resubmit(dev, rx_buf);
}
+ /* If budget was consumed before processing all the URBs, put them
+ * back on the front of the done queue. They will be first to be
+ * processed in the next NAPI cycle.
+ */
+ spin_lock_irqsave(&dev->rxq_done.lock, flags);
+ skb_queue_splice(&done, &dev->rxq_done);
+ spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
+
if (netif_device_present(dev->net) && netif_running(dev->net)) {
/* reset update timer delta */
if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
@@ -3792,12 +4044,61 @@ static void lan78xx_bh(struct tasklet_struct *t)
jiffies + STAT_UPDATE_TIMER);
}
- if (!skb_queue_empty(&dev->txq_pend))
- lan78xx_tx_bh(dev);
+ /* Submit all free Rx URBs */
if (!test_bit(EVENT_RX_HALT, &dev->flags))
- lan78xx_rx_bh(dev);
+ lan78xx_rx_urb_submit_all(dev);
+
+ /* Submit new Tx URBs */
+
+ lan78xx_tx_bh(dev);
}
+
+ return work_done;
+}
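
The snapshot-and-splice idiom above (splice the shared done queue into
a private list under its lock, process lock-free, then splice any
remainder back to the head) is a standard way to bound lock hold
times. A minimal generic sketch of the pattern:

/* Generic shape of the snapshot pattern used in lan78xx_bh() */
static void process_snapshot(struct sk_buff_head *shared)
{
        struct sk_buff_head local;
        struct sk_buff *skb;
        unsigned long flags;

        __skb_queue_head_init(&local);

        /* grab everything queued so far in one short lock section */
        spin_lock_irqsave(&shared->lock, flags);
        skb_queue_splice_init(shared, &local);
        spin_unlock_irqrestore(&shared->lock, flags);

        /* work on the private list without holding the shared lock */
        while ((skb = __skb_dequeue(&local)))
                consume_skb(skb);
}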
+
+static int lan78xx_poll(struct napi_struct *napi, int budget)
+{
+ struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
+ int result = budget;
+ int work_done;
+
+ /* Don't do any work if the device is suspended */
+
+ if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+ napi_complete_done(napi, 0);
+ return 0;
+ }
+
+ /* Process completed URBs and submit new URBs */
+
+ work_done = lan78xx_bh(dev, budget);
+
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+
+ /* Start a new polling cycle if data was received or
+ * data is waiting to be transmitted.
+ */
+ if (!skb_queue_empty(&dev->rxq_done)) {
+ napi_schedule(napi);
+ } else if (netif_carrier_ok(dev->net)) {
+ if (skb_queue_empty(&dev->txq) &&
+ !skb_queue_empty(&dev->txq_pend)) {
+ napi_schedule(napi);
+ } else {
+ netif_tx_lock(dev->net);
+ if (netif_queue_stopped(dev->net)) {
+ netif_wake_queue(dev->net);
+ napi_schedule(napi);
+ }
+ netif_tx_unlock(dev->net);
+ }
+ }
+ result = work_done;
+ }
+
+ return result;
}
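
lan78xx_poll() follows the usual NAPI contract: consume the whole
budget to stay scheduled, or call napi_complete_done() and return less
than budget to stop polling. The explicit re-schedule checks after
completion close the race where new work arrives between the last
queue test and napi_complete_done(). The bare contract, as a generic
sketch (do_rx_work() and more_work_pending() are hypothetical helpers):

static int example_poll(struct napi_struct *napi, int budget)
{
        int work_done = do_rx_work(napi, budget);

        /* napi_complete_done() returns true if polling really ended */
        if (work_done < budget &&
            napi_complete_done(napi, work_done) &&
            more_work_pending(napi))
                napi_schedule(napi);

        return work_done;
}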
static void lan78xx_delayedwork(struct work_struct *work)
@@ -3843,7 +4144,7 @@ static void lan78xx_delayedwork(struct work_struct *work)
status);
} else {
clear_bit(EVENT_RX_HALT, &dev->flags);
- tasklet_schedule(&dev->bh);
+ napi_schedule(&dev->napi);
}
}
@@ -3937,6 +4238,8 @@ static void lan78xx_disconnect(struct usb_interface *intf)
set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
+ netif_napi_del(&dev->napi);
+
udev = interface_to_usbdev(intf);
net = dev->net;
@@ -3961,6 +4264,9 @@ static void lan78xx_disconnect(struct usb_interface *intf)
lan78xx_unbind(dev, intf);
+ lan78xx_free_tx_resources(dev);
+ lan78xx_free_rx_resources(dev);
+
usb_kill_urb(dev->urb_intr);
usb_free_urb(dev->urb_intr);
@@ -3973,14 +4279,16 @@ static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
struct lan78xx_net *dev = netdev_priv(net);
unlink_urbs(dev, &dev->txq);
- tasklet_schedule(&dev->bh);
+ napi_schedule(&dev->napi);
}
static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features)
{
- if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
+ struct lan78xx_net *dev = netdev_priv(netdev);
+
+ if (skb->len > LAN78XX_TSO_SIZE(dev))
features &= ~NETIF_F_GSO_MASK;
features = vlan_features_check(skb, features);
@@ -4046,12 +4354,31 @@ static int lan78xx_probe(struct usb_interface *intf,
skb_queue_head_init(&dev->rxq);
skb_queue_head_init(&dev->txq);
- skb_queue_head_init(&dev->done);
+ skb_queue_head_init(&dev->rxq_done);
skb_queue_head_init(&dev->txq_pend);
+ skb_queue_head_init(&dev->rxq_overflow);
mutex_init(&dev->phy_mutex);
mutex_init(&dev->dev_mutex);
- tasklet_setup(&dev->bh, lan78xx_bh);
+ ret = lan78xx_urb_config_init(dev);
+ if (ret < 0)
+ goto out2;
+
+ ret = lan78xx_alloc_tx_resources(dev);
+ if (ret < 0)
+ goto out2;
+
+ ret = lan78xx_alloc_rx_resources(dev);
+ if (ret < 0)
+ goto out3;
+
+ /* MTU range: 68 - 9000 */
+ netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
+
+ netif_set_gso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
+
+ netif_napi_add(netdev, &dev->napi, lan78xx_poll, LAN78XX_NAPI_WEIGHT);
+
INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
init_usb_anchor(&dev->deferred);
@@ -4066,27 +4393,27 @@ static int lan78xx_probe(struct usb_interface *intf,
if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
ret = -ENODEV;
- goto out2;
+ goto out4;
}
dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
ret = -ENODEV;
- goto out2;
+ goto out4;
}
dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
ret = -ENODEV;
- goto out2;
+ goto out4;
}
ep_intr = &intf->cur_altsetting->endpoint[2];
if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
ret = -ENODEV;
- goto out2;
+ goto out4;
}
dev->pipe_intr = usb_rcvintpipe(dev->udev,
@@ -4094,30 +4421,25 @@ static int lan78xx_probe(struct usb_interface *intf,
ret = lan78xx_bind(dev, intf);
if (ret < 0)
- goto out2;
-
- if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
- netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
-
- /* MTU range: 68 - 9000 */
- netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
- netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
+ goto out4;
period = ep_intr->desc.bInterval;
maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
buf = kmalloc(maxp, GFP_KERNEL);
- if (buf) {
- dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
- if (!dev->urb_intr) {
- ret = -ENOMEM;
- kfree(buf);
- goto out3;
- } else {
- usb_fill_int_urb(dev->urb_intr, dev->udev,
- dev->pipe_intr, buf, maxp,
- intr_complete, dev, period);
- dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
- }
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out5;
+ }
+
+ dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->urb_intr) {
+ ret = -ENOMEM;
+ goto out6;
+ } else {
+ usb_fill_int_urb(dev->urb_intr, dev->udev,
+ dev->pipe_intr, buf, maxp,
+ intr_complete, dev, period);
+ dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
}
dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
@@ -4125,7 +4447,7 @@ static int lan78xx_probe(struct usb_interface *intf,
/* Reject broken descriptors. */
if (dev->maxpacket == 0) {
ret = -ENODEV;
- goto out4;
+ goto out6;
}
/* driver requires remote-wakeup capability during autosuspend. */
@@ -4133,12 +4455,12 @@ static int lan78xx_probe(struct usb_interface *intf,
ret = lan78xx_phy_init(dev);
if (ret < 0)
- goto out4;
+ goto out7;
ret = register_netdev(netdev);
if (ret != 0) {
netif_err(dev, probe, netdev, "couldn't register the device\n");
- goto out5;
+ goto out8;
}
usb_set_intfdata(intf, dev);
@@ -4153,12 +4475,19 @@ static int lan78xx_probe(struct usb_interface *intf,
return 0;
-out5:
+out8:
phy_disconnect(netdev->phydev);
-out4:
+out7:
usb_free_urb(dev->urb_intr);
-out3:
+out6:
+ kfree(buf);
+out5:
lan78xx_unbind(dev, intf);
+out4:
+ netif_napi_del(&dev->napi);
+ lan78xx_free_rx_resources(dev);
+out3:
+ lan78xx_free_tx_resources(dev);
out2:
free_netdev(netdev);
out1:
@@ -4579,8 +4908,7 @@ static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
if (!netif_device_present(dev->net) ||
!netif_carrier_ok(dev->net) ||
pipe_halted) {
- usb_free_urb(urb);
- dev_kfree_skb(skb);
+ lan78xx_release_tx_buf(dev, skb);
continue;
}
@@ -4590,15 +4918,14 @@ static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
netif_trans_update(dev->net);
lan78xx_queue_skb(&dev->txq, skb, tx_start);
} else {
- usb_free_urb(urb);
- dev_kfree_skb(skb);
-
if (ret == -EPIPE) {
netif_stop_queue(dev->net);
pipe_halted = true;
} else if (ret == -ENODEV) {
netif_device_detach(dev->net);
}
+
+ lan78xx_release_tx_buf(dev, skb);
}
}
@@ -4630,8 +4957,7 @@ static int lan78xx_resume(struct usb_interface *intf)
if (ret < 0) {
if (ret == -ENODEV)
netif_device_detach(dev->net);
-
- netdev_warn(dev->net, "Failed to submit intr URB");
+ netdev_warn(dev->net, "Failed to submit intr URB");
}
}
@@ -4650,14 +4976,14 @@ static int lan78xx_resume(struct usb_interface *intf)
if (!pipe_halted &&
netif_device_present(dev->net) &&
- (skb_queue_len(&dev->txq) < dev->tx_qlen))
+ (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
netif_start_queue(dev->net);
ret = lan78xx_start_tx_path(dev);
if (ret < 0)
goto out;
- tasklet_schedule(&dev->bh);
+ napi_schedule(&dev->napi);
if (!timer_pending(&dev->stat_monitor)) {
dev->delta = 1;
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index f9877a3e83ac..b88f07c77a6b 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -8983,7 +8983,9 @@ static int rtl8152_set_tunable(struct net_device *netdev,
}
static void rtl8152_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct r8152 *tp = netdev_priv(netdev);
@@ -8992,7 +8994,9 @@ static void rtl8152_get_ringparam(struct net_device *netdev,
}
static int rtl8152_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct r8152 *tp = netdev_priv(netdev);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 50eb43e5bf45..38f6da24f460 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -134,29 +134,22 @@ static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *inf
static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
- char *p = (char *)buf;
+ u8 *p = buf;
int i, j;
switch(stringset) {
case ETH_SS_STATS:
memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
p += sizeof(ethtool_stats_keys);
- for (i = 0; i < dev->real_num_rx_queues; i++) {
- for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
- snprintf(p, ETH_GSTRING_LEN,
- "rx_queue_%u_%.18s",
- i, veth_rq_stats_desc[j].desc);
- p += ETH_GSTRING_LEN;
- }
- }
- for (i = 0; i < dev->real_num_tx_queues; i++) {
- for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
- snprintf(p, ETH_GSTRING_LEN,
- "tx_queue_%u_%.18s",
- i, veth_tq_stats_desc[j].desc);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < dev->real_num_rx_queues; i++)
+ for (j = 0; j < VETH_RQ_STATS_LEN; j++)
+ ethtool_sprintf(&p, "rx_queue_%u_%.18s",
+ i, veth_rq_stats_desc[j].desc);
+
+ for (i = 0; i < dev->real_num_tx_queues; i++)
+ for (j = 0; j < VETH_TQ_STATS_LEN; j++)
+ ethtool_sprintf(&p, "tx_queue_%u_%.18s",
+ i, veth_tq_stats_desc[j].desc);
break;
}
}
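
ethtool_sprintf() formats one string into the current slot and
advances the caller's cursor by ETH_GSTRING_LEN, which is what removes
the manual "p += ETH_GSTRING_LEN" bookkeeping above. A minimal
illustration of the contract (the stat names are made up):

u8 *p = buf;

ethtool_sprintf(&p, "rx_queue_%u_bytes", 0);
ethtool_sprintf(&p, "rx_queue_%u_packets", 0);
/* p == buf + 2 * ETH_GSTRING_LEN at this point */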
@@ -1689,8 +1682,8 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
if (ifmp && (dev->ifindex != 0))
peer->ifindex = ifmp->ifi_index;
- peer->gso_max_size = dev->gso_max_size;
- peer->gso_max_segs = dev->gso_max_segs;
+ netif_set_gso_max_size(peer, dev->gso_max_size);
+ netif_set_gso_max_segs(peer, dev->gso_max_segs);
err = register_netdevice(peer);
put_net(net);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 1771d6e5224f..c74af526d79b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2174,7 +2174,9 @@ static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
}
static void virtnet_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -2694,7 +2696,7 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
txqueue, sq->name, sq->vq->index, sq->vq->name,
- jiffies_to_usecs(jiffies - txq->trans_start));
+ jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
}
static const struct net_device_ops virtnet_netdev = {
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 16f3a2057b90..3172d46c0335 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -575,10 +575,11 @@ vmxnet3_get_link_ksettings(struct net_device *netdev,
return 0;
}
-
static void
vmxnet3_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -595,10 +596,11 @@ vmxnet3_get_ringparam(struct net_device *netdev,
param->rx_jumbo_pending = adapter->rx_ring2_size;
}
-
static int
vmxnet3_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 141635a35c28..fecff0a46612 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -17,6 +17,7 @@
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
+#include <net/gro.h>
#include <net/ipv6_stubs.h>
#include <net/ip.h>
#include <net/icmp.h>
@@ -3233,7 +3234,6 @@ static const struct net_device_ops vxlan_netdev_ether_ops = {
.ndo_fdb_dump = vxlan_fdb_dump,
.ndo_fdb_get = vxlan_fdb_get,
.ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
- .ndo_change_proto_down = dev_change_proto_down_generic,
};
static const struct net_device_ops vxlan_netdev_raw_ops = {
@@ -3304,7 +3304,7 @@ static void vxlan_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
netif_keep_dst(dev);
- dev->priv_flags |= IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_NO_QUEUE | IFF_CHANGE_PROTO_DOWN;
/* MTU range: 68 - 65535 */
dev->min_mtu = ETH_MIN_MTU;
@@ -3810,8 +3810,8 @@ static void vxlan_config_apply(struct net_device *dev,
if (lowerdev) {
dst->remote_ifindex = conf->remote_ifindex;
- dev->gso_max_size = lowerdev->gso_max_size;
- dev->gso_max_segs = lowerdev->gso_max_segs;
+ netif_set_gso_max_size(dev, lowerdev->gso_max_size);
+ netif_set_gso_max_segs(dev, lowerdev->gso_max_segs);
needed_headroom = lowerdev->hard_header_len;
needed_headroom += lowerdev->needed_headroom;
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index cda1b4ce6b21..5ae2d27b5da9 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -306,9 +306,8 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
else
bd_status = R_E_S | R_I_S | R_W_S;
- iowrite16be(bd_status, &priv->rx_bd_base[i].status);
- iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
- &priv->rx_bd_base[i].buf);
+ priv->rx_bd_base[i].status = cpu_to_be16(bd_status);
+ priv->rx_bd_base[i].buf = cpu_to_be32(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH);
}
for (i = 0; i < TX_BD_RING_LEN; i++) {
@@ -317,10 +316,10 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
else
bd_status = T_I_S | T_TC_S | T_W_S;
- iowrite16be(bd_status, &priv->tx_bd_base[i].status);
- iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
- &priv->tx_bd_base[i].buf);
+ priv->tx_bd_base[i].status = cpu_to_be16(bd_status);
+ priv->tx_bd_base[i].buf = cpu_to_be32(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH);
}
+ dma_wmb();
return 0;
@@ -352,10 +351,10 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
- struct qe_bd __iomem *bd;
+ struct qe_bd *bd;
u16 bd_status;
unsigned long flags;
- u16 *proto_head;
+ __be16 *proto_head;
switch (dev->type) {
case ARPHRD_RAWHDLC:
@@ -368,14 +367,14 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
skb_push(skb, HDLC_HEAD_LEN);
- proto_head = (u16 *)skb->data;
+ proto_head = (__be16 *)skb->data;
*proto_head = htons(DEFAULT_HDLC_HEAD);
dev->stats.tx_bytes += skb->len;
break;
case ARPHRD_PPP:
- proto_head = (u16 *)skb->data;
+ proto_head = (__be16 *)skb->data;
if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
dev->stats.tx_dropped++;
dev_kfree_skb(skb);
@@ -398,9 +397,10 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
netdev_sent_queue(dev, skb->len);
spin_lock_irqsave(&priv->lock, flags);
+ dma_rmb();
/* Start from the next BD that should be filled */
bd = priv->curtx_bd;
- bd_status = ioread16be(&bd->status);
+ bd_status = be16_to_cpu(bd->status);
/* Save the skb pointer so we can free it later */
priv->tx_skbuff[priv->skb_curtx] = skb;
@@ -415,8 +415,8 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
/* set bd status and length */
bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
- iowrite16be(skb->len, &bd->length);
- iowrite16be(bd_status, &bd->status);
+ bd->length = cpu_to_be16(skb->len);
+ bd->status = cpu_to_be16(bd_status);
/* Move to next BD in the ring */
if (!(bd_status & T_W_S))
@@ -458,8 +458,9 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
u16 bd_status;
int tx_restart = 0;
+ dma_rmb();
bd = priv->dirty_tx;
- bd_status = ioread16be(&bd->status);
+ bd_status = be16_to_cpu(bd->status);
/* Normal processing. */
while ((bd_status & T_R_S) == 0) {
@@ -503,7 +504,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
bd += 1;
else
bd = priv->tx_bd_base;
- bd_status = ioread16be(&bd->status);
+ bd_status = be16_to_cpu(bd->status);
}
priv->dirty_tx = bd;
@@ -524,8 +525,9 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
u16 length, howmany = 0;
u8 *bdbuffer;
+ dma_rmb();
bd = priv->currx_bd;
- bd_status = ioread16be(&bd->status);
+ bd_status = be16_to_cpu(bd->status);
/* while there are received buffers and BD is full (~R_E) */
while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
@@ -549,7 +551,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
}
bdbuffer = priv->rx_buffer +
(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
- length = ioread16be(&bd->length);
+ length = be16_to_cpu(bd->length);
switch (dev->type) {
case ARPHRD_RAWHDLC:
@@ -593,7 +595,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
netif_receive_skb(skb);
recycle:
- iowrite16be((bd_status & R_W_S) | R_E_S | R_I_S, &bd->status);
+ bd->status = cpu_to_be16((bd_status & R_W_S) | R_E_S | R_I_S);
/* update to point at the next bd */
if (bd_status & R_W_S) {
@@ -608,8 +610,9 @@ recycle:
bd += 1;
}
- bd_status = ioread16be(&bd->status);
+ bd_status = be16_to_cpu(bd->status);
}
+ dma_rmb();
priv->currx_bd = bd;
return howmany;
@@ -721,7 +724,7 @@ static int uhdlc_open(struct net_device *dev)
/* Enable the TDM port */
if (priv->tsa)
- utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
+ qe_setbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port);
priv->hdlc_busy = 1;
netif_device_attach(priv->ndev);
@@ -812,7 +815,7 @@ static int uhdlc_close(struct net_device *dev)
(u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
if (priv->tsa)
- utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);
+ qe_clrbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port);
ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
@@ -848,7 +851,7 @@ static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
#ifdef CONFIG_PM
static void store_clk_config(struct ucc_hdlc_private *priv)
{
- struct qe_mux *qe_mux_reg = &qe_immr->qmx;
+ struct qe_mux __iomem *qe_mux_reg = &qe_immr->qmx;
/* store si clk */
priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
@@ -863,7 +866,7 @@ static void store_clk_config(struct ucc_hdlc_private *priv)
static void resume_clk_config(struct ucc_hdlc_private *priv)
{
- struct qe_mux *qe_mux_reg = &qe_immr->qmx;
+ struct qe_mux __iomem *qe_mux_reg = &qe_immr->qmx;
memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));
@@ -990,9 +993,8 @@ static int uhdlc_resume(struct device *dev)
else
bd_status = R_E_S | R_I_S | R_W_S;
- iowrite16be(bd_status, &priv->rx_bd_base[i].status);
- iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
- &priv->rx_bd_base[i].buf);
+ priv->rx_bd_base[i].status = cpu_to_be16(bd_status);
+ priv->rx_bd_base[i].buf = cpu_to_be32(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH);
}
for (i = 0; i < TX_BD_RING_LEN; i++) {
@@ -1001,10 +1003,10 @@ static int uhdlc_resume(struct device *dev)
else
bd_status = T_I_S | T_TC_S | T_W_S;
- iowrite16be(bd_status, &priv->tx_bd_base[i].status);
- iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
- &priv->tx_bd_base[i].buf);
+ priv->tx_bd_base[i].status = cpu_to_be16(bd_status);
+ priv->tx_bd_base[i].buf = cpu_to_be32(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH);
}
+ dma_wmb();
/* if hdlc is busy enable TX and RX */
if (priv->hdlc_busy == 1) {
@@ -1018,7 +1020,7 @@ static int uhdlc_resume(struct device *dev)
/* Enable the TDM port */
if (priv->tsa)
- utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
+ qe_setbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port);
}
napi_enable(&priv->napi);
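
The fsl_ucc_hdlc changes above replace MMIO accessors
(ioread16be/iowrite16be) with plain big-endian stores plus explicit
dma_wmb()/dma_rmb() barriers, which fits because the buffer
descriptors live in DMA memory shared with the QE rather than in
device register space. The canonical descriptor-ownership ordering
these barriers implement, as a generic sketch rather than a
line-for-line copy of the hunks:

/* writer: make the payload visible before handing over ownership */
bd->buf = cpu_to_be32(dma_addr);
dma_wmb();
bd->status = cpu_to_be16(bd_status);

/* reader: order the ownership check before payload accesses */
dma_rmb();
bd_status = be16_to_cpu(bd->status);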
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index 4ef2944a68bc..52da5e963003 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -79,9 +79,7 @@ static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
u8 sw_hash = skb->sw_hash;
u32 hash = skb->hash;
skb_scrub_packet(skb, true);
- memset(&skb->headers_start, 0,
- offsetof(struct sk_buff, headers_end) -
- offsetof(struct sk_buff, headers_start));
+ memset(&skb->headers, 0, sizeof(skb->headers));
if (encapsulating) {
skb->l4_hash = l4_hash;
skb->sw_hash = sw_hash;
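
The wireguard change tracks an sk_buff layout cleanup: the fields that
used to be bracketed by headers_start/headers_end marker members are
now grouped in a named sub-struct, so the wipe becomes a plain memset
over it. The two forms clear the same bytes; the new one is type-safe:

/* old: span computed from two marker fields */
memset(&skb->headers_start, 0,
       offsetof(struct sk_buff, headers_end) -
       offsetof(struct sk_buff, headers_start));

/* new: the same fields, now one named member */
memset(&skb->headers, 0, sizeof(skb->headers));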
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 23037bfc9e4c..5727c7c00a28 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -2303,7 +2303,7 @@ static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
ssid);
}
-static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
+static int ipw_send_adapter_address(struct ipw_priv *priv, const u8 * mac)
{
if (!priv || !mac) {
IPW_ERROR("Invalid args\n");
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index f006a3d72b40..88c72d1827a0 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -332,7 +332,7 @@ void mwifiex_set_trans_start(struct net_device *dev)
int i;
for (i = 0; i < dev->num_tx_queues; i++)
- netdev_get_tx_queue(dev, i)->trans_start = jiffies;
+ txq_trans_cond_update(netdev_get_tx_queue(dev, i));
netif_trans_update(dev);
}
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 690572e01a2a..4712cd7dff9f 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -574,6 +574,7 @@ static int wilc_mac_open(struct net_device *ndev)
struct wilc *wl = vif->wilc;
int ret = 0;
struct mgmt_frame_regs mgmt_regs = {};
+ u8 addr[ETH_ALEN] __aligned(2);
if (!wl || !wl->dev) {
netdev_err(ndev, "device not ready\n");
@@ -596,10 +597,9 @@ static int wilc_mac_open(struct net_device *ndev)
vif->idx);
if (is_valid_ether_addr(ndev->dev_addr)) {
- wilc_set_mac_address(vif, ndev->dev_addr);
+ ether_addr_copy(addr, ndev->dev_addr);
+ wilc_set_mac_address(vif, addr);
} else {
- u8 addr[ETH_ALEN];
-
wilc_get_mac_address(vif, addr);
eth_hw_addr_set(ndev, addr);
}
diff --git a/drivers/net/wwan/iosm/Makefile b/drivers/net/wwan/iosm/Makefile
index b838034bb120..5c2528beca2a 100644
--- a/drivers/net/wwan/iosm/Makefile
+++ b/drivers/net/wwan/iosm/Makefile
@@ -21,6 +21,7 @@ iosm-y = \
iosm_ipc_mux_codec.o \
iosm_ipc_devlink.o \
iosm_ipc_flash.o \
- iosm_ipc_coredump.o
+ iosm_ipc_coredump.o \
+ iosm_ipc_trace.o
obj-$(CONFIG_IOSM) := iosm.o
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
index cff3b43ca4d7..1be07114c85d 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -10,6 +10,7 @@
#include "iosm_ipc_flash.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_port.h"
+#include "iosm_ipc_trace.h"
/* Check the wwan ips if it is valid with Channel as input. */
static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
@@ -265,9 +266,14 @@ static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
switch (pipe->channel->ctype) {
case IPC_CTYPE_CTRL:
port_id = pipe->channel->channel_id;
+ ipc_pcie_addr_unmap(ipc_imem->pcie, IPC_CB(skb)->len,
+ IPC_CB(skb)->mapping,
+ IPC_CB(skb)->direction);
if (port_id == IPC_MEM_CTRL_CHL_ID_7)
ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink,
skb);
+ else if (port_id == ipc_imem->trace->chl_id)
+ ipc_trace_port_rx(ipc_imem->trace, skb);
else
wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port,
skb);
@@ -548,6 +554,12 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
ctrl_chl_idx++;
}
+ ipc_imem->trace = ipc_imem_trace_channel_init(ipc_imem);
+ if (!ipc_imem->trace) {
+ dev_err(ipc_imem->dev, "trace channel init failed");
+ return;
+ }
+
ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
false);
@@ -1163,6 +1175,7 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
ipc_mux_deinit(ipc_imem->mux);
+ ipc_trace_deinit(ipc_imem->trace);
ipc_wwan_deinit(ipc_imem->wwan);
ipc_port_deinit(ipc_imem->ipc_port);
}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h
index 6be6708b4eec..cec38009c44a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h
@@ -304,6 +304,7 @@ enum ipc_phase {
* @sio: IPC SIO data structure pointer
* @ipc_port: IPC PORT data structure pointer
* @pcie: IPC PCIe
+ * @trace: IPC trace data structure pointer
* @dev: Pointer to device structure
* @ipc_requested_state: Expected IPC state on CP.
* @channels: Channel list with UL/DL pipe pairs.
@@ -349,6 +350,7 @@ struct iosm_imem {
struct iosm_mux *mux;
struct iosm_cdev *ipc_port[IPC_MEM_MAX_CHANNELS];
struct iosm_pcie *pcie;
+ struct iosm_trace *trace;
struct device *dev;
enum ipc_mem_device_ipc_state ipc_requested_state;
struct ipc_mem_channel channels[IPC_MEM_MAX_CHANNELS];
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
index 825e8e5ffb2a..43f1796a8984 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
@@ -11,6 +11,7 @@
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_port.h"
#include "iosm_ipc_task_queue.h"
+#include "iosm_ipc_trace.h"
/* Open a packet data online channel between the network layer and CP. */
int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
@@ -107,6 +108,23 @@ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
"failed to register the ipc_wwan interfaces");
}
+/**
+ * ipc_imem_trace_channel_init - Initializes trace channel.
+ * @ipc_imem: Pointer to iosm_imem struct.
+ *
+ * Returns: Pointer to trace instance on success, else NULL
+ */
+struct iosm_trace *ipc_imem_trace_channel_init(struct iosm_imem *ipc_imem)
+{
+ struct ipc_chnl_cfg chnl_cfg = { 0 };
+
+ ipc_chnl_cfg_get(&chnl_cfg, IPC_MEM_CTRL_CHL_ID_3);
+ ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL, chnl_cfg,
+ IRQ_MOD_OFF);
+
+ return ipc_trace_init(ipc_imem);
+}
+
/* Map SKB to DMA for transfer */
static int ipc_imem_map_skb_to_dma(struct iosm_imem *ipc_imem,
struct sk_buff *skb)
@@ -182,11 +200,14 @@ channel_unavailable:
return false;
}
-/* Release a sio link to CP. */
-void ipc_imem_sys_cdev_close(struct iosm_cdev *ipc_cdev)
+/**
+ * ipc_imem_sys_port_close - Release a sio link to CP.
+ * @ipc_imem: Imem instance.
+ * @channel: Channel instance.
+ */
+void ipc_imem_sys_port_close(struct iosm_imem *ipc_imem,
+ struct ipc_mem_channel *channel)
{
- struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
- struct ipc_mem_channel *channel = ipc_cdev->channel;
enum ipc_phase curr_phase;
int status = 0;
u32 tail = 0;
@@ -643,6 +664,6 @@ int ipc_imem_sys_devlink_read(struct iosm_devlink *devlink, u8 *data,
memcpy(data, skb->data, skb->len);
devlink_read_fail:
- ipc_pcie_kfree_skb(devlink->pcie, skb);
+ dev_kfree_skb(skb);
return rc;
}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
index f0c88ac5643c..e36ee2782629 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
@@ -43,12 +43,8 @@
*/
struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
int chl_id, int hp_id);
-
-/**
- * ipc_imem_sys_cdev_close - Release a sio link to CP.
- * @ipc_cdev: iosm sio instance.
- */
-void ipc_imem_sys_cdev_close(struct iosm_cdev *ipc_cdev);
+void ipc_imem_sys_port_close(struct iosm_imem *ipc_imem,
+ struct ipc_mem_channel *channel);
/**
* ipc_imem_sys_cdev_write - Route the uplink buffer to CP.
@@ -145,4 +141,5 @@ int ipc_imem_sys_devlink_read(struct iosm_devlink *ipc_devlink, u8 *data,
*/
int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
unsigned char *buf, int count);
+struct iosm_trace *ipc_imem_trace_channel_init(struct iosm_imem *ipc_imem);
#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_port.c b/drivers/net/wwan/iosm/iosm_ipc_port.c
index beb944847398..b6d81c627277 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_port.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_port.c
@@ -27,7 +27,7 @@ static void ipc_port_ctrl_stop(struct wwan_port *port)
{
struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
- ipc_imem_sys_cdev_close(ipc_port);
+ ipc_imem_sys_port_close(ipc_port->ipc_imem, ipc_port->channel);
}
/* transfer control data to modem */
diff --git a/drivers/net/wwan/iosm/iosm_ipc_trace.c b/drivers/net/wwan/iosm/iosm_ipc_trace.c
new file mode 100644
index 000000000000..c5fa12599c2b
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_trace.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#include <linux/wwan.h>
+#include "iosm_ipc_trace.h"
+
+/* sub buffer size and number of sub buffers */
+#define IOSM_TRC_SUB_BUFF_SIZE 131072
+#define IOSM_TRC_N_SUB_BUFF 32
+
+#define IOSM_TRC_FILE_PERM 0600
+
+#define IOSM_TRC_DEBUGFS_TRACE "trace"
+#define IOSM_TRC_DEBUGFS_TRACE_CTRL "trace_ctrl"
+
+/**
+ * ipc_trace_port_rx - Receive trace packet from CP and write to relay buffer
+ * @ipc_trace: Pointer to the ipc trace data struct
+ * @skb: Pointer to struct sk_buff
+ */
+void ipc_trace_port_rx(struct iosm_trace *ipc_trace, struct sk_buff *skb)
+{
+ if (ipc_trace->ipc_rchan)
+ relay_write(ipc_trace->ipc_rchan, skb->data, skb->len);
+
+ dev_kfree_skb(skb);
+}
+
+/* Creates relay file in debugfs. */
+static struct dentry *
+ipc_trace_create_buf_file_handler(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ *is_global = 1;
+ return debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+}
+
+/* Removes relay file from debugfs. */
+static int ipc_trace_remove_buf_file_handler(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+ return 0;
+}
+
+static int ipc_trace_subbuf_start_handler(struct rchan_buf *buf, void *subbuf,
+ void *prev_subbuf,
+ size_t prev_padding)
+{
+ if (relay_buf_full(buf)) {
+		pr_err_ratelimited("Relay buffer full, dropping traces");
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Relay interface callbacks */
+static struct rchan_callbacks relay_callbacks = {
+ .subbuf_start = ipc_trace_subbuf_start_handler,
+ .create_buf_file = ipc_trace_create_buf_file_handler,
+ .remove_buf_file = ipc_trace_remove_buf_file_handler,
+};
+
+/* Copy the trace control mode to user buffer */
+static ssize_t ipc_trace_ctrl_file_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct iosm_trace *ipc_trace = filp->private_data;
+ char buf[16];
+ int len;
+
+ mutex_lock(&ipc_trace->trc_mutex);
+ len = snprintf(buf, sizeof(buf), "%d\n", ipc_trace->mode);
+ mutex_unlock(&ipc_trace->trc_mutex);
+
+ return simple_read_from_buffer(buffer, count, ppos, buf, len);
+}
+
+/* Open and close the trace channel depending on user input */
+static ssize_t ipc_trace_ctrl_file_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct iosm_trace *ipc_trace = filp->private_data;
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul_from_user(buffer, count, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ipc_trace->trc_mutex);
+ if (val == TRACE_ENABLE && ipc_trace->mode != TRACE_ENABLE) {
+ ipc_trace->channel = ipc_imem_sys_port_open(ipc_trace->ipc_imem,
+ ipc_trace->chl_id,
+ IPC_HP_CDEV_OPEN);
+ if (!ipc_trace->channel) {
+ ret = -EIO;
+ goto unlock;
+ }
+ ipc_trace->mode = TRACE_ENABLE;
+ } else if (val == TRACE_DISABLE && ipc_trace->mode != TRACE_DISABLE) {
+ ipc_trace->mode = TRACE_DISABLE;
+ /* close trace channel */
+ ipc_imem_sys_port_close(ipc_trace->ipc_imem,
+ ipc_trace->channel);
+ relay_flush(ipc_trace->ipc_rchan);
+ }
+ ret = count;
+unlock:
+ mutex_unlock(&ipc_trace->trc_mutex);
+ return ret;
+}
+
+static const struct file_operations ipc_trace_fops = {
+ .open = simple_open,
+ .write = ipc_trace_ctrl_file_write,
+ .read = ipc_trace_ctrl_file_read,
+};
+
+/**
+ * ipc_trace_init - Create trace interface & debugfs entries
+ * @ipc_imem: Pointer to iosm_imem structure
+ *
+ * Returns: Pointer to trace instance on success, NULL otherwise
+ */
+struct iosm_trace *ipc_trace_init(struct iosm_imem *ipc_imem)
+{
+ struct iosm_trace *ipc_trace = kzalloc(sizeof(*ipc_trace), GFP_KERNEL);
+ struct dentry *debugfs_pdev;
+
+ if (!ipc_trace)
+ return NULL;
+
+ ipc_trace->mode = TRACE_DISABLE;
+ ipc_trace->dev = ipc_imem->dev;
+ ipc_trace->ipc_imem = ipc_imem;
+ ipc_trace->chl_id = IPC_MEM_CTRL_CHL_ID_3;
+
+ mutex_init(&ipc_trace->trc_mutex);
+ debugfs_pdev = wwan_get_debugfs_dir(ipc_imem->dev);
+
+ ipc_trace->ctrl_file = debugfs_create_file(IOSM_TRC_DEBUGFS_TRACE_CTRL,
+ IOSM_TRC_FILE_PERM,
+ debugfs_pdev,
+ ipc_trace, &ipc_trace_fops);
+
+ ipc_trace->ipc_rchan = relay_open(IOSM_TRC_DEBUGFS_TRACE,
+ debugfs_pdev,
+ IOSM_TRC_SUB_BUFF_SIZE,
+ IOSM_TRC_N_SUB_BUFF,
+ &relay_callbacks, NULL);
+
+ return ipc_trace;
+}
+
+/**
+ * ipc_trace_deinit - Close relayfs and remove debugfs entries
+ * @ipc_trace: Pointer to the iosm_trace data struct
+ */
+void ipc_trace_deinit(struct iosm_trace *ipc_trace)
+{
+ debugfs_remove(ipc_trace->ctrl_file);
+ relay_close(ipc_trace->ipc_rchan);
+ mutex_destroy(&ipc_trace->trc_mutex);
+ kfree(ipc_trace);
+}
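
For orientation: the trace data itself is exposed through relayfs under the per-device WWAN debugfs directory added in the wwan_core.c hunk further down. A minimal userspace sketch of driving this interface, assuming a debugfs mount at /sys/kernel/debug and a device named wwan0 (the paths are assumptions, and relayfs appends the per-CPU index to the base name, so the data file is likely "trace0"):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            ssize_t n;
            int ctrl, trc;

            /* Paths are assumptions; adjust for the actual device name. */
            ctrl = open("/sys/kernel/debug/wwan/wwan0/trace_ctrl", O_WRONLY);
            if (ctrl < 0 || write(ctrl, "1", 1) != 1)
                    return 1;

            trc = open("/sys/kernel/debug/wwan/wwan0/trace0", O_RDONLY);
            if (trc < 0)
                    return 1;

            /* Drain whatever CP has pushed into the relay buffer so far. */
            while ((n = read(trc, buf, sizeof(buf))) > 0)
                    fwrite(buf, 1, n, stdout);

            close(trc);
            close(ctrl);
            return 0;
    }

Writing "0" back to trace_ctrl closes the channel and flushes the relay buffer, matching ipc_trace_ctrl_file_write() above.
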
diff --git a/drivers/net/wwan/iosm/iosm_ipc_trace.h b/drivers/net/wwan/iosm/iosm_ipc_trace.h
new file mode 100644
index 000000000000..53346183af9c
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_trace.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_TRACE_H
+#define IOSM_IPC_TRACE_H
+
+#include <linux/debugfs.h>
+#include <linux/relay.h>
+
+#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_imem_ops.h"
+
+/**
+ * enum trace_ctrl_mode - State of trace channel
+ * @TRACE_DISABLE: tracing is disabled
+ * @TRACE_ENABLE: tracing is enabled
+ */
+enum trace_ctrl_mode {
+ TRACE_DISABLE = 0,
+ TRACE_ENABLE,
+};
+
+/**
+ * struct iosm_trace - Trace interface state
+ * @ipc_rchan: Pointer to relay channel
+ * @ctrl_file: Pointer to trace control file
+ * @ipc_imem: Imem instance
+ * @dev: Pointer to device struct
+ * @channel: Channel instance
+ * @chl_id: Channel identifier
+ * @trc_mutex: Mutex to serialize access to @mode
+ * @mode: Current trace mode (enabled or disabled)
+ */
+struct iosm_trace {
+ struct rchan *ipc_rchan;
+ struct dentry *ctrl_file;
+ struct iosm_imem *ipc_imem;
+ struct device *dev;
+ struct ipc_mem_channel *channel;
+ enum ipc_channel_id chl_id;
+	struct mutex trc_mutex; /* Serialize access to mode */
+ enum trace_ctrl_mode mode;
+};
+
+struct iosm_trace *ipc_trace_init(struct iosm_imem *ipc_imem);
+void ipc_trace_deinit(struct iosm_trace *ipc_trace);
+void ipc_trace_port_rx(struct iosm_trace *ipc_trace, struct sk_buff *skb);
+#endif
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index d293ab688044..5bf62dc35ac7 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -3,6 +3,7 @@
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/idr.h>
@@ -25,6 +26,7 @@ static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */
static struct class *wwan_class;
static int wwan_major;
+static struct dentry *wwan_debugfs_dir;
#define to_wwan_dev(d) container_of(d, struct wwan_device, dev)
#define to_wwan_port(d) container_of(d, struct wwan_port, dev)
@@ -40,6 +42,7 @@ static int wwan_major;
* @port_id: Current available port ID to pick.
* @ops: wwan device ops
* @ops_ctxt: context to pass to ops
+ * @debugfs_dir: WWAN device debugfs dir
*/
struct wwan_device {
unsigned int id;
@@ -47,6 +50,7 @@ struct wwan_device {
atomic_t port_id;
const struct wwan_ops *ops;
void *ops_ctxt;
+ struct dentry *debugfs_dir;
};
/**
@@ -142,6 +146,18 @@ static struct wwan_device *wwan_dev_get_by_name(const char *name)
return to_wwan_dev(dev);
}
+struct dentry *wwan_get_debugfs_dir(struct device *parent)
+{
+ struct wwan_device *wwandev;
+
+ wwandev = wwan_dev_get_by_parent(parent);
+ if (IS_ERR(wwandev))
+ return ERR_CAST(wwandev);
+
+ return wwandev->debugfs_dir;
+}
+EXPORT_SYMBOL_GPL(wwan_get_debugfs_dir);
+
/* This function allocates and registers a new WWAN device OR if a WWAN device
 * already exists for the given parent, it gets a reference and returns it.
* This function is not exported (for now), it is called indirectly via
@@ -150,6 +166,7 @@ static struct wwan_device *wwan_dev_get_by_name(const char *name)
static struct wwan_device *wwan_create_dev(struct device *parent)
{
struct wwan_device *wwandev;
+ const char *wwandev_name;
int err, id;
/* The 'find-alloc-register' operation must be protected against
@@ -189,6 +206,10 @@ static struct wwan_device *wwan_create_dev(struct device *parent)
goto done_unlock;
}
+ wwandev_name = kobject_name(&wwandev->dev.kobj);
+ wwandev->debugfs_dir = debugfs_create_dir(wwandev_name,
+ wwan_debugfs_dir);
+
done_unlock:
mutex_unlock(&wwan_register_lock);
@@ -218,10 +239,12 @@ static void wwan_remove_dev(struct wwan_device *wwandev)
else
ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child);
- if (!ret)
+ if (!ret) {
+ debugfs_remove_recursive(wwandev->debugfs_dir);
device_unregister(&wwandev->dev);
- else
+ } else {
put_device(&wwandev->dev);
+ }
mutex_unlock(&wwan_register_lock);
}
@@ -1117,6 +1140,8 @@ static int __init wwan_init(void)
goto destroy;
}
+ wwan_debugfs_dir = debugfs_create_dir("wwan", NULL);
+
return 0;
destroy:
@@ -1128,6 +1153,7 @@ unregister:
static void __exit wwan_exit(void)
{
+ debugfs_remove_recursive(wwan_debugfs_dir);
__unregister_chrdev(wwan_major, 0, WWAN_MAX_MINORS, "wwan_port");
rtnl_link_unregister(&wwan_rtnl_link_ops);
class_destroy(wwan_class);
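
wwan_get_debugfs_dir() gives port drivers a stable anchor for their own debugfs entries. A hedged kernel-side sketch of a consumer (my_debugfs_init and its arguments are hypothetical, not from this patch):

    #include <linux/debugfs.h>
    #include <linux/err.h>
    #include <linux/wwan.h>

    static struct dentry *my_debugfs_init(struct device *parent, void *priv,
                                          const struct file_operations *fops)
    {
            /* Resolves to the "wwan/wwanN" dir created in wwan_create_dev() */
            struct dentry *dir = wwan_get_debugfs_dir(parent);

            if (IS_ERR(dir))
                    return dir;

            return debugfs_create_file("state", 0400, dir, priv, fops);
    }

Note the helper returns an ERR_PTR() when no WWAN device exists for the parent, so callers should test with IS_ERR() rather than for NULL.
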
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index f78670bf41e0..28a9e1eb9bcf 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -205,9 +205,7 @@ static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
r = fdp_nci_i2c_read(phy, &skb);
- if (r == -EREMOTEIO)
- return IRQ_HANDLED;
- else if (r == -ENOMEM || r == -EBADMSG)
+ if (r == -EREMOTEIO || r == -ENOMEM || r == -EBADMSG)
return IRQ_HANDLED;
if (skb != NULL)
diff --git a/drivers/pcmcia/pcmcia_cis.c b/drivers/pcmcia/pcmcia_cis.c
index f650e19a315c..6bc0bc24d357 100644
--- a/drivers/pcmcia/pcmcia_cis.c
+++ b/drivers/pcmcia/pcmcia_cis.c
@@ -386,7 +386,7 @@ size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code,
}
EXPORT_SYMBOL(pcmcia_get_tuple);
-
+#ifdef CONFIG_NET
/*
* pcmcia_do_get_mac() - internal helper for pcmcia_get_mac_from_cis()
*
@@ -431,3 +431,4 @@ int pcmcia_get_mac_from_cis(struct pcmcia_device *p_dev, struct net_device *dev)
}
EXPORT_SYMBOL(pcmcia_get_mac_from_cis);
+#endif /* CONFIG_NET */
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index bbd6f2ad6f24..34672e868a1e 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -141,6 +141,7 @@
#define COMPHY_FW_SPEED_1250 0
#define COMPHY_FW_SPEED_3125 2
#define COMPHY_FW_SPEED_5000 3
+#define COMPHY_FW_SPEED_515625 4
#define COMPHY_FW_SPEED_103125 6
#define COMPHY_FW_PORT_OFFSET 8
#define COMPHY_FW_PORT_MASK GENMASK(11, 8)
@@ -220,6 +221,7 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
ETH_CONF(2, 0, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
ETH_CONF(2, 0, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_2500BASEX),
ETH_CONF(2, 0, PHY_INTERFACE_MODE_RXAUI, 0x1, COMPHY_FW_MODE_RXAUI),
+ ETH_CONF(2, 0, PHY_INTERFACE_MODE_5GBASER, 0x1, COMPHY_FW_MODE_XFI),
ETH_CONF(2, 0, PHY_INTERFACE_MODE_10GBASER, 0x1, COMPHY_FW_MODE_XFI),
GEN_CONF(2, 0, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H),
GEN_CONF(2, 0, PHY_MODE_SATA, COMPHY_FW_MODE_SATA),
@@ -234,6 +236,7 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
/* lane 4 */
ETH_CONF(4, 0, PHY_INTERFACE_MODE_SGMII, 0x2, COMPHY_FW_MODE_SGMII),
ETH_CONF(4, 0, PHY_INTERFACE_MODE_2500BASEX, 0x2, COMPHY_FW_MODE_2500BASEX),
+ ETH_CONF(4, 0, PHY_INTERFACE_MODE_5GBASER, 0x2, COMPHY_FW_MODE_XFI),
ETH_CONF(4, 0, PHY_INTERFACE_MODE_10GBASER, 0x2, COMPHY_FW_MODE_XFI),
ETH_CONF(4, 0, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
GEN_CONF(4, 0, PHY_MODE_USB_DEVICE_SS, COMPHY_FW_MODE_USB3D),
@@ -241,6 +244,7 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = {
GEN_CONF(4, 1, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE),
ETH_CONF(4, 1, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII),
ETH_CONF(4, 1, PHY_INTERFACE_MODE_2500BASEX, -1, COMPHY_FW_MODE_2500BASEX),
+ ETH_CONF(4, 1, PHY_INTERFACE_MODE_5GBASER, -1, COMPHY_FW_MODE_XFI),
ETH_CONF(4, 1, PHY_INTERFACE_MODE_10GBASER, -1, COMPHY_FW_MODE_XFI),
/* lane 5 */
ETH_CONF(5, 1, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
@@ -790,6 +794,11 @@ static int mvebu_comphy_power_on(struct phy *phy)
lane->id);
fw_speed = COMPHY_FW_SPEED_3125;
break;
+ case PHY_INTERFACE_MODE_5GBASER:
+ dev_dbg(priv->dev, "set lane %d to 5GBASE-R mode\n",
+ lane->id);
+ fw_speed = COMPHY_FW_SPEED_515625;
+ break;
case PHY_INTERFACE_MODE_10GBASER:
dev_dbg(priv->dev, "set lane %d to 10GBASE-R mode\n",
lane->id);
diff --git a/drivers/s390/net/ctcm_dbug.h b/drivers/s390/net/ctcm_dbug.h
index 675575ef162e..cce11daf3245 100644
--- a/drivers/s390/net/ctcm_dbug.h
+++ b/drivers/s390/net/ctcm_dbug.h
@@ -65,6 +65,7 @@ extern struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS];
int ctcm_register_dbf_views(void);
void ctcm_unregister_dbf_views(void);
+__printf(3, 4)
void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *text, ...);
static inline const char *strtail(const char *s, int n)
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index de2423c72b02..5db591cf7215 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1406,7 +1406,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
if (new_skb == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
- "%s(%d): skb allocation failed",
+ "%s(%s): skb allocation failed",
CTCM_FUNTAIL, dev->name);
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
goto again;
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 2a6479740600..a61d38a1b4ed 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1808,19 +1808,20 @@ lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
return;
}
/* What kind of frame is it? */
- if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL)
+ if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL) {
/* Control frame. */
lcs_get_control(card, (struct lcs_cmd *) lcs_hdr);
- else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET ||
- lcs_hdr->type == LCS_FRAME_TYPE_TR ||
- lcs_hdr->type == LCS_FRAME_TYPE_FDDI)
+ } else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET ||
+ lcs_hdr->type == LCS_FRAME_TYPE_TR ||
+ lcs_hdr->type == LCS_FRAME_TYPE_FDDI) {
/* Normal network packet. */
lcs_get_skb(card, (char *)(lcs_hdr + 1),
lcs_hdr->offset - offset -
sizeof(struct lcs_header));
- else
+ } else {
/* Unknown frame type. */
; // FIXME: error message ?
+ }
/* Proceed to next frame. */
offset = lcs_hdr->offset;
lcs_hdr->offset = LCS_ILLEGAL_OFFSET;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 26c55f67289f..d32aa8b705db 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -194,9 +194,6 @@ static void qeth_clear_working_pool_list(struct qeth_card *card)
&card->qdio.in_buf_pool.entry_list, list)
list_del(&pool_entry->list);
- if (!queue)
- return;
-
for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
queue->bufs[i].pool_entry = NULL;
}
@@ -275,8 +272,8 @@ int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
QETH_CARD_TEXT(card, 2, "realcbp");
- /* Defer until queue is allocated: */
- if (!card->qdio.in_q)
+ /* Defer until pool is allocated: */
+ if (list_empty(&pool->entry_list))
goto out;
/* Remove entries from the pool: */
@@ -2557,14 +2554,9 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
return 0;
- QETH_CARD_TEXT(card, 2, "inq");
- card->qdio.in_q = qeth_alloc_qdio_queue();
- if (!card->qdio.in_q)
- goto out_nomem;
-
/* inbound buffer pool */
if (qeth_alloc_buffer_pool(card))
- goto out_freeinq;
+ goto out_buffer_pool;
/* outbound */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
@@ -2605,10 +2597,7 @@ out_freeoutq:
card->qdio.out_qs[i] = NULL;
}
qeth_free_buffer_pool(card);
-out_freeinq:
- qeth_free_qdio_queue(card->qdio.in_q);
- card->qdio.in_q = NULL;
-out_nomem:
+out_buffer_pool:
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
return -ENOMEM;
}
@@ -2623,11 +2612,12 @@ static void qeth_free_qdio_queues(struct qeth_card *card)
qeth_free_cq(card);
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
- if (card->qdio.in_q->bufs[j].rx_skb)
+ if (card->qdio.in_q->bufs[j].rx_skb) {
consume_skb(card->qdio.in_q->bufs[j].rx_skb);
+ card->qdio.in_q->bufs[j].rx_skb = NULL;
+ }
}
- qeth_free_qdio_queue(card->qdio.in_q);
- card->qdio.in_q = NULL;
+
/* inbound buffer pool */
qeth_free_buffer_pool(card);
/* free outbound qdio_qs */
@@ -6447,6 +6437,12 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
qeth_determine_capabilities(card);
qeth_set_blkt_defaults(card);
+ card->qdio.in_q = qeth_alloc_qdio_queue();
+ if (!card->qdio.in_q) {
+ rc = -ENOMEM;
+ goto err_rx_queue;
+ }
+
card->qdio.no_out_queues = card->dev->num_tx_queues;
rc = qeth_update_from_chp_desc(card);
if (rc)
@@ -6473,6 +6469,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
err_setup_disc:
err_chp_desc:
+ qeth_free_qdio_queue(card->qdio.in_q);
+err_rx_queue:
free_netdev(card->dev);
err_card:
qeth_core_free_card(card);
@@ -6494,6 +6492,7 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
qeth_free_qdio_queues(card);
+ qeth_free_qdio_queue(card->qdio.in_q);
free_netdev(card->dev);
qeth_core_free_card(card);
put_device(&gdev->dev);
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index 46d0fe0d0e8a..b0b36b2132fe 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -144,7 +144,9 @@ static int qeth_set_coalesce(struct net_device *dev,
}
static void qeth_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct qeth_card *card = dev->ml_priv;
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index d2726d01c757..aabbea48223d 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -2515,7 +2515,7 @@ void rtllib_stop_all_queues(struct rtllib_device *ieee)
unsigned int i;
for (i = 0; i < ieee->dev->num_tx_queues; i++)
- netdev_get_tx_queue(ieee->dev, i)->trans_start = jiffies;
+ txq_trans_cond_update(netdev_get_tx_queue(ieee->dev, i));
netif_tx_stop_all_queues(ieee->dev);
}
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 1dfadb2e878d..76a0f16e56cf 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -130,18 +130,24 @@ static inline bool init_section_intersects(void *virt, size_t size)
/**
* is_kernel_core_data - checks if the pointer address is located in the
- * .data section
+ * .data or .bss section
*
* @addr: address to check
*
- * Returns: true if the address is located in .data, false otherwise.
+ * Returns: true if the address is located in .data or .bss, false otherwise.
 * Note: On some archs it may return true for core RODATA and false for
 * others, but it will always return true for core RW data.
*/
static inline bool is_kernel_core_data(unsigned long addr)
{
- return addr >= (unsigned long)_sdata &&
- addr < (unsigned long)_edata;
+ if (addr >= (unsigned long)_sdata && addr < (unsigned long)_edata)
+ return true;
+
+ if (addr >= (unsigned long)__bss_start &&
+ addr < (unsigned long)__bss_stop)
+ return true;
+
+ return false;
}
/**
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e7a163a3146b..3de38d7f0ced 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2164,6 +2164,7 @@ extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
+extern const struct bpf_func_proto bpf_find_vma_proto;
const struct bpf_func_proto *tracing_prog_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index 47d9abfbdb55..919c0fde1c51 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -73,7 +73,7 @@ asm( \
__BTF_ID_LIST(name, local) \
extern u32 name[];
-#define BTF_ID_LIST_GLOBAL(name) \
+#define BTF_ID_LIST_GLOBAL(name, n) \
__BTF_ID_LIST(name, globl)
/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
@@ -83,7 +83,7 @@ __BTF_ID_LIST(name, globl)
BTF_ID_LIST(name) \
BTF_ID(prefix, typename)
#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) \
- BTF_ID_LIST_GLOBAL(name) \
+ BTF_ID_LIST_GLOBAL(name, 1) \
BTF_ID(prefix, typename)
/*
@@ -149,7 +149,7 @@ extern struct btf_id_set name;
#define BTF_ID_LIST(name) static u32 name[5];
#define BTF_ID(prefix, name)
#define BTF_ID_UNUSED
-#define BTF_ID_LIST_GLOBAL(name) u32 name[1];
+#define BTF_ID_LIST_GLOBAL(name, n) u32 name[n];
#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1];
#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 name[1];
#define BTF_SET_START(name) static struct btf_id_set name = { 0 };
@@ -189,6 +189,18 @@ MAX_BTF_SOCK_TYPE,
extern u32 btf_sock_ids[];
#endif
-extern u32 btf_task_struct_ids[];
+#define BTF_TRACING_TYPE_xxx \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_TASK, task_struct) \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_FILE, file) \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_VMA, vm_area_struct)
+
+enum {
+#define BTF_TRACING_TYPE(name, type) name,
+BTF_TRACING_TYPE_xxx
+#undef BTF_TRACING_TYPE
+MAX_BTF_TRACING_TYPE,
+};
+
+extern u32 btf_tracing_ids[];
#endif
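
The extra count argument exists so the CONFIG_DEBUG_INFO_BTF=n stub can size the global array to match the number of BTF_ID() entries that follow. Presumably the resolved btf_tracing_ids list is then populated along these lines (a sketch; the exact definition site is an assumption):

    BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
    #define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type)
    BTF_TRACING_TYPE_xxx
    #undef BTF_TRACING_TYPE
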
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 845a0ffc16ee..a26f37a27167 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -67,6 +67,22 @@ enum {
ETH_RSS_HASH_FUNCS_COUNT
};
+/**
+ * struct kernel_ethtool_ringparam - RX/TX ring configuration
+ * @rx_buf_len: Current length of buffers on the rx ring.
+ */
+struct kernel_ethtool_ringparam {
+ u32 rx_buf_len;
+};
+
+/**
+ * enum ethtool_supported_ring_param - indicator caps for setting ring params
+ * @ETHTOOL_RING_USE_RX_BUF_LEN: capability bit for setting rx_buf_len
+ */
+enum ethtool_supported_ring_param {
+ ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
+};
+
#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
#define __ETH_RSS_HASH(name) __ETH_RSS_HASH_BIT(ETH_RSS_HASH_##name##_BIT)
@@ -432,6 +448,7 @@ struct ethtool_module_power_mode_params {
* @cap_link_lanes_supported: indicates if the driver supports lanes
* parameter.
* @supported_coalesce_params: supported types of interrupt coalescing.
+ * @supported_ring_params: supported ring params.
* @get_drvinfo: Report driver/device information. Should only set the
* @driver, @version, @fw_version and @bus_info fields. If not
* implemented, the @driver and @bus_info fields will be filled in
@@ -613,6 +630,7 @@ struct ethtool_module_power_mode_params {
struct ethtool_ops {
u32 cap_link_lanes_supported:1;
u32 supported_coalesce_params;
+ u32 supported_ring_params;
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
int (*get_regs_len)(struct net_device *);
void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
@@ -638,9 +656,13 @@ struct ethtool_ops {
struct kernel_ethtool_coalesce *,
struct netlink_ext_ack *);
void (*get_ringparam)(struct net_device *,
- struct ethtool_ringparam *);
+ struct ethtool_ringparam *,
+ struct kernel_ethtool_ringparam *,
+ struct netlink_ext_ack *);
int (*set_ringparam)(struct net_device *,
- struct ethtool_ringparam *);
+ struct ethtool_ringparam *,
+ struct kernel_ethtool_ringparam *,
+ struct netlink_ext_ack *);
void (*get_pause_stats)(struct net_device *dev,
struct ethtool_pause_stats *pause_stats);
void (*get_pauseparam)(struct net_device *,
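
The corresponding driver-side wiring would look roughly like this (a sketch; my_priv, my_get_ringparam and the ring-size fields are hypothetical):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    #define MY_RX_RING_MAX	4096

    struct my_priv {
            u32 rx_ring_size;
            u32 rx_buf_len;
    };

    static void my_get_ringparam(struct net_device *dev,
                                 struct ethtool_ringparam *ring,
                                 struct kernel_ethtool_ringparam *kernel_ring,
                                 struct netlink_ext_ack *extack)
    {
            struct my_priv *priv = netdev_priv(dev);

            /* Legacy fields stay in ethtool_ringparam; the new rx_buf_len
             * lives in the kernel-only extension struct.
             */
            ring->rx_max_pending = MY_RX_RING_MAX;
            ring->rx_pending = priv->rx_ring_size;
            kernel_ring->rx_buf_len = priv->rx_buf_len;
    }

    static const struct ethtool_ops my_ethtool_ops = {
            .supported_ring_params	= ETHTOOL_RING_USE_RX_BUF_LEN,
            .get_ringparam		= my_get_ringparam,
    };

Drivers that do not advertise ETHTOOL_RING_USE_RX_BUF_LEN in supported_ring_params should have attempts to change rx_buf_len rejected by the ethtool netlink core.
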
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 24b7ed2677af..b6a216eb217a 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1374,6 +1374,7 @@ struct bpf_sk_lookup_kern {
const struct in6_addr *daddr;
} v6;
struct sock *selected_sk;
+ u32 ingress_ifindex;
bool no_reuseport;
};
@@ -1436,7 +1437,7 @@ extern struct static_key_false bpf_sk_lookup_enabled;
static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const u16 dport,
- struct sock **psk)
+ const int ifindex, struct sock **psk)
{
struct bpf_prog_array *run_array;
struct sock *selected_sk = NULL;
@@ -1452,6 +1453,7 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
.v4.daddr = daddr,
.sport = sport,
.dport = dport,
+ .ingress_ifindex = ifindex,
};
u32 act;
@@ -1474,7 +1476,7 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
const __be16 sport,
const struct in6_addr *daddr,
const u16 dport,
- struct sock **psk)
+ const int ifindex, struct sock **psk)
{
struct bpf_prog_array *run_array;
struct sock *selected_sk = NULL;
@@ -1490,6 +1492,7 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
.v6.daddr = daddr,
.sport = sport,
.dport = dport,
+ .ingress_ifindex = ifindex,
};
u32 act;
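
Call sites are expected to thread the ingress ifindex through; a hedged sketch of a demux-path caller (illustrative only, not lifted from this patch):

    #include <linux/filter.h>
    #include <linux/in.h>

    static struct sock *my_demux_lookup(struct net *net, struct sk_buff *skb,
                                        __be32 saddr, __be16 sport,
                                        __be32 daddr, u16 hnum)
    {
            struct sock *sk = NULL;
            bool no_reuseport;

            /* The return value is the no_reuseport decision; the selected
             * socket, if any, comes back through the last argument. The
             * forwarded ifindex becomes ctx->ingress_ifindex for sk_lookup
             * programs.
             */
            no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP,
                                                saddr, sport, daddr, hnum,
                                                skb->skb_iif, &sk);
            (void)no_reuseport;
            return sk;
    }
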
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index a85c9f0bd470..53f0efa0bccf 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -105,6 +105,7 @@
#define SDIO_VENDOR_ID_MEDIATEK 0x037a
#define SDIO_DEVICE_ID_MEDIATEK_MT7663 0x7663
#define SDIO_DEVICE_ID_MEDIATEK_MT7668 0x7668
+#define SDIO_DEVICE_ID_MEDIATEK_MT7961 0x7961
#define SDIO_VENDOR_ID_MICROCHIP_WILC 0x0296
#define SDIO_DEVICE_ID_MICROCHIP_WILC1000 0x5347
diff --git a/include/linux/net/intel/iidc.h b/include/linux/net/intel/iidc.h
index e32f6712aee0..1289593411d3 100644
--- a/include/linux/net/intel/iidc.h
+++ b/include/linux/net/intel/iidc.h
@@ -26,6 +26,11 @@ enum iidc_reset_type {
IIDC_GLOBR,
};
+enum iidc_rdma_protocol {
+ IIDC_RDMA_PROTOCOL_IWARP = BIT(0),
+ IIDC_RDMA_PROTOCOL_ROCEV2 = BIT(1),
+};
+
#define IIDC_MAX_USER_PRIORITY 8
/* Struct to hold per RDMA Qset info */
@@ -70,8 +75,6 @@ int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type);
int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable);
void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos);
-#define IIDC_RDMA_ROCE_NAME "roce"
-
/* Structure representing auxiliary driver tailored information about the core
 * PCI dev; each auxiliary driver using the IIDC interface will have an
* instance of this struct dedicated to it.
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3ec42495a43a..db3bff1ae7fd 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -592,7 +592,7 @@ struct netdev_queue {
* Number of TX timeouts for this queue
* (/sys/class/net/DEV/Q/trans_timeout)
*/
- unsigned long trans_timeout;
+ atomic_long_t trans_timeout;
/* Subordinate device that the queue has been assigned to */
struct net_device *sb_dev;
@@ -1297,11 +1297,6 @@ struct netdev_net_notifier {
* TX queue.
* int (*ndo_get_iflink)(const struct net_device *dev);
* Called to get the iflink value of this device.
- * void (*ndo_change_proto_down)(struct net_device *dev,
- * bool proto_down);
- * This function is used to pass protocol port error state information
- * to the switch driver. The switch driver can react to the proto_down
- * by doing a phys down on the associated switch port.
* int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
* This function is used to get egress tunnel information for given skb.
* This is useful for retrieving outer tunnel header parameters while
@@ -1542,8 +1537,6 @@ struct net_device_ops {
int queue_index,
u32 maxrate);
int (*ndo_get_iflink)(const struct net_device *dev);
- int (*ndo_change_proto_down)(struct net_device *dev,
- bool proto_down);
int (*ndo_fill_metadata_dst)(struct net_device *dev,
struct sk_buff *skb);
void (*ndo_set_rx_headroom)(struct net_device *dev,
@@ -1612,6 +1605,7 @@ struct net_device_ops {
* @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
* @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with
* skb_headlen(skb) == 0 (data starts from frag0)
+ * @IFF_CHANGE_PROTO_DOWN: device supports setting carrier via IFLA_PROTO_DOWN
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1646,6 +1640,7 @@ enum netdev_priv_flags {
IFF_L3MDEV_RX_HANDLER = 1<<29,
IFF_LIVE_RENAME_OK = 1<<30,
IFF_TX_SKB_NO_LINEAR = 1<<31,
+ IFF_CHANGE_PROTO_DOWN = BIT_ULL(32),
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1942,6 +1937,8 @@ enum netdev_ml_priv_type {
* @unlink_list: As netif_addr_lock() can be called recursively,
* keep a list of interfaces to be deleted.
*
+ * @dev_addr_shadow: Copy of @dev_addr to catch direct writes.
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -1980,7 +1977,7 @@ struct net_device {
/* Read-mostly cache-line for fast-path access */
unsigned int flags;
- unsigned int priv_flags;
+ unsigned long long priv_flags;
const struct net_device_ops *netdev_ops;
int ifindex;
unsigned short gflags;
@@ -2117,7 +2114,7 @@ struct net_device {
* Cache lines mostly used on receive path (including eth_type_trans())
*/
/* Interface address info used in eth_type_trans() */
- unsigned char *dev_addr;
+ const unsigned char *dev_addr;
struct netdev_rx_queue *_rx;
unsigned int num_rx_queues;
@@ -2268,6 +2265,8 @@ struct net_device {
/* protected by rtnl_lock */
struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];
+
+ u8 dev_addr_shadow[MAX_ADDR_LEN];
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2520,109 +2519,6 @@ static inline void netif_napi_del(struct napi_struct *napi)
synchronize_net();
}
-struct napi_gro_cb {
- /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
- void *frag0;
-
- /* Length of frag0. */
- unsigned int frag0_len;
-
- /* This indicates where we are processing relative to skb->data. */
- int data_offset;
-
- /* This is non-zero if the packet cannot be merged with the new skb. */
- u16 flush;
-
- /* Save the IP ID here and check when we get to the transport layer */
- u16 flush_id;
-
- /* Number of segments aggregated. */
- u16 count;
-
- /* Start offset for remote checksum offload */
- u16 gro_remcsum_start;
-
- /* jiffies when first packet was created/queued */
- unsigned long age;
-
- /* Used in ipv6_gro_receive() and foo-over-udp */
- u16 proto;
-
- /* This is non-zero if the packet may be of the same flow. */
- u8 same_flow:1;
-
- /* Used in tunnel GRO receive */
- u8 encap_mark:1;
-
- /* GRO checksum is valid */
- u8 csum_valid:1;
-
- /* Number of checksums via CHECKSUM_UNNECESSARY */
- u8 csum_cnt:3;
-
- /* Free the skb? */
- u8 free:2;
-#define NAPI_GRO_FREE 1
-#define NAPI_GRO_FREE_STOLEN_HEAD 2
-
- /* Used in foo-over-udp, set in udp[46]_gro_receive */
- u8 is_ipv6:1;
-
- /* Used in GRE, set in fou/gue_gro_receive */
- u8 is_fou:1;
-
- /* Used to determine if flush_id can be ignored */
- u8 is_atomic:1;
-
- /* Number of gro_receive callbacks this packet already went through */
- u8 recursion_counter:4;
-
- /* GRO is done by frag_list pointer chaining. */
- u8 is_flist:1;
-
- /* used to support CHECKSUM_COMPLETE for tunneling protocols */
- __wsum csum;
-
- /* used in skb_gro_receive() slow path */
- struct sk_buff *last;
-};
-
-#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
-
-#define GRO_RECURSION_LIMIT 15
-static inline int gro_recursion_inc_test(struct sk_buff *skb)
-{
- return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
-}
-
-typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
-static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
- struct list_head *head,
- struct sk_buff *skb)
-{
- if (unlikely(gro_recursion_inc_test(skb))) {
- NAPI_GRO_CB(skb)->flush |= 1;
- return NULL;
- }
-
- return cb(head, skb);
-}
-
-typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
- struct sk_buff *);
-static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
- struct sock *sk,
- struct list_head *head,
- struct sk_buff *skb)
-{
- if (unlikely(gro_recursion_inc_test(skb))) {
- NAPI_GRO_CB(skb)->flush |= 1;
- return NULL;
- }
-
- return cb(sk, head, skb);
-}
-
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
bool ignore_outgoing;
@@ -3005,254 +2901,7 @@ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
-int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
-int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
-
-static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
-{
- return NAPI_GRO_CB(skb)->data_offset;
-}
-
-static inline unsigned int skb_gro_len(const struct sk_buff *skb)
-{
- return skb->len - NAPI_GRO_CB(skb)->data_offset;
-}
-
-static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
-{
- NAPI_GRO_CB(skb)->data_offset += len;
-}
-
-static inline void *skb_gro_header_fast(struct sk_buff *skb,
- unsigned int offset)
-{
- return NAPI_GRO_CB(skb)->frag0 + offset;
-}
-
-static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
-{
- return NAPI_GRO_CB(skb)->frag0_len < hlen;
-}
-
-static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
-{
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
-}
-
-static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
- unsigned int offset)
-{
- if (!pskb_may_pull(skb, hlen))
- return NULL;
-
- skb_gro_frag0_invalidate(skb);
- return skb->data + offset;
-}
-
-static inline void *skb_gro_network_header(struct sk_buff *skb)
-{
- return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
- skb_network_offset(skb);
-}
-static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
- const void *start, unsigned int len)
-{
- if (NAPI_GRO_CB(skb)->csum_valid)
- NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
- csum_partial(start, len, 0));
-}
-
-/* GRO checksum functions. These are logical equivalents of the normal
- * checksum functions (in skbuff.h) except that they operate on the GRO
- * offsets and fields in sk_buff.
- */
-
-__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
-
-static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
-{
- return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
-}
-
-static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
- bool zero_okay,
- __sum16 check)
-{
- return ((skb->ip_summed != CHECKSUM_PARTIAL ||
- skb_checksum_start_offset(skb) <
- skb_gro_offset(skb)) &&
- !skb_at_gro_remcsum_start(skb) &&
- NAPI_GRO_CB(skb)->csum_cnt == 0 &&
- (!zero_okay || check));
-}
-
-static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
- __wsum psum)
-{
- if (NAPI_GRO_CB(skb)->csum_valid &&
- !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
- return 0;
-
- NAPI_GRO_CB(skb)->csum = psum;
-
- return __skb_gro_checksum_complete(skb);
-}
-
-static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
-{
- if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
- /* Consume a checksum from CHECKSUM_UNNECESSARY */
- NAPI_GRO_CB(skb)->csum_cnt--;
- } else {
- /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
- * verified a new top level checksum or an encapsulated one
- * during GRO. This saves work if we fallback to normal path.
- */
- __skb_incr_checksum_unnecessary(skb);
- }
-}
-
-#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
- compute_pseudo) \
-({ \
- __sum16 __ret = 0; \
- if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
- __ret = __skb_gro_checksum_validate_complete(skb, \
- compute_pseudo(skb, proto)); \
- if (!__ret) \
- skb_gro_incr_csum_unnecessary(skb); \
- __ret; \
-})
-
-#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
- __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
-
-#define skb_gro_checksum_validate_zero_check(skb, proto, check, \
- compute_pseudo) \
- __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
-
-#define skb_gro_checksum_simple_validate(skb) \
- __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
-
-static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
-{
- return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
- !NAPI_GRO_CB(skb)->csum_valid);
-}
-
-static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
- __wsum pseudo)
-{
- NAPI_GRO_CB(skb)->csum = ~pseudo;
- NAPI_GRO_CB(skb)->csum_valid = 1;
-}
-
-#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \
-do { \
- if (__skb_gro_checksum_convert_check(skb)) \
- __skb_gro_checksum_convert(skb, \
- compute_pseudo(skb, proto)); \
-} while (0)
-
-struct gro_remcsum {
- int offset;
- __wsum delta;
-};
-
-static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
-{
- grc->offset = 0;
- grc->delta = 0;
-}
-
-static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
- unsigned int off, size_t hdrlen,
- int start, int offset,
- struct gro_remcsum *grc,
- bool nopartial)
-{
- __wsum delta;
- size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
-
- BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
-
- if (!nopartial) {
- NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
- return ptr;
- }
-
- ptr = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, off + plen)) {
- ptr = skb_gro_header_slow(skb, off + plen, off);
- if (!ptr)
- return NULL;
- }
-
- delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
- start, offset);
-
- /* Adjust skb->csum since we changed the packet */
- NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
-
- grc->offset = off + hdrlen + offset;
- grc->delta = delta;
-
- return ptr;
-}
-
-static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
- struct gro_remcsum *grc)
-{
- void *ptr;
- size_t plen = grc->offset + sizeof(u16);
-
- if (!grc->delta)
- return;
-
- ptr = skb_gro_header_fast(skb, grc->offset);
- if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
- ptr = skb_gro_header_slow(skb, plen, grc->offset);
- if (!ptr)
- return;
- }
-
- remcsum_unadjust((__sum16 *)ptr, grc->delta);
-}
-
-#ifdef CONFIG_XFRM_OFFLOAD
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
-{
- if (PTR_ERR(pp) != -EINPROGRESS)
- NAPI_GRO_CB(skb)->flush |= flush;
-}
-static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
- struct sk_buff *pp,
- int flush,
- struct gro_remcsum *grc)
-{
- if (PTR_ERR(pp) != -EINPROGRESS) {
- NAPI_GRO_CB(skb)->flush |= flush;
- skb_gro_remcsum_cleanup(skb, grc);
- skb->remcsum_offload = 0;
- }
-}
-#else
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
-{
- NAPI_GRO_CB(skb)->flush |= flush;
-}
-static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
- struct sk_buff *pp,
- int flush,
- struct gro_remcsum *grc)
-{
- NAPI_GRO_CB(skb)->flush |= flush;
- skb_gro_remcsum_cleanup(skb, grc);
- skb->remcsum_offload = 0;
-}
-#endif
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
@@ -4007,6 +3656,7 @@ int netif_rx_ni(struct sk_buff *skb);
int netif_rx_any_context(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
+void netif_receive_skb_list_internal(struct list_head *head);
void netif_receive_skb_list(struct list_head *head);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
@@ -4080,7 +3730,6 @@ int dev_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid, bool recurse);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
-int dev_change_proto_down_generic(struct net_device *dev, bool proto_down);
void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
u32 value);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
@@ -4444,10 +4093,21 @@ static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
spin_unlock_bh(&txq->_xmit_lock);
}
+/*
+ * txq->trans_start can be read locklessly from dev_watchdog()
+ */
static inline void txq_trans_update(struct netdev_queue *txq)
{
if (txq->xmit_lock_owner != -1)
- txq->trans_start = jiffies;
+ WRITE_ONCE(txq->trans_start, jiffies);
+}
+
+static inline void txq_trans_cond_update(struct netdev_queue *txq)
+{
+ unsigned long now = jiffies;
+
+ if (READ_ONCE(txq->trans_start) != now)
+ WRITE_ONCE(txq->trans_start, now);
}
/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
@@ -4455,8 +4115,7 @@ static inline void netif_trans_update(struct net_device *dev)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
- if (txq->trans_start != jiffies)
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
}
/**
@@ -4465,27 +4124,7 @@ static inline void netif_trans_update(struct net_device *dev)
*
* Get network device transmit lock
*/
-static inline void netif_tx_lock(struct net_device *dev)
-{
- unsigned int i;
- int cpu;
-
- spin_lock(&dev->tx_global_lock);
- cpu = smp_processor_id();
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-
- /* We are the only thread of execution doing a
- * freeze, but we have to grab the _xmit_lock in
- * order to synchronize with threads which are in
- * the ->hard_start_xmit() handler and already
- * checked the frozen bit.
- */
- __netif_tx_lock(txq, cpu);
- set_bit(__QUEUE_STATE_FROZEN, &txq->state);
- __netif_tx_unlock(txq);
- }
-}
+void netif_tx_lock(struct net_device *dev);
static inline void netif_tx_lock_bh(struct net_device *dev)
{
@@ -4493,22 +4132,7 @@ static inline void netif_tx_lock_bh(struct net_device *dev)
netif_tx_lock(dev);
}
-static inline void netif_tx_unlock(struct net_device *dev)
-{
- unsigned int i;
-
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-
- /* No need to grab the _xmit_lock here. If the
- * queue is not stopped for another reason, we
- * force a schedule.
- */
- clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
- netif_schedule_queue(txq);
- }
- spin_unlock(&dev->tx_global_lock);
-}
+void netif_tx_unlock(struct net_device *dev);
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
@@ -4642,10 +4266,13 @@ void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
void __hw_addr_init(struct netdev_hw_addr_list *list);
/* Functions used for device addresses handling */
+void dev_addr_mod(struct net_device *dev, unsigned int offset,
+ const void *addr, size_t len);
+
static inline void
__dev_addr_set(struct net_device *dev, const void *addr, size_t len)
{
- memcpy(dev->dev_addr, addr, len);
+ dev_addr_mod(dev, 0, addr, len);
}
static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
@@ -4653,19 +4280,13 @@ static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
__dev_addr_set(dev, addr, dev->addr_len);
}
-static inline void
-dev_addr_mod(struct net_device *dev, unsigned int offset,
- const void *addr, size_t len)
-{
- memcpy(&dev->dev_addr[offset], addr, len);
-}
-
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);
+void dev_addr_check(struct net_device *dev);
/* Functions used for unicast addresses handling */
int dev_uc_add(struct net_device *dev, const unsigned char *addr);
@@ -5104,7 +4725,15 @@ static inline bool netif_needs_gso(struct sk_buff *skb,
static inline void netif_set_gso_max_size(struct net_device *dev,
unsigned int size)
{
- dev->gso_max_size = size;
+ /* dev->gso_max_size is read locklessly from sk_setup_caps() */
+ WRITE_ONCE(dev->gso_max_size, size);
+}
+
+static inline void netif_set_gso_max_segs(struct net_device *dev,
+ unsigned int segs)
+{
+ /* dev->gso_max_segs is read locklessly from sk_setup_caps() */
+ WRITE_ONCE(dev->gso_max_segs, segs);
}
static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
@@ -5291,7 +4920,7 @@ void netdev_info(const struct net_device *dev, const char *format, ...);
#define netdev_level_once(level, dev, fmt, ...) \
do { \
- static bool __print_once __read_mostly; \
+ static bool __section(".data.once") __print_once; \
\
if (!__print_once) { \
__print_once = true; \
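
Two of the netdevice.h changes above deserve a usage note. With dev->dev_addr now const, drivers must route MAC updates through the helpers so the core can police stray direct writes against @dev_addr_shadow. A minimal sketch (my_read_mac_from_hw() is a hypothetical stand-in):

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    static void my_read_mac_from_hw(u8 *addr)
    {
            eth_random_addr(addr);	/* stand-in for a real EEPROM read */
    }

    static int my_assign_mac(struct net_device *dev)
    {
            u8 addr[ETH_ALEN];

            my_read_mac_from_hw(addr);
            if (!is_valid_ether_addr(addr))
                    return -EADDRNOTAVAIL;

            dev_addr_set(dev, addr);	/* no more memcpy(dev->dev_addr, ...) */
            return 0;
    }

Similarly, code that used to assign txq->trans_start directly should now go through txq_trans_cond_update(), which avoids dirtying the cache line when the value is unchanged and pairs with the lockless READ_ONCE() in dev_watchdog().
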
diff --git a/include/linux/once.h b/include/linux/once.h
index d361fb14ac3a..f54523052bbc 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -38,7 +38,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
#define DO_ONCE(func, ...) \
({ \
bool ___ret = false; \
- static bool ___done = false; \
+ static bool __section(".data.once") ___done = false; \
static DEFINE_STATIC_KEY_TRUE(___once_key); \
if (static_branch_unlikely(&___once_key)) { \
unsigned long ___flags; \
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 96e43fbb2dd8..1e57cdd95da3 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -99,7 +99,7 @@ extern const int phy_10gbit_features_array[1];
* @PHY_INTERFACE_MODE_RGMII_RXID: RGMII with Internal RX delay
 * @PHY_INTERFACE_MODE_RGMII_TXID: RGMII with Internal TX delay
* @PHY_INTERFACE_MODE_RTBI: Reduced TBI
- * @PHY_INTERFACE_MODE_SMII: ??? MII
+ * @PHY_INTERFACE_MODE_SMII: Serial MII
* @PHY_INTERFACE_MODE_XGMII: 10 gigabit media-independent interface
 * @PHY_INTERFACE_MODE_XLGMII: 40 gigabit media-independent interface
* @PHY_INTERFACE_MODE_MOCA: Multimedia over Coax
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index f037470b6fb3..01224235df0f 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -20,6 +20,29 @@ enum {
MLO_AN_PHY = 0, /* Conventional PHY */
MLO_AN_FIXED, /* Fixed-link mode */
MLO_AN_INBAND, /* In-band protocol */
+
+ MAC_SYM_PAUSE = BIT(0),
+ MAC_ASYM_PAUSE = BIT(1),
+ MAC_10HD = BIT(2),
+ MAC_10FD = BIT(3),
+ MAC_10 = MAC_10HD | MAC_10FD,
+ MAC_100HD = BIT(4),
+ MAC_100FD = BIT(5),
+ MAC_100 = MAC_100HD | MAC_100FD,
+ MAC_1000HD = BIT(6),
+ MAC_1000FD = BIT(7),
+ MAC_1000 = MAC_1000HD | MAC_1000FD,
+ MAC_2500FD = BIT(8),
+ MAC_5000FD = BIT(9),
+ MAC_10000FD = BIT(10),
+ MAC_20000FD = BIT(11),
+ MAC_25000FD = BIT(12),
+ MAC_40000FD = BIT(13),
+ MAC_50000FD = BIT(14),
+ MAC_56000FD = BIT(15),
+ MAC_100000FD = BIT(16),
+ MAC_200000FD = BIT(17),
+ MAC_400000FD = BIT(18),
};
static inline bool phylink_autoneg_inband(unsigned int mode)
@@ -69,6 +92,7 @@ enum phylink_op_type {
* if MAC link is at %MLO_AN_FIXED mode.
* @supported_interfaces: bitmap describing which PHY_INTERFACE_MODE_xxx
* are supported by the MAC/PCS.
+ * @mac_capabilities: MAC pause/speed/duplex capabilities.
*/
struct phylink_config {
struct device *dev;
@@ -79,6 +103,7 @@ struct phylink_config {
void (*get_fixed_state)(struct phylink_config *config,
struct phylink_link_state *state);
DECLARE_PHY_INTERFACE_MASK(supported_interfaces);
+ unsigned long mac_capabilities;
};
/**
@@ -442,6 +467,12 @@ void pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
phy_interface_t interface, int speed, int duplex);
#endif
+void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface,
+ unsigned long mac_capabilities);
+void phylink_generic_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state);
+
struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *,
phy_interface_t iface,
const struct phylink_mac_ops *mac_ops);
@@ -496,11 +527,12 @@ void phylink_set_port_modes(unsigned long *bits);
void phylink_set_10g_modes(unsigned long *mask);
void phylink_helper_basex_speed(struct phylink_link_state *state);
+void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
+ u16 bmsr, u16 lpa);
void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
struct phylink_link_state *state);
-int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs,
- phy_interface_t interface,
- const unsigned long *advertising);
+int phylink_mii_c22_pcs_encode_advertisement(phy_interface_t interface,
+ const unsigned long *advertising);
int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
phy_interface_t interface,
const unsigned long *advertising);
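
A hedged sketch of a MAC driver opting in to the new generic validation (names are hypothetical; the remaining phylink_mac_ops are omitted for brevity):

    #include <linux/phylink.h>

    static void my_phylink_caps_init(struct phylink_config *config)
    {
            /* Declare pause support and the speeds/duplexes the MAC can do;
             * phylink_generic_validate() intersects this with what the
             * selected interface mode supports.
             */
            config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
                                       MAC_10 | MAC_100 | MAC_1000FD;
    }

    static const struct phylink_mac_ops my_mac_ops = {
            .validate = phylink_generic_validate,
    };
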
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index e3c9a25a853a..22652e5fbc38 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -290,6 +290,11 @@ typedef void (*regmap_unlock)(void *);
* read operation on a bus such as SPI, I2C, etc. Most of the
* devices do not need this.
* @reg_write: Same as above for writing.
+ * @reg_update_bits: Optional callback that, if provided, will be used to
+ *		     perform all update_bits (read-modify-write) operations.
+ *		     Should only be provided if the device requires special
+ *		     locking and register handling and the operation cannot
+ *		     be represented as a simple update_bits operation on a
+ *		     bus such as SPI, I2C, etc.
* @fast_io: Register IO is fast. Use a spinlock instead of a mutex
* to perform locking. This field is ignored if custom lock/unlock
* functions are used (see fields lock/unlock of struct regmap_config).
@@ -372,6 +377,8 @@ struct regmap_config {
int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+ int (*reg_update_bits)(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val);
bool fast_io;
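
A hedged sketch of a regmap user providing the new callback (my_dev and my_hw_rmw() are hypothetical bus primitives):

    #include <linux/regmap.h>

    struct my_dev;
    int my_hw_rmw(struct my_dev *my, unsigned int reg, unsigned int mask,
                  unsigned int val);

    static int my_reg_update_bits(void *context, unsigned int reg,
                                  unsigned int mask, unsigned int val)
    {
            /* The hardware performs the read-modify-write itself, so regmap
             * does not fall back to a reg_read() + reg_write() pair.
             */
            return my_hw_rmw(context, reg, mask, val);
    }

    static const struct regmap_config my_regmap_config = {
            .reg_bits	 = 16,
            .val_bits	 = 16,
            .reg_update_bits = my_reg_update_bits,
    };
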
diff --git a/include/linux/siphash.h b/include/linux/siphash.h
index bf21591a9e5e..3f7427b9e935 100644
--- a/include/linux/siphash.h
+++ b/include/linux/siphash.h
@@ -21,6 +21,8 @@ typedef struct {
u64 key[2];
} siphash_key_t;
+#define siphash_aligned_key_t siphash_key_t __aligned(16)
+
static inline bool siphash_key_is_zero(const siphash_key_t *key)
{
return !(key->key[0] | key->key[1]);
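
The new typedef simply forces 16-byte alignment so the two u64 halves of a static key cannot straddle a cache line; usage is a drop-in replacement (a sketch, with a hypothetical key name):

    #include <linux/siphash.h>

    /* Previously: static siphash_key_t my_hash_key; */
    static siphash_aligned_key_t my_hash_key;
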
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c8cb7e697d47..eae4bd3237a4 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -36,6 +36,7 @@
#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
+#include <linux/llist.h>
#include <net/flow.h>
#include <net/page_pool.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
@@ -626,6 +627,7 @@ typedef unsigned char *sk_buff_data_t;
* for retransmit timer
* @rbnode: RB tree node, alternative to next/prev for netem/tcp
* @list: queue head
+ * @ll_node: anchor in an llist (e.g. socket defer_list)
* @sk: Socket we are owned by
* @ip_defrag_offset: (aka @sk) alternate use of @sk, used in
* fragmentation management
@@ -743,6 +745,7 @@ struct sk_buff {
};
struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
struct list_head list;
+ struct llist_node ll_node;
};
union {
@@ -792,7 +795,7 @@ struct sk_buff {
#else
#define CLONED_MASK 1
#endif
-#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
+#define CLONED_OFFSET offsetof(struct sk_buff, __cloned_offset)
/* private: */
__u8 __cloned_offset[0];
@@ -808,25 +811,15 @@ struct sk_buff {
__u8 active_extensions;
#endif
- /* fields enclosed in headers_start/headers_end are copied
+ /* Fields enclosed in headers group are copied
* using a single memcpy() in __copy_skb_header()
*/
- /* private: */
- __u32 headers_start[0];
- /* public: */
-
-/* if you move pkt_type around you also must adapt those constants */
-#ifdef __BIG_ENDIAN_BITFIELD
-#define PKT_TYPE_MAX (7 << 5)
-#else
-#define PKT_TYPE_MAX 7
-#endif
-#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
+ struct_group(headers,
/* private: */
__u8 __pkt_type_offset[0];
/* public: */
- __u8 pkt_type:3;
+ __u8 pkt_type:3; /* see PKT_TYPE_MAX */
__u8 ignore_df:1;
__u8 nf_trace:1;
__u8 ip_summed:2;
@@ -842,16 +835,10 @@ struct sk_buff {
__u8 encap_hdr_csum:1;
__u8 csum_valid:1;
-#ifdef __BIG_ENDIAN_BITFIELD
-#define PKT_VLAN_PRESENT_BIT 7
-#else
-#define PKT_VLAN_PRESENT_BIT 0
-#endif
-#define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset)
/* private: */
__u8 __pkt_vlan_present_offset[0];
/* public: */
- __u8 vlan_present:1;
+ __u8 vlan_present:1; /* See PKT_VLAN_PRESENT_BIT */
__u8 csum_complete_sw:1;
__u8 csum_level:2;
__u8 csum_not_inet:1;
@@ -932,9 +919,7 @@ struct sk_buff {
u64 kcov_handle;
#endif
- /* private: */
- __u32 headers_end[0];
- /* public: */
+ ); /* end headers group */
/* These elements must be at the end, see alloc_skb() for details. */
sk_buff_data_t tail;
@@ -950,6 +935,22 @@ struct sk_buff {
#endif
};
+/* if you move pkt_type around you also must adapt those constants */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_TYPE_MAX (7 << 5)
+#else
+#define PKT_TYPE_MAX 7
+#endif
+#define PKT_TYPE_OFFSET offsetof(struct sk_buff, __pkt_type_offset)
+
+/* if you move pkt_vlan_present around you also must adapt these constants */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_VLAN_PRESENT_BIT 7
+#else
+#define PKT_VLAN_PRESENT_BIT 0
+#endif
+#define PKT_VLAN_PRESENT_OFFSET offsetof(struct sk_buff, __pkt_vlan_present_offset)
+
#ifdef __KERNEL__
/*
* Handling routines are only of interest to the kernel
@@ -3484,7 +3485,11 @@ __skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
static inline void skb_postpull_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
- __skb_postpull_rcsum(skb, start, len, 0);
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = ~csum_partial(start, len, ~skb->csum);
+ else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_start_offset(skb) < 0)
+ skb->ip_summed = CHECKSUM_NONE;
}
static __always_inline void
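
The headers_start/headers_end markers are replaced by a struct_group(), so the one-shot copy in __copy_skb_header() can be expressed as a sized, checkable memcpy(). A sketch of the resulting pattern (the function name is illustrative):

    #include <linux/skbuff.h>
    #include <linux/string.h>

    static inline void my_copy_skb_headers(struct sk_buff *new,
                                           const struct sk_buff *old)
    {
            /* struct_group() gives the span a name and a size, so the
             * compiler (and FORTIFY_SOURCE) can bounds-check this copy.
             */
            memcpy(&new->headers, &old->headers, sizeof(new->headers));
    }
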
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 584d94be9c8b..18a717fe62eb 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -507,12 +507,6 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
return !!psock->saved_data_ready;
}
-static inline bool sk_is_tcp(const struct sock *sk)
-{
- return sk->sk_type == SOCK_STREAM &&
- sk->sk_protocol == IPPROTO_TCP;
-}
-
static inline bool sk_is_udp(const struct sock *sk)
{
return sk->sk_type == SOCK_DGRAM &&
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index a6f03b36fc4f..89b8e208cd7b 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -241,6 +241,7 @@ struct plat_stmmacenet_data {
unsigned int clk_ref_rate;
unsigned int mult_fact_100ns;
s32 ptp_max_adj;
+ u32 cdc_error_adj;
struct reset_control *stmmac_rst;
struct reset_control *stmmac_ahb_rst;
struct stmmac_axi *axi;
diff --git a/include/linux/wwan.h b/include/linux/wwan.h
index 9fac819f92e3..1646aa3e6779 100644
--- a/include/linux/wwan.h
+++ b/include/linux/wwan.h
@@ -171,4 +171,6 @@ int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
void wwan_unregister_ops(struct device *parent);
+struct dentry *wwan_get_debugfs_dir(struct device *parent);
+
#endif /* __WWAN_H */
diff --git a/include/net/arp.h b/include/net/arp.h
index 4950191f6b2b..031374ac2f22 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -53,13 +53,7 @@ static inline void __ipv4_confirm_neigh(struct net_device *dev, u32 key)
rcu_read_lock_bh();
n = __ipv4_neigh_lookup_noref(dev, key);
- if (n) {
- unsigned long now = jiffies;
-
- /* avoid dirtying neighbour */
- if (READ_ONCE(n->confirmed) != now)
- WRITE_ONCE(n->confirmed, now);
- }
+ neigh_confirm(n);
rcu_read_unlock_bh();
}
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 3271870fd85e..2f31e571f34c 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -380,6 +380,7 @@ typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
#define HCI_REQ_SKB BIT(1)
struct hci_ctrl {
+ struct sock *sk;
u16 opcode;
u8 req_flags;
u8 req_event;
@@ -405,6 +406,7 @@ struct bt_skb_cb {
#define hci_skb_pkt_type(skb) bt_cb((skb))->pkt_type
#define hci_skb_expect(skb) bt_cb((skb))->expect
#define hci_skb_opcode(skb) bt_cb((skb))->hci.opcode
+#define hci_skb_sk(skb) bt_cb((skb))->hci.sk
static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how)
{
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 63065bc01b76..84db6b275231 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -566,6 +566,7 @@ enum {
#define HCI_ERROR_INVALID_LL_PARAMS 0x1e
#define HCI_ERROR_UNSPECIFIED 0x1f
#define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c
+#define HCI_ERROR_CANCELLED_BY_HOST 0x44
/* Flow control modes */
#define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index dd8840e70e25..2560cfe80db8 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -30,6 +30,7 @@
#include <linux/rculist.h>
#include <net/bluetooth/hci.h>
+#include <net/bluetooth/hci_sync.h>
#include <net/bluetooth/hci_sock.h>
/* HCI priority */
@@ -475,6 +476,9 @@ struct hci_dev {
struct work_struct power_on;
struct delayed_work power_off;
struct work_struct error_reset;
+ struct work_struct cmd_sync_work;
+ struct list_head cmd_sync_work_list;
+ struct mutex cmd_sync_work_lock;
__u16 discov_timeout;
struct delayed_work discov_off;
@@ -489,10 +493,7 @@ struct hci_dev {
struct work_struct tx_work;
struct work_struct discov_update;
- struct work_struct bg_scan_update;
struct work_struct scan_update;
- struct work_struct connectable_update;
- struct work_struct discoverable_update;
struct delayed_work le_scan_disable;
struct delayed_work le_scan_restart;
@@ -519,7 +520,6 @@ struct hci_dev {
bool advertising_paused;
struct notifier_block suspend_notifier;
- struct work_struct suspend_prepare;
enum suspended_state suspend_state_next;
enum suspended_state suspend_state;
bool scanning_paused;
@@ -528,9 +528,6 @@ struct hci_dev {
bdaddr_t wake_addr;
u8 wake_addr_type;
- wait_queue_head_t suspend_wait_q;
- DECLARE_BITMAP(suspend_tasks, __SUSPEND_NUM_TASKS);
-
struct hci_conn_hash conn_hash;
struct list_head mgmt_pending;
@@ -603,6 +600,7 @@ struct hci_dev {
#if IS_ENABLED(CONFIG_BT_AOSPEXT)
bool aosp_capable;
+ bool aosp_quality_report;
#endif
int (*open)(struct hci_dev *hdev);
@@ -1461,8 +1459,11 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
+#define ll_privacy_capable(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY)
+
/* Use LL Privacy based address resolution if supported */
-#define use_ll_privacy(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY)
+#define use_ll_privacy(dev) (ll_privacy_capable(dev) && \
+ hci_dev_test_flag(dev, HCI_ENABLE_LL_PRIVACY))
/* Use enhanced synchronous connection if command is supported */
#define enhanced_sco_capable(dev) ((dev)->commands[29] & 0x08)
@@ -1690,10 +1691,6 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
-struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param, u32 timeout);
-struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param, u8 event, u32 timeout);
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param);
@@ -1704,9 +1701,6 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
-struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param, u32 timeout);
-
u32 hci_conn_get_phy(struct hci_conn *conn);
/* ----- HCI Sockets ----- */
@@ -1806,7 +1800,6 @@ int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 entered);
void mgmt_auth_failed(struct hci_conn *conn, u8 status);
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
-void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
u8 status);
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
@@ -1831,8 +1824,6 @@ void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
u16 max_interval, u16 latency, u16 timeout);
void mgmt_smp_complete(struct hci_conn *conn, bool complete);
bool mgmt_get_connectable(struct hci_dev *hdev);
-void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status);
-void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status);
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev);
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
u8 instance);
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
new file mode 100644
index 000000000000..0336c1bc5d25
--- /dev/null
+++ b/include/net/bluetooth/hci_sync.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BlueZ - Bluetooth protocol stack for Linux
+ *
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+typedef int (*hci_cmd_sync_work_func_t)(struct hci_dev *hdev, void *data);
+typedef void (*hci_cmd_sync_work_destroy_t)(struct hci_dev *hdev, void *data,
+ int err);
+
+struct hci_cmd_sync_work_entry {
+ struct list_head list;
+ hci_cmd_sync_work_func_t func;
+ void *data;
+ hci_cmd_sync_work_destroy_t destroy;
+};
+
+/* Functions with the sync suffix shall not be called with hdev->lock held as
+ * they wait for the command to complete and in the meantime an event could be
+ * received which could attempt to acquire hdev->lock, causing a deadlock.
+ */
+struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout);
+struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout);
+struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout);
+struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout,
+ struct sock *sk);
+int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout);
+int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout,
+ struct sock *sk);
+
+void hci_cmd_sync_init(struct hci_dev *hdev);
+void hci_cmd_sync_clear(struct hci_dev *hdev);
+
+int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+
+int hci_update_eir_sync(struct hci_dev *hdev);
+int hci_update_class_sync(struct hci_dev *hdev);
+
+int hci_update_name_sync(struct hci_dev *hdev);
+int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode);
+
+int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
+ bool rpa, u8 *own_addr_type);
+
+int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance);
+int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance);
+int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
+ bool force);
+
+int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance);
+int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance);
+int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance);
+int hci_enable_advertising_sync(struct hci_dev *hdev);
+int hci_enable_advertising(struct hci_dev *hdev);
+
+int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
+ u8 instance, bool force);
+int hci_disable_advertising_sync(struct hci_dev *hdev);
+
+int hci_update_passive_scan_sync(struct hci_dev *hdev);
+int hci_update_passive_scan(struct hci_dev *hdev);
+int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle);
+int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type);
+int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val);
+int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp);
+
+int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable);
+int hci_update_scan_sync(struct hci_dev *hdev);
+
+int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul);
+int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
+ struct sock *sk);
+struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev, bool ext,
+ struct sock *sk);
+
+int hci_reset_sync(struct hci_dev *hdev);
+int hci_dev_open_sync(struct hci_dev *hdev);
+int hci_dev_close_sync(struct hci_dev *hdev);
+
+int hci_powered_update_sync(struct hci_dev *hdev);
+int hci_set_powered_sync(struct hci_dev *hdev, u8 val);
+
+int hci_update_discoverable_sync(struct hci_dev *hdev);
+int hci_update_discoverable(struct hci_dev *hdev);
+
+int hci_update_connectable_sync(struct hci_dev *hdev);
+
+int hci_start_discovery_sync(struct hci_dev *hdev);
+int hci_stop_discovery_sync(struct hci_dev *hdev);
+
+int hci_suspend_sync(struct hci_dev *hdev);
+int hci_resume_sync(struct hci_dev *hdev);
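+
The queueing API above pairs a work function, run later from
hdev->cmd_sync_work, with a destroy callback that owns the data's lifetime
(called with the final error once the work has run or been cancelled). A
hypothetical caller, assuming the usual driver context; the opcode, payload
and surrounding helper names are illustrative, not taken from this patch:

#include <linux/slab.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static const u8 default_event_mask[8];	/* placeholder payload */

static int set_event_mask_sync(struct hci_dev *hdev, void *data)
{
	/* Runs from the cmd_sync work; must not hold hdev->lock. */
	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK,
				     sizeof(default_event_mask), data,
				     HCI_CMD_TIMEOUT);
}

static void set_event_mask_destroy(struct hci_dev *hdev, void *data, int err)
{
	if (err)
		bt_dev_err(hdev, "set event mask failed (%d)", err);
	kfree(data);
}

static int queue_set_event_mask(struct hci_dev *hdev)
{
	u8 *mask = kmemdup(default_event_mask, sizeof(default_event_mask),
			   GFP_KERNEL);

	if (!mask)
		return -ENOMEM;

	return hci_cmd_sync_queue(hdev, set_event_mask_sync, mask,
				  set_event_mask_destroy);
}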
diff --git a/include/net/devlink.h b/include/net/devlink.h
index aab3d007c577..e3c88fabd700 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -485,6 +485,7 @@ enum devlink_param_generic_id {
DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
/* add new param generic ids above here*/
__DEVLINK_PARAM_GENERIC_ID_MAX,
@@ -534,6 +535,9 @@ enum devlink_param_generic_id {
#define DEVLINK_PARAM_GENERIC_ENABLE_VNET_NAME "enable_vnet"
#define DEVLINK_PARAM_GENERIC_ENABLE_VNET_TYPE DEVLINK_PARAM_TYPE_BOOL
+#define DEVLINK_PARAM_GENERIC_ENABLE_IWARP_NAME "enable_iwarp"
+#define DEVLINK_PARAM_GENERIC_ENABLE_IWARP_TYPE DEVLINK_PARAM_TYPE_BOOL
+
#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
{ \
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
diff --git a/include/net/gro.h b/include/net/gro.h
index 01edaf3fdda0..b1139fca7c43 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -4,9 +4,368 @@
#define _NET_IPV6_GRO_H
#include <linux/indirect_call_wrapper.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <linux/skbuff.h>
+#include <net/udp.h>
-struct list_head;
-struct sk_buff;
+struct napi_gro_cb {
+ /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
+ void *frag0;
+
+ /* Length of frag0. */
+ unsigned int frag0_len;
+
+ /* This indicates where we are processing relative to skb->data. */
+ int data_offset;
+
+ /* This is non-zero if the packet cannot be merged with the new skb. */
+ u16 flush;
+
+ /* Save the IP ID here and check when we get to the transport layer */
+ u16 flush_id;
+
+ /* Number of segments aggregated. */
+ u16 count;
+
+ /* Start offset for remote checksum offload */
+ u16 gro_remcsum_start;
+
+ /* jiffies when first packet was created/queued */
+ unsigned long age;
+
+ /* Used in ipv6_gro_receive() and foo-over-udp */
+ u16 proto;
+
+ /* This is non-zero if the packet may be of the same flow. */
+ u8 same_flow:1;
+
+ /* Used in tunnel GRO receive */
+ u8 encap_mark:1;
+
+ /* GRO checksum is valid */
+ u8 csum_valid:1;
+
+ /* Number of checksums via CHECKSUM_UNNECESSARY */
+ u8 csum_cnt:3;
+
+ /* Free the skb? */
+ u8 free:2;
+#define NAPI_GRO_FREE 1
+#define NAPI_GRO_FREE_STOLEN_HEAD 2
+
+ /* Used in foo-over-udp, set in udp[46]_gro_receive */
+ u8 is_ipv6:1;
+
+ /* Used in GRE, set in fou/gue_gro_receive */
+ u8 is_fou:1;
+
+ /* Used to determine if flush_id can be ignored */
+ u8 is_atomic:1;
+
+ /* Number of gro_receive callbacks this packet already went through */
+ u8 recursion_counter:4;
+
+ /* GRO is done by frag_list pointer chaining. */
+ u8 is_flist:1;
+
+ /* used to support CHECKSUM_COMPLETE for tunneling protocols */
+ __wsum csum;
+
+ /* used in skb_gro_receive() slow path */
+ struct sk_buff *last;
+};
+
+#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
+
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+ return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
+static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
+ struct list_head *head,
+ struct sk_buff *skb)
+{
+ if (unlikely(gro_recursion_inc_test(skb))) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(head, skb);
+}
+
+typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
+ struct sk_buff *);
+static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
+ struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb)
+{
+ if (unlikely(gro_recursion_inc_test(skb))) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(sk, head, skb);
+}
+
+static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
+{
+ return NAPI_GRO_CB(skb)->data_offset;
+}
+
+static inline unsigned int skb_gro_len(const struct sk_buff *skb)
+{
+ return skb->len - NAPI_GRO_CB(skb)->data_offset;
+}
+
+static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
+{
+ NAPI_GRO_CB(skb)->data_offset += len;
+}
+
+static inline void *skb_gro_header_fast(struct sk_buff *skb,
+ unsigned int offset)
+{
+ return NAPI_GRO_CB(skb)->frag0 + offset;
+}
+
+static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
+{
+ return NAPI_GRO_CB(skb)->frag0_len < hlen;
+}
+
+static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
+{
+ NAPI_GRO_CB(skb)->frag0 = NULL;
+ NAPI_GRO_CB(skb)->frag0_len = 0;
+}
+
+static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
+ unsigned int offset)
+{
+ if (!pskb_may_pull(skb, hlen))
+ return NULL;
+
+ skb_gro_frag0_invalidate(skb);
+ return skb->data + offset;
+}
+
+static inline void *skb_gro_network_header(struct sk_buff *skb)
+{
+ return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
+ skb_network_offset(skb);
+}
+
+static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
+{
+ const struct iphdr *iph = skb_gro_network_header(skb);
+
+ return csum_tcpudp_nofold(iph->saddr, iph->daddr,
+ skb_gro_len(skb), proto, 0);
+}
+
+static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
+ const void *start, unsigned int len)
+{
+ if (NAPI_GRO_CB(skb)->csum_valid)
+ NAPI_GRO_CB(skb)->csum = ~csum_partial(start, len,
+ ~NAPI_GRO_CB(skb)->csum);
+}
+
+/* GRO checksum functions. These are logical equivalents of the normal
+ * checksum functions (in skbuff.h) except that they operate on the GRO
+ * offsets and fields in sk_buff.
+ */
+
+__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
+
+static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
+{
+ return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
+}
+
+static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
+ bool zero_okay,
+ __sum16 check)
+{
+ return ((skb->ip_summed != CHECKSUM_PARTIAL ||
+ skb_checksum_start_offset(skb) <
+ skb_gro_offset(skb)) &&
+ !skb_at_gro_remcsum_start(skb) &&
+ NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+ (!zero_okay || check));
+}
+
+static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
+ __wsum psum)
+{
+ if (NAPI_GRO_CB(skb)->csum_valid &&
+ !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
+ return 0;
+
+ NAPI_GRO_CB(skb)->csum = psum;
+
+ return __skb_gro_checksum_complete(skb);
+}
+
+static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
+{
+ if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
+ /* Consume a checksum from CHECKSUM_UNNECESSARY */
+ NAPI_GRO_CB(skb)->csum_cnt--;
+ } else {
+ /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
+ * verified a new top level checksum or an encapsulated one
+		 * during GRO. This saves work if we fall back to the normal path.
+ */
+ __skb_incr_checksum_unnecessary(skb);
+ }
+}
+
+#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
+ compute_pseudo) \
+({ \
+ __sum16 __ret = 0; \
+ if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
+ __ret = __skb_gro_checksum_validate_complete(skb, \
+ compute_pseudo(skb, proto)); \
+ if (!__ret) \
+ skb_gro_incr_csum_unnecessary(skb); \
+ __ret; \
+})
+
+#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
+ __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
+
+#define skb_gro_checksum_validate_zero_check(skb, proto, check, \
+ compute_pseudo) \
+ __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
+
+#define skb_gro_checksum_simple_validate(skb) \
+ __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
+
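A transport gro_receive handler typically runs one of these validate macros
before aggregating and flushes the skb on failure. An illustrative sketch
modeled on the pattern the in-tree TCP handlers use; foo4_gro_receive() and
foo_gro_receive() are hypothetical names:

static struct sk_buff *foo4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	/* Validate (or complete) the checksum over the pseudo header
	 * before merging; a non-zero result means it did not check out.
	 */
	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return foo_gro_receive(head, skb);
}
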
+static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
+{
+ return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+ !NAPI_GRO_CB(skb)->csum_valid);
+}
+
+static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
+ __wsum pseudo)
+{
+ NAPI_GRO_CB(skb)->csum = ~pseudo;
+ NAPI_GRO_CB(skb)->csum_valid = 1;
+}
+
+#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \
+do { \
+ if (__skb_gro_checksum_convert_check(skb)) \
+ __skb_gro_checksum_convert(skb, \
+ compute_pseudo(skb, proto)); \
+} while (0)
+
+struct gro_remcsum {
+ int offset;
+ __wsum delta;
+};
+
+static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
+{
+ grc->offset = 0;
+ grc->delta = 0;
+}
+
+static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+ unsigned int off, size_t hdrlen,
+ int start, int offset,
+ struct gro_remcsum *grc,
+ bool nopartial)
+{
+ __wsum delta;
+ size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
+
+ BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
+
+ if (!nopartial) {
+ NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
+ return ptr;
+ }
+
+ ptr = skb_gro_header_fast(skb, off);
+ if (skb_gro_header_hard(skb, off + plen)) {
+ ptr = skb_gro_header_slow(skb, off + plen, off);
+ if (!ptr)
+ return NULL;
+ }
+
+ delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
+ start, offset);
+
+ /* Adjust skb->csum since we changed the packet */
+ NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+
+ grc->offset = off + hdrlen + offset;
+ grc->delta = delta;
+
+ return ptr;
+}
+
+static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
+ struct gro_remcsum *grc)
+{
+ void *ptr;
+ size_t plen = grc->offset + sizeof(u16);
+
+ if (!grc->delta)
+ return;
+
+ ptr = skb_gro_header_fast(skb, grc->offset);
+ if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
+ ptr = skb_gro_header_slow(skb, plen, grc->offset);
+ if (!ptr)
+ return;
+ }
+
+ remcsum_unadjust((__sum16 *)ptr, grc->delta);
+}
+
+#ifdef CONFIG_XFRM_OFFLOAD
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
+{
+ if (PTR_ERR(pp) != -EINPROGRESS)
+ NAPI_GRO_CB(skb)->flush |= flush;
+}
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+ struct sk_buff *pp,
+ int flush,
+ struct gro_remcsum *grc)
+{
+ if (PTR_ERR(pp) != -EINPROGRESS) {
+ NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_remcsum_cleanup(skb, grc);
+ skb->remcsum_offload = 0;
+ }
+}
+#else
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
+{
+ NAPI_GRO_CB(skb)->flush |= flush;
+}
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+ struct sk_buff *pp,
+ int flush,
+ struct gro_remcsum *grc)
+{
+ NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_remcsum_cleanup(skb, grc);
+ skb->remcsum_offload = 0;
+}
+#endif
INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
struct sk_buff *));
@@ -15,6 +374,14 @@ INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));
+
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
+
#define indirect_call_gro_receive_inet(cb, f2, f1, head, skb) \
({ \
unlikely(gro_recursion_inc_test(skb)) ? \
@@ -22,4 +389,54 @@ INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
INDIRECT_CALL_INET(cb, f2, f1, head, skb); \
})
+struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ struct udphdr *uh, struct sock *sk);
+int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
+
+static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
+{
+ struct udphdr *uh;
+ unsigned int hlen, off;
+
+ off = skb_gro_offset(skb);
+ hlen = off + sizeof(*uh);
+ uh = skb_gro_header_fast(skb, off);
+ if (skb_gro_header_hard(skb, hlen))
+ uh = skb_gro_header_slow(skb, hlen, off);
+
+ return uh;
+}
+
+static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
+{
+ const struct ipv6hdr *iph = skb_gro_network_header(skb);
+
+ return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
+ skb_gro_len(skb), proto, 0));
+}
+
+int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
+
+/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
+static inline void gro_normal_list(struct napi_struct *napi)
+{
+ if (!napi->rx_count)
+ return;
+ netif_receive_skb_list_internal(&napi->rx_list);
+ INIT_LIST_HEAD(&napi->rx_list);
+ napi->rx_count = 0;
+}
+
+/* Queue one GRO_NORMAL SKB up for list processing. If the batch size is
+ * exceeded, pass the whole batch up to the stack.
+ */
+static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
+{
+ list_add_tail(&skb->list, &napi->rx_list);
+ napi->rx_count += segs;
+ if (napi->rx_count >= gro_normal_batch)
+ gro_normal_list(napi);
+}
+
#endif /* _NET_IPV6_GRO_H */
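
gro_normal_one() and gro_normal_list() implement a simple segment-count
batch: skbs accumulate on napi->rx_list until the aggregated gso_segs count
reaches gro_normal_batch, then the whole list is handed to the stack in one
netif_receive_skb_list_internal() call. A userspace toy of that rule; the
names and batch size are illustrative:

#include <stdio.h>

#define BATCH 8	/* stands in for gro_normal_batch */

static int rx_count;

static void flush_batch(void)	/* cf. gro_normal_list() */
{
	if (!rx_count)
		return;
	printf("flushing batch of %d segs\n", rx_count);
	rx_count = 0;
}

static void queue_one(int segs)	/* cf. gro_normal_one() */
{
	rx_count += segs;
	if (rx_count >= BATCH)
		flush_batch();
}

int main(void)
{
	queue_one(2);	/* batched */
	queue_one(3);	/* batched */
	queue_one(4);	/* 9 >= 8: flushes */
	queue_one(1);	/* stays pending ... */
	flush_batch();	/* ... until the end of the napi poll */
	return 0;
}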
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index fa6a87246a7b..4ad47d9f9d27 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -304,7 +304,7 @@ static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
(EPOLLIN | EPOLLRDNORM) : 0;
}
-int inet_csk_listen_start(struct sock *sk, int backlog);
+int inet_csk_listen_start(struct sock *sk);
void inet_csk_listen_stop(struct sock *sk);
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 9e1111f5915b..234d70ae5f4c 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -372,4 +372,16 @@ static inline bool inet_can_nonlocal_bind(struct net *net,
inet->freebind || inet->transparent;
}
+static inline bool inet_addr_valid_or_nonlocal(struct net *net,
+ struct inet_sock *inet,
+ __be32 addr,
+ int addr_type)
+{
+ return inet_can_nonlocal_bind(net, inet) ||
+ addr == htonl(INADDR_ANY) ||
+ addr_type == RTN_LOCAL ||
+ addr_type == RTN_MULTICAST ||
+ addr_type == RTN_BROADCAST;
+}
+
#endif /* _INET_SOCK_H */
diff --git a/include/net/ip.h b/include/net/ip.h
index b71e88507c4a..81e23a102a0d 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -568,14 +568,6 @@ static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}
-static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
-{
- const struct iphdr *iph = skb_gro_network_header(skb);
-
- return csum_tcpudp_nofold(iph->saddr, iph->daddr,
- skb_gro_len(skb), proto, 0);
-}
-
/*
* Map a multicast IP onto multicast MAC for type ethernet.
*/
@@ -791,5 +783,6 @@ int ip_sock_set_mtu_discover(struct sock *sk, int val);
void ip_sock_set_pktinfo(struct sock *sk);
void ip_sock_set_recverr(struct sock *sk);
void ip_sock_set_tos(struct sock *sk, int val);
+void __ip_sock_set_tos(struct sock *sk, int val);
#endif /* _IP_H */
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
index b3f4eaa88672..c8a96b888277 100644
--- a/include/net/ip6_checksum.h
+++ b/include/net/ip6_checksum.h
@@ -43,14 +43,6 @@ static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto)
skb->len, proto, 0));
}
-static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
-{
- const struct ipv6hdr *iph = skb_gro_network_header(skb);
-
- return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
- skb_gro_len(skb), proto, 0));
-}
-
static __inline__ __sum16 tcp_v6_check(int len,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
@@ -65,15 +57,9 @@ static inline void __tcp_v6_send_check(struct sk_buff *skb,
{
struct tcphdr *th = tcp_hdr(skb);
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct tcphdr, check);
- } else {
- th->check = tcp_v6_check(skb->len, saddr, daddr,
- csum_partial(th, th->doff << 2,
- skb->csum));
- }
+ th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
}
static inline void tcp_v6_gso_csum_prep(struct sk_buff *skb)
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 5efd0b71dc67..ca2d6b60e1ec 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -263,19 +263,19 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *));
-static inline unsigned int ip6_skb_dst_mtu(struct sk_buff *skb)
+static inline unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb)
{
- unsigned int mtu;
-
- struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
+ const struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
inet6_sk(skb->sk) : NULL;
+ const struct dst_entry *dst = skb_dst(skb);
+ unsigned int mtu;
if (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) {
- mtu = READ_ONCE(skb_dst(skb)->dev->mtu);
- mtu -= lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
- } else
- mtu = dst_mtu(skb_dst(skb));
-
+ mtu = READ_ONCE(dst->dev->mtu);
+ mtu -= lwtunnel_headroom(dst->lwtstate, mtu);
+ } else {
+ mtu = dst_mtu(dst);
+ }
return mtu;
}
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index c19bf51ded1d..53ac7707ca70 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -345,9 +345,9 @@ struct ipcm6_cookie {
struct sockcm_cookie sockc;
__s16 hlimit;
__s16 tclass;
+ __u16 gso_size;
__s8 dontfrag;
struct ipv6_txoptions *opt;
- __u16 gso_size;
};
static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index ff06246dbbb9..df85d19fbf84 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -112,10 +112,12 @@ enum iucv_tx_notify {
struct iucv_sock {
struct sock sk;
- char src_user_id[8];
- char src_name[8];
- char dst_user_id[8];
- char dst_name[8];
+ struct_group(init,
+ char src_user_id[8];
+ char src_name[8];
+ char dst_user_id[8];
+ char dst_name[8];
+ );
struct list_head accept_q;
spinlock_t accept_q_lock;
struct sock *parent;
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 04341d86585d..53cb8de0e589 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -411,13 +411,7 @@ static inline void __ipv6_confirm_neigh(struct net_device *dev,
rcu_read_lock_bh();
n = __ipv6_neigh_lookup_noref(dev, pkey);
- if (n) {
- unsigned long now = jiffies;
-
- /* avoid dirtying neighbour */
- if (READ_ONCE(n->confirmed) != now)
- WRITE_ONCE(n->confirmed, now);
- }
+ neigh_confirm(n);
rcu_read_unlock_bh();
}
@@ -428,13 +422,7 @@ static inline void __ipv6_confirm_neigh_stub(struct net_device *dev,
rcu_read_lock_bh();
n = __ipv6_neigh_lookup_noref_stub(dev, pkey);
- if (n) {
- unsigned long now = jiffies;
-
- /* avoid dirtying neighbour */
- if (READ_ONCE(n->confirmed) != now)
- WRITE_ONCE(n->confirmed, now);
- }
+ neigh_confirm(n);
rcu_read_unlock_bh();
}
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 38a0c1d24570..55af812c3ed5 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -321,6 +321,17 @@ static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl,
return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev);
}
+static inline void neigh_confirm(struct neighbour *n)
+{
+ if (n) {
+ unsigned long now = jiffies;
+
+ /* avoid dirtying neighbour */
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
+ }
+}
+
void neigh_table_init(int index, struct neigh_table *tbl);
int neigh_table_clear(int index, struct neigh_table *tbl);
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
diff --git a/include/net/netns/core.h b/include/net/netns/core.h
index 36c2d998a43c..552bc25b1933 100644
--- a/include/net/netns/core.h
+++ b/include/net/netns/core.h
@@ -12,7 +12,6 @@ struct netns_core {
int sysctl_somaxconn;
#ifdef CONFIG_PROC_FS
- int __percpu *sock_inuse;
struct prot_inuse __percpu *prot_inuse;
#endif
};
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 899c29c326ba..e7d1ed7a3dfb 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -984,12 +984,10 @@ struct sctp_transport {
} cacc;
struct {
- __u32 last_rtx_chunks;
__u16 pmtu;
__u16 probe_size;
__u16 probe_high;
- __u8 probe_count:3;
- __u8 raise_count:5;
+ __u8 probe_count;
__u8 state;
} pl; /* plpmtud related */
@@ -1011,6 +1009,7 @@ void sctp_transport_reset_t3_rtx(struct sctp_transport *);
void sctp_transport_reset_hb_timer(struct sctp_transport *);
void sctp_transport_reset_reconf_timer(struct sctp_transport *transport);
void sctp_transport_reset_probe_timer(struct sctp_transport *transport);
+void sctp_transport_reset_raise_timer(struct sctp_transport *transport);
int sctp_transport_hold(struct sctp_transport *);
void sctp_transport_put(struct sctp_transport *);
void sctp_transport_update_rto(struct sctp_transport *, __u32);
@@ -1025,7 +1024,7 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
void sctp_transport_immediate_rtx(struct sctp_transport *);
void sctp_transport_dst_release(struct sctp_transport *t);
void sctp_transport_dst_confirm(struct sctp_transport *t);
-bool sctp_transport_pl_send(struct sctp_transport *t);
+void sctp_transport_pl_send(struct sctp_transport *t);
bool sctp_transport_pl_recv(struct sctp_transport *t);
diff --git a/include/net/sock.h b/include/net/sock.h
index b32906e1ab55..e7c231373875 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -63,6 +63,7 @@
#include <linux/indirect_call_wrapper.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
+#include <linux/llist.h>
#include <net/dst.h>
#include <net/checksum.h>
#include <net/tcp_states.h>
@@ -284,15 +285,14 @@ struct bpf_local_storage;
* @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
* @sk_no_check_rx: allow zero checksum in RX packets
* @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
- * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
- * @sk_route_forced_caps: static, forced route capabilities
- * (set in tcp_init_sock())
+ * @sk_gso_disabled: if set, NETIF_F_GSO_MASK is forbidden.
* @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
* @sk_gso_max_size: Maximum GSO segment size to build
* @sk_gso_max_segs: Maximum number of GSO segments
* @sk_pacing_shift: scaling factor for TCP Small Queues
* @sk_lingertime: %SO_LINGER l_linger setting
* @sk_backlog: always used with the per-socket spinlock held
+ * @defer_list: head of llist storing skbs to be freed
* @sk_callback_lock: used with the callbacks in the end of this struct
* @sk_error_queue: rarely used
* @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
@@ -391,6 +391,11 @@ struct sock {
#define sk_flags __sk_common.skc_flags
#define sk_rxhash __sk_common.skc_rxhash
+ /* early demux fields */
+ struct dst_entry *sk_rx_dst;
+ int sk_rx_dst_ifindex;
+ u32 sk_rx_dst_cookie;
+
socket_lock_t sk_lock;
atomic_t sk_drops;
int sk_rcvlowat;
@@ -410,6 +415,8 @@ struct sock {
struct sk_buff *head;
struct sk_buff *tail;
} sk_backlog;
+ struct llist_head defer_list;
+
#define sk_rmem_alloc sk_backlog.rmem_alloc
int sk_forward_alloc;
@@ -431,9 +438,6 @@ struct sock {
#ifdef CONFIG_XFRM
struct xfrm_policy __rcu *sk_policy[2];
#endif
- struct dst_entry *sk_rx_dst;
- int sk_rx_dst_ifindex;
- u32 sk_rx_dst_cookie;
struct dst_entry __rcu *sk_dst_cache;
atomic_t sk_omem_alloc;
@@ -460,8 +464,6 @@ struct sock {
unsigned long sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
- netdev_features_t sk_route_nocaps;
- netdev_features_t sk_route_forced_caps;
int sk_gso_type;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
@@ -471,7 +473,7 @@ struct sock {
* Because of non atomicity rules, all
* changes are protected by socket lock.
*/
- u8 sk_padding : 1,
+ u8 sk_gso_disabled : 1,
sk_kern_sock : 1,
sk_no_check_tx : 1,
sk_no_check_rx : 1,
@@ -493,6 +495,7 @@ struct sock {
u16 sk_busy_poll_budget;
#endif
spinlock_t sk_peer_lock;
+ int sk_bind_phc;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
@@ -502,7 +505,6 @@ struct sock {
seqlock_t sk_stamp_seq;
#endif
u16 sk_tsflags;
- int sk_bind_phc;
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
@@ -1022,12 +1024,18 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+INDIRECT_CALLABLE_DECLARE(int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb));
+INDIRECT_CALLABLE_DECLARE(int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb));
+
static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
if (sk_memalloc_socks() && skb_pfmemalloc(skb))
return __sk_backlog_rcv(sk, skb);
- return sk->sk_backlog_rcv(sk, skb);
+ return INDIRECT_CALL_INET(sk->sk_backlog_rcv,
+ tcp_v6_do_rcv,
+ tcp_v4_do_rcv,
+ sk, skb);
}
static inline void sk_incoming_cpu_update(struct sock *sk)
@@ -1210,7 +1218,9 @@ struct proto {
unsigned int inuse_idx;
#endif
+#if IS_ENABLED(CONFIG_MPTCP)
int (*forward_alloc_get)(const struct sock *sk);
+#endif
bool (*stream_memory_free)(const struct sock *sk, int wake);
bool (*sock_is_readable)(struct sock *sk);
@@ -1299,10 +1309,11 @@ INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int
static inline int sk_forward_alloc_get(const struct sock *sk)
{
- if (!sk->sk_prot->forward_alloc_get)
- return sk->sk_forward_alloc;
-
- return sk->sk_prot->forward_alloc_get(sk);
+#if IS_ENABLED(CONFIG_MPTCP)
+ if (sk->sk_prot->forward_alloc_get)
+ return sk->sk_prot->forward_alloc_get(sk);
+#endif
+ return sk->sk_forward_alloc;
}
static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
@@ -1419,13 +1430,32 @@ proto_memory_pressure(struct proto *prot)
#ifdef CONFIG_PROC_FS
-/* Called with local bh disabled */
-void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
+#define PROTO_INUSE_NR	64	/* should be more than enough for now */
+struct prot_inuse {
+ int all;
+ int val[PROTO_INUSE_NR];
+};
+
+static inline void sock_prot_inuse_add(const struct net *net,
+ const struct proto *prot, int val)
+{
+ this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
+}
+
+static inline void sock_inuse_add(const struct net *net, int val)
+{
+ this_cpu_add(net->core.prot_inuse->all, val);
+}
+
int sock_prot_inuse_get(struct net *net, struct proto *proto);
int sock_inuse_get(struct net *net);
#else
-static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
- int inc)
+static inline void sock_prot_inuse_add(const struct net *net,
+ const struct proto *prot, int val)
+{
+}
+
+static inline void sock_inuse_add(const struct net *net, int val)
{
}
#endif
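
With sock_prot_inuse_add() inlined, the write side is a single
this_cpu_add(); the read side (sock_prot_inuse_get(), still out of line)
presumably sums every CPU's slot, so an individual slot can go negative
while the total stays correct. A userspace sketch of that scheme, with a
plain array standing in for per-CPU data:

#include <stdio.h>

#define NR_CPUS 4

static int prot_inuse[NR_CPUS];

static void inuse_add(int cpu, int val)
{
	prot_inuse[cpu] += val;	/* this_cpu_add(): no shared cacheline */
}

static int inuse_get(void)
{
	int cpu, sum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += prot_inuse[cpu];	/* slots may be transiently negative */
	return sum;
}

int main(void)
{
	inuse_add(0, 1);	/* socket created on CPU 0 */
	inuse_add(2, -1);	/* same socket destroyed on CPU 2 */
	printf("inuse=%d\n", inuse_get());	/* 0: only the sum matters */
	return 0;
}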
@@ -2105,13 +2135,10 @@ static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
{
if (skb_get_dst_pending_confirm(skb)) {
struct sock *sk = skb->sk;
- unsigned long now = jiffies;
- /* avoid dirtying neighbour */
- if (READ_ONCE(n->confirmed) != now)
- WRITE_ONCE(n->confirmed, now);
if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+ neigh_confirm(n);
}
}
@@ -2124,10 +2151,10 @@ static inline bool sk_can_gso(const struct sock *sk)
void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
-static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
+static inline void sk_gso_disable(struct sock *sk)
{
- sk->sk_route_nocaps |= flags;
- sk->sk_route_caps &= ~flags;
+ sk->sk_gso_disabled = 1;
+ sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
}
static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
@@ -2638,6 +2665,11 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
&skb_shinfo(skb)->tskey);
}
+static inline bool sk_is_tcp(const struct sock *sk)
+{
+ return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
+}
+
/**
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 4da22b41bde6..44e442bf23f9 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1368,6 +1368,16 @@ static inline bool tcp_checksum_complete(struct sk_buff *skb)
}
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
+
+void __sk_defer_free_flush(struct sock *sk);
+
+static inline void sk_defer_free_flush(struct sock *sk)
+{
+ if (llist_empty(&sk->defer_list))
+ return;
+ __sk_defer_free_flush(sk);
+}
+
int tcp_filter(struct sock *sk, struct sk_buff *skb);
void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
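
sk_defer_free_flush() is split so the common case costs one inline emptiness
check on the lock-free defer_list; only a non-empty list takes the
out-of-line __sk_defer_free_flush() path. A userspace sketch of the
defer-then-flush pattern, using C11 atomics in place of the kernel's llist:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int id;
};

static _Atomic(struct node *) defer_list;

static void defer_free(struct node *n)	/* producer side */
{
	n->next = atomic_load(&defer_list);
	while (!atomic_compare_exchange_weak(&defer_list, &n->next, n))
		;	/* retry; expected was refreshed on failure */
}

static void defer_flush(void)	/* owner side, cf. sk_defer_free_flush() */
{
	struct node *n = atomic_exchange(&defer_list, NULL);

	while (n) {	/* nothing to do when the list was empty */
		struct node *next = n->next;

		printf("freeing %d\n", n->id);
		free(n);
		n = next;
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->id = i;
		defer_free(n);
	}
	defer_flush();
	return 0;
}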
@@ -2172,9 +2182,13 @@ static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
u16 segs_in;
segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
- tp->segs_in += segs_in;
+
+ /* We update these fields while other threads might
+ * read them from tcp_get_info()
+ */
+ WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
if (skb->len > tcp_hdrlen(skb))
- tp->data_segs_in += segs_in;
+ WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
}
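
The WRITE_ONCE() stores are needed because tcp_get_info() reads segs_in and
data_segs_in without the socket lock; the annotation stops the compiler from
tearing or re-staging the store. A sketch of the matching lockless read side
(a fragment, assuming struct tcp_sock from <linux/tcp.h>):

static u32 tcp_segs_in_snapshot(const struct tcp_sock *tp)
{
	/* Single load, paired with the WRITE_ONCE() in tcp_segs_in();
	 * may race with the writer but always sees a complete value.
	 */
	return READ_ONCE(tp->segs_in);
}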
/*
diff --git a/include/net/udp.h b/include/net/udp.h
index 909ecf447e0f..f1c2a88c9005 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -167,36 +167,12 @@ static inline void udp_csum_pull_header(struct sk_buff *skb)
typedef struct sock *(*udp_lookup_t)(const struct sk_buff *skb, __be16 sport,
__be16 dport);
-INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
- struct sk_buff *));
-INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));
-INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
- struct sk_buff *));
-INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(void udp_v6_early_demux(struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));
-struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
- struct udphdr *uh, struct sock *sk);
-int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
-
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
netdev_features_t features, bool is_ipv6);
-static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
-{
- struct udphdr *uh;
- unsigned int hlen, off;
-
- off = skb_gro_offset(skb);
- hlen = off + sizeof(*uh);
- uh = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, hlen))
- uh = skb_gro_header_slow(skb, hlen, off);
-
- return uh;
-}
-
/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
static inline int udp_lib_hash(struct sock *sk)
{
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index fef3a36b0210..33f2e8c9e88b 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -555,12 +555,26 @@ struct ocelot_ops {
u16 (*wm_enc)(u16 value);
u16 (*wm_dec)(u16 value);
void (*wm_stat)(u32 val, u32 *inuse, u32 *maxuse);
+ void (*psfp_init)(struct ocelot *ocelot);
+ int (*psfp_filter_add)(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f);
+ int (*psfp_filter_del)(struct ocelot *ocelot, struct flow_cls_offload *f);
+ int (*psfp_stats_get)(struct ocelot *ocelot, struct flow_cls_offload *f,
+ struct flow_stats *stats);
+ void (*cut_through_fwd)(struct ocelot *ocelot);
+};
+
+struct ocelot_vcap_policer {
+ struct list_head pol_list;
+ u16 base;
+ u16 max;
+ u16 base2;
+ u16 max2;
};
struct ocelot_vcap_block {
struct list_head rules;
int count;
- int pol_lpr;
};
struct ocelot_bridge_vlan {
@@ -581,6 +595,12 @@ enum ocelot_port_tag_config {
OCELOT_PORT_TAG_TRUNK = 3,
};
+struct ocelot_psfp_list {
+ struct list_head stream_list;
+ struct list_head sfi_list;
+ struct list_head sgi_list;
+};
+
enum ocelot_sb {
OCELOT_SB_BUF,
OCELOT_SB_REF,
@@ -593,6 +613,19 @@ enum ocelot_sb_pool {
OCELOT_SB_POOL_NUM,
};
+/* MAC table entry types.
+ * ENTRYTYPE_NORMAL is subject to aging.
+ * ENTRYTYPE_LOCKED is not subject to aging.
+ * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast.
+ * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast.
+ */
+enum macaccess_entry_type {
+ ENTRYTYPE_NORMAL = 0,
+ ENTRYTYPE_LOCKED,
+ ENTRYTYPE_MACv4,
+ ENTRYTYPE_MACv6,
+};
+
#define OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION BIT(0)
#define OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP BIT(1)
@@ -623,6 +656,8 @@ struct ocelot_port {
struct net_device *bridge;
u8 stp_state;
+
+ int speed;
};
struct ocelot {
@@ -667,8 +702,11 @@ struct ocelot {
struct list_head dummy_rules;
struct ocelot_vcap_block block[3];
+ struct ocelot_vcap_policer vcap_pol;
struct vcap_props *vcap;
+ struct ocelot_psfp_list psfp;
+
/* Workqueue to check statistics for overflow with its lock */
struct mutex stats_lock;
u64 *stats;
@@ -677,6 +715,8 @@ struct ocelot {
/* Lock for serializing access to the MAC table */
struct mutex mact_lock;
+ /* Lock for serializing forwarding domain changes */
+ struct mutex fwd_domain_lock;
struct workqueue_struct *owq;
@@ -776,7 +816,9 @@ void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs);
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled,
struct netlink_ext_ack *extack);
void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state);
-void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot);
+u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot);
+u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port);
+void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining);
int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port,
struct switchdev_brport_flags val);
void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
@@ -870,6 +912,19 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
bool tx_pause, bool rx_pause,
unsigned long quirks);
+int ocelot_mact_lookup(struct ocelot *ocelot, int *dst_idx,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type *type);
+int ocelot_mact_learn_streamdata(struct ocelot *ocelot, int dst_idx,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type,
+ int sfid, int ssid);
+
+int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
+ struct ocelot_policer *pol);
+int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix);
+
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
int ocelot_mrp_add(struct ocelot *ocelot, int port,
const struct switchdev_obj_mrp *mrp);
diff --git a/include/soc/mscc/ocelot_ana.h b/include/soc/mscc/ocelot_ana.h
index 1669481d9779..67e0ae05a5ab 100644
--- a/include/soc/mscc/ocelot_ana.h
+++ b/include/soc/mscc/ocelot_ana.h
@@ -227,6 +227,11 @@
#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(x) ((x) & GENMASK(1, 0))
#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M GENMASK(1, 0)
+#define SFIDACCESS_CMD_IDLE 0
+#define SFIDACCESS_CMD_READ 1
+#define SFIDACCESS_CMD_WRITE 2
+#define SFIDACCESS_CMD_INIT 3
+
#define ANA_TABLES_SFIDTIDX_SGID_VALID BIT(26)
#define ANA_TABLES_SFIDTIDX_SGID(x) (((x) << 18) & GENMASK(25, 18))
#define ANA_TABLES_SFIDTIDX_SGID_M GENMASK(25, 18)
@@ -255,6 +260,11 @@
#define ANA_SG_CONFIG_REG_3_INIT_IPS(x) (((x) << 21) & GENMASK(24, 21))
#define ANA_SG_CONFIG_REG_3_INIT_IPS_M GENMASK(24, 21)
#define ANA_SG_CONFIG_REG_3_INIT_IPS_X(x) (((x) & GENMASK(24, 21)) >> 21)
+#define ANA_SG_CONFIG_REG_3_IPV_VALID BIT(24)
+#define ANA_SG_CONFIG_REG_3_IPV_INVALID(x) (((x) << 24) & GENMASK(24, 24))
+#define ANA_SG_CONFIG_REG_3_INIT_IPV(x) (((x) << 21) & GENMASK(23, 21))
+#define ANA_SG_CONFIG_REG_3_INIT_IPV_M GENMASK(23, 21)
+#define ANA_SG_CONFIG_REG_3_INIT_IPV_X(x) (((x) & GENMASK(23, 21)) >> 21)
#define ANA_SG_CONFIG_REG_3_INIT_GATE_STATE BIT(25)
#define ANA_SG_GCL_GS_CONFIG_RSZ 0x4
diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h
index 4d1dfa1136b2..709cbc198fd2 100644
--- a/include/soc/mscc/ocelot_vcap.h
+++ b/include/soc/mscc/ocelot_vcap.h
@@ -656,6 +656,7 @@ enum ocelot_vcap_filter_type {
OCELOT_VCAP_FILTER_DUMMY,
OCELOT_VCAP_FILTER_PAG,
OCELOT_VCAP_FILTER_OFFLOAD,
+ OCELOT_PSFP_FILTER_OFFLOAD,
};
struct ocelot_vcap_id {
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index ba5af15e25f5..6297eafdc40f 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -4938,6 +4938,25 @@ union bpf_attr {
* **-ENOENT** if symbol is not found.
*
* **-EPERM** if caller does not have permission to obtain kernel address.
+ *
+ * long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags)
+ * Description
+ *		Find the vma of *task* that contains *addr*, and call the
+ *		*callback_fn* function with *task*, *vma*, and *callback_ctx*.
+ *		The *callback_fn* should be a static function and
+ *		the *callback_ctx* should be a pointer to the stack.
+ *		*flags* is used to control certain aspects of the helper.
+ *		Currently, *flags* must be 0.
+ *
+ * The expected callback signature is
+ *
+ * long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx);
+ *
+ * Return
+ * 0 on success.
+ * **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*.
+ *		**-EBUSY** if it failed to trylock mmap_lock.
+ * **-EINVAL** for invalid **flags**.
*/
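
A hypothetical BPF-side user of the new helper, following the contract above
(static callback, stack-resident callback_ctx, flags == 0); the section name
and the target_addr variable are illustrative, built against libbpf-style
headers:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

const volatile __u64 target_addr;	/* filled in by the loader */

struct cb_ctx {
	__u64 vm_start;
};

/* Must be a static function, per the helper description above. */
static long vma_cb(struct task_struct *task, struct vm_area_struct *vma,
		   struct cb_ctx *ctx)
{
	ctx->vm_start = vma->vm_start;
	return 0;
}

SEC("fentry/do_nanosleep")
int BPF_PROG(find_target_vma)
{
	struct task_struct *task = bpf_get_current_task_btf();
	struct cb_ctx ctx = {};	/* callback_ctx must live on the BPF stack */

	bpf_find_vma(task, target_addr, vma_cb, &ctx, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";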
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5120,6 +5139,7 @@ union bpf_attr {
FN(trace_vprintk), \
FN(skc_to_unix_sock), \
FN(kallsyms_lookup_name), \
+ FN(find_vma), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -6296,6 +6316,7 @@ struct bpf_sk_lookup {
__u32 local_ip4; /* Network byte order */
__u32 local_ip6[4]; /* Network byte order */
__u32 local_port; /* Host byte order */
+ __u32 ingress_ifindex; /* The arriving interface. Determined by inet_iif. */
};
/*
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index deb12f755f0f..b0d8fea1951d 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -43,7 +43,7 @@ struct btf_type {
* "size" tells the size of the type it is describing.
*
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
- * FUNC, FUNC_PROTO, VAR and DECL_TAG.
+ * FUNC, FUNC_PROTO, VAR, DECL_TAG and TYPE_TAG.
* "type" is a type_id referring to another type.
*/
union {
@@ -75,6 +75,7 @@ enum {
BTF_KIND_DATASEC = 15, /* Section */
BTF_KIND_FLOAT = 16, /* Floating point */
BTF_KIND_DECL_TAG = 17, /* Decl Tag */
+ BTF_KIND_TYPE_TAG = 18, /* Type Tag */
NR_BTF_KINDS,
BTF_KIND_MAX = NR_BTF_KINDS - 1,
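
BTF_KIND_TYPE_TAG encodes an arbitrary tag attached to a pointee type, much
like the existing modifiers (const, volatile, restrict). Newer clang can emit
it for __attribute__((btf_type_tag("..."))); a hypothetical illustration:

#define __tagged_user __attribute__((btf_type_tag("user")))

/* The pointee type of @p carries a BTF_KIND_TYPE_TAG named "user". */
int copy_from(void __tagged_user *p, int len);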
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index a2223b685451..7bc4b8def12c 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -231,6 +231,7 @@ enum tunable_id {
ETHTOOL_RX_COPYBREAK,
ETHTOOL_TX_COPYBREAK,
ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
+ ETHTOOL_TX_COPYBREAK_BUF_SIZE,
/*
* Add your fresh new tunable attribute above and remember to update
* tunable_strings[] in net/ethtool/common.c
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 999777d32dcf..cca6e474a085 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -329,6 +329,7 @@ enum {
ETHTOOL_A_RINGS_RX_MINI, /* u32 */
ETHTOOL_A_RINGS_RX_JUMBO, /* u32 */
ETHTOOL_A_RINGS_TX, /* u32 */
+ ETHTOOL_A_RINGS_RX_BUF_LEN, /* u32 */
/* add new constants above here */
__ETHTOOL_A_RINGS_CNT,
diff --git a/include/uapi/linux/tty.h b/include/uapi/linux/tty.h
index 376cccf397be..a58deb3061eb 100644
--- a/include/uapi/linux/tty.h
+++ b/include/uapi/linux/tty.h
@@ -38,5 +38,6 @@
#define N_NCI 25 /* NFC NCI UART */
#define N_SPEAKUP 26 /* Speakup communication with synths */
#define N_NULL 27 /* Null ldisc used for error handling */
+#define N_MCTP 28 /* MCTP-over-serial */
#endif /* _UAPI_LINUX_TTY_H */
diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
index ebfa8bc90892..bb69aea1a777 100644
--- a/kernel/bpf/bpf_task_storage.c
+++ b/kernel/bpf/bpf_task_storage.c
@@ -323,7 +323,7 @@ const struct bpf_func_proto bpf_task_storage_get_proto = {
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
- .arg2_btf_id = &btf_task_struct_ids[0],
+ .arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
.arg4_type = ARG_ANYTHING,
};
@@ -334,5 +334,5 @@ const struct bpf_func_proto bpf_task_storage_delete_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
- .arg2_btf_id = &btf_task_struct_ids[0],
+ .arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index dbc3ad07e21b..6b9d23be1e99 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -282,6 +282,7 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = {
[BTF_KIND_DATASEC] = "DATASEC",
[BTF_KIND_FLOAT] = "FLOAT",
[BTF_KIND_DECL_TAG] = "DECL_TAG",
+ [BTF_KIND_TYPE_TAG] = "TYPE_TAG",
};
const char *btf_type_str(const struct btf_type *t)
@@ -418,6 +419,7 @@ static bool btf_type_is_modifier(const struct btf_type *t)
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
+ case BTF_KIND_TYPE_TAG:
return true;
}
@@ -1737,6 +1739,7 @@ __btf_resolve_size(const struct btf *btf, const struct btf_type *type,
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
+ case BTF_KIND_TYPE_TAG:
id = type->type;
type = btf_type_by_id(btf, type->type);
break;
@@ -2345,6 +2348,8 @@ static int btf_ref_type_check_meta(struct btf_verifier_env *env,
const struct btf_type *t,
u32 meta_left)
{
+ const char *value;
+
if (btf_type_vlen(t)) {
btf_verifier_log_type(env, t, "vlen != 0");
return -EINVAL;
@@ -2360,7 +2365,7 @@ static int btf_ref_type_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
- /* typedef type must have a valid name, and other ref types,
+ /* typedef/type_tag type must have a valid name, and other ref types,
* volatile, const, restrict, should have a null name.
*/
if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
@@ -2369,6 +2374,12 @@ static int btf_ref_type_check_meta(struct btf_verifier_env *env,
btf_verifier_log_type(env, t, "Invalid name");
return -EINVAL;
}
+ } else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) {
+ value = btf_name_by_offset(env->btf, t->name_off);
+ if (!value || !value[0]) {
+ btf_verifier_log_type(env, t, "Invalid name");
+ return -EINVAL;
+ }
} else {
if (t->name_off) {
btf_verifier_log_type(env, t, "Invalid name");
@@ -4059,6 +4070,7 @@ static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
[BTF_KIND_DATASEC] = &datasec_ops,
[BTF_KIND_FLOAT] = &float_ops,
[BTF_KIND_DECL_TAG] = &decl_tag_ops,
+ [BTF_KIND_TYPE_TAG] = &modifier_ops,
};
static s32 btf_check_meta(struct btf_verifier_env *env,
@@ -6342,7 +6354,10 @@ const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
.arg4_type = ARG_ANYTHING,
};
-BTF_ID_LIST_GLOBAL_SINGLE(btf_task_struct_ids, struct, task_struct)
+BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
+#define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type)
+BTF_TRACING_TYPE_xxx
+#undef BTF_TRACING_TYPE
/* BTF ID set registration API for modules */
diff --git a/kernel/bpf/mmap_unlock_work.h b/kernel/bpf/mmap_unlock_work.h
new file mode 100644
index 000000000000..5d18d7d85bef
--- /dev/null
+++ b/kernel/bpf/mmap_unlock_work.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2021 Facebook
+ */
+
+#ifndef __MMAP_UNLOCK_WORK_H__
+#define __MMAP_UNLOCK_WORK_H__
+#include <linux/irq_work.h>
+
+/* per-CPU irq_work used to run mmap_read_unlock() from a safe context */
+struct mmap_unlock_irq_work {
+ struct irq_work irq_work;
+ struct mm_struct *mm;
+};
+
+DECLARE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);
+
+/*
+ * We cannot do mmap_read_unlock() when irqs are disabled, because of the
+ * risk of deadlocking with rq_lock. To look up a vma while irqs are
+ * disabled, we need to run mmap_read_unlock() from irq_work instead. We
+ * use a percpu variable for that irq_work. If the irq_work is already in
+ * use by another lookup, the caller must fall back.
+ */
+static inline bool bpf_mmap_unlock_get_irq_work(struct mmap_unlock_irq_work **work_ptr)
+{
+ struct mmap_unlock_irq_work *work = NULL;
+ bool irq_work_busy = false;
+
+ if (irqs_disabled()) {
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ work = this_cpu_ptr(&mmap_unlock_work);
+ if (irq_work_is_busy(&work->irq_work)) {
+				/* cannot queue another mmap_read_unlock(), fall back */
+ irq_work_busy = true;
+ }
+ } else {
+ /*
+			 * PREEMPT_RT does not allow trylocking mmap_sem in
+			 * interrupt-disabled context. Force the fallback code.
+ */
+ irq_work_busy = true;
+ }
+ }
+
+ *work_ptr = work;
+ return irq_work_busy;
+}
+
+static inline void bpf_mmap_unlock_mm(struct mmap_unlock_irq_work *work, struct mm_struct *mm)
+{
+ if (!work) {
+ mmap_read_unlock(mm);
+ } else {
+ work->mm = mm;
+
+ /* The lock will be released once we're out of interrupt
+ * context. Tell lockdep that we've released it now so
+ * it doesn't complain that we forgot to release it.
+ */
+ rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
+ irq_work_queue(&work->irq_work);
+ }
+}
+
+#endif /* __MMAP_UNLOCK_WORK_H__ */
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 6e75bbee39f0..49e567209c6b 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -7,10 +7,10 @@
#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
-#include <linux/irq_work.h>
#include <linux/btf_ids.h>
#include <linux/buildid.h>
#include "percpu_freelist.h"
+#include "mmap_unlock_work.h"
#define STACK_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY | \
@@ -31,25 +31,6 @@ struct bpf_stack_map {
struct stack_map_bucket *buckets[];
};
-/* irq_work to run up_read() for build_id lookup in nmi context */
-struct stack_map_irq_work {
- struct irq_work irq_work;
- struct mm_struct *mm;
-};
-
-static void do_up_read(struct irq_work *entry)
-{
- struct stack_map_irq_work *work;
-
- if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
- return;
-
- work = container_of(entry, struct stack_map_irq_work, irq_work);
- mmap_read_unlock_non_owner(work->mm);
-}
-
-static DEFINE_PER_CPU(struct stack_map_irq_work, up_read_work);
-
static inline bool stack_map_use_build_id(struct bpf_map *map)
{
return (map->map_flags & BPF_F_STACK_BUILD_ID);
@@ -149,35 +130,13 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
u64 *ips, u32 trace_nr, bool user)
{
int i;
+ struct mmap_unlock_irq_work *work = NULL;
+ bool irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);
struct vm_area_struct *vma;
- bool irq_work_busy = false;
- struct stack_map_irq_work *work = NULL;
-
- if (irqs_disabled()) {
- if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
- work = this_cpu_ptr(&up_read_work);
- if (irq_work_is_busy(&work->irq_work)) {
- /* cannot queue more up_read, fallback */
- irq_work_busy = true;
- }
- } else {
- /*
- * PREEMPT_RT does not allow to trylock mmap sem in
- * interrupt disabled context. Force the fallback code.
- */
- irq_work_busy = true;
- }
- }
- /*
- * We cannot do up_read() when the irq is disabled, because of
- * risk to deadlock with rq_lock. To do build_id lookup when the
- * irqs are disabled, we need to run up_read() in irq_work. We use
- * a percpu variable to do the irq_work. If the irq_work is
- * already used by another lookup, we fall back to report ips.
- *
- * Same fallback is used for kernel stack (!user) on a stackmap
- * with build_id.
+	/* If the irq_work is in use, fall back to reporting raw ips. The
+	 * same fallback is used for a kernel stack (!user) on a stackmap
+	 * with build_id.
*/
if (!user || !current || !current->mm || irq_work_busy ||
!mmap_read_trylock(current->mm)) {
@@ -203,19 +162,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
- vma->vm_start;
id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
}
-
- if (!work) {
- mmap_read_unlock(current->mm);
- } else {
- work->mm = current->mm;
-
- /* The lock will be released once we're out of interrupt
- * context. Tell lockdep that we've released it now so
- * it doesn't complain that we forgot to release it.
- */
- rwsem_release(&current->mm->mmap_lock.dep_map, _RET_IP_);
- irq_work_queue(&work->irq_work);
- }
+ bpf_mmap_unlock_mm(work, current->mm);
}
static struct perf_callchain_entry *
@@ -542,7 +489,7 @@ const struct bpf_func_proto bpf_get_task_stack_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
- .arg1_btf_id = &btf_task_struct_ids[0],
+ .arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
.arg2_type = ARG_PTR_TO_UNINIT_MEM,
.arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
@@ -719,16 +666,3 @@ const struct bpf_map_ops stack_trace_map_ops = {
.map_btf_name = "bpf_stack_map",
.map_btf_id = &stack_trace_map_btf_id,
};
-
-static int __init stack_map_init(void)
-{
- int cpu;
- struct stack_map_irq_work *work;
-
- for_each_possible_cpu(cpu) {
- work = per_cpu_ptr(&up_read_work, cpu);
- init_irq_work(&work->irq_work, do_up_read);
- }
- return 0;
-}
-subsys_initcall(stack_map_init);
diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index b48750bfba5a..d94696198ef8 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -8,6 +8,7 @@
#include <linux/fdtable.h>
#include <linux/filter.h>
#include <linux/btf_ids.h>
+#include "mmap_unlock_work.h"
struct bpf_iter_seq_task_common {
struct pid_namespace *ns;
@@ -524,10 +525,6 @@ static const struct seq_operations task_vma_seq_ops = {
.show = task_vma_seq_show,
};
-BTF_ID_LIST(btf_task_file_ids)
-BTF_ID(struct, file)
-BTF_ID(struct, vm_area_struct)
-
static const struct bpf_iter_seq_info task_seq_info = {
.seq_ops = &task_seq_ops,
.init_seq_private = init_seq_pidns,
@@ -586,23 +583,88 @@ static struct bpf_iter_reg task_vma_reg_info = {
.seq_info = &task_vma_seq_info,
};
+BPF_CALL_5(bpf_find_vma, struct task_struct *, task, u64, start,
+ bpf_callback_t, callback_fn, void *, callback_ctx, u64, flags)
+{
+ struct mmap_unlock_irq_work *work = NULL;
+ struct vm_area_struct *vma;
+ bool irq_work_busy = false;
+ struct mm_struct *mm;
+ int ret = -ENOENT;
+
+ if (flags)
+ return -EINVAL;
+
+ if (!task)
+ return -ENOENT;
+
+ mm = task->mm;
+ if (!mm)
+ return -ENOENT;
+
+ irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);
+
+ if (irq_work_busy || !mmap_read_trylock(mm))
+ return -EBUSY;
+
+ vma = find_vma(mm, start);
+
+ if (vma && vma->vm_start <= start && vma->vm_end > start) {
+ callback_fn((u64)(long)task, (u64)(long)vma,
+ (u64)(long)callback_ctx, 0, 0);
+ ret = 0;
+ }
+ bpf_mmap_unlock_mm(work, mm);
+ return ret;
+}
+
+const struct bpf_func_proto bpf_find_vma_proto = {
+ .func = bpf_find_vma,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_PTR_TO_FUNC,
+ .arg4_type = ARG_PTR_TO_STACK_OR_NULL,
+ .arg5_type = ARG_ANYTHING,
+};
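
From the BPF side, the new helper takes a callback with the (task, vma, callback_ctx) signature that set_find_vma_callback_state() in the verifier change below wires into R1-R3. A hypothetical program sketch (the attach point, address, and helper declaration are illustrative, assuming vmlinux.h plus a bpf_helper_defs.h regenerated to declare bpf_find_vma):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    char LICENSE[] SEC("license") = "GPL";

    /* Runs with mmap_read_lock held; record the matching VMA's start. */
    static long check_vma(struct task_struct *task,
                          struct vm_area_struct *vma, void *data)
    {
            *(unsigned long *)data = vma->vm_start;
            return 0;
    }

    SEC("perf_event")
    int handle_pe(void *ctx)
    {
            struct task_struct *task = bpf_get_current_task_btf();
            unsigned long vm_start = 0;

            /* flags must be 0; returns -ENOENT if no VMA covers the address */
            bpf_find_vma(task, 0x400000 /* hypothetical address */,
                         check_vma, &vm_start, 0);
            return 0;
    }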
+
+DEFINE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);
+
+static void do_mmap_read_unlock(struct irq_work *entry)
+{
+ struct mmap_unlock_irq_work *work;
+
+ if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
+ return;
+
+ work = container_of(entry, struct mmap_unlock_irq_work, irq_work);
+ mmap_read_unlock_non_owner(work->mm);
+}
+
static int __init task_iter_init(void)
{
- int ret;
+ struct mmap_unlock_irq_work *work;
+ int ret, cpu;
+
+ for_each_possible_cpu(cpu) {
+ work = per_cpu_ptr(&mmap_unlock_work, cpu);
+ init_irq_work(&work->irq_work, do_mmap_read_unlock);
+ }
- task_reg_info.ctx_arg_info[0].btf_id = btf_task_struct_ids[0];
+ task_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK];
ret = bpf_iter_reg_target(&task_reg_info);
if (ret)
return ret;
- task_file_reg_info.ctx_arg_info[0].btf_id = btf_task_struct_ids[0];
- task_file_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[0];
+ task_file_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK];
+ task_file_reg_info.ctx_arg_info[1].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_FILE];
ret = bpf_iter_reg_target(&task_file_reg_info);
if (ret)
return ret;
- task_vma_reg_info.ctx_arg_info[0].btf_id = btf_task_struct_ids[0];
- task_vma_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[1];
+ task_vma_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK];
+ task_vma_reg_info.ctx_arg_info[1].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];
return bpf_iter_reg_target(&task_vma_reg_info);
}
late_initcall(task_iter_init);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 50efda51515b..88fe97d35166 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6130,6 +6130,33 @@ static int set_timer_callback_state(struct bpf_verifier_env *env,
return 0;
}
+static int set_find_vma_callback_state(struct bpf_verifier_env *env,
+ struct bpf_func_state *caller,
+ struct bpf_func_state *callee,
+ int insn_idx)
+{
+ /* bpf_find_vma(struct task_struct *task, u64 addr,
+ * void *callback_fn, void *callback_ctx, u64 flags)
+ * (callback_fn)(struct task_struct *task,
+ * struct vm_area_struct *vma, void *callback_ctx);
+ */
+ callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
+
+ callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID;
+ __mark_reg_known_zero(&callee->regs[BPF_REG_2]);
+ callee->regs[BPF_REG_2].btf = btf_vmlinux;
+ callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];
+
+ /* pointer to stack or null */
+ callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4];
+
+ /* unused */
+ __mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
+ __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
+ callee->in_callback_fn = true;
+ return 0;
+}
+
static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
{
struct bpf_verifier_state *state = env->cur_state;
@@ -6487,6 +6514,13 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
return -EINVAL;
}
+ if (func_id == BPF_FUNC_find_vma) {
+ err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
+ set_find_vma_callback_state);
+ if (err < 0)
+ return -EINVAL;
+ }
+
if (func_id == BPF_FUNC_snprintf) {
err = check_bpf_snprintf_call(env, regs);
if (err < 0)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index ae9755037b7e..1cd647c010da 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -764,7 +764,7 @@ const struct bpf_func_proto bpf_get_current_task_btf_proto = {
.func = bpf_get_current_task_btf,
.gpl_only = true,
.ret_type = RET_PTR_TO_BTF_ID,
- .ret_btf_id = &btf_task_struct_ids[0],
+ .ret_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};
BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
@@ -779,7 +779,7 @@ const struct bpf_func_proto bpf_task_pt_regs_proto = {
.func = bpf_task_pt_regs,
.gpl_only = true,
.arg1_type = ARG_PTR_TO_BTF_ID,
- .arg1_btf_id = &btf_task_struct_ids[0],
+ .arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
.ret_type = RET_PTR_TO_BTF_ID,
.ret_btf_id = &bpf_task_pt_regs_ids[0],
};
@@ -1206,6 +1206,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_func_ip_proto_tracing;
case BPF_FUNC_get_branch_snapshot:
return &bpf_get_branch_snapshot_proto;
+ case BPF_FUNC_find_vma:
+ return &bpf_find_vma_proto;
case BPF_FUNC_trace_vprintk:
return bpf_get_trace_vprintk_proto();
default:
diff --git a/net/802/hippi.c b/net/802/hippi.c
index 887e73d520e4..1997b7dd265e 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -65,7 +65,7 @@ static int hippi_header(struct sk_buff *skb, struct net_device *dev,
hip->le.src_addr_type = 2; /* 12 bit SC address */
memcpy(hip->le.src_switch_addr, dev->dev_addr + 3, 3);
- memset(&hip->le.reserved, 0, 16);
+ memset_startat(&hip->le, 0, reserved);
hip->snap.dsap = HIPPI_EXTENDED_SAP;
hip->snap.ssap = HIPPI_EXTENDED_SAP;
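
memset_startat() zeroes from the named member through the end of the enclosing object, so this is equivalent to the old 16-byte memset provided reserved is the tail of hip->le. For reference, a sketch of the macro, paraphrased from <linux/string.h> (treat the exact spelling as approximate):

    #define memset_startat(obj, v, member)                                 \
    ({                                                                     \
            u8 *__ptr = (u8 *)(obj);                                       \
            typeof(v) __val = (v);                                         \
            memset(__ptr + offsetof(typeof(*(obj)), member), __val,       \
                   sizeof(*(obj)) - offsetof(typeof(*(obj)), member));    \
    })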
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index abaa5d96ded2..788076b002b3 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -319,8 +319,8 @@ static void vlan_transfer_features(struct net_device *dev,
{
struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
- vlandev->gso_max_size = dev->gso_max_size;
- vlandev->gso_max_segs = dev->gso_max_segs;
+ netif_set_gso_max_size(vlandev, dev->gso_max_size);
+ netif_set_gso_max_segs(vlandev, dev->gso_max_segs);
if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
vlandev->hard_header_len = dev->hard_header_len;
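
The open-coded assignments become setter calls; the expectation in this series is that the setters are thin wrappers publishing the value with WRITE_ONCE() so lockless readers such as sk_setup_caps() see a consistent store. A sketch under that assumption (see netdevice.h for the authoritative definitions):

    static inline void netif_set_gso_max_size(struct net_device *dev,
                                              unsigned int size)
    {
            /* dev->gso_max_size is read locklessly from sk_setup_caps() */
            WRITE_ONCE(dev->gso_max_size, size);
    }

    static inline void netif_set_gso_max_segs(struct net_device *dev,
                                              unsigned int segs)
    {
            /* dev->gso_max_segs is read locklessly from sk_setup_caps() */
            WRITE_ONCE(dev->gso_max_segs, segs);
    }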
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 59bc13b5f14f..acf8c791f320 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -476,10 +476,9 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head,
type = vhdr->h_vlan_encapsulated_proto;
- rcu_read_lock();
ptype = gro_find_receive_by_type(type);
if (!ptype)
- goto out_unlock;
+ goto out;
flush = 0;
@@ -501,8 +500,6 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head,
ipv6_gro_receive, inet_gro_receive,
head, skb);
-out_unlock:
- rcu_read_unlock();
out:
skb_gro_flush_final(skb, pp, flush);
@@ -516,14 +513,12 @@ static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
struct packet_offload *ptype;
int err = -ENOENT;
- rcu_read_lock();
ptype = gro_find_complete_by_type(type);
if (ptype)
err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
ipv6_gro_complete, inet_gro_complete,
skb, nhoff + sizeof(*vhdr));
- rcu_read_unlock();
return err;
}
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index a54535cbcf4c..866556e041b7 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -573,8 +573,8 @@ static int vlan_dev_init(struct net_device *dev)
NETIF_F_ALL_FCOE;
dev->features |= dev->hw_features | NETIF_F_LLTX;
- dev->gso_max_size = real_dev->gso_max_size;
- dev->gso_max_segs = real_dev->gso_max_segs;
+ netif_set_gso_max_size(dev, real_dev->gso_max_size);
+ netif_set_gso_max_segs(dev, real_dev->gso_max_segs);
if (dev->features & NETIF_F_VLAN_FEATURES)
netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n");
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index ec87dea23719..08bf6c839e25 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -252,7 +252,7 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
stats = dev_get_stats(vlandev, &temp);
seq_printf(seq,
- "%s VID: %d REORDER_HDR: %i dev->priv_flags: %hx\n",
+ "%s VID: %d REORDER_HDR: %i dev->priv_flags: %llx\n",
vlandev->name, vlan->vlan_id,
(int)(vlan->flags & 1), vlandev->priv_flags);
diff --git a/net/Kconfig b/net/Kconfig
index 074472dfa94a..8a1f9d0287de 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -455,4 +455,9 @@ config ETHTOOL_NETLINK
netlink. It provides better extensibility and some new features,
e.g. notification messages.
+config NETDEV_ADDR_LIST_TEST
+ tristate "Unit tests for device address list"
+ default KUNIT_ALL_TESTS
+ depends on KUNIT
+
endif # if NET
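
Assuming the standard KUnit workflow (not part of this patch), the new tests can be selected with a .kunitconfig fragment along these lines and run through tools/testing/kunit/kunit.py:

    CONFIG_KUNIT=y
    CONFIG_NET=y
    CONFIG_NETDEV_ADDR_LIST_TEST=y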
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 291770fc9551..a52bba8500e1 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -15,7 +15,7 @@ bluetooth_6lowpan-y := 6lowpan.o
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \
ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o hci_codec.o \
- eir.o
+ eir.o hci_sync.o
bluetooth-$(CONFIG_BT_BREDR) += sco.o
bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
diff --git a/net/bluetooth/aosp.c b/net/bluetooth/aosp.c
index a1b7762335a5..432ae3aac9e3 100644
--- a/net/bluetooth/aosp.c
+++ b/net/bluetooth/aosp.c
@@ -8,9 +8,43 @@
#include "aosp.h"
+/* Command complete parameters of LE_Get_Vendor_Capabilities_Command
+ * The parameters grow over time. The base version that declares the
+ * version_supported field is v0.95. Refer to
+ * https://cs.android.com/android/platform/superproject/+/master:system/
+ * bt/gd/hci/controller.cc;l=452?q=le_get_vendor_capabilities_handler
+ */
+struct aosp_rp_le_get_vendor_capa {
+ /* v0.95: 15 octets */
+ __u8 status;
+ __u8 max_advt_instances;
+ __u8 offloaded_resolution_of_private_address;
+ __le16 total_scan_results_storage;
+ __u8 max_irk_list_sz;
+ __u8 filtering_support;
+ __u8 max_filter;
+ __u8 activity_energy_info_support;
+ __le16 version_supported;
+ __le16 total_num_of_advt_tracked;
+ __u8 extended_scan_support;
+ __u8 debug_logging_supported;
+ /* v0.96: 16 octets */
+ __u8 le_address_generation_offloading_support;
+ /* v0.98: 21 octets */
+ __le32 a2dp_source_offload_capability_mask;
+ __u8 bluetooth_quality_report_support;
+ /* v1.00: 25 octets */
+ __le32 dynamic_audio_buffer_support;
+} __packed;
+
+#define VENDOR_CAPA_BASE_SIZE 15
+#define VENDOR_CAPA_0_98_SIZE 21
+
void aosp_do_open(struct hci_dev *hdev)
{
struct sk_buff *skb;
+ struct aosp_rp_le_get_vendor_capa *rp;
+ u16 version_supported;
if (!hdev->aosp_capable)
return;
@@ -20,9 +54,54 @@ void aosp_do_open(struct hci_dev *hdev)
/* LE Get Vendor Capabilities Command */
skb = __hci_cmd_sync(hdev, hci_opcode_pack(0x3f, 0x153), 0, NULL,
HCI_CMD_TIMEOUT);
- if (IS_ERR(skb))
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "AOSP get vendor capabilities (%ld)",
+ PTR_ERR(skb));
return;
+ }
+
+ /* A basic length check */
+ if (skb->len < VENDOR_CAPA_BASE_SIZE)
+ goto length_error;
+
+ rp = (struct aosp_rp_le_get_vendor_capa *)skb->data;
+
+ version_supported = le16_to_cpu(rp->version_supported);
+ /* AOSP displays the version number like v0.98, v1.00, etc. */
+ bt_dev_info(hdev, "AOSP extensions version v%u.%02u",
+ version_supported >> 8, version_supported & 0xff);
+
+ /* Do not support very old versions. */
+ if (version_supported < 95) {
+ bt_dev_warn(hdev, "AOSP capabilities version %u too old",
+ version_supported);
+ goto done;
+ }
+
+ if (version_supported < 98) {
+ bt_dev_warn(hdev, "AOSP quality report is not supported");
+ goto done;
+ }
+
+ if (skb->len < VENDOR_CAPA_0_98_SIZE)
+ goto length_error;
+
+ /* The bluetooth_quality_report_support is defined at version
+ * v0.98. Refer to
+ * https://cs.android.com/android/platform/superproject/+/
+ * master:system/bt/gd/hci/controller.cc;l=477
+ */
+ if (rp->bluetooth_quality_report_support) {
+ hdev->aosp_quality_report = true;
+ bt_dev_info(hdev, "AOSP quality report is supported");
+ }
+
+ goto done;
+
+length_error:
+ bt_dev_err(hdev, "AOSP capabilities length %d too short", skb->len);
+done:
kfree_skb(skb);
}
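
As a worked example of the encoding above: version_supported carries the minor number in the low byte and the major in the high byte, so v0.95 is 95 (0x005f), v0.98 is 98 (0x0062) and v1.00 is 256 (0x0100). A controller reporting 98 therefore prints as v0.98 (98 >> 8 == 0, 98 & 0xff == 98) and passes both version guards.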
@@ -33,3 +112,90 @@ void aosp_do_close(struct hci_dev *hdev)
bt_dev_dbg(hdev, "Cleanup of AOSP extension");
}
+
+/* BQR command */
+#define BQR_OPCODE hci_opcode_pack(0x3f, 0x015e)
+
+/* BQR report action */
+#define REPORT_ACTION_ADD 0x00
+#define REPORT_ACTION_DELETE 0x01
+#define REPORT_ACTION_CLEAR 0x02
+
+/* BQR event masks */
+#define QUALITY_MONITORING BIT(0)
+#define APPROACHING_LSTO BIT(1)
+#define A2DP_AUDIO_CHOPPY BIT(2)
+#define SCO_VOICE_CHOPPY BIT(3)
+
+#define DEFAULT_BQR_EVENT_MASK (QUALITY_MONITORING | APPROACHING_LSTO | \
+ A2DP_AUDIO_CHOPPY | SCO_VOICE_CHOPPY)
+
+/* The report interval is in milliseconds; keep it long enough not to
+ * stress the controller. Range: 0 ~ 65535 ms
+ */
+#define DEFAULT_REPORT_INTERVAL_MS 5000
+
+struct aosp_bqr_cp {
+ __u8 report_action;
+ __u32 event_mask;
+ __u16 min_report_interval;
+} __packed;
+
+static int enable_quality_report(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ struct aosp_bqr_cp cp;
+
+ cp.report_action = REPORT_ACTION_ADD;
+ cp.event_mask = DEFAULT_BQR_EVENT_MASK;
+ cp.min_report_interval = DEFAULT_REPORT_INTERVAL_MS;
+
+ skb = __hci_cmd_sync(hdev, BQR_OPCODE, sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Enabling Android BQR failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int disable_quality_report(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ struct aosp_bqr_cp cp = { 0 };
+
+ cp.report_action = REPORT_ACTION_CLEAR;
+
+ skb = __hci_cmd_sync(hdev, BQR_OPCODE, sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Disabling Android BQR failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ kfree_skb(skb);
+ return 0;
+}
+
+bool aosp_has_quality_report(struct hci_dev *hdev)
+{
+ return hdev->aosp_quality_report;
+}
+
+int aosp_set_quality_report(struct hci_dev *hdev, bool enable)
+{
+ if (!aosp_has_quality_report(hdev))
+ return -EOPNOTSUPP;
+
+ bt_dev_dbg(hdev, "quality report enable %d", enable);
+
+ /* Enable or disable the quality report feature. */
+ if (enable)
+ return enable_quality_report(hdev);
+ else
+ return disable_quality_report(hdev);
+}
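
For callers elsewhere in the stack, the intended pattern is presumably to call the setter directly and treat -EOPNOTSUPP as "controller never advertised the capability"; a hypothetical caller sketch:

    static int try_enable_bqr(struct hci_dev *hdev)
    {
            int err = aosp_set_quality_report(hdev, true);

            if (err == -EOPNOTSUPP)
                    bt_dev_dbg(hdev, "no AOSP BQR capability");

            return err;
    }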
diff --git a/net/bluetooth/aosp.h b/net/bluetooth/aosp.h
index 328fc6d39f70..2fd8886d51b2 100644
--- a/net/bluetooth/aosp.h
+++ b/net/bluetooth/aosp.h
@@ -8,9 +8,22 @@
void aosp_do_open(struct hci_dev *hdev);
void aosp_do_close(struct hci_dev *hdev);
+bool aosp_has_quality_report(struct hci_dev *hdev);
+int aosp_set_quality_report(struct hci_dev *hdev, bool enable);
+
#else
static inline void aosp_do_open(struct hci_dev *hdev) {}
static inline void aosp_do_close(struct hci_dev *hdev) {}
+static inline bool aosp_has_quality_report(struct hci_dev *hdev)
+{
+ return false;
+}
+
+static inline int aosp_set_quality_report(struct hci_dev *hdev, bool enable)
+{
+ return -EOPNOTSUPP;
+}
+
#endif
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 0a2d78e811cf..83eb84e8e688 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -501,9 +501,7 @@ static int __init cmtp_init(void)
{
BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION);
- cmtp_init_sockets();
-
- return 0;
+ return cmtp_init_sockets();
}
static void __exit cmtp_exit(void)
diff --git a/net/bluetooth/hci_codec.c b/net/bluetooth/hci_codec.c
index f0421d0edaa3..38201532f58e 100644
--- a/net/bluetooth/hci_codec.c
+++ b/net/bluetooth/hci_codec.c
@@ -25,9 +25,11 @@ static int hci_codec_list_add(struct list_head *list,
}
entry->transport = sent->transport;
entry->len = len;
- entry->num_caps = rp->num_caps;
- if (rp->num_caps)
+ entry->num_caps = 0;
+ if (rp) {
+ entry->num_caps = rp->num_caps;
memcpy(entry->caps, caps, len);
+ }
list_add(&entry->list, list);
return 0;
@@ -58,6 +60,18 @@ static void hci_read_codec_capabilities(struct hci_dev *hdev, __u8 transport,
__u32 len;
cmd->transport = i;
+
+ /* If the Read_Codec_Capabilities command is not supported,
+ * just add the codec to the list without caps
+ */
+ if (!(hdev->commands[45] & 0x08)) {
+ hci_dev_lock(hdev);
+ hci_codec_list_add(&hdev->local_codecs, cmd,
+ NULL, NULL, 0);
+ hci_dev_unlock(hdev);
+ continue;
+ }
+
skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODEC_CAPS,
sizeof(*cmd), cmd,
HCI_CMD_TIMEOUT);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index bd669c95b9a7..cd6e1cf7e396 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -108,7 +108,7 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
break;
}
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
}
static void hci_conn_cleanup(struct hci_conn *conn)
@@ -900,25 +900,15 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
hci_conn_del(conn);
- /* The suspend notifier is waiting for all devices to disconnect and an
- * LE connect cancel will result in an hci_le_conn_failed. Once the last
- * connection is deleted, we should also wake the suspend queue to
- * complete suspend operations.
- */
- if (list_empty(&hdev->conn_hash.list) &&
- test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
- wake_up(&hdev->suspend_wait_q);
- }
-
/* Since we may have temporarily stopped the background scanning in
* favor of connection establishment, we should restart it.
*/
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
- /* Re-enable advertising in case this was a failed connection
+ /* Enable advertising in case this was a failed connection
* attempt as a peripheral.
*/
- hci_req_reenable_advertising(hdev);
+ hci_enable_advertising(hdev);
}
static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
@@ -1411,7 +1401,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
conn->conn_timeout = conn_timeout;
conn->conn_reason = conn_reason;
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
done:
hci_conn_hold(conn);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 8d33aa64846b..fdc0dcf8ee36 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -62,824 +62,6 @@ DEFINE_MUTEX(hci_cb_list_lock);
/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
-static int hci_reset_req(struct hci_request *req, unsigned long opt)
-{
- BT_DBG("%s %ld", req->hdev->name, opt);
-
- /* Reset device */
- set_bit(HCI_RESET, &req->hdev->flags);
- hci_req_add(req, HCI_OP_RESET, 0, NULL);
- return 0;
-}
-
-static void bredr_init(struct hci_request *req)
-{
- req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
-
- /* Read Local Supported Features */
- hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
-
- /* Read Local Version */
- hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
-
- /* Read BD Address */
- hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
-}
-
-static void amp_init1(struct hci_request *req)
-{
- req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
-
- /* Read Local Version */
- hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
-
- /* Read Local Supported Commands */
- hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
-
- /* Read Local AMP Info */
- hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
-
- /* Read Data Blk size */
- hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
-
- /* Read Flow Control Mode */
- hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
-
- /* Read Location Data */
- hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
-}
-
-static int amp_init2(struct hci_request *req)
-{
- /* Read Local Supported Features. Not all AMP controllers
- * support this so it's placed conditionally in the second
- * stage init.
- */
- if (req->hdev->commands[14] & 0x20)
- hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
-
- return 0;
-}
-
-static int hci_init1_req(struct hci_request *req, unsigned long opt)
-{
- struct hci_dev *hdev = req->hdev;
-
- BT_DBG("%s %ld", hdev->name, opt);
-
- /* Reset */
- if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
- hci_reset_req(req, 0);
-
- switch (hdev->dev_type) {
- case HCI_PRIMARY:
- bredr_init(req);
- break;
- case HCI_AMP:
- amp_init1(req);
- break;
- default:
- bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
- break;
- }
-
- return 0;
-}
-
-static void bredr_setup(struct hci_request *req)
-{
- __le16 param;
- __u8 flt_type;
-
- /* Read Buffer Size (ACL mtu, max pkt, etc.) */
- hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
-
- /* Read Class of Device */
- hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
-
- /* Read Local Name */
- hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
-
- /* Read Voice Setting */
- hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
-
- /* Read Number of Supported IAC */
- hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
-
- /* Read Current IAC LAP */
- hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
-
- /* Clear Event Filters */
- flt_type = HCI_FLT_CLEAR_ALL;
- hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
-
- /* Connection accept timeout ~20 secs */
- param = cpu_to_le16(0x7d00);
- hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
-}
-
-static void le_setup(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
-
- /* Read LE Buffer Size */
- hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
-
- /* Read LE Local Supported Features */
- hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
-
- /* Read LE Supported States */
- hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
-
- /* LE-only controllers have LE implicitly enabled */
- if (!lmp_bredr_capable(hdev))
- hci_dev_set_flag(hdev, HCI_LE_ENABLED);
-}
-
-static void hci_setup_event_mask(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
-
- /* The second byte is 0xff instead of 0x9f (two reserved bits
- * disabled) since a Broadcom 1.2 dongle doesn't respond to the
- * command otherwise.
- */
- u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
-
- /* CSR 1.1 dongles does not accept any bitfield so don't try to set
- * any event mask for pre 1.2 devices.
- */
- if (hdev->hci_ver < BLUETOOTH_VER_1_2)
- return;
-
- if (lmp_bredr_capable(hdev)) {
- events[4] |= 0x01; /* Flow Specification Complete */
- } else {
- /* Use a different default for LE-only devices */
- memset(events, 0, sizeof(events));
- events[1] |= 0x20; /* Command Complete */
- events[1] |= 0x40; /* Command Status */
- events[1] |= 0x80; /* Hardware Error */
-
- /* If the controller supports the Disconnect command, enable
- * the corresponding event. In addition enable packet flow
- * control related events.
- */
- if (hdev->commands[0] & 0x20) {
- events[0] |= 0x10; /* Disconnection Complete */
- events[2] |= 0x04; /* Number of Completed Packets */
- events[3] |= 0x02; /* Data Buffer Overflow */
- }
-
- /* If the controller supports the Read Remote Version
- * Information command, enable the corresponding event.
- */
- if (hdev->commands[2] & 0x80)
- events[1] |= 0x08; /* Read Remote Version Information
- * Complete
- */
-
- if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
- events[0] |= 0x80; /* Encryption Change */
- events[5] |= 0x80; /* Encryption Key Refresh Complete */
- }
- }
-
- if (lmp_inq_rssi_capable(hdev) ||
- test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
- events[4] |= 0x02; /* Inquiry Result with RSSI */
-
- if (lmp_ext_feat_capable(hdev))
- events[4] |= 0x04; /* Read Remote Extended Features Complete */
-
- if (lmp_esco_capable(hdev)) {
- events[5] |= 0x08; /* Synchronous Connection Complete */
- events[5] |= 0x10; /* Synchronous Connection Changed */
- }
-
- if (lmp_sniffsubr_capable(hdev))
- events[5] |= 0x20; /* Sniff Subrating */
-
- if (lmp_pause_enc_capable(hdev))
- events[5] |= 0x80; /* Encryption Key Refresh Complete */
-
- if (lmp_ext_inq_capable(hdev))
- events[5] |= 0x40; /* Extended Inquiry Result */
-
- if (lmp_no_flush_capable(hdev))
- events[7] |= 0x01; /* Enhanced Flush Complete */
-
- if (lmp_lsto_capable(hdev))
- events[6] |= 0x80; /* Link Supervision Timeout Changed */
-
- if (lmp_ssp_capable(hdev)) {
- events[6] |= 0x01; /* IO Capability Request */
- events[6] |= 0x02; /* IO Capability Response */
- events[6] |= 0x04; /* User Confirmation Request */
- events[6] |= 0x08; /* User Passkey Request */
- events[6] |= 0x10; /* Remote OOB Data Request */
- events[6] |= 0x20; /* Simple Pairing Complete */
- events[7] |= 0x04; /* User Passkey Notification */
- events[7] |= 0x08; /* Keypress Notification */
- events[7] |= 0x10; /* Remote Host Supported
- * Features Notification
- */
- }
-
- if (lmp_le_capable(hdev))
- events[7] |= 0x20; /* LE Meta-Event */
-
- hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
-}
-
-static int hci_init2_req(struct hci_request *req, unsigned long opt)
-{
- struct hci_dev *hdev = req->hdev;
-
- if (hdev->dev_type == HCI_AMP)
- return amp_init2(req);
-
- if (lmp_bredr_capable(hdev))
- bredr_setup(req);
- else
- hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
-
- if (lmp_le_capable(hdev))
- le_setup(req);
-
- /* All Bluetooth 1.2 and later controllers should support the
- * HCI command for reading the local supported commands.
- *
- * Unfortunately some controllers indicate Bluetooth 1.2 support,
- * but do not have support for this command. If that is the case,
- * the driver can quirk the behavior and skip reading the local
- * supported commands.
- */
- if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
- !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
- hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
-
- if (lmp_ssp_capable(hdev)) {
- /* When SSP is available, then the host features page
- * should also be available as well. However some
- * controllers list the max_page as 0 as long as SSP
- * has not been enabled. To achieve proper debugging
- * output, force the minimum max_page to 1 at least.
- */
- hdev->max_page = 0x01;
-
- if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
- u8 mode = 0x01;
-
- hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
- sizeof(mode), &mode);
- } else {
- struct hci_cp_write_eir cp;
-
- memset(hdev->eir, 0, sizeof(hdev->eir));
- memset(&cp, 0, sizeof(cp));
-
- hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
- }
- }
-
- if (lmp_inq_rssi_capable(hdev) ||
- test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
- u8 mode;
-
- /* If Extended Inquiry Result events are supported, then
- * they are clearly preferred over Inquiry Result with RSSI
- * events.
- */
- mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
-
- hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
- }
-
- if (lmp_inq_tx_pwr_capable(hdev))
- hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
-
- if (lmp_ext_feat_capable(hdev)) {
- struct hci_cp_read_local_ext_features cp;
-
- cp.page = 0x01;
- hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
- sizeof(cp), &cp);
- }
-
- if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
- u8 enable = 1;
- hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
- &enable);
- }
-
- return 0;
-}
-
-static void hci_setup_link_policy(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- struct hci_cp_write_def_link_policy cp;
- u16 link_policy = 0;
-
- if (lmp_rswitch_capable(hdev))
- link_policy |= HCI_LP_RSWITCH;
- if (lmp_hold_capable(hdev))
- link_policy |= HCI_LP_HOLD;
- if (lmp_sniff_capable(hdev))
- link_policy |= HCI_LP_SNIFF;
- if (lmp_park_capable(hdev))
- link_policy |= HCI_LP_PARK;
-
- cp.policy = cpu_to_le16(link_policy);
- hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
-}
-
-static void hci_set_le_support(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- struct hci_cp_write_le_host_supported cp;
-
- /* LE-only devices do not support explicit enablement */
- if (!lmp_bredr_capable(hdev))
- return;
-
- memset(&cp, 0, sizeof(cp));
-
- if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
- cp.le = 0x01;
- cp.simul = 0x00;
- }
-
- if (cp.le != lmp_host_le_capable(hdev))
- hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
- &cp);
-}
-
-static void hci_set_event_mask_page_2(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
- bool changed = false;
-
- /* If Connectionless Peripheral Broadcast central role is supported
- * enable all necessary events for it.
- */
- if (lmp_cpb_central_capable(hdev)) {
- events[1] |= 0x40; /* Triggered Clock Capture */
- events[1] |= 0x80; /* Synchronization Train Complete */
- events[2] |= 0x10; /* Peripheral Page Response Timeout */
- events[2] |= 0x20; /* CPB Channel Map Change */
- changed = true;
- }
-
- /* If Connectionless Peripheral Broadcast peripheral role is supported
- * enable all necessary events for it.
- */
- if (lmp_cpb_peripheral_capable(hdev)) {
- events[2] |= 0x01; /* Synchronization Train Received */
- events[2] |= 0x02; /* CPB Receive */
- events[2] |= 0x04; /* CPB Timeout */
- events[2] |= 0x08; /* Truncated Page Complete */
- changed = true;
- }
-
- /* Enable Authenticated Payload Timeout Expired event if supported */
- if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
- events[2] |= 0x80;
- changed = true;
- }
-
- /* Some Broadcom based controllers indicate support for Set Event
- * Mask Page 2 command, but then actually do not support it. Since
- * the default value is all bits set to zero, the command is only
- * required if the event mask has to be changed. In case no change
- * to the event mask is needed, skip this command.
- */
- if (changed)
- hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
- sizeof(events), events);
-}
-
-static int hci_init3_req(struct hci_request *req, unsigned long opt)
-{
- struct hci_dev *hdev = req->hdev;
- u8 p;
-
- hci_setup_event_mask(req);
-
- if (hdev->commands[6] & 0x20 &&
- !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
- struct hci_cp_read_stored_link_key cp;
-
- bacpy(&cp.bdaddr, BDADDR_ANY);
- cp.read_all = 0x01;
- hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
- }
-
- if (hdev->commands[5] & 0x10)
- hci_setup_link_policy(req);
-
- if (hdev->commands[8] & 0x01)
- hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
-
- if (hdev->commands[18] & 0x04 &&
- !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
- hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
-
- /* Some older Broadcom based Bluetooth 1.2 controllers do not
- * support the Read Page Scan Type command. Check support for
- * this command in the bit mask of supported commands.
- */
- if (hdev->commands[13] & 0x01)
- hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
-
- if (lmp_le_capable(hdev)) {
- u8 events[8];
-
- memset(events, 0, sizeof(events));
-
- if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
- events[0] |= 0x10; /* LE Long Term Key Request */
-
- /* If controller supports the Connection Parameters Request
- * Link Layer Procedure, enable the corresponding event.
- */
- if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
- events[0] |= 0x20; /* LE Remote Connection
- * Parameter Request
- */
-
- /* If the controller supports the Data Length Extension
- * feature, enable the corresponding event.
- */
- if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
- events[0] |= 0x40; /* LE Data Length Change */
-
- /* If the controller supports LL Privacy feature, enable
- * the corresponding event.
- */
- if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
- events[1] |= 0x02; /* LE Enhanced Connection
- * Complete
- */
-
- /* If the controller supports Extended Scanner Filter
- * Policies, enable the corresponding event.
- */
- if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
- events[1] |= 0x04; /* LE Direct Advertising
- * Report
- */
-
- /* If the controller supports Channel Selection Algorithm #2
- * feature, enable the corresponding event.
- */
- if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
- events[2] |= 0x08; /* LE Channel Selection
- * Algorithm
- */
-
- /* If the controller supports the LE Set Scan Enable command,
- * enable the corresponding advertising report event.
- */
- if (hdev->commands[26] & 0x08)
- events[0] |= 0x02; /* LE Advertising Report */
-
- /* If the controller supports the LE Create Connection
- * command, enable the corresponding event.
- */
- if (hdev->commands[26] & 0x10)
- events[0] |= 0x01; /* LE Connection Complete */
-
- /* If the controller supports the LE Connection Update
- * command, enable the corresponding event.
- */
- if (hdev->commands[27] & 0x04)
- events[0] |= 0x04; /* LE Connection Update
- * Complete
- */
-
- /* If the controller supports the LE Read Remote Used Features
- * command, enable the corresponding event.
- */
- if (hdev->commands[27] & 0x20)
- events[0] |= 0x08; /* LE Read Remote Used
- * Features Complete
- */
-
- /* If the controller supports the LE Read Local P-256
- * Public Key command, enable the corresponding event.
- */
- if (hdev->commands[34] & 0x02)
- events[0] |= 0x80; /* LE Read Local P-256
- * Public Key Complete
- */
-
- /* If the controller supports the LE Generate DHKey
- * command, enable the corresponding event.
- */
- if (hdev->commands[34] & 0x04)
- events[1] |= 0x01; /* LE Generate DHKey Complete */
-
- /* If the controller supports the LE Set Default PHY or
- * LE Set PHY commands, enable the corresponding event.
- */
- if (hdev->commands[35] & (0x20 | 0x40))
- events[1] |= 0x08; /* LE PHY Update Complete */
-
- /* If the controller supports LE Set Extended Scan Parameters
- * and LE Set Extended Scan Enable commands, enable the
- * corresponding event.
- */
- if (use_ext_scan(hdev))
- events[1] |= 0x10; /* LE Extended Advertising
- * Report
- */
-
- /* If the controller supports the LE Extended Advertising
- * command, enable the corresponding event.
- */
- if (ext_adv_capable(hdev))
- events[2] |= 0x02; /* LE Advertising Set
- * Terminated
- */
-
- hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
- events);
-
- /* Read LE Advertising Channel TX Power */
- if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
- /* HCI TS spec forbids mixing of legacy and extended
- * advertising commands wherein READ_ADV_TX_POWER is
- * also included. So do not call it if extended adv
- * is supported otherwise controller will return
- * COMMAND_DISALLOWED for extended commands.
- */
- hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
- }
-
- if (hdev->commands[38] & 0x80) {
- /* Read LE Min/Max Tx Power*/
- hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
- 0, NULL);
- }
-
- if (hdev->commands[26] & 0x40) {
- /* Read LE Accept List Size */
- hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
- 0, NULL);
- }
-
- if (hdev->commands[26] & 0x80) {
- /* Clear LE Accept List */
- hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
- }
-
- if (hdev->commands[34] & 0x40) {
- /* Read LE Resolving List Size */
- hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
- 0, NULL);
- }
-
- if (hdev->commands[34] & 0x20) {
- /* Clear LE Resolving List */
- hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
- }
-
- if (hdev->commands[35] & 0x04) {
- __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
-
- /* Set RPA timeout */
- hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
- &rpa_timeout);
- }
-
- if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
- /* Read LE Maximum Data Length */
- hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
-
- /* Read LE Suggested Default Data Length */
- hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
- }
-
- if (ext_adv_capable(hdev)) {
- /* Read LE Number of Supported Advertising Sets */
- hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
- 0, NULL);
- }
-
- hci_set_le_support(req);
- }
-
- /* Read features beyond page 1 if available */
- for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
- struct hci_cp_read_local_ext_features cp;
-
- cp.page = p;
- hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
- sizeof(cp), &cp);
- }
-
- return 0;
-}
-
-static int hci_init4_req(struct hci_request *req, unsigned long opt)
-{
- struct hci_dev *hdev = req->hdev;
-
- /* Some Broadcom based Bluetooth controllers do not support the
- * Delete Stored Link Key command. They are clearly indicating its
- * absence in the bit mask of supported commands.
- *
- * Check the supported commands and only if the command is marked
- * as supported send it. If not supported assume that the controller
- * does not have actual support for stored link keys which makes this
- * command redundant anyway.
- *
- * Some controllers indicate that they support handling deleting
- * stored link keys, but they don't. The quirk lets a driver
- * just disable this command.
- */
- if (hdev->commands[6] & 0x80 &&
- !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
- struct hci_cp_delete_stored_link_key cp;
-
- bacpy(&cp.bdaddr, BDADDR_ANY);
- cp.delete_all = 0x01;
- hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
- sizeof(cp), &cp);
- }
-
- /* Set event mask page 2 if the HCI command for it is supported */
- if (hdev->commands[22] & 0x04)
- hci_set_event_mask_page_2(req);
-
- /* Read local pairing options if the HCI command is supported */
- if (hdev->commands[41] & 0x08)
- hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);
-
- /* Get MWS transport configuration if the HCI command is supported */
- if (hdev->commands[30] & 0x08)
- hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
-
- /* Check for Synchronization Train support */
- if (lmp_sync_train_capable(hdev))
- hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
-
- /* Enable Secure Connections if supported and configured */
- if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
- bredr_sc_enabled(hdev)) {
- u8 support = 0x01;
-
- hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
- sizeof(support), &support);
- }
-
- /* Set erroneous data reporting if supported to the wideband speech
- * setting value
- */
- if (hdev->commands[18] & 0x08 &&
- !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
- bool enabled = hci_dev_test_flag(hdev,
- HCI_WIDEBAND_SPEECH_ENABLED);
-
- if (enabled !=
- (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
- struct hci_cp_write_def_err_data_reporting cp;
-
- cp.err_data_reporting = enabled ?
- ERR_DATA_REPORTING_ENABLED :
- ERR_DATA_REPORTING_DISABLED;
-
- hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
- sizeof(cp), &cp);
- }
- }
-
- /* Set Suggested Default Data Length to maximum if supported */
- if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
- struct hci_cp_le_write_def_data_len cp;
-
- cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
- cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
- hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
- }
-
- /* Set Default PHY parameters if command is supported */
- if (hdev->commands[35] & 0x20) {
- struct hci_cp_le_set_default_phy cp;
-
- cp.all_phys = 0x00;
- cp.tx_phys = hdev->le_tx_def_phys;
- cp.rx_phys = hdev->le_rx_def_phys;
-
- hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
- }
-
- return 0;
-}
-
-static int __hci_init(struct hci_dev *hdev)
-{
- int err;
-
- err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
- if (err < 0)
- return err;
-
- if (hci_dev_test_flag(hdev, HCI_SETUP))
- hci_debugfs_create_basic(hdev);
-
- err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
- if (err < 0)
- return err;
-
- /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
- * BR/EDR/LE type controllers. AMP controllers only need the
- * first two stages of init.
- */
- if (hdev->dev_type != HCI_PRIMARY)
- return 0;
-
- err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
- if (err < 0)
- return err;
-
- err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
- if (err < 0)
- return err;
-
- /* Read local codec list if the HCI command is supported */
- if (hdev->commands[45] & 0x04)
- hci_read_supported_codecs_v2(hdev);
- else if (hdev->commands[29] & 0x20)
- hci_read_supported_codecs(hdev);
-
- /* This function is only called when the controller is actually in
- * configured state. When the controller is marked as unconfigured,
- * this initialization procedure is not run.
- *
- * It means that it is possible that a controller runs through its
- * setup phase and then discovers missing settings. If that is the
- * case, then this function will not be called. It then will only
- * be called during the config phase.
- *
- * So only when in setup phase or config phase, create the debugfs
- * entries and register the SMP channels.
- */
- if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
- !hci_dev_test_flag(hdev, HCI_CONFIG))
- return 0;
-
- hci_debugfs_create_common(hdev);
-
- if (lmp_bredr_capable(hdev))
- hci_debugfs_create_bredr(hdev);
-
- if (lmp_le_capable(hdev))
- hci_debugfs_create_le(hdev);
-
- return 0;
-}
-
-static int hci_init0_req(struct hci_request *req, unsigned long opt)
-{
- struct hci_dev *hdev = req->hdev;
-
- BT_DBG("%s %ld", hdev->name, opt);
-
- /* Reset */
- if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
- hci_reset_req(req, 0);
-
- /* Read Local Version */
- hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
-
- /* Read BD Address */
- if (hdev->set_bdaddr)
- hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
-
- return 0;
-}
-
-static int __hci_unconf_init(struct hci_dev *hdev)
-{
- int err;
-
- if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
- return 0;
-
- err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
- if (err < 0)
- return err;
-
- if (hci_dev_test_flag(hdev, HCI_SETUP))
- hci_debugfs_create_basic(hdev);
-
- return 0;
-}
-
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
__u8 scan = opt;
@@ -975,7 +157,7 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
switch (state) {
case DISCOVERY_STOPPED:
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
if (old_state != DISCOVERY_STARTING)
mgmt_discovering(hdev, 0);
@@ -1289,32 +471,6 @@ done:
return err;
}
-/**
- * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
- * (BD_ADDR) for a HCI device from
- * a firmware node property.
- * @hdev: The HCI device
- *
- * Search the firmware node for 'local-bd-address'.
- *
- * All-zero BD addresses are rejected, because those could be properties
- * that exist in the firmware tables, but were not updated by the firmware. For
- * example, the DTS could define 'local-bd-address', with zero BD addresses.
- */
-static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
-{
- struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
- bdaddr_t ba;
- int ret;
-
- ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
- (u8 *)&ba, sizeof(ba));
- if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
- return;
-
- bacpy(&hdev->public_addr, &ba);
-}
-
static int hci_dev_do_open(struct hci_dev *hdev)
{
int ret = 0;
@@ -1323,205 +479,8 @@ static int hci_dev_do_open(struct hci_dev *hdev)
hci_req_sync_lock(hdev);
- if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
- ret = -ENODEV;
- goto done;
- }
-
- if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
- !hci_dev_test_flag(hdev, HCI_CONFIG)) {
- /* Check for rfkill but allow the HCI setup stage to
- * proceed (which in itself doesn't cause any RF activity).
- */
- if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
- ret = -ERFKILL;
- goto done;
- }
-
- /* Check for valid public address or a configured static
- * random address, but let the HCI setup proceed to
- * be able to determine if there is a public address
- * or not.
- *
- * In case of user channel usage, it is not important
- * if a public address or static random address is
- * available.
- *
- * This check is only valid for BR/EDR controllers
- * since AMP controllers do not have an address.
- */
- if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
- hdev->dev_type == HCI_PRIMARY &&
- !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
- !bacmp(&hdev->static_addr, BDADDR_ANY)) {
- ret = -EADDRNOTAVAIL;
- goto done;
- }
- }
-
- if (test_bit(HCI_UP, &hdev->flags)) {
- ret = -EALREADY;
- goto done;
- }
-
- if (hdev->open(hdev)) {
- ret = -EIO;
- goto done;
- }
-
- set_bit(HCI_RUNNING, &hdev->flags);
- hci_sock_dev_event(hdev, HCI_DEV_OPEN);
-
- atomic_set(&hdev->cmd_cnt, 1);
- set_bit(HCI_INIT, &hdev->flags);
-
- if (hci_dev_test_flag(hdev, HCI_SETUP) ||
- test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
- bool invalid_bdaddr;
-
- hci_sock_dev_event(hdev, HCI_DEV_SETUP);
-
- if (hdev->setup)
- ret = hdev->setup(hdev);
-
- /* The transport driver can set the quirk to mark the
- * BD_ADDR invalid before creating the HCI device or in
- * its setup callback.
- */
- invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
- &hdev->quirks);
-
- if (ret)
- goto setup_failed;
-
- if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
- if (!bacmp(&hdev->public_addr, BDADDR_ANY))
- hci_dev_get_bd_addr_from_property(hdev);
-
- if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
- hdev->set_bdaddr) {
- ret = hdev->set_bdaddr(hdev,
- &hdev->public_addr);
-
- /* If setting of the BD_ADDR from the device
- * property succeeds, then treat the address
- * as valid even if the invalid BD_ADDR
- * quirk indicates otherwise.
- */
- if (!ret)
- invalid_bdaddr = false;
- }
- }
-
-setup_failed:
- /* The transport driver can set these quirks before
- * creating the HCI device or in its setup callback.
- *
- * For the invalid BD_ADDR quirk it is possible that
- * it becomes a valid address if the bootloader does
- * provide it (see above).
- *
- * In case any of them is set, the controller has to
- * start up as unconfigured.
- */
- if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
- invalid_bdaddr)
- hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
-
- /* For an unconfigured controller it is required to
- * read at least the version information provided by
- * the Read Local Version Information command.
- *
- * If the set_bdaddr driver callback is provided, then
- * also the original Bluetooth public device address
- * will be read using the Read BD Address command.
- */
- if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
- ret = __hci_unconf_init(hdev);
- }
-
- if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
- /* If public address change is configured, ensure that
- * the address gets programmed. If the driver does not
- * support changing the public address, fail the power
- * on procedure.
- */
- if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
- hdev->set_bdaddr)
- ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
- else
- ret = -EADDRNOTAVAIL;
- }
-
- if (!ret) {
- if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
- !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
- ret = __hci_init(hdev);
- if (!ret && hdev->post_init)
- ret = hdev->post_init(hdev);
- }
- }
-
- /* If the HCI Reset command is clearing all diagnostic settings,
- * then they need to be reprogrammed after the init procedure
- * completed.
- */
- if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
- !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
- hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
- ret = hdev->set_diag(hdev, true);
-
- msft_do_open(hdev);
- aosp_do_open(hdev);
-
- clear_bit(HCI_INIT, &hdev->flags);
-
- if (!ret) {
- hci_dev_hold(hdev);
- hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
- hci_adv_instances_set_rpa_expired(hdev, true);
- set_bit(HCI_UP, &hdev->flags);
- hci_sock_dev_event(hdev, HCI_DEV_UP);
- hci_leds_update_powered(hdev, true);
- if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
- !hci_dev_test_flag(hdev, HCI_CONFIG) &&
- !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
- !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
- hci_dev_test_flag(hdev, HCI_MGMT) &&
- hdev->dev_type == HCI_PRIMARY) {
- ret = __hci_req_hci_power_on(hdev);
- mgmt_power_on(hdev, ret);
- }
- } else {
- /* Init failed, cleanup */
- flush_work(&hdev->tx_work);
-
- /* Since hci_rx_work() is possible to awake new cmd_work
- * it should be flushed first to avoid unexpected call of
- * hci_cmd_work()
- */
- flush_work(&hdev->rx_work);
- flush_work(&hdev->cmd_work);
-
- skb_queue_purge(&hdev->cmd_q);
- skb_queue_purge(&hdev->rx_q);
-
- if (hdev->flush)
- hdev->flush(hdev);
-
- if (hdev->sent_cmd) {
- kfree_skb(hdev->sent_cmd);
- hdev->sent_cmd = NULL;
- }
-
- clear_bit(HCI_RUNNING, &hdev->flags);
- hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
-
- hdev->close(hdev);
- hdev->flags &= BIT(HCI_RAW);
- }
+ ret = hci_dev_open_sync(hdev);
-done:
hci_req_sync_unlock(hdev);
return ret;
}
@@ -1583,155 +542,18 @@ done:
return err;
}
-/* This function requires the caller holds hdev->lock */
-static void hci_pend_le_actions_clear(struct hci_dev *hdev)
-{
- struct hci_conn_params *p;
-
- list_for_each_entry(p, &hdev->le_conn_params, list) {
- if (p->conn) {
- hci_conn_drop(p->conn);
- hci_conn_put(p->conn);
- p->conn = NULL;
- }
- list_del_init(&p->action);
- }
-
- BT_DBG("All LE pending actions cleared");
-}
-
int hci_dev_do_close(struct hci_dev *hdev)
{
- bool auto_off;
- int err = 0;
+ int err;
BT_DBG("%s %p", hdev->name, hdev);
- cancel_delayed_work(&hdev->power_off);
- cancel_delayed_work(&hdev->ncmd_timer);
-
- hci_request_cancel_all(hdev);
hci_req_sync_lock(hdev);
- if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
- !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
- test_bit(HCI_UP, &hdev->flags)) {
- /* Execute vendor specific shutdown routine */
- if (hdev->shutdown)
- err = hdev->shutdown(hdev);
- }
-
- if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
- cancel_delayed_work_sync(&hdev->cmd_timer);
- hci_req_sync_unlock(hdev);
- return err;
- }
-
- hci_leds_update_powered(hdev, false);
-
- /* Flush RX and TX works */
- flush_work(&hdev->tx_work);
- flush_work(&hdev->rx_work);
-
- if (hdev->discov_timeout > 0) {
- hdev->discov_timeout = 0;
- hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
- hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
- }
-
- if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
- cancel_delayed_work(&hdev->service_cache);
-
- if (hci_dev_test_flag(hdev, HCI_MGMT)) {
- struct adv_info *adv_instance;
-
- cancel_delayed_work_sync(&hdev->rpa_expired);
-
- list_for_each_entry(adv_instance, &hdev->adv_instances, list)
- cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
- }
-
- /* Avoid potential lockdep warnings from the *_flush() calls by
- * ensuring the workqueue is empty up front.
- */
- drain_workqueue(hdev->workqueue);
-
- hci_dev_lock(hdev);
-
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
-
- auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
-
- if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
- !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
- hci_dev_test_flag(hdev, HCI_MGMT))
- __mgmt_power_off(hdev);
-
- hci_inquiry_cache_flush(hdev);
- hci_pend_le_actions_clear(hdev);
- hci_conn_hash_flush(hdev);
- hci_dev_unlock(hdev);
-
- smp_unregister(hdev);
-
- hci_sock_dev_event(hdev, HCI_DEV_DOWN);
-
- aosp_do_close(hdev);
- msft_do_close(hdev);
-
- if (hdev->flush)
- hdev->flush(hdev);
-
- /* Reset device */
- skb_queue_purge(&hdev->cmd_q);
- atomic_set(&hdev->cmd_cnt, 1);
- if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
- !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
- set_bit(HCI_INIT, &hdev->flags);
- __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
- clear_bit(HCI_INIT, &hdev->flags);
- }
-
- /* flush cmd work */
- flush_work(&hdev->cmd_work);
-
- /* Drop queues */
- skb_queue_purge(&hdev->rx_q);
- skb_queue_purge(&hdev->cmd_q);
- skb_queue_purge(&hdev->raw_q);
-
- /* Drop last sent command */
- if (hdev->sent_cmd) {
- cancel_delayed_work_sync(&hdev->cmd_timer);
- kfree_skb(hdev->sent_cmd);
- hdev->sent_cmd = NULL;
- }
-
- clear_bit(HCI_RUNNING, &hdev->flags);
- hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
-
- if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
- wake_up(&hdev->suspend_wait_q);
-
- /* After this point our queues are empty
- * and no tasks are scheduled. */
- hdev->close(hdev);
-
- /* Clear flags */
- hdev->flags &= BIT(HCI_RAW);
- hci_dev_clear_volatile_flags(hdev);
-
- /* Controller radio is available but is currently powered down */
- hdev->amp_status = AMP_STATUS_POWERED_DOWN;
-
- memset(hdev->eir, 0, sizeof(hdev->eir));
- memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
- bacpy(&hdev->random_addr, BDADDR_ANY);
- hci_codec_list_clear(&hdev->local_codecs);
+ err = hci_dev_close_sync(hdev);
hci_req_sync_unlock(hdev);
- hci_dev_put(hdev);
return err;
}
@@ -1787,7 +609,7 @@ static int hci_dev_do_reset(struct hci_dev *hdev)
atomic_set(&hdev->cmd_cnt, 1);
hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
- ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
+ ret = hci_reset_sync(hdev);
hci_req_sync_unlock(hdev);
return ret;
@@ -1850,7 +672,7 @@ done:
return ret;
}
-static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
+static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
bool conn_changed, discov_changed;
@@ -1951,7 +773,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
* get correctly modified as this was a non-mgmt change.
*/
if (!err)
- hci_update_scan_state(hdev, dr.dev_opt);
+ hci_update_passive_scan_state(hdev, dr.dev_opt);
break;
case HCISETLINKPOL:
@@ -2133,9 +955,7 @@ static void hci_power_on(struct work_struct *work)
hci_dev_test_flag(hdev, HCI_MGMT) &&
hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
cancel_delayed_work(&hdev->power_off);
- hci_req_sync_lock(hdev);
- err = __hci_req_hci_power_on(hdev);
- hci_req_sync_unlock(hdev);
+ err = hci_powered_update_sync(hdev);
mgmt_power_on(hdev, err);
return;
}
@@ -3096,7 +1916,7 @@ bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
switch (hci_get_adv_monitor_offload_ext(hdev)) {
case HCI_ADV_MONITOR_EXT_NONE:
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
/* Message was not forwarded to controller - not an error */
return false;
@@ -3160,7 +1980,7 @@ bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
if (!*err && !pending)
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
hdev->name, handle, *err, pending ? "" : "not ");
@@ -3192,7 +2012,7 @@ bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
}
if (update)
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
hdev->name, *err, pending ? "" : "not ");
@@ -3486,7 +2306,7 @@ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
hci_conn_params_free(params);
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
@@ -3554,61 +2374,6 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
}
}
-static void hci_suspend_clear_tasks(struct hci_dev *hdev)
-{
- int i;
-
- for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
- clear_bit(i, hdev->suspend_tasks);
-
- wake_up(&hdev->suspend_wait_q);
-}
-
-static int hci_suspend_wait_event(struct hci_dev *hdev)
-{
-#define WAKE_COND \
- (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \
- __SUSPEND_NUM_TASKS)
-
- int i;
- int ret = wait_event_timeout(hdev->suspend_wait_q,
- WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
-
- if (ret == 0) {
- bt_dev_err(hdev, "Timed out waiting for suspend events");
- for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
- if (test_bit(i, hdev->suspend_tasks))
- bt_dev_err(hdev, "Suspend timeout bit: %d", i);
- clear_bit(i, hdev->suspend_tasks);
- }
-
- ret = -ETIMEDOUT;
- } else {
- ret = 0;
- }
-
- return ret;
-}
-
-static void hci_prepare_suspend(struct work_struct *work)
-{
- struct hci_dev *hdev =
- container_of(work, struct hci_dev, suspend_prepare);
-
- hci_dev_lock(hdev);
- hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
- hci_dev_unlock(hdev);
-}
-
-static int hci_change_suspend_state(struct hci_dev *hdev,
- enum suspended_state next)
-{
- hdev->suspend_state_next = next;
- set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
- queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
- return hci_suspend_wait_event(hdev);
-}
-
static void hci_clear_wake_reason(struct hci_dev *hdev)
{
hci_dev_lock(hdev);
@@ -3745,7 +2510,8 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
INIT_WORK(&hdev->tx_work, hci_tx_work);
INIT_WORK(&hdev->power_on, hci_power_on);
INIT_WORK(&hdev->error_reset, hci_error_reset);
- INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
+
+ hci_cmd_sync_init(hdev);
INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
@@ -3754,7 +2520,6 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
skb_queue_head_init(&hdev->raw_q);
init_waitqueue_head(&hdev->req_wait_q);
- init_waitqueue_head(&hdev->suspend_wait_q);
INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
@@ -3882,6 +2647,7 @@ int hci_register_dev(struct hci_dev *hdev)
return id;
err_wqueue:
+ debugfs_remove_recursive(hdev->debugfs);
destroy_workqueue(hdev->workqueue);
destroy_workqueue(hdev->req_workqueue);
err:
@@ -3904,11 +2670,10 @@ void hci_unregister_dev(struct hci_dev *hdev)
cancel_work_sync(&hdev->power_on);
- if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
- hci_suspend_clear_tasks(hdev);
+ hci_cmd_sync_clear(hdev);
+
+ if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks))
unregister_pm_notifier(&hdev->suspend_notifier);
- cancel_work_sync(&hdev->suspend_prepare);
- }
msft_unregister(hdev);
@@ -3975,7 +2740,6 @@ EXPORT_SYMBOL(hci_release_dev);
int hci_suspend_dev(struct hci_dev *hdev)
{
int ret;
- u8 state = BT_RUNNING;
bt_dev_dbg(hdev, "");
@@ -3984,40 +2748,17 @@ int hci_suspend_dev(struct hci_dev *hdev)
hci_dev_test_flag(hdev, HCI_UNREGISTER))
return 0;
- /* If powering down, wait for completion. */
- if (mgmt_powering_down(hdev)) {
- set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
- ret = hci_suspend_wait_event(hdev);
- if (ret)
- goto done;
- }
-
- /* Suspend consists of two actions:
- * - First, disconnect everything and make the controller not
- * connectable (disabling scanning)
- * - Second, program event filter/accept list and enable scan
- */
- ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
- if (ret)
- goto clear;
-
- state = BT_SUSPEND_DISCONNECT;
+ /* If powering down don't attempt to suspend */
+ if (mgmt_powering_down(hdev))
+ return 0;
- /* Only configure accept list if device may wakeup. */
- if (hdev->wakeup && hdev->wakeup(hdev)) {
- ret = hci_change_suspend_state(hdev, BT_SUSPEND_CONFIGURE_WAKE);
- if (!ret)
- state = BT_SUSPEND_CONFIGURE_WAKE;
- }
+ hci_req_sync_lock(hdev);
+ ret = hci_suspend_sync(hdev);
+ hci_req_sync_unlock(hdev);
-clear:
hci_clear_wake_reason(hdev);
- mgmt_suspending(hdev, state);
+ mgmt_suspending(hdev, hdev->suspend_state);
-done:
- /* We always allow suspend even if suspend preparation failed and
- * attempt to recover in resume.
- */
hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
return ret;
}
@@ -4039,10 +2780,12 @@ int hci_resume_dev(struct hci_dev *hdev)
if (mgmt_powering_down(hdev))
return 0;
- ret = hci_change_suspend_state(hdev, BT_RUNNING);
+ hci_req_sync_lock(hdev);
+ ret = hci_resume_sync(hdev);
+ hci_req_sync_unlock(hdev);
mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
- hdev->wake_addr_type);
+ hdev->wake_addr_type);
hci_sock_dev_event(hdev, HCI_DEV_RESUME);
return ret;
@@ -4270,25 +3013,6 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
-/* Send HCI command and wait for command complete event */
-struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param, u32 timeout)
-{
- struct sk_buff *skb;
-
- if (!test_bit(HCI_UP, &hdev->flags))
- return ERR_PTR(-ENETDOWN);
-
- bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
-
- hci_req_sync_lock(hdev);
- skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
- hci_req_sync_unlock(hdev);
-
- return skb;
-}
-EXPORT_SYMBOL(hci_cmd_sync);
-
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 7d0db1ca1248..efc5458b1345 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -545,9 +545,7 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
hdev->features[1][0] &= ~LMP_HOST_SSP;
}
- if (hci_dev_test_flag(hdev, HCI_MGMT))
- mgmt_ssp_enable_complete(hdev, sent->mode, status);
- else if (!status) {
+ if (!status) {
if (sent->mode)
hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
else
@@ -1239,6 +1237,55 @@ static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
hci_dev_unlock(hdev);
}
+static void hci_cc_le_remove_adv_set(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *)skb->data);
+ u8 *instance;
+ int err;
+
+ if (status)
+ return;
+
+ instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
+ if (!instance)
+ return;
+
+ hci_dev_lock(hdev);
+
+ err = hci_remove_adv_instance(hdev, *instance);
+ if (!err)
+ mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
+ *instance);
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_le_clear_adv_sets(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *)skb->data);
+ struct adv_info *adv, *n;
+ int err;
+
+ if (status)
+ return;
+
+ if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
+ return;
+
+ hci_dev_lock(hdev);
+
+ list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
+ u8 instance = adv->instance;
+
+ err = hci_remove_adv_instance(hdev, instance);
+ if (!err)
+ mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
+ hdev, instance);
+ }
+
+ hci_dev_unlock(hdev);
+}
+
static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -1326,8 +1373,10 @@ static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
&conn->le_conn_timeout,
conn->conn_timeout);
} else {
- if (adv) {
- adv->enabled = false;
+ if (cp->num_of_sets) {
+ if (adv)
+ adv->enabled = false;
+
/* If just one instance was disabled check if there are
* any other instance enabled before clearing HCI_LE_ADV
*/
@@ -1463,16 +1512,10 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
* interrupted scanning due to a connect request. Mark
- * therefore discovery as stopped. If this was not
- * because of a connect request advertising might have
- * been disabled because of active scanning, so
- * re-enable it again if necessary.
+ * therefore discovery as stopped.
*/
if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
- else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
- hdev->discovery.state == DISCOVERY_FINDING)
- hci_req_reenable_advertising(hdev);
break;
@@ -2371,9 +2414,14 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
struct hci_cp_disconnect *cp;
+ struct hci_conn_params *params;
struct hci_conn *conn;
+ bool mgmt_conn;
- if (!status)
+	/* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended,
+	 * otherwise clean up the connection immediately.
+ */
+ if (!status && !hdev->suspended)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
@@ -2383,23 +2431,60 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
- if (conn) {
+ if (!conn)
+ goto unlock;
+
+ if (status) {
mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
conn->dst_type, status);
if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
hdev->cur_adv_instance = conn->adv_instance;
- hci_req_reenable_advertising(hdev);
+ hci_enable_advertising(hdev);
}
- /* If the disconnection failed for any reason, the upper layer
- * does not retry to disconnect in current implementation.
- * Hence, we need to do some basic cleanup here and re-enable
- * advertising if necessary.
- */
- hci_conn_del(conn);
+ goto done;
+ }
+
+ mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
+
+ if (conn->type == ACL_LINK) {
+ if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
+ hci_remove_link_key(hdev, &conn->dst);
+ }
+
+ params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
+ if (params) {
+ switch (params->auto_connect) {
+ case HCI_AUTO_CONN_LINK_LOSS:
+ if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
+ break;
+ fallthrough;
+
+ case HCI_AUTO_CONN_DIRECT:
+ case HCI_AUTO_CONN_ALWAYS:
+ list_del_init(&params->action);
+ list_add(&params->action, &hdev->pend_le_conns);
+ break;
+
+ default:
+ break;
+ }
}
+ mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
+ cp->reason, mgmt_conn);
+
+ hci_disconn_cfm(conn, cp->reason);
+
+done:
+ /* If the disconnection failed for any reason, the upper layer
+ * does not retry to disconnect in current implementation.
+ * Hence, we need to do some basic cleanup here and re-enable
+ * advertising if necessary.
+ */
+ hci_conn_del(conn);
+unlock:
hci_dev_unlock(hdev);
}
@@ -2977,7 +3062,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
case HCI_AUTO_CONN_ALWAYS:
list_del_init(&params->action);
list_add(&params->action, &hdev->pend_le_conns);
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
break;
default:
@@ -2987,14 +3072,6 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_disconn_cfm(conn, ev->reason);
- /* The suspend notifier is waiting for all devices to disconnect so
- * clear the bit from pending tasks and inform the wait queue.
- */
- if (list_empty(&hdev->conn_hash.list) &&
- test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
- wake_up(&hdev->suspend_wait_q);
- }
-
/* Re-enable advertising if necessary, since it might
* have been disabled by the connection. From the
* HCI_LE_Set_Advertise_Enable command description in
@@ -3007,7 +3084,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
*/
if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
hdev->cur_adv_instance = conn->adv_instance;
- hci_req_reenable_advertising(hdev);
+ hci_enable_advertising(hdev);
}
hci_conn_del(conn);
@@ -3723,6 +3800,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cc_le_set_adv_set_random_addr(hdev, skb);
break;
+ case HCI_OP_LE_REMOVE_ADV_SET:
+ hci_cc_le_remove_adv_set(hdev, skb);
+ break;
+
+ case HCI_OP_LE_CLEAR_ADV_SETS:
+ hci_cc_le_clear_adv_sets(hdev, skb);
+ break;
+
case HCI_OP_LE_READ_TRANSMIT_POWER:
hci_cc_le_read_transmit_power(hdev, skb);
break;
@@ -4445,7 +4530,6 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
{
struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- unsigned int notify_evt;
BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
@@ -4517,22 +4601,18 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
}
bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
-
- switch (ev->air_mode) {
- case 0x02:
- notify_evt = HCI_NOTIFY_ENABLE_SCO_CVSD;
- break;
- case 0x03:
- notify_evt = HCI_NOTIFY_ENABLE_SCO_TRANSP;
- break;
- }
-
/* Notify only in case of SCO over HCI transport data path which
* is zero and non-zero value shall be non-HCI transport data path
*/
- if (conn->codec.data_path == 0) {
- if (hdev->notify)
- hdev->notify(hdev, notify_evt);
+ if (conn->codec.data_path == 0 && hdev->notify) {
+ switch (ev->air_mode) {
+ case 0x02:
+ hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
+ break;
+ case 0x03:
+ hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
+ break;
+ }
}
hci_connect_cfm(conn, ev->status);
@@ -5412,7 +5492,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
}
unlock:
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
hci_dev_unlock(hdev);
}
@@ -5441,23 +5521,30 @@ static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
le16_to_cpu(ev->interval),
le16_to_cpu(ev->latency),
le16_to_cpu(ev->supervision_timeout));
-
- if (use_ll_privacy(hdev) &&
- hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
- hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
- hci_req_disable_address_resolution(hdev);
}
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
struct hci_conn *conn;
- struct adv_info *adv;
+ struct adv_info *adv, *n;
BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
adv = hci_find_adv_instance(hdev, ev->handle);
+ /* The Bluetooth Core 5.3 specification clearly states that this event
+ * shall not be sent when the Host disables the advertising set. So in
+ * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
+ *
+ * When the Host disables an advertising set, all cleanup is done via
+	 * its command callback and need not be duplicated here.
+ */
+ if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
+ bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
+ return;
+ }
+
if (ev->status) {
if (!adv)
return;
@@ -5466,6 +5553,13 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_remove_adv_instance(hdev, ev->handle);
mgmt_advertising_removed(NULL, hdev, ev->handle);
+ list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
+ if (adv->enabled)
+ return;
+ }
+
+ /* We are no longer advertising, clear HCI_LE_ADV */
+ hci_dev_clear_flag(hdev, HCI_LE_ADV);
return;
}
@@ -5529,8 +5623,9 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
return NULL;
- /* Ignore if the device is blocked */
- if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
+ /* Ignore if the device is blocked or hdev is suspended */
+ if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
+ hdev->suspended)
return NULL;
/* Most controller will fail if we try to create new connections
@@ -5825,7 +5920,8 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_ev_le_advertising_info *ev = ptr;
s8 rssi;
- if (ev->length <= HCI_MAX_AD_LENGTH) {
+ if (ev->length <= HCI_MAX_AD_LENGTH &&
+ ev->data + ev->length <= skb_tail_pointer(skb)) {
rssi = ev->data[ev->length];
process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
ev->bdaddr_type, NULL, 0, rssi,
@@ -5835,6 +5931,11 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
}
ptr += sizeof(*ev) + ev->length + 1;
+
+ if (ptr > (void *) skb_tail_pointer(skb) - sizeof(*ev)) {
+ bt_dev_err(hdev, "Malicious advertising data. Stopping processing");
+ break;
+ }
}
hci_dev_unlock(hdev);
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 92611bfc0b9e..8b3205e4b23e 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -32,10 +32,6 @@
#include "msft.h"
#include "eir.h"
-#define HCI_REQ_DONE 0
-#define HCI_REQ_PEND 1
-#define HCI_REQ_CANCELED 2
-
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
skb_queue_head_init(&req->cmd_q);
@@ -101,8 +97,8 @@ int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
return req_run(req, NULL, complete);
}
-static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
- struct sk_buff *skb)
+void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+ struct sk_buff *skb)
{
bt_dev_dbg(hdev, "result 0x%2.2x", result);
@@ -126,70 +122,6 @@ void hci_req_sync_cancel(struct hci_dev *hdev, int err)
}
}
-struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param, u8 event, u32 timeout)
-{
- struct hci_request req;
- struct sk_buff *skb;
- int err = 0;
-
- bt_dev_dbg(hdev, "");
-
- hci_req_init(&req, hdev);
-
- hci_req_add_ev(&req, opcode, plen, param, event);
-
- hdev->req_status = HCI_REQ_PEND;
-
- err = hci_req_run_skb(&req, hci_req_sync_complete);
- if (err < 0)
- return ERR_PTR(err);
-
- err = wait_event_interruptible_timeout(hdev->req_wait_q,
- hdev->req_status != HCI_REQ_PEND, timeout);
-
- if (err == -ERESTARTSYS)
- return ERR_PTR(-EINTR);
-
- switch (hdev->req_status) {
- case HCI_REQ_DONE:
- err = -bt_to_errno(hdev->req_result);
- break;
-
- case HCI_REQ_CANCELED:
- err = -hdev->req_result;
- break;
-
- default:
- err = -ETIMEDOUT;
- break;
- }
-
- hdev->req_status = hdev->req_result = 0;
- skb = hdev->req_skb;
- hdev->req_skb = NULL;
-
- bt_dev_dbg(hdev, "end: err %d", err);
-
- if (err < 0) {
- kfree_skb(skb);
- return ERR_PTR(err);
- }
-
- if (!skb)
- return ERR_PTR(-ENODATA);
-
- return skb;
-}
-EXPORT_SYMBOL(__hci_cmd_sync_ev);
-
-struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param, u32 timeout)
-{
- return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
-}
-EXPORT_SYMBOL(__hci_cmd_sync);
-
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
unsigned long opt),
@@ -436,82 +368,6 @@ static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
return false;
}
-/* This function controls the background scanning based on hdev->pend_le_conns
- * list. If there are pending LE connection we start the background scanning,
- * otherwise we stop it.
- *
- * This function requires the caller holds hdev->lock.
- */
-static void __hci_update_background_scan(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
-
- if (!test_bit(HCI_UP, &hdev->flags) ||
- test_bit(HCI_INIT, &hdev->flags) ||
- hci_dev_test_flag(hdev, HCI_SETUP) ||
- hci_dev_test_flag(hdev, HCI_CONFIG) ||
- hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
- hci_dev_test_flag(hdev, HCI_UNREGISTER))
- return;
-
- /* No point in doing scanning if LE support hasn't been enabled */
- if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
- return;
-
- /* If discovery is active don't interfere with it */
- if (hdev->discovery.state != DISCOVERY_STOPPED)
- return;
-
- /* Reset RSSI and UUID filters when starting background scanning
- * since these filters are meant for service discovery only.
- *
- * The Start Discovery and Start Service Discovery operations
- * ensure to set proper values for RSSI threshold and UUID
- * filter list. So it is safe to just reset them here.
- */
- hci_discovery_filter_clear(hdev);
-
- bt_dev_dbg(hdev, "ADV monitoring is %s",
- hci_is_adv_monitoring(hdev) ? "on" : "off");
-
- if (list_empty(&hdev->pend_le_conns) &&
- list_empty(&hdev->pend_le_reports) &&
- !hci_is_adv_monitoring(hdev)) {
- /* If there is no pending LE connections or devices
- * to be scanned for or no ADV monitors, we should stop the
- * background scanning.
- */
-
- /* If controller is not scanning we are done. */
- if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
- return;
-
- hci_req_add_le_scan_disable(req, false);
-
- bt_dev_dbg(hdev, "stopping background scanning");
- } else {
- /* If there is at least one pending LE connection, we should
- * keep the background scan running.
- */
-
- /* If controller is connecting, we should not start scanning
- * since some controllers are not able to scan and connect at
- * the same time.
- */
- if (hci_lookup_le_connect(hdev))
- return;
-
- /* If controller is currently scanning, we stop it to ensure we
- * don't miss any advertising (due to duplicates filter).
- */
- if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
- hci_req_add_le_scan_disable(req, false);
-
- hci_req_add_le_passive_scan(req);
- bt_dev_dbg(hdev, "starting background scanning");
- }
-}
-
void __hci_req_update_name(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
@@ -560,9 +416,6 @@ void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
return;
}
- if (hdev->suspended)
- set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
-
if (use_ext_scan(hdev)) {
struct hci_cp_le_set_ext_scan_enable cp;
@@ -579,9 +432,7 @@ void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
}
/* Disable address resolution */
- if (use_ll_privacy(hdev) &&
- hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
- hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
+ if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
__u8 enable = 0x00;
hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
@@ -600,8 +451,7 @@ static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
cp.bdaddr_type);
hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
- if (use_ll_privacy(req->hdev) &&
- hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
+ if (use_ll_privacy(req->hdev)) {
struct smp_irk *irk;
irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
@@ -654,8 +504,7 @@ static int add_to_accept_list(struct hci_request *req,
cp.bdaddr_type);
hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
- if (use_ll_privacy(hdev) &&
- hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
+ if (use_ll_privacy(hdev)) {
struct smp_irk *irk;
irk = hci_find_irk_by_addr(hdev, &params->addr,
@@ -694,8 +543,7 @@ static u8 update_accept_list(struct hci_request *req)
*/
bool allow_rpa = hdev->suspended;
- if (use_ll_privacy(hdev) &&
- hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
+ if (use_ll_privacy(hdev))
allow_rpa = true;
/* Go through the current accept list programmed into the
@@ -784,9 +632,7 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
return;
}
- if (use_ll_privacy(hdev) &&
- hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
- addr_resolv) {
+ if (use_ll_privacy(hdev) && addr_resolv) {
u8 enable = 0x01;
hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
@@ -943,8 +789,6 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
if (hdev->suspended) {
window = hdev->le_scan_window_suspend;
interval = hdev->le_scan_int_suspend;
-
- set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
} else if (hci_is_le_conn_scanning(hdev)) {
window = hdev->le_scan_window_connect;
interval = hdev->le_scan_int_connect;
@@ -977,59 +821,6 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
addr_resolv);
}
-static void hci_req_clear_event_filter(struct hci_request *req)
-{
- struct hci_cp_set_event_filter f;
-
- if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
- return;
-
- if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
- memset(&f, 0, sizeof(f));
- f.flt_type = HCI_FLT_CLEAR_ALL;
- hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
- }
-}
-
-static void hci_req_set_event_filter(struct hci_request *req)
-{
- struct bdaddr_list_with_flags *b;
- struct hci_cp_set_event_filter f;
- struct hci_dev *hdev = req->hdev;
- u8 scan = SCAN_DISABLED;
- bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
-
- if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
- return;
-
- /* Always clear event filter when starting */
- hci_req_clear_event_filter(req);
-
- list_for_each_entry(b, &hdev->accept_list, list) {
- if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
- b->current_flags))
- continue;
-
- memset(&f, 0, sizeof(f));
- bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
- f.flt_type = HCI_FLT_CONN_SETUP;
- f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
- f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
-
- bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
- hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
- scan = SCAN_PAGE;
- }
-
- if (scan && !scanning) {
- set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
- hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
- } else if (!scan && scanning) {
- set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
- hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
- }
-}
-
static void cancel_adv_timeout(struct hci_dev *hdev)
{
if (hdev->adv_instance_timeout) {
@@ -1088,185 +879,6 @@ int hci_req_resume_adv_instances(struct hci_dev *hdev)
return hci_req_run(&req, NULL);
}
-static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
-{
- bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
- status);
- if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
- test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
- clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
- clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
- wake_up(&hdev->suspend_wait_q);
- }
-
- if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
- clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
- wake_up(&hdev->suspend_wait_q);
- }
-}
-
-static void hci_req_prepare_adv_monitor_suspend(struct hci_request *req,
- bool suspending)
-{
- struct hci_dev *hdev = req->hdev;
-
- switch (hci_get_adv_monitor_offload_ext(hdev)) {
- case HCI_ADV_MONITOR_EXT_MSFT:
- if (suspending)
- msft_suspend(hdev);
- else
- msft_resume(hdev);
- break;
- default:
- return;
- }
-
- /* No need to block when enabling since it's on resume path */
- if (hdev->suspended && suspending)
- set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
-}
-
-/* Call with hci_dev_lock */
-void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
-{
- int old_state;
- struct hci_conn *conn;
- struct hci_request req;
- u8 page_scan;
- int disconnect_counter;
-
- if (next == hdev->suspend_state) {
- bt_dev_dbg(hdev, "Same state before and after: %d", next);
- goto done;
- }
-
- hdev->suspend_state = next;
- hci_req_init(&req, hdev);
-
- if (next == BT_SUSPEND_DISCONNECT) {
- /* Mark device as suspended */
- hdev->suspended = true;
-
- /* Pause discovery if not already stopped */
- old_state = hdev->discovery.state;
- if (old_state != DISCOVERY_STOPPED) {
- set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
- hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
- queue_work(hdev->req_workqueue, &hdev->discov_update);
- }
-
- hdev->discovery_paused = true;
- hdev->discovery_old_state = old_state;
-
- /* Stop directed advertising */
- old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
- if (old_state) {
- set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
- cancel_delayed_work(&hdev->discov_off);
- queue_delayed_work(hdev->req_workqueue,
- &hdev->discov_off, 0);
- }
-
- /* Pause other advertisements */
- if (hdev->adv_instance_cnt)
- __hci_req_pause_adv_instances(&req);
-
- hdev->advertising_paused = true;
- hdev->advertising_old_state = old_state;
-
- /* Disable page scan if enabled */
- if (test_bit(HCI_PSCAN, &hdev->flags)) {
- page_scan = SCAN_DISABLED;
- hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
- &page_scan);
- set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
- }
-
- /* Disable LE passive scan if enabled */
- if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
- cancel_interleave_scan(hdev);
- hci_req_add_le_scan_disable(&req, false);
- }
-
- /* Disable advertisement filters */
- hci_req_prepare_adv_monitor_suspend(&req, true);
-
- /* Prevent disconnects from causing scanning to be re-enabled */
- hdev->scanning_paused = true;
-
- /* Run commands before disconnecting */
- hci_req_run(&req, suspend_req_complete);
-
- disconnect_counter = 0;
- /* Soft disconnect everything (power off) */
- list_for_each_entry(conn, &hdev->conn_hash.list, list) {
- hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
- disconnect_counter++;
- }
-
- if (disconnect_counter > 0) {
- bt_dev_dbg(hdev,
- "Had %d disconnects. Will wait on them",
- disconnect_counter);
- set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
- }
- } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
- /* Unpause to take care of updating scanning params */
- hdev->scanning_paused = false;
- /* Enable event filter for paired devices */
- hci_req_set_event_filter(&req);
- /* Enable passive scan at lower duty cycle */
- __hci_update_background_scan(&req);
- /* Pause scan changes again. */
- hdev->scanning_paused = true;
- hci_req_run(&req, suspend_req_complete);
- } else {
- hdev->suspended = false;
- hdev->scanning_paused = false;
-
- /* Clear any event filters and restore scan state */
- hci_req_clear_event_filter(&req);
- __hci_req_update_scan(&req);
-
- /* Reset passive/background scanning to normal */
- __hci_update_background_scan(&req);
- /* Enable all of the advertisement filters */
- hci_req_prepare_adv_monitor_suspend(&req, false);
-
- /* Unpause directed advertising */
- hdev->advertising_paused = false;
- if (hdev->advertising_old_state) {
- set_bit(SUSPEND_UNPAUSE_ADVERTISING,
- hdev->suspend_tasks);
- hci_dev_set_flag(hdev, HCI_ADVERTISING);
- queue_work(hdev->req_workqueue,
- &hdev->discoverable_update);
- hdev->advertising_old_state = 0;
- }
-
- /* Resume other advertisements */
- if (hdev->adv_instance_cnt)
- __hci_req_resume_adv_instances(&req);
-
- /* Unpause discovery */
- hdev->discovery_paused = false;
- if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
- hdev->discovery_old_state != DISCOVERY_STOPPING) {
- set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
- hci_discovery_set_state(hdev, DISCOVERY_STARTING);
- queue_work(hdev->req_workqueue, &hdev->discov_update);
- }
-
- hci_req_run(&req, suspend_req_complete);
- }
-
- hdev->suspend_state = next;
-
-done:
- clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
- wake_up(&hdev->suspend_wait_q);
-}
-
static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
@@ -1548,8 +1160,7 @@ void hci_req_disable_address_resolution(struct hci_dev *hdev)
struct hci_request req;
__u8 enable = 0x00;
- if (!use_ll_privacy(hdev) &&
- !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
+ if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
return;
hci_req_init(&req, hdev);
@@ -1692,8 +1303,7 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
/* If Controller supports LL Privacy use own address type is
* 0x03
*/
- if (use_ll_privacy(hdev) &&
- hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
+ if (use_ll_privacy(hdev))
*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
else
*own_addr_type = ADDR_LE_DEV_RANDOM;
@@ -1871,7 +1481,8 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
- if (own_addr_type == ADDR_LE_DEV_RANDOM &&
+ if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
+ own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
bacmp(&random_addr, BDADDR_ANY)) {
struct hci_cp_le_set_adv_set_rand_addr cp;
@@ -2160,8 +1771,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
/* If Controller supports LL Privacy use own address type is
* 0x03
*/
- if (use_ll_privacy(hdev) &&
- hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
+ if (use_ll_privacy(hdev))
*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
else
*own_addr_type = ADDR_LE_DEV_RANDOM;
@@ -2301,47 +1911,6 @@ static void scan_update_work(struct work_struct *work)
hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}
-static int connectable_update(struct hci_request *req, unsigned long opt)
-{
- struct hci_dev *hdev = req->hdev;
-
- hci_dev_lock(hdev);
-
- __hci_req_update_scan(req);
-
- /* If BR/EDR is not enabled and we disable advertising as a
- * by-product of disabling connectable, we need to update the
- * advertising flags.
- */
- if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
- __hci_req_update_adv_data(req, hdev->cur_adv_instance);
-
- /* Update the advertising parameters if necessary */
- if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
- !list_empty(&hdev->adv_instances)) {
- if (ext_adv_capable(hdev))
- __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
- else
- __hci_req_enable_advertising(req);
- }
-
- __hci_update_background_scan(req);
-
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static void connectable_update_work(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- connectable_update);
- u8 status;
-
- hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
- mgmt_set_connectable_complete(hdev, status);
-}
-
static u8 get_service_classes(struct hci_dev *hdev)
{
struct bt_uuid *uuid;
@@ -2445,16 +2014,6 @@ static int discoverable_update(struct hci_request *req, unsigned long opt)
return 0;
}
-static void discoverable_update_work(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- discoverable_update);
- u8 status;
-
- hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
- mgmt_set_discoverable_complete(hdev, status);
-}
-
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
u8 reason)
{
@@ -2548,35 +2107,6 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
return 0;
}
-static int update_bg_scan(struct hci_request *req, unsigned long opt)
-{
- hci_dev_lock(req->hdev);
- __hci_update_background_scan(req);
- hci_dev_unlock(req->hdev);
- return 0;
-}
-
-static void bg_scan_update(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- bg_scan_update);
- struct hci_conn *conn;
- u8 status;
- int err;
-
- err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
- if (!err)
- return;
-
- hci_dev_lock(hdev);
-
- conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
- if (conn)
- hci_le_conn_failed(conn, status);
-
- hci_dev_unlock(hdev);
-}
-
static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
hci_req_add_le_scan_disable(req, false);
@@ -3163,10 +2693,7 @@ int __hci_req_hci_power_on(struct hci_dev *hdev)
void hci_request_setup(struct hci_dev *hdev)
{
INIT_WORK(&hdev->discov_update, discov_update);
- INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
INIT_WORK(&hdev->scan_update, scan_update_work);
- INIT_WORK(&hdev->connectable_update, connectable_update_work);
- INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
@@ -3179,10 +2706,7 @@ void hci_request_cancel_all(struct hci_dev *hdev)
hci_req_sync_cancel(hdev, ENODEV);
cancel_work_sync(&hdev->discov_update);
- cancel_work_sync(&hdev->bg_scan_update);
cancel_work_sync(&hdev->scan_update);
- cancel_work_sync(&hdev->connectable_update);
- cancel_work_sync(&hdev->discoverable_update);
cancel_delayed_work_sync(&hdev->discov_off);
cancel_delayed_work_sync(&hdev->le_scan_disable);
cancel_delayed_work_sync(&hdev->le_scan_restart);
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index f31420f58525..5f8e8846ec74 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -22,9 +22,13 @@
#include <asm/unaligned.h>
+#define HCI_REQ_DONE 0
+#define HCI_REQ_PEND 1
+#define HCI_REQ_CANCELED 2
+
#define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock)
#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock)
struct hci_request {
struct hci_dev *hdev;
struct sk_buff_head cmd_q;
@@ -40,6 +48,8 @@ void hci_req_purge(struct hci_request *req);
bool hci_req_status_pend(struct hci_dev *hdev);
int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
+void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+ struct sk_buff *skb);
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
const void *param);
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
@@ -117,10 +127,5 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason);
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
u8 reason);
-static inline void hci_update_background_scan(struct hci_dev *hdev)
-{
- queue_work(hdev->req_workqueue, &hdev->bg_scan_update);
-}
-
void hci_request_setup(struct hci_dev *hdev);
void hci_request_cancel_all(struct hci_dev *hdev);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index d0dad1fafe07..446573a12571 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -889,10 +889,6 @@ static int hci_sock_release(struct socket *sock)
}
sock_orphan(sk);
-
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
-
release_sock(sk);
sock_put(sk);
return 0;
@@ -2058,6 +2054,12 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
return err;
}
+static void hci_sock_destruct(struct sock *sk)
+{
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+}
+
static const struct proto_ops hci_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
@@ -2111,6 +2113,7 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
sock->state = SS_UNCONNECTED;
sk->sk_state = BT_OPEN;
+ sk->sk_destruct = hci_sock_destruct;
bt_sock_link(&hci_sk_list, sk);
return 0;
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
new file mode 100644
index 000000000000..ad86caf41f91
--- /dev/null
+++ b/net/bluetooth/hci_sync.c
@@ -0,0 +1,4922 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BlueZ - Bluetooth protocol stack for Linux
+ *
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#include <linux/property.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/mgmt.h>
+
+#include "hci_request.h"
+#include "hci_debugfs.h"
+#include "smp.h"
+#include "eir.h"
+#include "msft.h"
+#include "aosp.h"
+#include "leds.h"
+
+static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+ struct sk_buff *skb)
+{
+ bt_dev_dbg(hdev, "result 0x%2.2x", result);
+
+ if (hdev->req_status != HCI_REQ_PEND)
+ return;
+
+ hdev->req_result = result;
+ hdev->req_status = HCI_REQ_DONE;
+
+ if (skb) {
+ struct sock *sk = hci_skb_sk(skb);
+
+ /* Drop sk reference if set */
+ if (sk)
+ sock_put(sk);
+
+ hdev->req_skb = skb_get(skb);
+ }
+
+ wake_up_interruptible(&hdev->req_wait_q);
+}
+
+static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode,
+ u32 plen, const void *param,
+ struct sock *sk)
+{
+ int len = HCI_COMMAND_HDR_SIZE + plen;
+ struct hci_command_hdr *hdr;
+ struct sk_buff *skb;
+
+ skb = bt_skb_alloc(len, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
+ hdr->opcode = cpu_to_le16(opcode);
+ hdr->plen = plen;
+
+ if (plen)
+ skb_put_data(skb, param, plen);
+
+ bt_dev_dbg(hdev, "skb len %d", skb->len);
+
+ hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
+ hci_skb_opcode(skb) = opcode;
+
+	/* Grab a reference if the command needs to be associated with a sock
+	 * (e.g. the mgmt socket that initiated the command).
+ */
+ if (sk) {
+ hci_skb_sk(skb) = sk;
+ sock_hold(sk);
+ }
+
+ return skb;
+}
+
+static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
+ const void *param, u8 event, struct sock *sk)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct sk_buff *skb;
+
+ bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
+
+ /* If an error occurred during request building, there is no point in
+ * queueing the HCI command. We can simply return.
+ */
+ if (req->err)
+ return;
+
+ skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
+ if (!skb) {
+ bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
+ opcode);
+ req->err = -ENOMEM;
+ return;
+ }
+
+ if (skb_queue_empty(&req->cmd_q))
+ bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
+
+ bt_cb(skb)->hci.req_event = event;
+
+ skb_queue_tail(&req->cmd_q, skb);
+}
+
+static int hci_cmd_sync_run(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
+
+ /* If an error occurred during request building, remove all HCI
+ * commands queued on the HCI request queue.
+ */
+ if (req->err) {
+ skb_queue_purge(&req->cmd_q);
+ return req->err;
+ }
+
+ /* Do not allow empty requests */
+ if (skb_queue_empty(&req->cmd_q))
+ return -ENODATA;
+
+ skb = skb_peek_tail(&req->cmd_q);
+ bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
+ bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
+
+ spin_lock_irqsave(&hdev->cmd_q.lock, flags);
+ skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
+ spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
+
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+
+ return 0;
+}
+
+/* This function requires the caller holds hdev->req_lock. */
+struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout,
+ struct sock *sk)
+{
+ struct hci_request req;
+ struct sk_buff *skb;
+ int err = 0;
+
+ bt_dev_dbg(hdev, "Opcode 0x%4x", opcode);
+
+ hci_req_init(&req, hdev);
+
+ hci_cmd_sync_add(&req, opcode, plen, param, event, sk);
+
+ hdev->req_status = HCI_REQ_PEND;
+
+ err = hci_cmd_sync_run(&req);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ err = wait_event_interruptible_timeout(hdev->req_wait_q,
+ hdev->req_status != HCI_REQ_PEND,
+ timeout);
+
+ if (err == -ERESTARTSYS)
+ return ERR_PTR(-EINTR);
+
+ switch (hdev->req_status) {
+ case HCI_REQ_DONE:
+ err = -bt_to_errno(hdev->req_result);
+ break;
+
+ case HCI_REQ_CANCELED:
+ err = -hdev->req_result;
+ break;
+
+ default:
+ err = -ETIMEDOUT;
+ break;
+ }
+
+ hdev->req_status = 0;
+ hdev->req_result = 0;
+ skb = hdev->req_skb;
+ hdev->req_skb = NULL;
+
+ bt_dev_dbg(hdev, "end: err %d", err);
+
+ if (err < 0) {
+ kfree_skb(skb);
+ return ERR_PTR(err);
+ }
+
+ return skb;
+}
+EXPORT_SYMBOL(__hci_cmd_sync_sk);
+
+/* This function requires the caller holds hdev->req_lock. */
+struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout)
+{
+ return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
+}
+EXPORT_SYMBOL(__hci_cmd_sync);
+
+/* Send HCI command and wait for command complete event */
+struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout)
+{
+ struct sk_buff *skb;
+
+ if (!test_bit(HCI_UP, &hdev->flags))
+ return ERR_PTR(-ENETDOWN);
+
+ bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
+
+ hci_req_sync_lock(hdev);
+ skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
+ hci_req_sync_unlock(hdev);
+
+ return skb;
+}
+EXPORT_SYMBOL(hci_cmd_sync);
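
The two variants above differ only in locking: __hci_cmd_sync() expects the caller to already hold hdev->req_lock, while hci_cmd_sync() takes and releases the lock itself. A minimal sketch of a driver-style caller using the self-locking wrapper; the helper function is hypothetical, while the opcode, reply structure and timeout are the standard kernel ones:

	/* Hypothetical caller: read the controller's LMP subversion via the
	 * self-locking hci_cmd_sync() wrapper. Must not be called with
	 * hdev->req_lock already held, or it would deadlock on the mutex.
	 */
	static int example_read_lmp_subver(struct hci_dev *hdev, u16 *subver)
	{
		struct hci_rp_read_local_version *rp;
		struct sk_buff *skb;

		skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
				   HCI_CMD_TIMEOUT);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Guard against a short Command Complete before parsing */
		if (skb->len < sizeof(*rp)) {
			kfree_skb(skb);
			return -EILSEQ;
		}

		rp = (struct hci_rp_read_local_version *)skb->data;
		*subver = le16_to_cpu(rp->lmp_subver);

		kfree_skb(skb);
		return 0;
	}
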
+
+/* This function requires the caller holds hdev->req_lock. */
+struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout)
+{
+ return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
+ NULL);
+}
+EXPORT_SYMBOL(__hci_cmd_sync_ev);
+
+/* This function requires the caller holds hdev->req_lock. */
+int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout,
+ struct sock *sk)
+{
+ struct sk_buff *skb;
+ u8 status;
+
+ skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+	/* If the command returns a status event, skb will be set to NULL as
+	 * there are no parameters; in case of failure, IS_ERR(skb) is set and
+	 * the actual error can be retrieved with PTR_ERR(skb).
+ */
+ if (!skb)
+ return 0;
+
+ status = skb->data[0];
+
+ kfree_skb(skb);
+
+ return status;
+}
+EXPORT_SYMBOL(__hci_cmd_sync_status_sk);
+
+int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout)
+{
+ return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
+ NULL);
+}
+EXPORT_SYMBOL(__hci_cmd_sync_status);
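
The *_status helpers fold the first parameter byte of the response into the return value: a negative errno for transport errors or timeouts, 0 for success, and a positive HCI status code when the controller rejects the command. A hedged sketch of consuming that tri-state convention; the wrapper itself is illustrative, HCI_OP_WRITE_PG_TIMEOUT is the regular Write Page Timeout opcode:

	/* Illustrative wrapper: set the BR/EDR page timeout and normalize
	 * the tri-state result of __hci_cmd_sync_status(). Callers must
	 * hold hdev->req_lock, as with every __hci_cmd_sync_* helper.
	 */
	static int example_write_page_timeout_sync(struct hci_dev *hdev,
						   u16 timeout)
	{
		__le16 param = cpu_to_le16(timeout);
		int err;

		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_PG_TIMEOUT,
					    sizeof(param), &param,
					    HCI_CMD_TIMEOUT);
		if (err < 0)
			return err;	/* transport error or timeout */
		if (err > 0)
			return -EIO;	/* controller returned HCI status */

		return 0;
	}
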
+
+static void hci_cmd_sync_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
+ struct hci_cmd_sync_work_entry *entry;
+ hci_cmd_sync_work_func_t func;
+ hci_cmd_sync_work_destroy_t destroy;
+ void *data;
+
+ bt_dev_dbg(hdev, "");
+
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ entry = list_first_entry(&hdev->cmd_sync_work_list,
+ struct hci_cmd_sync_work_entry, list);
+ if (entry) {
+ list_del(&entry->list);
+ func = entry->func;
+ data = entry->data;
+ destroy = entry->destroy;
+ kfree(entry);
+ } else {
+ func = NULL;
+ data = NULL;
+ destroy = NULL;
+ }
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+
+ if (func) {
+ int err;
+
+ hci_req_sync_lock(hdev);
+
+ err = func(hdev, data);
+
+ if (destroy)
+ destroy(hdev, data, err);
+
+ hci_req_sync_unlock(hdev);
+ }
+}
+
+void hci_cmd_sync_init(struct hci_dev *hdev)
+{
+ INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
+ INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
+ mutex_init(&hdev->cmd_sync_work_lock);
+}
+
+void hci_cmd_sync_clear(struct hci_dev *hdev)
+{
+ struct hci_cmd_sync_work_entry *entry, *tmp;
+
+ cancel_work_sync(&hdev->cmd_sync_work);
+
+ list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
+ if (entry->destroy)
+ entry->destroy(hdev, entry->data, -ECANCELED);
+
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
+
+int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ struct hci_cmd_sync_work_entry *entry;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->func = func;
+ entry->data = data;
+ entry->destroy = destroy;
+
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+
+ queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);
+
+ return 0;
+}
+EXPORT_SYMBOL(hci_cmd_sync_queue);
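
hci_cmd_sync_work() above pops one entry at a time and runs func with hdev->req_lock held, then hands the result to the optional destroy callback; destroy also fires with -ECANCELED when hci_cmd_sync_clear() flushes pending entries. A minimal sketch of queueing work with a heap-allocated context; the context struct and both callbacks are hypothetical, while hci_update_scan_rsp_data_sync() is the helper declared later in this file:

	struct example_ctx {
		u8 instance;
	};

	/* Runs serialized from cmd_sync_work with hdev->req_lock held */
	static int example_sync_func(struct hci_dev *hdev, void *data)
	{
		struct example_ctx *ctx = data;

		return hci_update_scan_rsp_data_sync(hdev, ctx->instance);
	}

	/* Receives the result of example_sync_func(), or -ECANCELED if the
	 * entry is flushed before running; frees the context either way.
	 */
	static void example_sync_destroy(struct hci_dev *hdev, void *data,
					 int err)
	{
		if (err)
			bt_dev_err(hdev, "example work failed: %d", err);

		kfree(data);
	}

	static int example_queue(struct hci_dev *hdev, u8 instance)
	{
		struct example_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
		int err;

		if (!ctx)
			return -ENOMEM;

		ctx->instance = instance;

		err = hci_cmd_sync_queue(hdev, example_sync_func, ctx,
					 example_sync_destroy);
		if (err)
			kfree(ctx);	/* never queued, destroy won't run */

		return err;
	}
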
+
+int hci_update_eir_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_write_eir cp;
+
+ bt_dev_dbg(hdev, "");
+
+ if (!hdev_is_powered(hdev))
+ return 0;
+
+ if (!lmp_ext_inq_capable(hdev))
+ return 0;
+
+ if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
+ return 0;
+
+ if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+
+ eir_create(hdev, cp.data);
+
+ if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
+ return 0;
+
+ memcpy(hdev->eir, cp.data, sizeof(cp.data));
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+}
+
+static u8 get_service_classes(struct hci_dev *hdev)
+{
+ struct bt_uuid *uuid;
+ u8 val = 0;
+
+ list_for_each_entry(uuid, &hdev->uuids, list)
+ val |= uuid->svc_hint;
+
+ return val;
+}
+
+int hci_update_class_sync(struct hci_dev *hdev)
+{
+ u8 cod[3];
+
+ bt_dev_dbg(hdev, "");
+
+ if (!hdev_is_powered(hdev))
+ return 0;
+
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ return 0;
+
+ if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
+ return 0;
+
+ cod[0] = hdev->minor_class;
+ cod[1] = hdev->major_class;
+ cod[2] = get_service_classes(hdev);
+
+ if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
+ cod[1] |= 0x20;
+
+ if (memcmp(cod, hdev->dev_class, 3) == 0)
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV,
+ sizeof(cod), cod, HCI_CMD_TIMEOUT);
+}
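
The three cod bytes above map directly onto the 24-bit Class of Device field, sent little-endian over the wire: minor class, major class, then the high service-class bits, with Limited Discoverable Mode at CoD bit 13 and hence mask 0x20 in the middle byte. A standalone restatement of the packing (not kernel code; the names are illustrative):

	#include <stdbool.h>
	#include <stdint.h>

	/* byte 0 = minor class, byte 1 = major class plus service bits
	 * 13..15, byte 2 = remaining service-class bits; CoD bit 13
	 * (Limited Discoverable Mode) is bit 5 of byte 1, mask 0x20.
	 */
	static void example_pack_cod(uint8_t cod[3], uint8_t minor,
				     uint8_t major, uint8_t services,
				     bool limited_discov)
	{
		cod[0] = minor;
		cod[1] = major;
		cod[2] = services;

		if (limited_discov)
			cod[1] |= 0x20;
	}
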
+
+static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
+{
+ /* If there is no connection we are OK to advertise. */
+ if (hci_conn_num(hdev, LE_LINK) == 0)
+ return true;
+
+ /* Check le_states if there is any connection in peripheral role. */
+ if (hdev->conn_hash.le_num_peripheral > 0) {
+ /* Peripheral connection state and non connectable mode
+ * bit 20.
+ */
+ if (!connectable && !(hdev->le_states[2] & 0x10))
+ return false;
+
+ /* Peripheral connection state and connectable mode bit 38
+ * and scannable bit 21.
+ */
+ if (connectable && (!(hdev->le_states[4] & 0x40) ||
+ !(hdev->le_states[2] & 0x20)))
+ return false;
+ }
+
+ /* Check le_states if there is any connection in central role. */
+ if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
+ /* Central connection state and non connectable mode bit 18. */
+ if (!connectable && !(hdev->le_states[2] & 0x02))
+ return false;
+
+ /* Central connection state and connectable mode bit 35 and
+ * scannable 19.
+ */
+ if (connectable && (!(hdev->le_states[4] & 0x08) ||
+ !(hdev->le_states[2] & 0x08)))
+ return false;
+ }
+
+ return true;
+}
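
The le_states masks above index the controller's 8-byte LE supported-states bitmap: spec bit N lives in byte N / 8 under mask 1 << (N % 8), so bit 20 becomes le_states[2] & 0x10 and bit 38 becomes le_states[4] & 0x40. A standalone sketch of the mapping (not kernel code):

	#include <stdbool.h>
	#include <stdint.h>

	/* Test bit n of the little-endian bitmap returned by the HCI
	 * LE Read Supported States command.
	 */
	static bool example_le_state_supported(const uint8_t le_states[8],
					       unsigned int n)
	{
		return le_states[n / 8] & (1u << (n % 8));
	}

	/* example_le_state_supported(s, 20) == !!(s[2] & 0x10)
	 * example_le_state_supported(s, 38) == !!(s[4] & 0x40)
	 */
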
+
+static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
+{
+ /* If privacy is not enabled don't use RPA */
+ if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
+ return false;
+
+ /* If basic privacy mode is enabled use RPA */
+ if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
+ return true;
+
+ /* If limited privacy mode is enabled don't use RPA if we're
+ * both discoverable and bondable.
+ */
+ if ((flags & MGMT_ADV_FLAG_DISCOV) &&
+ hci_dev_test_flag(hdev, HCI_BONDABLE))
+ return false;
+
+ /* We're neither bondable nor discoverable in the limited
+ * privacy mode, therefore use RPA.
+ */
+ return true;
+}
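
adv_use_rpa() reduces to a short decision tree over the privacy flags; the only subtle case is limited privacy, where an RPA is skipped exactly when the advertisement is discoverable and the device is bondable. A standalone restatement (not kernel code):

	#include <stdbool.h>

	static bool example_adv_use_rpa(bool privacy, bool limited_privacy,
					bool discoverable, bool bondable)
	{
		if (!privacy)			/* no privacy, no RPA */
			return false;

		if (!limited_privacy)		/* basic privacy always RPAs */
			return true;

		/* Limited privacy: use the identity address only when both
		 * discoverable and bondable.
		 */
		return !(discoverable && bondable);
	}
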
+
+static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
+{
+ /* If we're advertising or initiating an LE connection we can't
+ * go ahead and change the random address at this time. This is
+ * because the eventual initiator address used for the
+ * subsequently created connection will be undefined (some
+ * controllers use the new address and others the one we had
+ * when the operation started).
+ *
+ * In this kind of scenario skip the update and let the random
+ * address be updated at the next cycle.
+ */
+ if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
+ hci_lookup_le_connect(hdev)) {
+ bt_dev_dbg(hdev, "Deferring random address update");
+ hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
+ return 0;
+ }
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR,
+ 6, rpa, HCI_CMD_TIMEOUT);
+}
+
+int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
+ bool rpa, u8 *own_addr_type)
+{
+ int err;
+
+ /* If privacy is enabled use a resolvable private address. If
+ * current RPA has expired or there is something else than
+ * the current RPA in use, then generate a new one.
+ */
+ if (rpa) {
+ /* If Controller supports LL Privacy use own address type is
+ * 0x03
+ */
+ if (use_ll_privacy(hdev))
+ *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
+ else
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
+
+ /* Check if RPA is valid */
+ if (rpa_valid(hdev))
+ return 0;
+
+ err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
+ if (err < 0) {
+ bt_dev_err(hdev, "failed to generate new RPA");
+ return err;
+ }
+
+ err = hci_set_random_addr_sync(hdev, &hdev->rpa);
+ if (err)
+ return err;
+
+ return 0;
+ }
+
+ /* In case of required privacy without resolvable private address,
+	 * use a non-resolvable private address. This is useful for active
+ * scanning and non-connectable advertising.
+ */
+ if (require_privacy) {
+ bdaddr_t nrpa;
+
+ while (true) {
+ /* The non-resolvable private address is generated
+ * from random six bytes with the two most significant
+ * bits cleared.
+ */
+ get_random_bytes(&nrpa, 6);
+ nrpa.b[5] &= 0x3f;
+
+ /* The non-resolvable private address shall not be
+ * equal to the public address.
+ */
+ if (bacmp(&hdev->bdaddr, &nrpa))
+ break;
+ }
+
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
+
+ return hci_set_random_addr_sync(hdev, &nrpa);
+ }
+
+ /* If forcing static address is in use or there is no public
+	 * address, use the static address as the random address (but skip
+	 * the HCI command if the current random address is already the
+	 * static one).
+ *
+ * In case BR/EDR has been disabled on a dual-mode controller
+ * and a static address has been configured, then use that
+ * address instead of the public BR/EDR address.
+ */
+ if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
+ !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
+ (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
+ bacmp(&hdev->static_addr, BDADDR_ANY))) {
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
+ if (bacmp(&hdev->static_addr, &hdev->random_addr))
+ return hci_set_random_addr_sync(hdev,
+ &hdev->static_addr);
+ return 0;
+ }
+
+ /* Neither privacy nor static address is being used so use a
+ * public address.
+ */
+ *own_addr_type = ADDR_LE_DEV_PUBLIC;
+
+ return 0;
+}
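
The two most significant bits of a random address encode its sub-type: 00 for a non-resolvable private address, 01 for a resolvable one, 11 for static. Because bdaddr_t stores the address little-endian, b[5] is the most significant byte, which is what the nrpa.b[5] &= 0x3f above relies on. A standalone illustration (not kernel code):

	#include <stdint.h>
	#include <string.h>

	/* Little-endian 48-bit address, like the kernel's bdaddr_t:
	 * b[5] is the most significant byte over the air.
	 */
	struct example_bdaddr {
		uint8_t b[6];
	};

	/* Turn six random bytes into a non-resolvable private address
	 * by clearing the two most significant bits (sub-type 00).
	 */
	static void example_make_nrpa(struct example_bdaddr *addr,
				      const uint8_t rnd[6])
	{
		memcpy(addr->b, rnd, 6);
		addr->b[5] &= 0x3f;
	}
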
+
+static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
+{
+ struct hci_cp_le_set_ext_adv_enable *cp;
+ struct hci_cp_ext_adv_set *set;
+ u8 data[sizeof(*cp) + sizeof(*set) * 1];
+ u8 size;
+
+ /* If request specifies an instance that doesn't exist, fail */
+ if (instance > 0) {
+ struct adv_info *adv;
+
+ adv = hci_find_adv_instance(hdev, instance);
+ if (!adv)
+ return -EINVAL;
+
+ /* If not enabled there is nothing to do */
+ if (!adv->enabled)
+ return 0;
+ }
+
+ memset(data, 0, sizeof(data));
+
+ cp = (void *)data;
+ set = (void *)cp->data;
+
+ /* Instance 0x00 indicates all advertising instances will be disabled */
+ cp->num_of_sets = !!instance;
+ cp->enable = 0x00;
+
+ set->handle = instance;
+
+ size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
+ size, data, HCI_CMD_TIMEOUT);
+}
+
+static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
+ bdaddr_t *random_addr)
+{
+ struct hci_cp_le_set_adv_set_rand_addr cp;
+ int err;
+
+ if (!instance) {
+		/* Instance 0x00 doesn't have an adv_info; instead it uses
+		 * hdev->random_addr to track its address, so whenever that
+		 * needs updating this also sets the random address, since
+		 * hdev->random_addr is shared with the scan state machine.
+ */
+ err = hci_set_random_addr_sync(hdev, random_addr);
+ if (err)
+ return err;
+ }
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.handle = instance;
+ bacpy(&cp.bdaddr, random_addr);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
+{
+ struct hci_cp_le_set_ext_adv_params cp;
+ bool connectable;
+ u32 flags;
+ bdaddr_t random_addr;
+ u8 own_addr_type;
+ int err;
+ struct adv_info *adv;
+ bool secondary_adv;
+
+ if (instance > 0) {
+ adv = hci_find_adv_instance(hdev, instance);
+ if (!adv)
+ return -EINVAL;
+ } else {
+ adv = NULL;
+ }
+
+ /* Updating parameters of an active instance will return a
+ * Command Disallowed error, so we must first disable the
+ * instance if it is active.
+ */
+ if (adv && !adv->pending) {
+ err = hci_disable_ext_adv_instance_sync(hdev, instance);
+ if (err)
+ return err;
+ }
+
+ flags = hci_adv_instance_flags(hdev, instance);
+
+ /* If the "connectable" instance flag was not set, then choose between
+ * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
+ */
+ connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
+ mgmt_get_connectable(hdev);
+
+ if (!is_advertising_allowed(hdev, connectable))
+ return -EPERM;
+
+ /* Set require_privacy to true only when non-connectable
+ * advertising is used. In that case it is fine to use a
+ * non-resolvable private address.
+ */
+ err = hci_get_random_address(hdev, !connectable,
+ adv_use_rpa(hdev, flags), adv,
+ &own_addr_type, &random_addr);
+ if (err < 0)
+ return err;
+
+ memset(&cp, 0, sizeof(cp));
+
+ if (adv) {
+ hci_cpu_to_le24(adv->min_interval, cp.min_interval);
+ hci_cpu_to_le24(adv->max_interval, cp.max_interval);
+ cp.tx_power = adv->tx_power;
+ } else {
+ hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
+ hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
+ cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
+ }
+
+ secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
+
+ if (connectable) {
+ if (secondary_adv)
+ cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
+ else
+ cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
+ } else if (hci_adv_instance_is_scannable(hdev, instance) ||
+ (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
+ if (secondary_adv)
+ cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
+ else
+ cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
+ } else {
+ if (secondary_adv)
+ cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
+ else
+ cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
+ }
+
+ /* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter
+ * contains the peer's Identity Address and the Peer_Address_Type
+ * parameter contains the peer's Identity Type (i.e., 0x00 or 0x01).
+ * These parameters are used to locate the corresponding local IRK in
+ * the resolving list; this IRK is used to generate the local device's
+ * own resolvable address used in the advertisement.
+ */
+ if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED)
+ hci_copy_identity_address(hdev, &cp.peer_addr,
+ &cp.peer_addr_type);
+
+ cp.own_addr_type = own_addr_type;
+ cp.channel_map = hdev->le_adv_channel_map;
+ cp.handle = instance;
+
+ if (flags & MGMT_ADV_FLAG_SEC_2M) {
+ cp.primary_phy = HCI_ADV_PHY_1M;
+ cp.secondary_phy = HCI_ADV_PHY_2M;
+ } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
+ cp.primary_phy = HCI_ADV_PHY_CODED;
+ cp.secondary_phy = HCI_ADV_PHY_CODED;
+ } else {
+ /* In all other cases use 1M */
+ cp.primary_phy = HCI_ADV_PHY_1M;
+ cp.secondary_phy = HCI_ADV_PHY_1M;
+ }
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ if (err)
+ return err;
+
+ if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
+ own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
+ bacmp(&random_addr, BDADDR_ANY)) {
+ /* Check if the random address needs to be updated */
+ if (adv) {
+ if (!bacmp(&random_addr, &adv->random_addr))
+ return 0;
+ } else {
+ if (!bacmp(&random_addr, &hdev->random_addr))
+ return 0;
+ }
+
+ return hci_set_adv_set_random_addr_sync(hdev, instance,
+ &random_addr);
+ }
+
+ return 0;
+}
+
+static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
+{
+ struct {
+ struct hci_cp_le_set_ext_scan_rsp_data cp;
+ u8 data[HCI_MAX_EXT_AD_LENGTH];
+ } pdu;
+ u8 len;
+
+ memset(&pdu, 0, sizeof(pdu));
+
+ len = eir_create_scan_rsp(hdev, instance, pdu.data);
+
+ if (hdev->scan_rsp_data_len == len &&
+ !memcmp(pdu.data, hdev->scan_rsp_data, len))
+ return 0;
+
+ memcpy(hdev->scan_rsp_data, pdu.data, len);
+ hdev->scan_rsp_data_len = len;
+
+ pdu.cp.handle = instance;
+ pdu.cp.length = len;
+ pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
+ pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
+ sizeof(pdu.cp) + len, &pdu.cp,
+ HCI_CMD_TIMEOUT);
+}
+
+static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
+{
+ struct hci_cp_le_set_scan_rsp_data cp;
+ u8 len;
+
+ memset(&cp, 0, sizeof(cp));
+
+ len = eir_create_scan_rsp(hdev, instance, cp.data);
+
+ if (hdev->scan_rsp_data_len == len &&
+ !memcmp(cp.data, hdev->scan_rsp_data, len))
+ return 0;
+
+ memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
+ hdev->scan_rsp_data_len = len;
+
+ cp.length = len;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
+{
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+ return 0;
+
+ if (ext_adv_capable(hdev))
+ return hci_set_ext_scan_rsp_data_sync(hdev, instance);
+
+ return __hci_set_scan_rsp_data_sync(hdev, instance);
+}
+
+int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
+{
+ struct hci_cp_le_set_ext_adv_enable *cp;
+ struct hci_cp_ext_adv_set *set;
+ u8 data[sizeof(*cp) + sizeof(*set) * 1];
+ struct adv_info *adv;
+
+ if (instance > 0) {
+ adv = hci_find_adv_instance(hdev, instance);
+ if (!adv)
+ return -EINVAL;
+ /* If already enabled there is nothing to do */
+ if (adv->enabled)
+ return 0;
+ } else {
+ adv = NULL;
+ }
+
+ cp = (void *)data;
+ set = (void *)cp->data;
+
+ memset(cp, 0, sizeof(*cp));
+
+ cp->enable = 0x01;
+ cp->num_of_sets = 0x01;
+
+ memset(set, 0, sizeof(*set));
+
+ set->handle = instance;
+
+ /* Set duration per instance since controller is responsible for
+ * scheduling it.
+ */
+ if (adv && adv->timeout) {
+ u16 duration = adv->timeout * MSEC_PER_SEC;
+
+ /* Time = N * 10 ms */
+ set->duration = cpu_to_le16(duration / 10);
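+ /* e.g. adv->timeout = 5 s gives duration = 5000 ms, sent as 500 */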
+ }
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
+ sizeof(*cp) +
+ sizeof(*set) * cp->num_of_sets,
+ data, HCI_CMD_TIMEOUT);
+}
+
+int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
+{
+ int err;
+
+ err = hci_setup_ext_adv_instance_sync(hdev, instance);
+ if (err)
+ return err;
+
+ err = hci_set_ext_scan_rsp_data_sync(hdev, instance);
+ if (err)
+ return err;
+
+ return hci_enable_ext_advertising_sync(hdev, instance);
+}
+
+static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance)
+{
+ int err;
+
+ if (ext_adv_capable(hdev))
+ return hci_start_ext_adv_sync(hdev, instance);
+
+ err = hci_update_adv_data_sync(hdev, instance);
+ if (err)
+ return err;
+
+ err = hci_update_scan_rsp_data_sync(hdev, instance);
+ if (err)
+ return err;
+
+ return hci_enable_advertising_sync(hdev);
+}
+
+int hci_enable_advertising_sync(struct hci_dev *hdev)
+{
+ struct adv_info *adv_instance;
+ struct hci_cp_le_set_adv_param cp;
+ u8 own_addr_type, enable = 0x01;
+ bool connectable;
+ u16 adv_min_interval, adv_max_interval;
+ u32 flags;
+ u8 status;
+
+ if (ext_adv_capable(hdev))
+ return hci_enable_ext_advertising_sync(hdev,
+ hdev->cur_adv_instance);
+
+ flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
+ adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
+
+ /* If the "connectable" instance flag was not set, then choose between
+ * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
+ */
+ connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
+ mgmt_get_connectable(hdev);
+
+ if (!is_advertising_allowed(hdev, connectable))
+ return -EINVAL;
+
+ status = hci_disable_advertising_sync(hdev);
+ if (status)
+ return status;
+
+ /* Clear the HCI_LE_ADV bit temporarily so that the
+ * hci_update_random_address knows that it's safe to go ahead
+ * and write a new random address. The flag will be set back on
+ * as soon as the SET_ADV_ENABLE HCI command completes.
+ */
+ hci_dev_clear_flag(hdev, HCI_LE_ADV);
+
+ /* Set require_privacy to true only when non-connectable
+ * advertising is used. In that case it is fine to use a
+ * non-resolvable private address.
+ */
+ status = hci_update_random_address_sync(hdev, !connectable,
+ adv_use_rpa(hdev, flags),
+ &own_addr_type);
+ if (status)
+ return status;
+
+ memset(&cp, 0, sizeof(cp));
+
+ if (adv_instance) {
+ adv_min_interval = adv_instance->min_interval;
+ adv_max_interval = adv_instance->max_interval;
+ } else {
+ adv_min_interval = hdev->le_adv_min_interval;
+ adv_max_interval = hdev->le_adv_max_interval;
+ }
+
+ if (connectable) {
+ cp.type = LE_ADV_IND;
+ } else {
+ if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance))
+ cp.type = LE_ADV_SCAN_IND;
+ else
+ cp.type = LE_ADV_NONCONN_IND;
+
+ if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
+ hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
+ adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
+ adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
+ }
+ }
+
+ cp.min_interval = cpu_to_le16(adv_min_interval);
+ cp.max_interval = cpu_to_le16(adv_max_interval);
+ cp.own_address_type = own_addr_type;
+ cp.channel_map = hdev->le_adv_channel_map;
+
+ status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ if (status)
+ return status;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
+ sizeof(enable), &enable, HCI_CMD_TIMEOUT);
+}
+
+static int enable_advertising_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_enable_advertising_sync(hdev);
+}
+
+int hci_enable_advertising(struct hci_dev *hdev)
+{
+ if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
+ list_empty(&hdev->adv_instances))
+ return 0;
+
+ return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL);
+}
+
+int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
+ struct sock *sk)
+{
+ int err;
+
+ if (!ext_adv_capable(hdev))
+ return 0;
+
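+ /* The set has to be disabled first since LE Remove Advertising Set
+ * fails with Command Disallowed while the set is enabled.
+ */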
+ err = hci_disable_ext_adv_instance_sync(hdev, instance);
+ if (err)
+ return err;
+
+ /* If request specifies an instance that doesn't exist, fail */
+ if (instance > 0 && !hci_find_adv_instance(hdev, instance))
+ return -EINVAL;
+
+ return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET,
+ sizeof(instance), &instance, 0,
+ HCI_CMD_TIMEOUT, sk);
+}
+
+static void cancel_adv_timeout(struct hci_dev *hdev)
+{
+ if (hdev->adv_instance_timeout) {
+ hdev->adv_instance_timeout = 0;
+ cancel_delayed_work(&hdev->adv_instance_expire);
+ }
+}
+
+static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
+{
+ struct {
+ struct hci_cp_le_set_ext_adv_data cp;
+ u8 data[HCI_MAX_EXT_AD_LENGTH];
+ } pdu;
+ u8 len;
+
+ memset(&pdu, 0, sizeof(pdu));
+
+ len = eir_create_adv_data(hdev, instance, pdu.data);
+
+ /* There's nothing to do if the data hasn't changed */
+ if (hdev->adv_data_len == len &&
+ memcmp(pdu.data, hdev->adv_data, len) == 0)
+ return 0;
+
+ memcpy(hdev->adv_data, pdu.data, len);
+ hdev->adv_data_len = len;
+
+ pdu.cp.length = len;
+ pdu.cp.handle = instance;
+ pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
+ pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
+ sizeof(pdu.cp) + len, &pdu.cp,
+ HCI_CMD_TIMEOUT);
+}
+
+static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
+{
+ struct hci_cp_le_set_adv_data cp;
+ u8 len;
+
+ memset(&cp, 0, sizeof(cp));
+
+ len = eir_create_adv_data(hdev, instance, cp.data);
+
+ /* There's nothing to do if the data hasn't changed */
+ if (hdev->adv_data_len == len &&
+ memcmp(cp.data, hdev->adv_data, len) == 0)
+ return 0;
+
+ memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
+ hdev->adv_data_len = len;
+
+ cp.length = len;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
+{
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+ return 0;
+
+ if (ext_adv_capable(hdev))
+ return hci_set_ext_adv_data_sync(hdev, instance);
+
+ return hci_set_adv_data_sync(hdev, instance);
+}
+
+int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
+ bool force)
+{
+ struct adv_info *adv = NULL;
+ u16 timeout;
+
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev))
+ return -EPERM;
+
+ if (hdev->adv_instance_timeout)
+ return -EBUSY;
+
+ adv = hci_find_adv_instance(hdev, instance);
+ if (!adv)
+ return -ENOENT;
+
+ /* A zero timeout means unlimited advertising. As long as there is
+ * only one instance, duration should be ignored. We still set a timeout
+ * in case further instances are being added later on.
+ *
+ * If the remaining lifetime of the instance is more than the duration
+ * then the timeout corresponds to the duration, otherwise it will be
+ * reduced to the remaining instance lifetime.
+ */
+ if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
+ timeout = adv->duration;
+ else
+ timeout = adv->remaining_time;
+
+ /* The remaining time is being reduced unless the instance is being
+ * advertised without time limit.
+ */
+ if (adv->timeout)
+ adv->remaining_time = adv->remaining_time - timeout;
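+ /* e.g. with duration = 2 and remaining_time = 10 the timeout is 2
+ * and remaining_time drops to 8 for the next scheduling round
+ */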
+
+ /* Only use work for scheduling instances with legacy advertising */
+ if (!ext_adv_capable(hdev)) {
+ hdev->adv_instance_timeout = timeout;
+ queue_delayed_work(hdev->req_workqueue,
+ &hdev->adv_instance_expire,
+ msecs_to_jiffies(timeout * 1000));
+ }
+
+ /* If we're just re-scheduling the same instance again then do not
+ * execute any HCI commands. This happens when a single instance is
+ * being advertised.
+ */
+ if (!force && hdev->cur_adv_instance == instance &&
+ hci_dev_test_flag(hdev, HCI_LE_ADV))
+ return 0;
+
+ hdev->cur_adv_instance = instance;
+
+ return hci_start_adv_sync(hdev, instance);
+}
+
+static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
+{
+ int err;
+
+ if (!ext_adv_capable(hdev))
+ return 0;
+
+ /* Disable instance 0x00 to disable all instances */
+ err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
+ if (err)
+ return err;
+
+ return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS,
+ 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
+}
+
+static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
+{
+ struct adv_info *adv, *n;
+
+ if (ext_adv_capable(hdev))
+ /* Remove all existing sets */
+ return hci_clear_adv_sets_sync(hdev, sk);
+
+ /* This is safe as long as no command is sent while the lock is
+ * held.
+ */
+ hci_dev_lock(hdev);
+
+ /* Cleanup non-ext instances */
+ list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
+ u8 instance = adv->instance;
+ int err;
+
+ if (!(force || adv->timeout))
+ continue;
+
+ err = hci_remove_adv_instance(hdev, instance);
+ if (!err)
+ mgmt_advertising_removed(sk, hdev, instance);
+ }
+
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
+ struct sock *sk)
+{
+ int err;
+
+ /* If we use extended advertising, instance has to be removed first. */
+ if (ext_adv_capable(hdev))
+ return hci_remove_ext_adv_instance_sync(hdev, instance, sk);
+
+ /* This is safe as long as no command is sent while the lock is
+ * held.
+ */
+ hci_dev_lock(hdev);
+
+ err = hci_remove_adv_instance(hdev, instance);
+ if (!err)
+ mgmt_advertising_removed(sk, hdev, instance);
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
+/* For a single instance:
+ * - force == true: The instance will be removed even when its remaining
+ * lifetime is not zero.
+ * - force == false: the instance will be deactivated but kept stored unless
+ * the remaining lifetime is zero.
+ *
+ * For instance == 0x00:
+ * - force == true: All instances will be removed regardless of their timeout
+ * setting.
+ * - force == false: Only instances that have a timeout will be removed.
+ */
+int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
+ u8 instance, bool force)
+{
+ struct adv_info *next = NULL;
+ int err;
+
+ /* Cancel any timeout concerning the removed instance(s). */
+ if (!instance || hdev->cur_adv_instance == instance)
+ cancel_adv_timeout(hdev);
+
+ /* Get the next instance to advertise BEFORE we remove
+ * the current one. This can be the same instance again
+ * if there is only one instance.
+ */
+ if (hdev->cur_adv_instance == instance)
+ next = hci_get_next_instance(hdev, instance);
+
+ if (!instance) {
+ err = hci_clear_adv_sync(hdev, sk, force);
+ if (err)
+ return err;
+ } else {
+ struct adv_info *adv = hci_find_adv_instance(hdev, instance);
+
+ if (force || (adv && adv->timeout && !adv->remaining_time)) {
+ /* Don't advertise a removed instance. */
+ if (next && next->instance == instance)
+ next = NULL;
+
+ err = hci_remove_adv_sync(hdev, instance, sk);
+ if (err)
+ return err;
+ }
+ }
+
+ if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
+ return 0;
+
+ if (next && !ext_adv_capable(hdev))
+ hci_schedule_adv_instance_sync(hdev, next->instance, false);
+
+ return 0;
+}
+
+int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle)
+{
+ struct hci_cp_read_rssi cp;
+
+ cp.handle = handle;
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK,
+ sizeof(*cp), cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
+{
+ struct hci_cp_read_tx_power cp;
+
+ cp.handle = handle;
+ cp.type = type;
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_disable_advertising_sync(struct hci_dev *hdev)
+{
+ u8 enable = 0x00;
+
+ /* If controller is not advertising we are done. */
+ if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
+ return 0;
+
+ if (ext_adv_capable(hdev))
+ return hci_disable_ext_adv_instance_sync(hdev, 0x00);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
+ sizeof(enable), &enable, HCI_CMD_TIMEOUT);
+}
+
+static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val,
+ u8 filter_dup)
+{
+ struct hci_cp_le_set_ext_scan_enable cp;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.enable = val;
+ cp.filter_dup = filter_dup;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
+ u8 filter_dup)
+{
+ struct hci_cp_le_set_scan_enable cp;
+
+ if (use_ext_scan(hdev))
+ return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup);
+
+ memset(&cp, 0, sizeof(cp));
+ cp.enable = val;
+ cp.filter_dup = filter_dup;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val)
+{
+ if (!use_ll_privacy(hdev))
+ return 0;
+
+ /* If the controller is already in the requested state we are done. */
+ if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
+ sizeof(val), &val, HCI_CMD_TIMEOUT);
+}
+
+static int hci_scan_disable_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ /* If controller is not scanning we are done. */
+ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
+ return 0;
+
+ if (hdev->scanning_paused) {
+ bt_dev_dbg(hdev, "Scanning is paused for suspend");
+ return 0;
+ }
+
+ err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
+ if (err) {
+ bt_dev_err(hdev, "Unable to disable scanning: %d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static bool scan_use_rpa(struct hci_dev *hdev)
+{
+ return hci_dev_test_flag(hdev, HCI_PRIVACY);
+}
+
+static void hci_start_interleave_scan(struct hci_dev *hdev)
+{
+ hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
+ queue_delayed_work(hdev->req_workqueue,
+ &hdev->interleave_scan, 0);
+}
+
+static bool is_interleave_scanning(struct hci_dev *hdev)
+{
+ return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
+}
+
+static void cancel_interleave_scan(struct hci_dev *hdev)
+{
+ bt_dev_dbg(hdev, "cancelling interleave scan");
+
+ cancel_delayed_work_sync(&hdev->interleave_scan);
+
+ hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
+}
+
+/* Returns true if an interleaved scan was started as a result of this call;
+ * otherwise returns false.
+ */
+static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
+{
+ /* Do interleaved scan only if all of the following are true:
+ * - There is at least one ADV monitor
+ * - At least one pending LE connection or one device to be scanned for
+ * - Monitor offloading is not supported
+ * If so, we should alternate between allowlist scan and one without
+ * any filters to save power.
+ */
+ bool use_interleaving = hci_is_adv_monitoring(hdev) &&
+ !(list_empty(&hdev->pend_le_conns) &&
+ list_empty(&hdev->pend_le_reports)) &&
+ hci_get_adv_monitor_offload_ext(hdev) ==
+ HCI_ADV_MONITOR_EXT_NONE;
+ bool is_interleaving = is_interleave_scanning(hdev);
+
+ if (use_interleaving && !is_interleaving) {
+ hci_start_interleave_scan(hdev);
+ bt_dev_dbg(hdev, "starting interleave scan");
+ return true;
+ }
+
+ if (!use_interleaving && is_interleaving)
+ cancel_interleave_scan(hdev);
+
+ return false;
+}
+
+/* Removes a device from the resolving list if needed. */
+static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 bdaddr_type)
+{
+ struct hci_cp_le_del_from_resolv_list cp;
+ struct bdaddr_list_with_irk *entry;
+
+ if (!use_ll_privacy(hdev))
+ return 0;
+
+ /* Check if the IRK has been programmed */
+ entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
+ bdaddr_type);
+ if (!entry)
+ return 0;
+
+ cp.bdaddr_type = bdaddr_type;
+ bacpy(&cp.bdaddr, bdaddr);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 bdaddr_type)
+{
+ struct hci_cp_le_del_from_accept_list cp;
+ int err;
+
+ /* Check if device is on accept list before removing it */
+ if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
+ return 0;
+
+ cp.bdaddr_type = bdaddr_type;
+ bacpy(&cp.bdaddr, bdaddr);
+
+ /* Ignore errors when removing from the resolving list, as it is
+ * likely that the device was never added there.
+ */
+ hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ if (err) {
+ bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
+ return err;
+ }
+
+ bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
+ cp.bdaddr_type);
+
+ return 0;
+}
+
+/* Adds connection to resolve list if needed.
+ * Setting params to NULL programs local hdev->irk
+ */
+static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
+ struct hci_conn_params *params)
+{
+ struct hci_cp_le_add_to_resolv_list cp;
+ struct smp_irk *irk;
+ struct bdaddr_list_with_irk *entry;
+
+ if (!use_ll_privacy(hdev))
+ return 0;
+
+ /* Attempt to program local identity address, type and irk if params is
+ * NULL.
+ */
+ if (!params) {
+ if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
+ return 0;
+
+ hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
+ memcpy(cp.peer_irk, hdev->irk, 16);
+ goto done;
+ }
+
+ irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
+ if (!irk)
+ return 0;
+
+ /* Check if the IRK has _not_ been programmed yet. */
+ entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
+ &params->addr,
+ params->addr_type);
+ if (entry)
+ return 0;
+
+ cp.bdaddr_type = params->addr_type;
+ bacpy(&cp.bdaddr, &params->addr);
+ memcpy(cp.peer_irk, irk->val, 16);
+
+done:
+ if (hci_dev_test_flag(hdev, HCI_PRIVACY))
+ memcpy(cp.local_irk, hdev->irk, 16);
+ else
+ memset(cp.local_irk, 0, 16);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+/* Adds connection to allow list if needed, if the device uses RPA (has IRK)
+ * this attempts to program the device in the resolving list as well.
+ */
+static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
+ struct hci_conn_params *params,
+ u8 *num_entries)
+{
+ struct hci_cp_le_add_to_accept_list cp;
+ int err;
+
+ /* Already in accept list */
+ if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
+ params->addr_type))
+ return 0;
+
+ /* If the accept list is full, abort; the caller then falls back to
+ * filter policy 0x00 (accept all advertising).
+ */
+ if (*num_entries >= hdev->le_accept_list_size)
+ return -ENOSPC;
+
+ /* Accept list can not be used with RPAs */
+ if (!use_ll_privacy(hdev) &&
+ hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
+ return -EINVAL;
+ }
+
+ /* During suspend, only wakeable devices can be in acceptlist */
+ if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
+ params->current_flags))
+ return 0;
+
+ /* Attempt to program the device into the resolving list first, to
+ * avoid having to roll back if a later step fails; the resolving list
+ * is dynamic and may well be smaller than the accept list.
+ */
+ err = hci_le_add_resolve_list_sync(hdev, params);
+ if (err) {
+ bt_dev_err(hdev, "Unable to add to resolve list: %d", err);
+ return err;
+ }
+
+ *num_entries += 1;
+ cp.bdaddr_type = params->addr_type;
+ bacpy(&cp.bdaddr, &params->addr);
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ if (err) {
+ bt_dev_err(hdev, "Unable to add to allow list: %d", err);
+ /* Rollback the device from the resolving list */
+ hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
+ return err;
+ }
+
+ bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr,
+ cp.bdaddr_type);
+
+ return 0;
+}
+
+/* This function disables/pauses all advertising instances */
+static int hci_pause_advertising_sync(struct hci_dev *hdev)
+{
+ int err;
+ int old_state;
+
+ /* If there are no instances or advertising has already been paused
+ * there is nothing to do.
+ */
+ if (!hdev->adv_instance_cnt || hdev->advertising_paused)
+ return 0;
+
+ bt_dev_dbg(hdev, "Pausing directed advertising");
+
+ /* Stop directed advertising */
+ old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
+ if (old_state) {
+ /* When discoverable timeout triggers, then just make sure
+ * the limited discoverable flag is cleared. Even in the case
+ * of a timeout triggered from general discoverable, it is
+ * safe to unconditionally clear the flag.
+ */
+ hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
+ hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
+ hdev->discov_timeout = 0;
+ }
+
+ bt_dev_dbg(hdev, "Pausing advertising instances");
+
+ /* Call to disable any advertisements active on the controller.
+ * This will succeed even if no advertisements are configured.
+ */
+ err = hci_disable_advertising_sync(hdev);
+ if (err)
+ return err;
+
+ /* If we are using software rotation, pause the loop */
+ if (!ext_adv_capable(hdev))
+ cancel_adv_timeout(hdev);
+
+ hdev->advertising_paused = true;
+ hdev->advertising_old_state = old_state;
+
+ return 0;
+}
+
+/* This function enables all user advertising instances */
+static int hci_resume_advertising_sync(struct hci_dev *hdev)
+{
+ struct adv_info *adv, *tmp;
+ int err = 0;
+
+ /* If advertising has not been paused there is nothing to do. */
+ if (!hdev->advertising_paused)
+ return 0;
+
+ /* Resume directed advertising */
+ hdev->advertising_paused = false;
+ if (hdev->advertising_old_state) {
+ hci_dev_set_flag(hdev, HCI_ADVERTISING);
+ hdev->advertising_old_state = 0;
+ }
+
+ bt_dev_dbg(hdev, "Resuming advertising instances");
+
+ if (ext_adv_capable(hdev)) {
+ /* Call for each tracked instance to be re-enabled */
+ list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) {
+ err = hci_enable_ext_advertising_sync(hdev,
+ adv->instance);
+ if (!err)
+ continue;
+
+ /* If the instance cannot be resumed remove it */
+ hci_remove_ext_adv_instance_sync(hdev, adv->instance,
+ NULL);
+ }
+ } else {
+ /* Schedule for most recent instance to be restarted and begin
+ * the software rotation loop
+ */
+ err = hci_schedule_adv_instance_sync(hdev,
+ hdev->cur_adv_instance,
+ true);
+ }
+
+ return err;
+}
+
+struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
+ bool extended, struct sock *sk)
+{
+ u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
+ HCI_OP_READ_LOCAL_OOB_DATA;
+
+ return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
+}
+
+/* Device must not be scanning when updating the accept list.
+ *
+ * Update is done using the following sequence:
+ *
+ * use_ll_privacy((Disable Advertising) -> Disable Resolving List) ->
+ * Remove Devices From Accept List ->
+ * (has IRK && use_ll_privacy(Remove Devices From Resolving List))->
+ * Add Devices to Accept List ->
+ * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) ->
+ * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) ->
+ * Enable Scanning
+ *
+ * In case of failure, advertising is restored to its original state and the
+ * returned filter policy disables the accept list, since either the accept
+ * list or the resolving list could not be programmed.
+ *
+ */
+static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
+{
+ struct hci_conn_params *params;
+ struct bdaddr_list *b, *t;
+ u8 filter_policy;
+ u8 num_entries = 0;
+ bool pend_conn, pend_report;
+ int err;
+
+ /* Pause advertising if the resolving list can be used, as controllers
+ * cannot accept resolving list modifications while advertising.
+ */
+ if (use_ll_privacy(hdev)) {
+ err = hci_pause_advertising_sync(hdev);
+ if (err) {
+ bt_dev_err(hdev, "pause advertising failed: %d", err);
+ return 0x00;
+ }
+ }
+
+ /* Disable address resolution while reprogramming accept list since
+ * devices that do have an IRK will be programmed in the resolving list
+ * when LL Privacy is enabled.
+ */
+ err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
+ if (err) {
+ bt_dev_err(hdev, "Unable to disable LL privacy: %d", err);
+ goto done;
+ }
+
+ /* Go through the current accept list programmed into the
+ * controller one by one and check if that address is still
+ * in the list of pending connections or list of devices to
+ * report. If not present in either list, then remove it from
+ * the controller.
+ */
+ list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
+ pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
+ &b->bdaddr,
+ b->bdaddr_type);
+ pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
+ &b->bdaddr,
+ b->bdaddr_type);
+
+ /* If the device is not likely to connect or report,
+ * remove it from the acceptlist.
+ */
+ if (!pend_conn && !pend_report) {
+ hci_le_del_accept_list_sync(hdev, &b->bdaddr,
+ b->bdaddr_type);
+ continue;
+ }
+
+ num_entries++;
+ }
+
+ /* Since all no longer valid accept list entries have been
+ * removed, walk through the list of pending connections
+ * and ensure that any new device gets programmed into
+ * the controller.
+ *
+ * If the list of devices is larger than the number of
+ * available accept list entries in the controller, then
+ * just abort and return a filter policy value that does
+ * not use the accept list.
+ */
+ list_for_each_entry(params, &hdev->pend_le_conns, action) {
+ err = hci_le_add_accept_list_sync(hdev, params, &num_entries);
+ if (err)
+ goto done;
+ }
+
+ /* After adding all new pending connections, walk through
+ * the list of pending reports and also add these to the
+ * accept list if there is still space. Abort if space runs out.
+ */
+ list_for_each_entry(params, &hdev->pend_le_reports, action) {
+ err = hci_le_add_accept_list_sync(hdev, params, &num_entries);
+ if (err)
+ goto done;
+ }
+
+ /* Use the allowlist unless the following conditions are all true:
+ * - We are not currently suspending
+ * - There are 1 or more ADV monitors registered and it's not offloaded
+ * - Interleaved scanning is not currently using the allowlist
+ */
+ if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
+ hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
+ hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
+ err = -EINVAL;
+
+done:
+ /* Latch the filter policy before err is reused by the commands below */
+ filter_policy = err ? 0x00 : 0x01;
+
+ /* Enable address resolution when LL Privacy is enabled. */
+ err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
+ if (err)
+ 	bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);
+
+ /* Resume advertising if it was paused */
+ if (use_ll_privacy(hdev))
+ 	hci_resume_advertising_sync(hdev);
+
+ /* Select filter policy: 0x00 (no accept list) or 0x01 (use accept list) */
+ return filter_policy;
+}
+
+/* Returns true if an le connection is in the scanning state */
+static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == LE_LINK && c->state == BT_CONNECT &&
+ test_bit(HCI_CONN_SCANNING, &c->flags)) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return false;
+}
+
+static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
+ u16 interval, u16 window,
+ u8 own_addr_type, u8 filter_policy)
+{
+ struct hci_cp_le_set_ext_scan_params *cp;
+ struct hci_cp_le_scan_phy_params *phy;
+ u8 data[sizeof(*cp) + sizeof(*phy) * 2];
+ u8 num_phy = 0;
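+ /* The command carries one phy_params block for each PHY selected
+ * in scanning_phys; at most two (1M and Coded) are used here.
+ */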
+
+ cp = (void *)data;
+ phy = (void *)cp->data;
+
+ memset(data, 0, sizeof(data));
+
+ cp->own_addr_type = own_addr_type;
+ cp->filter_policy = filter_policy;
+
+ if (scan_1m(hdev) || scan_2m(hdev)) {
+ cp->scanning_phys |= LE_SCAN_PHY_1M;
+
+ phy->type = type;
+ phy->interval = cpu_to_le16(interval);
+ phy->window = cpu_to_le16(window);
+
+ num_phy++;
+ phy++;
+ }
+
+ if (scan_coded(hdev)) {
+ cp->scanning_phys |= LE_SCAN_PHY_CODED;
+
+ phy->type = type;
+ phy->interval = cpu_to_le16(interval);
+ phy->window = cpu_to_le16(window);
+
+ num_phy++;
+ phy++;
+ }
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
+ sizeof(*cp) + sizeof(*phy) * num_phy,
+ data, HCI_CMD_TIMEOUT);
+}
+
+static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type,
+ u16 interval, u16 window,
+ u8 own_addr_type, u8 filter_policy)
+{
+ struct hci_cp_le_set_scan_param cp;
+
+ if (use_ext_scan(hdev))
+ return hci_le_set_ext_scan_param_sync(hdev, type, interval,
+ window, own_addr_type,
+ filter_policy);
+
+ memset(&cp, 0, sizeof(cp));
+ cp.type = type;
+ cp.interval = cpu_to_le16(interval);
+ cp.window = cpu_to_le16(window);
+ cp.own_address_type = own_addr_type;
+ cp.filter_policy = filter_policy;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval,
+ u16 window, u8 own_addr_type, u8 filter_policy,
+ u8 filter_dup)
+{
+ int err;
+
+ if (hdev->scanning_paused) {
+ bt_dev_dbg(hdev, "Scanning is paused for suspend");
+ return 0;
+ }
+
+ err = hci_le_set_scan_param_sync(hdev, type, interval, window,
+ own_addr_type, filter_policy);
+ if (err)
+ return err;
+
+ return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup);
+}
+
+static int hci_passive_scan_sync(struct hci_dev *hdev)
+{
+ u8 own_addr_type;
+ u8 filter_policy;
+ u16 window, interval;
+ int err;
+
+ if (hdev->scanning_paused) {
+ bt_dev_dbg(hdev, "Scanning is paused for suspend");
+ return 0;
+ }
+
+ err = hci_scan_disable_sync(hdev);
+ if (err) {
+ bt_dev_err(hdev, "disable scanning failed: %d", err);
+ return err;
+ }
+
+ /* Set require_privacy to false since no SCAN_REQ are sent
+ * during passive scanning. Not using a non-resolvable address
+ * here is important so that peer devices using directed
+ * advertising with our address will be correctly reported
+ * by the controller.
+ */
+ if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev),
+ &own_addr_type))
+ return 0;
+
+ if (hdev->enable_advmon_interleave_scan &&
+ hci_update_interleaved_scan_sync(hdev))
+ return 0;
+
+ bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
+
+ /* Adding or removing entries from the accept list must
+ * happen before enabling scanning. The controller does
+ * not allow accept list modification while scanning.
+ */
+ filter_policy = hci_update_accept_list_sync(hdev);
+
+ /* When the controller is using random resolvable addresses, and
+ * thus has LE privacy enabled, controllers with Extended Scanner
+ * Filter Policies support can handle directed advertising as well.
+ *
+ * So instead of using filter policies 0x00 (no acceptlist)
+ * and 0x01 (acceptlist enabled) use the new filter policies
+ * 0x02 (no acceptlist) and 0x03 (acceptlist enabled).
+ */
+ if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
+ (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
+ filter_policy |= 0x02;
+
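+ /* Scan interval and window values below are in units of 0.625 ms */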
+ if (hdev->suspended) {
+ window = hdev->le_scan_window_suspend;
+ interval = hdev->le_scan_int_suspend;
+ } else if (hci_is_le_conn_scanning(hdev)) {
+ window = hdev->le_scan_window_connect;
+ interval = hdev->le_scan_int_connect;
+ } else if (hci_is_adv_monitoring(hdev)) {
+ window = hdev->le_scan_window_adv_monitor;
+ interval = hdev->le_scan_int_adv_monitor;
+ } else {
+ window = hdev->le_scan_window;
+ interval = hdev->le_scan_interval;
+ }
+
+ bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);
+
+ return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
+ own_addr_type, filter_policy,
+ LE_SCAN_FILTER_DUP_ENABLE);
+}
+
+/* This function controls passive scanning based on the hdev->pend_le_conns
+ * list. If there are pending LE connections we start the background scanning,
+ * otherwise we stop it, using the following sequences:
+ *
+ * If there are devices to scan:
+ *
+ * Disable Scanning -> Update Accept List ->
+ * use_ll_privacy((Disable Advertising) -> Disable Resolving List ->
+ * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
+ * Enable Scanning
+ *
+ * Otherwise:
+ *
+ * Disable Scanning
+ */
+int hci_update_passive_scan_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ if (!test_bit(HCI_UP, &hdev->flags) ||
+ test_bit(HCI_INIT, &hdev->flags) ||
+ hci_dev_test_flag(hdev, HCI_SETUP) ||
+ hci_dev_test_flag(hdev, HCI_CONFIG) ||
+ hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
+ hci_dev_test_flag(hdev, HCI_UNREGISTER))
+ return 0;
+
+ /* No point in doing scanning if LE support hasn't been enabled */
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+ return 0;
+
+ /* If discovery is active don't interfere with it */
+ if (hdev->discovery.state != DISCOVERY_STOPPED)
+ return 0;
+
+ /* Reset RSSI and UUID filters when starting background scanning
+ * since these filters are meant for service discovery only.
+ *
+ * The Start Discovery and Start Service Discovery operations
+ * ensure to set proper values for RSSI threshold and UUID
+ * filter list. So it is safe to just reset them here.
+ */
+ hci_discovery_filter_clear(hdev);
+
+ bt_dev_dbg(hdev, "ADV monitoring is %s",
+ hci_is_adv_monitoring(hdev) ? "on" : "off");
+
+ if (list_empty(&hdev->pend_le_conns) &&
+ list_empty(&hdev->pend_le_reports) &&
+ !hci_is_adv_monitoring(hdev)) {
+ /* If there are no pending LE connections, no devices to be
+ * scanned for and no ADV monitors, we should stop the
+ * background scanning.
+ */
+
+ bt_dev_dbg(hdev, "stopping background scanning");
+
+ err = hci_scan_disable_sync(hdev);
+ if (err)
+ bt_dev_err(hdev, "stop background scanning failed: %d",
+ err);
+ } else {
+ /* If there is at least one pending LE connection, we should
+ * keep the background scan running.
+ */
+
+ /* If controller is connecting, we should not start scanning
+ * since some controllers are not able to scan and connect at
+ * the same time.
+ */
+ if (hci_lookup_le_connect(hdev))
+ return 0;
+
+ bt_dev_dbg(hdev, "start background scanning");
+
+ err = hci_passive_scan_sync(hdev);
+ if (err)
+ bt_dev_err(hdev, "start background scanning failed: %d",
+ err);
+ }
+
+ return err;
+}
+
+static int update_passive_scan_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_update_passive_scan_sync(hdev);
+}
+
+int hci_update_passive_scan(struct hci_dev *hdev)
+{
+ /* Only queue if it would have any effect */
+ if (!test_bit(HCI_UP, &hdev->flags) ||
+ test_bit(HCI_INIT, &hdev->flags) ||
+ hci_dev_test_flag(hdev, HCI_SETUP) ||
+ hci_dev_test_flag(hdev, HCI_CONFIG) ||
+ hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
+ hci_dev_test_flag(hdev, HCI_UNREGISTER))
+ return 0;
+
+ return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL);
+}
+
+int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
+{
+ int err;
+
+ if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev))
+ return 0;
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
+ sizeof(val), &val, HCI_CMD_TIMEOUT);
+
+ if (!err) {
+ if (val) {
+ hdev->features[1][0] |= LMP_HOST_SC;
+ hci_dev_set_flag(hdev, HCI_SC_ENABLED);
+ } else {
+ hdev->features[1][0] &= ~LMP_HOST_SC;
+ hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
+ }
+ }
+
+ return err;
+}
+
+int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode)
+{
+ int err;
+
+ if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
+ lmp_host_ssp_capable(hdev))
+ return 0;
+
+ if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
+ __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
+ sizeof(mode), &mode, HCI_CMD_TIMEOUT);
+ }
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
+ sizeof(mode), &mode, HCI_CMD_TIMEOUT);
+ if (err)
+ return err;
+
+ return hci_write_sc_support_sync(hdev, 0x01);
+}
+
+int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul)
+{
+ struct hci_cp_write_le_host_supported cp;
+
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
+ !lmp_bredr_capable(hdev))
+ return 0;
+
+ /* Check first if we already have the right host state
+ * (host features set)
+ */
+ if (le == lmp_host_le_capable(hdev) &&
+ simul == lmp_host_le_br_capable(hdev))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.le = le;
+ cp.simul = simul;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_powered_update_adv_sync(struct hci_dev *hdev)
+{
+ struct adv_info *adv, *tmp;
+ int err;
+
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+ return 0;
+
+ /* If RPA Resolution has not been enabled yet it means the
+ * resolving list is empty and we should attempt to program the
+ * local IRK in order to support using own_addr_type
+ * ADDR_LE_DEV_RANDOM_RESOLVED (0x03).
+ */
+ if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
+ hci_le_add_resolve_list_sync(hdev, NULL);
+ hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
+ }
+
+ /* Make sure the controller has a good default for
+ * advertising data. This also applies to the case
+ * where BR/EDR was toggled during the AUTO_OFF phase.
+ */
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+ list_empty(&hdev->adv_instances)) {
+ if (ext_adv_capable(hdev)) {
+ err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
+ if (!err)
+ hci_update_scan_rsp_data_sync(hdev, 0x00);
+ } else {
+ err = hci_update_adv_data_sync(hdev, 0x00);
+ if (!err)
+ hci_update_scan_rsp_data_sync(hdev, 0x00);
+ }
+
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
+ hci_enable_advertising_sync(hdev);
+ }
+
+ /* Call for each tracked instance to be scheduled */
+ list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list)
+ hci_schedule_adv_instance_sync(hdev, adv->instance, true);
+
+ return 0;
+}
+
+static int hci_write_auth_enable_sync(struct hci_dev *hdev)
+{
+ u8 link_sec;
+
+ link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
+ if (link_sec == test_bit(HCI_AUTH, &hdev->flags))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
+ sizeof(link_sec), &link_sec,
+ HCI_CMD_TIMEOUT);
+}
+
+int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable)
+{
+ struct hci_cp_write_page_scan_activity cp;
+ u8 type;
+ int err = 0;
+
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ return 0;
+
+ if (hdev->hci_ver < BLUETOOTH_VER_1_2)
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+
+ if (enable) {
+ type = PAGE_SCAN_TYPE_INTERLACED;
+
+ /* 160 msec page scan interval */
+ cp.interval = cpu_to_le16(0x0100);
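+ /* 0x0100 = 256 slots * 0.625 ms = 160 ms */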
+ } else {
+ type = hdev->def_page_scan_type;
+ cp.interval = cpu_to_le16(hdev->def_page_scan_int);
+ }
+
+ cp.window = cpu_to_le16(hdev->def_page_scan_window);
+
+ if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval ||
+ __cpu_to_le16(hdev->page_scan_window) != cp.window) {
+ err = __hci_cmd_sync_status(hdev,
+ HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ if (err)
+ return err;
+ }
+
+ if (hdev->page_scan_type != type)
+ err = __hci_cmd_sync_status(hdev,
+ HCI_OP_WRITE_PAGE_SCAN_TYPE,
+ sizeof(type), &type,
+ HCI_CMD_TIMEOUT);
+
+ return err;
+}
+
+static bool disconnected_accept_list_entries(struct hci_dev *hdev)
+{
+ struct bdaddr_list *b;
+
+ list_for_each_entry(b, &hdev->accept_list, list) {
+ struct hci_conn *conn;
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
+ if (!conn)
+ return true;
+
+ if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+ return true;
+ }
+
+ return false;
+}
+
+static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
+ sizeof(val), &val,
+ HCI_CMD_TIMEOUT);
+}
+
+int hci_update_scan_sync(struct hci_dev *hdev)
+{
+ u8 scan;
+
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ return 0;
+
+ if (!hdev_is_powered(hdev))
+ return 0;
+
+ if (mgmt_powering_down(hdev))
+ return 0;
+
+ if (hdev->scanning_paused)
+ return 0;
+
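+ /* SCAN_PAGE makes the controller connectable (page scan) while
+ * SCAN_INQUIRY makes it discoverable (inquiry scan).
+ */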
+ if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
+ disconnected_accept_list_entries(hdev))
+ scan = SCAN_PAGE;
+ else
+ scan = SCAN_DISABLED;
+
+ if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
+ scan |= SCAN_INQUIRY;
+
+ if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
+ test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
+ return 0;
+
+ return hci_write_scan_enable_sync(hdev, scan);
+}
+
+int hci_update_name_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_write_local_name cp;
+
+ memset(&cp, 0, sizeof(cp));
+
+ memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
+ sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+}
+
+/* This function performs the powered update HCI command sequence after the HCI
+ * init sequence, which ends up resetting all states; the sequence is as
+ * follows:
+ *
+ * HCI_SSP_ENABLED(Enable SSP)
+ * HCI_LE_ENABLED(Enable LE)
+ * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) ->
+ * Update adv data)
+ * Enable Authentication
+ * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class ->
+ * Set Name -> Set EIR)
+ */
+int hci_powered_update_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ /* Register the available SMP channels (BR/EDR and LE) only when
+ * successfully powering on the controller. This late
+ * registration is required so that LE SMP can clearly decide if
+ * the public address or static address is used.
+ */
+ smp_register(hdev);
+
+ err = hci_write_ssp_mode_sync(hdev, 0x01);
+ if (err)
+ return err;
+
+ err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00);
+ if (err)
+ return err;
+
+ err = hci_powered_update_adv_sync(hdev);
+ if (err)
+ return err;
+
+ err = hci_write_auth_enable_sync(hdev);
+ if (err)
+ return err;
+
+ if (lmp_bredr_capable(hdev)) {
+ if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
+ hci_write_fast_connectable_sync(hdev, true);
+ else
+ hci_write_fast_connectable_sync(hdev, false);
+ hci_update_scan_sync(hdev);
+ hci_update_class_sync(hdev);
+ hci_update_name_sync(hdev);
+ hci_update_eir_sync(hdev);
+ }
+
+ return 0;
+}
+
+/**
+ * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
+ * (BD_ADDR) for a HCI device from
+ * a firmware node property.
+ * @hdev: The HCI device
+ *
+ * Search the firmware node for 'local-bd-address'.
+ *
+ * All-zero BD addresses are rejected, because those could be properties
+ * that exist in the firmware tables, but were not updated by the firmware. For
+ * example, the DTS could define 'local-bd-address' with an all-zero address.
+ */
+static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
+ bdaddr_t ba;
+ int ret;
+
+ ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
+ (u8 *)&ba, sizeof(ba));
+ if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
+ return;
+
+ bacpy(&hdev->public_addr, &ba);
+}
+
+struct hci_init_stage {
+ int (*func)(struct hci_dev *hdev);
+};
+
+/* Run a NULL-terminated table of init stage functions */
+static int hci_init_stage_sync(struct hci_dev *hdev,
+ const struct hci_init_stage *stage)
+{
+ size_t i;
+
+ for (i = 0; stage[i].func; i++) {
+ int err;
+
+ err = stage[i].func(hdev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Read Local Version */
+static int hci_read_local_version_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read BD Address */
+static int hci_read_bd_addr_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+#define HCI_INIT(_func) \
+{ \
+ .func = _func, \
+}
+
+static const struct hci_init_stage hci_init0[] = {
+ /* HCI_OP_READ_LOCAL_VERSION */
+ HCI_INIT(hci_read_local_version_sync),
+ /* HCI_OP_READ_BD_ADDR */
+ HCI_INIT(hci_read_bd_addr_sync),
+ {}
+};
+
+int hci_reset_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ set_bit(HCI_RESET, &hdev->flags);
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
+ HCI_CMD_TIMEOUT);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int hci_init0_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ bt_dev_dbg(hdev, "");
+
+ /* Reset */
+ if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
+ err = hci_reset_sync(hdev);
+ if (err)
+ return err;
+ }
+
+ return hci_init_stage_sync(hdev, hci_init0);
+}
+
+static int hci_unconf_init_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+ return 0;
+
+ err = hci_init0_sync(hdev);
+ if (err < 0)
+ return err;
+
+ if (hci_dev_test_flag(hdev, HCI_SETUP))
+ hci_debugfs_create_basic(hdev);
+
+ return 0;
+}
+
+/* Read Local Supported Features. */
+static int hci_read_local_features_sync(struct hci_dev *hdev)
+{
+ /* Not all AMP controllers support this command */
+ if (hdev->dev_type == HCI_AMP && !(hdev->commands[14] & 0x20))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* BR Controller init stage 1 command sequence */
+static const struct hci_init_stage br_init1[] = {
+ /* HCI_OP_READ_LOCAL_FEATURES */
+ HCI_INIT(hci_read_local_features_sync),
+ /* HCI_OP_READ_LOCAL_VERSION */
+ HCI_INIT(hci_read_local_version_sync),
+ /* HCI_OP_READ_BD_ADDR */
+ HCI_INIT(hci_read_bd_addr_sync),
+ {}
+};
+
+/* Read Local Commands */
+static int hci_read_local_cmds_sync(struct hci_dev *hdev)
+{
+ /* All Bluetooth 1.2 and later controllers should support the
+ * HCI command for reading the local supported commands.
+ *
+ * Unfortunately some controllers indicate Bluetooth 1.2 support,
+ * but do not have support for this command. If that is the case,
+ * the driver can quirk the behavior and skip reading the local
+ * supported commands.
+ */
+ if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
+ !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS,
+ 0, NULL, HCI_CMD_TIMEOUT);
+
+ return 0;
+}
+
+/* Read Local AMP Info */
+static int hci_read_local_amp_info_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_AMP_INFO,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Data Blk size */
+static int hci_read_data_block_size_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_DATA_BLOCK_SIZE,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Flow Control Mode */
+static int hci_read_flow_control_mode_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_FLOW_CONTROL_MODE,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Location Data */
+static int hci_read_location_data_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCATION_DATA,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* AMP Controller init stage 1 command sequence */
+static const struct hci_init_stage amp_init1[] = {
+ /* HCI_OP_READ_LOCAL_VERSION */
+ HCI_INIT(hci_read_local_version_sync),
+ /* HCI_OP_READ_LOCAL_COMMANDS */
+ HCI_INIT(hci_read_local_cmds_sync),
+ /* HCI_OP_READ_LOCAL_AMP_INFO */
+ HCI_INIT(hci_read_local_amp_info_sync),
+ /* HCI_OP_READ_DATA_BLOCK_SIZE */
+ HCI_INIT(hci_read_data_block_size_sync),
+ /* HCI_OP_READ_FLOW_CONTROL_MODE */
+ HCI_INIT(hci_read_flow_control_mode_sync),
+ /* HCI_OP_READ_LOCATION_DATA */
+ HCI_INIT(hci_read_location_data_sync),
+ {}
+};
+
+static int hci_init1_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ bt_dev_dbg(hdev, "");
+
+ /* Reset */
+ if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
+ err = hci_reset_sync(hdev);
+ if (err)
+ return err;
+ }
+
+ switch (hdev->dev_type) {
+ case HCI_PRIMARY:
+ hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
+ return hci_init_stage_sync(hdev, br_init1);
+ case HCI_AMP:
+ hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
+ return hci_init_stage_sync(hdev, amp_init1);
+ default:
+ bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
+ break;
+ }
+
+ return 0;
+}
+
+/* AMP Controller init stage 2 command sequence */
+static const struct hci_init_stage amp_init2[] = {
+ /* HCI_OP_READ_LOCAL_FEATURES */
+ HCI_INIT(hci_read_local_features_sync),
+ {}
+};
+
+/* Read Buffer Size (ACL mtu, max pkt, etc.) */
+static int hci_read_buffer_size_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Class of Device */
+static int hci_read_dev_class_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Local Name */
+static int hci_read_local_name_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Voice Setting */
+static int hci_read_voice_setting_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Number of Supported IAC */
+static int hci_read_num_supported_iac_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Current IAC LAP */
+static int hci_read_current_iac_lap_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type,
+ u8 cond_type, bdaddr_t *bdaddr,
+ u8 auto_accept)
+{
+ struct hci_cp_set_event_filter cp;
+
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.flt_type = flt_type;
+
+ if (flt_type != HCI_FLT_CLEAR_ALL) {
+ cp.cond_type = cond_type;
+ bacpy(&cp.addr_conn_flt.bdaddr, bdaddr);
+ cp.addr_conn_flt.auto_accept = auto_accept;
+ }
+
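+ /* HCI_FLT_CLEAR_ALL takes no condition, so only the filter type
+ * byte is sent in that case.
+ */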
+ return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT,
+ flt_type == HCI_FLT_CLEAR_ALL ?
+ sizeof(cp.flt_type) : sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+}
+
+static int hci_clear_event_filter_sync(struct hci_dev *hdev)
+{
+ if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED))
+ return 0;
+
+ return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
+ BDADDR_ANY, 0x00);
+}
+
+/* Connection accept timeout ~20 secs */
+static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
+{
+ __le16 param = cpu_to_le16(0x7d00);
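+ /* 0x7d00 = 32000 slots * 0.625 ms = 20 s */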
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT,
+ sizeof(param), &param, HCI_CMD_TIMEOUT);
+}
+
+/* BR Controller init stage 2 command sequence */
+static const struct hci_init_stage br_init2[] = {
+ /* HCI_OP_READ_BUFFER_SIZE */
+ HCI_INIT(hci_read_buffer_size_sync),
+ /* HCI_OP_READ_CLASS_OF_DEV */
+ HCI_INIT(hci_read_dev_class_sync),
+ /* HCI_OP_READ_LOCAL_NAME */
+ HCI_INIT(hci_read_local_name_sync),
+ /* HCI_OP_READ_VOICE_SETTING */
+ HCI_INIT(hci_read_voice_setting_sync),
+ /* HCI_OP_READ_NUM_SUPPORTED_IAC */
+ HCI_INIT(hci_read_num_supported_iac_sync),
+ /* HCI_OP_READ_CURRENT_IAC_LAP */
+ HCI_INIT(hci_read_current_iac_lap_sync),
+ /* HCI_OP_SET_EVENT_FLT */
+ HCI_INIT(hci_clear_event_filter_sync),
+ /* HCI_OP_WRITE_CA_TIMEOUT */
+ HCI_INIT(hci_write_ca_timeout_sync),
+ {}
+};
+
+static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev)
+{
+ u8 mode = 0x01;
+
+ if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
+ return 0;
+
+ /* When SSP is available, then the host features page
+ * should also be available as well. However some
+ * controllers list the max_page as 0 as long as SSP
+ * has not been enabled. To achieve proper debugging
+ * output, force the minimum max_page to 1 at least.
+ */
+ hdev->max_page = 0x01;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
+ sizeof(mode), &mode, HCI_CMD_TIMEOUT);
+}
+
+static int hci_write_eir_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_write_eir cp;
+
+ if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
+ return 0;
+
+ memset(hdev->eir, 0, sizeof(hdev->eir));
+ memset(&cp, 0, sizeof(cp));
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+}
+
+static int hci_write_inquiry_mode_sync(struct hci_dev *hdev)
+{
+ u8 mode;
+
+ if (!lmp_inq_rssi_capable(hdev) &&
+ !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
+ return 0;
+
+ /* If Extended Inquiry Result events are supported, then
+ * they are clearly preferred over Inquiry Result with RSSI
+ * events.
+ */
+ mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
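+ /* 0x01 = Inquiry Result with RSSI, 0x02 = Extended Inquiry Result */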
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE,
+ sizeof(mode), &mode, HCI_CMD_TIMEOUT);
+}
+
+static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev)
+{
+ if (!lmp_inq_tx_pwr_capable(hdev))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page)
+{
+ struct hci_cp_read_local_ext_features cp;
+
+ if (!lmp_ext_feat_capable(hdev))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.page = page;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev)
+{
+ return hci_read_local_ext_features_sync(hdev, 0x01);
+}
+
+/* HCI Controller init stage 2 command sequence */
+static const struct hci_init_stage hci_init2[] = {
+ /* HCI_OP_READ_LOCAL_COMMANDS */
+ HCI_INIT(hci_read_local_cmds_sync),
+ /* HCI_OP_WRITE_SSP_MODE */
+ HCI_INIT(hci_write_ssp_mode_1_sync),
+ /* HCI_OP_WRITE_EIR */
+ HCI_INIT(hci_write_eir_sync),
+ /* HCI_OP_WRITE_INQUIRY_MODE */
+ HCI_INIT(hci_write_inquiry_mode_sync),
+ /* HCI_OP_READ_INQ_RSP_TX_POWER */
+ HCI_INIT(hci_read_inq_rsp_tx_power_sync),
+ /* HCI_OP_READ_LOCAL_EXT_FEATURES */
+ HCI_INIT(hci_read_local_ext_features_1_sync),
+ /* HCI_OP_WRITE_AUTH_ENABLE */
+ HCI_INIT(hci_write_auth_enable_sync),
+ {}
+};
+
+/* Read LE Buffer Size */
+static int hci_le_read_buffer_size_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read LE Local Supported Features */
+static int hci_le_read_local_features_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read LE Supported States */
+static int hci_le_read_supported_states_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* LE Controller init stage 2 command sequence */
+static const struct hci_init_stage le_init2[] = {
+ /* HCI_OP_LE_READ_BUFFER_SIZE */
+ HCI_INIT(hci_le_read_buffer_size_sync),
+ /* HCI_OP_LE_READ_LOCAL_FEATURES */
+ HCI_INIT(hci_le_read_local_features_sync),
+ /* HCI_OP_LE_READ_SUPPORTED_STATES */
+ HCI_INIT(hci_le_read_supported_states_sync),
+ {}
+};
+
+static int hci_init2_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ bt_dev_dbg(hdev, "");
+
+ if (hdev->dev_type == HCI_AMP)
+ return hci_init_stage_sync(hdev, amp_init2);
+
+ if (lmp_bredr_capable(hdev)) {
+ err = hci_init_stage_sync(hdev, br_init2);
+ if (err)
+ return err;
+ } else {
+ hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
+ }
+
+ if (lmp_le_capable(hdev)) {
+ err = hci_init_stage_sync(hdev, le_init2);
+ if (err)
+ return err;
+ /* LE-only controllers have LE implicitly enabled */
+ if (!lmp_bredr_capable(hdev))
+ hci_dev_set_flag(hdev, HCI_LE_ENABLED);
+ }
+
+ return hci_init_stage_sync(hdev, hci_init2);
+}
+
+static int hci_set_event_mask_sync(struct hci_dev *hdev)
+{
+ /* The second byte is 0xff instead of 0x9f (two reserved bits
+ * disabled) since a Broadcom 1.2 dongle doesn't respond to the
+ * command otherwise.
+ */
+ u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
+
+ /* CSR 1.1 dongles do not accept any bitfield so don't try to set
+ * any event mask for pre 1.2 devices.
+ */
+ if (hdev->hci_ver < BLUETOOTH_VER_1_2)
+ return 0;
+
+ if (lmp_bredr_capable(hdev)) {
+ events[4] |= 0x01; /* Flow Specification Complete */
+
+ /* Don't set Disconnect Complete when suspended as that
+ * would wakeup the host when disconnecting due to
+ * suspend.
+ */
+ if (hdev->suspended)
+ events[0] &= 0xef;
+ } else {
+ /* Use a different default for LE-only devices */
+ memset(events, 0, sizeof(events));
+ events[1] |= 0x20; /* Command Complete */
+ events[1] |= 0x40; /* Command Status */
+ events[1] |= 0x80; /* Hardware Error */
+
+ /* If the controller supports the Disconnect command, enable
+ * the corresponding event. In addition enable packet flow
+ * control related events.
+ */
+ if (hdev->commands[0] & 0x20) {
+ /* Don't set Disconnect Complete when suspended as that
+ * would wakeup the host when disconnecting due to
+ * suspend.
+ */
+ if (!hdev->suspended)
+ events[0] |= 0x10; /* Disconnection Complete */
+ events[2] |= 0x04; /* Number of Completed Packets */
+ events[3] |= 0x02; /* Data Buffer Overflow */
+ }
+
+ /* If the controller supports the Read Remote Version
+ * Information command, enable the corresponding event.
+ */
+ if (hdev->commands[2] & 0x80)
+ events[1] |= 0x08; /* Read Remote Version Information
+ * Complete
+ */
+
+ if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
+ events[0] |= 0x80; /* Encryption Change */
+ events[5] |= 0x80; /* Encryption Key Refresh Complete */
+ }
+ }
+
+ if (lmp_inq_rssi_capable(hdev) ||
+ test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
+ events[4] |= 0x02; /* Inquiry Result with RSSI */
+
+ if (lmp_ext_feat_capable(hdev))
+ events[4] |= 0x04; /* Read Remote Extended Features Complete */
+
+ if (lmp_esco_capable(hdev)) {
+ events[5] |= 0x08; /* Synchronous Connection Complete */
+ events[5] |= 0x10; /* Synchronous Connection Changed */
+ }
+
+ if (lmp_sniffsubr_capable(hdev))
+ events[5] |= 0x20; /* Sniff Subrating */
+
+ if (lmp_pause_enc_capable(hdev))
+ events[5] |= 0x80; /* Encryption Key Refresh Complete */
+
+ if (lmp_ext_inq_capable(hdev))
+ events[5] |= 0x40; /* Extended Inquiry Result */
+
+ if (lmp_no_flush_capable(hdev))
+ events[7] |= 0x01; /* Enhanced Flush Complete */
+
+ if (lmp_lsto_capable(hdev))
+ events[6] |= 0x80; /* Link Supervision Timeout Changed */
+
+ if (lmp_ssp_capable(hdev)) {
+ events[6] |= 0x01; /* IO Capability Request */
+ events[6] |= 0x02; /* IO Capability Response */
+ events[6] |= 0x04; /* User Confirmation Request */
+ events[6] |= 0x08; /* User Passkey Request */
+ events[6] |= 0x10; /* Remote OOB Data Request */
+ events[6] |= 0x20; /* Simple Pairing Complete */
+ events[7] |= 0x04; /* User Passkey Notification */
+ events[7] |= 0x08; /* Keypress Notification */
+ events[7] |= 0x10; /* Remote Host Supported
+ * Features Notification
+ */
+ }
+
+ if (lmp_le_capable(hdev))
+ events[7] |= 0x20; /* LE Meta-Event */
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK,
+ sizeof(events), events, HCI_CMD_TIMEOUT);
+}
+
+static int hci_read_stored_link_key_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_read_stored_link_key cp;
+
+ if (!(hdev->commands[6] & 0x20) ||
+ test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+ bacpy(&cp.bdaddr, BDADDR_ANY);
+ cp.read_all = 0x01;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_setup_link_policy_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_write_def_link_policy cp;
+ u16 link_policy = 0;
+
+ if (!(hdev->commands[5] & 0x10))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+
+ if (lmp_rswitch_capable(hdev))
+ link_policy |= HCI_LP_RSWITCH;
+ if (lmp_hold_capable(hdev))
+ link_policy |= HCI_LP_HOLD;
+ if (lmp_sniff_capable(hdev))
+ link_policy |= HCI_LP_SNIFF;
+ if (lmp_park_capable(hdev))
+ link_policy |= HCI_LP_PARK;
+
+ cp.policy = cpu_to_le16(link_policy);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_read_page_scan_activity_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[8] & 0x01))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[18] & 0x04) ||
+ test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+static int hci_read_page_scan_type_sync(struct hci_dev *hdev)
+{
+ /* Some older Broadcom based Bluetooth 1.2 controllers do not
+ * support the Read Page Scan Type command. Check support for
+ * this command in the bit mask of supported commands.
+ */
+ if (!(hdev->commands[13] & 0x01))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read features beyond page 1 if available */
+static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev)
+{
+ u8 page;
+ int err;
+
+ if (!lmp_ext_feat_capable(hdev))
+ return 0;
+
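+ /* Pages 0 and 1 have already been read during earlier init stages */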
+ for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page;
+ page++) {
+ err = hci_read_local_ext_features_sync(hdev, page);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* HCI Controller init stage 3 command sequence */
+static const struct hci_init_stage hci_init3[] = {
+ /* HCI_OP_SET_EVENT_MASK */
+ HCI_INIT(hci_set_event_mask_sync),
+ /* HCI_OP_READ_STORED_LINK_KEY */
+ HCI_INIT(hci_read_stored_link_key_sync),
+ /* HCI_OP_WRITE_DEF_LINK_POLICY */
+ HCI_INIT(hci_setup_link_policy_sync),
+ /* HCI_OP_READ_PAGE_SCAN_ACTIVITY */
+ HCI_INIT(hci_read_page_scan_activity_sync),
+ /* HCI_OP_READ_DEF_ERR_DATA_REPORTING */
+ HCI_INIT(hci_read_def_err_data_reporting_sync),
+ /* HCI_OP_READ_PAGE_SCAN_TYPE */
+ HCI_INIT(hci_read_page_scan_type_sync),
+ /* HCI_OP_READ_LOCAL_EXT_FEATURES */
+ HCI_INIT(hci_read_local_ext_features_all_sync),
+ {}
+};
+
+static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
+{
+ u8 events[8];
+
+ if (!lmp_le_capable(hdev))
+ return 0;
+
+ memset(events, 0, sizeof(events));
+
+ if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
+ events[0] |= 0x10; /* LE Long Term Key Request */
+
+ /* If controller supports the Connection Parameters Request
+ * Link Layer Procedure, enable the corresponding event.
+ */
+ if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
+ /* LE Remote Connection Parameter Request */
+ events[0] |= 0x20;
+
+ /* If the controller supports the Data Length Extension
+ * feature, enable the corresponding event.
+ */
+ if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
+ events[0] |= 0x40; /* LE Data Length Change */
+
+ /* If the controller supports LL Privacy feature, enable
+ * the corresponding event.
+ */
+ if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
+ events[1] |= 0x02; /* LE Enhanced Connection Complete */
+
+ /* If the controller supports Extended Scanner Filter
+ * Policies, enable the corresponding event.
+ */
+ if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
+ events[1] |= 0x04; /* LE Direct Advertising Report */
+
+ /* If the controller supports Channel Selection Algorithm #2
+ * feature, enable the corresponding event.
+ */
+ if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
+ events[2] |= 0x08; /* LE Channel Selection Algorithm */
+
+ /* If the controller supports the LE Set Scan Enable command,
+ * enable the corresponding advertising report event.
+ */
+ if (hdev->commands[26] & 0x08)
+ events[0] |= 0x02; /* LE Advertising Report */
+
+ /* If the controller supports the LE Create Connection
+ * command, enable the corresponding event.
+ */
+ if (hdev->commands[26] & 0x10)
+ events[0] |= 0x01; /* LE Connection Complete */
+
+ /* If the controller supports the LE Connection Update
+ * command, enable the corresponding event.
+ */
+ if (hdev->commands[27] & 0x04)
+ events[0] |= 0x04; /* LE Connection Update Complete */
+
+ /* If the controller supports the LE Read Remote Used Features
+ * command, enable the corresponding event.
+ */
+ if (hdev->commands[27] & 0x20)
+ /* LE Read Remote Used Features Complete */
+ events[0] |= 0x08;
+
+ /* If the controller supports the LE Read Local P-256
+ * Public Key command, enable the corresponding event.
+ */
+ if (hdev->commands[34] & 0x02)
+ /* LE Read Local P-256 Public Key Complete */
+ events[0] |= 0x80;
+
+ /* If the controller supports the LE Generate DHKey
+ * command, enable the corresponding event.
+ */
+ if (hdev->commands[34] & 0x04)
+ events[1] |= 0x01; /* LE Generate DHKey Complete */
+
+ /* If the controller supports the LE Set Default PHY or
+ * LE Set PHY commands, enable the corresponding event.
+ */
+ if (hdev->commands[35] & (0x20 | 0x40))
+ events[1] |= 0x08; /* LE PHY Update Complete */
+
+ /* If the controller supports LE Set Extended Scan Parameters
+ * and LE Set Extended Scan Enable commands, enable the
+ * corresponding event.
+ */
+ if (use_ext_scan(hdev))
+ events[1] |= 0x10; /* LE Extended Advertising Report */
+
+ /* If the controller supports the LE Extended Advertising
+ * command, enable the corresponding event.
+ */
+ if (ext_adv_capable(hdev))
+ events[2] |= 0x02; /* LE Advertising Set Terminated */
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK,
+ sizeof(events), events, HCI_CMD_TIMEOUT);
+}
+
+/* Read LE Advertising Channel TX Power */
+static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev)
+{
+ if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
+ /* HCI TS spec forbids mixing of legacy and extended
+ * advertising commands wherein READ_ADV_TX_POWER is
+ * also included. So do not call it if extended adv
+ * is supported otherwise controller will return
+ * COMMAND_DISALLOWED for extended commands.
+ */
+ return __hci_cmd_sync_status(hdev,
+ HCI_OP_LE_READ_ADV_TX_POWER,
+ 0, NULL, HCI_CMD_TIMEOUT);
+ }
+
+ return 0;
+}
+
+/* Read LE Min/Max Tx Power */
+static int hci_le_read_tx_power_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[38] & 0x80))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read LE Accept List Size */
+static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[26] & 0x40))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Clear LE Accept List */
+static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[26] & 0x80))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
+ HCI_CMD_TIMEOUT);
+}
+
+/* Read LE Resolving List Size */
+static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[34] & 0x40))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Clear LE Resolving List */
+static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[34] & 0x20))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL,
+ HCI_CMD_TIMEOUT);
+}
+
+/* Set RPA timeout */
+static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev)
+{
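+ /* The RPA timeout is expressed in seconds */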
+ __le16 timeout = cpu_to_le16(hdev->rpa_timeout);
+
+ if (!(hdev->commands[35] & 0x04))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT,
+ sizeof(timeout), &timeout,
+ HCI_CMD_TIMEOUT);
+}
+
+/* Read LE Maximum Data Length */
+static int hci_le_read_max_data_len_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL,
+ HCI_CMD_TIMEOUT);
+}
+
+/* Read LE Suggested Default Data Length */
+static int hci_le_read_def_data_len_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL,
+ HCI_CMD_TIMEOUT);
+}
+
+/* Read LE Number of Supported Advertising Sets */
+static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev)
+{
+ if (!ext_adv_capable(hdev))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev,
+ HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Write LE Host Supported */
+static int hci_set_le_support_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_write_le_host_supported cp;
+
+ /* LE-only devices do not support explicit enablement */
+ if (!lmp_bredr_capable(hdev))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+
+ if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
+ cp.le = 0x01;
+ cp.simul = 0x00;
+ }
+
+ if (cp.le == lmp_host_le_capable(hdev))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+/* LE Controller init stage 3 command sequence */
+static const struct hci_init_stage le_init3[] = {
+ /* HCI_OP_LE_SET_EVENT_MASK */
+ HCI_INIT(hci_le_set_event_mask_sync),
+ /* HCI_OP_LE_READ_ADV_TX_POWER */
+ HCI_INIT(hci_le_read_adv_tx_power_sync),
+ /* HCI_OP_LE_READ_TRANSMIT_POWER */
+ HCI_INIT(hci_le_read_tx_power_sync),
+ /* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */
+ HCI_INIT(hci_le_read_accept_list_size_sync),
+ /* HCI_OP_LE_CLEAR_ACCEPT_LIST */
+ HCI_INIT(hci_le_clear_accept_list_sync),
+ /* HCI_OP_LE_READ_RESOLV_LIST_SIZE */
+ HCI_INIT(hci_le_read_resolv_list_size_sync),
+ /* HCI_OP_LE_CLEAR_RESOLV_LIST */
+ HCI_INIT(hci_le_clear_resolv_list_sync),
+ /* HCI_OP_LE_SET_RPA_TIMEOUT */
+ HCI_INIT(hci_le_set_rpa_timeout_sync),
+ /* HCI_OP_LE_READ_MAX_DATA_LEN */
+ HCI_INIT(hci_le_read_max_data_len_sync),
+ /* HCI_OP_LE_READ_DEF_DATA_LEN */
+ HCI_INIT(hci_le_read_def_data_len_sync),
+ /* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */
+ HCI_INIT(hci_le_read_num_support_adv_sets_sync),
+ /* HCI_OP_WRITE_LE_HOST_SUPPORTED */
+ HCI_INIT(hci_set_le_support_sync),
+ {}
+};
+
+static int hci_init3_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ bt_dev_dbg(hdev, "");
+
+ err = hci_init_stage_sync(hdev, hci_init3);
+ if (err)
+ return err;
+
+ if (lmp_le_capable(hdev))
+ return hci_init_stage_sync(hdev, le_init3);
+
+ return 0;
+}
+
+static int hci_delete_stored_link_key_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_delete_stored_link_key cp;
+
+ /* Some Broadcom based Bluetooth controllers do not support the
+ * Delete Stored Link Key command. They are clearly indicating its
+ * absence in the bit mask of supported commands.
+ *
+ * Check the supported commands and only if the command is marked
+ * as supported send it. If not supported assume that the controller
+ * does not have actual support for stored link keys which makes this
+ * command redundant anyway.
+ *
+ * Some controllers indicate that they support deleting stored
+ * link keys, but do not actually do so. The quirk lets a driver
+ * just disable this command.
+ */
+ if (!(hdev->commands[6] & 0x80) ||
+ test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+ bacpy(&cp.bdaddr, BDADDR_ANY);
+ cp.delete_all = 0x01;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
+{
+ u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ bool changed = false;
+
+ /* Set event mask page 2 if the HCI command for it is supported */
+ if (!(hdev->commands[22] & 0x04))
+ return 0;
+
+ /* If Connectionless Peripheral Broadcast central role is supported
+ * enable all necessary events for it.
+ */
+ if (lmp_cpb_central_capable(hdev)) {
+ events[1] |= 0x40; /* Triggered Clock Capture */
+ events[1] |= 0x80; /* Synchronization Train Complete */
+ events[2] |= 0x10; /* Peripheral Page Response Timeout */
+ events[2] |= 0x20; /* CPB Channel Map Change */
+ changed = true;
+ }
+
+ /* If Connectionless Peripheral Broadcast peripheral role is supported
+ * enable all necessary events for it.
+ */
+ if (lmp_cpb_peripheral_capable(hdev)) {
+ events[2] |= 0x01; /* Synchronization Train Received */
+ events[2] |= 0x02; /* CPB Receive */
+ events[2] |= 0x04; /* CPB Timeout */
+ events[2] |= 0x08; /* Truncated Page Complete */
+ changed = true;
+ }
+
+ /* Enable Authenticated Payload Timeout Expired event if supported */
+ if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
+ events[2] |= 0x80;
+ changed = true;
+ }
+
+ /* Some Broadcom based controllers indicate support for Set Event
+ * Mask Page 2 command, but then actually do not support it. Since
+ * the default value is all bits set to zero, the command is only
+ * required if the event mask has to be changed. In case no change
+ * to the event mask is needed, skip this command.
+ */
+ if (!changed)
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2,
+ sizeof(events), events, HCI_CMD_TIMEOUT);
+}
+
+/* Read local codec list if the HCI command is supported */
+static int hci_read_local_codecs_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[29] & 0x20))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_CODECS, 0, NULL,
+ HCI_CMD_TIMEOUT);
+}
+
+/* Read local pairing options if the HCI command is supported */
+static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[41] & 0x08))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Get MWS transport configuration if the HCI command is supported */
+static int hci_get_mws_transport_config_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[30] & 0x08))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Check for Synchronization Train support */
+static int hci_read_sync_train_params_sync(struct hci_dev *hdev)
+{
+ if (!lmp_sync_train_capable(hdev))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Enable Secure Connections if supported and configured */
+static int hci_write_sc_support_1_sync(struct hci_dev *hdev)
+{
+ u8 support = 0x01;
+
+ if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
+ !bredr_sc_enabled(hdev))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
+ sizeof(support), &support,
+ HCI_CMD_TIMEOUT);
+}
+
+/* If supported, set erroneous data reporting to match the wideband
+ * speech setting value
+ */
+static int hci_set_err_data_report_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_write_def_err_data_reporting cp;
+ bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);
+
+ if (!(hdev->commands[18] & 0x08) ||
+ test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
+ return 0;
+
+ if (enabled == hdev->err_data_reporting)
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.err_data_reporting = enabled ? ERR_DATA_REPORTING_ENABLED :
+ ERR_DATA_REPORTING_DISABLED;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static const struct hci_init_stage hci_init4[] = {
+ /* HCI_OP_DELETE_STORED_LINK_KEY */
+ HCI_INIT(hci_delete_stored_link_key_sync),
+ /* HCI_OP_SET_EVENT_MASK_PAGE_2 */
+ HCI_INIT(hci_set_event_mask_page_2_sync),
+ /* HCI_OP_READ_LOCAL_CODECS */
+ HCI_INIT(hci_read_local_codecs_sync),
+ /* HCI_OP_READ_LOCAL_PAIRING_OPTS */
+ HCI_INIT(hci_read_local_pairing_opts_sync),
+ /* HCI_OP_GET_MWS_TRANSPORT_CONFIG */
+ HCI_INIT(hci_get_mws_transport_config_sync),
+ /* HCI_OP_READ_SYNC_TRAIN_PARAMS */
+ HCI_INIT(hci_read_sync_train_params_sync),
+ /* HCI_OP_WRITE_SC_SUPPORT */
+ HCI_INIT(hci_write_sc_support_1_sync),
+ /* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */
+ HCI_INIT(hci_set_err_data_report_sync),
+ {}
+};
+
+/* Set Suggested Default Data Length to maximum if supported */
+static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_le_write_def_data_len cp;
+
+ if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
+ cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+/* Set Default PHY parameters if command is supported */
+static int hci_le_set_default_phy_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_le_set_default_phy cp;
+
+ if (!(hdev->commands[35] & 0x20))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.all_phys = 0x00;
+ cp.tx_phys = hdev->le_tx_def_phys;
+ cp.rx_phys = hdev->le_rx_def_phys;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static const struct hci_init_stage le_init4[] = {
+ /* HCI_OP_LE_WRITE_DEF_DATA_LEN */
+ HCI_INIT(hci_le_set_write_def_data_len_sync),
+ /* HCI_OP_LE_SET_DEFAULT_PHY */
+ HCI_INIT(hci_le_set_default_phy_sync),
+ {}
+};
+
+static int hci_init4_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ bt_dev_dbg(hdev, "");
+
+ err = hci_init_stage_sync(hdev, hci_init4);
+ if (err)
+ return err;
+
+ if (lmp_le_capable(hdev))
+ return hci_init_stage_sync(hdev, le_init4);
+
+ return 0;
+}
+
+static int hci_init_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ err = hci_init1_sync(hdev);
+ if (err < 0)
+ return err;
+
+ if (hci_dev_test_flag(hdev, HCI_SETUP))
+ hci_debugfs_create_basic(hdev);
+
+ err = hci_init2_sync(hdev);
+ if (err < 0)
+ return err;
+
+ /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
+ * dual-mode BR/EDR/LE controllers. AMP controllers only need the
+ * first two stages of init.
+ */
+ if (hdev->dev_type != HCI_PRIMARY)
+ return 0;
+
+ err = hci_init3_sync(hdev);
+ if (err < 0)
+ return err;
+
+ err = hci_init4_sync(hdev);
+ if (err < 0)
+ return err;
+
+ /* This function is only called when the controller is actually in
+ * configured state. When the controller is marked as unconfigured,
+ * this initialization procedure is not run.
+ *
+ * It means that it is possible that a controller runs through its
+ * setup phase and then discovers missing settings. If that is the
+ * case, then this function will not be called. It then will only
+ * be called during the config phase.
+ *
+ * So only when in setup phase or config phase, create the debugfs
+ * entries and register the SMP channels.
+ */
+ if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
+ !hci_dev_test_flag(hdev, HCI_CONFIG))
+ return 0;
+
+ hci_debugfs_create_common(hdev);
+
+ if (lmp_bredr_capable(hdev))
+ hci_debugfs_create_bredr(hdev);
+
+ if (lmp_le_capable(hdev))
+ hci_debugfs_create_le(hdev);
+
+ return 0;
+}
+
+int hci_dev_open_sync(struct hci_dev *hdev)
+{
+ int ret = 0;
+
+ bt_dev_dbg(hdev, "");
+
+ if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
+ ret = -ENODEV;
+ goto done;
+ }
+
+ if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
+ !hci_dev_test_flag(hdev, HCI_CONFIG)) {
+ /* Check for rfkill but allow the HCI setup stage to
+ * proceed (which in itself doesn't cause any RF activity).
+ */
+ if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
+ ret = -ERFKILL;
+ goto done;
+ }
+
+ /* Check for valid public address or a configured static
+ * random address, but let the HCI setup proceed to
+ * be able to determine if there is a public address
+ * or not.
+ *
+ * In case of user channel usage, it is not important
+ * if a public address or static random address is
+ * available.
+ *
+ * This check is only valid for BR/EDR controllers
+ * since AMP controllers do not have an address.
+ */
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hdev->dev_type == HCI_PRIMARY &&
+ !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+ !bacmp(&hdev->static_addr, BDADDR_ANY)) {
+ ret = -EADDRNOTAVAIL;
+ goto done;
+ }
+ }
+
+ if (test_bit(HCI_UP, &hdev->flags)) {
+ ret = -EALREADY;
+ goto done;
+ }
+
+ if (hdev->open(hdev)) {
+ ret = -EIO;
+ goto done;
+ }
+
+ set_bit(HCI_RUNNING, &hdev->flags);
+ hci_sock_dev_event(hdev, HCI_DEV_OPEN);
+
+ atomic_set(&hdev->cmd_cnt, 1);
+ set_bit(HCI_INIT, &hdev->flags);
+
+ if (hci_dev_test_flag(hdev, HCI_SETUP) ||
+ test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
+ bool invalid_bdaddr;
+
+ hci_sock_dev_event(hdev, HCI_DEV_SETUP);
+
+ if (hdev->setup)
+ ret = hdev->setup(hdev);
+
+ /* The transport driver can set the quirk to mark the
+ * BD_ADDR invalid before creating the HCI device or in
+ * its setup callback.
+ */
+ invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
+ &hdev->quirks);
+
+ if (ret)
+ goto setup_failed;
+
+ if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
+ if (!bacmp(&hdev->public_addr, BDADDR_ANY))
+ hci_dev_get_bd_addr_from_property(hdev);
+
+ if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
+ hdev->set_bdaddr) {
+ ret = hdev->set_bdaddr(hdev,
+ &hdev->public_addr);
+
+ /* If setting of the BD_ADDR from the device
+ * property succeeds, then treat the address
+ * as valid even if the invalid BD_ADDR
+ * quirk indicates otherwise.
+ */
+ if (!ret)
+ invalid_bdaddr = false;
+ }
+ }
+
+setup_failed:
+ /* The transport driver can set these quirks before
+ * creating the HCI device or in its setup callback.
+ *
+ * For the invalid BD_ADDR quirk it is possible that
+ * it becomes a valid address if the bootloader does
+ * provide it (see above).
+ *
+ * In case any of them is set, the controller has to
+ * start up as unconfigured.
+ */
+ if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
+ invalid_bdaddr)
+ hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
+
+ /* For an unconfigured controller it is required to
+ * read at least the version information provided by
+ * the Read Local Version Information command.
+ *
+ * If the set_bdaddr driver callback is provided, then
+ * also the original Bluetooth public device address
+ * will be read using the Read BD Address command.
+ */
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
+ ret = hci_unconf_init_sync(hdev);
+ }
+
+ if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
+ /* If public address change is configured, ensure that
+ * the address gets programmed. If the driver does not
+ * support changing the public address, fail the power
+ * on procedure.
+ */
+ if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
+ hdev->set_bdaddr)
+ ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
+ else
+ ret = -EADDRNOTAVAIL;
+ }
+
+ if (!ret) {
+ if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
+ ret = hci_init_sync(hdev);
+ if (!ret && hdev->post_init)
+ ret = hdev->post_init(hdev);
+ }
+ }
+
+ /* If the HCI Reset command is clearing all diagnostic settings,
+ * then they need to be reprogrammed after the init procedure
+ * has completed.
+ */
+ if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
+ ret = hdev->set_diag(hdev, true);
+
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
+ msft_do_open(hdev);
+ aosp_do_open(hdev);
+ }
+
+ clear_bit(HCI_INIT, &hdev->flags);
+
+ if (!ret) {
+ hci_dev_hold(hdev);
+ hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
+ hci_adv_instances_set_rpa_expired(hdev, true);
+ set_bit(HCI_UP, &hdev->flags);
+ hci_sock_dev_event(hdev, HCI_DEV_UP);
+ hci_leds_update_powered(hdev, true);
+ if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
+ !hci_dev_test_flag(hdev, HCI_CONFIG) &&
+ !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hci_dev_test_flag(hdev, HCI_MGMT) &&
+ hdev->dev_type == HCI_PRIMARY) {
+ ret = hci_powered_update_sync(hdev);
+ }
+ } else {
+ /* Init failed, cleanup */
+ flush_work(&hdev->tx_work);
+
+ /* Since hci_rx_work() can queue new cmd_work, it should be
+ * flushed first to avoid an unexpected call of
+ * hci_cmd_work()
+ */
+ flush_work(&hdev->rx_work);
+ flush_work(&hdev->cmd_work);
+
+ skb_queue_purge(&hdev->cmd_q);
+ skb_queue_purge(&hdev->rx_q);
+
+ if (hdev->flush)
+ hdev->flush(hdev);
+
+ if (hdev->sent_cmd) {
+ kfree_skb(hdev->sent_cmd);
+ hdev->sent_cmd = NULL;
+ }
+
+ clear_bit(HCI_RUNNING, &hdev->flags);
+ hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
+
+ hdev->close(hdev);
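+ /* Clear all flags except HCI_RAW */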
+ hdev->flags &= BIT(HCI_RAW);
+ }
+
+done:
+ return ret;
+}
+
+/* This function requires the caller holds hdev->lock */
+static void hci_pend_le_actions_clear(struct hci_dev *hdev)
+{
+ struct hci_conn_params *p;
+
+ list_for_each_entry(p, &hdev->le_conn_params, list) {
+ if (p->conn) {
+ hci_conn_drop(p->conn);
+ hci_conn_put(p->conn);
+ p->conn = NULL;
+ }
+ list_del_init(&p->action);
+ }
+
+ BT_DBG("All LE pending actions cleared");
+}
+
+int hci_dev_close_sync(struct hci_dev *hdev)
+{
+ bool auto_off;
+ int err = 0;
+
+ bt_dev_dbg(hdev, "");
+
+ cancel_delayed_work(&hdev->power_off);
+ cancel_delayed_work(&hdev->ncmd_timer);
+
+ hci_request_cancel_all(hdev);
+
+ if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ test_bit(HCI_UP, &hdev->flags)) {
+ /* Execute vendor specific shutdown routine */
+ if (hdev->shutdown)
+ err = hdev->shutdown(hdev);
+ }
+
+ if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
+ cancel_delayed_work_sync(&hdev->cmd_timer);
+ return err;
+ }
+
+ hci_leds_update_powered(hdev, false);
+
+ /* Flush RX and TX works */
+ flush_work(&hdev->tx_work);
+ flush_work(&hdev->rx_work);
+
+ if (hdev->discov_timeout > 0) {
+ hdev->discov_timeout = 0;
+ hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
+ hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
+ }
+
+ if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
+ cancel_delayed_work(&hdev->service_cache);
+
+ if (hci_dev_test_flag(hdev, HCI_MGMT)) {
+ struct adv_info *adv_instance;
+
+ cancel_delayed_work_sync(&hdev->rpa_expired);
+
+ list_for_each_entry(adv_instance, &hdev->adv_instances, list)
+ cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
+ }
+
+ /* Avoid potential lockdep warnings from the *_flush() calls by
+ * ensuring the workqueue is empty up front.
+ */
+ drain_workqueue(hdev->workqueue);
+
+ hci_dev_lock(hdev);
+
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+
+ auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
+
+ if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hci_dev_test_flag(hdev, HCI_MGMT))
+ __mgmt_power_off(hdev);
+
+ hci_inquiry_cache_flush(hdev);
+ hci_pend_le_actions_clear(hdev);
+ hci_conn_hash_flush(hdev);
+ hci_dev_unlock(hdev);
+
+ smp_unregister(hdev);
+
+ hci_sock_dev_event(hdev, HCI_DEV_DOWN);
+
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
+ aosp_do_close(hdev);
+ msft_do_close(hdev);
+ }
+
+ if (hdev->flush)
+ hdev->flush(hdev);
+
+ /* Reset device */
+ skb_queue_purge(&hdev->cmd_q);
+ atomic_set(&hdev->cmd_cnt, 1);
+ if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
+ !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+ set_bit(HCI_INIT, &hdev->flags);
+ hci_reset_sync(hdev);
+ clear_bit(HCI_INIT, &hdev->flags);
+ }
+
+ /* flush cmd work */
+ flush_work(&hdev->cmd_work);
+
+ /* Drop queues */
+ skb_queue_purge(&hdev->rx_q);
+ skb_queue_purge(&hdev->cmd_q);
+ skb_queue_purge(&hdev->raw_q);
+
+ /* Drop last sent command */
+ if (hdev->sent_cmd) {
+ cancel_delayed_work_sync(&hdev->cmd_timer);
+ kfree_skb(hdev->sent_cmd);
+ hdev->sent_cmd = NULL;
+ }
+
+ clear_bit(HCI_RUNNING, &hdev->flags);
+ hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
+
+ /* After this point our queues are empty and no tasks are scheduled. */
+ hdev->close(hdev);
+
+ /* Clear flags */
+ hdev->flags &= BIT(HCI_RAW);
+ hci_dev_clear_volatile_flags(hdev);
+
+ /* Controller radio is available but is currently powered down */
+ hdev->amp_status = AMP_STATUS_POWERED_DOWN;
+
+ memset(hdev->eir, 0, sizeof(hdev->eir));
+ memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
+ bacpy(&hdev->random_addr, BDADDR_ANY);
+
+ hci_dev_put(hdev);
+ return err;
+}
+
+/* This function performs the power-on HCI command sequence as follows:
+ *
+ * If the controller is already up (HCI_UP), run the hci_powered_update_sync
+ * sequence; otherwise run hci_dev_open_sync, which follows up with
+ * hci_powered_update_sync once the init sequence has completed.
+ */
+static int hci_power_on_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ if (test_bit(HCI_UP, &hdev->flags) &&
+ hci_dev_test_flag(hdev, HCI_MGMT) &&
+ hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
+ cancel_delayed_work(&hdev->power_off);
+ return hci_powered_update_sync(hdev);
+ }
+
+ err = hci_dev_open_sync(hdev);
+ if (err < 0)
+ return err;
+
+ /* During the HCI setup phase, a few error conditions are
+ * ignored and they need to be checked now. If they are still
+ * valid, it is important to return the device back off.
+ */
+ if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
+ hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
+ (hdev->dev_type == HCI_PRIMARY &&
+ !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+ !bacmp(&hdev->static_addr, BDADDR_ANY))) {
+ hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
+ hci_dev_close_sync(hdev);
+ } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
+ queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
+ HCI_AUTO_OFF_TIMEOUT);
+ }
+
+ if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
+ /* For unconfigured devices, set the HCI_RAW flag
+ * so that userspace can easily identify them.
+ */
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
+ set_bit(HCI_RAW, &hdev->flags);
+
+ /* For fully configured devices, this will send
+ * the Index Added event. For unconfigured devices,
+ * it will send the Unconfigured Index Added event.
+ *
+ * Devices with HCI_QUIRK_RAW_DEVICE are ignored
+ * and no event will be sent.
+ */
+ mgmt_index_added(hdev);
+ } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
+ /* Now that the controller is configured, it is
+ * important to clear the HCI_RAW flag.
+ */
+ if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
+ clear_bit(HCI_RAW, &hdev->flags);
+
+ /* Powering on the controller with HCI_CONFIG set only
+ * happens with the transition from unconfigured to
+ * configured. This will send the Index Added event.
+ */
+ mgmt_index_added(hdev);
+ }
+
+ return 0;
+}
+
+static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr)
+{
+ struct hci_cp_remote_name_req_cancel cp;
+
+ memset(&cp, 0, sizeof(cp));
+ bacpy(&cp.bdaddr, addr);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_stop_discovery_sync(struct hci_dev *hdev)
+{
+ struct discovery_state *d = &hdev->discovery;
+ struct inquiry_entry *e;
+ int err;
+
+ bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
+
+ if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
+ if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+ err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL,
+ 0, NULL, HCI_CMD_TIMEOUT);
+ if (err)
+ return err;
+ }
+
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
+ cancel_delayed_work(&hdev->le_scan_disable);
+ cancel_delayed_work(&hdev->le_scan_restart);
+
+ err = hci_scan_disable_sync(hdev);
+ if (err)
+ return err;
+ }
+
+ } else {
+ err = hci_scan_disable_sync(hdev);
+ if (err)
+ return err;
+ }
+
+ /* Resume advertising if it was paused */
+ if (use_ll_privacy(hdev))
+ hci_resume_advertising_sync(hdev);
+
+ /* No further actions needed for LE-only discovery */
+ if (d->type == DISCOV_TYPE_LE)
+ return 0;
+
+ if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
+ e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
+ NAME_PENDING);
+ if (!e)
+ return 0;
+
+ return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
+ }
+
+ return 0;
+}
+
+static int hci_disconnect_phy_link_sync(struct hci_dev *hdev, u16 handle,
+ u8 reason)
+{
+ struct hci_cp_disconn_phy_link cp;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.phy_handle = HCI_PHY_HANDLE(handle);
+ cp.reason = reason;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_DISCONN_PHY_LINK,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ u8 reason)
+{
+ struct hci_cp_disconnect cp;
+
+ if (conn->type == AMP_LINK)
+ return hci_disconnect_phy_link_sync(hdev, conn->handle, reason);
+
+ memset(&cp, 0, sizeof(cp));
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.reason = reason;
+
+ /* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when
+ * not suspending.
+ */
+ if (!hdev->suspended)
+ return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT,
+ sizeof(cp), &cp,
+ HCI_EV_DISCONN_COMPLETE,
+ HCI_CMD_TIMEOUT, NULL);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+}
+
+static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
+ struct hci_conn *conn)
+{
+ if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+ return 0;
+
+ /* Per the spec, LE Create Connection Cancel takes no parameters */
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ if (conn->type == LE_LINK)
+ return hci_le_connect_cancel_sync(hdev, conn);
+
+ if (hdev->hci_ver < BLUETOOTH_VER_1_2)
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL,
+ 6, &conn->dst, HCI_CMD_TIMEOUT);
+}
+
+static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ u8 reason)
+{
+ struct hci_cp_reject_sync_conn_req cp;
+
+ memset(&cp, 0, sizeof(cp));
+ bacpy(&cp.bdaddr, &conn->dst);
+ cp.reason = reason;
+
+ /* SCO rejection has its own limited set of
+ * allowed error values (0x0D-0x0F).
+ */
+ if (reason < 0x0d || reason > 0x0f)
+ cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ u8 reason)
+{
+ struct hci_cp_reject_conn_req cp;
+
+ if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
+ return hci_reject_sco_sync(hdev, conn, reason);
+
+ memset(&cp, 0, sizeof(cp));
+ bacpy(&cp.bdaddr, &conn->dst);
+ cp.reason = reason;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ u8 reason)
+{
+ switch (conn->state) {
+ case BT_CONNECTED:
+ case BT_CONFIG:
+ return hci_disconnect_sync(hdev, conn, reason);
+ case BT_CONNECT:
+ return hci_connect_cancel_sync(hdev, conn);
+ case BT_CONNECT2:
+ return hci_reject_conn_sync(hdev, conn, reason);
+ default:
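+ /* No HCI command is pending for the remaining states;
+ * just mark the connection closed.
+ */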
+ conn->state = BT_CLOSED;
+ break;
+ }
+
+ return 0;
+}
+
+static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
+{
+ struct hci_conn *conn, *tmp;
+ int err;
+
+ list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
+ err = hci_abort_conn_sync(hdev, conn, reason);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* This function performs the power-off HCI command sequence as follows:
+ *
+ * Clear Advertising
+ * Stop Discovery
+ * Disconnect all connections
+ * hci_dev_close_sync
+ */
+static int hci_power_off_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ /* If controller is already down there is nothing to do */
+ if (!test_bit(HCI_UP, &hdev->flags))
+ return 0;
+
+ if (test_bit(HCI_ISCAN, &hdev->flags) ||
+ test_bit(HCI_PSCAN, &hdev->flags)) {
+ err = hci_write_scan_enable_sync(hdev, 0x00);
+ if (err)
+ return err;
+ }
+
+ err = hci_clear_adv_sync(hdev, NULL, false);
+ if (err)
+ return err;
+
+ err = hci_stop_discovery_sync(hdev);
+ if (err)
+ return err;
+
+ /* Terminated due to Power Off */
+ err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
+ if (err)
+ return err;
+
+ return hci_dev_close_sync(hdev);
+}
+
+int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
+{
+ if (val)
+ return hci_power_on_sync(hdev);
+
+ return hci_power_off_sync(hdev);
+}
+
+static int hci_write_iac_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_write_current_iac_lap cp;
+
+ if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+
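+ /* IAC LAPs are encoded in little-endian byte order:
+ * LIAC = 0x9E8B00, GIAC = 0x9E8B33.
+ */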
+ if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
+ /* Limited discoverable mode */
+ cp.num_iac = min_t(u8, hdev->num_iac, 2);
+ cp.iac_lap[0] = 0x00; /* LIAC */
+ cp.iac_lap[1] = 0x8b;
+ cp.iac_lap[2] = 0x9e;
+ cp.iac_lap[3] = 0x33; /* GIAC */
+ cp.iac_lap[4] = 0x8b;
+ cp.iac_lap[5] = 0x9e;
+ } else {
+ /* General discoverable mode */
+ cp.num_iac = 1;
+ cp.iac_lap[0] = 0x33; /* GIAC */
+ cp.iac_lap[1] = 0x8b;
+ cp.iac_lap[2] = 0x9e;
+ }
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP,
+ (cp.num_iac * 3) + 1, &cp,
+ HCI_CMD_TIMEOUT);
+}
+
+int hci_update_discoverable_sync(struct hci_dev *hdev)
+{
+ int err = 0;
+
+ if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
+ err = hci_write_iac_sync(hdev);
+ if (err)
+ return err;
+
+ err = hci_update_scan_sync(hdev);
+ if (err)
+ return err;
+
+ err = hci_update_class_sync(hdev);
+ if (err)
+ return err;
+ }
+
+ /* Advertising instances don't use the global discoverable setting, so
+ * only update AD if advertising was enabled using Set Advertising.
+ */
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
+ err = hci_update_adv_data_sync(hdev, 0x00);
+ if (err)
+ return err;
+
+ /* Discoverable mode affects the local advertising
+ * address in limited privacy mode.
+ */
+ if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
+ if (ext_adv_capable(hdev))
+ err = hci_start_ext_adv_sync(hdev, 0x00);
+ else
+ err = hci_enable_advertising_sync(hdev);
+ }
+ }
+
+ return err;
+}
+
+static int update_discoverable_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_update_discoverable_sync(hdev);
+}
+
+int hci_update_discoverable(struct hci_dev *hdev)
+{
+ /* Only queue if it would have any effect */
+ if (hdev_is_powered(hdev) &&
+ hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
+ hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
+ hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
+ return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL,
+ NULL);
+
+ return 0;
+}
+
+int hci_update_connectable_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ err = hci_update_scan_sync(hdev);
+ if (err)
+ return err;
+
+ /* If BR/EDR is not enabled and we disable advertising as a
+ * by-product of disabling connectable, we need to update the
+ * advertising flags.
+ */
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
+
+ /* Update the advertising parameters if necessary */
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+ !list_empty(&hdev->adv_instances)) {
+ if (ext_adv_capable(hdev))
+ err = hci_start_ext_adv_sync(hdev,
+ hdev->cur_adv_instance);
+ else
+ err = hci_enable_advertising_sync(hdev);
+
+ if (err)
+ return err;
+ }
+
+ return hci_update_passive_scan_sync(hdev);
+}
+
+static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
+{
+ const u8 giac[3] = { 0x33, 0x8b, 0x9e };
+ const u8 liac[3] = { 0x00, 0x8b, 0x9e };
+ struct hci_cp_inquiry cp;
+
+ bt_dev_dbg(hdev, "");
+
+ if (hci_dev_test_flag(hdev, HCI_INQUIRY))
+ return 0;
+
+ hci_dev_lock(hdev);
+ hci_inquiry_cache_flush(hdev);
+ hci_dev_unlock(hdev);
+
+ memset(&cp, 0, sizeof(cp));
+
+ if (hdev->discovery.limited)
+ memcpy(&cp.lap, liac, sizeof(cp.lap));
+ else
+ memcpy(&cp.lap, giac, sizeof(cp.lap));
+
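+ /* Inquiry_Length is expressed in units of 1.28 seconds */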
+ cp.length = length;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
+{
+ u8 own_addr_type;
+ /* Accept list is not used for discovery */
+ u8 filter_policy = 0x00;
+ /* Default is to enable duplicates filter */
+ u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+ int err;
+
+ bt_dev_dbg(hdev, "");
+
+ /* If controller is scanning, it means the passive scanning is
+ * running. Thus, we should temporarily stop it in order to set the
+ * discovery scanning parameters.
+ */
+ err = hci_scan_disable_sync(hdev);
+ if (err) {
+ bt_dev_err(hdev, "Unable to disable scanning: %d", err);
+ return err;
+ }
+
+ cancel_interleave_scan(hdev);
+
+ /* Pause advertising since active scanning disables address resolution
+ * which advertising depends on in order to generate its RPAs.
+ */
+ if (use_ll_privacy(hdev)) {
+ err = hci_pause_advertising_sync(hdev);
+ if (err) {
+ bt_dev_err(hdev, "pause advertising failed: %d", err);
+ goto failed;
+ }
+ }
+
+ /* Disable address resolution while doing active scanning since the
+ * accept list shall not be used and all reports shall reach the host
+ * anyway.
+ */
+ err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
+ if (err) {
+ bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
+ err);
+ goto failed;
+ }
+
+ /* All active scans will be done with either a resolvable private
+ * address (when privacy feature has been enabled) or non-resolvable
+ * private address.
+ */
+ err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev),
+ &own_addr_type);
+ if (err < 0)
+ own_addr_type = ADDR_LE_DEV_PUBLIC;
+
+ if (hci_is_adv_monitoring(hdev)) {
+ /* Duplicate filter should be disabled when some advertisement
+ * monitor is activated, otherwise AdvMon can only receive one
+ * advertisement for one peer(*) during active scanning, and
+ * might report loss to these peers.
+ *
+ * Note that different controllers have different meanings of
+ * |duplicate|. Some of them consider packets with the same
+ * address as duplicate, and others consider packets with the
+ * same address and the same RSSI as duplicate. Although in the
+ * latter case we don't need to disable the duplicate filter,
+ * active scanning is commonly done only for a short period of
+ * time, so the power impact should be negligible.
+ */
+ filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
+ }
+
+ err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval,
+ hdev->le_scan_window_discovery,
+ own_addr_type, filter_policy, filter_dup);
+ if (!err)
+ return err;
+
+failed:
+ /* Resume advertising if it was paused */
+ if (use_ll_privacy(hdev))
+ hci_resume_advertising_sync(hdev);
+
+ /* Resume passive scanning */
+ hci_update_passive_scan_sync(hdev);
+ return err;
+}
+
+static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ bt_dev_dbg(hdev, "");
+
+ err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2);
+ if (err)
+ return err;
+
+ return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
+}
+
+int hci_start_discovery_sync(struct hci_dev *hdev)
+{
+ unsigned long timeout;
+ int err;
+
+ bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
+
+ switch (hdev->discovery.type) {
+ case DISCOV_TYPE_BREDR:
+ return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
+ case DISCOV_TYPE_INTERLEAVED:
+ /* When running simultaneous discovery, the LE scanning time
+ * should occupy the whole discovery time since BR/EDR inquiry
+ * and LE scanning are scheduled by the controller.
+ *
+ * For interleaving discovery in comparison, BR/EDR inquiry
+ * and LE scanning are done sequentially with separate
+ * timeouts.
+ */
+ if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
+ &hdev->quirks)) {
+ timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
+ /* During simultaneous discovery, we double LE scan
+ * interval. We must leave some time for the controller
+ * to do BR/EDR inquiry.
+ */
+ err = hci_start_interleaved_discovery_sync(hdev);
+ break;
+ }
+
+ timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
+ err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
+ break;
+ case DISCOV_TYPE_LE:
+ timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
+ err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (err)
+ return err;
+
+ bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
+
+ /* When service discovery is used and the controller has a
+ * strict duplicate filter, it is important to remember the
+ * start and duration of the scan. This is required for
+ * restarting scanning during the discovery phase.
+ */
+ if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
+ hdev->discovery.result_filtering) {
+ hdev->discovery.scan_start = jiffies;
+ hdev->discovery.scan_duration = timeout;
+ }
+
+ queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
+ timeout);
+ return 0;
+}
+
+static void hci_suspend_monitor_sync(struct hci_dev *hdev)
+{
+ switch (hci_get_adv_monitor_offload_ext(hdev)) {
+ case HCI_ADV_MONITOR_EXT_MSFT:
+ msft_suspend_sync(hdev);
+ break;
+ default:
+ return;
+ }
+}
+
+/* This function disables discovery and marks it as paused */
+static int hci_pause_discovery_sync(struct hci_dev *hdev)
+{
+ int old_state = hdev->discovery.state;
+ int err;
+
+ /* If discovery is already stopped/stopping/paused there is nothing to do */
+ if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING ||
+ hdev->discovery_paused)
+ return 0;
+
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
+ err = hci_stop_discovery_sync(hdev);
+ if (err)
+ return err;
+
+ hdev->discovery_paused = true;
+ hdev->discovery_old_state = old_state;
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+
+ return 0;
+}
+
+static int hci_update_event_filter_sync(struct hci_dev *hdev)
+{
+ struct bdaddr_list_with_flags *b;
+ u8 scan = SCAN_DISABLED;
+ bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
+ int err;
+
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ return 0;
+
+ /* Always clear event filter when starting */
+ hci_clear_event_filter_sync(hdev);
+
+ list_for_each_entry(b, &hdev->accept_list, list) {
+ if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
+ b->current_flags))
+ continue;
+
+ bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
+
+ err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
+ HCI_CONN_SETUP_ALLOW_BDADDR,
+ &b->bdaddr,
+ HCI_CONN_SETUP_AUTO_ON);
+ if (err)
+ bt_dev_dbg(hdev, "Failed to set event filter for %pMR",
+ &b->bdaddr);
+ else
+ scan = SCAN_PAGE;
+ }
+
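+ /* Only write the page scan setting when it actually changes */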
+ if (!!scan != scanning)
+ hci_write_scan_enable_sync(hdev, scan);
+
+ return 0;
+}
+
+/* This function performs the HCI suspend procedures in the following order:
+ *
+ * Pause discovery (active scanning/inquiry)
+ * Pause Directed Advertising/Advertising
+ * Disconnect all connections
+ * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup
+ * otherwise:
+ * Update event mask (only set events that are allowed to wake up the host)
+ * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP)
+ * Update passive scanning (lower duty cycle)
+ * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE
+ */
+int hci_suspend_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ /* If already marked as suspended there is nothing to do */
+ if (hdev->suspended)
+ return 0;
+
+ /* Mark device as suspended */
+ hdev->suspended = true;
+
+ /* Pause discovery if not already stopped */
+ hci_pause_discovery_sync(hdev);
+
+ /* Pause other advertisements */
+ hci_pause_advertising_sync(hdev);
+
+ /* Disable page scan if enabled */
+ if (test_bit(HCI_PSCAN, &hdev->flags))
+ hci_write_scan_enable_sync(hdev, SCAN_DISABLED);
+
+ /* Suspend monitor filters */
+ hci_suspend_monitor_sync(hdev);
+
+ /* Prevent disconnects from causing scanning to be re-enabled */
+ hdev->scanning_paused = true;
+
+ /* Soft disconnect everything (power off) */
+ err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
+ if (err) {
+ /* Set state to BT_RUNNING so resume doesn't notify */
+ hdev->suspend_state = BT_RUNNING;
+ hci_resume_sync(hdev);
+ return err;
+ }
+
+ /* Only configure accept list if disconnect succeeded and wake
+ * isn't being prevented.
+ */
+ if (!hdev->wakeup || !hdev->wakeup(hdev)) {
+ hdev->suspend_state = BT_SUSPEND_DISCONNECT;
+ return 0;
+ }
+
+ /* Unpause to take care of updating scanning params */
+ hdev->scanning_paused = false;
+
+ /* Update event mask so only the allowed event can wakeup the host */
+ hci_set_event_mask_sync(hdev);
+
+ /* Enable event filter for paired devices */
+ hci_update_event_filter_sync(hdev);
+
+ /* Update LE passive scan if enabled */
+ hci_update_passive_scan_sync(hdev);
+
+ /* Pause scan changes again. */
+ hdev->scanning_paused = true;
+
+ hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE;
+
+ return 0;
+}
+
+/* This function resumes discovery */
+static int hci_resume_discovery_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ /* If discovery was not paused there is nothing to do */
+ if (!hdev->discovery_paused)
+ return 0;
+
+ hdev->discovery_paused = false;
+
+ hci_discovery_set_state(hdev, DISCOVERY_STARTING);
+
+ err = hci_start_discovery_sync(hdev);
+
+ hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
+ DISCOVERY_FINDING);
+
+ return err;
+}
+
+static void hci_resume_monitor_sync(struct hci_dev *hdev)
+{
+ switch (hci_get_adv_monitor_offload_ext(hdev)) {
+ case HCI_ADV_MONITOR_EXT_MSFT:
+ msft_resume_sync(hdev);
+ break;
+ default:
+ return;
+ }
+}
+
+/* This function performs the HCI resume procedures in the following order:
+ *
+ * Restore event mask
+ * Clear event filter
+ * Update passive scanning (normal duty cycle)
+ * Resume Directed Advertising/Advertising
+ * Resume discovery (active scanning/inquiry)
+ */
+int hci_resume_sync(struct hci_dev *hdev)
+{
+ /* If not marked as suspended there is nothing to do */
+ if (!hdev->suspended)
+ return 0;
+
+ hdev->suspended = false;
+ hdev->scanning_paused = false;
+
+ /* Restore event mask */
+ hci_set_event_mask_sync(hdev);
+
+ /* Clear any event filters and restore scan state */
+ hci_clear_event_filter_sync(hdev);
+ hci_update_scan_sync(hdev);
+
+ /* Reset passive scanning to normal */
+ hci_update_passive_scan_sync(hdev);
+
+ /* Resume monitor filters */
+ hci_resume_monitor_sync(hdev);
+
+ /* Resume other advertisements */
+ hci_resume_advertising_sync(hdev);
+
+ /* Resume discovery */
+ hci_resume_discovery_sync(hdev);
+
+ return 0;
+}
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 7827639ecf5c..4e3e0451b08c 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -86,6 +86,8 @@ static void bt_host_release(struct device *dev)
if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
hci_release_dev(hdev);
+ else
+ kfree(hdev);
module_put(THIS_MODULE);
}
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 160c016a5dfb..4574c5cb1b59 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -172,6 +172,21 @@ done:
return err;
}
+static void l2cap_sock_init_pid(struct sock *sk)
+{
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+
+	/* Only L2CAP_MODE_EXT_FLOWCTL ever needs to access the PID in order
+	 * to group the channels being requested.
+	 */
+ if (chan->mode != L2CAP_MODE_EXT_FLOWCTL)
+ return;
+
+ spin_lock(&sk->sk_peer_lock);
+ sk->sk_peer_pid = get_pid(task_tgid(current));
+ spin_unlock(&sk->sk_peer_lock);
+}
+
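
The writer above takes sk_peer_lock before swapping in the new struct pid reference; any reader has to do the same. A hedged sketch of the matching read side (the helper name is hypothetical, not part of this patch):

	static pid_t example_peer_pid(struct sock *sk)
	{
		pid_t ret = 0;

		spin_lock(&sk->sk_peer_lock);
		if (sk->sk_peer_pid)
			ret = pid_vnr(sk->sk_peer_pid);
		spin_unlock(&sk->sk_peer_lock);

		return ret;
	}
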
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
int alen, int flags)
{
@@ -243,6 +258,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
if (chan->psm && bdaddr_type_is_le(chan->src_type) && !chan->mode)
chan->mode = L2CAP_MODE_LE_FLOWCTL;
+ l2cap_sock_init_pid(sk);
+
err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
&la.l2_bdaddr, la.l2_bdaddr_type);
if (err)
@@ -298,6 +315,8 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
goto done;
}
+ l2cap_sock_init_pid(sk);
+
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 3e5283607b97..f8f74d344297 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -39,6 +39,7 @@
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
+#include "aosp.h"
#define MGMT_VERSION 1
#define MGMT_REVISION 21
@@ -276,10 +277,39 @@ static const u8 mgmt_status_table[] = {
MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
};
-static u8 mgmt_status(u8 hci_status)
+static u8 mgmt_errno_status(int err)
{
- if (hci_status < ARRAY_SIZE(mgmt_status_table))
- return mgmt_status_table[hci_status];
+ switch (err) {
+ case 0:
+ return MGMT_STATUS_SUCCESS;
+ case -EPERM:
+ return MGMT_STATUS_REJECTED;
+ case -EINVAL:
+ return MGMT_STATUS_INVALID_PARAMS;
+ case -EOPNOTSUPP:
+ return MGMT_STATUS_NOT_SUPPORTED;
+ case -EBUSY:
+ return MGMT_STATUS_BUSY;
+ case -ETIMEDOUT:
+ return MGMT_STATUS_AUTH_FAILED;
+ case -ENOMEM:
+ return MGMT_STATUS_NO_RESOURCES;
+ case -EISCONN:
+ return MGMT_STATUS_ALREADY_CONNECTED;
+ case -ENOTCONN:
+ return MGMT_STATUS_DISCONNECTED;
+ }
+
+ return MGMT_STATUS_FAILED;
+}
+
+static u8 mgmt_status(int err)
+{
+ if (err < 0)
+ return mgmt_errno_status(err);
+
+ if (err < ARRAY_SIZE(mgmt_status_table))
+ return mgmt_status_table[err];
return MGMT_STATUS_FAILED;
}
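
With this change mgmt_status() accepts two domains in one argument: negative kernel errnos (routed through mgmt_errno_status()) and non-negative HCI status bytes (still looked up in mgmt_status_table). A few illustrative calls:

	u8 s1 = mgmt_status(-EOPNOTSUPP); /* -> MGMT_STATUS_NOT_SUPPORTED */
	u8 s2 = mgmt_status(-ECHILD);     /* unmapped errno -> MGMT_STATUS_FAILED */
	u8 s3 = mgmt_status(0x05);        /* HCI status byte -> mgmt_status_table[0x05] */
	u8 s4 = mgmt_status(0);           /* -> MGMT_STATUS_SUCCESS */
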
@@ -810,12 +840,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
settings |= MGMT_SETTING_SECURE_CONN;
settings |= MGMT_SETTING_PRIVACY;
settings |= MGMT_SETTING_STATIC_ADDRESS;
-
- /* When the experimental feature for LL Privacy support is
- * enabled, then advertising is no longer supported.
- */
- if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
- settings |= MGMT_SETTING_ADVERTISING;
+ settings |= MGMT_SETTING_ADVERTISING;
}
if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
@@ -903,13 +928,6 @@ static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
-static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
- struct hci_dev *hdev,
- const void *data)
-{
- return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
-}
-
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
struct mgmt_pending_cmd *cmd;
@@ -951,32 +969,41 @@ bool mgmt_get_connectable(struct hci_dev *hdev)
return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
+static int service_cache_sync(struct hci_dev *hdev, void *data)
+{
+ hci_update_eir_sync(hdev);
+ hci_update_class_sync(hdev);
+
+ return 0;
+}
+
static void service_cache_off(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
service_cache.work);
- struct hci_request req;
if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
return;
- hci_req_init(&req, hdev);
-
- hci_dev_lock(hdev);
-
- __hci_req_update_eir(&req);
- __hci_req_update_class(&req);
-
- hci_dev_unlock(hdev);
+ hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
+}
- hci_req_run(&req, NULL);
+static int rpa_expired_sync(struct hci_dev *hdev, void *data)
+{
+	/* The generation of a new RPA and programming it into the
+	 * controller happens in the enable-advertising helpers called
+	 * below.
+	 */
+ if (ext_adv_capable(hdev))
+ return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
+ else
+ return hci_enable_advertising_sync(hdev);
}
static void rpa_expired(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
rpa_expired.work);
- struct hci_request req;
bt_dev_dbg(hdev, "");
@@ -985,16 +1012,7 @@ static void rpa_expired(struct work_struct *work)
if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
return;
- /* The generation of a new RPA and programming it into the
- * controller happens in the hci_req_enable_advertising()
- * function.
- */
- hci_req_init(&req, hdev);
- if (ext_adv_capable(hdev))
- __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
- else
- __hci_req_enable_advertising(&req);
- hci_req_run(&req, NULL);
+ hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
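
rpa_expired is delayed work that is rearmed whenever a fresh RPA is programmed; the handler above only requeues the advertising refresh onto the cmd_sync context. A hedged sketch of the arming side (the real call sites are in the address-update paths, not in this hunk):

	/* Rearm the rotation timer after programming a new RPA;
	 * hdev->rpa_timeout is in seconds.
	 */
	queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
			   msecs_to_jiffies(hdev->rpa_timeout * 1000));
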
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
@@ -1131,16 +1149,6 @@ static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
sizeof(settings));
}
-static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
-{
- bt_dev_dbg(hdev, "status 0x%02x", status);
-
- if (hci_conn_count(hdev) == 0) {
- cancel_delayed_work(&hdev->power_off);
- queue_work(hdev->req_workqueue, &hdev->power_off.work);
- }
-}
-
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
struct mgmt_ev_advertising_added ev;
@@ -1168,38 +1176,77 @@ static void cancel_adv_timeout(struct hci_dev *hdev)
}
}
-static int clean_up_hci_state(struct hci_dev *hdev)
+/* This function requires the caller holds hdev->lock */
+static void restart_le_actions(struct hci_dev *hdev)
{
- struct hci_request req;
- struct hci_conn *conn;
- bool discov_stopped;
- int err;
+ struct hci_conn_params *p;
- hci_req_init(&req, hdev);
+ list_for_each_entry(p, &hdev->le_conn_params, list) {
+		/* Needed for the AUTO_OFF case where the device might
+		 * not "really" have been powered off.
+		 */
+ list_del_init(&p->action);
- if (test_bit(HCI_ISCAN, &hdev->flags) ||
- test_bit(HCI_PSCAN, &hdev->flags)) {
- u8 scan = 0x00;
- hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ switch (p->auto_connect) {
+ case HCI_AUTO_CONN_DIRECT:
+ case HCI_AUTO_CONN_ALWAYS:
+ list_add(&p->action, &hdev->pend_le_conns);
+ break;
+ case HCI_AUTO_CONN_REPORT:
+ list_add(&p->action, &hdev->pend_le_reports);
+ break;
+ default:
+ break;
+ }
}
+}
+
+static int new_settings(struct hci_dev *hdev, struct sock *skip)
+{
+ __le32 ev = cpu_to_le32(get_current_settings(hdev));
- hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
+ return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
+ sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
+}
- if (hci_dev_test_flag(hdev, HCI_LE_ADV))
- __hci_req_disable_advertising(&req);
+static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_mode *cp = cmd->param;
- discov_stopped = hci_req_stop_discovery(&req);
+ bt_dev_dbg(hdev, "err %d", err);
- list_for_each_entry(conn, &hdev->conn_hash.list, list) {
- /* 0x15 == Terminated due to Power Off */
- __hci_abort_conn(&req, conn, 0x15);
+ if (!err) {
+ if (cp->val) {
+ hci_dev_lock(hdev);
+ restart_le_actions(hdev);
+ hci_update_passive_scan(hdev);
+ hci_dev_unlock(hdev);
+ }
+
+ send_settings_rsp(cmd->sk, cmd->opcode, hdev);
+
+		/* Only call new_settings() for power on, as power off is
+		 * deferred to the hdev->power_off work, which does call
+		 * hci_dev_do_close().
+		 */
+ if (cp->val)
+ new_settings(hdev, cmd->sk);
+ } else {
+ mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
+ mgmt_status(err));
}
- err = hci_req_run(&req, clean_up_hci_complete);
- if (!err && discov_stopped)
- hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
+ mgmt_pending_free(cmd);
+}
- return err;
+static int set_powered_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_mode *cp = cmd->param;
+
+ BT_DBG("%s", hdev->name);
+
+ return hci_set_powered_sync(hdev, cp->val);
}
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -1228,43 +1275,20 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
+ cmd = mgmt_pending_new(sk, MGMT_OP_SET_POWERED, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- if (cp->val) {
- queue_work(hdev->req_workqueue, &hdev->power_on);
- err = 0;
- } else {
- /* Disconnect connections, stop scans, etc */
- err = clean_up_hci_state(hdev);
- if (!err)
- queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
- HCI_POWER_OFF_TIMEOUT);
-
- /* ENODATA means there were no HCI commands queued */
- if (err == -ENODATA) {
- cancel_delayed_work(&hdev->power_off);
- queue_work(hdev->req_workqueue, &hdev->power_off.work);
- err = 0;
- }
- }
+ err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
+ mgmt_set_powered_complete);
failed:
hci_dev_unlock(hdev);
return err;
}
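
set_powered() above is the first instance of the conversion pattern this patch applies across mgmt.c: allocate an unlisted pending command with mgmt_pending_new() (so pending_find() no longer sees it and the completion callback owns its lifetime), queue the work with hci_cmd_sync_queue(), and free the command if queueing fails. A condensed sketch of the shape, with hypothetical opcode and callback names:

	cmd = mgmt_pending_new(sk, MGMT_OP_EXAMPLE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, example_sync, cmd,
					 example_complete);
	if (err < 0 && cmd)
		mgmt_pending_free(cmd);
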
-static int new_settings(struct hci_dev *hdev, struct sock *skip)
-{
- __le32 ev = cpu_to_le32(get_current_settings(hdev));
-
- return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
- sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
-}
-
int mgmt_new_settings(struct hci_dev *hdev)
{
return new_settings(hdev, NULL);
@@ -1346,23 +1370,20 @@ static u8 mgmt_le_support(struct hci_dev *hdev)
return MGMT_STATUS_SUCCESS;
}
-void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
+static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
+ int err)
{
- struct mgmt_pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd = data;
- bt_dev_dbg(hdev, "status 0x%02x", status);
+ bt_dev_dbg(hdev, "err %d", err);
hci_dev_lock(hdev);
- cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
- if (!cmd)
- goto unlock;
-
- if (status) {
- u8 mgmt_err = mgmt_status(status);
+ if (err) {
+ u8 mgmt_err = mgmt_status(err);
mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
- goto remove_cmd;
+ goto done;
}
if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
@@ -1374,13 +1395,18 @@ void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
new_settings(hdev, cmd->sk);
-remove_cmd:
- mgmt_pending_remove(cmd);
-
-unlock:
+done:
+ mgmt_pending_free(cmd);
hci_dev_unlock(hdev);
}
+static int set_discoverable_sync(struct hci_dev *hdev, void *data)
+{
+ BT_DBG("%s", hdev->name);
+
+ return hci_update_discoverable_sync(hdev);
+}
+
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
@@ -1479,7 +1505,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
+ cmd = mgmt_pending_new(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -1503,39 +1529,34 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
else
hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
- queue_work(hdev->req_workqueue, &hdev->discoverable_update);
- err = 0;
+ err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
+ mgmt_set_discoverable_complete);
failed:
hci_dev_unlock(hdev);
return err;
}
-void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
+static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
+ int err)
{
- struct mgmt_pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd = data;
- bt_dev_dbg(hdev, "status 0x%02x", status);
+ bt_dev_dbg(hdev, "err %d", err);
hci_dev_lock(hdev);
- cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
- if (!cmd)
- goto unlock;
-
- if (status) {
- u8 mgmt_err = mgmt_status(status);
+ if (err) {
+ u8 mgmt_err = mgmt_status(err);
mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
- goto remove_cmd;
+ goto done;
}
send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
new_settings(hdev, cmd->sk);
-remove_cmd:
- mgmt_pending_remove(cmd);
-
-unlock:
+done:
+ mgmt_pending_free(cmd);
hci_dev_unlock(hdev);
}
@@ -1561,13 +1582,20 @@ static int set_connectable_update_settings(struct hci_dev *hdev,
if (changed) {
hci_req_update_scan(hdev);
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
return new_settings(hdev, sk);
}
return 0;
}
+static int set_connectable_sync(struct hci_dev *hdev, void *data)
+{
+ BT_DBG("%s", hdev->name);
+
+ return hci_update_connectable_sync(hdev);
+}
+
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
@@ -1600,7 +1628,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
+ cmd = mgmt_pending_new(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -1617,8 +1645,8 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
}
- queue_work(hdev->req_workqueue, &hdev->connectable_update);
- err = 0;
+ err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
+ mgmt_set_connectable_complete);
failed:
hci_dev_unlock(hdev);
@@ -1653,12 +1681,7 @@ static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
/* In limited privacy mode the change of bondable mode
* may affect the local advertising address.
*/
- if (hdev_is_powered(hdev) &&
- hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
- hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
- hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
- queue_work(hdev->req_workqueue,
- &hdev->discoverable_update);
+ hci_update_discoverable(hdev);
err = new_settings(hdev, sk);
}
@@ -1737,6 +1760,69 @@ failed:
return err;
}
+static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct cmd_lookup match = { NULL, hdev };
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_mode *cp = cmd->param;
+ u8 enable = cp->val;
+ bool changed;
+
+ if (err) {
+ u8 mgmt_err = mgmt_status(err);
+
+ if (enable && hci_dev_test_and_clear_flag(hdev,
+ HCI_SSP_ENABLED)) {
+ hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
+ new_settings(hdev, NULL);
+ }
+
+ mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
+ &mgmt_err);
+ return;
+ }
+
+ if (enable) {
+ changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
+ } else {
+ changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
+
+ if (!changed)
+ changed = hci_dev_test_and_clear_flag(hdev,
+ HCI_HS_ENABLED);
+ else
+ hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
+ }
+
+ mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
+
+ if (changed)
+ new_settings(hdev, match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+
+ hci_update_eir_sync(hdev);
+}
+
+static int set_ssp_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_mode *cp = cmd->param;
+ bool changed = false;
+ int err;
+
+ if (cp->val)
+ changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
+
+ err = hci_write_ssp_mode_sync(hdev, cp->val);
+
+ if (!err && changed)
+ hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
+
+ return err;
+}
+
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
struct mgmt_mode *cp = data;
@@ -1798,19 +1884,18 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
}
cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto failed;
- }
-
- if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
- hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
- sizeof(cp->val), &cp->val);
+ else
+ err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
+ set_ssp_complete);
- err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
if (err < 0) {
- mgmt_pending_remove(cmd);
- goto failed;
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+ MGMT_STATUS_FAILED);
+
+ if (cmd)
+ mgmt_pending_remove(cmd);
}
failed:
@@ -1879,18 +1964,17 @@ unlock:
return err;
}
-static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
struct cmd_lookup match = { NULL, hdev };
+ u8 status = mgmt_status(err);
- hci_dev_lock(hdev);
+ bt_dev_dbg(hdev, "err %d", err);
if (status) {
- u8 mgmt_err = mgmt_status(status);
-
mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
- &mgmt_err);
- goto unlock;
+ &status);
+ return;
}
mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
@@ -1899,39 +1983,54 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
if (match.sk)
sock_put(match.sk);
+}
+
+static int set_le_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_mode *cp = cmd->param;
+ u8 val = !!cp->val;
+ int err;
+
+ if (!val) {
+ if (hci_dev_test_flag(hdev, HCI_LE_ADV))
+ hci_disable_advertising_sync(hdev);
+
+ if (ext_adv_capable(hdev))
+ hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
+ } else {
+ hci_dev_set_flag(hdev, HCI_LE_ENABLED);
+ }
+
+ err = hci_write_le_host_supported_sync(hdev, val, 0);
/* Make sure the controller has a good default for
* advertising data. Restrict the update to when LE
* has actually been enabled. During power on, the
* update in powered_update_hci will take care of it.
*/
- if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
- struct hci_request req;
- hci_req_init(&req, hdev);
+ if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
if (ext_adv_capable(hdev)) {
- int err;
+ int status;
- err = __hci_req_setup_ext_adv_instance(&req, 0x00);
- if (!err)
- __hci_req_update_scan_rsp_data(&req, 0x00);
+ status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
+ if (!status)
+ hci_update_scan_rsp_data_sync(hdev, 0x00);
} else {
- __hci_req_update_adv_data(&req, 0x00);
- __hci_req_update_scan_rsp_data(&req, 0x00);
+ hci_update_adv_data_sync(hdev, 0x00);
+ hci_update_scan_rsp_data_sync(hdev, 0x00);
}
- hci_req_run(&req, NULL);
- hci_update_background_scan(hdev);
+
+ hci_update_passive_scan(hdev);
}
-unlock:
- hci_dev_unlock(hdev);
+ return err;
}
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
struct mgmt_mode *cp = data;
- struct hci_cp_write_le_host_supported hci_cp;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
int err;
u8 val, enabled;
@@ -2001,33 +2100,20 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
}
cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
-
- hci_req_init(&req, hdev);
-
- memset(&hci_cp, 0, sizeof(hci_cp));
+ else
+ err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
+ set_le_complete);
- if (val) {
- hci_cp.le = val;
- hci_cp.simul = 0x00;
- } else {
- if (hci_dev_test_flag(hdev, HCI_LE_ADV))
- __hci_req_disable_advertising(&req);
+ if (err < 0) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+ MGMT_STATUS_FAILED);
- if (ext_adv_capable(hdev))
- __hci_req_clear_ext_adv_sets(&req);
+ if (cmd)
+ mgmt_pending_remove(cmd);
}
- hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
- &hci_cp);
-
- err = hci_req_run(&req, le_enable_complete);
- if (err < 0)
- mgmt_pending_remove(cmd);
-
unlock:
hci_dev_unlock(hdev);
return err;
@@ -2075,37 +2161,33 @@ static u8 get_uuid_size(const u8 *uuid)
return 16;
}
-static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
+static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
- struct mgmt_pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd = data;
- hci_dev_lock(hdev);
-
- cmd = pending_find(mgmt_op, hdev);
- if (!cmd)
- goto unlock;
+ bt_dev_dbg(hdev, "err %d", err);
mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
- mgmt_status(status), hdev->dev_class, 3);
-
- mgmt_pending_remove(cmd);
+ mgmt_status(err), hdev->dev_class, 3);
-unlock:
- hci_dev_unlock(hdev);
+ mgmt_pending_free(cmd);
}
-static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
- bt_dev_dbg(hdev, "status 0x%02x", status);
+ int err;
+
+ err = hci_update_class_sync(hdev);
+ if (err)
+ return err;
- mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
+ return hci_update_eir_sync(hdev);
}
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
struct mgmt_cp_add_uuid *cp = data;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
struct bt_uuid *uuid;
int err;
@@ -2131,28 +2213,17 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
list_add_tail(&uuid->list, &hdev->uuids);
- hci_req_init(&req, hdev);
-
- __hci_req_update_class(&req);
- __hci_req_update_eir(&req);
-
- err = hci_req_run(&req, add_uuid_complete);
- if (err < 0) {
- if (err != -ENODATA)
- goto failed;
-
- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
- hdev->dev_class, 3);
- goto failed;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
+ cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- err = 0;
+ err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
+ if (err < 0) {
+ mgmt_pending_free(cmd);
+ goto failed;
+ }
failed:
hci_dev_unlock(hdev);
@@ -2173,11 +2244,15 @@ static bool enable_service_cache(struct hci_dev *hdev)
return false;
}
-static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
- bt_dev_dbg(hdev, "status 0x%02x", status);
+ int err;
+
+ err = hci_update_class_sync(hdev);
+ if (err)
+ return err;
- mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
+ return hci_update_eir_sync(hdev);
}
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -2187,7 +2262,6 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
struct mgmt_pending_cmd *cmd;
struct bt_uuid *match, *tmp;
u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
- struct hci_request req;
int err, found;
bt_dev_dbg(hdev, "sock %p", sk);
@@ -2231,39 +2305,35 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
}
update_class:
- hci_req_init(&req, hdev);
-
- __hci_req_update_class(&req);
- __hci_req_update_eir(&req);
-
- err = hci_req_run(&req, remove_uuid_complete);
- if (err < 0) {
- if (err != -ENODATA)
- goto unlock;
-
- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
- hdev->dev_class, 3);
- goto unlock;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
+ cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto unlock;
}
- err = 0;
+ err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
+ mgmt_class_complete);
+ if (err < 0)
+ mgmt_pending_free(cmd);
unlock:
hci_dev_unlock(hdev);
return err;
}
-static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+static int set_class_sync(struct hci_dev *hdev, void *data)
{
- bt_dev_dbg(hdev, "status 0x%02x", status);
+ int err = 0;
+
+ if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
+ cancel_delayed_work_sync(&hdev->service_cache);
+ err = hci_update_eir_sync(hdev);
+ }
+
+ if (err)
+ return err;
- mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
+ return hci_update_class_sync(hdev);
}
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -2271,7 +2341,6 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
{
struct mgmt_cp_set_dev_class *cp = data;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
int err;
bt_dev_dbg(hdev, "sock %p", sk);
@@ -2303,34 +2372,16 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- hci_req_init(&req, hdev);
-
- if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
- hci_dev_unlock(hdev);
- cancel_delayed_work_sync(&hdev->service_cache);
- hci_dev_lock(hdev);
- __hci_req_update_eir(&req);
- }
-
- __hci_req_update_class(&req);
-
- err = hci_req_run(&req, set_class_complete);
- if (err < 0) {
- if (err != -ENODATA)
- goto unlock;
-
- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
- hdev->dev_class, 3);
- goto unlock;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
+ cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto unlock;
}
- err = 0;
+ err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
+ mgmt_class_complete);
+ if (err < 0)
+ mgmt_pending_free(cmd);
unlock:
hci_dev_unlock(hdev);
@@ -3228,65 +3279,70 @@ static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
-static void adv_expire(struct hci_dev *hdev, u32 flags)
+static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
struct adv_info *adv_instance;
- struct hci_request req;
- int err;
adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
if (!adv_instance)
- return;
+ return 0;
/* stop if current instance doesn't need to be changed */
if (!(adv_instance->flags & flags))
- return;
+ return 0;
cancel_adv_timeout(hdev);
adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
if (!adv_instance)
- return;
+ return 0;
- hci_req_init(&req, hdev);
- err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
- true);
- if (err)
- return;
+ hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
- hci_req_run(&req, NULL);
+ return 0;
}
-static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+static int name_changed_sync(struct hci_dev *hdev, void *data)
{
- struct mgmt_cp_set_local_name *cp;
- struct mgmt_pending_cmd *cmd;
-
- bt_dev_dbg(hdev, "status 0x%02x", status);
-
- hci_dev_lock(hdev);
+ return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
+}
- cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
- if (!cmd)
- goto unlock;
+static void set_name_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_set_local_name *cp = cmd->param;
+ u8 status = mgmt_status(err);
- cp = cmd->param;
+ bt_dev_dbg(hdev, "err %d", err);
if (status) {
mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
- mgmt_status(status));
+ status);
} else {
mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
cp, sizeof(*cp));
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
- adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
+ hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
}
mgmt_pending_remove(cmd);
+}
-unlock:
- hci_dev_unlock(hdev);
+static int set_name_sync(struct hci_dev *hdev, void *data)
+{
+ if (lmp_bredr_capable(hdev)) {
+ hci_update_name_sync(hdev);
+ hci_update_eir_sync(hdev);
+ }
+
+	/* The name is stored in the scan response data, so there is
+	 * no need to update the advertising data here.
+	 */
+ if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
+ hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
+
+ return 0;
}
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -3294,7 +3350,6 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
{
struct mgmt_cp_set_local_name *cp = data;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
int err;
bt_dev_dbg(hdev, "sock %p", sk);
@@ -3330,35 +3385,34 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
}
cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto failed;
- }
+ else
+ err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
+ set_name_complete);
- memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
+ if (err < 0) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
+ MGMT_STATUS_FAILED);
- hci_req_init(&req, hdev);
+ if (cmd)
+ mgmt_pending_remove(cmd);
- if (lmp_bredr_capable(hdev)) {
- __hci_req_update_name(&req);
- __hci_req_update_eir(&req);
+ goto failed;
}
- /* The name is stored in the scan response data and so
- * no need to update the advertising data here.
- */
- if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
- __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
-
- err = hci_req_run(&req, set_name_complete);
- if (err < 0)
- mgmt_pending_remove(cmd);
+ memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
failed:
hci_dev_unlock(hdev);
return err;
}
+static int appearance_changed_sync(struct hci_dev *hdev, void *data)
+{
+ return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
+}
+
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
@@ -3380,7 +3434,8 @@ static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
hdev->appearance = appearance;
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
- adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
+ hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
+ NULL);
ext_info_changed(hdev, sk);
}
@@ -3426,23 +3481,26 @@ int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
sizeof(ev), skip);
}
-static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
- u16 opcode, struct sk_buff *skb)
+static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
- struct mgmt_pending_cmd *cmd;
-
- bt_dev_dbg(hdev, "status 0x%02x", status);
+ struct mgmt_pending_cmd *cmd = data;
+ struct sk_buff *skb = cmd->skb;
+ u8 status = mgmt_status(err);
- hci_dev_lock(hdev);
+ if (!status) {
+ if (!skb)
+ status = MGMT_STATUS_FAILED;
+ else if (IS_ERR(skb))
+ status = mgmt_status(PTR_ERR(skb));
+ else
+ status = mgmt_status(skb->data[0]);
+ }
- cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
- if (!cmd)
- goto unlock;
+ bt_dev_dbg(hdev, "status %d", status);
if (status) {
mgmt_cmd_status(cmd->sk, hdev->id,
- MGMT_OP_SET_PHY_CONFIGURATION,
- mgmt_status(status));
+ MGMT_OP_SET_PHY_CONFIGURATION, status);
} else {
mgmt_cmd_complete(cmd->sk, hdev->id,
MGMT_OP_SET_PHY_CONFIGURATION, 0,
@@ -3451,19 +3509,56 @@ static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
mgmt_phy_configuration_changed(hdev, cmd->sk);
}
+ if (skb && !IS_ERR(skb))
+ kfree_skb(skb);
+
mgmt_pending_remove(cmd);
+}
-unlock:
- hci_dev_unlock(hdev);
+static int set_default_phy_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_set_phy_configuration *cp = cmd->param;
+ struct hci_cp_le_set_default_phy cp_phy;
+ u32 selected_phys = __le32_to_cpu(cp->selected_phys);
+
+ memset(&cp_phy, 0, sizeof(cp_phy));
+
+ if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
+ cp_phy.all_phys |= 0x01;
+
+ if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
+ cp_phy.all_phys |= 0x02;
+
+ if (selected_phys & MGMT_PHY_LE_1M_TX)
+ cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
+
+ if (selected_phys & MGMT_PHY_LE_2M_TX)
+ cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
+
+ if (selected_phys & MGMT_PHY_LE_CODED_TX)
+ cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
+
+ if (selected_phys & MGMT_PHY_LE_1M_RX)
+ cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
+
+ if (selected_phys & MGMT_PHY_LE_2M_RX)
+ cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
+
+ if (selected_phys & MGMT_PHY_LE_CODED_RX)
+ cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
+
+ cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
+ sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
+
+ return 0;
}
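
The bit translation above is mechanical; one worked example, with illustrative input values derived from the mask logic above:

	/* selected_phys = MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_1M_RX | MGMT_PHY_LE_2M_RX
	 *   all_phys = 0x00  (both directions explicitly constrained)
	 *   tx_phys  = HCI_LE_SET_PHY_1M                      (0x01)
	 *   rx_phys  = HCI_LE_SET_PHY_1M | HCI_LE_SET_PHY_2M  (0x03)
	 * With no *_TX bits selected, all_phys |= 0x01 instead, i.e. "no
	 * transmitter preference" per the LE Set Default PHY command.
	 */
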
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_cp_set_phy_configuration *cp = data;
- struct hci_cp_le_set_default_phy cp_phy;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
u16 pkt_type = (HCI_DH1 | HCI_DM1);
bool changed = false;
@@ -3567,44 +3662,20 @@ static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
-
- hci_req_init(&req, hdev);
-
- memset(&cp_phy, 0, sizeof(cp_phy));
-
- if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
- cp_phy.all_phys |= 0x01;
-
- if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
- cp_phy.all_phys |= 0x02;
-
- if (selected_phys & MGMT_PHY_LE_1M_TX)
- cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
-
- if (selected_phys & MGMT_PHY_LE_2M_TX)
- cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
-
- if (selected_phys & MGMT_PHY_LE_CODED_TX)
- cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
-
- if (selected_phys & MGMT_PHY_LE_1M_RX)
- cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
-
- if (selected_phys & MGMT_PHY_LE_2M_RX)
- cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
-
- if (selected_phys & MGMT_PHY_LE_CODED_RX)
- cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
+ else
+ err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
+ set_default_phy_complete);
- hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
+ if (err < 0) {
+ err = mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_PHY_CONFIGURATION,
+ MGMT_STATUS_FAILED);
- err = hci_req_run_skb(&req, set_default_phy_complete);
- if (err < 0)
- mgmt_pending_remove(cmd);
+ if (cmd)
+ mgmt_pending_remove(cmd);
+ }
unlock:
hci_dev_unlock(hdev);
@@ -3852,7 +3923,7 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
idx++;
}
- if (hdev && use_ll_privacy(hdev)) {
+ if (hdev && ll_privacy_capable(hdev)) {
if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
flags = BIT(0) | BIT(1);
else
@@ -3863,7 +3934,8 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
idx++;
}
- if (hdev && hdev->set_quality_report) {
+ if (hdev && (aosp_has_quality_report(hdev) ||
+ hdev->set_quality_report)) {
if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
flags = BIT(0);
else
@@ -3927,7 +3999,9 @@ static int exp_debug_feature_changed(bool enabled, struct sock *skip)
}
#endif
-static int exp_quality_report_feature_changed(bool enabled, struct sock *skip)
+static int exp_quality_report_feature_changed(bool enabled,
+ struct hci_dev *hdev,
+ struct sock *skip)
{
struct mgmt_ev_exp_feature_changed ev;
@@ -3935,7 +4009,7 @@ static int exp_quality_report_feature_changed(bool enabled, struct sock *skip)
memcpy(ev.uuid, quality_report_uuid, 16);
ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
- return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
+ return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
&ev, sizeof(ev),
HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
@@ -4125,7 +4199,7 @@ static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
val = !!cp->param[0];
changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
- if (!hdev->set_quality_report) {
+ if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
err = mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_EXP_FEATURE,
MGMT_STATUS_NOT_SUPPORTED);
@@ -4133,13 +4207,18 @@ static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
}
if (changed) {
- err = hdev->set_quality_report(hdev, val);
+ if (hdev->set_quality_report)
+ err = hdev->set_quality_report(hdev, val);
+ else
+ err = aosp_set_quality_report(hdev, val);
+
if (err) {
err = mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_EXP_FEATURE,
MGMT_STATUS_FAILED);
goto unlock_quality_report;
}
+
if (val)
hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
else
@@ -4151,19 +4230,20 @@ static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
memcpy(rp.uuid, quality_report_uuid, 16);
rp.flags = cpu_to_le32(val ? BIT(0) : 0);
hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
- err = mgmt_cmd_complete(sk, hdev->id,
- MGMT_OP_SET_EXP_FEATURE, 0,
+
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
&rp, sizeof(rp));
if (changed)
- exp_quality_report_feature_changed(val, sk);
+ exp_quality_report_feature_changed(val, hdev, sk);
unlock_quality_report:
hci_req_sync_unlock(hdev);
return err;
}
-static int exp_offload_codec_feature_changed(bool enabled, struct sock *skip)
+static int exp_offload_codec_feature_changed(bool enabled, struct hci_dev *hdev,
+ struct sock *skip)
{
struct mgmt_ev_exp_feature_changed ev;
@@ -4171,7 +4251,7 @@ static int exp_offload_codec_feature_changed(bool enabled, struct sock *skip)
memcpy(ev.uuid, offload_codecs_uuid, 16);
ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
- return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
+ return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
&ev, sizeof(ev),
HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
@@ -4229,7 +4309,7 @@ static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
&rp, sizeof(rp));
if (changed)
- exp_offload_codec_feature_changed(val, sk);
+ exp_offload_codec_feature_changed(val, hdev, sk);
return err;
}
@@ -4496,7 +4576,7 @@ int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
hdev->adv_monitors_cnt++;
if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
monitor->state = ADV_MONITOR_STATE_REGISTERED;
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
}
err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
@@ -4722,7 +4802,7 @@ int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
rp.monitor_handle = cp->monitor_handle;
if (!status)
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
mgmt_status(status), &rp, sizeof(rp));
@@ -4801,28 +4881,33 @@ unlock:
status);
}
-static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
- u16 opcode, struct sk_buff *skb)
+static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
struct mgmt_rp_read_local_oob_data mgmt_rp;
size_t rp_size = sizeof(mgmt_rp);
- struct mgmt_pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd = data;
+ struct sk_buff *skb = cmd->skb;
+ u8 status = mgmt_status(err);
- bt_dev_dbg(hdev, "status %u", status);
+ if (!status) {
+ if (!skb)
+ status = MGMT_STATUS_FAILED;
+ else if (IS_ERR(skb))
+ status = mgmt_status(PTR_ERR(skb));
+ else
+ status = mgmt_status(skb->data[0]);
+ }
- cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
- if (!cmd)
- return;
+ bt_dev_dbg(hdev, "status %d", status);
- if (status || !skb) {
- mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
- status ? mgmt_status(status) : MGMT_STATUS_FAILED);
+ if (status) {
+ mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
goto remove;
}
memset(&mgmt_rp, 0, sizeof(mgmt_rp));
- if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
+ if (!bredr_sc_enabled(hdev)) {
struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
if (skb->len < sizeof(*rp)) {
@@ -4857,14 +4942,31 @@ static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
remove:
- mgmt_pending_remove(cmd);
+ if (skb && !IS_ERR(skb))
+ kfree_skb(skb);
+
+ mgmt_pending_free(cmd);
+}
+
+static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+
+ if (bredr_sc_enabled(hdev))
+ cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
+ else
+ cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
+
+ if (IS_ERR(cmd->skb))
+ return PTR_ERR(cmd->skb);
+ else
+ return 0;
}
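
read_local_oob_data_complete() and set_default_phy_complete() both open with the same skb triage (NULL, IS_ERR(), then the first response byte). A hedged sketch of how that could be factored; the helper name is hypothetical and not part of this patch:

	static u8 example_skb_status(int err, struct sk_buff *skb)
	{
		u8 status = mgmt_status(err);

		if (status)
			return status;
		if (!skb)
			return MGMT_STATUS_FAILED;
		if (IS_ERR(skb))
			return mgmt_status(PTR_ERR(skb));

		return mgmt_status(skb->data[0]);
	}
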
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
void *data, u16 data_len)
{
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
int err;
bt_dev_dbg(hdev, "sock %p", sk);
@@ -4889,22 +4991,20 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
- if (!cmd) {
+ cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
-
- hci_req_init(&req, hdev);
-
- if (bredr_sc_enabled(hdev))
- hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
else
- hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
+ err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
+ read_local_oob_data_complete);
- err = hci_req_run_skb(&req, read_local_oob_data_complete);
- if (err < 0)
- mgmt_pending_remove(cmd);
+ if (err < 0) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_STATUS_FAILED);
+
+ if (cmd)
+ mgmt_pending_free(cmd);
+ }
unlock:
hci_dev_unlock(hdev);
@@ -5077,13 +5177,6 @@ void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
}
hci_dev_unlock(hdev);
-
- /* Handle suspend notifier */
- if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
- hdev->suspend_tasks)) {
- bt_dev_dbg(hdev, "Unpaused discovery");
- wake_up(&hdev->suspend_wait_q);
- }
}
static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
@@ -5113,6 +5206,25 @@ static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
return true;
}
+static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_pending_cmd *cmd = data;
+
+ bt_dev_dbg(hdev, "err %d", err);
+
+ mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
+ cmd->param, 1);
+ mgmt_pending_free(cmd);
+
+	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
+						DISCOVERY_FINDING);
+}
+
+static int start_discovery_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_start_discovery_sync(hdev);
+}
+
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
u16 op, void *data, u16 len)
{
@@ -5164,17 +5276,20 @@ static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
else
hdev->discovery.limited = false;
- cmd = mgmt_pending_add(sk, op, hdev, data, len);
+ cmd = mgmt_pending_new(sk, op, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- cmd->cmd_complete = generic_cmd_complete;
+ err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
+ start_discovery_complete);
+ if (err < 0) {
+ mgmt_pending_free(cmd);
+ goto failed;
+ }
hci_discovery_set_state(hdev, DISCOVERY_STARTING);
- queue_work(hdev->req_workqueue, &hdev->discov_update);
- err = 0;
failed:
hci_dev_unlock(hdev);
@@ -5196,13 +5311,6 @@ static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
data, len);
}
-static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
- u8 status)
-{
- return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
- cmd->param, 1);
-}
-
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
@@ -5271,15 +5379,13 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
+ cmd = mgmt_pending_new(sk, MGMT_OP_START_SERVICE_DISCOVERY,
hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- cmd->cmd_complete = service_discovery_cmd_complete;
-
/* Clear the discovery filter first to free any previously
* allocated memory for the UUID list.
*/
@@ -5303,9 +5409,14 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
}
}
+ err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
+ start_discovery_complete);
+ if (err < 0) {
+ mgmt_pending_free(cmd);
+ goto failed;
+ }
+
hci_discovery_set_state(hdev, DISCOVERY_STARTING);
- queue_work(hdev->req_workqueue, &hdev->discov_update);
- err = 0;
failed:
hci_dev_unlock(hdev);
@@ -5327,12 +5438,25 @@ void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
}
hci_dev_unlock(hdev);
+}
- /* Handle suspend notifier */
- if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
- bt_dev_dbg(hdev, "Paused discovery");
- wake_up(&hdev->suspend_wait_q);
- }
+static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_pending_cmd *cmd = data;
+
+ bt_dev_dbg(hdev, "err %d", err);
+
+ mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
+ cmd->param, 1);
+ mgmt_pending_free(cmd);
+
+ if (!err)
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+}
+
+static int stop_discovery_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_stop_discovery_sync(hdev);
}
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -5360,17 +5484,20 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
+ cmd = mgmt_pending_new(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto unlock;
}
- cmd->cmd_complete = generic_cmd_complete;
+ err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
+ stop_discovery_complete);
+ if (err < 0) {
+ mgmt_pending_free(cmd);
+ goto unlock;
+ }
hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
- queue_work(hdev->req_workqueue, &hdev->discov_update);
- err = 0;
unlock:
hci_dev_unlock(hdev);
@@ -5491,11 +5618,15 @@ done:
return err;
}
+static int set_device_id_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_update_eir_sync(hdev);
+}
+
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_cp_set_device_id *cp = data;
- struct hci_request req;
int err;
__u16 source;
@@ -5517,38 +5648,32 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
NULL, 0);
- hci_req_init(&req, hdev);
- __hci_req_update_eir(&req);
- hci_req_run(&req, NULL);
+ hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
hci_dev_unlock(hdev);
return err;
}
-static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
- u16 opcode)
+static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
- bt_dev_dbg(hdev, "status %u", status);
+ if (err)
+ bt_dev_err(hdev, "failed to re-configure advertising %d", err);
+ else
+ bt_dev_dbg(hdev, "status %d", err);
}
-static void set_advertising_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
+static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
struct cmd_lookup match = { NULL, hdev };
- struct hci_request req;
u8 instance;
struct adv_info *adv_instance;
- int err;
-
- hci_dev_lock(hdev);
+ u8 status = mgmt_status(err);
if (status) {
- u8 mgmt_err = mgmt_status(status);
-
mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
- cmd_status_rsp, &mgmt_err);
- goto unlock;
+ cmd_status_rsp, &status);
+ return;
}
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
@@ -5564,46 +5689,60 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status,
if (match.sk)
sock_put(match.sk);
- /* Handle suspend notifier */
- if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
- hdev->suspend_tasks)) {
- bt_dev_dbg(hdev, "Paused advertising");
- wake_up(&hdev->suspend_wait_q);
- } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
- hdev->suspend_tasks)) {
- bt_dev_dbg(hdev, "Unpaused advertising");
- wake_up(&hdev->suspend_wait_q);
- }
-
/* If "Set Advertising" was just disabled and instance advertising was
* set up earlier, then re-enable multi-instance advertising.
*/
if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
list_empty(&hdev->adv_instances))
- goto unlock;
+ return;
instance = hdev->cur_adv_instance;
if (!instance) {
adv_instance = list_first_entry_or_null(&hdev->adv_instances,
struct adv_info, list);
if (!adv_instance)
- goto unlock;
+ return;
instance = adv_instance->instance;
}
- hci_req_init(&req, hdev);
+ err = hci_schedule_adv_instance_sync(hdev, instance, true);
- err = __hci_req_schedule_adv_instance(&req, instance, true);
+ enable_advertising_instance(hdev, err);
+}
- if (!err)
- err = hci_req_run(&req, enable_advertising_instance);
+static int set_adv_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_mode *cp = cmd->param;
+ u8 val = !!cp->val;
- if (err)
- bt_dev_err(hdev, "failed to re-configure advertising");
+ if (cp->val == 0x02)
+ hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
+ else
+ hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
-unlock:
- hci_dev_unlock(hdev);
+ cancel_adv_timeout(hdev);
+
+ if (val) {
+ /* Switch to instance "0" for the Set Advertising setting.
+ * We cannot use update_[adv|scan_rsp]_data() here as the
+ * HCI_ADVERTISING flag is not yet set.
+ */
+ hdev->cur_adv_instance = 0x00;
+
+ if (ext_adv_capable(hdev)) {
+ hci_start_ext_adv_sync(hdev, 0x00);
+ } else {
+ hci_update_adv_data_sync(hdev, 0x00);
+ hci_update_scan_rsp_data_sync(hdev, 0x00);
+ hci_enable_advertising_sync(hdev);
+ }
+ } else {
+ hci_disable_advertising_sync(hdev);
+ }
+
+ return 0;
}
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -5611,7 +5750,6 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
{
struct mgmt_mode *cp = data;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
u8 val, status;
int err;
@@ -5622,13 +5760,6 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
status);
- /* Enabling the experimental LL Privay support disables support for
- * advertising.
- */
- if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
- MGMT_STATUS_NOT_SUPPORTED);
-
if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS);
@@ -5684,40 +5815,13 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
}
cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
-
- hci_req_init(&req, hdev);
-
- if (cp->val == 0x02)
- hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
else
- hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
-
- cancel_adv_timeout(hdev);
-
- if (val) {
- /* Switch to instance "0" for the Set Advertising setting.
- * We cannot use update_[adv|scan_rsp]_data() here as the
- * HCI_ADVERTISING flag is not yet set.
- */
- hdev->cur_adv_instance = 0x00;
+ err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
+ set_advertising_complete);
- if (ext_adv_capable(hdev)) {
- __hci_req_start_ext_adv(&req, 0x00);
- } else {
- __hci_req_update_adv_data(&req, 0x00);
- __hci_req_update_scan_rsp_data(&req, 0x00);
- __hci_req_enable_advertising(&req);
- }
- } else {
- __hci_req_disable_advertising(&req);
- }
-
- err = hci_req_run(&req, set_advertising_complete);
- if (err < 0)
+ if (err < 0 && cmd)
mgmt_pending_remove(cmd);
unlock:
@@ -5810,38 +5914,23 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
* loaded.
*/
if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
- hdev->discovery.state == DISCOVERY_STOPPED) {
- struct hci_request req;
-
- hci_req_init(&req, hdev);
-
- hci_req_add_le_scan_disable(&req, false);
- hci_req_add_le_passive_scan(&req);
-
- hci_req_run(&req, NULL);
- }
+ hdev->discovery.state == DISCOVERY_STOPPED)
+ hci_update_passive_scan(hdev);
hci_dev_unlock(hdev);
return err;
}
-static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
+static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
- struct mgmt_pending_cmd *cmd;
-
- bt_dev_dbg(hdev, "status 0x%02x", status);
+ struct mgmt_pending_cmd *cmd = data;
- hci_dev_lock(hdev);
-
- cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
- if (!cmd)
- goto unlock;
+ bt_dev_dbg(hdev, "err %d", err);
- if (status) {
+ if (err) {
mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
- mgmt_status(status));
+ mgmt_status(err));
} else {
struct mgmt_mode *cp = cmd->param;
@@ -5854,10 +5943,15 @@ static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
new_settings(hdev, cmd->sk);
}
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
+}
-unlock:
- hci_dev_unlock(hdev);
+static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_mode *cp = cmd->param;
+
+ return hci_write_fast_connectable_sync(hdev, cp->val);
}
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
@@ -5865,58 +5959,49 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
{
struct mgmt_mode *cp = data;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
int err;
bt_dev_dbg(hdev, "sock %p", sk);
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
hdev->hci_ver < BLUETOOTH_VER_1_2)
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_FAST_CONNECTABLE,
MGMT_STATUS_NOT_SUPPORTED);
if (cp->val != 0x00 && cp->val != 0x01)
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_FAST_CONNECTABLE,
MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
- if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_STATUS_BUSY);
- goto unlock;
- }
-
if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
- err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
- hdev);
+ err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
goto unlock;
}
if (!hdev_is_powered(hdev)) {
hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
- err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
- hdev);
+ err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
new_settings(hdev, sk);
goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
- data, len);
- if (!cmd) {
+ cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
+ len);
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
-
- hci_req_init(&req, hdev);
-
- __hci_req_write_fast_connectable(&req, cp->val);
+ else
+ err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
+ fast_connectable_complete);
- err = hci_req_run(&req, fast_connectable_complete);
if (err < 0) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_STATUS_FAILED);
- mgmt_pending_remove(cmd);
+ mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_FAILED);
+
+ if (cmd)
+ mgmt_pending_free(cmd);
}
unlock:
@@ -5925,20 +6010,14 @@ unlock:
return err;
}
-static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
- struct mgmt_pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd = data;
- bt_dev_dbg(hdev, "status 0x%02x", status);
-
- hci_dev_lock(hdev);
-
- cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
- if (!cmd)
- goto unlock;
+ bt_dev_dbg(hdev, "err %d", err);
- if (status) {
- u8 mgmt_err = mgmt_status(status);
+ if (err) {
+ u8 mgmt_err = mgmt_status(err);
/* We need to restore the flag if related HCI commands
* failed.
@@ -5951,17 +6030,31 @@ static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
new_settings(hdev, cmd->sk);
}
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
+}
-unlock:
- hci_dev_unlock(hdev);
+static int set_bredr_sync(struct hci_dev *hdev, void *data)
+{
+ int status;
+
+ status = hci_write_fast_connectable_sync(hdev, false);
+
+ if (!status)
+ status = hci_update_scan_sync(hdev);
+
+ /* Since only the advertising data flags will change, there
+ * is no need to update the scan response data.
+ */
+ if (!status)
+ status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
+
+ return status;
}
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
struct mgmt_mode *cp = data;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
int err;
bt_dev_dbg(hdev, "sock %p", sk);
@@ -6033,15 +6126,19 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
}
}
- if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
- MGMT_STATUS_BUSY);
- goto unlock;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
- if (!cmd) {
+ cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
+ if (!cmd)
err = -ENOMEM;
+ else
+ err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
+ set_bredr_complete);
+
+ if (err < 0) {
+ mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+ MGMT_STATUS_FAILED);
+ if (cmd)
+ mgmt_pending_free(cmd);
+
goto unlock;
}
@@ -6050,42 +6147,23 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
*/
hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
- hci_req_init(&req, hdev);
-
- __hci_req_write_fast_connectable(&req, false);
- __hci_req_update_scan(&req);
-
- /* Since only the advertising data flags will change, there
- * is no need to update the scan response data.
- */
- __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
-
- err = hci_req_run(&req, set_bredr_complete);
- if (err < 0)
- mgmt_pending_remove(cmd);
-
unlock:
hci_dev_unlock(hdev);
return err;
}
-static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
- struct mgmt_pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd = data;
struct mgmt_mode *cp;
- bt_dev_dbg(hdev, "status %u", status);
-
- hci_dev_lock(hdev);
+ bt_dev_dbg(hdev, "err %d", err);
- cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
- if (!cmd)
- goto unlock;
+ if (err) {
+ u8 mgmt_err = mgmt_status(err);
- if (status) {
- mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
- mgmt_status(status));
- goto remove;
+ mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+ goto done;
}
cp = cmd->param;
@@ -6105,13 +6183,23 @@ static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
break;
}
- send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
+ send_settings_rsp(cmd->sk, cmd->opcode, hdev);
new_settings(hdev, cmd->sk);
-remove:
- mgmt_pending_remove(cmd);
-unlock:
- hci_dev_unlock(hdev);
+done:
+ mgmt_pending_free(cmd);
+}
+
+static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_mode *cp = cmd->param;
+ u8 val = !!cp->val;
+
+ /* Force write of val */
+ hci_dev_set_flag(hdev, HCI_SC_ENABLED);
+
+ return hci_write_sc_support_sync(hdev, val);
}
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
@@ -6119,7 +6207,6 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
{
struct mgmt_mode *cp = data;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
u8 val;
int err;
@@ -6138,7 +6225,7 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
- MGMT_STATUS_INVALID_PARAMS);
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
@@ -6169,12 +6256,6 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
- if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
- MGMT_STATUS_BUSY);
- goto failed;
- }
-
val = !!cp->val;
if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
@@ -6183,18 +6264,18 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
- if (!cmd) {
+ cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
+ if (!cmd)
err = -ENOMEM;
- goto failed;
- }
+ else
+ err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
+ set_secure_conn_complete);
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
- err = hci_req_run(&req, sc_enable_complete);
if (err < 0) {
- mgmt_pending_remove(cmd);
- goto failed;
+ mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+ MGMT_STATUS_FAILED);
+ if (cmd)
+ mgmt_pending_free(cmd);
}
failed:
@@ -6508,14 +6589,19 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
return err;
}
-static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
+static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
+ struct mgmt_pending_cmd *cmd = data;
struct hci_conn *conn = cmd->user_data;
+ struct mgmt_cp_get_conn_info *cp = cmd->param;
struct mgmt_rp_get_conn_info rp;
- int err;
+ u8 status;
- memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
+ bt_dev_dbg(hdev, "err %d", err);
+
+ memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
+ status = mgmt_status(err);
if (status == MGMT_STATUS_SUCCESS) {
rp.rssi = conn->rssi;
rp.tx_power = conn->tx_power;
@@ -6526,67 +6612,58 @@ static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
rp.max_tx_power = HCI_TX_POWER_INVALID;
}
- err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
- status, &rp, sizeof(rp));
+ mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
+ &rp, sizeof(rp));
- hci_conn_drop(conn);
- hci_conn_put(conn);
+ if (conn) {
+ hci_conn_drop(conn);
+ hci_conn_put(conn);
+ }
- return err;
+ mgmt_pending_free(cmd);
}
-static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
- u16 opcode)
+static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
- struct hci_cp_read_rssi *cp;
- struct mgmt_pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_get_conn_info *cp = cmd->param;
struct hci_conn *conn;
- u16 handle;
- u8 status;
-
- bt_dev_dbg(hdev, "status 0x%02x", hci_status);
+ int err;
+ __le16 handle;
- hci_dev_lock(hdev);
+ /* Make sure we are still connected */
+ if (cp->addr.type == BDADDR_BREDR)
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+ &cp->addr.bdaddr);
+ else
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
- /* Commands sent in request are either Read RSSI or Read Transmit Power
- * Level so we check which one was last sent to retrieve connection
- * handle. Both commands have handle as first parameter so it's safe to
- * cast data on the same command struct.
- *
- * First command sent is always Read RSSI and we fail only if it fails.
- * In other case we simply override error to indicate success as we
- * already remembered if TX power value is actually valid.
- */
- cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
- if (!cp) {
- cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
- status = MGMT_STATUS_SUCCESS;
- } else {
- status = mgmt_status(hci_status);
+ if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) {
+ if (cmd->user_data) {
+ hci_conn_drop(cmd->user_data);
+ hci_conn_put(cmd->user_data);
+ cmd->user_data = NULL;
+ }
+ return MGMT_STATUS_NOT_CONNECTED;
}
- if (!cp) {
- bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
- goto unlock;
- }
+ handle = cpu_to_le16(conn->handle);
- handle = __le16_to_cpu(cp->handle);
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (!conn) {
- bt_dev_err(hdev, "unknown handle (%u) in conn_info response",
- handle);
- goto unlock;
- }
+ /* Refresh RSSI each time */
+ err = hci_read_rssi_sync(hdev, handle);
- cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
- if (!cmd)
- goto unlock;
+ /* For LE links TX power does not change, thus we don't need to
+ * query for it once the value is known.
+ */
+ if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
+ conn->tx_power == HCI_TX_POWER_INVALID))
+ err = hci_read_tx_power_sync(hdev, handle, 0x00);
- cmd->cmd_complete(cmd, status);
- mgmt_pending_remove(cmd);
+ /* Max TX power needs to be read only once per connection */
+ if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
+ err = hci_read_tx_power_sync(hdev, handle, 0x01);
-unlock:
- hci_dev_unlock(hdev);
+ return err;
}
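
For reference, the last argument of hci_read_tx_power_sync() is the HCI Read_Transmit_Power_Level Type parameter: 0x00 requests the current level and 0x01 the maximum. A condensed sketch of the sequence get_conn_info_sync() performs, with the LE-specific guards omitted:

	err = hci_read_rssi_sync(hdev, handle);			  /* every call */
	if (!err)
		err = hci_read_tx_power_sync(hdev, handle, 0x00); /* current */
	if (!err)
		err = hci_read_tx_power_sync(hdev, handle, 0x01); /* maximum */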
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -6631,12 +6708,6 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
- MGMT_STATUS_BUSY, &rp, sizeof(rp));
- goto unlock;
- }
-
/* To avoid client trying to guess when to poll again for information we
* calculate conn info age as random value between min/max set in hdev.
*/
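
The age computation this comment describes is unchanged by the patch; for context, it picks a random value inside the [conn_info_min_age, conn_info_max_age) window, roughly as follows (a sketch, not the verbatim source):

	u32 conn_info_age = hdev->conn_info_min_age +
			    prandom_u32_max(hdev->conn_info_max_age -
					    hdev->conn_info_min_age);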
@@ -6650,49 +6721,28 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
if (time_after(jiffies, conn->conn_info_timestamp +
msecs_to_jiffies(conn_info_age)) ||
!conn->conn_info_timestamp) {
- struct hci_request req;
- struct hci_cp_read_tx_power req_txp_cp;
- struct hci_cp_read_rssi req_rssi_cp;
struct mgmt_pending_cmd *cmd;
- hci_req_init(&req, hdev);
- req_rssi_cp.handle = cpu_to_le16(conn->handle);
- hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
- &req_rssi_cp);
-
- /* For LE links TX power does not change thus we don't need to
- * query for it once value is known.
- */
- if (!bdaddr_type_is_le(cp->addr.type) ||
- conn->tx_power == HCI_TX_POWER_INVALID) {
- req_txp_cp.handle = cpu_to_le16(conn->handle);
- req_txp_cp.type = 0x00;
- hci_req_add(&req, HCI_OP_READ_TX_POWER,
- sizeof(req_txp_cp), &req_txp_cp);
- }
+ cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
+ len);
+ if (!cmd)
+ err = -ENOMEM;
+ else
+ err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
+ cmd, get_conn_info_complete);
- /* Max TX power needs to be read only once per connection */
- if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
- req_txp_cp.handle = cpu_to_le16(conn->handle);
- req_txp_cp.type = 0x01;
- hci_req_add(&req, HCI_OP_READ_TX_POWER,
- sizeof(req_txp_cp), &req_txp_cp);
- }
+ if (err < 0) {
+ mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
+ MGMT_STATUS_FAILED, &rp, sizeof(rp));
- err = hci_req_run(&req, conn_info_refresh_complete);
- if (err < 0)
- goto unlock;
+ if (cmd)
+ mgmt_pending_free(cmd);
- cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
- data, len);
- if (!cmd) {
- err = -ENOMEM;
goto unlock;
}
hci_conn_hold(conn);
cmd->user_data = hci_conn_get(conn);
- cmd->cmd_complete = conn_info_cmd_complete;
conn->conn_info_timestamp = jiffies;
} else {
@@ -6710,82 +6760,76 @@ unlock:
return err;
}
-static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
+static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
- struct hci_conn *conn = cmd->user_data;
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_get_clock_info *cp = cmd->param;
struct mgmt_rp_get_clock_info rp;
- struct hci_dev *hdev;
- int err;
+ struct hci_conn *conn = cmd->user_data;
+ u8 status = mgmt_status(err);
+
+ bt_dev_dbg(hdev, "err %d", err);
memset(&rp, 0, sizeof(rp));
- memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
+ bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+ rp.addr.type = cp->addr.type;
- if (status)
+ if (err)
goto complete;
- hdev = hci_dev_get(cmd->index);
- if (hdev) {
- rp.local_clock = cpu_to_le32(hdev->clock);
- hci_dev_put(hdev);
- }
+ rp.local_clock = cpu_to_le32(hdev->clock);
if (conn) {
rp.piconet_clock = cpu_to_le32(conn->clock);
rp.accuracy = cpu_to_le16(conn->clock_accuracy);
- }
-
-complete:
- err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
- sizeof(rp));
-
- if (conn) {
hci_conn_drop(conn);
hci_conn_put(conn);
}
- return err;
+complete:
+ mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
+ sizeof(rp));
+
+ mgmt_pending_free(cmd);
}
-static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
- struct hci_cp_read_clock *hci_cp;
- struct mgmt_pending_cmd *cmd;
- struct hci_conn *conn;
-
- bt_dev_dbg(hdev, "status %u", status);
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_get_clock_info *cp = cmd->param;
+ struct hci_cp_read_clock hci_cp;
+ struct hci_conn *conn = cmd->user_data;
+ int err;
- hci_dev_lock(hdev);
+ memset(&hci_cp, 0, sizeof(hci_cp));
+ err = hci_read_clock_sync(hdev, &hci_cp);
- hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
- if (!hci_cp)
- goto unlock;
+ if (conn) {
+ /* Make sure connection still exists */
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+ &cp->addr.bdaddr);
- if (hci_cp->which) {
- u16 handle = __le16_to_cpu(hci_cp->handle);
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- } else {
- conn = NULL;
+ if (conn && conn == cmd->user_data &&
+ conn->state == BT_CONNECTED) {
+ hci_cp.handle = cpu_to_le16(conn->handle);
+ hci_cp.which = 0x01; /* Piconet clock */
+ err = hci_read_clock_sync(hdev, &hci_cp);
+ } else if (cmd->user_data) {
+ hci_conn_drop(cmd->user_data);
+ hci_conn_put(cmd->user_data);
+ cmd->user_data = NULL;
+ }
}
- cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
- if (!cmd)
- goto unlock;
-
- cmd->cmd_complete(cmd, mgmt_status(status));
- mgmt_pending_remove(cmd);
-
-unlock:
- hci_dev_unlock(hdev);
+ return err;
}
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
+ u16 len)
{
struct mgmt_cp_get_clock_info *cp = data;
struct mgmt_rp_get_clock_info rp;
- struct hci_cp_read_clock hci_cp;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
struct hci_conn *conn;
int err;
@@ -6823,31 +6867,25 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
conn = NULL;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
- if (!cmd) {
+ cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
-
- cmd->cmd_complete = clock_info_cmd_complete;
+ else
+ err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
+ get_clock_info_complete);
- hci_req_init(&req, hdev);
+ if (err < 0) {
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
+ MGMT_STATUS_FAILED, &rp, sizeof(rp));
- memset(&hci_cp, 0, sizeof(hci_cp));
- hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
+ if (cmd)
+ mgmt_pending_free(cmd);
- if (conn) {
+ } else if (conn) {
hci_conn_hold(conn);
cmd->user_data = hci_conn_get(conn);
-
- hci_cp.handle = cpu_to_le16(conn->handle);
- hci_cp.which = 0x01; /* Piconet clock */
- hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
}
- err = hci_req_run(&req, get_clock_info_complete);
- if (err < 0)
- mgmt_pending_remove(cmd);
unlock:
hci_dev_unlock(hdev);
@@ -6928,6 +6966,11 @@ static void device_added(struct sock *sk, struct hci_dev *hdev,
mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
+static int add_device_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_update_passive_scan_sync(hdev);
+}
+
static int add_device(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
@@ -7010,7 +7053,9 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
current_flags = params->current_flags;
}
- hci_update_background_scan(hdev);
+ err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
+ if (err < 0)
+ goto unlock;
added:
device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
@@ -7037,6 +7082,11 @@ static void device_removed(struct sock *sk, struct hci_dev *hdev,
mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
+static int remove_device_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_update_passive_scan_sync(hdev);
+}
+
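
remove_device_sync(), like add_device_sync() above, exists only to give hci_update_passive_scan_sync() an entry point matching the callback signature hci_cmd_sync_queue() expects. Passing NULL for the data and completion arguments is the fire-and-forget form of the API, as in this sketch:

	/* No pending mgmt command attached; an error only aborts queueing. */
	err = hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;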
static int remove_device(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
@@ -7116,7 +7166,6 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
list_del(&params->action);
list_del(&params->list);
kfree(params);
- hci_update_background_scan(hdev);
device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
} else {
@@ -7153,10 +7202,10 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
}
bt_dev_dbg(hdev, "All LE connection parameters were removed");
-
- hci_update_background_scan(hdev);
}
+ hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
+
complete:
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
MGMT_STATUS_SUCCESS, &cp->addr,
@@ -7359,21 +7408,27 @@ unlock:
return err;
}
-static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
- u16 opcode, struct sk_buff *skb)
+static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
+ int err)
{
const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
u8 *h192, *r192, *h256, *r256;
- struct mgmt_pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd = data;
+ struct sk_buff *skb = cmd->skb;
+ u8 status = mgmt_status(err);
u16 eir_len;
- int err;
- bt_dev_dbg(hdev, "status %u", status);
+ if (!status) {
+ if (!skb)
+ status = MGMT_STATUS_FAILED;
+ else if (IS_ERR(skb))
+ status = mgmt_status(PTR_ERR(skb));
+ else
+ status = mgmt_status(skb->data[0]);
+ }
- cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
- if (!cmd)
- return;
+ bt_dev_dbg(hdev, "status %u", status);
mgmt_cp = cmd->param;
@@ -7385,7 +7440,7 @@ static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
r192 = NULL;
h256 = NULL;
r256 = NULL;
- } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
+ } else if (!bredr_sc_enabled(hdev)) {
struct hci_rp_read_local_oob_data *rp;
if (skb->len != sizeof(*rp)) {
@@ -7466,6 +7521,9 @@ send_rsp:
mgmt_rp, sizeof(*mgmt_rp) + eir_len,
HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
+ if (skb && !IS_ERR(skb))
+ kfree_skb(skb);
+
kfree(mgmt_rp);
mgmt_pending_remove(cmd);
}
@@ -7474,7 +7532,6 @@ static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
struct mgmt_cp_read_local_oob_ext_data *cp)
{
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
int err;
cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
@@ -7482,14 +7539,9 @@ static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
if (!cmd)
return -ENOMEM;
- hci_req_init(&req, hdev);
-
- if (bredr_sc_enabled(hdev))
- hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
- else
- hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
+ err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
+ read_local_oob_ext_data_complete);
- err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
if (err < 0) {
mgmt_pending_remove(cmd);
return err;
@@ -7713,13 +7765,6 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
MGMT_STATUS_REJECTED);
- /* Enabling the experimental LL Privay support disables support for
- * advertising.
- */
- if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
- MGMT_STATUS_NOT_SUPPORTED);
-
hci_dev_lock(hdev);
rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
@@ -7876,58 +7921,66 @@ static bool adv_busy(struct hci_dev *hdev)
pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
}
-static void add_advertising_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
+static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
+ int err)
{
- struct mgmt_pending_cmd *cmd;
- struct mgmt_cp_add_advertising *cp;
- struct mgmt_rp_add_advertising rp;
- struct adv_info *adv_instance, *n;
- u8 instance;
+ struct adv_info *adv, *n;
- bt_dev_dbg(hdev, "status %u", status);
+ bt_dev_dbg(hdev, "err %d", err);
hci_dev_lock(hdev);
- cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
- if (!cmd)
- cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);
+ list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
+ u8 instance;
- list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
- if (!adv_instance->pending)
+ if (!adv->pending)
continue;
- if (!status) {
- adv_instance->pending = false;
+ if (!err) {
+ adv->pending = false;
continue;
}
- instance = adv_instance->instance;
+ instance = adv->instance;
if (hdev->cur_adv_instance == instance)
cancel_adv_timeout(hdev);
hci_remove_adv_instance(hdev, instance);
- mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
+ mgmt_advertising_removed(sk, hdev, instance);
}
- if (!cmd)
- goto unlock;
+ hci_dev_unlock(hdev);
+}
+
+static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_add_advertising *cp = cmd->param;
+ struct mgmt_rp_add_advertising rp;
+
+ memset(&rp, 0, sizeof(rp));
- cp = cmd->param;
rp.instance = cp->instance;
- if (status)
+ if (err)
mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
- mgmt_status(status));
+ mgmt_status(err));
else
mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
- mgmt_status(status), &rp, sizeof(rp));
+ mgmt_status(err), &rp, sizeof(rp));
- mgmt_pending_remove(cmd);
+ add_adv_complete(hdev, cmd->sk, cp->instance, err);
-unlock:
- hci_dev_unlock(hdev);
+ mgmt_pending_free(cmd);
+}
+
+static int add_advertising_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_add_advertising *cp = cmd->param;
+
+ return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
@@ -7943,7 +7996,6 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
struct adv_info *next_instance;
int err;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
bt_dev_dbg(hdev, "sock %p", sk);
@@ -7952,13 +8004,6 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
status);
- /* Enabling the experimental LL Privay support disables support for
- * advertising.
- */
- if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
- MGMT_STATUS_NOT_SUPPORTED);
-
if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS);
@@ -8051,25 +8096,19 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
/* We're good to go, update advertising data, parameters, and start
* advertising.
*/
- cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
+ cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
data_len);
if (!cmd) {
err = -ENOMEM;
goto unlock;
}
- hci_req_init(&req, hdev);
-
- err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
-
- if (!err)
- err = hci_req_run(&req, add_advertising_complete);
+ cp->instance = schedule_instance;
- if (err < 0) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
- MGMT_STATUS_FAILED);
- mgmt_pending_remove(cmd);
- }
+ err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
+ add_advertising_complete);
+ if (err < 0)
+ mgmt_pending_free(cmd);
unlock:
hci_dev_unlock(hdev);
@@ -8077,30 +8116,25 @@ unlock:
return err;
}
-static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
+static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
+ int err)
{
- struct mgmt_pending_cmd *cmd;
- struct mgmt_cp_add_ext_adv_params *cp;
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
struct mgmt_rp_add_ext_adv_params rp;
- struct adv_info *adv_instance;
+ struct adv_info *adv;
u32 flags;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
- cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
- if (!cmd)
- goto unlock;
-
- cp = cmd->param;
- adv_instance = hci_find_adv_instance(hdev, cp->instance);
- if (!adv_instance)
+ adv = hci_find_adv_instance(hdev, cp->instance);
+ if (!adv)
goto unlock;
rp.instance = cp->instance;
- rp.tx_power = adv_instance->tx_power;
+ rp.tx_power = adv->tx_power;
/* While we're at it, inform userspace of the available space for this
* advertisement, given the flags that will be used.
@@ -8109,39 +8143,44 @@ static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
- if (status) {
+ if (err) {
/* If this advertisement was previously advertising and we
* failed to update it, we signal that it has been removed and
* delete its structure
*/
- if (!adv_instance->pending)
+ if (!adv->pending)
mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
hci_remove_adv_instance(hdev, cp->instance);
mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
- mgmt_status(status));
-
+ mgmt_status(err));
} else {
mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
- mgmt_status(status), &rp, sizeof(rp));
+ mgmt_status(err), &rp, sizeof(rp));
}
unlock:
if (cmd)
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
hci_dev_unlock(hdev);
}
+static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
+
+ return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
+}
+
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
void *data, u16 data_len)
{
struct mgmt_cp_add_ext_adv_params *cp = data;
struct mgmt_rp_add_ext_adv_params rp;
struct mgmt_pending_cmd *cmd = NULL;
- struct adv_info *adv_instance;
- struct hci_request req;
u32 flags, min_interval, max_interval;
u16 timeout, duration;
u8 status;
@@ -8223,29 +8262,18 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
/* Submit request for advertising params if ext adv available */
if (ext_adv_capable(hdev)) {
- hci_req_init(&req, hdev);
- adv_instance = hci_find_adv_instance(hdev, cp->instance);
-
- /* Updating parameters of an active instance will return a
- * Command Disallowed error, so we must first disable the
- * instance if it is active.
- */
- if (!adv_instance->pending)
- __hci_req_disable_ext_adv_instance(&req, cp->instance);
-
- __hci_req_setup_ext_adv_instance(&req, cp->instance);
-
- err = hci_req_run(&req, add_ext_adv_params_complete);
-
- if (!err)
- cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
- hdev, data, data_len);
+ cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
+ data, data_len);
if (!cmd) {
err = -ENOMEM;
hci_remove_adv_instance(hdev, cp->instance);
goto unlock;
}
+ err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
+ add_ext_adv_params_complete);
+ if (err < 0)
+ mgmt_pending_free(cmd);
} else {
rp.instance = cp->instance;
rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
@@ -8262,6 +8290,49 @@ unlock:
return err;
}
+static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
+ struct mgmt_rp_add_advertising rp;
+
+ add_adv_complete(hdev, cmd->sk, cp->instance, err);
+
+ memset(&rp, 0, sizeof(rp));
+
+ rp.instance = cp->instance;
+
+ if (err)
+ mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
+ mgmt_status(err));
+ else
+ mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
+ mgmt_status(err), &rp, sizeof(rp));
+
+ mgmt_pending_free(cmd);
+}
+
+static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
+ int err;
+
+ if (ext_adv_capable(hdev)) {
+ err = hci_update_adv_data_sync(hdev, cp->instance);
+ if (err)
+ return err;
+
+ err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
+ if (err)
+ return err;
+
+ return hci_enable_ext_advertising_sync(hdev, cp->instance);
+ }
+
+ return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
+}
+
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
@@ -8272,7 +8343,6 @@ static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
struct adv_info *adv_instance;
int err = 0;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
BT_DBG("%s", hdev->name);
@@ -8314,78 +8384,52 @@ static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
cp->data, cp->scan_rsp_len,
cp->data + cp->adv_data_len);
- /* We're good to go, update advertising data, parameters, and start
- * advertising.
- */
-
- hci_req_init(&req, hdev);
-
- hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
-
- if (ext_adv_capable(hdev)) {
- __hci_req_update_adv_data(&req, cp->instance);
- __hci_req_update_scan_rsp_data(&req, cp->instance);
- __hci_req_enable_ext_advertising(&req, cp->instance);
-
- } else {
- /* If using software rotation, determine next instance to use */
-
- if (hdev->cur_adv_instance == cp->instance) {
- /* If the currently advertised instance is being changed
- * then cancel the current advertising and schedule the
- * next instance. If there is only one instance then the
- * overridden advertising data will be visible right
- * away
- */
- cancel_adv_timeout(hdev);
-
- next_instance = hci_get_next_instance(hdev,
- cp->instance);
- if (next_instance)
- schedule_instance = next_instance->instance;
- } else if (!hdev->adv_instance_timeout) {
- /* Immediately advertise the new instance if no other
- * instance is currently being advertised.
- */
- schedule_instance = cp->instance;
- }
+ /* If using software rotation, determine next instance to use */
+ if (hdev->cur_adv_instance == cp->instance) {
+ /* If the currently advertised instance is being changed
+ * then cancel the current advertising and schedule the
+ * next instance. If there is only one instance then the
+ * overridden advertising data will be visible right
+ * away
+ */
+ cancel_adv_timeout(hdev);
- /* If the HCI_ADVERTISING flag is set or there is no instance to
- * be advertised then we have no HCI communication to make.
- * Simply return.
+ next_instance = hci_get_next_instance(hdev, cp->instance);
+ if (next_instance)
+ schedule_instance = next_instance->instance;
+ } else if (!hdev->adv_instance_timeout) {
+ /* Immediately advertise the new instance if no other
+ * instance is currently being advertised.
*/
- if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
- !schedule_instance) {
- if (adv_instance->pending) {
- mgmt_advertising_added(sk, hdev, cp->instance);
- adv_instance->pending = false;
- }
- rp.instance = cp->instance;
- err = mgmt_cmd_complete(sk, hdev->id,
- MGMT_OP_ADD_EXT_ADV_DATA,
- MGMT_STATUS_SUCCESS, &rp,
- sizeof(rp));
- goto unlock;
- }
+ schedule_instance = cp->instance;
+ }
- err = __hci_req_schedule_adv_instance(&req, schedule_instance,
- true);
+ /* If the HCI_ADVERTISING flag is set or there is no instance to
+ * be advertised then we have no HCI communication to make.
+ * Simply return.
+ */
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
+ if (adv_instance->pending) {
+ mgmt_advertising_added(sk, hdev, cp->instance);
+ adv_instance->pending = false;
+ }
+ rp.instance = cp->instance;
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
+ MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
+ goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
+ cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
data_len);
if (!cmd) {
err = -ENOMEM;
goto clear_new_instance;
}
- if (!err)
- err = hci_req_run(&req, add_advertising_complete);
-
+ err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
+ add_ext_adv_data_complete);
if (err < 0) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
- MGMT_STATUS_FAILED);
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
goto clear_new_instance;
}
@@ -8408,54 +8452,53 @@ unlock:
return err;
}
-static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
+static void remove_advertising_complete(struct hci_dev *hdev, void *data,
+ int err)
{
- struct mgmt_pending_cmd *cmd;
- struct mgmt_cp_remove_advertising *cp;
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_remove_advertising *cp = cmd->param;
struct mgmt_rp_remove_advertising rp;
- bt_dev_dbg(hdev, "status %u", status);
+ bt_dev_dbg(hdev, "err %d", err);
- hci_dev_lock(hdev);
+ memset(&rp, 0, sizeof(rp));
+ rp.instance = cp->instance;
- /* A failure status here only means that we failed to disable
- * advertising. Otherwise, the advertising instance has been removed,
- * so report success.
- */
- cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
- if (!cmd)
- goto unlock;
+ if (err)
+ mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
+ mgmt_status(err));
+ else
+ mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
+ MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
- cp = cmd->param;
- rp.instance = cp->instance;
+ mgmt_pending_free(cmd);
+}
- mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
- &rp, sizeof(rp));
- mgmt_pending_remove(cmd);
+static int remove_advertising_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_remove_advertising *cp = cmd->param;
+ int err;
-unlock:
- hci_dev_unlock(hdev);
+ err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
+ if (err)
+ return err;
+
+ if (list_empty(&hdev->adv_instances))
+ err = hci_disable_advertising_sync(hdev);
+
+ return err;
}
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
void *data, u16 data_len)
{
struct mgmt_cp_remove_advertising *cp = data;
- struct mgmt_rp_remove_advertising rp;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
int err;
bt_dev_dbg(hdev, "sock %p", sk);
- /* Enabling the experimental LL Privay support disables support for
- * advertising.
- */
- if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
- MGMT_STATUS_NOT_SUPPORTED);
-
hci_dev_lock(hdev);
if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
@@ -8479,44 +8522,17 @@ static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- hci_req_init(&req, hdev);
-
- /* If we use extended advertising, instance is disabled and removed */
- if (ext_adv_capable(hdev)) {
- __hci_req_disable_ext_adv_instance(&req, cp->instance);
- __hci_req_remove_ext_adv_instance(&req, cp->instance);
- }
-
- hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
-
- if (list_empty(&hdev->adv_instances))
- __hci_req_disable_advertising(&req);
-
- /* If no HCI commands have been collected so far or the HCI_ADVERTISING
- * flag is set or the device isn't powered then we have no HCI
- * communication to make. Simply return.
- */
- if (skb_queue_empty(&req.cmd_q) ||
- !hdev_is_powered(hdev) ||
- hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
- hci_req_purge(&req);
- rp.instance = cp->instance;
- err = mgmt_cmd_complete(sk, hdev->id,
- MGMT_OP_REMOVE_ADVERTISING,
- MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
- goto unlock;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
+ cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
data_len);
if (!cmd) {
err = -ENOMEM;
goto unlock;
}
- err = hci_req_run(&req, remove_advertising_complete);
+ err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
+ remove_advertising_complete);
if (err < 0)
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
unlock:
hci_dev_unlock(hdev);
@@ -8758,31 +8774,6 @@ void mgmt_index_removed(struct hci_dev *hdev)
HCI_MGMT_EXT_INDEX_EVENTS);
}
-/* This function requires the caller holds hdev->lock */
-static void restart_le_actions(struct hci_dev *hdev)
-{
- struct hci_conn_params *p;
-
- list_for_each_entry(p, &hdev->le_conn_params, list) {
- /* Needed for AUTO_OFF case where might not "really"
- * have been powered off.
- */
- list_del_init(&p->action);
-
- switch (p->auto_connect) {
- case HCI_AUTO_CONN_DIRECT:
- case HCI_AUTO_CONN_ALWAYS:
- list_add(&p->action, &hdev->pend_le_conns);
- break;
- case HCI_AUTO_CONN_REPORT:
- list_add(&p->action, &hdev->pend_le_reports);
- break;
- default:
- break;
- }
- }
-}
-
void mgmt_power_on(struct hci_dev *hdev, int err)
{
struct cmd_lookup match = { NULL, hdev };
@@ -8793,7 +8784,7 @@ void mgmt_power_on(struct hci_dev *hdev, int err)
if (!err) {
restart_le_actions(hdev);
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
}
mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
@@ -9349,74 +9340,6 @@ void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
sock_put(match.sk);
}
-static void clear_eir(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- struct hci_cp_write_eir cp;
-
- if (!lmp_ext_inq_capable(hdev))
- return;
-
- memset(hdev->eir, 0, sizeof(hdev->eir));
-
- memset(&cp, 0, sizeof(cp));
-
- hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
-}
-
-void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
-{
- struct cmd_lookup match = { NULL, hdev };
- struct hci_request req;
- bool changed = false;
-
- if (status) {
- u8 mgmt_err = mgmt_status(status);
-
- if (enable && hci_dev_test_and_clear_flag(hdev,
- HCI_SSP_ENABLED)) {
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
- new_settings(hdev, NULL);
- }
-
- mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
- &mgmt_err);
- return;
- }
-
- if (enable) {
- changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
- } else {
- changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
- if (!changed)
- changed = hci_dev_test_and_clear_flag(hdev,
- HCI_HS_ENABLED);
- else
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
- }
-
- mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
-
- if (changed)
- new_settings(hdev, match.sk);
-
- if (match.sk)
- sock_put(match.sk);
-
- hci_req_init(&req, hdev);
-
- if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
- if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
- hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
- sizeof(enable), &enable);
- __hci_req_update_eir(&req);
- } else {
- clear_eir(&req);
- }
-
- hci_req_run(&req, NULL);
-}
-
static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
struct cmd_lookup *match = data;
diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c
index 0d0a6d77b9e8..83875f2a0604 100644
--- a/net/bluetooth/mgmt_util.c
+++ b/net/bluetooth/mgmt_util.c
@@ -227,7 +227,7 @@ void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
}
}
-struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
+struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode,
struct hci_dev *hdev,
void *data, u16 len)
{
@@ -251,6 +251,19 @@ struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
cmd->sk = sk;
sock_hold(sk);
+ return cmd;
+}
+
+struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
+ struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_pending_cmd *cmd;
+
+ cmd = mgmt_pending_new(sk, opcode, hdev, data, len);
+ if (!cmd)
+ return NULL;
+
list_add(&cmd->list, &hdev->mgmt_pending);
return cmd;
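
The split leaves two lifetimes for a pending command: mgmt_pending_new() creates an untracked entry that only the queued callback and its completion ever see, while mgmt_pending_add() additionally links the entry into hdev->mgmt_pending so pending_find() can locate it. The intended pairing looks like this (illustrative):

	/* Untracked: invisible to pending_find(); freed by the completion. */
	cmd = mgmt_pending_new(sk, opcode, hdev, data, len);
	/* ... queue work ... */
	mgmt_pending_free(cmd);

	/* Tracked: mgmt_pending_remove() unlinks from hdev->mgmt_pending
	 * before freeing.
	 */
	cmd = mgmt_pending_add(sk, opcode, hdev, data, len);
	/* ... run request ... */
	mgmt_pending_remove(cmd);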
diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h
index 6559f189213c..63b965eaaaac 100644
--- a/net/bluetooth/mgmt_util.h
+++ b/net/bluetooth/mgmt_util.h
@@ -27,6 +27,7 @@ struct mgmt_pending_cmd {
void *param;
size_t param_len;
struct sock *sk;
+ struct sk_buff *skb;
void *user_data;
int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status);
};
@@ -49,5 +50,8 @@ void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
struct hci_dev *hdev,
void *data, u16 len);
+struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode,
+ struct hci_dev *hdev,
+ void *data, u16 len);
void mgmt_pending_free(struct mgmt_pending_cmd *cmd);
void mgmt_pending_remove(struct mgmt_pending_cmd *cmd);
diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c
index 255cffa554ee..1122097e1e49 100644
--- a/net/bluetooth/msft.c
+++ b/net/bluetooth/msft.c
@@ -93,7 +93,7 @@ struct msft_data {
struct list_head handle_map;
__u16 pending_add_handle;
__u16 pending_remove_handle;
- __u8 reregistering;
+ __u8 resuming;
__u8 suspending;
__u8 filter_enabled;
};
@@ -156,7 +156,6 @@ failed:
return false;
}
-/* This function requires the caller holds hdev->lock */
static void reregister_monitor(struct hci_dev *hdev, int handle)
{
struct adv_monitor *monitor;
@@ -166,9 +165,9 @@ static void reregister_monitor(struct hci_dev *hdev, int handle)
while (1) {
monitor = idr_get_next(&hdev->adv_monitors_idr, &handle);
if (!monitor) {
- /* All monitors have been reregistered */
- msft->reregistering = false;
- hci_update_background_scan(hdev);
+ /* All monitors have been resumed */
+ msft->resuming = false;
+ hci_update_passive_scan(hdev);
return;
}
@@ -185,67 +184,317 @@ static void reregister_monitor(struct hci_dev *hdev, int handle)
}
}
-/* This function requires the caller holds hdev->lock */
-static void remove_monitor_on_suspend(struct hci_dev *hdev, int handle)
+/* is_mgmt = true matches the handle exposed to userspace via mgmt.
+ * is_mgmt = false matches the handle used by the msft controller.
+ * This function requires the caller holds hdev->lock
+ */
+static struct msft_monitor_advertisement_handle_data *msft_find_handle_data
+ (struct hci_dev *hdev, u16 handle, bool is_mgmt)
+{
+ struct msft_monitor_advertisement_handle_data *entry;
+ struct msft_data *msft = hdev->msft_data;
+
+ list_for_each_entry(entry, &msft->handle_map, list) {
+ if (is_mgmt && entry->mgmt_handle == handle)
+ return entry;
+ if (!is_mgmt && entry->msft_handle == handle)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static void msft_le_monitor_advertisement_cb(struct hci_dev *hdev,
+ u8 status, u16 opcode,
+ struct sk_buff *skb)
+{
+ struct msft_rp_le_monitor_advertisement *rp;
+ struct adv_monitor *monitor;
+ struct msft_monitor_advertisement_handle_data *handle_data;
+ struct msft_data *msft = hdev->msft_data;
+
+ hci_dev_lock(hdev);
+
+ monitor = idr_find(&hdev->adv_monitors_idr, msft->pending_add_handle);
+ if (!monitor) {
+ bt_dev_err(hdev, "msft add advmon: monitor %u is not found!",
+ msft->pending_add_handle);
+ status = HCI_ERROR_UNSPECIFIED;
+ goto unlock;
+ }
+
+ if (status)
+ goto unlock;
+
+ rp = (struct msft_rp_le_monitor_advertisement *)skb->data;
+ if (skb->len < sizeof(*rp)) {
+ status = HCI_ERROR_UNSPECIFIED;
+ goto unlock;
+ }
+
+ handle_data = kmalloc(sizeof(*handle_data), GFP_KERNEL);
+ if (!handle_data) {
+ status = HCI_ERROR_UNSPECIFIED;
+ goto unlock;
+ }
+
+ handle_data->mgmt_handle = monitor->handle;
+ handle_data->msft_handle = rp->handle;
+ INIT_LIST_HEAD(&handle_data->list);
+ list_add(&handle_data->list, &msft->handle_map);
+
+ monitor->state = ADV_MONITOR_STATE_OFFLOADED;
+
+unlock:
+ if (status && monitor)
+ hci_free_adv_monitor(hdev, monitor);
+
+ hci_dev_unlock(hdev);
+
+ if (!msft->resuming)
+ hci_add_adv_patterns_monitor_complete(hdev, status);
+}
+
+static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev,
+ u8 status, u16 opcode,
+ struct sk_buff *skb)
{
+ struct msft_cp_le_cancel_monitor_advertisement *cp;
+ struct msft_rp_le_cancel_monitor_advertisement *rp;
struct adv_monitor *monitor;
+ struct msft_monitor_advertisement_handle_data *handle_data;
struct msft_data *msft = hdev->msft_data;
int err;
+ bool pending;
- while (1) {
- monitor = idr_get_next(&hdev->adv_monitors_idr, &handle);
- if (!monitor) {
- /* All monitors have been removed */
- msft->suspending = false;
- hci_update_background_scan(hdev);
+ if (status)
+ goto done;
+
+ rp = (struct msft_rp_le_cancel_monitor_advertisement *)skb->data;
+ if (skb->len < sizeof(*rp)) {
+ status = HCI_ERROR_UNSPECIFIED;
+ goto done;
+ }
+
+ hci_dev_lock(hdev);
+
+ cp = hci_sent_cmd_data(hdev, hdev->msft_opcode);
+ handle_data = msft_find_handle_data(hdev, cp->handle, false);
+
+ if (handle_data) {
+ monitor = idr_find(&hdev->adv_monitors_idr,
+ handle_data->mgmt_handle);
+
+ if (monitor && monitor->state == ADV_MONITOR_STATE_OFFLOADED)
+ monitor->state = ADV_MONITOR_STATE_REGISTERED;
+
+ /* Do not free the monitor if it is being removed due to
+ * suspend. It will be re-monitored on resume.
+ */
+ if (monitor && !msft->suspending)
+ hci_free_adv_monitor(hdev, monitor);
+
+ list_del(&handle_data->list);
+ kfree(handle_data);
+ }
+
+ /* If removing all monitors is required, we need to continue the
+ * process here because it was paused earlier while waiting for the
+ * response from the controller.
+ */
+ if (msft->pending_remove_handle == 0) {
+ pending = hci_remove_all_adv_monitor(hdev, &err);
+ if (pending) {
+ hci_dev_unlock(hdev);
return;
}
- msft->pending_remove_handle = (u16)handle;
- err = __msft_remove_monitor(hdev, monitor, handle);
+ if (err)
+ status = HCI_ERROR_UNSPECIFIED;
+ }
- /* If success, return and wait for monitor removed callback */
- if (!err)
- return;
+ hci_dev_unlock(hdev);
+
+done:
+ if (!msft->suspending)
+ hci_remove_adv_monitor_complete(hdev, status);
+}
+
+static int msft_remove_monitor_sync(struct hci_dev *hdev,
+ struct adv_monitor *monitor)
+{
+ struct msft_cp_le_cancel_monitor_advertisement cp;
+ struct msft_monitor_advertisement_handle_data *handle_data;
+ struct sk_buff *skb;
+ u8 status;
+
+ handle_data = msft_find_handle_data(hdev, monitor->handle, true);
+
+ /* If no matched handle, just remove without telling controller */
+ if (!handle_data)
+ return -ENOENT;
+
+ cp.sub_opcode = MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT;
+ cp.handle = handle_data->msft_handle;
+
+ skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ status = skb->data[0];
+ skb_pull(skb, 1);
+
+ msft_le_cancel_monitor_advertisement_cb(hdev, status, hdev->msft_opcode,
+ skb);
+
+ return status;
+}
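
The sync variants reuse the old asynchronous callbacks by invoking them directly with the reply skb. The convention assumed here, taken from the code above rather than stated anywhere in the patch, is that an MSFT vendor reply carries its status in the first byte, which is peeled off before the callback parses the payload:

	skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp,
			     HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);	/* the request itself failed */

	status = skb->data[0];		/* vendor status byte */
	skb_pull(skb, 1);		/* callback sees only the payload */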
+
+/* This function requires the caller holds hci_req_sync_lock */
+int msft_suspend_sync(struct hci_dev *hdev)
+{
+ struct msft_data *msft = hdev->msft_data;
+ struct adv_monitor *monitor;
+ int handle = 0;
+
+ if (!msft || !msft_monitor_supported(hdev))
+ return 0;
+
+ msft->suspending = true;
+
+ while (1) {
+ monitor = idr_get_next(&hdev->adv_monitors_idr, &handle);
+ if (!monitor)
+ break;
+
+ msft_remove_monitor_sync(hdev, monitor);
- /* Otherwise free the monitor and keep removing */
- hci_free_adv_monitor(hdev, monitor);
handle++;
}
+
+ /* All monitors have been removed */
+ msft->suspending = false;
+
+ return 0;
}
-/* This function requires the caller holds hdev->lock */
-void msft_suspend(struct hci_dev *hdev)
+static bool msft_monitor_rssi_valid(struct adv_monitor *monitor)
{
- struct msft_data *msft = hdev->msft_data;
+ struct adv_rssi_thresholds *r = &monitor->rssi;
- if (!msft)
- return;
+ if (r->high_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN ||
+ r->high_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX ||
+ r->low_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN ||
+ r->low_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX)
+ return false;
- if (msft_monitor_supported(hdev)) {
- msft->suspending = true;
- /* Quitely remove all monitors on suspend to avoid waking up
- * the system.
- */
- remove_monitor_on_suspend(hdev, 0);
+ /* High_threshold_timeout is not supported;
+ * once high_threshold is reached, events are reported immediately.
+ */
+ if (r->high_threshold_timeout != 0)
+ return false;
+
+ if (r->low_threshold_timeout > MSFT_RSSI_LOW_TIMEOUT_MAX)
+ return false;
+
+ /* Sampling period from 0x00 to 0xFF are all allowed */
+ return true;
+}
+
+static bool msft_monitor_pattern_valid(struct adv_monitor *monitor)
+{
+ return msft_monitor_rssi_valid(monitor);
+ /* No additional check needed for pattern-based monitor */
+}
+
+static int msft_add_monitor_sync(struct hci_dev *hdev,
+ struct adv_monitor *monitor)
+{
+ struct msft_cp_le_monitor_advertisement *cp;
+ struct msft_le_monitor_advertisement_pattern_data *pattern_data;
+ struct msft_le_monitor_advertisement_pattern *pattern;
+ struct adv_pattern *entry;
+ size_t total_size = sizeof(*cp) + sizeof(*pattern_data);
+ ptrdiff_t offset = 0;
+ u8 pattern_count = 0;
+ struct sk_buff *skb;
+ u8 status;
+
+ if (!msft_monitor_pattern_valid(monitor))
+ return -EINVAL;
+
+ list_for_each_entry(entry, &monitor->patterns, list) {
+ pattern_count++;
+ total_size += sizeof(*pattern) + entry->length;
}
+
+ cp = kmalloc(total_size, GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
+
+ cp->sub_opcode = MSFT_OP_LE_MONITOR_ADVERTISEMENT;
+ cp->rssi_high = monitor->rssi.high_threshold;
+ cp->rssi_low = monitor->rssi.low_threshold;
+ cp->rssi_low_interval = (u8)monitor->rssi.low_threshold_timeout;
+ cp->rssi_sampling_period = monitor->rssi.sampling_period;
+
+ cp->cond_type = MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN;
+
+ pattern_data = (void *)cp->data;
+ pattern_data->count = pattern_count;
+
+ list_for_each_entry(entry, &monitor->patterns, list) {
+ pattern = (void *)(pattern_data->data + offset);
+ /* the length also includes data_type and offset */
+ pattern->length = entry->length + 2;
+ pattern->data_type = entry->ad_type;
+ pattern->start_byte = entry->offset;
+ memcpy(pattern->pattern, entry->value, entry->length);
+ offset += sizeof(*pattern) + entry->length;
+ }
+
+ skb = __hci_cmd_sync(hdev, hdev->msft_opcode, total_size, cp,
+ HCI_CMD_TIMEOUT);
+ kfree(cp);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ status = skb->data[0];
+ skb_pull(skb, 1);
+
+ msft_le_monitor_advertisement_cb(hdev, status, hdev->msft_opcode, skb);
+
+ return status;
}
-/* This function requires the caller holds hdev->lock */
-void msft_resume(struct hci_dev *hdev)
+/* This function requires the caller holds hci_req_sync_lock */
+int msft_resume_sync(struct hci_dev *hdev)
{
struct msft_data *msft = hdev->msft_data;
+ struct adv_monitor *monitor;
+ int handle = 0;
- if (!msft)
- return;
+ if (!msft || !msft_monitor_supported(hdev))
+ return 0;
- if (msft_monitor_supported(hdev)) {
- msft->reregistering = true;
- /* Monitors are removed on suspend, so we need to add all
- * monitors on resume.
- */
- reregister_monitor(hdev, 0);
+ msft->resuming = true;
+
+ while (1) {
+ monitor = idr_get_next(&hdev->adv_monitors_idr, &handle);
+ if (!monitor)
+ break;
+
+ msft_add_monitor_sync(hdev, monitor);
+
+ handle++;
}
+
+ /* All monitors have been resumed */
+ msft->resuming = false;
+
+ return 0;
}
void msft_do_open(struct hci_dev *hdev)
@@ -275,7 +524,7 @@ void msft_do_open(struct hci_dev *hdev)
}
if (msft_monitor_supported(hdev)) {
- msft->reregistering = true;
+ msft->resuming = true;
msft_set_filter_enable(hdev, true);
/* Monitors get removed on power off, so we need to explicitly
* tell the controller to re-monitor.
@@ -381,151 +630,6 @@ __u64 msft_get_features(struct hci_dev *hdev)
return msft ? msft->features : 0;
}
-/* is_mgmt = true matches the handle exposed to userspace via mgmt.
- * is_mgmt = false matches the handle used by the msft controller.
- * This function requires the caller holds hdev->lock
- */
-static struct msft_monitor_advertisement_handle_data *msft_find_handle_data
- (struct hci_dev *hdev, u16 handle, bool is_mgmt)
-{
- struct msft_monitor_advertisement_handle_data *entry;
- struct msft_data *msft = hdev->msft_data;
-
- list_for_each_entry(entry, &msft->handle_map, list) {
- if (is_mgmt && entry->mgmt_handle == handle)
- return entry;
- if (!is_mgmt && entry->msft_handle == handle)
- return entry;
- }
-
- return NULL;
-}
-
-static void msft_le_monitor_advertisement_cb(struct hci_dev *hdev,
- u8 status, u16 opcode,
- struct sk_buff *skb)
-{
- struct msft_rp_le_monitor_advertisement *rp;
- struct adv_monitor *monitor;
- struct msft_monitor_advertisement_handle_data *handle_data;
- struct msft_data *msft = hdev->msft_data;
-
- hci_dev_lock(hdev);
-
- monitor = idr_find(&hdev->adv_monitors_idr, msft->pending_add_handle);
- if (!monitor) {
- bt_dev_err(hdev, "msft add advmon: monitor %u is not found!",
- msft->pending_add_handle);
- status = HCI_ERROR_UNSPECIFIED;
- goto unlock;
- }
-
- if (status)
- goto unlock;
-
- rp = (struct msft_rp_le_monitor_advertisement *)skb->data;
- if (skb->len < sizeof(*rp)) {
- status = HCI_ERROR_UNSPECIFIED;
- goto unlock;
- }
-
- handle_data = kmalloc(sizeof(*handle_data), GFP_KERNEL);
- if (!handle_data) {
- status = HCI_ERROR_UNSPECIFIED;
- goto unlock;
- }
-
- handle_data->mgmt_handle = monitor->handle;
- handle_data->msft_handle = rp->handle;
- INIT_LIST_HEAD(&handle_data->list);
- list_add(&handle_data->list, &msft->handle_map);
-
- monitor->state = ADV_MONITOR_STATE_OFFLOADED;
-
-unlock:
- if (status && monitor)
- hci_free_adv_monitor(hdev, monitor);
-
- /* If in restart/reregister sequence, keep registering. */
- if (msft->reregistering)
- reregister_monitor(hdev, msft->pending_add_handle + 1);
-
- hci_dev_unlock(hdev);
-
- if (!msft->reregistering)
- hci_add_adv_patterns_monitor_complete(hdev, status);
-}
-
-static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev,
- u8 status, u16 opcode,
- struct sk_buff *skb)
-{
- struct msft_cp_le_cancel_monitor_advertisement *cp;
- struct msft_rp_le_cancel_monitor_advertisement *rp;
- struct adv_monitor *monitor;
- struct msft_monitor_advertisement_handle_data *handle_data;
- struct msft_data *msft = hdev->msft_data;
- int err;
- bool pending;
-
- if (status)
- goto done;
-
- rp = (struct msft_rp_le_cancel_monitor_advertisement *)skb->data;
- if (skb->len < sizeof(*rp)) {
- status = HCI_ERROR_UNSPECIFIED;
- goto done;
- }
-
- hci_dev_lock(hdev);
-
- cp = hci_sent_cmd_data(hdev, hdev->msft_opcode);
- handle_data = msft_find_handle_data(hdev, cp->handle, false);
-
- if (handle_data) {
- monitor = idr_find(&hdev->adv_monitors_idr,
- handle_data->mgmt_handle);
-
- if (monitor && monitor->state == ADV_MONITOR_STATE_OFFLOADED)
- monitor->state = ADV_MONITOR_STATE_REGISTERED;
-
- /* Do not free the monitor if it is being removed due to
- * suspend. It will be re-monitored on resume.
- */
- if (monitor && !msft->suspending)
- hci_free_adv_monitor(hdev, monitor);
-
- list_del(&handle_data->list);
- kfree(handle_data);
- }
-
- /* If in suspend/remove sequence, keep removing. */
- if (msft->suspending)
- remove_monitor_on_suspend(hdev,
- msft->pending_remove_handle + 1);
-
- /* If remove all monitors is required, we need to continue the process
- * here because the earlier it was paused when waiting for the
- * response from controller.
- */
- if (msft->pending_remove_handle == 0) {
- pending = hci_remove_all_adv_monitor(hdev, &err);
- if (pending) {
- hci_dev_unlock(hdev);
- return;
- }
-
- if (err)
- status = HCI_ERROR_UNSPECIFIED;
- }
-
- hci_dev_unlock(hdev);
-
-done:
- if (!msft->suspending)
- hci_remove_adv_monitor_complete(hdev, status);
-}
-
static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev,
u8 status, u16 opcode,
struct sk_buff *skb)
@@ -560,35 +664,6 @@ static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev,
hci_dev_unlock(hdev);
}
-static bool msft_monitor_rssi_valid(struct adv_monitor *monitor)
-{
- struct adv_rssi_thresholds *r = &monitor->rssi;
-
- if (r->high_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN ||
- r->high_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX ||
- r->low_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN ||
- r->low_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX)
- return false;
-
- /* High_threshold_timeout is not supported,
- * once high_threshold is reached, events are immediately reported.
- */
- if (r->high_threshold_timeout != 0)
- return false;
-
- if (r->low_threshold_timeout > MSFT_RSSI_LOW_TIMEOUT_MAX)
- return false;
-
- /* Sampling period from 0x00 to 0xFF are all allowed */
- return true;
-}
-
-static bool msft_monitor_pattern_valid(struct adv_monitor *monitor)
-{
- return msft_monitor_rssi_valid(monitor);
- /* No additional check needed for pattern-based monitor */
-}
-
/* This function requires the caller holds hdev->lock */
static int __msft_add_monitor_pattern(struct hci_dev *hdev,
struct adv_monitor *monitor)
@@ -656,7 +731,7 @@ int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor)
if (!msft)
return -EOPNOTSUPP;
- if (msft->reregistering || msft->suspending)
+ if (msft->resuming || msft->suspending)
return -EBUSY;
return __msft_add_monitor_pattern(hdev, monitor);
@@ -700,7 +775,7 @@ int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
if (!msft)
return -EOPNOTSUPP;
- if (msft->reregistering || msft->suspending)
+ if (msft->resuming || msft->suspending)
return -EBUSY;
return __msft_remove_monitor(hdev, monitor, handle);
diff --git a/net/bluetooth/msft.h b/net/bluetooth/msft.h
index 59c6e081c789..b59b63dc0ea8 100644
--- a/net/bluetooth/msft.h
+++ b/net/bluetooth/msft.h
@@ -24,8 +24,8 @@ int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
u16 handle);
void msft_req_add_set_filter_enable(struct hci_request *req, bool enable);
int msft_set_filter_enable(struct hci_dev *hdev, bool enable);
-void msft_suspend(struct hci_dev *hdev);
-void msft_resume(struct hci_dev *hdev);
+int msft_suspend_sync(struct hci_dev *hdev);
+int msft_resume_sync(struct hci_dev *hdev);
bool msft_curve_validity(struct hci_dev *hdev);
#else
@@ -61,8 +61,15 @@ static inline int msft_set_filter_enable(struct hci_dev *hdev, bool enable)
return -EOPNOTSUPP;
}
-static inline void msft_suspend(struct hci_dev *hdev) {}
-static inline void msft_resume(struct hci_dev *hdev) {}
+static inline int msft_suspend_sync(struct hci_dev *hdev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int msft_resume_sync(struct hci_dev *hdev)
+{
+ return -EOPNOTSUPP;
+}
static inline bool msft_curve_validity(struct hci_dev *hdev)
{
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index c1183fef1f21..3915832a03c2 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -397,10 +397,10 @@ static int find_portno(struct net_bridge *br)
if (!inuse)
return -ENOMEM;
- set_bit(0, inuse); /* zero is reserved */
- list_for_each_entry(p, &br->port_list, list) {
- set_bit(p->port_no, inuse);
- }
+ __set_bit(0, inuse); /* zero is reserved */
+ list_for_each_entry(p, &br->port_list, list)
+ __set_bit(p->port_no, inuse);
+
index = find_first_zero_bit(inuse, BR_MAX_PORTS);
bitmap_free(inuse);
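The find_portno() hunk above swaps set_bit() for __set_bit(): the inuse bitmap is local to the function and freed before return, so the atomic read-modify-write that set_bit() performs buys nothing. A standalone C sketch of the pattern, with hand-rolled stand-ins that mirror the kernel helpers' semantics only (the real ones work word-at-a-time):

```c
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define MAX_PORTS	64

/* Non-atomic: safe because only this thread can see the bitmap. */
static void __set_bit(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static unsigned int find_first_zero_bit(const unsigned long *map,
					unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		if (!(map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
			return i;
	return size;
}

int main(void)
{
	unsigned long inuse[MAX_PORTS / BITS_PER_LONG] = { 0 };

	__set_bit(0, inuse);	/* zero is reserved, as in find_portno() */
	__set_bit(1, inuse);
	printf("first free port: %u\n", find_first_zero_bit(inuse, MAX_PORTS));
	return 0;
}
```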
@@ -525,8 +525,8 @@ static void br_set_gso_limits(struct net_bridge *br)
gso_max_size = min(gso_max_size, p->dev->gso_max_size);
gso_max_segs = min(gso_max_segs, p->dev->gso_max_segs);
}
- br->dev->gso_max_size = gso_max_size;
- br->dev->gso_max_segs = gso_max_segs;
+ netif_set_gso_max_size(br->dev, gso_max_size);
+ netif_set_gso_max_segs(br->dev, gso_max_segs);
}
/*
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index d9a89ddd0331..159590d5c2af 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -36,15 +36,14 @@ static ssize_t store_bridge_parm(struct device *d,
struct net_bridge *br = to_bridge(d);
struct netlink_ext_ack extack = {0};
unsigned long val;
- char *endp;
int err;
if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
- val = simple_strtoul(buf, &endp, 0);
- if (endp == buf)
- return -EINVAL;
+ err = kstrtoul(buf, 0, &val);
+ if (err != 0)
+ return err;
if (!rtnl_trylock())
return restart_syscall();
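The br_sysfs_br.c hunk replaces simple_strtoul() with kstrtoul(), which rejects trailing garbage and reports overflow instead of silently stopping at the first non-digit. A userspace approximation of kstrtoul() semantics built on strtoul(), to show the difference; parse_ulong_strict() is a hypothetical helper, not a kernel API:

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_ulong_strict(const char *s, int base, unsigned long *res)
{
	char *end;

	errno = 0;
	*res = strtoul(s, &end, base);
	if (errno == ERANGE)
		return -ERANGE;
	if (end == s)
		return -EINVAL;
	if (*end == '\n')	/* kstrtoul() tolerates one trailing newline */
		end++;
	if (*end != '\0')
		return -EINVAL;	/* trailing garbage; simple_strtoul() ignored this */
	return 0;
}

int main(void)
{
	unsigned long val;

	printf("\"42\"  -> %d\n", parse_ulong_strict("42", 0, &val));
	printf("\"42x\" -> %d\n", parse_ulong_strict("42x", 0, &val));
	return 0;
}
```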
diff --git a/net/core/Makefile b/net/core/Makefile
index 4268846f2f47..a8e4f737692b 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -11,7 +11,9 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
obj-y += dev.o dev_addr_lists.o dst.o netevent.o \
neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \
- fib_notifier.o xdp.o flow_offload.o
+ fib_notifier.o xdp.o flow_offload.o gro.o
+
+obj-$(CONFIG_NETDEV_ADDR_LIST_TEST) += dev_addr_lists_test.o
obj-y += net-sysfs.o
obj-$(CONFIG_PAGE_POOL) += page_pool.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 15ac064b5562..823917de0d2b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -153,16 +153,10 @@
#include "net-sysfs.h"
-#define MAX_GRO_SKBS 8
-
-/* This should be increased if a protocol with a bigger head is added. */
-#define GRO_MAX_HEAD (MAX_HEADER + 128)
static DEFINE_SPINLOCK(ptype_lock);
-static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly; /* Taps */
-static struct list_head offload_base __read_mostly;
static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
@@ -604,84 +598,6 @@ void dev_remove_pack(struct packet_type *pt)
EXPORT_SYMBOL(dev_remove_pack);
-/**
- * dev_add_offload - register offload handlers
- * @po: protocol offload declaration
- *
- * Add protocol offload handlers to the networking stack. The passed
- * &proto_offload is linked into kernel lists and may not be freed until
- * it has been removed from the kernel lists.
- *
- * This call does not sleep therefore it can not
- * guarantee all CPU's that are in middle of receiving packets
- * will see the new offload handlers (until the next received packet).
- */
-void dev_add_offload(struct packet_offload *po)
-{
- struct packet_offload *elem;
-
- spin_lock(&offload_lock);
- list_for_each_entry(elem, &offload_base, list) {
- if (po->priority < elem->priority)
- break;
- }
- list_add_rcu(&po->list, elem->list.prev);
- spin_unlock(&offload_lock);
-}
-EXPORT_SYMBOL(dev_add_offload);
-
-/**
- * __dev_remove_offload - remove offload handler
- * @po: packet offload declaration
- *
- * Remove a protocol offload handler that was previously added to the
- * kernel offload handlers by dev_add_offload(). The passed &offload_type
- * is removed from the kernel lists and can be freed or reused once this
- * function returns.
- *
- * The packet type might still be in use by receivers
- * and must not be freed until after all the CPU's have gone
- * through a quiescent state.
- */
-static void __dev_remove_offload(struct packet_offload *po)
-{
- struct list_head *head = &offload_base;
- struct packet_offload *po1;
-
- spin_lock(&offload_lock);
-
- list_for_each_entry(po1, head, list) {
- if (po == po1) {
- list_del_rcu(&po->list);
- goto out;
- }
- }
-
- pr_warn("dev_remove_offload: %p not found\n", po);
-out:
- spin_unlock(&offload_lock);
-}
-
-/**
- * dev_remove_offload - remove packet offload handler
- * @po: packet offload declaration
- *
- * Remove a packet offload handler that was previously added to the kernel
- * offload handlers by dev_add_offload(). The passed &offload_type is
- * removed from the kernel lists and can be freed or reused once this
- * function returns.
- *
- * This call sleeps to guarantee that no CPU is looking at the packet
- * type after return.
- */
-void dev_remove_offload(struct packet_offload *po)
-{
- __dev_remove_offload(po);
-
- synchronize_net();
-}
-EXPORT_SYMBOL(dev_remove_offload);
-
/*******************************************************************************
*
* Device Interface Subroutines
@@ -1461,6 +1377,7 @@ static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
int ret;
ASSERT_RTNL();
+ dev_addr_check(dev);
if (!netif_device_present(dev)) {
/* may be detached because parent is runtime-suspended */
@@ -3315,40 +3232,6 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
return __vlan_get_protocol(skb, type, depth);
}
-/**
- * skb_mac_gso_segment - mac layer segmentation handler.
- * @skb: buffer to segment
- * @features: features for the output path (see dev->features)
- */
-struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
- netdev_features_t features)
-{
- struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
- struct packet_offload *ptype;
- int vlan_depth = skb->mac_len;
- __be16 type = skb_network_protocol(skb, &vlan_depth);
-
- if (unlikely(!type))
- return ERR_PTR(-EINVAL);
-
- __skb_pull(skb, vlan_depth);
-
- rcu_read_lock();
- list_for_each_entry_rcu(ptype, &offload_base, list) {
- if (ptype->type == type && ptype->callbacks.gso_segment) {
- segs = ptype->callbacks.gso_segment(skb, features);
- break;
- }
- }
- rcu_read_unlock();
-
- __skb_push(skb, skb->data - skb_mac_header(skb));
-
- return segs;
-}
-EXPORT_SYMBOL(skb_mac_gso_segment);
-
-
/* openvswitch calls this on rx path, so we need a different check.
*/
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
@@ -3513,7 +3396,7 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
{
u16 gso_segs = skb_shinfo(skb)->gso_segs;
- if (gso_segs > dev->gso_max_segs)
+ if (gso_segs > READ_ONCE(dev->gso_max_segs))
return features & ~NETIF_F_GSO_MASK;
if (!skb_shinfo(skb)->gso_type) {
@@ -4320,8 +4203,6 @@ int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
int dev_rx_weight __read_mostly = 64;
int dev_tx_weight __read_mostly = 64;
-/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
-int gro_normal_batch __read_mostly = 8;
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
@@ -5664,7 +5545,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
return ret;
}
-static void netif_receive_skb_list_internal(struct list_head *head)
+void netif_receive_skb_list_internal(struct list_head *head)
{
struct sk_buff *skb, *next;
struct list_head sublist;
@@ -5842,550 +5723,6 @@ static void flush_all_backlogs(void)
cpus_read_unlock();
}
-/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
-static void gro_normal_list(struct napi_struct *napi)
-{
- if (!napi->rx_count)
- return;
- netif_receive_skb_list_internal(&napi->rx_list);
- INIT_LIST_HEAD(&napi->rx_list);
- napi->rx_count = 0;
-}
-
-/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
- * pass the whole batch up to the stack.
- */
-static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
-{
- list_add_tail(&skb->list, &napi->rx_list);
- napi->rx_count += segs;
- if (napi->rx_count >= gro_normal_batch)
- gro_normal_list(napi);
-}
-
-static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
-{
- struct packet_offload *ptype;
- __be16 type = skb->protocol;
- struct list_head *head = &offload_base;
- int err = -ENOENT;
-
- BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
-
- if (NAPI_GRO_CB(skb)->count == 1) {
- skb_shinfo(skb)->gso_size = 0;
- goto out;
- }
-
- rcu_read_lock();
- list_for_each_entry_rcu(ptype, head, list) {
- if (ptype->type != type || !ptype->callbacks.gro_complete)
- continue;
-
- err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
- ipv6_gro_complete, inet_gro_complete,
- skb, 0);
- break;
- }
- rcu_read_unlock();
-
- if (err) {
- WARN_ON(&ptype->list == head);
- kfree_skb(skb);
- return;
- }
-
-out:
- gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
-}
-
-static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
- bool flush_old)
-{
- struct list_head *head = &napi->gro_hash[index].list;
- struct sk_buff *skb, *p;
-
- list_for_each_entry_safe_reverse(skb, p, head, list) {
- if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
- return;
- skb_list_del_init(skb);
- napi_gro_complete(napi, skb);
- napi->gro_hash[index].count--;
- }
-
- if (!napi->gro_hash[index].count)
- __clear_bit(index, &napi->gro_bitmask);
-}
-
-/* napi->gro_hash[].list contains packets ordered by age.
- * youngest packets at the head of it.
- * Complete skbs in reverse order to reduce latencies.
- */
-void napi_gro_flush(struct napi_struct *napi, bool flush_old)
-{
- unsigned long bitmask = napi->gro_bitmask;
- unsigned int i, base = ~0U;
-
- while ((i = ffs(bitmask)) != 0) {
- bitmask >>= i;
- base += i;
- __napi_gro_flush_chain(napi, base, flush_old);
- }
-}
-EXPORT_SYMBOL(napi_gro_flush);
-
-static void gro_list_prepare(const struct list_head *head,
- const struct sk_buff *skb)
-{
- unsigned int maclen = skb->dev->hard_header_len;
- u32 hash = skb_get_hash_raw(skb);
- struct sk_buff *p;
-
- list_for_each_entry(p, head, list) {
- unsigned long diffs;
-
- NAPI_GRO_CB(p)->flush = 0;
-
- if (hash != skb_get_hash_raw(p)) {
- NAPI_GRO_CB(p)->same_flow = 0;
- continue;
- }
-
- diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
- diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
- if (skb_vlan_tag_present(p))
- diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
- diffs |= skb_metadata_differs(p, skb);
- if (maclen == ETH_HLEN)
- diffs |= compare_ether_header(skb_mac_header(p),
- skb_mac_header(skb));
- else if (!diffs)
- diffs = memcmp(skb_mac_header(p),
- skb_mac_header(skb),
- maclen);
-
- /* in most common scenarions 'slow_gro' is 0
- * otherwise we are already on some slower paths
- * either skip all the infrequent tests altogether or
- * avoid trying too hard to skip each of them individually
- */
- if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
-#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- struct tc_skb_ext *skb_ext;
- struct tc_skb_ext *p_ext;
-#endif
-
- diffs |= p->sk != skb->sk;
- diffs |= skb_metadata_dst_cmp(p, skb);
- diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);
-
-#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- skb_ext = skb_ext_find(skb, TC_SKB_EXT);
- p_ext = skb_ext_find(p, TC_SKB_EXT);
-
- diffs |= (!!p_ext) ^ (!!skb_ext);
- if (!diffs && unlikely(skb_ext))
- diffs |= p_ext->chain ^ skb_ext->chain;
-#endif
- }
-
- NAPI_GRO_CB(p)->same_flow = !diffs;
- }
-}
-
-static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
-{
- const struct skb_shared_info *pinfo = skb_shinfo(skb);
- const skb_frag_t *frag0 = &pinfo->frags[0];
-
- NAPI_GRO_CB(skb)->data_offset = 0;
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
-
- if (!skb_headlen(skb) && pinfo->nr_frags &&
- !PageHighMem(skb_frag_page(frag0)) &&
- (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
- NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
- NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
- skb_frag_size(frag0),
- skb->end - skb->tail);
- }
-}
-
-static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
-{
- struct skb_shared_info *pinfo = skb_shinfo(skb);
-
- BUG_ON(skb->end - skb->tail < grow);
-
- memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
-
- skb->data_len -= grow;
- skb->tail += grow;
-
- skb_frag_off_add(&pinfo->frags[0], grow);
- skb_frag_size_sub(&pinfo->frags[0], grow);
-
- if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
- skb_frag_unref(skb, 0);
- memmove(pinfo->frags, pinfo->frags + 1,
- --pinfo->nr_frags * sizeof(pinfo->frags[0]));
- }
-}
-
-static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
-{
- struct sk_buff *oldest;
-
- oldest = list_last_entry(head, struct sk_buff, list);
-
- /* We are called with head length >= MAX_GRO_SKBS, so this is
- * impossible.
- */
- if (WARN_ON_ONCE(!oldest))
- return;
-
- /* Do not adjust napi->gro_hash[].count, caller is adding a new
- * SKB to the chain.
- */
- skb_list_del_init(oldest);
- napi_gro_complete(napi, oldest);
-}
-
-static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
-{
- u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
- struct gro_list *gro_list = &napi->gro_hash[bucket];
- struct list_head *head = &offload_base;
- struct packet_offload *ptype;
- __be16 type = skb->protocol;
- struct sk_buff *pp = NULL;
- enum gro_result ret;
- int same_flow;
- int grow;
-
- if (netif_elide_gro(skb->dev))
- goto normal;
-
- gro_list_prepare(&gro_list->list, skb);
-
- rcu_read_lock();
- list_for_each_entry_rcu(ptype, head, list) {
- if (ptype->type != type || !ptype->callbacks.gro_receive)
- continue;
-
- skb_set_network_header(skb, skb_gro_offset(skb));
- skb_reset_mac_len(skb);
- NAPI_GRO_CB(skb)->same_flow = 0;
- NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
- NAPI_GRO_CB(skb)->free = 0;
- NAPI_GRO_CB(skb)->encap_mark = 0;
- NAPI_GRO_CB(skb)->recursion_counter = 0;
- NAPI_GRO_CB(skb)->is_fou = 0;
- NAPI_GRO_CB(skb)->is_atomic = 1;
- NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
-
- /* Setup for GRO checksum validation */
- switch (skb->ip_summed) {
- case CHECKSUM_COMPLETE:
- NAPI_GRO_CB(skb)->csum = skb->csum;
- NAPI_GRO_CB(skb)->csum_valid = 1;
- NAPI_GRO_CB(skb)->csum_cnt = 0;
- break;
- case CHECKSUM_UNNECESSARY:
- NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
- NAPI_GRO_CB(skb)->csum_valid = 0;
- break;
- default:
- NAPI_GRO_CB(skb)->csum_cnt = 0;
- NAPI_GRO_CB(skb)->csum_valid = 0;
- }
-
- pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
- ipv6_gro_receive, inet_gro_receive,
- &gro_list->list, skb);
- break;
- }
- rcu_read_unlock();
-
- if (&ptype->list == head)
- goto normal;
-
- if (PTR_ERR(pp) == -EINPROGRESS) {
- ret = GRO_CONSUMED;
- goto ok;
- }
-
- same_flow = NAPI_GRO_CB(skb)->same_flow;
- ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
-
- if (pp) {
- skb_list_del_init(pp);
- napi_gro_complete(napi, pp);
- gro_list->count--;
- }
-
- if (same_flow)
- goto ok;
-
- if (NAPI_GRO_CB(skb)->flush)
- goto normal;
-
- if (unlikely(gro_list->count >= MAX_GRO_SKBS))
- gro_flush_oldest(napi, &gro_list->list);
- else
- gro_list->count++;
-
- NAPI_GRO_CB(skb)->count = 1;
- NAPI_GRO_CB(skb)->age = jiffies;
- NAPI_GRO_CB(skb)->last = skb;
- skb_shinfo(skb)->gso_size = skb_gro_len(skb);
- list_add(&skb->list, &gro_list->list);
- ret = GRO_HELD;
-
-pull:
- grow = skb_gro_offset(skb) - skb_headlen(skb);
- if (grow > 0)
- gro_pull_from_frag0(skb, grow);
-ok:
- if (gro_list->count) {
- if (!test_bit(bucket, &napi->gro_bitmask))
- __set_bit(bucket, &napi->gro_bitmask);
- } else if (test_bit(bucket, &napi->gro_bitmask)) {
- __clear_bit(bucket, &napi->gro_bitmask);
- }
-
- return ret;
-
-normal:
- ret = GRO_NORMAL;
- goto pull;
-}
-
-struct packet_offload *gro_find_receive_by_type(__be16 type)
-{
- struct list_head *offload_head = &offload_base;
- struct packet_offload *ptype;
-
- list_for_each_entry_rcu(ptype, offload_head, list) {
- if (ptype->type != type || !ptype->callbacks.gro_receive)
- continue;
- return ptype;
- }
- return NULL;
-}
-EXPORT_SYMBOL(gro_find_receive_by_type);
-
-struct packet_offload *gro_find_complete_by_type(__be16 type)
-{
- struct list_head *offload_head = &offload_base;
- struct packet_offload *ptype;
-
- list_for_each_entry_rcu(ptype, offload_head, list) {
- if (ptype->type != type || !ptype->callbacks.gro_complete)
- continue;
- return ptype;
- }
- return NULL;
-}
-EXPORT_SYMBOL(gro_find_complete_by_type);
-
-static gro_result_t napi_skb_finish(struct napi_struct *napi,
- struct sk_buff *skb,
- gro_result_t ret)
-{
- switch (ret) {
- case GRO_NORMAL:
- gro_normal_one(napi, skb, 1);
- break;
-
- case GRO_MERGED_FREE:
- if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
- napi_skb_free_stolen_head(skb);
- else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
- __kfree_skb(skb);
- else
- __kfree_skb_defer(skb);
- break;
-
- case GRO_HELD:
- case GRO_MERGED:
- case GRO_CONSUMED:
- break;
- }
-
- return ret;
-}
-
-gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
-{
- gro_result_t ret;
-
- skb_mark_napi_id(skb, napi);
- trace_napi_gro_receive_entry(skb);
-
- skb_gro_reset_offset(skb, 0);
-
- ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
- trace_napi_gro_receive_exit(ret);
-
- return ret;
-}
-EXPORT_SYMBOL(napi_gro_receive);
-
-static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
-{
- if (unlikely(skb->pfmemalloc)) {
- consume_skb(skb);
- return;
- }
- __skb_pull(skb, skb_headlen(skb));
- /* restore the reserve we had after netdev_alloc_skb_ip_align() */
- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
- __vlan_hwaccel_clear_tag(skb);
- skb->dev = napi->dev;
- skb->skb_iif = 0;
-
- /* eth_type_trans() assumes pkt_type is PACKET_HOST */
- skb->pkt_type = PACKET_HOST;
-
- skb->encapsulation = 0;
- skb_shinfo(skb)->gso_type = 0;
- skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
- if (unlikely(skb->slow_gro)) {
- skb_orphan(skb);
- skb_ext_reset(skb);
- nf_reset_ct(skb);
- skb->slow_gro = 0;
- }
-
- napi->skb = skb;
-}
-
-struct sk_buff *napi_get_frags(struct napi_struct *napi)
-{
- struct sk_buff *skb = napi->skb;
-
- if (!skb) {
- skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
- if (skb) {
- napi->skb = skb;
- skb_mark_napi_id(skb, napi);
- }
- }
- return skb;
-}
-EXPORT_SYMBOL(napi_get_frags);
-
-static gro_result_t napi_frags_finish(struct napi_struct *napi,
- struct sk_buff *skb,
- gro_result_t ret)
-{
- switch (ret) {
- case GRO_NORMAL:
- case GRO_HELD:
- __skb_push(skb, ETH_HLEN);
- skb->protocol = eth_type_trans(skb, skb->dev);
- if (ret == GRO_NORMAL)
- gro_normal_one(napi, skb, 1);
- break;
-
- case GRO_MERGED_FREE:
- if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
- napi_skb_free_stolen_head(skb);
- else
- napi_reuse_skb(napi, skb);
- break;
-
- case GRO_MERGED:
- case GRO_CONSUMED:
- break;
- }
-
- return ret;
-}
-
-/* Upper GRO stack assumes network header starts at gro_offset=0
- * Drivers could call both napi_gro_frags() and napi_gro_receive()
- * We copy ethernet header into skb->data to have a common layout.
- */
-static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
-{
- struct sk_buff *skb = napi->skb;
- const struct ethhdr *eth;
- unsigned int hlen = sizeof(*eth);
-
- napi->skb = NULL;
-
- skb_reset_mac_header(skb);
- skb_gro_reset_offset(skb, hlen);
-
- if (unlikely(skb_gro_header_hard(skb, hlen))) {
- eth = skb_gro_header_slow(skb, hlen, 0);
- if (unlikely(!eth)) {
- net_warn_ratelimited("%s: dropping impossible skb from %s\n",
- __func__, napi->dev->name);
- napi_reuse_skb(napi, skb);
- return NULL;
- }
- } else {
- eth = (const struct ethhdr *)skb->data;
- gro_pull_from_frag0(skb, hlen);
- NAPI_GRO_CB(skb)->frag0 += hlen;
- NAPI_GRO_CB(skb)->frag0_len -= hlen;
- }
- __skb_pull(skb, hlen);
-
- /*
- * This works because the only protocols we care about don't require
- * special handling.
- * We'll fix it up properly in napi_frags_finish()
- */
- skb->protocol = eth->h_proto;
-
- return skb;
-}
-
-gro_result_t napi_gro_frags(struct napi_struct *napi)
-{
- gro_result_t ret;
- struct sk_buff *skb = napi_frags_skb(napi);
-
- trace_napi_gro_frags_entry(skb);
-
- ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
- trace_napi_gro_frags_exit(ret);
-
- return ret;
-}
-EXPORT_SYMBOL(napi_gro_frags);
-
-/* Compute the checksum from gro_offset and return the folded value
- * after adding in any pseudo checksum.
- */
-__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
-{
- __wsum wsum;
- __sum16 sum;
-
- wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
-
- /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
- sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
- /* See comments in __skb_checksum_complete(). */
- if (likely(!sum)) {
- if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
- !skb->csum_complete_sw)
- netdev_rx_csum_fault(skb->dev, skb);
- }
-
- NAPI_GRO_CB(skb)->csum = wsum;
- NAPI_GRO_CB(skb)->csum_valid = 1;
-
- return sum;
-}
-EXPORT_SYMBOL(__skb_gro_checksum_complete);
-
static void net_rps_send_ipi(struct softnet_data *remsd)
{
#ifdef CONFIG_RPS
@@ -9221,35 +8558,17 @@ bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
EXPORT_SYMBOL(netdev_port_same_parent_id);
/**
- * dev_change_proto_down - update protocol port state information
+ * dev_change_proto_down - set carrier according to proto_down.
+ *
* @dev: device
* @proto_down: new value
- *
- * This info can be used by switch drivers to set the phys state of the
- * port.
*/
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
- const struct net_device_ops *ops = dev->netdev_ops;
-
- if (!ops->ndo_change_proto_down)
+ if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN))
return -EOPNOTSUPP;
if (!netif_device_present(dev))
return -ENODEV;
- return ops->ndo_change_proto_down(dev, proto_down);
-}
-EXPORT_SYMBOL(dev_change_proto_down);
-
-/**
- * dev_change_proto_down_generic - generic implementation for
- * ndo_change_proto_down that sets carrier according to
- * proto_down.
- *
- * @dev: device
- * @proto_down: new value
- */
-int dev_change_proto_down_generic(struct net_device *dev, bool proto_down)
-{
if (proto_down)
netif_carrier_off(dev);
else
@@ -9257,7 +8576,7 @@ int dev_change_proto_down_generic(struct net_device *dev, bool proto_down)
dev->proto_down = proto_down;
return 0;
}
-EXPORT_SYMBOL(dev_change_proto_down_generic);
+EXPORT_SYMBOL(dev_change_proto_down);
/**
* dev_change_proto_down_reason - proto down reason
@@ -11640,8 +10959,6 @@ static int __init net_dev_init(void)
for (i = 0; i < PTYPE_HASH_SIZE; i++)
INIT_LIST_HEAD(&ptype_base[i]);
- INIT_LIST_HEAD(&offload_base);
-
if (register_pernet_subsys(&netdev_net_ops))
goto out;
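The dev_change_proto_down() rework in this dev.c diff removes the per-driver ndo_change_proto_down callback; drivers opt into the single generic carrier-toggling implementation by setting IFF_CHANGE_PROTO_DOWN in priv_flags. A userspace sketch of the refactor's shape: replace a per-object callback with a capability flag plus one shared implementation (flag value, struct layout, and names here are illustrative, not the kernel's):

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define IFF_CHANGE_PROTO_DOWN 0x1	/* illustrative value */

struct net_device {
	unsigned int priv_flags;
	bool carrier_ok;
	bool proto_down;
};

static int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
	if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN))
		return -EOPNOTSUPP;	/* driver did not opt in */
	dev->carrier_ok = !proto_down;	/* the one generic behaviour */
	dev->proto_down = proto_down;
	return 0;
}

int main(void)
{
	struct net_device dev = { .priv_flags = IFF_CHANGE_PROTO_DOWN,
				  .carrier_ok = true };

	dev_change_proto_down(&dev, true);
	printf("carrier=%d proto_down=%d\n", dev.carrier_ok, dev.proto_down);
	return 0;
}
```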
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index f0cb38344126..bead38ca50bd 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -16,6 +16,35 @@
* General list handling functions
*/
+static int __hw_addr_insert(struct netdev_hw_addr_list *list,
+ struct netdev_hw_addr *new, int addr_len)
+{
+ struct rb_node **ins_point = &list->tree.rb_node, *parent = NULL;
+ struct netdev_hw_addr *ha;
+
+ while (*ins_point) {
+ int diff;
+
+ ha = rb_entry(*ins_point, struct netdev_hw_addr, node);
+ diff = memcmp(new->addr, ha->addr, addr_len);
+ if (diff == 0)
+ diff = memcmp(&new->type, &ha->type, sizeof(new->type));
+
+ parent = *ins_point;
+ if (diff < 0)
+ ins_point = &parent->rb_left;
+ else if (diff > 0)
+ ins_point = &parent->rb_right;
+ else
+ return -EEXIST;
+ }
+
+ rb_link_node_rcu(&new->node, parent, ins_point);
+ rb_insert_color(&new->node, &list->tree);
+
+ return 0;
+}
+
static struct netdev_hw_addr*
__hw_addr_create(const unsigned char *addr, int addr_len,
unsigned char addr_type, bool global, bool sync)
@@ -50,11 +79,6 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
if (addr_len > MAX_ADDR_LEN)
return -EINVAL;
- ha = list_first_entry(&list->list, struct netdev_hw_addr, list);
- if (ha && !memcmp(addr, ha->addr, addr_len) &&
- (!addr_type || addr_type == ha->type))
- goto found_it;
-
while (*ins_point) {
int diff;
@@ -69,7 +93,6 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
} else if (diff > 0) {
ins_point = &parent->rb_right;
} else {
-found_it:
if (exclusive)
return -EEXIST;
if (global) {
@@ -94,16 +117,8 @@ found_it:
if (!ha)
return -ENOMEM;
- /* The first address in dev->dev_addrs is pointed to by dev->dev_addr
- * and mutated freely by device drivers and netdev ops, so if we insert
- * it into the tree we'll end up with an invalid rbtree.
- */
- if (list->count > 0) {
- rb_link_node(&ha->node, parent, ins_point);
- rb_insert_color(&ha->node, &list->tree);
- } else {
- RB_CLEAR_NODE(&ha->node);
- }
+ rb_link_node(&ha->node, parent, ins_point);
+ rb_insert_color(&ha->node, &list->tree);
list_add_tail_rcu(&ha->list, &list->list);
list->count++;
@@ -138,8 +153,7 @@ static int __hw_addr_del_entry(struct netdev_hw_addr_list *list,
if (--ha->refcount)
return 0;
- if (!RB_EMPTY_NODE(&ha->node))
- rb_erase(&ha->node, &list->tree);
+ rb_erase(&ha->node, &list->tree);
list_del_rcu(&ha->list);
kfree_rcu(ha, rcu_head);
@@ -151,18 +165,8 @@ static struct netdev_hw_addr *__hw_addr_lookup(struct netdev_hw_addr_list *list,
const unsigned char *addr, int addr_len,
unsigned char addr_type)
{
- struct netdev_hw_addr *ha;
struct rb_node *node;
- /* The first address isn't inserted into the tree because in the dev->dev_addrs
- * list it's the address pointed to by dev->dev_addr which is freely mutated
- * in place, so we need to check it separately.
- */
- ha = list_first_entry(&list->list, struct netdev_hw_addr, list);
- if (ha && !memcmp(addr, ha->addr, addr_len) &&
- (!addr_type || addr_type == ha->type))
- return ha;
-
node = list->tree.rb_node;
while (node) {
@@ -498,6 +502,21 @@ EXPORT_SYMBOL(__hw_addr_init);
* Device addresses handling functions
*/
+/* Check that netdev->dev_addr is not written to directly as this would
+ * break the rbtree layout. All changes should go through dev_addr_set() and co.
+ * Remove this check in mid-2024.
+ */
+void dev_addr_check(struct net_device *dev)
+{
+ if (!memcmp(dev->dev_addr, dev->dev_addr_shadow, MAX_ADDR_LEN))
+ return;
+
+ netdev_warn(dev, "Current addr: %*ph\n", MAX_ADDR_LEN, dev->dev_addr);
+ netdev_warn(dev, "Expected addr: %*ph\n",
+ MAX_ADDR_LEN, dev->dev_addr_shadow);
+ netdev_WARN(dev, "Incorrect netdev->dev_addr\n");
+}
+
/**
* dev_addr_flush - Flush device address list
* @dev: device
@@ -509,11 +528,11 @@ EXPORT_SYMBOL(__hw_addr_init);
void dev_addr_flush(struct net_device *dev)
{
/* rtnl_mutex must be held here */
+ dev_addr_check(dev);
__hw_addr_flush(&dev->dev_addrs);
dev->dev_addr = NULL;
}
-EXPORT_SYMBOL(dev_addr_flush);
/**
* dev_addr_init - Init device address list
@@ -547,7 +566,21 @@ int dev_addr_init(struct net_device *dev)
}
return err;
}
-EXPORT_SYMBOL(dev_addr_init);
+
+void dev_addr_mod(struct net_device *dev, unsigned int offset,
+ const void *addr, size_t len)
+{
+ struct netdev_hw_addr *ha;
+
+ dev_addr_check(dev);
+
+ ha = container_of(dev->dev_addr, struct netdev_hw_addr, addr[0]);
+ rb_erase(&ha->node, &dev->dev_addrs.tree);
+ memcpy(&ha->addr[offset], addr, len);
+ memcpy(&dev->dev_addr_shadow[offset], addr, len);
+ WARN_ON(__hw_addr_insert(&dev->dev_addrs, ha, dev->addr_len));
+}
+EXPORT_SYMBOL(dev_addr_mod);
/**
* dev_addr_add - Add a device address
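The new __hw_addr_insert() orders tree nodes with a two-level key: memcmp() over the address bytes first, then the address type as a tie-breaker. That total ordering is what lets dev_addr_mod() erase the main address, mutate it, and re-insert it without corrupting the tree. A sketch of just that comparison, detached from the rbtree machinery:

```c
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct hw_addr {
	unsigned char addr[ETH_ALEN];
	unsigned char type;
};

/* Two-level key: address bytes first, then type as tie-breaker. */
static int hw_addr_cmp(const struct hw_addr *a, const struct hw_addr *b)
{
	int diff = memcmp(a->addr, b->addr, ETH_ALEN);

	if (diff == 0)
		diff = memcmp(&a->type, &b->type, sizeof(a->type));
	return diff;	/* <0: left subtree, >0: right, 0: duplicate (-EEXIST) */
}

int main(void)
{
	struct hw_addr a = { { 1, 1, 1, 1, 1, 1 }, 1 };
	struct hw_addr b = { { 1, 1, 1, 1, 1, 1 }, 2 };

	printf("cmp=%d\n", hw_addr_cmp(&a, &b));	/* negative: a sorts first */
	return 0;
}
```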
diff --git a/net/core/dev_addr_lists_test.c b/net/core/dev_addr_lists_test.c
new file mode 100644
index 000000000000..049cfbc58aa9
--- /dev/null
+++ b/net/core/dev_addr_lists_test.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <kunit/test.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+
+static const struct net_device_ops dummy_netdev_ops = {
+};
+
+struct dev_addr_test_priv {
+ u32 addr_seen;
+};
+
+static int dev_addr_test_sync(struct net_device *netdev, const unsigned char *a)
+{
+ struct dev_addr_test_priv *datp = netdev_priv(netdev);
+
+ if (a[0] < 31 && !memchr_inv(a, a[0], ETH_ALEN))
+ datp->addr_seen |= 1 << a[0];
+ return 0;
+}
+
+static int dev_addr_test_unsync(struct net_device *netdev,
+ const unsigned char *a)
+{
+ struct dev_addr_test_priv *datp = netdev_priv(netdev);
+
+ if (a[0] < 31 && !memchr_inv(a, a[0], ETH_ALEN))
+ datp->addr_seen &= ~(1 << a[0]);
+ return 0;
+}
+
+static int dev_addr_test_init(struct kunit *test)
+{
+ struct dev_addr_test_priv *datp;
+ struct net_device *netdev;
+ int err;
+
+ netdev = alloc_etherdev(sizeof(*datp));
+ KUNIT_ASSERT_TRUE(test, !!netdev);
+
+ test->priv = netdev;
+ netdev->netdev_ops = &dummy_netdev_ops;
+
+ err = register_netdev(netdev);
+ if (err) {
+ free_netdev(netdev);
+ KUNIT_FAIL(test, "Can't register netdev %d", err);
+ }
+
+ rtnl_lock();
+ return 0;
+}
+
+static void dev_addr_test_exit(struct kunit *test)
+{
+ struct net_device *netdev = test->priv;
+
+ rtnl_unlock();
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+}
+
+static void dev_addr_test_basic(struct kunit *test)
+{
+ struct net_device *netdev = test->priv;
+ u8 addr[ETH_ALEN];
+
+ KUNIT_EXPECT_TRUE(test, !!netdev->dev_addr);
+
+ memset(addr, 2, sizeof(addr));
+ eth_hw_addr_set(netdev, addr);
+ KUNIT_EXPECT_EQ(test, 0, memcmp(netdev->dev_addr, addr, sizeof(addr)));
+
+ memset(addr, 3, sizeof(addr));
+ dev_addr_set(netdev, addr);
+ KUNIT_EXPECT_EQ(test, 0, memcmp(netdev->dev_addr, addr, sizeof(addr)));
+}
+
+static void dev_addr_test_sync_one(struct kunit *test)
+{
+ struct net_device *netdev = test->priv;
+ struct dev_addr_test_priv *datp;
+ u8 addr[ETH_ALEN];
+
+ datp = netdev_priv(netdev);
+
+ memset(addr, 1, sizeof(addr));
+ eth_hw_addr_set(netdev, addr);
+
+ __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync,
+ dev_addr_test_unsync);
+ KUNIT_EXPECT_EQ(test, 2, datp->addr_seen);
+
+ memset(addr, 2, sizeof(addr));
+ eth_hw_addr_set(netdev, addr);
+
+ datp->addr_seen = 0;
+ __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync,
+ dev_addr_test_unsync);
+ /* It's not going to sync anything because the main address is
+ * considered synced and we overwrite in place.
+ */
+ KUNIT_EXPECT_EQ(test, 0, datp->addr_seen);
+}
+
+static void dev_addr_test_add_del(struct kunit *test)
+{
+ struct net_device *netdev = test->priv;
+ struct dev_addr_test_priv *datp;
+ u8 addr[ETH_ALEN];
+ int i;
+
+ datp = netdev_priv(netdev);
+
+ for (i = 1; i < 4; i++) {
+ memset(addr, i, sizeof(addr));
+ KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+ }
+ /* Add 3 again */
+ KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+
+ __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync,
+ dev_addr_test_unsync);
+ KUNIT_EXPECT_EQ(test, 0xf, datp->addr_seen);
+
+ KUNIT_EXPECT_EQ(test, 0, dev_addr_del(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+
+ __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync,
+ dev_addr_test_unsync);
+ KUNIT_EXPECT_EQ(test, 0xf, datp->addr_seen);
+
+ for (i = 1; i < 4; i++) {
+ memset(addr, i, sizeof(addr));
+ KUNIT_EXPECT_EQ(test, 0, dev_addr_del(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+ }
+
+ __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync,
+ dev_addr_test_unsync);
+ KUNIT_EXPECT_EQ(test, 1, datp->addr_seen);
+}
+
+static void dev_addr_test_del_main(struct kunit *test)
+{
+ struct net_device *netdev = test->priv;
+ u8 addr[ETH_ALEN];
+
+ memset(addr, 1, sizeof(addr));
+ eth_hw_addr_set(netdev, addr);
+
+ KUNIT_EXPECT_EQ(test, -ENOENT, dev_addr_del(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+ KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+ KUNIT_EXPECT_EQ(test, 0, dev_addr_del(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+ KUNIT_EXPECT_EQ(test, -ENOENT, dev_addr_del(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+}
+
+static void dev_addr_test_add_set(struct kunit *test)
+{
+ struct net_device *netdev = test->priv;
+ struct dev_addr_test_priv *datp;
+ u8 addr[ETH_ALEN];
+ int i;
+
+ datp = netdev_priv(netdev);
+
+ /* There is no external API like dev_addr_add_excl(),
+ * so shuffle the tree a little bit and exploit aliasing.
+ */
+ for (i = 1; i < 16; i++) {
+ memset(addr, i, sizeof(addr));
+ KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+ }
+
+ memset(addr, i, sizeof(addr));
+ eth_hw_addr_set(netdev, addr);
+ KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+ memset(addr, 0, sizeof(addr));
+ eth_hw_addr_set(netdev, addr);
+
+ __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync,
+ dev_addr_test_unsync);
+ KUNIT_EXPECT_EQ(test, 0xffff, datp->addr_seen);
+}
+
+static void dev_addr_test_add_excl(struct kunit *test)
+{
+ struct net_device *netdev = test->priv;
+ u8 addr[ETH_ALEN];
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ memset(addr, i, sizeof(addr));
+ KUNIT_EXPECT_EQ(test, 0, dev_uc_add_excl(netdev, addr));
+ }
+ KUNIT_EXPECT_EQ(test, -EEXIST, dev_uc_add_excl(netdev, addr));
+
+ for (i = 0; i < 10; i += 2) {
+ memset(addr, i, sizeof(addr));
+ KUNIT_EXPECT_EQ(test, 0, dev_uc_del(netdev, addr));
+ }
+ for (i = 1; i < 10; i += 2) {
+ memset(addr, i, sizeof(addr));
+ KUNIT_EXPECT_EQ(test, -EEXIST, dev_uc_add_excl(netdev, addr));
+ }
+}
+
+static struct kunit_case dev_addr_test_cases[] = {
+ KUNIT_CASE(dev_addr_test_basic),
+ KUNIT_CASE(dev_addr_test_sync_one),
+ KUNIT_CASE(dev_addr_test_add_del),
+ KUNIT_CASE(dev_addr_test_del_main),
+ KUNIT_CASE(dev_addr_test_add_set),
+ KUNIT_CASE(dev_addr_test_add_excl),
+ {}
+};
+
+static struct kunit_suite dev_addr_test_suite = {
+ .name = "dev-addr-list-test",
+ .test_cases = dev_addr_test_cases,
+ .init = dev_addr_test_init,
+ .exit = dev_addr_test_exit,
+};
+kunit_test_suite(dev_addr_test_suite);
+
+MODULE_LICENSE("GPL");
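The sync/unsync callbacks in this KUnit file treat an address whose bytes are all equal to its first byte as a test address and record it as one bit in addr_seen; memchr_inv() returns NULL exactly when every byte matches. A userspace reimplementation of that trick, with memchr_inv() as a stand-in for the kernel helper:

```c
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Stand-in for the kernel's memchr_inv(): NULL iff every byte equals c. */
static const void *memchr_inv(const void *s, int c, size_t n)
{
	const unsigned char *p = s;

	while (n--) {
		if (*p != (unsigned char)c)
			return p;
		p++;
	}
	return NULL;
}

int main(void)
{
	unsigned char addr[ETH_ALEN] = { 3, 3, 3, 3, 3, 3 };
	unsigned int addr_seen = 0;

	if (addr[0] < 31 && !memchr_inv(addr, addr[0], ETH_ALEN))
		addr_seen |= 1u << addr[0];	/* all-0x03 MAC -> bit 3 */
	printf("addr_seen=0x%x\n", addr_seen);	/* 0x8 */
	return 0;
}
```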
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 5ad72dbfcd07..d144a62cbf73 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -4432,6 +4432,11 @@ static const struct devlink_param devlink_param_generic[] = {
.name = DEVLINK_PARAM_GENERIC_ENABLE_VNET_NAME,
.type = DEVLINK_PARAM_GENERIC_ENABLE_VNET_TYPE,
},
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
+ .name = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_NAME,
+ .type = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_TYPE,
+ },
};
static int devlink_param_generic_verify(const struct devlink_param *param)
diff --git a/net/core/filter.c b/net/core/filter.c
index 6102f093d59a..8271624a19aa 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -301,7 +301,7 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
break;
case SKF_AD_PKTTYPE:
- *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
+ *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET);
*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
@@ -323,7 +323,7 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
offsetof(struct sk_buff, vlan_tci));
break;
case SKF_AD_VLAN_TAG_PRESENT:
- *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET());
+ *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET);
if (PKT_VLAN_PRESENT_BIT)
*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
if (PKT_VLAN_PRESENT_BIT < 7)
@@ -8029,7 +8029,7 @@ static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
* (Fast-path, otherwise approximation that we might be
* a clone, do the rest in helper.)
*/
- *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
+ *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET);
*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);
@@ -8617,7 +8617,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
case offsetof(struct __sk_buff, pkt_type):
*target_size = 1;
*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
- PKT_TYPE_OFFSET());
+ PKT_TYPE_OFFSET);
*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
@@ -8642,7 +8642,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
case offsetof(struct __sk_buff, vlan_present):
*target_size = 1;
*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
- PKT_VLAN_PRESENT_OFFSET());
+ PKT_VLAN_PRESENT_OFFSET);
if (PKT_VLAN_PRESENT_BIT)
*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT);
if (PKT_VLAN_PRESENT_BIT < 7)
@@ -10543,6 +10543,7 @@ static bool sk_lookup_is_valid_access(int off, int size,
case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]):
case bpf_ctx_range(struct bpf_sk_lookup, remote_port):
case bpf_ctx_range(struct bpf_sk_lookup, local_port):
+ case bpf_ctx_range(struct bpf_sk_lookup, ingress_ifindex):
bpf_ctx_record_field_size(info, sizeof(__u32));
return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32));
@@ -10632,6 +10633,12 @@ static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type,
bpf_target_off(struct bpf_sk_lookup_kern,
dport, 2, target_size));
break;
+
+ case offsetof(struct bpf_sk_lookup, ingress_ifindex):
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
+ bpf_target_off(struct bpf_sk_lookup_kern,
+ ingress_ifindex, 4, target_size));
+ break;
}
return insn - insn_buf;
@@ -10656,14 +10663,10 @@ void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog)
bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog);
}
-#ifdef CONFIG_DEBUG_INFO_BTF
-BTF_ID_LIST_GLOBAL(btf_sock_ids)
+BTF_ID_LIST_GLOBAL(btf_sock_ids, MAX_BTF_SOCK_TYPE)
#define BTF_SOCK_TYPE(name, type) BTF_ID(struct, type)
BTF_SOCK_TYPE_xxx
#undef BTF_SOCK_TYPE
-#else
-u32 btf_sock_ids[MAX_BTF_SOCK_TYPE];
-#endif
BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk)
{
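The filter.c changes drop the parentheses because PKT_TYPE_OFFSET and friends became plain constants rather than function-like macros. The underlying idea: offsetof() cannot name a bitfield, so a one-byte marker member is overlaid on the byte that holds the bits and the offset is taken of the marker. A sketch with an illustrative struct (not struct sk_buff), assuming little-endian bitfield layout, which is why the converted code above keeps a __BIG_ENDIAN_BITFIELD branch:

```c
#include <stddef.h>
#include <stdio.h>

struct pkt {
	void *dev;
	union {
		unsigned char __pkt_type_byte;	/* marker over the flag byte */
		struct {
			unsigned char pkt_type:3;
			unsigned char other:5;
		};
	};
};

#define PKT_TYPE_OFFSET	offsetof(struct pkt, __pkt_type_byte)
#define PKT_TYPE_MAX	7

int main(void)
{
	struct pkt p = { .pkt_type = 5 };
	const unsigned char *raw = (const unsigned char *)&p;

	/* Load the byte at the constant offset and mask the low bits,
	 * mirroring the converted BPF instructions on little-endian;
	 * a big-endian bitfield layout would need a shift first.
	 */
	printf("pkt_type=%u\n", raw[PKT_TYPE_OFFSET] & PKT_TYPE_MAX);
	return 0;
}
```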
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 3255f57f5131..257976cb55ce 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1460,7 +1460,7 @@ out_bad:
}
EXPORT_SYMBOL(__skb_flow_dissect);
-static siphash_key_t hashrnd __read_mostly;
+static siphash_aligned_key_t hashrnd;
static __always_inline void __flow_hash_secret_init(void)
{
net_get_random_once(&hashrnd, sizeof(hashrnd));
diff --git a/net/core/gro.c b/net/core/gro.c
new file mode 100644
index 000000000000..8ec8b44596da
--- /dev/null
+++ b/net/core/gro.c
@@ -0,0 +1,766 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <net/gro.h>
+#include <net/dst_metadata.h>
+#include <net/busy_poll.h>
+#include <trace/events/net.h>
+
+#define MAX_GRO_SKBS 8
+
+/* This should be increased if a protocol with a bigger head is added. */
+#define GRO_MAX_HEAD (MAX_HEADER + 128)
+
+static DEFINE_SPINLOCK(offload_lock);
+static struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
+/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
+int gro_normal_batch __read_mostly = 8;
+
+/**
+ * dev_add_offload - register offload handlers
+ * @po: protocol offload declaration
+ *
+ * Add protocol offload handlers to the networking stack. The passed
+ * &proto_offload is linked into kernel lists and may not be freed until
+ * it has been removed from the kernel lists.
+ *
+ * This call does not sleep therefore it can not
+ * guarantee all CPU's that are in middle of receiving packets
+ * will see the new offload handlers (until the next received packet).
+ */
+void dev_add_offload(struct packet_offload *po)
+{
+ struct packet_offload *elem;
+
+ spin_lock(&offload_lock);
+ list_for_each_entry(elem, &offload_base, list) {
+ if (po->priority < elem->priority)
+ break;
+ }
+ list_add_rcu(&po->list, elem->list.prev);
+ spin_unlock(&offload_lock);
+}
+EXPORT_SYMBOL(dev_add_offload);
+
+/**
+ * __dev_remove_offload - remove offload handler
+ * @po: packet offload declaration
+ *
+ * Remove a protocol offload handler that was previously added to the
+ * kernel offload handlers by dev_add_offload(). The passed &offload_type
+ * is removed from the kernel lists and can be freed or reused once this
+ * function returns.
+ *
+ * The packet type might still be in use by receivers
+ * and must not be freed until after all the CPU's have gone
+ * through a quiescent state.
+ */
+static void __dev_remove_offload(struct packet_offload *po)
+{
+ struct list_head *head = &offload_base;
+ struct packet_offload *po1;
+
+ spin_lock(&offload_lock);
+
+ list_for_each_entry(po1, head, list) {
+ if (po == po1) {
+ list_del_rcu(&po->list);
+ goto out;
+ }
+ }
+
+ pr_warn("dev_remove_offload: %p not found\n", po);
+out:
+ spin_unlock(&offload_lock);
+}
+
+/**
+ * dev_remove_offload - remove packet offload handler
+ * @po: packet offload declaration
+ *
+ * Remove a packet offload handler that was previously added to the kernel
+ * offload handlers by dev_add_offload(). The passed &offload_type is
+ * removed from the kernel lists and can be freed or reused once this
+ * function returns.
+ *
+ * This call sleeps to guarantee that no CPU is looking at the packet
+ * type after return.
+ */
+void dev_remove_offload(struct packet_offload *po)
+{
+ __dev_remove_offload(po);
+
+ synchronize_net();
+}
+EXPORT_SYMBOL(dev_remove_offload);
+
+/**
+ * skb_mac_gso_segment - mac layer segmentation handler.
+ * @skb: buffer to segment
+ * @features: features for the output path (see dev->features)
+ */
+struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
+ struct packet_offload *ptype;
+ int vlan_depth = skb->mac_len;
+ __be16 type = skb_network_protocol(skb, &vlan_depth);
+
+ if (unlikely(!type))
+ return ERR_PTR(-EINVAL);
+
+ __skb_pull(skb, vlan_depth);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype, &offload_base, list) {
+ if (ptype->type == type && ptype->callbacks.gso_segment) {
+ segs = ptype->callbacks.gso_segment(skb, features);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ __skb_push(skb, skb->data - skb_mac_header(skb));
+
+ return segs;
+}
+EXPORT_SYMBOL(skb_mac_gso_segment);
+
+int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
+{
+ struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
+ unsigned int offset = skb_gro_offset(skb);
+ unsigned int headlen = skb_headlen(skb);
+ unsigned int len = skb_gro_len(skb);
+ unsigned int delta_truesize;
+ unsigned int new_truesize;
+ struct sk_buff *lp;
+
+ if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
+ return -E2BIG;
+
+ lp = NAPI_GRO_CB(p)->last;
+ pinfo = skb_shinfo(lp);
+
+ if (headlen <= offset) {
+ skb_frag_t *frag;
+ skb_frag_t *frag2;
+ int i = skbinfo->nr_frags;
+ int nr_frags = pinfo->nr_frags + i;
+
+ if (nr_frags > MAX_SKB_FRAGS)
+ goto merge;
+
+ offset -= headlen;
+ pinfo->nr_frags = nr_frags;
+ skbinfo->nr_frags = 0;
+
+ frag = pinfo->frags + nr_frags;
+ frag2 = skbinfo->frags + i;
+ do {
+ *--frag = *--frag2;
+ } while (--i);
+
+ skb_frag_off_add(frag, offset);
+ skb_frag_size_sub(frag, offset);
+
+ /* all fragments truesize : remove (head size + sk_buff) */
+ new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
+ delta_truesize = skb->truesize - new_truesize;
+
+ skb->truesize = new_truesize;
+ skb->len -= skb->data_len;
+ skb->data_len = 0;
+
+ NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
+ goto done;
+ } else if (skb->head_frag) {
+ int nr_frags = pinfo->nr_frags;
+ skb_frag_t *frag = pinfo->frags + nr_frags;
+ struct page *page = virt_to_head_page(skb->head);
+ unsigned int first_size = headlen - offset;
+ unsigned int first_offset;
+
+ if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
+ goto merge;
+
+ first_offset = skb->data -
+ (unsigned char *)page_address(page) +
+ offset;
+
+ pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
+
+ __skb_frag_set_page(frag, page);
+ skb_frag_off_set(frag, first_offset);
+ skb_frag_size_set(frag, first_size);
+
+ memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
+ /* We don't need to clear skbinfo->nr_frags here */
+
+ new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
+ delta_truesize = skb->truesize - new_truesize;
+ skb->truesize = new_truesize;
+ NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
+ goto done;
+ }
+
+merge:
+ /* sk ownership - if any - completely transferred to the aggregated packet */
+ skb->destructor = NULL;
+ delta_truesize = skb->truesize;
+ if (offset > headlen) {
+ unsigned int eat = offset - headlen;
+
+ skb_frag_off_add(&skbinfo->frags[0], eat);
+ skb_frag_size_sub(&skbinfo->frags[0], eat);
+ skb->data_len -= eat;
+ skb->len -= eat;
+ offset = headlen;
+ }
+
+ __skb_pull(skb, offset);
+
+ if (NAPI_GRO_CB(p)->last == p)
+ skb_shinfo(p)->frag_list = skb;
+ else
+ NAPI_GRO_CB(p)->last->next = skb;
+ NAPI_GRO_CB(p)->last = skb;
+ __skb_header_release(skb);
+ lp = p;
+
+done:
+ NAPI_GRO_CB(p)->count++;
+ p->data_len += len;
+ p->truesize += delta_truesize;
+ p->len += len;
+ if (lp != p) {
+ lp->data_len += len;
+ lp->truesize += delta_truesize;
+ lp->len += len;
+ }
+ NAPI_GRO_CB(skb)->same_flow = 1;
+ return 0;
+}
+
+
+static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
+{
+ struct packet_offload *ptype;
+ __be16 type = skb->protocol;
+ struct list_head *head = &offload_base;
+ int err = -ENOENT;
+
+ BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
+
+ if (NAPI_GRO_CB(skb)->count == 1) {
+ skb_shinfo(skb)->gso_size = 0;
+ goto out;
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype, head, list) {
+ if (ptype->type != type || !ptype->callbacks.gro_complete)
+ continue;
+
+ err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
+ ipv6_gro_complete, inet_gro_complete,
+ skb, 0);
+ break;
+ }
+ rcu_read_unlock();
+
+ if (err) {
+ WARN_ON(&ptype->list == head);
+ kfree_skb(skb);
+ return;
+ }
+
+out:
+ gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
+}
+
+static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
+ bool flush_old)
+{
+ struct list_head *head = &napi->gro_hash[index].list;
+ struct sk_buff *skb, *p;
+
+ list_for_each_entry_safe_reverse(skb, p, head, list) {
+ if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
+ return;
+ skb_list_del_init(skb);
+ napi_gro_complete(napi, skb);
+ napi->gro_hash[index].count--;
+ }
+
+ if (!napi->gro_hash[index].count)
+ __clear_bit(index, &napi->gro_bitmask);
+}
+
+/* napi->gro_hash[].list contains packets ordered by age.
+ * youngest packets at the head of it.
+ * Complete skbs in reverse order to reduce latencies.
+ */
+void napi_gro_flush(struct napi_struct *napi, bool flush_old)
+{
+ unsigned long bitmask = napi->gro_bitmask;
+ unsigned int i, base = ~0U;
+
+ while ((i = ffs(bitmask)) != 0) {
+ bitmask >>= i;
+ base += i;
+ __napi_gro_flush_chain(napi, base, flush_old);
+ }
+}
+EXPORT_SYMBOL(napi_gro_flush);
+
+static void gro_list_prepare(const struct list_head *head,
+ const struct sk_buff *skb)
+{
+ unsigned int maclen = skb->dev->hard_header_len;
+ u32 hash = skb_get_hash_raw(skb);
+ struct sk_buff *p;
+
+ list_for_each_entry(p, head, list) {
+ unsigned long diffs;
+
+ NAPI_GRO_CB(p)->flush = 0;
+
+ if (hash != skb_get_hash_raw(p)) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+
+ diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
+ diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
+ if (skb_vlan_tag_present(p))
+ diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
+ diffs |= skb_metadata_differs(p, skb);
+ if (maclen == ETH_HLEN)
+ diffs |= compare_ether_header(skb_mac_header(p),
+ skb_mac_header(skb));
+ else if (!diffs)
+ diffs = memcmp(skb_mac_header(p),
+ skb_mac_header(skb),
+ maclen);
+
+ /* in most common scenarios 'slow_gro' is 0
+ * otherwise we are already on some slower paths
+ * either skip all the infrequent tests altogether or
+ * avoid trying too hard to skip each of them individually
+ */
+ if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
+#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ struct tc_skb_ext *skb_ext;
+ struct tc_skb_ext *p_ext;
+#endif
+
+ diffs |= p->sk != skb->sk;
+ diffs |= skb_metadata_dst_cmp(p, skb);
+ diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);
+
+#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ skb_ext = skb_ext_find(skb, TC_SKB_EXT);
+ p_ext = skb_ext_find(p, TC_SKB_EXT);
+
+ diffs |= (!!p_ext) ^ (!!skb_ext);
+ if (!diffs && unlikely(skb_ext))
+ diffs |= p_ext->chain ^ skb_ext->chain;
+#endif
+ }
+
+ NAPI_GRO_CB(p)->same_flow = !diffs;
+ }
+}
+
+static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
+{
+ const struct skb_shared_info *pinfo = skb_shinfo(skb);
+ const skb_frag_t *frag0 = &pinfo->frags[0];
+
+ NAPI_GRO_CB(skb)->data_offset = 0;
+ NAPI_GRO_CB(skb)->frag0 = NULL;
+ NAPI_GRO_CB(skb)->frag0_len = 0;
+
+ if (!skb_headlen(skb) && pinfo->nr_frags &&
+ !PageHighMem(skb_frag_page(frag0)) &&
+ (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
+ NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
+ NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
+ skb_frag_size(frag0),
+ skb->end - skb->tail);
+ }
+}
+
+static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
+{
+ struct skb_shared_info *pinfo = skb_shinfo(skb);
+
+ BUG_ON(skb->end - skb->tail < grow);
+
+ memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
+
+ skb->data_len -= grow;
+ skb->tail += grow;
+
+ skb_frag_off_add(&pinfo->frags[0], grow);
+ skb_frag_size_sub(&pinfo->frags[0], grow);
+
+ if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
+ skb_frag_unref(skb, 0);
+ memmove(pinfo->frags, pinfo->frags + 1,
+ --pinfo->nr_frags * sizeof(pinfo->frags[0]));
+ }
+}
+
+static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
+{
+ struct sk_buff *oldest;
+
+ oldest = list_last_entry(head, struct sk_buff, list);
+
+ /* We are called with head length >= MAX_GRO_SKBS, so this is
+ * impossible.
+ */
+ if (WARN_ON_ONCE(!oldest))
+ return;
+
+ /* Do not adjust napi->gro_hash[].count, caller is adding a new
+ * SKB to the chain.
+ */
+ skb_list_del_init(oldest);
+ napi_gro_complete(napi, oldest);
+}
+
+static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+ u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
+ struct gro_list *gro_list = &napi->gro_hash[bucket];
+ struct list_head *head = &offload_base;
+ struct packet_offload *ptype;
+ __be16 type = skb->protocol;
+ struct sk_buff *pp = NULL;
+ enum gro_result ret;
+ int same_flow;
+ int grow;
+
+ if (netif_elide_gro(skb->dev))
+ goto normal;
+
+ gro_list_prepare(&gro_list->list, skb);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype, head, list) {
+ if (ptype->type != type || !ptype->callbacks.gro_receive)
+ continue;
+
+ skb_set_network_header(skb, skb_gro_offset(skb));
+ skb_reset_mac_len(skb);
+ NAPI_GRO_CB(skb)->same_flow = 0;
+ NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
+ NAPI_GRO_CB(skb)->free = 0;
+ NAPI_GRO_CB(skb)->encap_mark = 0;
+ NAPI_GRO_CB(skb)->recursion_counter = 0;
+ NAPI_GRO_CB(skb)->is_fou = 0;
+ NAPI_GRO_CB(skb)->is_atomic = 1;
+ NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
+
+ /* Setup for GRO checksum validation */
+ switch (skb->ip_summed) {
+ case CHECKSUM_COMPLETE:
+ NAPI_GRO_CB(skb)->csum = skb->csum;
+ NAPI_GRO_CB(skb)->csum_valid = 1;
+ NAPI_GRO_CB(skb)->csum_cnt = 0;
+ break;
+ case CHECKSUM_UNNECESSARY:
+ NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
+ NAPI_GRO_CB(skb)->csum_valid = 0;
+ break;
+ default:
+ NAPI_GRO_CB(skb)->csum_cnt = 0;
+ NAPI_GRO_CB(skb)->csum_valid = 0;
+ }
+
+ pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
+ ipv6_gro_receive, inet_gro_receive,
+ &gro_list->list, skb);
+ break;
+ }
+ rcu_read_unlock();
+
+ if (&ptype->list == head)
+ goto normal;
+
+ if (PTR_ERR(pp) == -EINPROGRESS) {
+ ret = GRO_CONSUMED;
+ goto ok;
+ }
+
+ same_flow = NAPI_GRO_CB(skb)->same_flow;
+ ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
+
+ if (pp) {
+ skb_list_del_init(pp);
+ napi_gro_complete(napi, pp);
+ gro_list->count--;
+ }
+
+ if (same_flow)
+ goto ok;
+
+ if (NAPI_GRO_CB(skb)->flush)
+ goto normal;
+
+ if (unlikely(gro_list->count >= MAX_GRO_SKBS))
+ gro_flush_oldest(napi, &gro_list->list);
+ else
+ gro_list->count++;
+
+ NAPI_GRO_CB(skb)->count = 1;
+ NAPI_GRO_CB(skb)->age = jiffies;
+ NAPI_GRO_CB(skb)->last = skb;
+ skb_shinfo(skb)->gso_size = skb_gro_len(skb);
+ list_add(&skb->list, &gro_list->list);
+ ret = GRO_HELD;
+
+pull:
+ grow = skb_gro_offset(skb) - skb_headlen(skb);
+ if (grow > 0)
+ gro_pull_from_frag0(skb, grow);
+ok:
+ if (gro_list->count) {
+ if (!test_bit(bucket, &napi->gro_bitmask))
+ __set_bit(bucket, &napi->gro_bitmask);
+ } else if (test_bit(bucket, &napi->gro_bitmask)) {
+ __clear_bit(bucket, &napi->gro_bitmask);
+ }
+
+ return ret;
+
+normal:
+ ret = GRO_NORMAL;
+ goto pull;
+}
+
+struct packet_offload *gro_find_receive_by_type(__be16 type)
+{
+ struct list_head *offload_head = &offload_base;
+ struct packet_offload *ptype;
+
+ list_for_each_entry_rcu(ptype, offload_head, list) {
+ if (ptype->type != type || !ptype->callbacks.gro_receive)
+ continue;
+ return ptype;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(gro_find_receive_by_type);
+
+struct packet_offload *gro_find_complete_by_type(__be16 type)
+{
+ struct list_head *offload_head = &offload_base;
+ struct packet_offload *ptype;
+
+ list_for_each_entry_rcu(ptype, offload_head, list) {
+ if (ptype->type != type || !ptype->callbacks.gro_complete)
+ continue;
+ return ptype;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(gro_find_complete_by_type);
+
+static gro_result_t napi_skb_finish(struct napi_struct *napi,
+ struct sk_buff *skb,
+ gro_result_t ret)
+{
+ switch (ret) {
+ case GRO_NORMAL:
+ gro_normal_one(napi, skb, 1);
+ break;
+
+ case GRO_MERGED_FREE:
+ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+ napi_skb_free_stolen_head(skb);
+ else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
+ __kfree_skb(skb);
+ else
+ __kfree_skb_defer(skb);
+ break;
+
+ case GRO_HELD:
+ case GRO_MERGED:
+ case GRO_CONSUMED:
+ break;
+ }
+
+ return ret;
+}
+
+gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+ gro_result_t ret;
+
+ skb_mark_napi_id(skb, napi);
+ trace_napi_gro_receive_entry(skb);
+
+ skb_gro_reset_offset(skb, 0);
+
+ ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
+ trace_napi_gro_receive_exit(ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(napi_gro_receive);
+
+static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
+{
+ if (unlikely(skb->pfmemalloc)) {
+ consume_skb(skb);
+ return;
+ }
+ __skb_pull(skb, skb_headlen(skb));
+ /* restore the reserve we had after netdev_alloc_skb_ip_align() */
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
+ __vlan_hwaccel_clear_tag(skb);
+ skb->dev = napi->dev;
+ skb->skb_iif = 0;
+
+ /* eth_type_trans() assumes pkt_type is PACKET_HOST */
+ skb->pkt_type = PACKET_HOST;
+
+ skb->encapsulation = 0;
+ skb_shinfo(skb)->gso_type = 0;
+ skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
+ if (unlikely(skb->slow_gro)) {
+ skb_orphan(skb);
+ skb_ext_reset(skb);
+ nf_reset_ct(skb);
+ skb->slow_gro = 0;
+ }
+
+ napi->skb = skb;
+}
+
+struct sk_buff *napi_get_frags(struct napi_struct *napi)
+{
+ struct sk_buff *skb = napi->skb;
+
+ if (!skb) {
+ skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
+ if (skb) {
+ napi->skb = skb;
+ skb_mark_napi_id(skb, napi);
+ }
+ }
+ return skb;
+}
+EXPORT_SYMBOL(napi_get_frags);
+
+static gro_result_t napi_frags_finish(struct napi_struct *napi,
+ struct sk_buff *skb,
+ gro_result_t ret)
+{
+ switch (ret) {
+ case GRO_NORMAL:
+ case GRO_HELD:
+ __skb_push(skb, ETH_HLEN);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ if (ret == GRO_NORMAL)
+ gro_normal_one(napi, skb, 1);
+ break;
+
+ case GRO_MERGED_FREE:
+ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+ napi_skb_free_stolen_head(skb);
+ else
+ napi_reuse_skb(napi, skb);
+ break;
+
+ case GRO_MERGED:
+ case GRO_CONSUMED:
+ break;
+ }
+
+ return ret;
+}
+
+/* Upper GRO stack assumes network header starts at gro_offset=0
+ * Drivers could call both napi_gro_frags() and napi_gro_receive()
+ * We copy ethernet header into skb->data to have a common layout.
+ */
+static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
+{
+ struct sk_buff *skb = napi->skb;
+ const struct ethhdr *eth;
+ unsigned int hlen = sizeof(*eth);
+
+ napi->skb = NULL;
+
+ skb_reset_mac_header(skb);
+ skb_gro_reset_offset(skb, hlen);
+
+ if (unlikely(skb_gro_header_hard(skb, hlen))) {
+ eth = skb_gro_header_slow(skb, hlen, 0);
+ if (unlikely(!eth)) {
+ net_warn_ratelimited("%s: dropping impossible skb from %s\n",
+ __func__, napi->dev->name);
+ napi_reuse_skb(napi, skb);
+ return NULL;
+ }
+ } else {
+ eth = (const struct ethhdr *)skb->data;
+ gro_pull_from_frag0(skb, hlen);
+ NAPI_GRO_CB(skb)->frag0 += hlen;
+ NAPI_GRO_CB(skb)->frag0_len -= hlen;
+ }
+ __skb_pull(skb, hlen);
+
+ /*
+ * This works because the only protocols we care about don't require
+ * special handling.
+ * We'll fix it up properly in napi_frags_finish()
+ */
+ skb->protocol = eth->h_proto;
+
+ return skb;
+}
+
+gro_result_t napi_gro_frags(struct napi_struct *napi)
+{
+ gro_result_t ret;
+ struct sk_buff *skb = napi_frags_skb(napi);
+
+ trace_napi_gro_frags_entry(skb);
+
+ ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
+ trace_napi_gro_frags_exit(ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(napi_gro_frags);
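napi_get_frags()/napi_gro_frags() serve drivers that receive straight into pages and never build a linear ethernet header themselves; napi_frags_skb() above recovers it from frag0. A hedged sketch of that calling pattern (mydrv_* names are hypothetical):

	static void mydrv_rx_page(struct napi_struct *napi, struct page *page,
				  unsigned int off, unsigned int len,
				  unsigned int truesize)
	{
		struct sk_buff *skb = napi_get_frags(napi);

		if (unlikely(!skb))
			return;	/* drop: no skb available */
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off,
				len, truesize);
		napi_gro_frags(napi);	/* consumes napi->skb */
	}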
+
+/* Compute the checksum from gro_offset and return the folded value
+ * after adding in any pseudo checksum.
+ */
+__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
+{
+ __wsum wsum;
+ __sum16 sum;
+
+ wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
+
+ /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
+ sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
+ /* See comments in __skb_checksum_complete(). */
+ if (likely(!sum)) {
+ if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
+ !skb->csum_complete_sw)
+ netdev_rx_csum_fault(skb->dev, skb);
+ }
+
+ NAPI_GRO_CB(skb)->csum = wsum;
+ NAPI_GRO_CB(skb)->csum_valid = 1;
+
+ return sum;
+}
+EXPORT_SYMBOL(__skb_gro_checksum_complete);
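Transport GRO handlers normally reach __skb_gro_checksum_complete() through the validate helpers rather than calling it directly; roughly as tcp4_gro_receive() does (a sketch, not code from this patch):

	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}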
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 9c01c642cf9e..affe34d71d31 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -488,14 +488,6 @@ static ssize_t proto_down_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- struct net_device *netdev = to_net_dev(dev);
-
- /* The check is also done in change_proto_down; this helps returning
- * early without hitting the trylock/restart in netdev_store.
- */
- if (!netdev->netdev_ops->ndo_change_proto_down)
- return -EOPNOTSUPP;
-
return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);
@@ -1201,11 +1193,7 @@ static const struct sysfs_ops netdev_queue_sysfs_ops = {
static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
{
- unsigned long trans_timeout;
-
- spin_lock_irq(&queue->_xmit_lock);
- trans_timeout = queue->trans_timeout;
- spin_unlock_irq(&queue->_xmit_lock);
+ unsigned long trans_timeout = atomic_long_read(&queue->trans_timeout);
return sprintf(buf, fmt_ulong, trans_timeout);
}
@@ -1452,7 +1440,7 @@ static ssize_t xps_queue_show(struct net_device *dev, unsigned int index,
for (i = map->len; i--;) {
if (map->queues[i] == index) {
- set_bit(j, mask);
+ __set_bit(j, mask);
break;
}
}
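The tx_timeout_show() hunk drops the _xmit_lock because trans_timeout is now an atomic_long_t; the writer side can then be a plain lock-free increment. A sketch of what the updater presumably becomes after this conversion (an assumption, the write side is not shown in this diff):

	/* e.g. in the watchdog path, instead of taking _xmit_lock: */
	atomic_long_inc(&txq->trans_timeout);

Likewise, __set_bit() replaces set_bit() in xps_queue_show() presumably because the bitmap there is local to the function, making the atomic read-modify-write pure overhead.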
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 2af8aeeadadf..6f25c0a8aebe 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2539,13 +2539,12 @@ static int do_set_proto_down(struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
- const struct net_device_ops *ops = dev->netdev_ops;
unsigned long mask = 0;
u32 value;
bool proto_down;
int err;
- if (!ops->ndo_change_proto_down) {
+ if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) {
NL_SET_ERR_MSG(extack, "Protodown not supported by device");
return -EOPNOTSUPP;
}
@@ -2768,7 +2767,7 @@ static int do_setlink(const struct sk_buff *skb,
}
if (dev->gso_max_segs ^ max_segs) {
- dev->gso_max_segs = max_segs;
+ netif_set_gso_max_segs(dev, max_segs);
status |= DO_SETLINK_MODIFIED;
}
}
@@ -3222,7 +3221,7 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
if (tb[IFLA_GSO_MAX_SIZE])
netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
if (tb[IFLA_GSO_MAX_SEGS])
- dev->gso_max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
+ netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
return dev;
}
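Both hunks funnel writes through netif_set_gso_max_segs(); the value is read locklessly in sk_setup_caps(), so the helper's job is the WRITE_ONCE() half of that pairing. A sketch of its expected shape (consistent with the READ_ONCE() comments later in this patch):

	static inline void netif_set_gso_max_segs(struct net_device *dev,
						  unsigned int segs)
	{
		/* dev->gso_max_segs is read locklessly from sk_setup_caps() */
		WRITE_ONCE(dev->gso_max_segs, segs);
	}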
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index b5bc680d4755..9b8443774449 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -19,8 +19,8 @@
#include <linux/in6.h>
#include <net/tcp.h>
-static siphash_key_t net_secret __read_mostly;
-static siphash_key_t ts_secret __read_mostly;
+static siphash_aligned_key_t net_secret;
+static siphash_aligned_key_t ts_secret;
static __always_inline void net_secret_init(void)
{
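siphash_aligned_key_t only changes the key's declared alignment so the hash functions can use naturally aligned 64-bit loads even on 32-bit platforms; usage is unchanged. A hedged sketch of a consumer (example_hash() is hypothetical):

	static u32 example_hash(__be32 saddr, __be32 daddr)
	{
		net_get_random_once(&net_secret, sizeof(net_secret));
		return (u32)siphash_2u32((__force u32)saddr, (__force u32)daddr,
					 &net_secret);
	}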
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ba2f38246f07..a33247fdb8f5 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -992,12 +992,10 @@ void napi_consume_skb(struct sk_buff *skb, int budget)
}
EXPORT_SYMBOL(napi_consume_skb);
-/* Make sure a field is enclosed inside headers_start/headers_end section */
+/* Make sure a field is contained by headers group */
#define CHECK_SKB_FIELD(field) \
- BUILD_BUG_ON(offsetof(struct sk_buff, field) < \
- offsetof(struct sk_buff, headers_start)); \
- BUILD_BUG_ON(offsetof(struct sk_buff, field) > \
- offsetof(struct sk_buff, headers_end)); \
+ BUILD_BUG_ON(offsetof(struct sk_buff, field) != \
+ offsetof(struct sk_buff, headers.field)); \
static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
@@ -1009,14 +1007,12 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
__skb_ext_copy(new, old);
__nf_copy(new, old, false);
- /* Note : this field could be in headers_start/headers_end section
+ /* Note : this field could be in the headers group.
* It is not yet because we do not want to have a 16 bit hole
*/
new->queue_mapping = old->queue_mapping;
- memcpy(&new->headers_start, &old->headers_start,
- offsetof(struct sk_buff, headers_end) -
- offsetof(struct sk_buff, headers_start));
+ memcpy(&new->headers, &old->headers, sizeof(new->headers));
CHECK_SKB_FIELD(protocol);
CHECK_SKB_FIELD(csum);
CHECK_SKB_FIELD(hash);
@@ -3919,32 +3915,6 @@ err_linearize:
}
EXPORT_SYMBOL_GPL(skb_segment_list);
-int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
-{
- if (unlikely(p->len + skb->len >= 65536))
- return -E2BIG;
-
- if (NAPI_GRO_CB(p)->last == p)
- skb_shinfo(p)->frag_list = skb;
- else
- NAPI_GRO_CB(p)->last->next = skb;
-
- skb_pull(skb, skb_gro_offset(skb));
-
- NAPI_GRO_CB(p)->last = skb;
- NAPI_GRO_CB(p)->count++;
- p->data_len += skb->len;
-
- /* sk ownership - if any - completely transferred to the aggregated packet */
- skb->destructor = NULL;
- p->truesize += skb->truesize;
- p->len += skb->len;
-
- NAPI_GRO_CB(skb)->same_flow = 1;
-
- return 0;
-}
-
/**
* skb_segment - Perform protocol segmentation on skb.
* @head_skb: buffer to segment
@@ -4297,122 +4267,6 @@ err:
}
EXPORT_SYMBOL_GPL(skb_segment);
-int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
-{
- struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
- unsigned int offset = skb_gro_offset(skb);
- unsigned int headlen = skb_headlen(skb);
- unsigned int len = skb_gro_len(skb);
- unsigned int delta_truesize;
- unsigned int new_truesize;
- struct sk_buff *lp;
-
- if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
- return -E2BIG;
-
- lp = NAPI_GRO_CB(p)->last;
- pinfo = skb_shinfo(lp);
-
- if (headlen <= offset) {
- skb_frag_t *frag;
- skb_frag_t *frag2;
- int i = skbinfo->nr_frags;
- int nr_frags = pinfo->nr_frags + i;
-
- if (nr_frags > MAX_SKB_FRAGS)
- goto merge;
-
- offset -= headlen;
- pinfo->nr_frags = nr_frags;
- skbinfo->nr_frags = 0;
-
- frag = pinfo->frags + nr_frags;
- frag2 = skbinfo->frags + i;
- do {
- *--frag = *--frag2;
- } while (--i);
-
- skb_frag_off_add(frag, offset);
- skb_frag_size_sub(frag, offset);
-
- /* all fragments truesize : remove (head size + sk_buff) */
- new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
- delta_truesize = skb->truesize - new_truesize;
-
- skb->truesize = new_truesize;
- skb->len -= skb->data_len;
- skb->data_len = 0;
-
- NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
- goto done;
- } else if (skb->head_frag) {
- int nr_frags = pinfo->nr_frags;
- skb_frag_t *frag = pinfo->frags + nr_frags;
- struct page *page = virt_to_head_page(skb->head);
- unsigned int first_size = headlen - offset;
- unsigned int first_offset;
-
- if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
- goto merge;
-
- first_offset = skb->data -
- (unsigned char *)page_address(page) +
- offset;
-
- pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
-
- __skb_frag_set_page(frag, page);
- skb_frag_off_set(frag, first_offset);
- skb_frag_size_set(frag, first_size);
-
- memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
- /* We don't need to clear skbinfo->nr_frags here */
-
- new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
- delta_truesize = skb->truesize - new_truesize;
- skb->truesize = new_truesize;
- NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
- goto done;
- }
-
-merge:
- /* sk ownership - if any - completely transferred to the aggregated packet */
- skb->destructor = NULL;
- delta_truesize = skb->truesize;
- if (offset > headlen) {
- unsigned int eat = offset - headlen;
-
- skb_frag_off_add(&skbinfo->frags[0], eat);
- skb_frag_size_sub(&skbinfo->frags[0], eat);
- skb->data_len -= eat;
- skb->len -= eat;
- offset = headlen;
- }
-
- __skb_pull(skb, offset);
-
- if (NAPI_GRO_CB(p)->last == p)
- skb_shinfo(p)->frag_list = skb;
- else
- NAPI_GRO_CB(p)->last->next = skb;
- NAPI_GRO_CB(p)->last = skb;
- __skb_header_release(skb);
- lp = p;
-
-done:
- NAPI_GRO_CB(p)->count++;
- p->data_len += len;
- p->truesize += delta_truesize;
- p->len += len;
- if (lp != p) {
- lp->data_len += len;
- lp->truesize += delta_truesize;
- lp->len += len;
- }
- NAPI_GRO_CB(skb)->same_flow = 1;
- return 0;
-}
-
#ifdef CONFIG_SKB_EXTENSIONS
#define SKB_EXT_ALIGN_VALUE 8
#define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)
@@ -4849,8 +4703,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
serr->ee.ee_data = skb_shinfo(skb)->tskey;
- if (sk->sk_protocol == IPPROTO_TCP &&
- sk->sk_type == SOCK_STREAM)
+ if (sk_is_tcp(sk))
serr->ee.ee_data -= sk->sk_tskey;
}
@@ -4919,8 +4772,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
if (tsonly) {
#ifdef CONFIG_INET
if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
- sk->sk_protocol == IPPROTO_TCP &&
- sk->sk_type == SOCK_STREAM) {
+ sk_is_tcp(sk)) {
skb = tcp_get_timestamping_opt_stats(sk, orig_skb,
ack_skb);
opt_stats = true;
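The headers member referenced in __copy_skb_header() is built with struct_group() from <linux/stddef.h>, which wraps the members in an anonymous union so they stay addressable both directly and as one named blob. A reduced sketch of the idea (struct example is illustrative):

	struct example {
		int		not_copied;
		struct_group(headers,
			__be16	protocol;
			__u32	hash;
		);
	};

	/* memcpy(&b->headers, &a->headers, sizeof(a->headers)) copies the
	 * whole group, while offsetof(struct example, protocol) still equals
	 * offsetof(struct example, headers.protocol) - exactly the invariant
	 * the new CHECK_SKB_FIELD() asserts at build time.
	 */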
diff --git a/net/core/sock.c b/net/core/sock.c
index 41e91d0f7061..4a499d255f40 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -144,8 +144,6 @@
static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);
-static void sock_inuse_add(struct net *net, int val);
-
/**
* sk_ns_capable - General socket capability test
* @sk: Socket to use a capability on or through
@@ -327,7 +325,10 @@ int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
noreclaim_flag = memalloc_noreclaim_save();
- ret = sk->sk_backlog_rcv(sk, skb);
+ ret = INDIRECT_CALL_INET(sk->sk_backlog_rcv,
+ tcp_v6_do_rcv,
+ tcp_v4_do_rcv,
+ sk, skb);
memalloc_noreclaim_restore(noreclaim_flag);
return ret;
@@ -872,8 +873,7 @@ int sock_set_timestamping(struct sock *sk, int optname,
if (val & SOF_TIMESTAMPING_OPT_ID &&
!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
- if (sk->sk_protocol == IPPROTO_TCP &&
- sk->sk_type == SOCK_STREAM) {
+ if (sk_is_tcp(sk)) {
if ((1 << sk->sk_state) &
(TCPF_CLOSE | TCPF_LISTEN))
return -EINVAL;
@@ -1135,6 +1135,7 @@ set_sndbuf:
case SO_PRIORITY:
if ((val >= 0 && val <= 6) ||
+ ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) ||
ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk->sk_priority = val;
else
@@ -1280,7 +1281,8 @@ set_sndbuf:
clear_bit(SOCK_PASSSEC, &sock->flags);
break;
case SO_MARK:
- if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
+ if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
+ !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
ret = -EPERM;
break;
}
@@ -1370,8 +1372,7 @@ set_sndbuf:
case SO_ZEROCOPY:
if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
- if (!((sk->sk_type == SOCK_STREAM &&
- sk->sk_protocol == IPPROTO_TCP) ||
+ if (!(sk_is_tcp(sk) ||
(sk->sk_type == SOCK_DGRAM &&
sk->sk_protocol == IPPROTO_UDP)))
ret = -ENOTSUPP;
@@ -2246,17 +2247,22 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
u32 max_segs = 1;
sk_dst_set(sk, dst);
- sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
+ sk->sk_route_caps = dst->dev->features;
+ if (sk_is_tcp(sk))
+ sk->sk_route_caps |= NETIF_F_GSO;
if (sk->sk_route_caps & NETIF_F_GSO)
sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
- sk->sk_route_caps &= ~sk->sk_route_nocaps;
+ if (unlikely(sk->sk_gso_disabled))
+ sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
if (sk_can_gso(sk)) {
if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
} else {
sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
- sk->sk_gso_max_size = dst->dev->gso_max_size;
- max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
+ /* pairs with the WRITE_ONCE() in netif_set_gso_max_size() */
+ sk->sk_gso_max_size = READ_ONCE(dst->dev->gso_max_size);
+ /* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */
+ max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1);
}
}
sk->sk_gso_max_segs = max_segs;
@@ -3532,19 +3538,8 @@ void sk_get_meminfo(const struct sock *sk, u32 *mem)
}
#ifdef CONFIG_PROC_FS
-#define PROTO_INUSE_NR 64 /* should be enough for the first time */
-struct prot_inuse {
- int val[PROTO_INUSE_NR];
-};
-
static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
-void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
-{
- __this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
-}
-EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
-
int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
int cpu, idx = prot->inuse_idx;
@@ -3557,17 +3552,12 @@ int sock_prot_inuse_get(struct net *net, struct proto *prot)
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
-static void sock_inuse_add(struct net *net, int val)
-{
- this_cpu_add(*net->core.sock_inuse, val);
-}
-
int sock_inuse_get(struct net *net)
{
int cpu, res = 0;
for_each_possible_cpu(cpu)
- res += *per_cpu_ptr(net->core.sock_inuse, cpu);
+ res += per_cpu_ptr(net->core.prot_inuse, cpu)->all;
return res;
}
@@ -3579,22 +3569,12 @@ static int __net_init sock_inuse_init_net(struct net *net)
net->core.prot_inuse = alloc_percpu(struct prot_inuse);
if (net->core.prot_inuse == NULL)
return -ENOMEM;
-
- net->core.sock_inuse = alloc_percpu(int);
- if (net->core.sock_inuse == NULL)
- goto out;
-
return 0;
-
-out:
- free_percpu(net->core.prot_inuse);
- return -ENOMEM;
}
static void __net_exit sock_inuse_exit_net(struct net *net)
{
free_percpu(net->core.prot_inuse);
- free_percpu(net->core.sock_inuse);
}
static struct pernet_operations net_inuse_ops = {
@@ -3640,9 +3620,6 @@ static inline void release_proto_idx(struct proto *prot)
{
}
-static void sock_inuse_add(struct net *net, int val)
-{
-}
#endif
static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index fc44dadc778b..a976b4d29892 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -238,17 +238,6 @@ void dccp_destroy_sock(struct sock *sk)
EXPORT_SYMBOL_GPL(dccp_destroy_sock);
-static inline int dccp_listen_start(struct sock *sk, int backlog)
-{
- struct dccp_sock *dp = dccp_sk(sk);
-
- dp->dccps_role = DCCP_ROLE_LISTEN;
- /* do not start to listen if feature negotiation setup fails */
- if (dccp_feat_finalise_settings(dp))
- return -EPROTO;
- return inet_csk_listen_start(sk, backlog);
-}
-
static inline int dccp_need_reset(int state)
{
return state != DCCP_CLOSED && state != DCCP_LISTEN &&
@@ -931,11 +920,17 @@ int inet_dccp_listen(struct socket *sock, int backlog)
* we can only allow the backlog to be adjusted.
*/
if (old_state != DCCP_LISTEN) {
- /*
- * FIXME: here it probably should be sk->sk_prot->listen_start
- * see tcp_listen_start
- */
- err = dccp_listen_start(sk, backlog);
+ struct dccp_sock *dp = dccp_sk(sk);
+
+ dp->dccps_role = DCCP_ROLE_LISTEN;
+
+ /* do not start to listen if feature negotiation setup fails */
+ if (dccp_feat_finalise_settings(dp)) {
+ err = -EPROTO;
+ goto out;
+ }
+
+ err = inet_csk_listen_start(sk);
if (err)
goto out;
}
diff --git a/net/dccp/trace.h b/net/dccp/trace.h
index 5062421beee9..5a43b3508c7f 100644
--- a/net/dccp/trace.h
+++ b/net/dccp/trace.h
@@ -60,9 +60,7 @@ TRACE_EVENT(dccp_probe,
__entry->tx_t_ipi = hc->tx_t_ipi;
} else {
__entry->tx_s = 0;
- memset(&__entry->tx_rtt, 0, (void *)&__entry->tx_t_ipi -
- (void *)&__entry->tx_rtt +
- sizeof(__entry->tx_t_ipi));
+ memset_startat(__entry, 0, tx_rtt);
}
),
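memset_startat() (from <linux/string.h>) zeroes everything from a named member to the end of the object, replacing the pointer arithmetic deleted above and surviving later field reordering. A reduced sketch (struct probe here is illustrative):

	struct probe {
		u32 tx_s;	/* left untouched */
		u32 tx_rtt;
		u32 tx_p;
		u32 tx_t_ipi;
	};

	memset_startat(p, 0, tx_rtt);	/* clears tx_rtt through tx_t_ipi */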
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index c7d9e08107cb..ebcc812735a4 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -436,11 +436,10 @@ struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
type = eh->h_proto;
- rcu_read_lock();
ptype = gro_find_receive_by_type(type);
if (ptype == NULL) {
flush = 1;
- goto out_unlock;
+ goto out;
}
skb_gro_pull(skb, sizeof(*eh));
@@ -450,8 +449,6 @@ struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
ipv6_gro_receive, inet_gro_receive,
head, skb);
-out_unlock:
- rcu_read_unlock();
out:
skb_gro_flush_final(skb, pp, flush);
@@ -469,14 +466,12 @@ int eth_gro_complete(struct sk_buff *skb, int nhoff)
if (skb->encapsulation)
skb_set_inner_mac_header(skb, nhoff);
- rcu_read_lock();
ptype = gro_find_complete_by_type(type);
if (ptype != NULL)
err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
ipv6_gro_complete, inet_gro_complete,
skb, nhoff + sizeof(*eh));
- rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(eth_gro_complete);
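These handlers (and the many similar hunks below) can drop rcu_read_lock()/rcu_read_unlock() only because their callers in the GRO core already hold the RCU read lock around the offload list walk, so the rcu_dereference() calls that remain stay legal. An illustrative assertion of that invariant (not code from this patch):

	/* gro_receive/gro_complete handlers may assume on entry: */
	WARN_ON_ONCE(!rcu_read_lock_held());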
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index c63e0739dc6a..0c5210015911 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -89,6 +89,7 @@ tunable_strings[__ETHTOOL_TUNABLE_COUNT][ETH_GSTRING_LEN] = {
[ETHTOOL_RX_COPYBREAK] = "rx-copybreak",
[ETHTOOL_TX_COPYBREAK] = "tx-copybreak",
[ETHTOOL_PFC_PREVENTION_TOUT] = "pfc-prevention-tout",
+ [ETHTOOL_TX_COPYBREAK_BUF_SIZE] = "tx-copybreak-buf-size",
};
const char
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 20bcf86970ff..06d3866c69b4 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -1743,11 +1743,13 @@ static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
{
struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM };
+ struct kernel_ethtool_ringparam kernel_ringparam = {};
if (!dev->ethtool_ops->get_ringparam)
return -EOPNOTSUPP;
- dev->ethtool_ops->get_ringparam(dev, &ringparam);
+ dev->ethtool_ops->get_ringparam(dev, &ringparam,
+ &kernel_ringparam, NULL);
if (copy_to_user(useraddr, &ringparam, sizeof(ringparam)))
return -EFAULT;
@@ -1757,6 +1759,7 @@ static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
{
struct ethtool_ringparam ringparam, max = { .cmd = ETHTOOL_GRINGPARAM };
+ struct kernel_ethtool_ringparam kernel_ringparam;
int ret;
if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam)
@@ -1765,7 +1768,7 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
return -EFAULT;
- dev->ethtool_ops->get_ringparam(dev, &max);
+ dev->ethtool_ops->get_ringparam(dev, &max, &kernel_ringparam, NULL);
/* ensure new ring parameters are within the maximums */
if (ringparam.rx_pending > max.rx_max_pending ||
@@ -1774,7 +1777,8 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
ringparam.tx_pending > max.tx_max_pending)
return -EINVAL;
- ret = dev->ethtool_ops->set_ringparam(dev, &ringparam);
+ ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
+ &kernel_ringparam, NULL);
if (!ret)
ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL);
return ret;
@@ -2396,6 +2400,7 @@ static int ethtool_tunable_valid(const struct ethtool_tunable *tuna)
switch (tuna->id) {
case ETHTOOL_RX_COPYBREAK:
case ETHTOOL_TX_COPYBREAK:
+ case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
if (tuna->len != sizeof(u32) ||
tuna->type_id != ETHTOOL_TUNABLE_U32)
return -EINVAL;
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index 836ee7157848..490598e5eedd 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -356,7 +356,7 @@ extern const struct nla_policy ethnl_features_set_policy[ETHTOOL_A_FEATURES_WANT
extern const struct nla_policy ethnl_privflags_get_policy[ETHTOOL_A_PRIVFLAGS_HEADER + 1];
extern const struct nla_policy ethnl_privflags_set_policy[ETHTOOL_A_PRIVFLAGS_FLAGS + 1];
extern const struct nla_policy ethnl_rings_get_policy[ETHTOOL_A_RINGS_HEADER + 1];
-extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_TX + 1];
+extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_RX_BUF_LEN + 1];
extern const struct nla_policy ethnl_channels_get_policy[ETHTOOL_A_CHANNELS_HEADER + 1];
extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMBINED_COUNT + 1];
extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1];
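nla_policy arrays are indexed by attribute id, so the extern declaration's bound must track the highest attribute the policy actually handles - here moving from _RINGS_TX to _RINGS_RX_BUF_LEN. A reduced sketch of the pattern (the EXAMPLE_A_* names are hypothetical):

	enum {
		EXAMPLE_A_UNSPEC,
		EXAMPLE_A_TX,		/* old last attribute */
		EXAMPLE_A_RX_BUF_LEN,	/* new last attribute */
	};

	static const struct nla_policy example_set_policy[EXAMPLE_A_RX_BUF_LEN + 1] = {
		[EXAMPLE_A_TX]		= { .type = NLA_U32 },
		[EXAMPLE_A_RX_BUF_LEN]	= NLA_POLICY_MIN(NLA_U32, 1),
	};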
diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c
index 4e097812a967..450b8866373d 100644
--- a/net/ethtool/rings.c
+++ b/net/ethtool/rings.c
@@ -10,6 +10,7 @@ struct rings_req_info {
struct rings_reply_data {
struct ethnl_reply_data base;
struct ethtool_ringparam ringparam;
+ struct kernel_ethtool_ringparam kernel_ringparam;
};
#define RINGS_REPDATA(__reply_base) \
@@ -25,6 +26,7 @@ static int rings_prepare_data(const struct ethnl_req_info *req_base,
struct genl_info *info)
{
struct rings_reply_data *data = RINGS_REPDATA(reply_base);
+ struct netlink_ext_ack *extack = info ? info->extack : NULL;
struct net_device *dev = reply_base->dev;
int ret;
@@ -33,7 +35,8 @@ static int rings_prepare_data(const struct ethnl_req_info *req_base,
ret = ethnl_ops_begin(dev);
if (ret < 0)
return ret;
- dev->ethtool_ops->get_ringparam(dev, &data->ringparam);
+ dev->ethtool_ops->get_ringparam(dev, &data->ringparam,
+ &data->kernel_ringparam, extack);
ethnl_ops_complete(dev);
return 0;
@@ -49,7 +52,8 @@ static int rings_reply_size(const struct ethnl_req_info *req_base,
nla_total_size(sizeof(u32)) + /* _RINGS_RX */
nla_total_size(sizeof(u32)) + /* _RINGS_RX_MINI */
nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO */
- nla_total_size(sizeof(u32)); /* _RINGS_TX */
+ nla_total_size(sizeof(u32)) + /* _RINGS_TX */
+ nla_total_size(sizeof(u32)); /* _RINGS_RX_BUF_LEN */
}
static int rings_fill_reply(struct sk_buff *skb,
@@ -57,6 +61,7 @@ static int rings_fill_reply(struct sk_buff *skb,
const struct ethnl_reply_data *reply_base)
{
const struct rings_reply_data *data = RINGS_REPDATA(reply_base);
+ const struct kernel_ethtool_ringparam *kernel_ringparam = &data->kernel_ringparam;
const struct ethtool_ringparam *ringparam = &data->ringparam;
if ((ringparam->rx_max_pending &&
@@ -78,7 +83,10 @@ static int rings_fill_reply(struct sk_buff *skb,
(nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX,
ringparam->tx_max_pending) ||
nla_put_u32(skb, ETHTOOL_A_RINGS_TX,
- ringparam->tx_pending))))
+ ringparam->tx_pending))) ||
+ (kernel_ringparam->rx_buf_len &&
+ (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN,
+ kernel_ringparam->rx_buf_len))))
return -EMSGSIZE;
return 0;
@@ -105,10 +113,12 @@ const struct nla_policy ethnl_rings_set_policy[] = {
[ETHTOOL_A_RINGS_RX_MINI] = { .type = NLA_U32 },
[ETHTOOL_A_RINGS_RX_JUMBO] = { .type = NLA_U32 },
[ETHTOOL_A_RINGS_TX] = { .type = NLA_U32 },
+ [ETHTOOL_A_RINGS_RX_BUF_LEN] = NLA_POLICY_MIN(NLA_U32, 1),
};
int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
{
+ struct kernel_ethtool_ringparam kernel_ringparam = {};
struct ethtool_ringparam ringparam = {};
struct ethnl_req_info req_info = {};
struct nlattr **tb = info->attrs;
@@ -134,7 +144,7 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
ret = ethnl_ops_begin(dev);
if (ret < 0)
goto out_rtnl;
- ops->get_ringparam(dev, &ringparam);
+ ops->get_ringparam(dev, &ringparam, &kernel_ringparam, info->extack);
ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
ethnl_update_u32(&ringparam.rx_mini_pending,
@@ -142,6 +152,8 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
ethnl_update_u32(&ringparam.rx_jumbo_pending,
tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod);
ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod);
+ ethnl_update_u32(&kernel_ringparam.rx_buf_len,
+ tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod);
ret = 0;
if (!mod)
goto out_ops;
@@ -164,7 +176,17 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
goto out_ops;
}
- ret = dev->ethtool_ops->set_ringparam(dev, &ringparam);
+ if (kernel_ringparam.rx_buf_len != 0 &&
+ !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) {
+ ret = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[ETHTOOL_A_RINGS_RX_BUF_LEN],
+ "setting rx buf len not supported");
+ goto out_ops;
+ }
+
+ ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
+ &kernel_ringparam, info->extack);
if (ret < 0)
goto out_ops;
ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL);
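On the driver side, the new parameter implies advertising ETHTOOL_RING_USE_RX_BUF_LEN and filling kernel_ringparam->rx_buf_len in the extended callbacks. A hedged sketch (mydrv_* names and MYDRV_MAX_RX are hypothetical):

	static void mydrv_get_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring,
					struct kernel_ethtool_ringparam *kernel_ring,
					struct netlink_ext_ack *extack)
	{
		struct mydrv_priv *priv = netdev_priv(dev);

		ring->rx_max_pending = MYDRV_MAX_RX;
		ring->rx_pending = priv->rx_pending;
		kernel_ring->rx_buf_len = priv->rx_buf_len;
	}

	static const struct ethtool_ops mydrv_ethtool_ops = {
		.supported_ring_params	= ETHTOOL_RING_USE_RX_BUF_LEN,
		.get_ringparam		= mydrv_get_ringparam,
	};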
diff --git a/net/ethtool/stats.c b/net/ethtool/stats.c
index ec07f5765e03..a20e0a24ff61 100644
--- a/net/ethtool/stats.c
+++ b/net/ethtool/stats.c
@@ -14,10 +14,12 @@ struct stats_req_info {
struct stats_reply_data {
struct ethnl_reply_data base;
- struct ethtool_eth_phy_stats phy_stats;
- struct ethtool_eth_mac_stats mac_stats;
- struct ethtool_eth_ctrl_stats ctrl_stats;
- struct ethtool_rmon_stats rmon_stats;
+ struct_group(stats,
+ struct ethtool_eth_phy_stats phy_stats;
+ struct ethtool_eth_mac_stats mac_stats;
+ struct ethtool_eth_ctrl_stats ctrl_stats;
+ struct ethtool_rmon_stats rmon_stats;
+ );
const struct ethtool_rmon_hist_range *rmon_ranges;
};
@@ -117,10 +119,7 @@ static int stats_prepare_data(const struct ethnl_req_info *req_base,
/* Mark all stats as unset (see ETHTOOL_STAT_NOT_SET) to prevent them
* from being reported to user space in case driver did not set them.
*/
- memset(&data->phy_stats, 0xff, sizeof(data->phy_stats));
- memset(&data->mac_stats, 0xff, sizeof(data->mac_stats));
- memset(&data->ctrl_stats, 0xff, sizeof(data->ctrl_stats));
- memset(&data->rmon_stats, 0xff, sizeof(data->rmon_stats));
+ memset(&data->stats, 0xff, sizeof(data->stats));
if (test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask) &&
dev->ethtool_ops->get_eth_phy_stats)
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index 7bb9ef35c570..3b2366a88c3c 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -174,8 +174,8 @@ static int raw_hash(struct sock *sk)
{
write_lock_bh(&raw_lock);
sk_add_node(sk, &raw_head);
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
write_unlock_bh(&raw_lock);
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
return 0;
}
@@ -453,8 +453,8 @@ static int dgram_hash(struct sock *sk)
{
write_lock_bh(&dgram_lock);
sk_add_node(sk, &dgram_head);
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
write_unlock_bh(&dgram_lock);
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
return 0;
}
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 0189e3cd4a7d..04067b249bf3 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -99,6 +99,7 @@
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/inet_connection_sock.h>
+#include <net/gro.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/udplite.h>
@@ -224,7 +225,7 @@ int inet_listen(struct socket *sock, int backlog)
tcp_fastopen_init_key_once(sock_net(sk));
}
- err = inet_csk_listen_start(sk, backlog);
+ err = inet_csk_listen_start(sk);
if (err)
goto out;
tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL);
@@ -488,11 +489,8 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
* is temporarily down)
*/
err = -EADDRNOTAVAIL;
- if (!inet_can_nonlocal_bind(net, inet) &&
- addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
- chk_addr_ret != RTN_LOCAL &&
- chk_addr_ret != RTN_MULTICAST &&
- chk_addr_ret != RTN_BROADCAST)
+ if (!inet_addr_valid_or_nonlocal(net, inet, addr->sin_addr.s_addr,
+ chk_addr_ret))
goto out;
snum = ntohs(addr->sin_port);
@@ -1454,19 +1452,18 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
proto = iph->protocol;
- rcu_read_lock();
ops = rcu_dereference(inet_offloads[proto]);
if (!ops || !ops->callbacks.gro_receive)
- goto out_unlock;
+ goto out;
if (*(u8 *)iph != 0x45)
- goto out_unlock;
+ goto out;
if (ip_is_fragment(iph))
- goto out_unlock;
+ goto out;
if (unlikely(ip_fast_csum((u8 *)iph, 5)))
- goto out_unlock;
+ goto out;
id = ntohl(*(__be32 *)&iph->id);
flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
@@ -1543,9 +1540,6 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
ops->callbacks.gro_receive, head, skb);
-out_unlock:
- rcu_read_unlock();
-
out:
skb_gro_flush_final(skb, pp, flush);
@@ -1618,10 +1612,9 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
csum_replace2(&iph->check, iph->tot_len, newlen);
iph->tot_len = newlen;
- rcu_read_lock();
ops = rcu_dereference(inet_offloads[proto]);
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
- goto out_unlock;
+ goto out;
/* Only need to add sizeof(*iph) to get to the next hdr below
* because any hdr with option will have been flushed in
@@ -1631,9 +1624,7 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
tcp4_gro_complete, udp4_gro_complete,
skb, nhoff + sizeof(*iph));
-out_unlock:
- rcu_read_unlock();
-
+out:
return err;
}
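inet_addr_valid_or_nonlocal() folds the open-coded test removed above into one helper; reconstructing it from those removed lines gives roughly this shape (the real definition lives in a header):

	static inline bool inet_addr_valid_or_nonlocal(struct net *net,
						       struct inet_sock *inet,
						       __be32 addr,
						       int addr_type)
	{
		return inet_can_nonlocal_bind(net, inet) ||
		       addr == htonl(INADDR_ANY) ||
		       addr_type == RTN_LOCAL ||
		       addr_type == RTN_MULTICAST ||
		       addr_type == RTN_BROADCAST;
	}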
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 857a144b1ea9..4db0325f6e1a 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1299,21 +1299,6 @@ static struct packet_type arp_packet_type __read_mostly = {
.func = arp_rcv,
};
-static int arp_proc_init(void);
-
-void __init arp_init(void)
-{
- neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
-
- dev_add_pack(&arp_packet_type);
- arp_proc_init();
-#ifdef CONFIG_SYSCTL
- neigh_sysctl_register(NULL, &arp_tbl.parms, NULL);
-#endif
- register_netdevice_notifier(&arp_netdev_notifier);
-}
-
-#ifdef CONFIG_PROC_FS
#if IS_ENABLED(CONFIG_AX25)
/* ------------------------------------------------------------------------ */
@@ -1451,16 +1436,14 @@ static struct pernet_operations arp_net_ops = {
.exit = arp_net_exit,
};
-static int __init arp_proc_init(void)
+void __init arp_init(void)
{
- return register_pernet_subsys(&arp_net_ops);
-}
-
-#else /* CONFIG_PROC_FS */
+ neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
-static int __init arp_proc_init(void)
-{
- return 0;
+ dev_add_pack(&arp_packet_type);
+ register_pernet_subsys(&arp_net_ops);
+#ifdef CONFIG_SYSCTL
+ neigh_sysctl_register(NULL, &arp_tbl.parms, NULL);
+#endif
+ register_netdevice_notifier(&arp_netdev_notifier);
}
-
-#endif /* CONFIG_PROC_FS */
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index 8e4e9aa12130..d87f02a6e934 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -16,6 +16,7 @@
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <net/gro.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 8fcbc6258ec5..0d085cc8d96c 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
+#include <net/gro.h>
#include <net/gue.h>
#include <net/fou.h>
#include <net/ip.h>
@@ -246,17 +247,14 @@ static struct sk_buff *fou_gro_receive(struct sock *sk,
/* Flag this frame as already having an outer encap header */
NAPI_GRO_CB(skb)->is_fou = 1;
- rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[proto]);
if (!ops || !ops->callbacks.gro_receive)
- goto out_unlock;
+ goto out;
pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
-out_unlock:
- rcu_read_unlock();
-
+out:
return pp;
}
@@ -268,19 +266,16 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
const struct net_offload *ops;
int err = -ENOSYS;
- rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[proto]);
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
- goto out_unlock;
+ goto out;
err = ops->callbacks.gro_complete(skb, nhoff);
skb_set_inner_mac_header(skb, nhoff);
-out_unlock:
- rcu_read_unlock();
-
+out:
return err;
}
@@ -438,17 +433,14 @@ next_proto:
/* Flag this frame as already having an outer encap header */
NAPI_GRO_CB(skb)->is_fou = 1;
- rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[proto]);
if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
- goto out_unlock;
+ goto out;
pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
flush = 0;
-out_unlock:
- rcu_read_unlock();
out:
skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
@@ -485,18 +477,16 @@ static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
return err;
}
- rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[proto]);
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
- goto out_unlock;
+ goto out;
err = ops->callbacks.gro_complete(skb, nhoff + guehlen);
skb_set_inner_mac_header(skb, nhoff + guehlen);
-out_unlock:
- rcu_read_unlock();
+out:
return err;
}
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 1121a9d5fed9..07073fa35205 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <net/protocol.h>
#include <net/gre.h>
+#include <net/gro.h>
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
netdev_features_t features)
@@ -162,10 +163,9 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
type = greh->protocol;
- rcu_read_lock();
ptype = gro_find_receive_by_type(type);
if (!ptype)
- goto out_unlock;
+ goto out;
grehlen = GRE_HEADER_SECTION;
@@ -179,13 +179,13 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
if (skb_gro_header_hard(skb, hlen)) {
greh = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!greh))
- goto out_unlock;
+ goto out;
}
/* Don't bother verifying checksum if we're going to flush anyway. */
if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
if (skb_gro_checksum_simple_validate(skb))
- goto out_unlock;
+ goto out;
skb_gro_checksum_try_convert(skb, IPPROTO_GRE,
null_compute_pseudo);
@@ -229,8 +229,6 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
-out_unlock:
- rcu_read_unlock();
out:
skb_gro_flush_final(skb, pp, flush);
@@ -255,13 +253,10 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
if (greh->flags & GRE_CSUM)
grehlen += GRE_HEADER_SECTION;
- rcu_read_lock();
ptype = gro_find_complete_by_type(type);
if (ptype)
err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
- rcu_read_unlock();
-
skb_set_inner_mac_header(skb, nhoff + grehlen);
return err;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d2e2b3d18c66..2ad3c7b42d6d 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2558,7 +2558,6 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
msf->imsf_fmode = pmc->sfmode;
psl = rtnl_dereference(pmc->sflist);
if (!psl) {
- len = 0;
count = 0;
} else {
count = psl->sl_count;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f7fea3a7c5e6..23da67a3fc06 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -1035,7 +1035,7 @@ void inet_csk_prepare_forced_close(struct sock *sk)
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);
-int inet_csk_listen_start(struct sock *sk, int backlog)
+int inet_csk_listen_start(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_sock *inet = inet_sk(sk);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 75737267746f..30ab717ff1b8 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -307,7 +307,7 @@ static inline struct sock *inet_lookup_run_bpf(struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport,
- __be32 daddr, u16 hnum)
+ __be32 daddr, u16 hnum, const int dif)
{
struct sock *sk, *reuse_sk;
bool no_reuseport;
@@ -315,8 +315,8 @@ static inline struct sock *inet_lookup_run_bpf(struct net *net,
if (hashinfo != &tcp_hashinfo)
return NULL; /* only TCP is supported */
- no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP,
- saddr, sport, daddr, hnum, &sk);
+ no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP, saddr, sport,
+ daddr, hnum, dif, &sk);
if (no_reuseport || IS_ERR_OR_NULL(sk))
return sk;
@@ -340,7 +340,7 @@ struct sock *__inet_lookup_listener(struct net *net,
/* Lookup redirect from BPF */
if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
result = inet_lookup_run_bpf(net, hashinfo, skb, doff,
- saddr, sport, daddr, hnum);
+ saddr, sport, daddr, hnum, dif);
if (result)
goto done;
}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9bca57ef8b83..57c1d8431386 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -672,7 +672,6 @@ struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
struct sk_buff *skb2;
struct iphdr *iph;
- len = state->left;
/* IF: it doesn't fit, use 'mtu' - the data space left */
if (len > state->mtu)
len = state->mtu;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 38d29b175ca6..445a9ecaefa1 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -576,7 +576,7 @@ out:
return err;
}
-static void __ip_sock_set_tos(struct sock *sk, int val)
+void __ip_sock_set_tos(struct sock *sk, int val)
{
if (sk->sk_type == SOCK_STREAM) {
val &= ~INET_ECN_MASK;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 1e44a43acfe2..e540b0dcf085 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -311,15 +311,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
- if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
- chk_addr_ret = RTN_LOCAL;
- else
- chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
-
- if ((!inet_can_nonlocal_bind(net, isk) &&
- chk_addr_ret != RTN_LOCAL) ||
- chk_addr_ret == RTN_MULTICAST ||
- chk_addr_ret == RTN_BROADCAST)
+ chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
+
+ if (!inet_addr_valid_or_nonlocal(net, inet_sk(sk),
+ addr->sin_addr.s_addr,
+ chk_addr_ret))
return -EADDRNOTAVAIL;
#if IS_ENABLED(CONFIG_IPV6)
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bb446e60cf58..a53f256bf9d3 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -99,8 +99,8 @@ int raw_hash_sk(struct sock *sk)
write_lock_bh(&h->lock);
sk_add_node(sk, head);
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
write_unlock_bh(&h->lock);
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
return 0;
}
@@ -717,6 +717,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
+ struct net *net = sock_net(sk);
u32 tb_id = RT_TABLE_LOCAL;
int ret = -EINVAL;
int chk_addr_ret;
@@ -725,16 +726,16 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
goto out;
if (sk->sk_bound_dev_if)
- tb_id = l3mdev_fib_table_by_index(sock_net(sk),
- sk->sk_bound_dev_if) ? : tb_id;
+ tb_id = l3mdev_fib_table_by_index(net,
+ sk->sk_bound_dev_if) ? : tb_id;
- chk_addr_ret = inet_addr_type_table(sock_net(sk), addr->sin_addr.s_addr,
- tb_id);
+ chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
ret = -EADDRNOTAVAIL;
- if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
- chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
+ if (!inet_addr_valid_or_nonlocal(net, inet, addr->sin_addr.s_addr,
+ chk_addr_ret))
goto out;
+
inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->inet_saddr = 0; /* Use device */
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 0b4103b1e622..243a0c52be42 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -602,7 +602,7 @@ static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash)
static u32 fnhe_hashfun(__be32 daddr)
{
- static siphash_key_t fnhe_hash_key __read_mostly;
+ static siphash_aligned_key_t fnhe_hash_key;
u64 hval;
net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key));
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 8696dc343ad2..2cb3b852d148 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -14,7 +14,7 @@
#include <net/tcp.h>
#include <net/route.h>
-static siphash_key_t syncookie_secret[2] __read_mostly;
+static siphash_aligned_key_t syncookie_secret[2];
#define COOKIEBITS 24 /* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index bbb3d39c69af..6ab82e1a1d41 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -292,7 +292,7 @@ EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);
long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);
-atomic_long_t tcp_memory_allocated; /* Current allocated memory. */
+atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp; /* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
#if IS_ENABLED(CONFIG_SMC)
@@ -303,7 +303,7 @@ EXPORT_SYMBOL(tcp_have_smc);
/*
* Current number of TCP sockets.
*/
-struct percpu_counter tcp_sockets_allocated;
+struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(tcp_sockets_allocated);
/*
@@ -456,7 +456,6 @@ void tcp_init_sock(struct sock *sk)
WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
sk_sockets_allocated_inc(sk);
- sk->sk_route_forced_caps = NETIF_F_GSO;
}
EXPORT_SYMBOL(tcp_init_sock);
@@ -546,10 +545,11 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
if (state != TCP_SYN_SENT &&
(state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
int target = sock_rcvlowat(sk, 0, INT_MAX);
+ u16 urg_data = READ_ONCE(tp->urg_data);
- if (READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
- !sock_flag(sk, SOCK_URGINLINE) &&
- tp->urg_data)
+ if (unlikely(urg_data) &&
+ READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
+ !sock_flag(sk, SOCK_URGINLINE))
target++;
if (tcp_stream_is_readable(sk, target))
@@ -574,7 +574,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
} else
mask |= EPOLLOUT | EPOLLWRNORM;
- if (tp->urg_data & TCP_URG_VALID)
+ if (urg_data & TCP_URG_VALID)
mask |= EPOLLPRI;
} else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
/* Active TCP fastopen socket with defer_connect
@@ -608,7 +608,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
unlock_sock_fast(sk, slow);
break;
case SIOCATMARK:
- answ = tp->urg_data &&
+ answ = READ_ONCE(tp->urg_data) &&
READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
break;
case SIOCOUTQ:
@@ -1466,7 +1466,7 @@ static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
char c = tp->urg_data;
if (!(flags & MSG_PEEK))
- tp->urg_data = TCP_URG_READ;
+ WRITE_ONCE(tp->urg_data, TCP_URG_READ);
/* Read urgent data. */
msg->msg_flags |= MSG_OOB;
@@ -1580,6 +1580,36 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
tcp_send_ack(sk);
}
+void __sk_defer_free_flush(struct sock *sk)
+{
+ struct llist_node *head;
+ struct sk_buff *skb, *n;
+
+ head = llist_del_all(&sk->defer_list);
+ llist_for_each_entry_safe(skb, n, head, ll_node) {
+ prefetch(n);
+ skb_mark_not_on_list(skb);
+ __kfree_skb(skb);
+ }
+}
+EXPORT_SYMBOL(__sk_defer_free_flush);
+
+static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ if (likely(skb->destructor == sock_rfree)) {
+ sock_rfree(skb);
+ skb->destructor = NULL;
+ skb->sk = NULL;
+ if (!skb_queue_empty(&sk->sk_receive_queue) ||
+ !llist_empty(&sk->defer_list)) {
+ llist_add(&skb->ll_node, &sk->defer_list);
+ return;
+ }
+ }
+ __kfree_skb(skb);
+}
+
static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
struct sk_buff *skb;
@@ -1599,7 +1629,7 @@ static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
* splitted a fat GRO packet, while we released socket lock
* in skb_splice_bits()
*/
- sk_eat_skb(sk, skb);
+ tcp_eat_recv_skb(sk, skb);
}
return NULL;
}
@@ -1633,7 +1663,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
len = skb->len - offset;
/* Stop reading if we hit a patch of urgent data */
- if (tp->urg_data) {
+ if (unlikely(tp->urg_data)) {
u32 urg_offset = tp->urg_seq - seq;
if (urg_offset < len)
len = urg_offset;
@@ -1665,11 +1695,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
continue;
}
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
- sk_eat_skb(sk, skb);
+ tcp_eat_recv_skb(sk, skb);
++seq;
break;
}
- sk_eat_skb(sk, skb);
+ tcp_eat_recv_skb(sk, skb);
if (!desc->count)
break;
WRITE_ONCE(tp->copied_seq, seq);
@@ -2329,7 +2359,7 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
u32 offset;
/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
- if (tp->urg_data && tp->urg_seq == *seq) {
+ if (unlikely(tp->urg_data) && tp->urg_seq == *seq) {
if (copied)
break;
if (signal_pending(current)) {
@@ -2372,10 +2402,10 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
break;
if (copied) {
- if (sk->sk_err ||
+ if (!timeo ||
+ sk->sk_err ||
sk->sk_state == TCP_CLOSE ||
(sk->sk_shutdown & RCV_SHUTDOWN) ||
- !timeo ||
signal_pending(current))
break;
} else {
@@ -2409,13 +2439,12 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
}
}
- tcp_cleanup_rbuf(sk, copied);
-
if (copied >= target) {
/* Do not sleep, just process backlog. */
- release_sock(sk);
- lock_sock(sk);
+ __sk_flush_backlog(sk);
} else {
+ tcp_cleanup_rbuf(sk, copied);
+ sk_defer_free_flush(sk);
sk_wait_data(sk, &timeo, last);
}
@@ -2435,7 +2464,7 @@ found_ok_skb:
used = len;
/* Do we have urgent data here? */
- if (tp->urg_data) {
+ if (unlikely(tp->urg_data)) {
u32 urg_offset = tp->urg_seq - *seq;
if (urg_offset < used) {
if (!urg_offset) {
@@ -2469,8 +2498,8 @@ found_ok_skb:
tcp_rcv_space_adjust(sk);
skip_copy:
- if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
- tp->urg_data = 0;
+ if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) {
+ WRITE_ONCE(tp->urg_data, 0);
tcp_fast_path_check(sk);
}
@@ -2485,14 +2514,14 @@ skip_copy:
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
goto found_fin_ok;
if (!(flags & MSG_PEEK))
- sk_eat_skb(sk, skb);
+ tcp_eat_recv_skb(sk, skb);
continue;
found_fin_ok:
/* Process the FIN. */
WRITE_ONCE(*seq, *seq + 1);
if (!(flags & MSG_PEEK))
- sk_eat_skb(sk, skb);
+ tcp_eat_recv_skb(sk, skb);
break;
} while (len > 0);
@@ -2534,6 +2563,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
ret = tcp_recvmsg_locked(sk, msg, len, nonblock, flags, &tss,
&cmsg_flags);
release_sock(sk);
+ sk_defer_free_flush(sk);
if (cmsg_flags && ret >= 0) {
if (cmsg_flags & TCP_CMSG_TS)
@@ -2964,7 +2994,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
- tp->urg_data = 0;
+ WRITE_ONCE(tp->urg_data, 0);
tcp_write_queue_purge(sk);
tcp_fastopen_active_disable_ofo_check(sk);
skb_rbtree_purge(&tp->out_of_order_queue);
@@ -3059,7 +3089,7 @@ int tcp_disconnect(struct sock *sk, int flags)
sk->sk_frag.page = NULL;
sk->sk_frag.offset = 0;
}
-
+ sk_defer_free_flush(sk);
sk_error_report(sk);
return 0;
}
@@ -3774,10 +3804,12 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
tcp_get_info_chrono_stats(tp, info);
info->tcpi_segs_out = tp->segs_out;
- info->tcpi_segs_in = tp->segs_in;
+
+ /* segs_in and data_segs_in can be updated from tcp_segs_in() in BH context */
+ info->tcpi_segs_in = READ_ONCE(tp->segs_in);
+ info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in);
info->tcpi_min_rtt = tcp_min_rtt(tp);
- info->tcpi_data_segs_in = tp->data_segs_in;
info->tcpi_data_segs_out = tp->data_segs_out;
info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0;
@@ -4186,6 +4218,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname,
&zc, &len, err);
release_sock(sk);
+ sk_defer_free_flush(sk);
if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags))
goto zerocopy_rcv_cmsg;
switch (len) {
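The sk_defer_free_flush() used throughout this file is presumably the cheap inline front end to the __sk_defer_free_flush() added above, skipping the call when the deferral list is empty. A sketch of its expected shape:

	static inline void sk_defer_free_flush(struct sock *sk)
	{
		if (llist_empty(&sk->defer_list))
			return;
		__sk_defer_free_flush(sk);
	}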
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 246ab7b5e857..3658b9c3dd2b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5591,7 +5591,7 @@ static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
}
}
- tp->urg_data = TCP_URG_NOTYET;
+ WRITE_ONCE(tp->urg_data, TCP_URG_NOTYET);
WRITE_ONCE(tp->urg_seq, ptr);
/* Disable header prediction. */
@@ -5604,11 +5604,11 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t
struct tcp_sock *tp = tcp_sk(sk);
/* Check if we get a new urgent pointer - normally not. */
- if (th->urg)
+ if (unlikely(th->urg))
tcp_check_urg(sk, th);
/* Do we wait for any urgent data? - normally not... */
- if (tp->urg_data == TCP_URG_NOTYET) {
+ if (unlikely(tp->urg_data == TCP_URG_NOTYET)) {
u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
th->syn;
@@ -5617,7 +5617,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t
u8 tmp;
if (skb_copy_bits(skb, ptr, &tmp, 1))
BUG();
- tp->urg_data = TCP_URG_VALID | tmp;
+ WRITE_ONCE(tp->urg_data, TCP_URG_VALID | tmp);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk);
}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 13d868c43284..3dd19a2bf06c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1182,7 +1182,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
if (!md5sig)
return -ENOMEM;
- sk_nocaps_add(sk, NETIF_F_GSO_MASK);
+ sk_gso_disable(sk);
INIT_HLIST_HEAD(&md5sig->head);
rcu_assign_pointer(tp->md5sig_info, md5sig);
}
@@ -1620,7 +1620,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
*/
tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index, key->flags,
key->key, key->keylen, GFP_ATOMIC);
- sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
+ sk_gso_disable(newsk);
}
#endif
@@ -1800,8 +1800,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
- u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
- u32 tail_gso_size, tail_gso_segs;
+ u32 limit, tail_gso_size, tail_gso_segs;
struct skb_shared_info *shinfo;
const struct tcphdr *th;
struct tcphdr *thtail;
@@ -1909,7 +1908,7 @@ no_coalesce:
* to reduce memory overhead, so add a little headroom here.
* Few sockets backlog are possibly concurrently non empty.
*/
- limit += 64*1024;
+ limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf) + 64*1024;
if (unlikely(sk_add_backlog(sk, skb, limit))) {
bh_unlock_sock(sk);
@@ -2103,6 +2102,7 @@ process:
sk_incoming_cpu_update(sk);
+ sk_defer_free_flush(sk);
bh_lock_sock_nested(sk);
tcp_segs_in(tcp_sk(sk), skb);
ret = 0;
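sk_gso_disable() replaces sk_nocaps_add(sk, NETIF_F_GSO_MASK); consistent with the new sk_gso_disabled test in sk_setup_caps() earlier in this patch, its expected shape is roughly:

	static inline void sk_gso_disable(struct sock *sk)
	{
		sk->sk_gso_disabled = 1;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}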
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index fc61cd3fea65..30abde86db45 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -8,6 +8,7 @@
#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
+#include <net/gro.h>
#include <net/tcp.h>
#include <net/protocol.h>
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 2e6e5a70168e..5079832af5c1 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1359,7 +1359,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
#ifdef CONFIG_TCP_MD5SIG
/* Calculate the MD5 hash, as we have all we need now */
if (md5) {
- sk_nocaps_add(sk, NETIF_F_GSO_MASK);
+ sk_gso_disable(sk);
tp->af_specific->calc_md5_hash(opts.hash_location,
md5, sk, skb);
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8bcecdd6aeda..799b663daf87 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -122,7 +122,7 @@ EXPORT_SYMBOL(udp_table);
long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);
-atomic_long_t udp_memory_allocated;
+atomic_long_t udp_memory_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(udp_memory_allocated);
#define MAX_UDP_PORTS 65536
@@ -459,7 +459,7 @@ static struct sock *udp4_lookup_run_bpf(struct net *net,
struct udp_table *udptable,
struct sk_buff *skb,
__be32 saddr, __be16 sport,
- __be32 daddr, u16 hnum)
+ __be32 daddr, u16 hnum, const int dif)
{
struct sock *sk, *reuse_sk;
bool no_reuseport;
@@ -467,8 +467,8 @@ static struct sock *udp4_lookup_run_bpf(struct net *net,
if (udptable != &udp_table)
return NULL; /* only UDP is supported */
- no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP,
- saddr, sport, daddr, hnum, &sk);
+ no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP, saddr, sport,
+ daddr, hnum, dif, &sk);
if (no_reuseport || IS_ERR_OR_NULL(sk))
return sk;
@@ -504,7 +504,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
/* Lookup redirect from BPF */
if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
sk = udp4_lookup_run_bpf(net, udptable, skb,
- saddr, sport, daddr, hnum);
+ saddr, sport, daddr, hnum, dif);
if (sk) {
result = sk;
goto done;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 86d32a1e62ac..6d1a4bec2614 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -7,6 +7,7 @@
*/
#include <linux/skbuff.h>
+#include <net/gro.h>
#include <net/udp.h>
#include <net/protocol.h>
#include <net/inet_common.h>
@@ -424,6 +425,33 @@ out:
return segs;
}
+static int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
+{
+ if (unlikely(p->len + skb->len >= 65536))
+ return -E2BIG;
+
+ if (NAPI_GRO_CB(p)->last == p)
+ skb_shinfo(p)->frag_list = skb;
+ else
+ NAPI_GRO_CB(p)->last->next = skb;
+
+ skb_pull(skb, skb_gro_offset(skb));
+
+ NAPI_GRO_CB(p)->last = skb;
+ NAPI_GRO_CB(p)->count++;
+ p->data_len += skb->len;
+
+ /* sk ownership - if any - completely transferred to the aggregated packet */
+ skb->destructor = NULL;
+ p->truesize += skb->truesize;
+ p->len += skb->len;
+
+ NAPI_GRO_CB(skb)->same_flow = 1;
+
+ return 0;
+}
+
#define UDP_GRO_CNT_MAX 64
static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
struct sk_buff *skb)
@@ -600,13 +628,11 @@ struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
inet_gro_compute_pseudo);
skip:
NAPI_GRO_CB(skb)->is_ipv6 = 0;
- rcu_read_lock();
if (static_branch_unlikely(&udp_encap_needed_key))
sk = udp4_gro_lookup_skb(skb, uh->source, uh->dest);
pp = udp_gro_receive(head, skb, uh, sk);
- rcu_read_unlock();
return pp;
flush:
@@ -641,7 +667,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
uh->len = newlen;
- rcu_read_lock();
sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
udp4_lib_lookup_skb, skb, uh->source, uh->dest);
if (sk && udp_sk(sk)->gro_complete) {
@@ -662,7 +687,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
} else {
err = udp_gro_complete_segment(skb);
}
- rcu_read_unlock();
if (skb->remcsum_offload)
skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index dab4a047590b..d1636425654e 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -337,11 +337,8 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr);
rcu_read_unlock();
- if (!inet_can_nonlocal_bind(net, inet) &&
- v4addr != htonl(INADDR_ANY) &&
- chk_addr_ret != RTN_LOCAL &&
- chk_addr_ret != RTN_MULTICAST &&
- chk_addr_ret != RTN_BROADCAST) {
+ if (!inet_addr_valid_or_nonlocal(net, inet, v4addr,
+ chk_addr_ret)) {
err = -EADDRNOTAVAIL;
goto out;
}
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 828e62514260..b5995c1f4d7a 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -175,7 +175,6 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des
* See 11.3.2 of RFC 3775 for details.
*/
if (opt[off] == IPV6_TLV_HAO) {
- struct in6_addr final_addr;
struct ipv6_destopt_hao *hao;
hao = (struct ipv6_destopt_hao *)&opt[off];
@@ -184,9 +183,7 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des
hao->length);
goto bad;
}
- final_addr = hao->addr;
- hao->addr = iph->saddr;
- iph->saddr = final_addr;
+ swap(hao->addr, iph->saddr);
}
break;
}
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index a349d4798077..ba5e81cd569c 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -16,6 +16,7 @@
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <net/gro.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 38ece3b7b839..77e34aec7e82 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -686,7 +686,6 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
struct net *net = dev_net(skb->dev);
int accept_source_route = net->ipv6.devconf_all->accept_source_route;
- idev = __in6_dev_get(skb->dev);
if (idev && accept_source_route > idev->cnf.accept_source_route)
accept_source_route = idev->cnf.accept_source_route;
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 67c9114835c8..4514444e96c8 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -165,7 +165,7 @@ static inline struct sock *inet6_lookup_run_bpf(struct net *net,
const struct in6_addr *saddr,
const __be16 sport,
const struct in6_addr *daddr,
- const u16 hnum)
+ const u16 hnum, const int dif)
{
struct sock *sk, *reuse_sk;
bool no_reuseport;
@@ -173,8 +173,8 @@ static inline struct sock *inet6_lookup_run_bpf(struct net *net,
if (hashinfo != &tcp_hashinfo)
return NULL; /* only TCP is supported */
- no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP,
- saddr, sport, daddr, hnum, &sk);
+ no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP, saddr, sport,
+ daddr, hnum, dif, &sk);
if (no_reuseport || IS_ERR_OR_NULL(sk))
return sk;
@@ -198,7 +198,7 @@ struct sock *inet6_lookup_listener(struct net *net,
/* Lookup redirect from BPF */
if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
result = inet6_lookup_run_bpf(net, hashinfo, skb, doff,
- saddr, sport, daddr, hnum);
+ saddr, sport, daddr, hnum, dif);
if (result)
goto done;
}
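
The dif plumbed through these lookup paths surfaces to BPF as the ingress_ifindex field of struct bpf_sk_lookup. A minimal sk_lookup program that only steers traffic arriving on one interface might look like this sketch (the ifindex constant and map name are illustrative assumptions):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define WANTED_IFINDEX 2	/* assumption: interface of interest */

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} redir_sk SEC(".maps");

SEC("sk_lookup")
int steer_by_ifindex(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	__u32 zero = 0;
	long err;

	if (ctx->ingress_ifindex != WANTED_IFINDEX)
		return SK_PASS;		/* normal socket lookup applies */

	sk = bpf_map_lookup_elem(&redir_sk, &zero);
	if (!sk)
		return SK_PASS;
	err = bpf_sk_assign(ctx, sk, 0);
	bpf_sk_release(sk);
	return err ? SK_DROP : SK_PASS;
}

char _license[] SEC("license") = "GPL";
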
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 1b9827ff8ccf..48674888f2dc 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -208,7 +208,6 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
flush += ntohs(iph->payload_len) != skb_gro_len(skb);
- rcu_read_lock();
proto = iph->nexthdr;
ops = rcu_dereference(inet6_offloads[proto]);
if (!ops || !ops->callbacks.gro_receive) {
@@ -221,7 +220,7 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
ops = rcu_dereference(inet6_offloads[proto]);
if (!ops || !ops->callbacks.gro_receive)
- goto out_unlock;
+ goto out;
iph = ipv6_hdr(skb);
}
@@ -279,9 +278,6 @@ not_same_flow:
pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
ops->callbacks.gro_receive, head, skb);
-out_unlock:
- rcu_read_unlock();
-
out:
skb_gro_flush_final(skb, pp, flush);
@@ -331,18 +327,14 @@ INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
- rcu_read_lock();
-
nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
- goto out_unlock;
+ goto out;
err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete,
udp6_gro_complete, skb, nhoff);
-out_unlock:
- rcu_read_unlock();
-
+out:
return err;
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ff4e83e2a506..2995f8d89e7e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -977,7 +977,7 @@ slow_path:
fail_toobig:
if (skb->sk && dst_allfrag(skb_dst(skb)))
- sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
+ sk_gso_disable(skb->sk);
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
err = -EMSGSIZE;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 41efca817db4..a733803a710c 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -471,10 +471,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
if (sk->sk_protocol == IPPROTO_TCP) {
struct inet_connection_sock *icsk = inet_csk(sk);
- local_bh_disable();
+
sock_prot_inuse_add(net, sk->sk_prot, -1);
sock_prot_inuse_add(net, &tcp_prot, 1);
- local_bh_enable();
+
sk->sk_prot = &tcp_prot;
icsk->icsk_af_ops = &ipv4_specific;
sk->sk_socket->ops = &inet_stream_ops;
@@ -485,10 +485,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
if (sk->sk_protocol == IPPROTO_UDPLITE)
prot = &udplite_prot;
- local_bh_disable();
+
sock_prot_inuse_add(net, sk->sk_prot, -1);
sock_prot_inuse_add(net, prot, 1);
- local_bh_enable();
+
sk->sk_prot = prot;
sk->sk_socket->ops = &inet_dgram_ops;
sk->sk_family = PF_INET;
@@ -599,7 +599,14 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
/* RFC 3542, 6.5: default traffic class of 0x0 */
if (val == -1)
val = 0;
- np->tclass = val;
+ if (sk->sk_type == SOCK_STREAM) {
+ val &= ~INET_ECN_MASK;
+ val |= np->tclass & INET_ECN_MASK;
+ }
+ if (np->tclass != val) {
+ np->tclass = val;
+ sk_dst_reset(sk);
+ }
retv = 0;
break;
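
For stream sockets the new code preserves the two ECN bits, which belong to TCP, while letting the application update the DSCP part, and it only drops cached routes when the value actually changes. A runnable model of the bit handling (merge_tclass() is a name made up for illustration):

#include <stdio.h>

#define INET_ECN_MASK 3		/* low two bits of TOS/TCLASS carry ECN */

static int merge_tclass(int cur, int val, int is_stream)
{
	if (is_stream) {
		val &= ~INET_ECN_MASK;		/* strip caller's ECN bits */
		val |= cur & INET_ECN_MASK;	/* keep TCP's ECN bits */
	}
	return val;
}

int main(void)
{
	/* current tclass 0x02 (ECT(0)), application asks for 0x28 (AF11) */
	printf("0x%x\n", merge_tclass(0x02, 0x28, 1));	/* prints 0x2a */
	return 0;
}
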
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 42d60c76d30a..62f1e16eea2b 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -328,9 +328,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
static void rt6_info_init(struct rt6_info *rt)
{
- struct dst_entry *dst = &rt->dst;
-
- memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
+ memset_after(rt, 0, dst);
INIT_LIST_HEAD(&rt->rt6i_uncached);
}
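
memset_after() replaces the fragile "dst + 1" pointer arithmetic with a field-bounded helper from include/linux/string.h. A self-contained equivalent, demonstrated on a hypothetical struct rt_like:

#include <stdio.h>
#include <stddef.h>
#include <string.h>

#define offsetofend(T, m) (offsetof(T, m) + sizeof(((T *)0)->m))

/* Zero everything in *obj that follows 'member'. */
#define memset_after(obj, v, member)					\
	memset((char *)(obj) + offsetofend(typeof(*(obj)), member), v,	\
	       sizeof(*(obj)) - offsetofend(typeof(*(obj)), member))

struct rt_like { long dst; int a; int b; };

int main(void)
{
	struct rt_like rt = { .dst = 1, .a = 2, .b = 3 };

	memset_after(&rt, 0, dst);
	printf("dst=%ld a=%d b=%d\n", rt.dst, rt.a, rt.b); /* dst=1 a=0 b=0 */
	return 0;
}
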
@@ -1485,7 +1483,7 @@ static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
static u32 rt6_exception_hash(const struct in6_addr *dst,
const struct in6_addr *src)
{
- static siphash_key_t rt6_exception_key __read_mostly;
+ static siphash_aligned_key_t rt6_exception_key;
struct {
struct in6_addr dst;
struct in6_addr src;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index e8cfb9e997bf..d1b61d00368e 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -20,7 +20,7 @@
#define COOKIEBITS 24 /* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
-static siphash_key_t syncookie6_secret[2] __read_mostly;
+static siphash_aligned_key_t syncookie6_secret[2];
/* RFC 2460, Section 8.3:
* [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..]
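
This conversion recurs across the series (syncookies, conntrack, NAT, route exceptions): siphash reads its 16-byte key as two u64 words, and the dedicated type presumably guarantees 16-byte alignment so the aligned fast path is always usable. A sketch of what the annotation amounts to (the exact definition in include/linux/siphash.h may differ):

typedef struct {
	unsigned long long key[2];
} siphash_key_t;

#define siphash_aligned_key_t siphash_key_t __attribute__((aligned(16)))

/* usage as in the hunk above */
static siphash_aligned_key_t syncookie6_secret[2];
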
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 551fce49841d..3b7d6ede1364 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -72,7 +72,7 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req);
-static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
+INDIRECT_CALLABLE_SCOPE int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static const struct inet_connection_sock_af_ops ipv6_mapped;
const struct inet_connection_sock_af_ops ipv6_specific;
@@ -1466,7 +1466,8 @@ INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
* This is because we cannot sleep with the original spinlock
* held.
*/
-static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
+INDIRECT_CALLABLE_SCOPE
+int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
struct ipv6_pinfo *np = tcp_inet6_sk(sk);
struct sk_buff *opt_skb = NULL;
@@ -1757,6 +1758,7 @@ process:
sk_incoming_cpu_update(sk);
+ sk_defer_free_flush(sk);
bh_lock_sock_nested(sk);
tcp_segs_in(tcp_sk(sk), skb);
ret = 0;
@@ -1893,9 +1895,7 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
- struct ipv6_pinfo *np = inet6_sk(sk);
-
- __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
+ __tcp_v6_send_check(skb, &sk->sk_v6_rcv_saddr, &sk->sk_v6_daddr);
}
const struct inet_connection_sock_af_ops ipv6_specific = {
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 1796856bc24f..39db5a226855 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -7,6 +7,7 @@
*/
#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
+#include <net/gro.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index e43b31d25fb6..6a0e569f0bb8 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -195,7 +195,7 @@ static inline struct sock *udp6_lookup_run_bpf(struct net *net,
const struct in6_addr *saddr,
__be16 sport,
const struct in6_addr *daddr,
- u16 hnum)
+ u16 hnum, const int dif)
{
struct sock *sk, *reuse_sk;
bool no_reuseport;
@@ -203,8 +203,8 @@ static inline struct sock *udp6_lookup_run_bpf(struct net *net,
if (udptable != &udp_table)
return NULL; /* only UDP is supported */
- no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP,
- saddr, sport, daddr, hnum, &sk);
+ no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, saddr, sport,
+ daddr, hnum, dif, &sk);
if (no_reuseport || IS_ERR_OR_NULL(sk))
return sk;
@@ -240,7 +240,7 @@ struct sock *__udp6_lib_lookup(struct net *net,
/* Lookup redirect from BPF */
if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
sk = udp6_lookup_run_bpf(net, udptable, skb,
- saddr, sport, daddr, hnum);
+ saddr, sport, daddr, hnum, dif);
if (sk) {
result = sk;
goto done;
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index b3d9ed96e5ea..7720d04ed396 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -13,6 +13,7 @@
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include "ip6_offload.h"
+#include <net/gro.h>
static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
netdev_features_t features)
@@ -144,13 +145,11 @@ struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb)
skip:
NAPI_GRO_CB(skb)->is_ipv6 = 1;
- rcu_read_lock();
if (static_branch_unlikely(&udpv6_encap_needed_key))
sk = udp6_gro_lookup_skb(skb, uh->source, uh->dest);
pp = udp_gro_receive(head, skb, uh, sk);
- rcu_read_unlock();
return pp;
flush:
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 18316ee3c692..49ecbe8d176a 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -142,7 +142,7 @@ static inline size_t iucv_msg_length(struct iucv_message *msg)
* iucv_sock_in_state() - check for specific states
* @sk: sock structure
* @state: first iucv sk state
- * @state: second iucv sk state
+ * @state2: second iucv sk state
*
 * Returns true if the socket is either in the first or second state.
*/
@@ -172,7 +172,7 @@ static inline int iucv_below_msglim(struct sock *sk)
(atomic_read(&iucv->pendings) <= 0));
}
-/**
+/*
* iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
*/
static void iucv_sock_wake_msglim(struct sock *sk)
@@ -187,7 +187,7 @@ static void iucv_sock_wake_msglim(struct sock *sk)
rcu_read_unlock();
}
-/**
+/*
* afiucv_hs_send() - send a message through HiperSockets transport
*/
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
@@ -473,7 +473,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio,
atomic_set(&iucv->msg_recv, 0);
iucv->path = NULL;
iucv->sk_txnotify = afiucv_hs_callback_txnotify;
- memset(&iucv->src_user_id , 0, 32);
+ memset(&iucv->init, 0, sizeof(iucv->init));
if (pr_iucv)
iucv->transport = AF_IUCV_TRANS_IUCV;
else
@@ -1831,9 +1831,9 @@ static void afiucv_swap_src_dest(struct sk_buff *skb)
memset(skb->data, 0, ETH_HLEN);
}
-/**
+/*
* afiucv_hs_callback_syn - react on received SYN
- **/
+ */
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
@@ -1896,9 +1896,9 @@ out:
return NET_RX_SUCCESS;
}
-/**
+/*
* afiucv_hs_callback_synack() - react on received SYN-ACK
- **/
+ */
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
@@ -1917,9 +1917,9 @@ static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
return NET_RX_SUCCESS;
}
-/**
+/*
* afiucv_hs_callback_synfin() - react on received SYN_FIN
- **/
+ */
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
@@ -1937,9 +1937,9 @@ static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
return NET_RX_SUCCESS;
}
-/**
+/*
* afiucv_hs_callback_fin() - react on received FIN
- **/
+ */
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
@@ -1960,9 +1960,9 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
return NET_RX_SUCCESS;
}
-/**
+/*
* afiucv_hs_callback_win() - react on received WIN
- **/
+ */
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
@@ -1978,9 +1978,9 @@ static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
return NET_RX_SUCCESS;
}
-/**
+/*
* afiucv_hs_callback_rx() - react on received data
- **/
+ */
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
@@ -2022,11 +2022,11 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
return NET_RX_SUCCESS;
}
-/**
+/*
* afiucv_hs_rcv() - base function for arriving data through HiperSockets
* transport
* called from netif RX softirq
- **/
+ */
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
@@ -2128,10 +2128,10 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
return err;
}
-/**
+/*
* afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
* transport
- **/
+ */
static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify n)
{
struct iucv_sock *iucv = iucv_sk(sk);
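
The comment churn in this file is about kernel-doc: the "/**" opener is reserved for comments that follow the kernel-doc grammar, and everything else must open with a plain "/*". The fixed header from earlier in this file, in full kernel-doc form:

/**
 * iucv_sock_in_state() - check for specific states
 * @sk: sock structure
 * @state: first iucv sk state
 * @state2: second iucv sk state
 *
 * Returns true if the socket is either in the first or second state.
 */
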
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index f3343a8541a5..8f4d49a7d3e8 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -276,8 +276,8 @@ static union iucv_param *iucv_param[NR_CPUS];
static union iucv_param *iucv_param_irq[NR_CPUS];
/**
- * iucv_call_b2f0
- * @code: identifier of IUCV call to CP.
+ * __iucv_call_b2f0
+ * @command: identifier of IUCV call to CP.
* @parm: pointer to a struct iucv_parm block
*
* Calls CP to execute IUCV commands.
@@ -309,7 +309,7 @@ static inline int iucv_call_b2f0(int command, union iucv_param *parm)
return ccode == 1 ? parm->ctrl.iprcode : ccode;
}
-/**
+/*
* iucv_query_maxconn
*
* Determines the maximum number of connections that may be established.
@@ -493,8 +493,8 @@ static void iucv_retrieve_cpu(void *data)
cpumask_clear_cpu(cpu, &iucv_buffer_cpumask);
}
-/**
- * iucv_setmask_smp
+/*
+ * iucv_setmask_mp
*
* Allow iucv interrupts on all cpus.
*/
@@ -512,7 +512,7 @@ static void iucv_setmask_mp(void)
cpus_read_unlock();
}
-/**
+/*
* iucv_setmask_up
*
* Allow iucv interrupts on a single cpu.
@@ -529,7 +529,7 @@ static void iucv_setmask_up(void)
smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
}
-/**
+/*
* iucv_enable
*
* This function makes iucv ready for use. It allocates the pathid
@@ -564,7 +564,7 @@ out:
return rc;
}
-/**
+/*
* iucv_disable
*
* This function shuts down iucv. It disables iucv interrupts, retrieves
@@ -1347,8 +1347,9 @@ EXPORT_SYMBOL(iucv_message_send);
* @srccls: source class of message
* @buffer: address of send buffer or address of struct iucv_array
* @size: length of send buffer
- * @ansbuf: address of answer buffer or address of struct iucv_array
+ * @answer: address of answer buffer or address of struct iucv_array
* @asize: size of reply buffer
+ * @residual: ignored
*
* This function transmits data to another application. Data to be
* transmitted is in a buffer. The receiver of the send is expected to
@@ -1400,13 +1401,6 @@ out:
}
EXPORT_SYMBOL(iucv_message_send2way);
-/**
- * iucv_path_pending
- * @data: Pointer to external interrupt buffer
- *
- * Process connection pending work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_path_pending {
u16 ippathid;
u8 ipflags1;
@@ -1420,6 +1414,13 @@ struct iucv_path_pending {
u8 res4[3];
} __packed;
+/**
+ * iucv_path_pending
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection pending work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_path_pending(struct iucv_irq_data *data)
{
struct iucv_path_pending *ipp = (void *) data;
@@ -1461,13 +1462,6 @@ out_sever:
iucv_sever_pathid(ipp->ippathid, error);
}
-/**
- * iucv_path_complete
- * @data: Pointer to external interrupt buffer
- *
- * Process connection complete work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_path_complete {
u16 ippathid;
u8 ipflags1;
@@ -1481,6 +1475,13 @@ struct iucv_path_complete {
u8 res4[3];
} __packed;
+/**
+ * iucv_path_complete
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection complete work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_path_complete(struct iucv_irq_data *data)
{
struct iucv_path_complete *ipc = (void *) data;
@@ -1492,13 +1493,6 @@ static void iucv_path_complete(struct iucv_irq_data *data)
path->handler->path_complete(path, ipc->ipuser);
}
-/**
- * iucv_path_severed
- * @data: Pointer to external interrupt buffer
- *
- * Process connection severed work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_path_severed {
u16 ippathid;
u8 res1;
@@ -1511,6 +1505,13 @@ struct iucv_path_severed {
u8 res5[3];
} __packed;
+/**
+ * iucv_path_severed
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection severed work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_path_severed(struct iucv_irq_data *data)
{
struct iucv_path_severed *ips = (void *) data;
@@ -1528,13 +1529,6 @@ static void iucv_path_severed(struct iucv_irq_data *data)
}
}
-/**
- * iucv_path_quiesced
- * @data: Pointer to external interrupt buffer
- *
- * Process connection quiesced work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_path_quiesced {
u16 ippathid;
u8 res1;
@@ -1547,6 +1541,13 @@ struct iucv_path_quiesced {
u8 res5[3];
} __packed;
+/**
+ * iucv_path_quiesced
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection quiesced work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_path_quiesced(struct iucv_irq_data *data)
{
struct iucv_path_quiesced *ipq = (void *) data;
@@ -1556,13 +1557,6 @@ static void iucv_path_quiesced(struct iucv_irq_data *data)
path->handler->path_quiesced(path, ipq->ipuser);
}
-/**
- * iucv_path_resumed
- * @data: Pointer to external interrupt buffer
- *
- * Process connection resumed work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_path_resumed {
u16 ippathid;
u8 res1;
@@ -1575,6 +1569,13 @@ struct iucv_path_resumed {
u8 res5[3];
} __packed;
+/**
+ * iucv_path_resumed
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection resumed work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_path_resumed(struct iucv_irq_data *data)
{
struct iucv_path_resumed *ipr = (void *) data;
@@ -1584,13 +1585,6 @@ static void iucv_path_resumed(struct iucv_irq_data *data)
path->handler->path_resumed(path, ipr->ipuser);
}
-/**
- * iucv_message_complete
- * @data: Pointer to external interrupt buffer
- *
- * Process message complete work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_message_complete {
u16 ippathid;
u8 ipflags1;
@@ -1606,6 +1600,13 @@ struct iucv_message_complete {
u8 res2[3];
} __packed;
+/**
+ * iucv_message_complete
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process message complete work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_message_complete(struct iucv_irq_data *data)
{
struct iucv_message_complete *imc = (void *) data;
@@ -1624,13 +1625,6 @@ static void iucv_message_complete(struct iucv_irq_data *data)
}
}
-/**
- * iucv_message_pending
- * @data: Pointer to external interrupt buffer
- *
- * Process message pending work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_message_pending {
u16 ippathid;
u8 ipflags1;
@@ -1653,6 +1647,13 @@ struct iucv_message_pending {
u8 res2[3];
} __packed;
+/**
+ * iucv_message_pending
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process message pending work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_message_pending(struct iucv_irq_data *data)
{
struct iucv_message_pending *imp = (void *) data;
@@ -1673,7 +1674,7 @@ static void iucv_message_pending(struct iucv_irq_data *data)
}
}
-/**
+/*
* iucv_tasklet_fn:
*
* This tasklet loops over the queue of irq buffers created by
@@ -1717,7 +1718,7 @@ static void iucv_tasklet_fn(unsigned long ignored)
spin_unlock(&iucv_table_lock);
}
-/**
+/*
* iucv_work_fn:
*
* This work function loops over the queue of path pending irq blocks
@@ -1748,9 +1749,8 @@ static void iucv_work_fn(struct work_struct *work)
spin_unlock_bh(&iucv_table_lock);
}
-/**
+/*
* iucv_external_interrupt
- * @code: irq code
*
* Handles external interrupts coming in from CP.
* Places the interrupt buffer on a queue and schedules iucv_tasklet_fn().
diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c
index 99a2e30b3833..b2253df54413 100644
--- a/net/mac80211/ethtool.c
+++ b/net/mac80211/ethtool.c
@@ -14,7 +14,9 @@
#include "driver-ops.h"
static int ieee80211_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy);
@@ -25,7 +27,9 @@ static int ieee80211_set_ringparam(struct net_device *dev,
}
static void ieee80211_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy);
diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c
index 36fac3daf86a..86ad15abf897 100644
--- a/net/mctp/test/route-test.c
+++ b/net/mctp/test/route-test.c
@@ -150,11 +150,6 @@ static void mctp_test_fragment(struct kunit *test)
rt = mctp_test_create_route(&init_net, NULL, 10, mtu);
KUNIT_ASSERT_TRUE(test, rt);
- /* The refcount would usually be incremented as part of a route lookup,
- * but we're setting the route directly here.
- */
- refcount_inc(&rt->rt.refs);
-
rc = mctp_do_fragment_route(&rt->rt, skb, mtu, MCTP_TAG_OWNER);
KUNIT_EXPECT_FALSE(test, rc);
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index c82a76d2d0bf..b100048e43fe 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -48,7 +48,7 @@ enum {
MPTCP_CMSG_TS = BIT(0),
};
-static struct percpu_counter mptcp_sockets_allocated;
+static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp;
static void __mptcp_destroy_sock(struct sock *sk);
static void __mptcp_check_send_data_fin(struct sock *sk);
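
____cacheline_aligned_in_smp keeps this frequently-updated counter from sharing a cache line with neighbouring static data, avoiding false sharing between CPUs. On an SMP build the annotation is roughly (see include/linux/cache.h; SMP_CACHE_BYTES is arch-specific, 64 on x86-64):

#define ____cacheline_aligned_in_smp __attribute__((__aligned__(64)))

struct hot_counter {
	long count;
} stats ____cacheline_aligned_in_smp;	/* starts on its own cache line */
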
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 0f1e661c2032..fb43e145cb57 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -390,6 +390,8 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
switch (optname) {
case IPV6_V6ONLY:
+ case IPV6_TRANSPARENT:
+ case IPV6_FREEBIND:
lock_sock(sk);
ssock = __mptcp_nmpc_socket(msk);
if (!ssock) {
@@ -398,8 +400,24 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
}
ret = tcp_setsockopt(ssock->sk, SOL_IPV6, optname, optval, optlen);
- if (ret == 0)
+ if (ret != 0) {
+ release_sock(sk);
+ return ret;
+ }
+
+ sockopt_seq_inc(msk);
+
+ switch (optname) {
+ case IPV6_V6ONLY:
sk->sk_ipv6only = ssock->sk->sk_ipv6only;
+ break;
+ case IPV6_TRANSPARENT:
+ inet_sk(sk)->transparent = inet_sk(ssock->sk)->transparent;
+ break;
+ case IPV6_FREEBIND:
+ inet_sk(sk)->freebind = inet_sk(ssock->sk)->freebind;
+ break;
+ }
release_sock(sk);
break;
@@ -598,6 +616,85 @@ static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t
return ret;
}
+static int mptcp_setsockopt_sol_ip_set_transparent(struct mptcp_sock *msk, int optname,
+ sockptr_t optval, unsigned int optlen)
+{
+ struct sock *sk = (struct sock *)msk;
+ struct inet_sock *issk;
+ struct socket *ssock;
+ int err;
+
+ err = ip_setsockopt(sk, SOL_IP, optname, optval, optlen);
+ if (err != 0)
+ return err;
+
+ lock_sock(sk);
+
+ ssock = __mptcp_nmpc_socket(msk);
+ if (!ssock) {
+ release_sock(sk);
+ return -EINVAL;
+ }
+
+ issk = inet_sk(ssock->sk);
+
+ switch (optname) {
+ case IP_FREEBIND:
+ issk->freebind = inet_sk(sk)->freebind;
+ break;
+ case IP_TRANSPARENT:
+ issk->transparent = inet_sk(sk)->transparent;
+ break;
+ default:
+ release_sock(sk);
+ WARN_ON_ONCE(1);
+ return -EOPNOTSUPP;
+ }
+
+ sockopt_seq_inc(msk);
+ release_sock(sk);
+ return 0;
+}
+
+static int mptcp_setsockopt_v4_set_tos(struct mptcp_sock *msk, int optname,
+ sockptr_t optval, unsigned int optlen)
+{
+ struct mptcp_subflow_context *subflow;
+ struct sock *sk = (struct sock *)msk;
+ int err, val;
+
+ err = ip_setsockopt(sk, SOL_IP, optname, optval, optlen);
+
+ if (err != 0)
+ return err;
+
+ lock_sock(sk);
+ sockopt_seq_inc(msk);
+ val = inet_sk(sk)->tos;
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ __ip_sock_set_tos(ssk, val);
+ }
+ release_sock(sk);
+
+ return err;
+}
+
+static int mptcp_setsockopt_v4(struct mptcp_sock *msk, int optname,
+ sockptr_t optval, unsigned int optlen)
+{
+ switch (optname) {
+ case IP_FREEBIND:
+ case IP_TRANSPARENT:
+ return mptcp_setsockopt_sol_ip_set_transparent(msk, optname, optval, optlen);
+ case IP_TOS:
+ return mptcp_setsockopt_v4_set_tos(msk, optname, optval, optlen);
+ }
+
+ return -EOPNOTSUPP;
+}
+
static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
sockptr_t optval, unsigned int optlen)
{
@@ -637,6 +734,9 @@ int mptcp_setsockopt(struct sock *sk, int level, int optname,
if (ssk)
return tcp_setsockopt(ssk, level, optname, optval, optlen);
+ if (level == SOL_IP)
+ return mptcp_setsockopt_v4(msk, optname, optval, optlen);
+
if (level == SOL_IPV6)
return mptcp_setsockopt_v6(msk, optname, optval, optlen);
@@ -1003,6 +1103,7 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
ssk->sk_priority = sk->sk_priority;
ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
ssk->sk_incoming_cpu = sk->sk_incoming_cpu;
+ __ip_sock_set_tos(ssk, inet_sk(sk)->tos);
if (sk->sk_userlocks & tx_rx_locks) {
ssk->sk_userlocks |= sk->sk_userlocks & tx_rx_locks;
@@ -1028,6 +1129,9 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
if (inet_csk(sk)->icsk_ca_ops != inet_csk(ssk)->icsk_ca_ops)
tcp_set_congestion_control(ssk, msk->ca_name, false, true);
+
+ inet_sk(ssk)->transparent = inet_sk(sk)->transparent;
+ inet_sk(ssk)->freebind = inet_sk(sk)->freebind;
}
static void __mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk)
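
With the sync above in place, options set on the MPTCP socket reach the first subflow and every subflow created later. A minimal userspace use, assuming a kernel built with MPTCP support:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
	int tos = 0x28;		/* AF11 */

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Now propagated to all current and future subflows. */
	if (setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)) < 0)
		perror("setsockopt IP_TOS");
	return 0;
}
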
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 6172f380dfb7..b8dd3441f7d0 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1425,6 +1425,8 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
if (addr.ss_family == AF_INET6)
addrlen = sizeof(struct sockaddr_in6);
#endif
+ mptcp_sockopt_sync(msk, ssk);
+
ssk->sk_bound_dev_if = ifindex;
err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
if (err)
@@ -1441,7 +1443,6 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
mptcp_info2sockaddr(remote, &addr, ssk->sk_family);
mptcp_add_pending_subflow(msk, subflow);
- mptcp_sockopt_sync(msk, ssk);
err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
if (err && err != -EINPROGRESS)
goto failed_unlink;
@@ -1534,9 +1535,7 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
*/
sf->sk->sk_net_refcnt = 1;
get_net(net);
-#ifdef CONFIG_PROC_FS
- this_cpu_add(*net->core.sock_inuse, 1);
-#endif
+ sock_inuse_add(net, 1);
err = tcp_set_ulp(sf->sk, "mptcp");
release_sock(sf->sk);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 770a63103c7a..054ee9d25efe 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -189,7 +189,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);
seqcount_spinlock_t nf_conntrack_generation __read_mostly;
-static siphash_key_t nf_conntrack_hash_rnd __read_mostly;
+static siphash_aligned_key_t nf_conntrack_hash_rnd;
static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
unsigned int zoneid,
@@ -482,7 +482,7 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
*/
u32 nf_ct_get_id(const struct nf_conn *ct)
{
- static __read_mostly siphash_key_t ct_id_seed;
+ static siphash_aligned_key_t ct_id_seed;
unsigned long a, b, c, d;
net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index f562eeef4234..1e89b595ecd0 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -41,7 +41,7 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_hash);
unsigned int nf_ct_expect_max __read_mostly;
static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
-static siphash_key_t nf_ct_expect_hashrnd __read_mostly;
+static siphash_aligned_key_t nf_ct_expect_hashrnd;
/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index c7708bde057c..849fa7f4353c 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2995,7 +2995,7 @@ static const union nf_inet_addr any_addr;
static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
{
- static __read_mostly siphash_key_t exp_id_seed;
+ static siphash_aligned_key_t exp_id_seed;
unsigned long a, b, c, d;
net_get_random_once(&exp_id_seed, sizeof(exp_id_seed));
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 4d50d51db796..ab9f6c75524d 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -34,7 +34,7 @@ static unsigned int nat_net_id __read_mostly;
static struct hlist_head *nf_nat_bysource __read_mostly;
static unsigned int nf_nat_htable_size __read_mostly;
-static siphash_key_t nf_nat_hash_rnd __read_mostly;
+static siphash_aligned_key_t nf_nat_hash_rnd;
struct nf_nat_lookup_hook_priv {
struct nf_hook_entries __rcu *entries;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 4c575324a985..1a19d179e913 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -707,9 +707,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
if (err < 0)
goto out_module;
- local_bh_disable();
sock_prot_inuse_add(net, &netlink_proto, 1);
- local_bh_enable();
nlk = nlk_sk(sock->sk);
nlk->module = module;
@@ -809,9 +807,7 @@ static int netlink_release(struct socket *sock)
netlink_table_ungrab();
}
- local_bh_disable();
sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
- local_bh_enable();
call_rcu(&nlk->rcu, deferred_put_nlk_sk);
return 0;
}
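
The local_bh_disable()/enable() brackets around sock_prot_inuse_add() are dropped here and in the files below because the counter update became a single this_cpu_add(), which is already safe against BH and IRQ preemption. After this series the helper presumably reduces to something like:

static inline void sock_prot_inuse_add(const struct net *net,
				       const struct proto *prot, int val)
{
	this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
}
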
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 46943a18a10d..a1ffdb48cc47 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3102,9 +3102,7 @@ static int packet_release(struct socket *sock)
sk_del_node_init_rcu(sk);
mutex_unlock(&net->packet.sklist_lock);
- preempt_disable();
sock_prot_inuse_add(net, sk->sk_prot, -1);
- preempt_enable();
spin_lock(&po->bind_lock);
unregister_prot_hook(sk, false);
@@ -3368,9 +3366,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
sk_add_node_tail_rcu(sk, &net->packet.sklist);
mutex_unlock(&net->packet.sklist_lock);
- preempt_disable();
sock_prot_inuse_add(net, &packet_proto, 1);
- preempt_enable();
return 0;
out2:
diff --git a/net/rds/send.c b/net/rds/send.c
index 53444397de66..0c5504068e3c 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -272,7 +272,7 @@ restart:
/* Unfortunately, the way Infiniband deals with
* RDMA to a bad MR key is by moving the entire
- * queue pair to error state. We cold possibly
+ * queue pair to error state. We could possibly
* recover from that, but right now we drop the
* connection.
* Therefore, we never retransmit messages with RDMA ops.
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 3b0f62095803..d33804d41c5c 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -434,9 +434,9 @@ unsigned long dev_trans_start(struct net_device *dev)
dev = vlan_dev_real_dev(dev);
else if (netif_is_macvlan(dev))
dev = macvlan_dev_real_dev(dev);
- res = netdev_get_tx_queue(dev, 0)->trans_start;
+ res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start);
for (i = 1; i < dev->num_tx_queues; i++) {
- val = netdev_get_tx_queue(dev, i)->trans_start;
+ val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start);
if (val && time_after(val, res))
res = val;
}
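
trans_start is written by the transmit path and sampled locklessly here, so the accesses are paired as READ_ONCE()/WRITE_ONCE() to prevent torn or compiler-cached loads and to document the data race for KCSAN. A userspace model of the two macros:

#include <stdio.h>

/* Stand-ins for the kernel macros: one untorn volatile access that the
 * compiler can neither cache nor re-read. */
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))
#define READ_ONCE(x)	 (*(volatile typeof(x) *)&(x))

static unsigned long trans_start;

int main(void)
{
	WRITE_ONCE(trans_start, 12345);		 /* xmit path */
	printf("%lu\n", READ_ONCE(trans_start)); /* lockless watchdog read */
	return 0;
}
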
@@ -445,11 +445,62 @@ unsigned long dev_trans_start(struct net_device *dev)
}
EXPORT_SYMBOL(dev_trans_start);
+static void netif_freeze_queues(struct net_device *dev)
+{
+ unsigned int i;
+ int cpu;
+
+ cpu = smp_processor_id();
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+ /* We are the only thread of execution doing a
+ * freeze, but we have to grab the _xmit_lock in
+ * order to synchronize with threads which are in
+ * the ->hard_start_xmit() handler and already
+ * checked the frozen bit.
+ */
+ __netif_tx_lock(txq, cpu);
+ set_bit(__QUEUE_STATE_FROZEN, &txq->state);
+ __netif_tx_unlock(txq);
+ }
+}
+
+void netif_tx_lock(struct net_device *dev)
+{
+ spin_lock(&dev->tx_global_lock);
+ netif_freeze_queues(dev);
+}
+EXPORT_SYMBOL(netif_tx_lock);
+
+static void netif_unfreeze_queues(struct net_device *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+ /* No need to grab the _xmit_lock here. If the
+ * queue is not stopped for another reason, we
+ * force a schedule.
+ */
+ clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
+ netif_schedule_queue(txq);
+ }
+}
+
+void netif_tx_unlock(struct net_device *dev)
+{
+ netif_unfreeze_queues(dev);
+ spin_unlock(&dev->tx_global_lock);
+}
+EXPORT_SYMBOL(netif_tx_unlock);
+
static void dev_watchdog(struct timer_list *t)
{
struct net_device *dev = from_timer(dev, t, watchdog_timer);
- netif_tx_lock(dev);
+ spin_lock(&dev->tx_global_lock);
if (!qdisc_tx_is_noop(dev)) {
if (netif_device_present(dev) &&
netif_running(dev) &&
@@ -462,21 +513,23 @@ static void dev_watchdog(struct timer_list *t)
struct netdev_queue *txq;
txq = netdev_get_tx_queue(dev, i);
- trans_start = txq->trans_start;
+ trans_start = READ_ONCE(txq->trans_start);
if (netif_xmit_stopped(txq) &&
time_after(jiffies, (trans_start +
dev->watchdog_timeo))) {
some_queue_timedout = 1;
- txq->trans_timeout++;
+ atomic_long_inc(&txq->trans_timeout);
break;
}
}
- if (some_queue_timedout) {
+ if (unlikely(some_queue_timedout)) {
trace_net_dev_xmit_timeout(dev, i);
WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
dev->name, netdev_drivername(dev), i);
+ netif_freeze_queues(dev);
dev->netdev_ops->ndo_tx_timeout(dev, i);
+ netif_unfreeze_queues(dev);
}
if (!mod_timer(&dev->watchdog_timer,
round_jiffies(jiffies +
@@ -484,7 +537,7 @@ static void dev_watchdog(struct timer_list *t)
dev_hold(dev);
}
}
- netif_tx_unlock(dev);
+ spin_unlock(&dev->tx_global_lock);
dev_put(dev);
}
@@ -1148,7 +1201,7 @@ static void transition_one_qdisc(struct net_device *dev,
rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
if (need_watchdog_p) {
- dev_queue->trans_start = 0;
+ WRITE_ONCE(dev_queue->trans_start, 0);
*need_watchdog_p = 1;
}
}
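
netif_tx_lock()/netif_tx_unlock() move out of line, and the watchdog now freezes the queues only for the ->ndo_tx_timeout() call itself. Driver-side usage is unchanged; a sketch of a typical reset path (mydrv_reset is a hypothetical driver function):

static void mydrv_reset(struct net_device *dev)
{
	/* Global TX lock plus __QUEUE_STATE_FROZEN on every queue:
	 * no ->ndo_start_xmit can run until we unlock. */
	netif_tx_lock(dev);
	/* ... reprogram TX rings ... */
	netif_tx_unlock(dev);	/* unfreeze and reschedule stopped queues */
}
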
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index ecbb10db1111..ed4ccef5d6a8 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -208,17 +208,17 @@ static bool loss_4state(struct netem_sched_data *q)
* next state and if the next packet has to be transmitted or lost.
* The four states correspond to:
* TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
- * LOST_IN_BURST_PERIOD => isolated losses within a gap period
- * LOST_IN_GAP_PERIOD => lost packets within a burst period
- * TX_IN_GAP_PERIOD => successfully transmitted packets within a burst period
+ * LOST_IN_GAP_PERIOD => isolated losses within a gap period
+ * LOST_IN_BURST_PERIOD => lost packets within a burst period
+ * TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
*/
switch (clg->state) {
case TX_IN_GAP_PERIOD:
if (rnd < clg->a4) {
- clg->state = LOST_IN_BURST_PERIOD;
+ clg->state = LOST_IN_GAP_PERIOD;
return true;
} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
- clg->state = LOST_IN_GAP_PERIOD;
+ clg->state = LOST_IN_BURST_PERIOD;
return true;
} else if (clg->a1 + clg->a4 < rnd) {
clg->state = TX_IN_GAP_PERIOD;
@@ -227,24 +227,24 @@ static bool loss_4state(struct netem_sched_data *q)
break;
case TX_IN_BURST_PERIOD:
if (rnd < clg->a5) {
- clg->state = LOST_IN_GAP_PERIOD;
+ clg->state = LOST_IN_BURST_PERIOD;
return true;
} else {
clg->state = TX_IN_BURST_PERIOD;
}
break;
- case LOST_IN_GAP_PERIOD:
+ case LOST_IN_BURST_PERIOD:
if (rnd < clg->a3)
clg->state = TX_IN_BURST_PERIOD;
else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
clg->state = TX_IN_GAP_PERIOD;
} else if (clg->a2 + clg->a3 < rnd) {
- clg->state = LOST_IN_GAP_PERIOD;
+ clg->state = LOST_IN_BURST_PERIOD;
return true;
}
break;
- case LOST_IN_BURST_PERIOD:
+ case LOST_IN_GAP_PERIOD:
clg->state = TX_IN_GAP_PERIOD;
break;
}
diff --git a/net/sctp/output.c b/net/sctp/output.c
index cdfdbd353c67..72fe6669c50d 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -134,7 +134,7 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
dst_hold(tp->dst);
sk_setup_caps(sk, tp->dst);
}
- packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
+ packet->max_size = sk_can_gso(sk) ? READ_ONCE(tp->dst->dev->gso_max_size)
: asoc->pathmtu;
rcu_read_unlock();
}
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index ff47091c385e..a18609f608fb 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -547,6 +547,9 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
sctp_assoc_update_retran_path(transport->asoc);
transport->asoc->rtx_data_chunks +=
transport->asoc->unack_data;
+ if (transport->pl.state == SCTP_PL_COMPLETE &&
+ transport->asoc->unack_data)
+ sctp_transport_reset_probe_timer(transport);
break;
case SCTP_RTXR_FAST_RTX:
SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 354c1c4de19b..cc544a97c4af 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1124,12 +1124,11 @@ enum sctp_disposition sctp_sf_send_probe(struct net *net,
if (!sctp_transport_pl_enabled(transport))
return SCTP_DISPOSITION_CONSUME;
- if (sctp_transport_pl_send(transport)) {
- reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size);
- if (!reply)
- return SCTP_DISPOSITION_NOMEM;
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
- }
+ sctp_transport_pl_send(transport);
+ reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size);
+ if (!reply)
+ return SCTP_DISPOSITION_NOMEM;
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
sctp_add_cmd_sf(commands, SCTP_CMD_PROBE_TIMER_UPDATE,
SCTP_TRANSPORT(transport));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 33391254fa82..055a6d3ec6e2 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5068,12 +5068,9 @@ static int sctp_init_sock(struct sock *sk)
SCTP_DBG_OBJCNT_INC(sock);
- local_bh_disable();
sk_sockets_allocated_inc(sk);
sock_prot_inuse_add(net, sk->sk_prot, 1);
- local_bh_enable();
-
return 0;
}
@@ -5099,10 +5096,8 @@ static void sctp_destroy_sock(struct sock *sk)
list_del(&sp->auto_asconf_list);
}
sctp_endpoint_free(sp->ep);
- local_bh_disable();
sk_sockets_allocated_dec(sk);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
- local_bh_enable();
}
/* Triggered when there are no references on the socket anymore */
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 133f1719bf1b..f8fd98784977 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -213,13 +213,18 @@ void sctp_transport_reset_reconf_timer(struct sctp_transport *transport)
void sctp_transport_reset_probe_timer(struct sctp_transport *transport)
{
- if (timer_pending(&transport->probe_timer))
- return;
if (!mod_timer(&transport->probe_timer,
jiffies + transport->probe_interval))
sctp_transport_hold(transport);
}
+void sctp_transport_reset_raise_timer(struct sctp_transport *transport)
+{
+ if (!mod_timer(&transport->probe_timer,
+ jiffies + transport->probe_interval * 30))
+ sctp_transport_hold(transport);
+}
+
/* This transport has been assigned to an association.
* Initialize fields from the association or from the sock itself.
* Register the reference count in the association.
@@ -258,12 +263,11 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
sctp_transport_pl_update(transport);
}
-bool sctp_transport_pl_send(struct sctp_transport *t)
+void sctp_transport_pl_send(struct sctp_transport *t)
{
if (t->pl.probe_count < SCTP_MAX_PROBES)
goto out;
- t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks;
t->pl.probe_count = 0;
if (t->pl.state == SCTP_PL_BASE) {
if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */
@@ -298,17 +302,9 @@ bool sctp_transport_pl_send(struct sctp_transport *t)
}
out:
- if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count < 30 &&
- !t->pl.probe_count && t->pl.last_rtx_chunks == t->asoc->rtx_data_chunks) {
- t->pl.raise_count++;
- return false;
- }
-
pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
__func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
-
t->pl.probe_count++;
- return true;
}
bool sctp_transport_pl_recv(struct sctp_transport *t)
@@ -316,7 +312,6 @@ bool sctp_transport_pl_recv(struct sctp_transport *t)
pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
__func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
- t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks;
t->pl.pmtu = t->pl.probe_size;
t->pl.probe_count = 0;
if (t->pl.state == SCTP_PL_BASE) {
@@ -338,14 +333,14 @@ bool sctp_transport_pl_recv(struct sctp_transport *t)
t->pl.probe_size += SCTP_PL_MIN_STEP;
if (t->pl.probe_size >= t->pl.probe_high) {
t->pl.probe_high = 0;
- t->pl.raise_count = 0;
t->pl.state = SCTP_PL_COMPLETE; /* Search -> Search Complete */
t->pl.probe_size = t->pl.pmtu;
t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
sctp_assoc_sync_pmtu(t->asoc);
+ sctp_transport_reset_raise_timer(t);
}
- } else if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count == 30) {
+ } else if (t->pl.state == SCTP_PL_COMPLETE) {
/* Raise probe_size again after 30 * interval in Search Complete */
t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
t->pl.probe_size += SCTP_PL_MIN_STEP;
@@ -393,6 +388,7 @@ static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu)
t->pl.probe_high = 0;
t->pl.pmtu = SCTP_BASE_PLPMTU;
t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+ sctp_transport_reset_probe_timer(t);
return true;
}
}
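
Both probe-timer helpers rely on mod_timer()'s return value: 0 means the timer was not pending, i.e. this call armed it fresh, so a transport reference must be taken for the handler (or timer deletion) to drop later; a pending timer already holds one. The arming pattern in isolation:

/* Take a reference only when the timer was newly armed, keeping
 * exactly one reference per pending probe timer. */
if (!mod_timer(&transport->probe_timer,
	       jiffies + transport->probe_interval))
	sctp_transport_hold(transport);
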
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 230072f9ec48..67a78bbf305f 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -89,8 +89,8 @@ int smc_hash_sk(struct sock *sk)
write_lock_bh(&h->lock);
sk_add_node(sk, head);
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
write_unlock_bh(&h->lock);
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
return 0;
}
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index b4d9419a015b..81116312b753 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -761,21 +761,10 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
skb_tailroom(skb), tailen);
}
- if (unlikely(!skb_cloned(skb) && tailen <= skb_tailroom(skb))) {
- nsg = 1;
- trailer = skb;
- } else {
- /* TODO: We could avoid skb_cow_data() if skb has no frag_list
- * e.g. by skb_fill_page_desc() to add another page to the skb
- * with the wanted tailen... However, page skbs look not often,
- * so take it easy now!
- * Cloned skbs e.g. from link_xmit() seems no choice though :(
- */
- nsg = skb_cow_data(skb, tailen, &trailer);
- if (unlikely(nsg < 0)) {
- pr_err("TX: skb_cow_data() returned %d\n", nsg);
- return nsg;
- }
+ nsg = skb_cow_data(skb, tailen, &trailer);
+ if (unlikely(nsg < 0)) {
+ pr_err("TX: skb_cow_data() returned %d\n", nsg);
+ return nsg;
}
pskb_put(skb, trailer, tailen);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index b0bfc78e421c..b9ff1f31f3af 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -522,9 +522,7 @@ static void unix_sock_destructor(struct sock *sk)
unix_release_addr(u->addr);
atomic_long_dec(&unix_nr_socks);
- local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
- local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
atomic_long_read(&unix_nr_socks));
@@ -889,9 +887,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
memset(&u->scm_stat, 0, sizeof(struct scm_stat));
unix_insert_socket(unix_sockets_unbound(sk), sk);
- local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
- local_bh_enable();
return sk;
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index f16074eb53c7..28ef3f4465ae 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -794,9 +794,7 @@ static int xsk_release(struct socket *sock)
sk_del_node_init_rcu(sk);
mutex_unlock(&net->xdp.lock);
- local_bh_disable();
sock_prot_inuse_add(net, sk->sk_prot, -1);
- local_bh_enable();
xsk_delete_from_maps(xs);
mutex_lock(&xs->mutex);
@@ -1396,9 +1394,7 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
sk_add_node_rcu(sk, &net->xdp.list);
mutex_unlock(&net->xdp.lock);
- local_bh_disable();
sock_prot_inuse_add(net, &xsk_proto, 1);
- local_bh_enable();
return 0;
}
diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore
index 05ce4446b780..a736f64dc5dc 100644
--- a/tools/bpf/bpftool/.gitignore
+++ b/tools/bpf/bpftool/.gitignore
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
*.d
/bootstrap/
/bpftool
diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile
index c49487905ceb..692e1b947490 100644
--- a/tools/bpf/bpftool/Documentation/Makefile
+++ b/tools/bpf/bpftool/Documentation/Makefile
@@ -1,6 +1,5 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
include ../../../scripts/Makefile.include
-include ../../../scripts/utilities.mak
INSTALL ?= install
RM ?= rm -f
diff --git a/tools/bpf/bpftool/Documentation/bpftool-btf.rst b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
index 88b28aa7431f..4425d942dd39 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-btf.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
@@ -13,7 +13,7 @@ SYNOPSIS
**bpftool** [*OPTIONS*] **btf** *COMMAND*
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | {**-d** | **--debug** } |
- { **-B** | **--base-btf** } }
+ { **-B** | **--base-btf** } }
*COMMANDS* := { **dump** | **help** }
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
index 3e4395eede4f..8069d37dd991 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -13,7 +13,7 @@ SYNOPSIS
**bpftool** [*OPTIONS*] **cgroup** *COMMAND*
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } |
- { **-f** | **--bpffs** } }
+ { **-f** | **--bpffs** } }
*COMMANDS* :=
{ **show** | **list** | **tree** | **attach** | **detach** | **help** }
@@ -30,9 +30,9 @@ CGROUP COMMANDS
| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
| *ATTACH_TYPE* := { **ingress** | **egress** | **sock_create** | **sock_ops** | **device** |
| **bind4** | **bind6** | **post_bind4** | **post_bind6** | **connect4** | **connect6** |
-| **getpeername4** | **getpeername6** | **getsockname4** | **getsockname6** | **sendmsg4** |
-| **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** | **getsockopt** | **setsockopt** |
-| **sock_release** }
+| **getpeername4** | **getpeername6** | **getsockname4** | **getsockname6** | **sendmsg4** |
+| **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** | **getsockopt** | **setsockopt** |
+| **sock_release** }
| *ATTACH_FLAGS* := { **multi** | **override** }
DESCRIPTION
@@ -98,9 +98,9 @@ DESCRIPTION
**sendmsg6** call to sendto(2), sendmsg(2), sendmmsg(2) for an
unconnected udp6 socket (since 4.18);
**recvmsg4** call to recvfrom(2), recvmsg(2), recvmmsg(2) for
- an unconnected udp4 socket (since 5.2);
+ an unconnected udp4 socket (since 5.2);
**recvmsg6** call to recvfrom(2), recvmsg(2), recvmmsg(2) for
- an unconnected udp6 socket (since 5.2);
+ an unconnected udp6 socket (since 5.2);
**sysctl** sysctl access (since 5.2);
**getsockopt** call to getsockopt (since 5.3);
**setsockopt** call to setsockopt (since 5.3);
diff --git a/tools/bpf/bpftool/Documentation/bpftool-gen.rst b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
index 2ef2f2df0279..2a137f8a4cea 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-gen.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
@@ -13,7 +13,7 @@ SYNOPSIS
**bpftool** [*OPTIONS*] **gen** *COMMAND*
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } |
- { **-L** | **--use-loader** } }
+ { **-L** | **--use-loader** } }
*COMMAND* := { **object** | **skeleton** | **help** }
diff --git a/tools/bpf/bpftool/Documentation/bpftool-link.rst b/tools/bpf/bpftool/Documentation/bpftool-link.rst
index 0de90f086238..9434349636a5 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-link.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-link.rst
@@ -13,7 +13,7 @@ SYNOPSIS
**bpftool** [*OPTIONS*] **link** *COMMAND*
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } |
- { **-f** | **--bpffs** } | { **-n** | **--nomount** } }
+ { **-f** | **--bpffs** } | { **-n** | **--nomount** } }
*COMMANDS* := { **show** | **list** | **pin** | **help** }
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index d0c4abe08aba..991d18fd84f2 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -13,11 +13,11 @@ SYNOPSIS
**bpftool** [*OPTIONS*] **map** *COMMAND*
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } |
- { **-f** | **--bpffs** } | { **-n** | **--nomount** } }
+ { **-f** | **--bpffs** } | { **-n** | **--nomount** } }
*COMMANDS* :=
- { **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext**
- | **delete** | **pin** | **help** }
+ { **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** |
+ **delete** | **pin** | **help** }
MAP COMMANDS
=============
@@ -52,7 +52,7 @@ MAP COMMANDS
| | **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash**
| | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage**
| | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** | **inode_storage**
- | **task_storage** }
+| | **task_storage** | **bloom_filter** }
DESCRIPTION
===========
diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst
index 1ae0375e8fea..7ec57535a7c1 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-net.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst
@@ -31,44 +31,44 @@ NET COMMANDS
DESCRIPTION
===========
**bpftool net { show | list }** [ **dev** *NAME* ]
- List bpf program attachments in the kernel networking subsystem.
-
- Currently, only device driver xdp attachments and tc filter
- classification/action attachments are implemented, i.e., for
- program types **BPF_PROG_TYPE_SCHED_CLS**,
- **BPF_PROG_TYPE_SCHED_ACT** and **BPF_PROG_TYPE_XDP**.
- For programs attached to a particular cgroup, e.g.,
- **BPF_PROG_TYPE_CGROUP_SKB**, **BPF_PROG_TYPE_CGROUP_SOCK**,
- **BPF_PROG_TYPE_SOCK_OPS** and **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
- users can use **bpftool cgroup** to dump cgroup attachments.
- For sk_{filter, skb, msg, reuseport} and lwt/seg6
- bpf programs, users should consult other tools, e.g., iproute2.
-
- The current output will start with all xdp program attachments, followed by
- all tc class/qdisc bpf program attachments. Both xdp programs and
- tc programs are ordered based on ifindex number. If multiple bpf
- programs attached to the same networking device through **tc filter**,
- the order will be first all bpf programs attached to tc classes, then
- all bpf programs attached to non clsact qdiscs, and finally all
- bpf programs attached to root and clsact qdisc.
+ List bpf program attachments in the kernel networking subsystem.
+
+ Currently, only device driver xdp attachments and tc filter
+ classification/action attachments are implemented, i.e., for
+ program types **BPF_PROG_TYPE_SCHED_CLS**,
+ **BPF_PROG_TYPE_SCHED_ACT** and **BPF_PROG_TYPE_XDP**.
+ For programs attached to a particular cgroup, e.g.,
+ **BPF_PROG_TYPE_CGROUP_SKB**, **BPF_PROG_TYPE_CGROUP_SOCK**,
+ **BPF_PROG_TYPE_SOCK_OPS** and **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
+ users can use **bpftool cgroup** to dump cgroup attachments.
+ For sk_{filter, skb, msg, reuseport} and lwt/seg6
+ bpf programs, users should consult other tools, e.g., iproute2.
+
+ The current output will start with all xdp program attachments, followed by
+ all tc class/qdisc bpf program attachments. Both xdp programs and
+ tc programs are ordered based on ifindex number. If multiple bpf
+ programs attached to the same networking device through **tc filter**,
+ the order will be first all bpf programs attached to tc classes, then
+ all bpf programs attached to non clsact qdiscs, and finally all
+ bpf programs attached to root and clsact qdisc.
**bpftool** **net attach** *ATTACH_TYPE* *PROG* **dev** *NAME* [ **overwrite** ]
- Attach bpf program *PROG* to network interface *NAME* with
- type specified by *ATTACH_TYPE*. Previously attached bpf program
- can be replaced by the command used with **overwrite** option.
- Currently, only XDP-related modes are supported for *ATTACH_TYPE*.
+ Attach bpf program *PROG* to network interface *NAME* with
+ type specified by *ATTACH_TYPE*. Previously attached bpf program
+ can be replaced by the command used with **overwrite** option.
+ Currently, only XDP-related modes are supported for *ATTACH_TYPE*.
- *ATTACH_TYPE* can be of:
- **xdp** - try native XDP and fallback to generic XDP if NIC driver does not support it;
- **xdpgeneric** - Generic XDP. runs at generic XDP hook when packet already enters receive path as skb;
- **xdpdrv** - Native XDP. runs earliest point in driver's receive path;
- **xdpoffload** - Offload XDP. runs directly on NIC on each packet reception;
+ *ATTACH_TYPE* can be of:
+ **xdp** - try native XDP and fallback to generic XDP if NIC driver does not support it;
+ **xdpgeneric** - Generic XDP. runs at generic XDP hook when packet already enters receive path as skb;
+ **xdpdrv** - Native XDP. runs earliest point in driver's receive path;
+ **xdpoffload** - Offload XDP. runs directly on NIC on each packet reception;
**bpftool** **net detach** *ATTACH_TYPE* **dev** *NAME*
- Detach bpf program attached to network interface *NAME* with
- type specified by *ATTACH_TYPE*. To detach bpf program, same
- *ATTACH_TYPE* previously used for attach must be specified.
- Currently, only XDP-related modes are supported for *ATTACH_TYPE*.
+ Detach bpf program attached to network interface *NAME* with
+ type specified by *ATTACH_TYPE*. To detach bpf program, same
+ *ATTACH_TYPE* previously used for attach must be specified.
+ Currently, only XDP-related modes are supported for *ATTACH_TYPE*.
**bpftool net help**
Print short help message.
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 91608cb7e44a..f27265bd589b 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -13,12 +13,12 @@ SYNOPSIS
**bpftool** [*OPTIONS*] **prog** *COMMAND*
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } |
- { **-f** | **--bpffs** } | { **-m** | **--mapcompat** } | { **-n** | **--nomount** } |
- { **-L** | **--use-loader** } }
+ { **-f** | **--bpffs** } | { **-m** | **--mapcompat** } | { **-n** | **--nomount** } |
+ { **-L** | **--use-loader** } }
*COMMANDS* :=
- { **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load**
- | **loadall** | **help** }
+ { **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load** |
+ **loadall** | **help** }
PROG COMMANDS
=============
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index bb23f55bb05a..8ac86565c501 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -19,14 +19,14 @@ SYNOPSIS
*OBJECT* := { **map** | **program** | **cgroup** | **perf** | **net** | **feature** }
*OPTIONS* := { { **-V** | **--version** } |
- { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } }
+ { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } }
*MAP-COMMANDS* :=
{ **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** |
- **delete** | **pin** | **event_pipe** | **help** }
+ **delete** | **pin** | **event_pipe** | **help** }
*PROG-COMMANDS* := { **show** | **list** | **dump jited** | **dump xlated** | **pin** |
- **load** | **attach** | **detach** | **help** }
+ **load** | **attach** | **detach** | **help** }
*CGROUP-COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** }
diff --git a/tools/bpf/bpftool/Documentation/common_options.rst b/tools/bpf/bpftool/Documentation/common_options.rst
index 05d06c74dcaa..75adf23202d8 100644
--- a/tools/bpf/bpftool/Documentation/common_options.rst
+++ b/tools/bpf/bpftool/Documentation/common_options.rst
@@ -20,3 +20,12 @@
Print all logs available, even debug-level information. This includes
logs from libbpf as well as from the verifier, when attempting to
load programs.
+
+-l, --legacy
+ Use legacy libbpf mode which has more relaxed BPF program
+ requirements. By default, bpftool has stricter requirements
+ about section names, uses updated pinning logic, and doesn't
+ support some of the older non-BTF map declarations.
+
+ See https://github.com/libbpf/libbpf/wiki/Libbpf:-the-road-to-v1.0
+ for details.
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 7cfba11c3014..42eb8eee3d89 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -1,6 +1,5 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
include ../../scripts/Makefile.include
-include ../../scripts/utilities.mak
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
@@ -187,7 +186,8 @@ $(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF_BOOTSTRAP)
-I$(if $(OUTPUT),$(OUTPUT),.) \
-I$(srctree)/tools/include/uapi/ \
-I$(LIBBPF_BOOTSTRAP_INCLUDE) \
- -g -O2 -Wall -target bpf -c $< -o $@ && $(LLVM_STRIP) -g $@
+ -g -O2 -Wall -target bpf -c $< -o $@
+ $(Q)$(LLVM_STRIP) -g $@
$(OUTPUT)%.skel.h: $(OUTPUT)%.bpf.o $(BPFTOOL_BOOTSTRAP)
$(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) gen skeleton $< > $@
@@ -202,10 +202,10 @@ endif
CFLAGS += $(if $(BUILD_BPF_SKELS),,-DBPFTOOL_WITHOUT_SKELETONS)
$(BOOTSTRAP_OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
- $(QUIET_CC)$(HOSTCC) $(CFLAGS) -c -MMD -o $@ $<
+ $(QUIET_CC)$(HOSTCC) $(CFLAGS) -c -MMD $< -o $@
$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
- $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $<
+ $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD $< -o $@
$(OUTPUT)feature.o:
ifneq ($(feature-zlib), 1)
@@ -213,19 +213,18 @@ ifneq ($(feature-zlib), 1)
endif
$(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF_BOOTSTRAP)
- $(QUIET_LINK)$(HOSTCC) $(CFLAGS) $(LDFLAGS) -o $@ $(BOOTSTRAP_OBJS) \
- $(LIBS_BOOTSTRAP)
+ $(QUIET_LINK)$(HOSTCC) $(CFLAGS) $(LDFLAGS) $(BOOTSTRAP_OBJS) $(LIBS_BOOTSTRAP) -o $@
$(OUTPUT)bpftool: $(OBJS) $(LIBBPF)
- $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJS) $(LIBS)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(OBJS) $(LIBS) -o $@
$(BOOTSTRAP_OUTPUT)%.o: %.c $(LIBBPF_BOOTSTRAP_INTERNAL_HDRS) | $(BOOTSTRAP_OUTPUT)
$(QUIET_CC)$(HOSTCC) \
$(subst -I$(LIBBPF_INCLUDE),-I$(LIBBPF_BOOTSTRAP_INCLUDE),$(CFLAGS)) \
- -c -MMD -o $@ $<
+ -c -MMD $< -o $@
$(OUTPUT)%.o: %.c
- $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $<
+ $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD $< -o $@
feature-detect-clean:
$(call QUIET_CLEAN, feature-detect)
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 88e2bcf16cca..493753a4962e 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -261,7 +261,7 @@ _bpftool()
# Deal with options
if [[ ${words[cword]} == -* ]]; then
local c='--version --json --pretty --bpffs --mapcompat --debug \
- --use-loader --base-btf'
+ --use-loader --base-btf --legacy'
COMPREPLY=( $( compgen -W "$c" -- "$cur" ) )
return 0
fi
@@ -710,7 +710,8 @@ _bpftool()
hash_of_maps devmap devmap_hash sockmap cpumap \
xskmap sockhash cgroup_storage reuseport_sockarray \
percpu_cgroup_storage queue stack sk_storage \
- struct_ops inode_storage task_storage ringbuf'
+ struct_ops ringbuf inode_storage task_storage \
+ bloom_filter'
COMPREPLY=( $( compgen -W "$BPFTOOL_MAP_CREATE_TYPES" -- "$cur" ) )
return 0
;;
diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
index 015d2758f826..59833125ac0a 100644
--- a/tools/bpf/bpftool/btf.c
+++ b/tools/bpf/bpftool/btf.c
@@ -39,6 +39,7 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = {
[BTF_KIND_DATASEC] = "DATASEC",
[BTF_KIND_FLOAT] = "FLOAT",
[BTF_KIND_DECL_TAG] = "DECL_TAG",
+ [BTF_KIND_TYPE_TAG] = "TYPE_TAG",
};
struct btf_attach_point {
@@ -142,6 +143,7 @@ static int dump_btf_type(const struct btf *btf, __u32 id,
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_TYPEDEF:
+ case BTF_KIND_TYPE_TAG:
if (json_output)
jsonw_uint_field(w, "type_id", t->type);
else
@@ -418,9 +420,10 @@ static int dump_btf_c(const struct btf *btf,
struct btf_dump *d;
int err = 0, i;
- d = btf_dump__new(btf, NULL, NULL, btf_dump_printf);
- if (IS_ERR(d))
- return PTR_ERR(d);
+ d = btf_dump__new(btf, btf_dump_printf, NULL, NULL);
+ err = libbpf_get_error(d);
+ if (err)
+ return err;
printf("#ifndef __VMLINUX_H__\n");
printf("#define __VMLINUX_H__\n");
@@ -547,8 +550,8 @@ static int do_dump(int argc, char **argv)
}
btf = btf__parse_split(*argv, base ?: base_btf);
- if (IS_ERR(btf)) {
- err = -PTR_ERR(btf);
+ err = libbpf_get_error(btf);
+ if (err) {
btf = NULL;
p_err("failed to load BTF from %s: %s",
*argv, strerror(err));
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index 9c25286a5c73..f5dddf8ef404 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -32,14 +32,16 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
const struct btf_type *func_proto,
__u32 prog_id)
{
- struct bpf_prog_info_linear *prog_info = NULL;
const struct btf_type *func_type;
+ int prog_fd = -1, func_sig_len;
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
const char *prog_name = NULL;
- struct bpf_func_info *finfo;
struct btf *prog_btf = NULL;
- struct bpf_prog_info *info;
- int prog_fd, func_sig_len;
+ struct bpf_func_info finfo;
+ __u32 finfo_rec_size;
char prog_str[1024];
+ int err;
/* Get the ptr's func_proto */
func_sig_len = btf_dump_func(d->btf, prog_str, func_proto, NULL, 0,
@@ -52,25 +54,30 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
/* Get the bpf_prog's name. Obtain from func_info. */
prog_fd = bpf_prog_get_fd_by_id(prog_id);
- if (prog_fd == -1)
+ if (prog_fd < 0)
goto print;
- prog_info = bpf_program__get_prog_info_linear(prog_fd,
- 1UL << BPF_PROG_INFO_FUNC_INFO);
- close(prog_fd);
- if (IS_ERR(prog_info)) {
- prog_info = NULL;
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ if (err)
goto print;
- }
- info = &prog_info->info;
- if (!info->btf_id || !info->nr_func_info)
+ if (!info.btf_id || !info.nr_func_info)
+ goto print;
+
+ finfo_rec_size = info.func_info_rec_size;
+ memset(&info, 0, sizeof(info));
+ info.nr_func_info = 1;
+ info.func_info_rec_size = finfo_rec_size;
+ info.func_info = ptr_to_u64(&finfo);
+
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ if (err)
goto print;
- prog_btf = btf__load_from_kernel_by_id(info->btf_id);
+
+ prog_btf = btf__load_from_kernel_by_id(info.btf_id);
if (libbpf_get_error(prog_btf))
goto print;
- finfo = u64_to_ptr(info->func_info);
- func_type = btf__type_by_id(prog_btf, finfo->type_id);
+ func_type = btf__type_by_id(prog_btf, finfo.type_id);
if (!func_type || !btf_is_func(func_type))
goto print;
@@ -92,7 +99,8 @@ print:
prog_str[sizeof(prog_str) - 1] = '\0';
jsonw_string(d->jw, prog_str);
btf__free(prog_btf);
- free(prog_info);
+ if (prog_fd >= 0)
+ close(prog_fd);
return 0;
}
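
The conversion above replaces the removed bpf_program__get_prog_info_linear() with the raw two-call bpf_obj_get_info_by_fd() protocol: one call to learn counts and record sizes, a second call with user-supplied buffers. A self-contained sketch of that pattern (prog_fd assumed valid):

/* Fetch the first func_info record of a program in two steps: query the
 * record count/size, then re-query with a buffer for exactly one record.
 */
#include <string.h>
#include <bpf/bpf.h>

static int get_first_func_info(int prog_fd, struct bpf_func_info *finfo)
{
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info), rec_size;
	int err;

	err = bpf_obj_get_info_by_fd(prog_fd, &info, &len);
	if (err || !info.nr_func_info)
		return err ?: -1;

	rec_size = info.func_info_rec_size;
	memset(&info, 0, sizeof(info));
	info.nr_func_info = 1;
	info.func_info_rec_size = rec_size;
	info.func_info = (__u64)(unsigned long)finfo; /* ptr_to_u64() */

	return bpf_obj_get_info_by_fd(prog_fd, &info, &len);
}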
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 511eccdbdfe6..fa8eb8134344 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -74,6 +74,7 @@ const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = {
[BPF_XDP] = "xdp",
[BPF_SK_REUSEPORT_SELECT] = "sk_skb_reuseport_select",
[BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_skb_reuseport_select_or_migrate",
+ [BPF_PERF_EVENT] = "perf_event",
};
void p_err(const char *fmt, ...)
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index ade44577688e..5397077d0d9e 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -467,7 +467,7 @@ static bool probe_bpf_syscall(const char *define_prefix)
{
bool res;
- bpf_load_program(BPF_PROG_TYPE_UNSPEC, NULL, 0, NULL, 0, NULL, 0);
+ bpf_prog_load(BPF_PROG_TYPE_UNSPEC, NULL, NULL, NULL, 0, NULL);
res = (errno != ENOSYS);
print_bool_feature("have_bpf_syscall",
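
The probe keeps working after the API swap because an intentionally invalid BPF_PROG_LOAD still reaches the kernel; only a missing syscall yields ENOSYS. Roughly:

/* Probe bpf() syscall availability: any errno other than ENOSYS means
 * the syscall exists and merely rejected the empty BPF_PROG_TYPE_UNSPEC
 * load.
 */
#include <errno.h>
#include <stdbool.h>
#include <bpf/bpf.h>

static bool have_bpf_syscall(void)
{
	bpf_prog_load(BPF_PROG_TYPE_UNSPEC, NULL, NULL, NULL, 0, NULL);
	return errno != ENOSYS;
}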
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index 5c18351290f0..997a2865e04a 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -218,9 +218,10 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
char sec_ident[256], map_ident[256];
int i, err = 0;
- d = btf_dump__new(btf, NULL, NULL, codegen_btf_dump_printf);
- if (IS_ERR(d))
- return PTR_ERR(d);
+ d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
+ err = libbpf_get_error(d);
+ if (err)
+ return err;
bpf_object__for_each_map(map, obj) {
/* only generate definitions for memory-mapped internal maps */
@@ -719,10 +720,11 @@ static int do_skeleton(int argc, char **argv)
get_obj_name(obj_name, file);
opts.object_name = obj_name;
obj = bpf_object__open_mem(obj_data, file_sz, &opts);
- if (IS_ERR(obj)) {
+ err = libbpf_get_error(obj);
+ if (err) {
char err_buf[256];
- libbpf_strerror(PTR_ERR(obj), err_buf, sizeof(err_buf));
+ libbpf_strerror(err, err_buf, sizeof(err_buf));
p_err("failed to open BPF object file: %s", err_buf);
obj = NULL;
goto out;
diff --git a/tools/bpf/bpftool/iter.c b/tools/bpf/bpftool/iter.c
index 6c0de647b8ad..f88fdc820d23 100644
--- a/tools/bpf/bpftool/iter.c
+++ b/tools/bpf/bpftool/iter.c
@@ -46,7 +46,8 @@ static int do_pin(int argc, char **argv)
}
obj = bpf_object__open(objfile);
- if (IS_ERR(obj)) {
+ err = libbpf_get_error(obj);
+ if (err) {
p_err("can't open objfile %s", objfile);
goto close_map_fd;
}
@@ -64,8 +65,8 @@ static int do_pin(int argc, char **argv)
}
link = bpf_program__attach_iter(prog, &iter_opts);
- if (IS_ERR(link)) {
- err = PTR_ERR(link);
+ err = libbpf_get_error(link);
+ if (err) {
p_err("attach_iter failed for program %s",
bpf_program__name(prog));
goto close_obj;
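
This hunk, like most of the conversions below, applies the same error-handling convention: libbpf pointers are no longer inspected with IS_ERR()/PTR_ERR() but passed through libbpf_get_error(), and the pointer must be treated as unusable on error. A generic sketch:

/* libbpf may return a valid pointer, NULL, or an ERR_PTR-encoded error,
 * depending on mode; libbpf_get_error() normalizes all three cases.
 */
#include <stdio.h>
#include <bpf/libbpf.h>

static struct bpf_object *open_obj(const char *path)
{
	struct bpf_object *obj = bpf_object__open(path);
	long err = libbpf_get_error(obj);

	if (err) {
		/* obj is not a usable pointer here; do not close it */
		fprintf(stderr, "failed to open %s: %ld\n", path, err);
		return NULL;
	}
	return obj;
}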
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index 28237d7cef67..473791e87f7d 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -31,6 +31,7 @@ bool block_mount;
bool verifier_logs;
bool relaxed_maps;
bool use_loader;
+bool legacy_libbpf;
struct btf *base_btf;
struct hashmap *refs_table;
@@ -396,6 +397,7 @@ int main(int argc, char **argv)
{ "debug", no_argument, NULL, 'd' },
{ "use-loader", no_argument, NULL, 'L' },
{ "base-btf", required_argument, NULL, 'B' },
+ { "legacy", no_argument, NULL, 'l' },
{ 0 }
};
int opt, ret;
@@ -408,7 +410,7 @@ int main(int argc, char **argv)
bin_name = argv[0];
opterr = 0;
- while ((opt = getopt_long(argc, argv, "VhpjfLmndB:",
+ while ((opt = getopt_long(argc, argv, "VhpjfLmndB:l",
options, NULL)) >= 0) {
switch (opt) {
case 'V':
@@ -454,6 +456,9 @@ int main(int argc, char **argv)
case 'L':
use_loader = true;
break;
+ case 'l':
+ legacy_libbpf = true;
+ break;
default:
p_err("unrecognized option '%s'", argv[optind - 1]);
if (json_output)
@@ -463,6 +468,12 @@ int main(int argc, char **argv)
}
}
+ if (!legacy_libbpf) {
+ ret = libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+ if (ret)
+ p_err("failed to enable libbpf strict mode: %d", ret);
+ }
+
argc -= optind;
argv += optind;
if (argc < 0)
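
The strict-mode opt-in added to main() above is the generic libbpf 1.0 migration switch; a standalone tool would do the same (a minimal sketch, mirroring bpftool's new default):

/* Enable libbpf 1.0 semantics unless the user asked for legacy behavior;
 * must be called before other libbpf APIs are used.
 */
#include <stdbool.h>
#include <bpf/libbpf_legacy.h>

static int setup_libbpf(bool legacy)
{
	if (legacy)
		return 0;
	return libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
}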
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 383835c2604d..8d76d937a62b 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -57,7 +57,7 @@ static inline void *u64_to_ptr(__u64 ptr)
#define HELP_SPEC_PROGRAM \
"PROG := { id PROG_ID | pinned FILE | tag PROG_TAG | name PROG_NAME }"
#define HELP_SPEC_OPTIONS \
- "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-d|--debug}"
+ "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-d|--debug} | {-l|--legacy}"
#define HELP_SPEC_MAP \
"MAP := { id MAP_ID | pinned FILE | name MAP_NAME }"
#define HELP_SPEC_LINK \
@@ -90,6 +90,7 @@ extern bool block_mount;
extern bool verifier_logs;
extern bool relaxed_maps;
extern bool use_loader;
+extern bool legacy_libbpf;
extern struct btf *base_btf;
extern struct hashmap *refs_table;
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index cae1f1119296..25b258804f11 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -53,6 +53,7 @@ const char * const map_type_name[] = {
[BPF_MAP_TYPE_RINGBUF] = "ringbuf",
[BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage",
[BPF_MAP_TYPE_TASK_STORAGE] = "task_storage",
+ [BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
};
const size_t map_type_name_size = ARRAY_SIZE(map_type_name);
@@ -811,7 +812,7 @@ static struct btf *get_map_kv_btf(const struct bpf_map_info *info)
if (info->btf_vmlinux_value_type_id) {
if (!btf_vmlinux) {
btf_vmlinux = libbpf_find_kernel_btf();
- if (IS_ERR(btf_vmlinux))
+ if (libbpf_get_error(btf_vmlinux))
p_err("failed to get kernel btf");
}
return btf_vmlinux;
@@ -831,13 +832,13 @@ static struct btf *get_map_kv_btf(const struct bpf_map_info *info)
static void free_map_kv_btf(struct btf *btf)
{
- if (!IS_ERR(btf) && btf != btf_vmlinux)
+ if (!libbpf_get_error(btf) && btf != btf_vmlinux)
btf__free(btf);
}
static void free_btf_vmlinux(void)
{
- if (!IS_ERR(btf_vmlinux))
+ if (!libbpf_get_error(btf_vmlinux))
btf__free(btf_vmlinux);
}
@@ -862,8 +863,8 @@ map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
if (wtr) {
btf = get_map_kv_btf(info);
- if (IS_ERR(btf)) {
- err = PTR_ERR(btf);
+ err = libbpf_get_error(btf);
+ if (err) {
goto exit_free;
}
@@ -1477,7 +1478,7 @@ static int do_help(int argc, char **argv)
" devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n"
" cgroup_storage | reuseport_sockarray | percpu_cgroup_storage |\n"
" queue | stack | sk_storage | struct_ops | ringbuf | inode_storage |\n"
- " task_storage }\n"
+ " task_storage | bloom_filter }\n"
" " HELP_SPEC_OPTIONS " |\n"
" {-f|--bpffs} | {-n|--nomount} }\n"
"",
diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c
index b98ea702d284..6b0c410152de 100644
--- a/tools/bpf/bpftool/map_perf_ring.c
+++ b/tools/bpf/bpftool/map_perf_ring.c
@@ -124,7 +124,7 @@ int do_event_pipe(int argc, char **argv)
.wakeup_events = 1,
};
struct bpf_map_info map_info = {};
- struct perf_buffer_raw_opts opts = {};
+ LIBBPF_OPTS(perf_buffer_raw_opts, opts);
struct event_pipe_ctx ctx = {
.all_cpus = true,
.cpu = -1,
@@ -190,14 +190,11 @@ int do_event_pipe(int argc, char **argv)
ctx.idx = 0;
}
- opts.attr = &perf_attr;
- opts.event_cb = print_bpf_output;
- opts.ctx = &ctx;
opts.cpu_cnt = ctx.all_cpus ? 0 : 1;
opts.cpus = &ctx.cpu;
opts.map_keys = &ctx.idx;
-
- pb = perf_buffer__new_raw(map_fd, MMAP_PAGE_CNT, &opts);
+ pb = perf_buffer__new_raw(map_fd, MMAP_PAGE_CNT, &perf_attr,
+ print_bpf_output, &ctx, &opts);
err = libbpf_get_error(pb);
if (err) {
p_err("failed to create perf buffer: %s (%d)",
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 515d22952602..e47e8b06cc3d 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -100,6 +100,76 @@ static enum bpf_attach_type parse_attach_type(const char *str)
return __MAX_BPF_ATTACH_TYPE;
}
+static int prep_prog_info(struct bpf_prog_info *const info, enum dump_mode mode,
+ void **info_data, size_t *const info_data_sz)
+{
+ struct bpf_prog_info holder = {};
+ size_t needed = 0;
+ void *ptr;
+
+ if (mode == DUMP_JITED) {
+ holder.jited_prog_len = info->jited_prog_len;
+ needed += info->jited_prog_len;
+ } else {
+ holder.xlated_prog_len = info->xlated_prog_len;
+ needed += info->xlated_prog_len;
+ }
+
+ holder.nr_jited_ksyms = info->nr_jited_ksyms;
+ needed += info->nr_jited_ksyms * sizeof(__u64);
+
+ holder.nr_jited_func_lens = info->nr_jited_func_lens;
+ needed += info->nr_jited_func_lens * sizeof(__u32);
+
+ holder.nr_func_info = info->nr_func_info;
+ holder.func_info_rec_size = info->func_info_rec_size;
+ needed += info->nr_func_info * info->func_info_rec_size;
+
+ holder.nr_line_info = info->nr_line_info;
+ holder.line_info_rec_size = info->line_info_rec_size;
+ needed += info->nr_line_info * info->line_info_rec_size;
+
+ holder.nr_jited_line_info = info->nr_jited_line_info;
+ holder.jited_line_info_rec_size = info->jited_line_info_rec_size;
+ needed += info->nr_jited_line_info * info->jited_line_info_rec_size;
+
+ if (needed > *info_data_sz) {
+ ptr = realloc(*info_data, needed);
+ if (!ptr)
+ return -1;
+
+ *info_data = ptr;
+ *info_data_sz = needed;
+ }
+ ptr = *info_data;
+
+ if (mode == DUMP_JITED) {
+ holder.jited_prog_insns = ptr_to_u64(ptr);
+ ptr += holder.jited_prog_len;
+ } else {
+ holder.xlated_prog_insns = ptr_to_u64(ptr);
+ ptr += holder.xlated_prog_len;
+ }
+
+ holder.jited_ksyms = ptr_to_u64(ptr);
+ ptr += holder.nr_jited_ksyms * sizeof(__u64);
+
+ holder.jited_func_lens = ptr_to_u64(ptr);
+ ptr += holder.nr_jited_func_lens * sizeof(__u32);
+
+ holder.func_info = ptr_to_u64(ptr);
+ ptr += holder.nr_func_info * holder.func_info_rec_size;
+
+ holder.line_info = ptr_to_u64(ptr);
+ ptr += holder.nr_line_info * holder.line_info_rec_size;
+
+ holder.jited_line_info = ptr_to_u64(ptr);
+ ptr += holder.nr_jited_line_info * holder.jited_line_info_rec_size;
+
+ *info = holder;
+ return 0;
+}
+
static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
{
struct timespec real_time_ts, boot_time_ts;
@@ -639,8 +709,8 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
char func_sig[1024];
unsigned char *buf;
__u32 member_len;
+ int fd, err = -1;
ssize_t n;
- int fd;
if (mode == DUMP_JITED) {
if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
@@ -679,7 +749,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
if (fd < 0) {
p_err("can't open file %s: %s", filepath,
strerror(errno));
- return -1;
+ goto exit_free;
}
n = write(fd, buf, member_len);
@@ -687,7 +757,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
if (n != (ssize_t)member_len) {
p_err("error writing output file: %s",
n < 0 ? strerror(errno) : "short write");
- return -1;
+ goto exit_free;
}
if (json_output)
@@ -701,7 +771,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
info->netns_ino,
&disasm_opt);
if (!name)
- return -1;
+ goto exit_free;
}
if (info->nr_jited_func_lens && info->jited_func_lens) {
@@ -796,23 +866,28 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
kernel_syms_destroy(&dd);
}
- btf__free(btf);
+ err = 0;
- return 0;
+exit_free:
+ btf__free(btf);
+ bpf_prog_linfo__free(prog_linfo);
+ return err;
}
static int do_dump(int argc, char **argv)
{
- struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_info info;
+ __u32 info_len = sizeof(info);
+ size_t info_data_sz = 0;
+ void *info_data = NULL;
char *filepath = NULL;
bool opcodes = false;
bool visual = false;
enum dump_mode mode;
bool linum = false;
- int *fds = NULL;
int nb_fds, i = 0;
+ int *fds = NULL;
int err = -1;
- __u64 arrays;
if (is_prefix(*argv, "jited")) {
if (disasm_init())
@@ -872,43 +947,44 @@ static int do_dump(int argc, char **argv)
goto exit_close;
}
- if (mode == DUMP_JITED)
- arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
- else
- arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS;
-
- arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS;
- arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
- arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
- arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
- arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
-
if (json_output && nb_fds > 1)
jsonw_start_array(json_wtr); /* root array */
for (i = 0; i < nb_fds; i++) {
- info_linear = bpf_program__get_prog_info_linear(fds[i], arrays);
- if (IS_ERR_OR_NULL(info_linear)) {
+ memset(&info, 0, sizeof(info));
+
+ err = bpf_obj_get_info_by_fd(fds[i], &info, &info_len);
+ if (err) {
+ p_err("can't get prog info: %s", strerror(errno));
+ break;
+ }
+
+ err = prep_prog_info(&info, mode, &info_data, &info_data_sz);
+ if (err) {
+ p_err("can't grow prog info_data");
+ break;
+ }
+
+ err = bpf_obj_get_info_by_fd(fds[i], &info, &info_len);
+ if (err) {
p_err("can't get prog info: %s", strerror(errno));
break;
}
if (json_output && nb_fds > 1) {
jsonw_start_object(json_wtr); /* prog object */
- print_prog_header_json(&info_linear->info);
+ print_prog_header_json(&info);
jsonw_name(json_wtr, "insns");
} else if (nb_fds > 1) {
- print_prog_header_plain(&info_linear->info);
+ print_prog_header_plain(&info);
}
- err = prog_dump(&info_linear->info, mode, filepath, opcodes,
- visual, linum);
+ err = prog_dump(&info, mode, filepath, opcodes, visual, linum);
if (json_output && nb_fds > 1)
jsonw_end_object(json_wtr); /* prog object */
else if (i != nb_fds - 1 && nb_fds > 1)
printf("\n");
- free(info_linear);
if (err)
break;
close(fds[i]);
@@ -920,6 +996,7 @@ exit_close:
for (; i < nb_fds; i++)
close(fds[i]);
exit_free:
+ free(info_data);
free(fds);
return err;
}
@@ -1409,8 +1486,6 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
while (argc) {
if (is_prefix(*argv, "type")) {
- char *type;
-
NEXT_ARG();
if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
@@ -1420,21 +1495,26 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
if (!REQ_ARGS(1))
goto err_free_reuse_maps;
- /* Put a '/' at the end of type to appease libbpf */
- type = malloc(strlen(*argv) + 2);
- if (!type) {
- p_err("mem alloc failed");
- goto err_free_reuse_maps;
- }
- *type = 0;
- strcat(type, *argv);
- strcat(type, "/");
+ err = libbpf_prog_type_by_name(*argv, &common_prog_type,
+ &expected_attach_type);
+ if (err < 0) {
+ /* Put a '/' at the end of type to appease libbpf */
+ char *type = malloc(strlen(*argv) + 2);
- err = get_prog_type_by_name(type, &common_prog_type,
- &expected_attach_type);
- free(type);
- if (err < 0)
- goto err_free_reuse_maps;
+ if (!type) {
+ p_err("mem alloc failed");
+ goto err_free_reuse_maps;
+ }
+ *type = 0;
+ strcat(type, *argv);
+ strcat(type, "/");
+
+ err = get_prog_type_by_name(type, &common_prog_type,
+ &expected_attach_type);
+ free(type);
+ if (err < 0)
+ goto err_free_reuse_maps;
+ }
NEXT_ARG();
} else if (is_prefix(*argv, "map")) {
@@ -1657,6 +1737,11 @@ err_unpin:
else
bpf_object__unpin_programs(obj, pinfile);
err_close_obj:
+ if (!legacy_libbpf) {
+ p_info("Warning: bpftool is now running in libbpf strict mode and has more stringent requirements about BPF programs.\n"
+ "If it used to work for this object file but now doesn't, see --legacy option for more details.\n");
+ }
+
bpf_object__close(obj);
err_free_reuse_maps:
for (i = 0; i < old_map_fds; i++)
@@ -2016,41 +2101,58 @@ static void profile_print_readings(void)
static char *profile_target_name(int tgt_fd)
{
- struct bpf_prog_info_linear *info_linear;
- struct bpf_func_info *func_info;
+ struct bpf_func_info func_info;
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
const struct btf_type *t;
+ __u32 func_info_rec_size;
struct btf *btf = NULL;
char *name = NULL;
+ int err;
- info_linear = bpf_program__get_prog_info_linear(
- tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
- if (IS_ERR_OR_NULL(info_linear)) {
- p_err("failed to get info_linear for prog FD %d", tgt_fd);
- return NULL;
+ err = bpf_obj_get_info_by_fd(tgt_fd, &info, &info_len);
+ if (err) {
+ p_err("failed to bpf_obj_get_info_by_fd for prog FD %d", tgt_fd);
+ goto out;
}
- if (info_linear->info.btf_id == 0) {
+ if (info.btf_id == 0) {
p_err("prog FD %d doesn't have valid btf", tgt_fd);
goto out;
}
- btf = btf__load_from_kernel_by_id(info_linear->info.btf_id);
+ func_info_rec_size = info.func_info_rec_size;
+ if (info.nr_func_info == 0) {
+ p_err("bpf_obj_get_info_by_fd for prog FD %d found 0 func_info", tgt_fd);
+ goto out;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.nr_func_info = 1;
+ info.func_info_rec_size = func_info_rec_size;
+ info.func_info = ptr_to_u64(&func_info);
+
+ err = bpf_obj_get_info_by_fd(tgt_fd, &info, &info_len);
+ if (err) {
+ p_err("failed to get func_info for prog FD %d", tgt_fd);
+ goto out;
+ }
+
+ btf = btf__load_from_kernel_by_id(info.btf_id);
if (libbpf_get_error(btf)) {
p_err("failed to load btf for prog FD %d", tgt_fd);
goto out;
}
- func_info = u64_to_ptr(info_linear->info.func_info);
- t = btf__type_by_id(btf, func_info[0].type_id);
+ t = btf__type_by_id(btf, func_info.type_id);
if (!t) {
p_err("btf %d doesn't have type %d",
- info_linear->info.btf_id, func_info[0].type_id);
+ info.btf_id, func_info.type_id);
goto out;
}
name = strdup(btf__name_by_offset(btf, t->name_off));
out:
btf__free(btf);
- free(info_linear);
return name;
}
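
The reworked "type" parsing above first defers to libbpf_prog_type_by_name() and only falls back to the old '/'-suffix trick on failure. In isolation the libbpf call looks like this (sketch; resolve_type() is a hypothetical helper):

/* Resolve a program type from an ELF-section-style name such as "xdp"
 * or "cgroup/sock"; also yields the expected attach type.
 */
#include <bpf/libbpf.h>

static int resolve_type(const char *name)
{
	enum bpf_prog_type prog_type;
	enum bpf_attach_type attach_type;

	return libbpf_prog_type_by_name(name, &prog_type, &attach_type);
}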
diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c
index ab2d2290569a..cbdca37a53f0 100644
--- a/tools/bpf/bpftool/struct_ops.c
+++ b/tools/bpf/bpftool/struct_ops.c
@@ -32,7 +32,7 @@ static const struct btf *get_btf_vmlinux(void)
return btf_vmlinux;
btf_vmlinux = libbpf_find_kernel_btf();
- if (IS_ERR(btf_vmlinux))
+ if (libbpf_get_error(btf_vmlinux))
p_err("struct_ops requires kernel CONFIG_DEBUG_INFO_BTF=y");
return btf_vmlinux;
@@ -45,7 +45,7 @@ static const char *get_kern_struct_ops_name(const struct bpf_map_info *info)
const char *st_ops_name;
kern_btf = get_btf_vmlinux();
- if (IS_ERR(kern_btf))
+ if (libbpf_get_error(kern_btf))
return "<btf_vmlinux_not_found>";
t = btf__type_by_id(kern_btf, info->btf_vmlinux_value_type_id);
@@ -63,7 +63,7 @@ static __s32 get_map_info_type_id(void)
return map_info_type_id;
kern_btf = get_btf_vmlinux();
- if (IS_ERR(kern_btf)) {
+ if (libbpf_get_error(kern_btf)) {
map_info_type_id = PTR_ERR(kern_btf);
return map_info_type_id;
}
@@ -252,7 +252,7 @@ static struct res do_one_id(const char *id_str, work_func func, void *data,
}
fd = bpf_map_get_fd_by_id(id);
- if (fd == -1) {
+ if (fd < 0) {
p_err("can't get map by id (%lu): %s", id, strerror(errno));
res.nr_errs++;
return res;
@@ -415,7 +415,7 @@ static int do_dump(int argc, char **argv)
}
kern_btf = get_btf_vmlinux();
- if (IS_ERR(kern_btf))
+ if (libbpf_get_error(kern_btf))
return -1;
if (!json_output) {
@@ -495,7 +495,7 @@ static int do_register(int argc, char **argv)
file = GET_ARG();
obj = bpf_object__open(file);
- if (IS_ERR_OR_NULL(obj))
+ if (libbpf_get_error(obj))
return -1;
set_max_rlimit();
@@ -516,7 +516,7 @@ static int do_register(int argc, char **argv)
continue;
link = bpf_map__attach_struct_ops(map);
- if (IS_ERR(link)) {
+ if (libbpf_get_error(link)) {
p_err("can't register struct_ops %s: %s",
bpf_map__name(map),
strerror(-PTR_ERR(link)));
@@ -596,7 +596,7 @@ int do_struct_ops(int argc, char **argv)
err = cmd_select(cmds, argc, argv, do_help);
- if (!IS_ERR(btf_vmlinux))
+ if (!libbpf_get_error(btf_vmlinux))
btf__free(btf_vmlinux);
return err;
diff --git a/tools/bpf/runqslower/runqslower.c b/tools/bpf/runqslower/runqslower.c
index d89715844952..2414cc764461 100644
--- a/tools/bpf/runqslower/runqslower.c
+++ b/tools/bpf/runqslower/runqslower.c
@@ -123,7 +123,6 @@ int main(int argc, char **argv)
.parser = parse_arg,
.doc = argp_program_doc,
};
- struct perf_buffer_opts pb_opts;
struct perf_buffer *pb = NULL;
struct runqslower_bpf *obj;
int err;
@@ -165,9 +164,8 @@ int main(int argc, char **argv)
printf("Tracing run queue latency higher than %llu us\n", env.min_us);
printf("%-8s %-16s %-6s %14s\n", "TIME", "COMM", "PID", "LAT(us)");
- pb_opts.sample_cb = handle_event;
- pb_opts.lost_cb = handle_lost_events;
- pb = perf_buffer__new(bpf_map__fd(obj->maps.events), 64, &pb_opts);
+ pb = perf_buffer__new(bpf_map__fd(obj->maps.events), 64,
+ handle_event, handle_lost_events, NULL, NULL);
err = libbpf_get_error(pb);
if (err) {
pb = NULL;
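
The runqslower conversion shows the new perf_buffer__new() calling convention: the callbacks and their context are explicit parameters, and the trailing opts (which may be NULL) is reserved for future fields. A self-contained sketch under those assumptions:

/* Poll a BPF_MAP_TYPE_PERF_EVENT_ARRAY with the post-patch API. */
#include <bpf/libbpf.h>

static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* consume one event */
}

static void on_lost(void *ctx, int cpu, __u64 cnt)
{
	/* account for dropped events */
}

static int poll_events(int map_fd)
{
	struct perf_buffer *pb;
	int err;

	pb = perf_buffer__new(map_fd, 64 /* pages */, on_sample, on_lost,
			      NULL /* ctx */, NULL /* opts */);
	err = libbpf_get_error(pb);
	if (err)
		return err;

	while ((err = perf_buffer__poll(pb, 100 /* ms */)) >= 0)
		;
	perf_buffer__free(pb);
	return err;
}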
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index ba5af15e25f5..6297eafdc40f 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -4938,6 +4938,25 @@ union bpf_attr {
* **-ENOENT** if symbol is not found.
*
* **-EPERM** if caller does not have permission to obtain kernel address.
+ *
+ * long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags)
+ * Description
+ * Find vma of *task* that contains *addr*, call *callback_fn*
+ * function with *task*, *vma*, and *callback_ctx*.
+ * The *callback_fn* should be a static function and
+ * the *callback_ctx* should be a pointer to the stack.
+ * The *flags* is used to control certain aspects of the helper.
+ * Currently, the *flags* must be 0.
+ *
+ * The expected callback signature is
+ *
+ * long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx);
+ *
+ * Return
+ * 0 on success.
+ * **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*.
+ * **-EBUSY** if the attempt to trylock *mmap_lock* failed.
+ * **-EINVAL** for invalid **flags**.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5120,6 +5139,7 @@ union bpf_attr {
FN(trace_vprintk), \
FN(skc_to_unix_sock), \
FN(kallsyms_lookup_name), \
+ FN(find_vma), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -6296,6 +6316,7 @@ struct bpf_sk_lookup {
__u32 local_ip4; /* Network byte order */
__u32 local_ip6[4]; /* Network byte order */
__u32 local_port; /* Host byte order */
+ __u32 ingress_ifindex; /* The arriving interface. Determined by inet_iif. */
};
/*
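
On the BPF side, the new helper is used with a static callback and a stack-resident context, as the documentation above requires. A hedged sketch of a hypothetical CO-RE program (assumes vmlinux.h and a libbpf with the new helper definition; VM_EXEC is redefined locally because kernel macros are not emitted into vmlinux.h):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#define VM_EXEC 0x00000004

__u64 addr;	/* set by userspace before the program runs */
int found_exec;	/* result read back by userspace */

static long check_vma(struct task_struct *task, struct vm_area_struct *vma,
		      void *data)
{
	*(int *)data = (vma->vm_flags & VM_EXEC) != 0;
	return 0;
}

SEC("raw_tp/sys_enter")
int handle_sys_enter(void *ctx)
{
	struct task_struct *task = bpf_get_current_task_btf();
	int exec = 0;

	/* -ENOENT if no vma contains addr, -EBUSY if mmap_lock is busy */
	bpf_find_vma(task, addr, check_vma, &exec, 0);
	found_exec = exec;
	return 0;
}

char LICENSE[] SEC("license") = "GPL";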
diff --git a/tools/include/uapi/linux/btf.h b/tools/include/uapi/linux/btf.h
index deb12f755f0f..b0d8fea1951d 100644
--- a/tools/include/uapi/linux/btf.h
+++ b/tools/include/uapi/linux/btf.h
@@ -43,7 +43,7 @@ struct btf_type {
* "size" tells the size of the type it is describing.
*
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
- * FUNC, FUNC_PROTO, VAR and DECL_TAG.
+ * FUNC, FUNC_PROTO, VAR, DECL_TAG and TYPE_TAG.
* "type" is a type_id referring to another type.
*/
union {
@@ -75,6 +75,7 @@ enum {
BTF_KIND_DATASEC = 15, /* Section */
BTF_KIND_FLOAT = 16, /* Floating point */
BTF_KIND_DECL_TAG = 17, /* Decl Tag */
+ BTF_KIND_TYPE_TAG = 18, /* Type Tag */
NR_BTF_KINDS,
BTF_KIND_MAX = NR_BTF_KINDS - 1,
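
BTF_KIND_TYPE_TAG typically enters BTF via recent clang's btf_type_tag attribute on pointee types; a declaration like the one below produces a TYPE_TAG("user") node between the pointer and the int it points to (sketch; assumes clang 14+ building with BTF debug info):

#define __user __attribute__((btf_type_tag("user")))

struct request {
	int __user *ubuf;	/* pointer to user memory, tagged in BTF */
};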
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index b393b5e82380..5f7086fae31c 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -84,6 +84,7 @@ else
endif
# Append required CFLAGS
+override CFLAGS += -std=gnu89
override CFLAGS += $(EXTRA_WARNINGS) -Wno-switch-enum
override CFLAGS += -Werror -Wall
override CFLAGS += $(INCLUDES)
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 725701235fd8..94560ba31724 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -28,6 +28,7 @@
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
+#include <limits.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
@@ -74,14 +75,15 @@ static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
return ensure_good_fd(fd);
}
-static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
+#define PROG_LOAD_ATTEMPTS 5
+
+static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
{
- int retries = 5;
int fd;
do {
fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
- } while (fd < 0 && errno == EAGAIN && retries-- > 0);
+ } while (fd < 0 && errno == EAGAIN && --attempts > 0);
return fd;
}
@@ -253,58 +255,91 @@ alloc_zero_tailing_info(const void *orecord, __u32 cnt,
return info;
}
-int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
+DEFAULT_VERSION(bpf_prog_load_v0_6_0, bpf_prog_load, LIBBPF_0.6.0)
+int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
+ const char *prog_name, const char *license,
+ const struct bpf_insn *insns, size_t insn_cnt,
+ const struct bpf_prog_load_opts *opts)
{
void *finfo = NULL, *linfo = NULL;
+ const char *func_info, *line_info;
+ __u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
+ __u32 func_info_rec_size, line_info_rec_size;
+ int fd, attempts;
union bpf_attr attr;
- int fd;
+ char *log_buf;
- if (!load_attr->log_buf != !load_attr->log_buf_sz)
+ if (!OPTS_VALID(opts, bpf_prog_load_opts))
return libbpf_err(-EINVAL);
- if (load_attr->log_level > (4 | 2 | 1) || (load_attr->log_level && !load_attr->log_buf))
+ attempts = OPTS_GET(opts, attempts, 0);
+ if (attempts < 0)
return libbpf_err(-EINVAL);
+ if (attempts == 0)
+ attempts = PROG_LOAD_ATTEMPTS;
memset(&attr, 0, sizeof(attr));
- attr.prog_type = load_attr->prog_type;
- attr.expected_attach_type = load_attr->expected_attach_type;
- if (load_attr->attach_prog_fd)
- attr.attach_prog_fd = load_attr->attach_prog_fd;
- else
- attr.attach_btf_obj_fd = load_attr->attach_btf_obj_fd;
- attr.attach_btf_id = load_attr->attach_btf_id;
+ attr.prog_type = prog_type;
+ attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);
- attr.prog_ifindex = load_attr->prog_ifindex;
- attr.kern_version = load_attr->kern_version;
+ attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0);
+ attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
+ attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
+ attr.kern_version = OPTS_GET(opts, kern_version, 0);
- attr.insn_cnt = (__u32)load_attr->insn_cnt;
- attr.insns = ptr_to_u64(load_attr->insns);
- attr.license = ptr_to_u64(load_attr->license);
+ if (prog_name)
+ strncat(attr.prog_name, prog_name, sizeof(attr.prog_name) - 1);
+ attr.license = ptr_to_u64(license);
- attr.log_level = load_attr->log_level;
- if (attr.log_level) {
- attr.log_buf = ptr_to_u64(load_attr->log_buf);
- attr.log_size = load_attr->log_buf_sz;
- }
+ if (insn_cnt > UINT_MAX)
+ return libbpf_err(-E2BIG);
+
+ attr.insns = ptr_to_u64(insns);
+ attr.insn_cnt = (__u32)insn_cnt;
+
+ attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
+ attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0);
+
+ if (attach_prog_fd && attach_btf_obj_fd)
+ return libbpf_err(-EINVAL);
+
+ attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0);
+ if (attach_prog_fd)
+ attr.attach_prog_fd = attach_prog_fd;
+ else
+ attr.attach_btf_obj_fd = attach_btf_obj_fd;
- attr.prog_btf_fd = load_attr->prog_btf_fd;
- attr.prog_flags = load_attr->prog_flags;
+ log_buf = OPTS_GET(opts, log_buf, NULL);
+ log_size = OPTS_GET(opts, log_size, 0);
+ log_level = OPTS_GET(opts, log_level, 0);
- attr.func_info_rec_size = load_attr->func_info_rec_size;
- attr.func_info_cnt = load_attr->func_info_cnt;
- attr.func_info = ptr_to_u64(load_attr->func_info);
+ if (!!log_buf != !!log_size)
+ return libbpf_err(-EINVAL);
+ if (log_level > (4 | 2 | 1))
+ return libbpf_err(-EINVAL);
+ if (log_level && !log_buf)
+ return libbpf_err(-EINVAL);
- attr.line_info_rec_size = load_attr->line_info_rec_size;
- attr.line_info_cnt = load_attr->line_info_cnt;
- attr.line_info = ptr_to_u64(load_attr->line_info);
- attr.fd_array = ptr_to_u64(load_attr->fd_array);
+ attr.log_level = log_level;
+ attr.log_buf = ptr_to_u64(log_buf);
+ attr.log_size = log_size;
- if (load_attr->name)
- memcpy(attr.prog_name, load_attr->name,
- min(strlen(load_attr->name), (size_t)BPF_OBJ_NAME_LEN - 1));
+ func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
+ func_info = OPTS_GET(opts, func_info, NULL);
+ attr.func_info_rec_size = func_info_rec_size;
+ attr.func_info = ptr_to_u64(func_info);
+ attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0);
- fd = sys_bpf_prog_load(&attr, sizeof(attr));
+ line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0);
+ line_info = OPTS_GET(opts, line_info, NULL);
+ attr.line_info_rec_size = line_info_rec_size;
+ attr.line_info = ptr_to_u64(line_info);
+ attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);
+
+ attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));
+
+ fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
if (fd >= 0)
return fd;
@@ -314,11 +349,11 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
*/
while (errno == E2BIG && (!finfo || !linfo)) {
if (!finfo && attr.func_info_cnt &&
- attr.func_info_rec_size < load_attr->func_info_rec_size) {
+ attr.func_info_rec_size < func_info_rec_size) {
/* try with corrected func info records */
- finfo = alloc_zero_tailing_info(load_attr->func_info,
- load_attr->func_info_cnt,
- load_attr->func_info_rec_size,
+ finfo = alloc_zero_tailing_info(func_info,
+ attr.func_info_cnt,
+ func_info_rec_size,
attr.func_info_rec_size);
if (!finfo) {
errno = E2BIG;
@@ -326,13 +361,12 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
}
attr.func_info = ptr_to_u64(finfo);
- attr.func_info_rec_size = load_attr->func_info_rec_size;
+ attr.func_info_rec_size = func_info_rec_size;
} else if (!linfo && attr.line_info_cnt &&
- attr.line_info_rec_size <
- load_attr->line_info_rec_size) {
- linfo = alloc_zero_tailing_info(load_attr->line_info,
- load_attr->line_info_cnt,
- load_attr->line_info_rec_size,
+ attr.line_info_rec_size < line_info_rec_size) {
+ linfo = alloc_zero_tailing_info(line_info,
+ attr.line_info_cnt,
+ line_info_rec_size,
attr.line_info_rec_size);
if (!linfo) {
errno = E2BIG;
@@ -340,26 +374,26 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
}
attr.line_info = ptr_to_u64(linfo);
- attr.line_info_rec_size = load_attr->line_info_rec_size;
+ attr.line_info_rec_size = line_info_rec_size;
} else {
break;
}
- fd = sys_bpf_prog_load(&attr, sizeof(attr));
+ fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
if (fd >= 0)
goto done;
}
- if (load_attr->log_level || !load_attr->log_buf)
+ if (log_level || !log_buf)
goto done;
/* Try again with log */
- attr.log_buf = ptr_to_u64(load_attr->log_buf);
- attr.log_size = load_attr->log_buf_sz;
+ log_buf[0] = 0;
+ attr.log_buf = ptr_to_u64(log_buf);
+ attr.log_size = log_size;
attr.log_level = 1;
- load_attr->log_buf[0] = 0;
- fd = sys_bpf_prog_load(&attr, sizeof(attr));
+ fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
done:
/* free() doesn't affect errno, so we don't need to restore it */
free(finfo);
@@ -367,17 +401,20 @@ done:
return libbpf_err_errno(fd);
}
+__attribute__((alias("bpf_load_program_xattr2")))
int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
- char *log_buf, size_t log_buf_sz)
+ char *log_buf, size_t log_buf_sz);
+
+static int bpf_load_program_xattr2(const struct bpf_load_program_attr *load_attr,
+ char *log_buf, size_t log_buf_sz)
{
- struct bpf_prog_load_params p = {};
+ LIBBPF_OPTS(bpf_prog_load_opts, p);
if (!load_attr || !log_buf != !log_buf_sz)
return libbpf_err(-EINVAL);
- p.prog_type = load_attr->prog_type;
p.expected_attach_type = load_attr->expected_attach_type;
- switch (p.prog_type) {
+ switch (load_attr->prog_type) {
case BPF_PROG_TYPE_STRUCT_OPS:
case BPF_PROG_TYPE_LSM:
p.attach_btf_id = load_attr->attach_btf_id;
@@ -391,12 +428,9 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
p.prog_ifindex = load_attr->prog_ifindex;
p.kern_version = load_attr->kern_version;
}
- p.insn_cnt = load_attr->insns_cnt;
- p.insns = load_attr->insns;
- p.license = load_attr->license;
p.log_level = load_attr->log_level;
p.log_buf = log_buf;
- p.log_buf_sz = log_buf_sz;
+ p.log_size = log_buf_sz;
p.prog_btf_fd = load_attr->prog_btf_fd;
p.func_info_rec_size = load_attr->func_info_rec_size;
p.func_info_cnt = load_attr->func_info_cnt;
@@ -404,10 +438,10 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
p.line_info_rec_size = load_attr->line_info_rec_size;
p.line_info_cnt = load_attr->line_info_cnt;
p.line_info = load_attr->line_info;
- p.name = load_attr->name;
p.prog_flags = load_attr->prog_flags;
- return libbpf__bpf_prog_load(&p);
+ return bpf_prog_load(load_attr->prog_type, load_attr->name, load_attr->license,
+ load_attr->insns, load_attr->insns_cnt, &p);
}
int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
@@ -426,7 +460,7 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
load_attr.license = license;
load_attr.kern_version = kern_version;
- return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz);
+ return bpf_load_program_xattr2(&load_attr, log_buf, log_buf_sz);
}
int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
@@ -449,7 +483,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
attr.kern_version = kern_version;
attr.prog_flags = prog_flags;
- fd = sys_bpf_prog_load(&attr, sizeof(attr));
+ fd = sys_bpf_prog_load(&attr, sizeof(attr), PROG_LOAD_ATTEMPTS);
return libbpf_err_errno(fd);
}
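
With the unification above, the OPTS-based entry point covers everything the old xattr variants did. A minimal sketch of a caller loading a trivial "return 0" socket filter with a verifier log, relying on the default 5 EAGAIN attempts:

#include <linux/bpf.h>
#include <bpf/bpf.h>

static int load_trivial_prog(void)
{
	static char log[4096];
	const struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
		  .dst_reg = BPF_REG_0, .imm = 0 },	/* r0 = 0 */
		{ .code = BPF_JMP | BPF_EXIT },		/* exit   */
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_buf = log,
		.log_size = sizeof(log),
		.log_level = 1,
	);

	return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "trivial", "GPL",
			     insns, sizeof(insns) / sizeof(insns[0]), &opts);
}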
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 6fffb3cdf39b..079cc81ac51e 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -29,6 +29,7 @@
#include <stdint.h>
#include "libbpf_common.h"
+#include "libbpf_legacy.h"
#ifdef __cplusplus
extern "C" {
@@ -71,6 +72,71 @@ LIBBPF_API int bpf_create_map_in_map(enum bpf_map_type map_type,
int inner_map_fd, int max_entries,
__u32 map_flags);
+struct bpf_prog_load_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+
+ /* libbpf can retry BPF_PROG_LOAD command if bpf() syscall returns
+ * -EAGAIN. This field determines how many attempts libbpf has to
+ * make. If not specified, libbpf will use default value of 5.
+ */
+ int attempts;
+
+ enum bpf_attach_type expected_attach_type;
+ __u32 prog_btf_fd;
+ __u32 prog_flags;
+ __u32 prog_ifindex;
+ __u32 kern_version;
+
+ __u32 attach_btf_id;
+ __u32 attach_prog_fd;
+ __u32 attach_btf_obj_fd;
+
+ const int *fd_array;
+
+ /* .BTF.ext func info data */
+ const void *func_info;
+ __u32 func_info_cnt;
+ __u32 func_info_rec_size;
+
+ /* .BTF.ext line info data */
+ const void *line_info;
+ __u32 line_info_cnt;
+ __u32 line_info_rec_size;
+
+ /* verifier log options */
+ __u32 log_level;
+ __u32 log_size;
+ char *log_buf;
+};
+#define bpf_prog_load_opts__last_field log_buf
+
+LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
+ const char *prog_name, const char *license,
+ const struct bpf_insn *insns, size_t insn_cnt,
+ const struct bpf_prog_load_opts *opts);
+/* this "specialization" should go away in libbpf 1.0 */
+LIBBPF_API int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
+ const char *prog_name, const char *license,
+ const struct bpf_insn *insns, size_t insn_cnt,
+ const struct bpf_prog_load_opts *opts);
+
+/* This is an elaborate way to not conflict with deprecated bpf_prog_load()
+ * API, defined in libbpf.h. Once we hit libbpf 1.0, all this will be gone.
+ * With this approach, if someone is calling bpf_prog_load() with
+ * 4 arguments, they will use the deprecated API, which keeps backwards
+ * compatibility (both source code and binary). If bpf_prog_load() is called
+ * with 6 arguments, though, it gets redirected to __bpf_prog_load.
+ * So looking forward to libbpf 1.0 when this hack will be gone and
+ * __bpf_prog_load() will be called just bpf_prog_load().
+ */
+#ifndef bpf_prog_load
+#define bpf_prog_load(...) ___libbpf_overload(___bpf_prog_load, __VA_ARGS__)
+#define ___bpf_prog_load4(file, type, pobj, prog_fd) \
+ bpf_prog_load_deprecated(file, type, pobj, prog_fd)
+#define ___bpf_prog_load6(prog_type, prog_name, license, insns, insn_cnt, opts) \
+ bpf_prog_load(prog_type, prog_name, license, insns, insn_cnt, opts)
+#endif /* bpf_prog_load */
+
struct bpf_load_program_attr {
enum bpf_prog_type prog_type;
enum bpf_attach_type expected_attach_type;
@@ -102,13 +168,15 @@ struct bpf_load_program_attr {
/* Recommend log buffer size */
#define BPF_LOG_BUF_SIZE (UINT32_MAX >> 8) /* verifier maximum in kernels <= 5.1 */
-LIBBPF_API int
-bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
- char *log_buf, size_t log_buf_sz);
+LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead")
+LIBBPF_API int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
+ char *log_buf, size_t log_buf_sz);
+LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead")
LIBBPF_API int bpf_load_program(enum bpf_prog_type type,
const struct bpf_insn *insns, size_t insns_cnt,
const char *license, __u32 kern_version,
char *log_buf, size_t log_buf_sz);
+LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead")
LIBBPF_API int bpf_verify_program(enum bpf_prog_type type,
const struct bpf_insn *insns,
size_t insns_cnt, __u32 prog_flags,
diff --git a/tools/lib/bpf/bpf_gen_internal.h b/tools/lib/bpf/bpf_gen_internal.h
index 6f3df004479b..cc486a77db65 100644
--- a/tools/lib/bpf/bpf_gen_internal.h
+++ b/tools/lib/bpf/bpf_gen_internal.h
@@ -3,6 +3,8 @@
#ifndef __BPF_GEN_INTERNAL_H
#define __BPF_GEN_INTERNAL_H
+#include "bpf.h"
+
struct ksym_relo_desc {
const char *name;
int kind;
@@ -50,8 +52,10 @@ int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps);
void bpf_gen__free(struct bpf_gen *gen);
void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size);
void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_params *map_attr, int map_idx);
-struct bpf_prog_load_params;
-void bpf_gen__prog_load(struct bpf_gen *gen, struct bpf_prog_load_params *load_attr, int prog_idx);
+void bpf_gen__prog_load(struct bpf_gen *gen,
+ enum bpf_prog_type prog_type, const char *prog_name,
+ const char *license, struct bpf_insn *insns, size_t insn_cnt,
+ struct bpf_prog_load_opts *load_attr, int prog_idx);
void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size);
void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx);
void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type);
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 7e4c5586bd87..fadf089ae8fe 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -299,6 +299,7 @@ static int btf_type_size(const struct btf_type *t)
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_FLOAT:
+ case BTF_KIND_TYPE_TAG:
return base_size;
case BTF_KIND_INT:
return base_size + sizeof(__u32);
@@ -349,6 +350,7 @@ static int btf_bswap_type_rest(struct btf_type *t)
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_FLOAT:
+ case BTF_KIND_TYPE_TAG:
return 0;
case BTF_KIND_INT:
*(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
@@ -649,6 +651,7 @@ int btf__align_of(const struct btf *btf, __u32 id)
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
+ case BTF_KIND_TYPE_TAG:
return btf__align_of(btf, t->type);
case BTF_KIND_ARRAY:
return btf__align_of(btf, btf_array(t)->type);
@@ -2236,6 +2239,22 @@ int btf__add_restrict(struct btf *btf, int ref_type_id)
}
/*
+ * Append new BTF_KIND_TYPE_TAG type with:
+ * - *value*, non-empty/non-NULL tag value;
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
+{
+ if (!value || !value[0])
+ return libbpf_err(-EINVAL);
+
+ return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
+}
+
+/*
* Append new BTF_KIND_FUNC type with:
* - *name*, non-empty/non-NULL name;
* - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
@@ -2846,8 +2865,7 @@ __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
struct btf_dedup;
-static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
- const struct btf_dedup_opts *opts);
+static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
static void btf_dedup_free(struct btf_dedup *d);
static int btf_dedup_prep(struct btf_dedup *d);
static int btf_dedup_strings(struct btf_dedup *d);
@@ -2994,12 +3012,17 @@ static int btf_dedup_remap_types(struct btf_dedup *d);
* deduplicating structs/unions is described in greater details in comments for
* `btf_dedup_is_equiv` function.
*/
-int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
- const struct btf_dedup_opts *opts)
+
+DEFAULT_VERSION(btf__dedup_v0_6_0, btf__dedup, LIBBPF_0.6.0)
+int btf__dedup_v0_6_0(struct btf *btf, const struct btf_dedup_opts *opts)
{
- struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);
+ struct btf_dedup *d;
int err;
+ if (!OPTS_VALID(opts, btf_dedup_opts))
+ return libbpf_err(-EINVAL);
+
+ d = btf_dedup_new(btf, opts);
if (IS_ERR(d)) {
pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
return libbpf_err(-EINVAL);
@@ -3051,6 +3074,19 @@ done:
return libbpf_err(err);
}
+COMPAT_VERSION(bpf__dedup_deprecated, btf__dedup, LIBBPF_0.0.2)
+int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *unused_opts)
+{
+ LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = btf_ext);
+
+ if (unused_opts) {
+ pr_warn("please use new version of btf__dedup() that supports options\n");
+ return libbpf_err(-ENOTSUP);
+ }
+
+ return btf__dedup(btf, &opts);
+}
+
#define BTF_UNPROCESSED_ID ((__u32)-1)
#define BTF_IN_PROGRESS_ID ((__u32)-2)
@@ -3163,8 +3199,7 @@ static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
return k1 == k2;
}
-static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
- const struct btf_dedup_opts *opts)
+static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts)
{
struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
@@ -3173,13 +3208,11 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
if (!d)
return ERR_PTR(-ENOMEM);
- d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
- /* dedup_table_size is now used only to force collisions in tests */
- if (opts && opts->dedup_table_size == 1)
+ if (OPTS_GET(opts, force_collisions, false))
hash_fn = btf_dedup_collision_hash_fn;
d->btf = btf;
- d->btf_ext = btf_ext;
+ d->btf_ext = OPTS_GET(opts, btf_ext, NULL);
d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
if (IS_ERR(d->dedup_table)) {
@@ -3625,6 +3658,7 @@ static int btf_dedup_prep(struct btf_dedup *d)
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_FLOAT:
+ case BTF_KIND_TYPE_TAG:
h = btf_hash_common(t);
break;
case BTF_KIND_INT:
@@ -3685,6 +3719,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
case BTF_KIND_VAR:
case BTF_KIND_DATASEC:
case BTF_KIND_DECL_TAG:
+ case BTF_KIND_TYPE_TAG:
return 0;
case BTF_KIND_INT:
@@ -3708,8 +3743,6 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
new_id = cand_id;
break;
}
- if (d->opts.dont_resolve_fwds)
- continue;
if (btf_compat_enum(t, cand)) {
if (btf_is_enum_fwd(t)) {
/* resolve fwd to full enum */
@@ -3952,8 +3985,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
return 0;
/* FWD <--> STRUCT/UNION equivalence check, if enabled */
- if (!d->opts.dont_resolve_fwds
- && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
+ if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
&& cand_kind != canon_kind) {
__u16 real_kind;
__u16 fwd_kind;
@@ -3979,10 +4011,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
return btf_equal_int_tag(cand_type, canon_type);
case BTF_KIND_ENUM:
- if (d->opts.dont_resolve_fwds)
- return btf_equal_enum(cand_type, canon_type);
- else
- return btf_compat_enum(cand_type, canon_type);
+ return btf_compat_enum(cand_type, canon_type);
case BTF_KIND_FWD:
case BTF_KIND_FLOAT:
@@ -4289,6 +4318,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
+ case BTF_KIND_TYPE_TAG:
ref_type_id = btf_dedup_ref_type(d, t->type);
if (ref_type_id < 0)
return ref_type_id;
@@ -4595,6 +4625,7 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ct
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
case BTF_KIND_DECL_TAG:
+ case BTF_KIND_TYPE_TAG:
return visit(&t->type, ctx);
case BTF_KIND_ARRAY: {
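
For callers, the dedup rework means the .BTF.ext handle moves into the options struct; a two-argument call with NULL opts dedups the BTF alone. Sketch:

#include <bpf/btf.h>

static int dedup_with_ext(struct btf *btf, struct btf_ext *btf_ext)
{
	LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = btf_ext);

	return btf__dedup(btf, &opts);
}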
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index bc005ba3ceec..5c73a5b0a044 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -227,6 +227,7 @@ LIBBPF_API int btf__add_typedef(struct btf *btf, const char *name, int ref_type_
LIBBPF_API int btf__add_volatile(struct btf *btf, int ref_type_id);
LIBBPF_API int btf__add_const(struct btf *btf, int ref_type_id);
LIBBPF_API int btf__add_restrict(struct btf *btf, int ref_type_id);
+LIBBPF_API int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id);
/* func and func_proto construction APIs */
LIBBPF_API int btf__add_func(struct btf *btf, const char *name,
@@ -245,25 +246,80 @@ LIBBPF_API int btf__add_decl_tag(struct btf *btf, const char *value, int ref_typ
int component_idx);
struct btf_dedup_opts {
- unsigned int dedup_table_size;
- bool dont_resolve_fwds;
+ size_t sz;
+ /* optional .BTF.ext info to dedup along the main BTF info */
+ struct btf_ext *btf_ext;
+ /* force hash collisions (used for testing) */
+ bool force_collisions;
+ size_t :0;
};
+#define btf_dedup_opts__last_field force_collisions
+
+LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
-LIBBPF_API int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
- const struct btf_dedup_opts *opts);
+LIBBPF_API int btf__dedup_v0_6_0(struct btf *btf, const struct btf_dedup_opts *opts);
+
+LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__dedup() instead")
+LIBBPF_API int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *opts);
+#define btf__dedup(...) ___libbpf_overload(___btf_dedup, __VA_ARGS__)
+#define ___btf_dedup3(btf, btf_ext, opts) btf__dedup_deprecated(btf, btf_ext, opts)
+#define ___btf_dedup2(btf, opts) btf__dedup(btf, opts)
struct btf_dump;
struct btf_dump_opts {
- void *ctx;
+ union {
+ size_t sz;
+ void *ctx; /* DEPRECATED: will be gone in v1.0 */
+ };
};
typedef void (*btf_dump_printf_fn_t)(void *ctx, const char *fmt, va_list args);
LIBBPF_API struct btf_dump *btf_dump__new(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const struct btf_dump_opts *opts,
- btf_dump_printf_fn_t printf_fn);
+ btf_dump_printf_fn_t printf_fn,
+ void *ctx,
+ const struct btf_dump_opts *opts);
+
+LIBBPF_API struct btf_dump *btf_dump__new_v0_6_0(const struct btf *btf,
+ btf_dump_printf_fn_t printf_fn,
+ void *ctx,
+ const struct btf_dump_opts *opts);
+
+LIBBPF_API struct btf_dump *btf_dump__new_deprecated(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const struct btf_dump_opts *opts,
+ btf_dump_printf_fn_t printf_fn);
+
+/* Choose either btf_dump__new() or btf_dump__new_deprecated() based on the
+ * type of 4th argument. If it's btf_dump's print callback, use deprecated
+ * API; otherwise, choose the new btf_dump__new(). ___libbpf_override()
+ * doesn't work here because both variants have 4 input arguments.
+ *
+ * (void *) casts are necessary to avoid compilation warnings about type
+ * mismatches, because even though __builtin_choose_expr() only ever evaluates
+ * one side the other side still has to satisfy type constraints (this is
+ * compiler implementation limitation which might be lifted eventually,
+ * according to the documentation). So passing struct btf_ext in place of
+ * btf_dump_printf_fn_t would be generating compilation warning. Casting to
+ * void * avoids this issue.
+ *
+ * Also, two type compatibility checks for a function and function pointer are
+ * required because passing function reference into btf_dump__new() as
+ * btf_dump__new(..., my_callback, ...) and as btf_dump__new(...,
+ * &my_callback, ...) (note the explicit ampersand in the latter case) actually
+ * differs as far as __builtin_types_compatible_p() is concerned. Thus two
+ * checks are combined to detect callback argument.
+ *
+ * The rest works just like in case of ___libbpf_override() usage with symbol
+ * versioning.
+ */
+#define btf_dump__new(a1, a2, a3, a4) __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(a4), btf_dump_printf_fn_t) || \
+ __builtin_types_compatible_p(typeof(a4), void(void *, const char *, va_list)), \
+ btf_dump__new_deprecated((void *)a1, (void *)a2, (void *)a3, (void *)a4), \
+ btf_dump__new((void *)a1, (void *)a2, (void *)a3, (void *)a4))
+
LIBBPF_API void btf_dump__free(struct btf_dump *d);
LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
@@ -403,7 +459,8 @@ static inline bool btf_is_mod(const struct btf_type *t)
return kind == BTF_KIND_VOLATILE ||
kind == BTF_KIND_CONST ||
- kind == BTF_KIND_RESTRICT;
+ kind == BTF_KIND_RESTRICT ||
+ kind == BTF_KIND_TYPE_TAG;
}
static inline bool btf_is_func(const struct btf_type *t)
@@ -436,6 +493,11 @@ static inline bool btf_is_decl_tag(const struct btf_type *t)
return btf_kind(t) == BTF_KIND_DECL_TAG;
}
+static inline bool btf_is_type_tag(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_TYPE_TAG;
+}
+
static inline __u8 btf_int_encoding(const struct btf_type *t)
{
return BTF_INT_ENCODING(*(__u32 *)(t + 1));
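
Taken together, the btf.h changes above reshape a typical dedup-then-dump flow. A minimal migration sketch, not code from the patch; my_printf is a placeholder callback and error handling is abbreviated:

#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <bpf/btf.h>

/* placeholder vprintf-style callback (illustrative name) */
static void my_printf(void *ctx, const char *fmt, va_list args)
{
	vprintf(fmt, args);
}

static int dedup_and_dump(struct btf *btf, struct btf_ext *btf_ext)
{
	LIBBPF_OPTS(btf_dedup_opts, dedup_opts, .btf_ext = btf_ext);
	struct btf_dump *d;
	int id, n, err;

	/* old API: btf__dedup(btf, btf_ext, NULL) */
	err = btf__dedup(btf, &dedup_opts);
	if (err)
		return err;

	/* old API: btf_dump__new(btf, btf_ext, NULL, my_printf) */
	d = btf_dump__new(btf, my_printf, NULL /* ctx */, NULL /* opts */);
	if (!d)
		return -errno;

	n = btf__type_cnt(btf);
	for (id = 1; id < n && !err; id++)
		err = btf_dump__dump_type(d, id);

	btf_dump__free(d);
	return err;
}
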
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index 17db62b5002e..05f3e7dfec0a 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -77,9 +77,8 @@ struct btf_dump_data {
struct btf_dump {
const struct btf *btf;
- const struct btf_ext *btf_ext;
btf_dump_printf_fn_t printf_fn;
- struct btf_dump_opts opts;
+ void *cb_ctx;
int ptr_sz;
bool strip_mods;
bool skip_anon_defs;
@@ -138,29 +137,32 @@ static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...)
va_list args;
va_start(args, fmt);
- d->printf_fn(d->opts.ctx, fmt, args);
+ d->printf_fn(d->cb_ctx, fmt, args);
va_end(args);
}
static int btf_dump_mark_referenced(struct btf_dump *d);
static int btf_dump_resize(struct btf_dump *d);
-struct btf_dump *btf_dump__new(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const struct btf_dump_opts *opts,
- btf_dump_printf_fn_t printf_fn)
+DEFAULT_VERSION(btf_dump__new_v0_6_0, btf_dump__new, LIBBPF_0.6.0)
+struct btf_dump *btf_dump__new_v0_6_0(const struct btf *btf,
+ btf_dump_printf_fn_t printf_fn,
+ void *ctx,
+ const struct btf_dump_opts *opts)
{
struct btf_dump *d;
int err;
+ if (!printf_fn)
+ return libbpf_err_ptr(-EINVAL);
+
d = calloc(1, sizeof(struct btf_dump));
if (!d)
return libbpf_err_ptr(-ENOMEM);
d->btf = btf;
- d->btf_ext = btf_ext;
d->printf_fn = printf_fn;
- d->opts.ctx = opts ? opts->ctx : NULL;
+ d->cb_ctx = ctx;
d->ptr_sz = btf__pointer_size(btf) ? : sizeof(void *);
d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
@@ -186,6 +188,17 @@ err:
return libbpf_err_ptr(err);
}
+COMPAT_VERSION(btf_dump__new_deprecated, btf_dump__new, LIBBPF_0.0.4)
+struct btf_dump *btf_dump__new_deprecated(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const struct btf_dump_opts *opts,
+ btf_dump_printf_fn_t printf_fn)
+{
+ if (!printf_fn)
+ return libbpf_err_ptr(-EINVAL);
+ return btf_dump__new_v0_6_0(btf, printf_fn, opts ? opts->ctx : NULL, opts);
+}
+
static int btf_dump_resize(struct btf_dump *d)
{
int err, last_id = btf__type_cnt(d->btf) - 1;
@@ -317,6 +330,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d)
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
case BTF_KIND_DECL_TAG:
+ case BTF_KIND_TYPE_TAG:
d->type_states[t->type].referenced = 1;
break;
@@ -560,6 +574,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
+ case BTF_KIND_TYPE_TAG:
return btf_dump_order_type(d, t->type, through_ptr);
case BTF_KIND_FUNC_PROTO: {
@@ -734,6 +749,7 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
+ case BTF_KIND_TYPE_TAG:
btf_dump_emit_type(d, t->type, cont_id);
break;
case BTF_KIND_ARRAY:
@@ -1154,6 +1170,7 @@ skip_mod:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
case BTF_KIND_FUNC_PROTO:
+ case BTF_KIND_TYPE_TAG:
id = t->type;
break;
case BTF_KIND_ARRAY:
@@ -1322,6 +1339,11 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
case BTF_KIND_RESTRICT:
btf_dump_printf(d, " restrict");
break;
+ case BTF_KIND_TYPE_TAG:
+ btf_dump_emit_mods(d, decls);
+ name = btf_name_of(d, t->name_off);
+ btf_dump_printf(d, " __attribute__((btf_type_tag(\"%s\")))", name);
+ break;
case BTF_KIND_ARRAY: {
const struct btf_array *a = btf_array(t);
const struct btf_type *next_t;
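
For reference, the BTF_KIND_TYPE_TAG arm added to btf_dump_emit_type_chain() above makes tagged types round-trip through the dumper. A sketch of the effect, assuming a btf_type_tag-aware Clang; the tag string "user" is illustrative, not from the patch:

/* Input C, compiled with a Clang that emits BTF_KIND_TYPE_TAG: */
int __attribute__((btf_type_tag("user"))) *ptr;

/* btf_dump is then expected to re-emit the same chain, printing the tag
 * via the new case above:
 *   int __attribute__((btf_type_tag("user"))) *ptr;
 */
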
diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c
index 9934851ccde7..c28ab24d1bff 100644
--- a/tools/lib/bpf/gen_loader.c
+++ b/tools/lib/bpf/gen_loader.c
@@ -597,8 +597,9 @@ void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
struct ksym_desc *kdesc;
+ int i;
- for (int i = 0; i < gen->nr_ksyms; i++) {
+ for (i = 0; i < gen->nr_ksyms; i++) {
if (!strcmp(gen->ksyms[i].name, relo->name)) {
gen->ksyms[i].ref++;
return &gen->ksyms[i];
@@ -913,27 +914,27 @@ static void cleanup_relos(struct bpf_gen *gen, int insns)
}
void bpf_gen__prog_load(struct bpf_gen *gen,
- struct bpf_prog_load_params *load_attr, int prog_idx)
+ enum bpf_prog_type prog_type, const char *prog_name,
+ const char *license, struct bpf_insn *insns, size_t insn_cnt,
+ struct bpf_prog_load_opts *load_attr, int prog_idx)
{
int attr_size = offsetofend(union bpf_attr, fd_array);
- int prog_load_attr, license, insns, func_info, line_info;
+ int prog_load_attr, license_off, insns_off, func_info, line_info;
union bpf_attr attr;
memset(&attr, 0, attr_size);
- pr_debug("gen: prog_load: type %d insns_cnt %zd\n",
- load_attr->prog_type, load_attr->insn_cnt);
+ pr_debug("gen: prog_load: type %d insns_cnt %zd\n", prog_type, insn_cnt);
/* add license string to blob of bytes */
- license = add_data(gen, load_attr->license, strlen(load_attr->license) + 1);
+ license_off = add_data(gen, license, strlen(license) + 1);
/* add insns to blob of bytes */
- insns = add_data(gen, load_attr->insns,
- load_attr->insn_cnt * sizeof(struct bpf_insn));
+ insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn));
- attr.prog_type = load_attr->prog_type;
+ attr.prog_type = prog_type;
attr.expected_attach_type = load_attr->expected_attach_type;
attr.attach_btf_id = load_attr->attach_btf_id;
attr.prog_ifindex = load_attr->prog_ifindex;
attr.kern_version = 0;
- attr.insn_cnt = (__u32)load_attr->insn_cnt;
+ attr.insn_cnt = (__u32)insn_cnt;
attr.prog_flags = load_attr->prog_flags;
attr.func_info_rec_size = load_attr->func_info_rec_size;
@@ -946,15 +947,15 @@ void bpf_gen__prog_load(struct bpf_gen *gen,
line_info = add_data(gen, load_attr->line_info,
attr.line_info_cnt * attr.line_info_rec_size);
- memcpy(attr.prog_name, load_attr->name,
- min((unsigned)strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
+ memcpy(attr.prog_name, prog_name,
+ min((unsigned)strlen(prog_name), BPF_OBJ_NAME_LEN - 1));
prog_load_attr = add_data(gen, &attr, attr_size);
/* populate union bpf_attr with a pointer to license */
- emit_rel_store(gen, attr_field(prog_load_attr, license), license);
+ emit_rel_store(gen, attr_field(prog_load_attr, license), license_off);
/* populate union bpf_attr with a pointer to instructions */
- emit_rel_store(gen, attr_field(prog_load_attr, insns), insns);
+ emit_rel_store(gen, attr_field(prog_load_attr, insns), insns_off);
/* populate union bpf_attr with a pointer to func_info */
emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info);
@@ -986,12 +987,12 @@ void bpf_gen__prog_load(struct bpf_gen *gen,
emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
offsetof(union bpf_attr, attach_btf_obj_fd)));
}
- emit_relos(gen, insns);
+ emit_relos(gen, insns_off);
/* emit PROG_LOAD command */
emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size);
debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt);
/* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */
- cleanup_relos(gen, insns);
+ cleanup_relos(gen, insns_off);
if (gen->attach_kind)
emit_sys_close_blob(gen,
attr_field(prog_load_attr, attach_btf_obj_fd));
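
The reworked bpf_gen__prog_load() mirrors the unified low-level loader API that the rest of this series migrates libbpf onto. A minimal caller, sketched here assuming the BPF_MOV64_IMM()/BPF_EXIT_INSN() insn macros the selftests use; the program name "trivial" is illustrative:

#include <linux/bpf.h>
#include <bpf/bpf.h>

static int load_trivial_filter(void)
{
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),	/* r0 = 0 */
		BPF_EXIT_INSN(),		/* return r0 */
	};
	char log[4096];
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_buf = log,
		.log_size = sizeof(log),
		.log_level = 1,			/* ask verifier for a log */
	);

	/* prog_type, optional prog_name, license, insns, insn_cnt, opts */
	return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "trivial", "GPL",
			     insns, sizeof(insns) / sizeof(insns[0]), &opts);
}
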
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 7c74342bb668..f6faa33c80fa 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -197,6 +197,8 @@ enum kern_feature_id {
FEAT_PERF_LINK,
/* BTF_KIND_DECL_TAG support */
FEAT_BTF_DECL_TAG,
+ /* BTF_KIND_TYPE_TAG support */
+ FEAT_BTF_TYPE_TAG,
__FEAT_CNT,
};
@@ -221,7 +223,7 @@ struct reloc_desc {
struct bpf_sec_def;
typedef int (*init_fn_t)(struct bpf_program *prog, long cookie);
-typedef int (*preload_fn_t)(struct bpf_program *prog, struct bpf_prog_load_params *attr, long cookie);
+typedef int (*preload_fn_t)(struct bpf_program *prog, struct bpf_prog_load_opts *opts, long cookie);
typedef struct bpf_link *(*attach_fn_t)(const struct bpf_program *prog, long cookie);
/* stored as sec_def->cookie for all libbpf-supported SEC()s */
@@ -2076,6 +2078,7 @@ static const char *__btf_kind_str(__u16 kind)
case BTF_KIND_DATASEC: return "datasec";
case BTF_KIND_FLOAT: return "float";
case BTF_KIND_DECL_TAG: return "decl_tag";
+ case BTF_KIND_TYPE_TAG: return "type_tag";
default: return "unknown";
}
}
@@ -2588,8 +2591,10 @@ static bool btf_needs_sanitization(struct bpf_object *obj)
bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
+ bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
- return !has_func || !has_datasec || !has_func_global || !has_float || !has_decl_tag;
+ return !has_func || !has_datasec || !has_func_global || !has_float ||
+ !has_decl_tag || !has_type_tag;
}
static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
@@ -2599,6 +2604,7 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
+ bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
struct btf_type *t;
int i, j, vlen;
@@ -2657,6 +2663,10 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
*/
t->name_off = 0;
t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
+ } else if (!has_type_tag && btf_is_type_tag(t)) {
+ /* replace TYPE_TAG with a CONST */
+ t->name_off = 0;
+ t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0);
}
}
}
@@ -2752,13 +2762,12 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
t_var = btf__type_by_id(btf, vsi->type);
- var = btf_var(t_var);
-
- if (!btf_is_var(t_var)) {
+ if (!t_var || !btf_is_var(t_var)) {
pr_debug("Non-VAR type seen in section %s\n", name);
return -EINVAL;
}
+ var = btf_var(t_var);
if (var->linkage == BTF_VAR_STATIC)
continue;
@@ -3191,11 +3200,11 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
Elf_Scn *scn;
Elf64_Shdr *sh;
- /* ELF section indices are 1-based, so allocate +1 element to keep
- * indexing simple. Also include 0th invalid section into sec_cnt for
- * simpler and more traditional iteration logic.
+ /* ELF section indices are 0-based, but sec #0 is special "invalid"
+ * section. e_shnum does include sec #0, so e_shnum is the necessary
+ * size of an array to keep all the sections.
*/
- obj->efile.sec_cnt = 1 + obj->efile.ehdr->e_shnum;
+ obj->efile.sec_cnt = obj->efile.ehdr->e_shnum;
obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
if (!obj->efile.secs)
return -ENOMEM;
@@ -3271,8 +3280,12 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
obj->efile.btf_maps_shndx = idx;
} else if (strcmp(name, BTF_ELF_SEC) == 0) {
+ if (sh->sh_type != SHT_PROGBITS)
+ return -LIBBPF_ERRNO__FORMAT;
btf_data = data;
} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
+ if (sh->sh_type != SHT_PROGBITS)
+ return -LIBBPF_ERRNO__FORMAT;
btf_ext_data = data;
} else if (sh->sh_type == SHT_SYMTAB) {
/* already processed during the first pass above */
@@ -3303,6 +3316,10 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
} else if (sh->sh_type == SHT_REL) {
int targ_sec_idx = sh->sh_info; /* points to other section */
+ if (sh->sh_entsize != sizeof(Elf64_Rel) ||
+ targ_sec_idx >= obj->efile.sec_cnt)
+ return -LIBBPF_ERRNO__FORMAT;
+
/* Only do relo for section with exec instructions */
if (!section_have_execinstr(obj, targ_sec_idx) &&
strcmp(name, ".rel" STRUCT_OPS_SEC) &&
@@ -3555,7 +3572,7 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
sh = elf_sec_hdr(obj, scn);
- if (!sh)
+ if (!sh || sh->sh_entsize != sizeof(Elf64_Sym))
return -LIBBPF_ERRNO__FORMAT;
dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
@@ -4022,7 +4039,7 @@ static int
bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data)
{
const char *relo_sec_name, *sec_name;
- size_t sec_idx = shdr->sh_info;
+ size_t sec_idx = shdr->sh_info, sym_idx;
struct bpf_program *prog;
struct reloc_desc *relos;
int err, i, nrels;
@@ -4033,6 +4050,9 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Dat
Elf64_Sym *sym;
Elf64_Rel *rel;
+ if (sec_idx >= obj->efile.sec_cnt)
+ return -EINVAL;
+
scn = elf_sec_by_idx(obj, sec_idx);
scn_data = elf_sec_data(obj, scn);
@@ -4052,16 +4072,23 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Dat
return -LIBBPF_ERRNO__FORMAT;
}
- sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
+ sym_idx = ELF64_R_SYM(rel->r_info);
+ sym = elf_sym_by_idx(obj, sym_idx);
if (!sym) {
- pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n",
- relo_sec_name, (size_t)ELF64_R_SYM(rel->r_info), i);
+ pr_warn("sec '%s': symbol #%zu not found for relo #%d\n",
+ relo_sec_name, sym_idx, i);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+
+ if (sym->st_shndx >= obj->efile.sec_cnt) {
+ pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n",
+ relo_sec_name, sym_idx, (size_t)sym->st_shndx, i);
return -LIBBPF_ERRNO__FORMAT;
}
if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) {
pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
- relo_sec_name, (size_t)ELF64_R_SYM(rel->r_info), i);
+ relo_sec_name, (size_t)rel->r_offset, i);
return -LIBBPF_ERRNO__FORMAT;
}
@@ -4265,30 +4292,20 @@ int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
static int
bpf_object__probe_loading(struct bpf_object *obj)
{
- struct bpf_load_program_attr attr;
char *cp, errmsg[STRERR_BUFSIZE];
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
- int ret;
+ int ret, insn_cnt = ARRAY_SIZE(insns);
if (obj->gen_loader)
return 0;
/* make sure basic loading works */
-
- memset(&attr, 0, sizeof(attr));
- attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
- attr.insns = insns;
- attr.insns_cnt = ARRAY_SIZE(insns);
- attr.license = "GPL";
-
- ret = bpf_load_program_xattr(&attr, NULL, 0);
- if (ret < 0) {
- attr.prog_type = BPF_PROG_TYPE_TRACEPOINT;
- ret = bpf_load_program_xattr(&attr, NULL, 0);
- }
+ ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
+ if (ret < 0)
+ ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
if (ret < 0) {
ret = errno;
cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
@@ -4312,28 +4329,19 @@ static int probe_fd(int fd)
static int probe_kern_prog_name(void)
{
- struct bpf_load_program_attr attr;
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
- int ret;
+ int ret, insn_cnt = ARRAY_SIZE(insns);
/* make sure loading with name works */
-
- memset(&attr, 0, sizeof(attr));
- attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
- attr.insns = insns;
- attr.insns_cnt = ARRAY_SIZE(insns);
- attr.license = "GPL";
- attr.name = "test";
- ret = bpf_load_program_xattr(&attr, NULL, 0);
+ ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "test", "GPL", insns, insn_cnt, NULL);
return probe_fd(ret);
}
static int probe_kern_global_data(void)
{
- struct bpf_load_program_attr prg_attr;
struct bpf_create_map_attr map_attr;
char *cp, errmsg[STRERR_BUFSIZE];
struct bpf_insn insns[] = {
@@ -4342,7 +4350,7 @@ static int probe_kern_global_data(void)
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
- int ret, map;
+ int ret, map, insn_cnt = ARRAY_SIZE(insns);
memset(&map_attr, 0, sizeof(map_attr));
map_attr.map_type = BPF_MAP_TYPE_ARRAY;
@@ -4361,13 +4369,7 @@ static int probe_kern_global_data(void)
insns[0].imm = map;
- memset(&prg_attr, 0, sizeof(prg_attr));
- prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
- prg_attr.insns = insns;
- prg_attr.insns_cnt = ARRAY_SIZE(insns);
- prg_attr.license = "GPL";
-
- ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
+ ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
close(map);
return probe_fd(ret);
}
@@ -4468,6 +4470,22 @@ static int probe_kern_btf_decl_tag(void)
strs, sizeof(strs)));
}
+static int probe_kern_btf_type_tag(void)
+{
+ static const char strs[] = "\0tag";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* attr */
+ BTF_TYPE_TYPE_TAG_ENC(1, 1), /* [2] */
+ /* ptr */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), /* [3] */
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs)));
+}
+
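
The three raw types in probe_kern_btf_type_tag() above encode the minimal chain a tag-aware compiler produces for a tagged pointer, roughly equivalent to this declaration (a sketch; the tag string simply matches the probe's string section):

/* [1] int  <-  [2] type_tag "tag"  <-  [3] ptr */
int __attribute__((btf_type_tag("tag"))) *p;
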
static int probe_kern_array_mmap(void)
{
struct bpf_create_map_attr attr = {
@@ -4483,30 +4501,24 @@ static int probe_kern_array_mmap(void)
static int probe_kern_exp_attach_type(void)
{
- struct bpf_load_program_attr attr;
+ LIBBPF_OPTS(bpf_prog_load_opts, opts, .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE);
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
+ int fd, insn_cnt = ARRAY_SIZE(insns);
- memset(&attr, 0, sizeof(attr));
/* use any valid combination of program type and (optional)
* non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
* to see if kernel supports expected_attach_type field for
* BPF_PROG_LOAD command
*/
- attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
- attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE;
- attr.insns = insns;
- attr.insns_cnt = ARRAY_SIZE(insns);
- attr.license = "GPL";
-
- return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
+ fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
+ return probe_fd(fd);
}
static int probe_kern_probe_read_kernel(void)
{
- struct bpf_load_program_attr attr;
struct bpf_insn insns[] = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
@@ -4515,26 +4527,21 @@ static int probe_kern_probe_read_kernel(void)
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
BPF_EXIT_INSN(),
};
+ int fd, insn_cnt = ARRAY_SIZE(insns);
- memset(&attr, 0, sizeof(attr));
- attr.prog_type = BPF_PROG_TYPE_KPROBE;
- attr.insns = insns;
- attr.insns_cnt = ARRAY_SIZE(insns);
- attr.license = "GPL";
-
- return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
+ fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL);
+ return probe_fd(fd);
}
static int probe_prog_bind_map(void)
{
- struct bpf_load_program_attr prg_attr;
struct bpf_create_map_attr map_attr;
char *cp, errmsg[STRERR_BUFSIZE];
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
- int ret, map, prog;
+ int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
memset(&map_attr, 0, sizeof(map_attr));
map_attr.map_type = BPF_MAP_TYPE_ARRAY;
@@ -4551,13 +4558,7 @@ static int probe_prog_bind_map(void)
return ret;
}
- memset(&prg_attr, 0, sizeof(prg_attr));
- prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
- prg_attr.insns = insns;
- prg_attr.insns_cnt = ARRAY_SIZE(insns);
- prg_attr.license = "GPL";
-
- prog = bpf_load_program_xattr(&prg_attr, NULL, 0);
+ prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
if (prog < 0) {
close(map);
return 0;
@@ -4602,19 +4603,14 @@ static int probe_module_btf(void)
static int probe_perf_link(void)
{
- struct bpf_load_program_attr attr;
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
int prog_fd, link_fd, err;
- memset(&attr, 0, sizeof(attr));
- attr.prog_type = BPF_PROG_TYPE_TRACEPOINT;
- attr.insns = insns;
- attr.insns_cnt = ARRAY_SIZE(insns);
- attr.license = "GPL";
- prog_fd = bpf_load_program_xattr(&attr, NULL, 0);
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
+ insns, ARRAY_SIZE(insns), NULL);
if (prog_fd < 0)
return -errno;
@@ -4687,6 +4683,9 @@ static struct kern_feature_desc {
[FEAT_BTF_DECL_TAG] = {
"BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
},
+ [FEAT_BTF_TYPE_TAG] = {
+ "BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
+ },
};
static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
@@ -6374,16 +6373,16 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attac
/* this is called as prog->sec_def->preload_fn for libbpf-supported sec_defs */
static int libbpf_preload_prog(struct bpf_program *prog,
- struct bpf_prog_load_params *attr, long cookie)
+ struct bpf_prog_load_opts *opts, long cookie)
{
enum sec_def_flags def = cookie;
/* old kernels might not support specifying expected_attach_type */
if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE))
- attr->expected_attach_type = 0;
+ opts->expected_attach_type = 0;
if (def & SEC_SLEEPABLE)
- attr->prog_flags |= BPF_F_SLEEPABLE;
+ opts->prog_flags |= BPF_F_SLEEPABLE;
if ((prog->type == BPF_PROG_TYPE_TRACING ||
prog->type == BPF_PROG_TYPE_LSM ||
@@ -6402,21 +6401,22 @@ static int libbpf_preload_prog(struct bpf_program *prog,
/* but by now libbpf common logic is not utilizing
 	 * prog->attach_btf_obj_fd/prog->attach_btf_id anymore because
- * this callback is called after attrs were populated by
- * libbpf, so this callback has to update attr explicitly here
+ * this callback is called after opts were populated by
+ * libbpf, so this callback has to update opts explicitly here
*/
- attr->attach_btf_obj_fd = btf_obj_fd;
- attr->attach_btf_id = btf_type_id;
+ opts->attach_btf_obj_fd = btf_obj_fd;
+ opts->attach_btf_id = btf_type_id;
}
return 0;
}
-static int
-load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
- char *license, __u32 kern_version, int *pfd)
+static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_program *prog,
+ struct bpf_insn *insns, int insns_cnt,
+ const char *license, __u32 kern_version,
+ int *prog_fd)
{
- struct bpf_prog_load_params load_attr = {};
- struct bpf_object *obj = prog->obj;
+ LIBBPF_OPTS(bpf_prog_load_opts, load_attr);
+ const char *prog_name = NULL;
char *cp, errmsg[STRERR_BUFSIZE];
size_t log_buf_size = 0;
char *log_buf = NULL;
@@ -6435,13 +6435,9 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
if (!insns || !insns_cnt)
return -EINVAL;
- load_attr.prog_type = prog->type;
load_attr.expected_attach_type = prog->expected_attach_type;
if (kernel_supports(obj, FEAT_PROG_NAME))
- load_attr.name = prog->name;
- load_attr.insns = insns;
- load_attr.insn_cnt = insns_cnt;
- load_attr.license = license;
+ prog_name = prog->name;
load_attr.attach_btf_id = prog->attach_btf_id;
load_attr.attach_prog_fd = prog->attach_prog_fd;
load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
@@ -6475,9 +6471,10 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
}
if (obj->gen_loader) {
- bpf_gen__prog_load(obj->gen_loader, &load_attr,
+ bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
+ license, insns, insns_cnt, &load_attr,
prog - obj->programs);
- *pfd = -1;
+ *prog_fd = -1;
return 0;
}
retry_load:
@@ -6490,8 +6487,8 @@ retry_load:
}
load_attr.log_buf = log_buf;
- load_attr.log_buf_sz = log_buf_size;
- ret = libbpf__bpf_prog_load(&load_attr);
+ load_attr.log_size = log_buf_size;
+ ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
if (ret >= 0) {
if (log_buf && load_attr.log_level)
@@ -6515,7 +6512,7 @@ retry_load:
}
}
- *pfd = ret;
+ *prog_fd = ret;
ret = 0;
goto out;
}
@@ -6537,19 +6534,19 @@ retry_load:
pr_warn("-- BEGIN DUMP LOG ---\n");
pr_warn("\n%s\n", log_buf);
pr_warn("-- END LOG --\n");
- } else if (load_attr.insn_cnt >= BPF_MAXINSNS) {
- pr_warn("Program too large (%zu insns), at most %d insns\n",
- load_attr.insn_cnt, BPF_MAXINSNS);
+ } else if (insns_cnt >= BPF_MAXINSNS) {
+ pr_warn("Program too large (%d insns), at most %d insns\n",
+ insns_cnt, BPF_MAXINSNS);
ret = -LIBBPF_ERRNO__PROG2BIG;
- } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
+ } else if (prog->type != BPF_PROG_TYPE_KPROBE) {
/* Wrong program type? */
int fd;
- load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
load_attr.expected_attach_type = 0;
load_attr.log_buf = NULL;
- load_attr.log_buf_sz = 0;
- fd = libbpf__bpf_prog_load(&load_attr);
+ load_attr.log_size = 0;
+ fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, prog_name, license,
+ insns, insns_cnt, &load_attr);
if (fd >= 0) {
close(fd);
ret = -LIBBPF_ERRNO__PROGTYPE;
@@ -6591,11 +6588,12 @@ static int bpf_program__record_externs(struct bpf_program *prog)
return 0;
}
-int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
+static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog,
+ const char *license, __u32 kern_ver)
{
int err = 0, fd, i;
- if (prog->obj->loaded) {
+ if (obj->loaded) {
pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
return libbpf_err(-EINVAL);
}
@@ -6621,10 +6619,11 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
prog->name, prog->instances.nr);
}
- if (prog->obj->gen_loader)
+ if (obj->gen_loader)
bpf_program__record_externs(prog);
- err = load_program(prog, prog->insns, prog->insns_cnt,
- license, kern_ver, &fd);
+ err = bpf_object_load_prog_instance(obj, prog,
+ prog->insns, prog->insns_cnt,
+ license, kern_ver, &fd);
if (!err)
prog->instances.fds[0] = fd;
goto out;
@@ -6652,8 +6651,9 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
continue;
}
- err = load_program(prog, result.new_insn_ptr,
- result.new_insn_cnt, license, kern_ver, &fd);
+ err = bpf_object_load_prog_instance(obj, prog,
+ result.new_insn_ptr, result.new_insn_cnt,
+ license, kern_ver, &fd);
if (err) {
pr_warn("Loading the %dth instance of program '%s' failed\n",
i, prog->name);
@@ -6670,6 +6670,11 @@ out:
return libbpf_err(err);
}
+int bpf_program__load(struct bpf_program *prog, const char *license, __u32 kern_ver)
+{
+ return bpf_object_load_prog(prog->obj, prog, license, kern_ver);
+}
+
static int
bpf_object__load_progs(struct bpf_object *obj, int log_level)
{
@@ -6693,7 +6698,7 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
continue;
}
prog->log_level |= log_level;
- err = bpf_program__load(prog, obj->license, obj->kern_version);
+ err = bpf_object_load_prog(obj, prog, obj->license, obj->kern_version);
if (err)
return err;
}
@@ -7733,7 +7738,7 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
return 0;
err_unpin_maps:
- while ((map = bpf_map__prev(map, obj))) {
+ while ((map = bpf_object__prev_map(obj, map))) {
if (!map->pin_path)
continue;
@@ -7813,7 +7818,7 @@ int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
return 0;
err_unpin_programs:
- while ((prog = bpf_program__prev(prog, obj))) {
+ while ((prog = bpf_object__prev_program(obj, prog))) {
char buf[PATH_MAX];
int len;
@@ -8154,9 +8159,11 @@ int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
return 0;
}
+static int bpf_program_nth_fd(const struct bpf_program *prog, int n);
+
int bpf_program__fd(const struct bpf_program *prog)
{
- return bpf_program__nth_fd(prog, 0);
+ return bpf_program_nth_fd(prog, 0);
}
size_t bpf_program__size(const struct bpf_program *prog)
@@ -8202,7 +8209,10 @@ int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
return 0;
}
-int bpf_program__nth_fd(const struct bpf_program *prog, int n)
+__attribute__((alias("bpf_program_nth_fd")))
+int bpf_program__nth_fd(const struct bpf_program *prog, int n);
+
+static int bpf_program_nth_fd(const struct bpf_program *prog, int n)
{
int fd;
@@ -8281,6 +8291,20 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog,
prog->expected_attach_type = type;
}
+__u32 bpf_program__flags(const struct bpf_program *prog)
+{
+ return prog->prog_flags;
+}
+
+int bpf_program__set_extra_flags(struct bpf_program *prog, __u32 extra_flags)
+{
+ if (prog->obj->loaded)
+ return libbpf_err(-EBUSY);
+
+ prog->prog_flags |= extra_flags;
+ return 0;
+}
+
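
The new getter/setter pair is meant to be used between bpf_object__open() and bpf_object__load(); a minimal sketch, where prog is a placeholder bpf_program handle:

	/* OR in BPF_F_SLEEPABLE before load; returns -EBUSY once the
	 * owning object has already been loaded.
	 */
	if (!(bpf_program__flags(prog) & BPF_F_SLEEPABLE)) {
		err = bpf_program__set_extra_flags(prog, BPF_F_SLEEPABLE);
		if (err)
			return err;
	}
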
#define SEC_DEF(sec_pfx, ptype, atype, flags, ...) { \
.sec = sec_pfx, \
.prog_type = BPF_PROG_TYPE_##ptype, \
@@ -9028,7 +9052,10 @@ int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
pr_warn("error: inner_map_fd already specified\n");
return libbpf_err(-EINVAL);
}
- zfree(&map->inner_map);
+ if (map->inner_map) {
+ bpf_map__destroy(map->inner_map);
+ zfree(&map->inner_map);
+ }
map->inner_map_fd = fd;
return 0;
}
@@ -9145,21 +9172,12 @@ long libbpf_get_error(const void *ptr)
return -errno;
}
-int bpf_prog_load(const char *file, enum bpf_prog_type type,
- struct bpf_object **pobj, int *prog_fd)
-{
- struct bpf_prog_load_attr attr;
-
- memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
- attr.file = file;
- attr.prog_type = type;
- attr.expected_attach_type = 0;
-
- return bpf_prog_load_xattr(&attr, pobj, prog_fd);
-}
-
+__attribute__((alias("bpf_prog_load_xattr2")))
int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
- struct bpf_object **pobj, int *prog_fd)
+ struct bpf_object **pobj, int *prog_fd);
+
+static int bpf_prog_load_xattr2(const struct bpf_prog_load_attr *attr,
+ struct bpf_object **pobj, int *prog_fd)
{
struct bpf_object_open_attr open_attr = {};
struct bpf_program *prog, *first_prog = NULL;
@@ -9230,6 +9248,20 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
return 0;
}
+COMPAT_VERSION(bpf_prog_load_deprecated, bpf_prog_load, LIBBPF_0.0.1)
+int bpf_prog_load_deprecated(const char *file, enum bpf_prog_type type,
+ struct bpf_object **pobj, int *prog_fd)
+{
+ struct bpf_prog_load_attr attr;
+
+ memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
+ attr.file = file;
+ attr.prog_type = type;
+ attr.expected_attach_type = 0;
+
+ return bpf_prog_load_xattr2(&attr, pobj, prog_fd);
+}
+
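
The COMPAT_VERSION()/DEFAULT_VERSION() pair used above relies on GNU symbol versioning. Assuming the usual libbpf_internal.h definitions, the macros boil down to .symver directives, sketched here for bpf_prog_load:

/* COMPAT_VERSION(bpf_prog_load_deprecated, bpf_prog_load, LIBBPF_0.0.1)
 * expands to roughly:
 */
asm(".symver bpf_prog_load_deprecated,bpf_prog_load@LIBBPF_0.0.1");

/* Old binaries keep resolving bpf_prog_load@LIBBPF_0.0.1 to this
 * (file, type, pobj, prog_fd) wrapper; newly linked code binds to the
 * default-versioned 6-argument bpf_prog_load@@LIBBPF_0.6.0 instead.
 */
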
struct bpf_link {
int (*detach)(struct bpf_link *link);
void (*dealloc)(struct bpf_link *link);
@@ -10575,11 +10607,18 @@ error:
static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
struct perf_buffer_params *p);
-struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
- const struct perf_buffer_opts *opts)
+DEFAULT_VERSION(perf_buffer__new_v0_6_0, perf_buffer__new, LIBBPF_0.6.0)
+struct perf_buffer *perf_buffer__new_v0_6_0(int map_fd, size_t page_cnt,
+ perf_buffer_sample_fn sample_cb,
+ perf_buffer_lost_fn lost_cb,
+ void *ctx,
+ const struct perf_buffer_opts *opts)
{
struct perf_buffer_params p = {};
- struct perf_event_attr attr = { 0, };
+ struct perf_event_attr attr = {};
+
+ if (!OPTS_VALID(opts, perf_buffer_opts))
+ return libbpf_err_ptr(-EINVAL);
attr.config = PERF_COUNT_SW_BPF_OUTPUT;
attr.type = PERF_TYPE_SOFTWARE;
@@ -10588,29 +10627,62 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
attr.wakeup_events = 1;
p.attr = &attr;
- p.sample_cb = opts ? opts->sample_cb : NULL;
- p.lost_cb = opts ? opts->lost_cb : NULL;
- p.ctx = opts ? opts->ctx : NULL;
+ p.sample_cb = sample_cb;
+ p.lost_cb = lost_cb;
+ p.ctx = ctx;
return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
}
-struct perf_buffer *
-perf_buffer__new_raw(int map_fd, size_t page_cnt,
- const struct perf_buffer_raw_opts *opts)
+COMPAT_VERSION(perf_buffer__new_deprecated, perf_buffer__new, LIBBPF_0.0.4)
+struct perf_buffer *perf_buffer__new_deprecated(int map_fd, size_t page_cnt,
+ const struct perf_buffer_opts *opts)
+{
+ return perf_buffer__new_v0_6_0(map_fd, page_cnt,
+ opts ? opts->sample_cb : NULL,
+ opts ? opts->lost_cb : NULL,
+ opts ? opts->ctx : NULL,
+ NULL);
+}
+
+DEFAULT_VERSION(perf_buffer__new_raw_v0_6_0, perf_buffer__new_raw, LIBBPF_0.6.0)
+struct perf_buffer *perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt,
+ struct perf_event_attr *attr,
+ perf_buffer_event_fn event_cb, void *ctx,
+ const struct perf_buffer_raw_opts *opts)
{
struct perf_buffer_params p = {};
- p.attr = opts->attr;
- p.event_cb = opts->event_cb;
- p.ctx = opts->ctx;
- p.cpu_cnt = opts->cpu_cnt;
- p.cpus = opts->cpus;
- p.map_keys = opts->map_keys;
+ if (page_cnt == 0 || !attr)
+ return libbpf_err_ptr(-EINVAL);
+
+ if (!OPTS_VALID(opts, perf_buffer_raw_opts))
+ return libbpf_err_ptr(-EINVAL);
+
+ p.attr = attr;
+ p.event_cb = event_cb;
+ p.ctx = ctx;
+ p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0);
+ p.cpus = OPTS_GET(opts, cpus, NULL);
+ p.map_keys = OPTS_GET(opts, map_keys, NULL);
return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
}
+COMPAT_VERSION(perf_buffer__new_raw_deprecated, perf_buffer__new_raw, LIBBPF_0.0.4)
+struct perf_buffer *perf_buffer__new_raw_deprecated(int map_fd, size_t page_cnt,
+ const struct perf_buffer_raw_opts *opts)
+{
+ LIBBPF_OPTS(perf_buffer_raw_opts, inner_opts,
+ .cpu_cnt = opts->cpu_cnt,
+ .cpus = opts->cpus,
+ .map_keys = opts->map_keys,
+ );
+
+ return perf_buffer__new_raw_v0_6_0(map_fd, page_cnt, opts->attr,
+ opts->event_cb, opts->ctx, &inner_opts);
+}
+
static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
struct perf_buffer_params *p)
{
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 9de0f299706b..4ec69f224342 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -262,8 +262,8 @@ LIBBPF_API const struct bpf_insn *bpf_program__insns(const struct bpf_program *p
*/
LIBBPF_API size_t bpf_program__insn_cnt(const struct bpf_program *prog);
-LIBBPF_API int bpf_program__load(struct bpf_program *prog, char *license,
- __u32 kern_version);
+LIBBPF_DEPRECATED_SINCE(0, 6, "use bpf_object__load() instead")
+LIBBPF_API int bpf_program__load(struct bpf_program *prog, const char *license, __u32 kern_version);
LIBBPF_API int bpf_program__fd(const struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
LIBBPF_API int bpf_program__pin_instance(struct bpf_program *prog,
@@ -431,7 +431,6 @@ bpf_program__attach_iter(const struct bpf_program *prog,
* one instance. In this case bpf_program__fd(prog) is equal to
* bpf_program__nth_fd(prog, 0).
*/
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insns() for getting bpf_program instructions")
struct bpf_prog_prep_result {
/*
* If not NULL, load new instruction array.
@@ -494,6 +493,9 @@ LIBBPF_API void
bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type);
+LIBBPF_API __u32 bpf_program__flags(const struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_extra_flags(struct bpf_program *prog, __u32 extra_flags);
+
LIBBPF_API int
bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
const char *attach_func_name);
@@ -676,8 +678,9 @@ struct bpf_prog_load_attr {
LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
struct bpf_object **pobj, int *prog_fd);
-LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type,
- struct bpf_object **pobj, int *prog_fd);
+LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__open() and bpf_object__load() instead")
+LIBBPF_API int bpf_prog_load_deprecated(const char *file, enum bpf_prog_type type,
+ struct bpf_object **pobj, int *prog_fd);
/* XDP related API */
struct xdp_link_info {
@@ -775,18 +778,52 @@ typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);
/* common use perf buffer options */
struct perf_buffer_opts {
- /* if specified, sample_cb is called for each sample */
- perf_buffer_sample_fn sample_cb;
- /* if specified, lost_cb is called for each batch of lost samples */
- perf_buffer_lost_fn lost_cb;
- /* ctx is provided to sample_cb and lost_cb */
- void *ctx;
+ union {
+ size_t sz;
+ struct { /* DEPRECATED: will be removed in v1.0 */
+ /* if specified, sample_cb is called for each sample */
+ perf_buffer_sample_fn sample_cb;
+ /* if specified, lost_cb is called for each batch of lost samples */
+ perf_buffer_lost_fn lost_cb;
+ /* ctx is provided to sample_cb and lost_cb */
+ void *ctx;
+ };
+ };
};
+#define perf_buffer_opts__last_field sz
+/**
+ * @brief **perf_buffer__new()** creates BPF perfbuf manager for a specified
+ * BPF_PERF_EVENT_ARRAY map
+ * @param map_fd FD of BPF_PERF_EVENT_ARRAY BPF map that will be used by BPF
+ * code to send data over to user-space
+ * @param page_cnt number of memory pages allocated for each per-CPU buffer
+ * @param sample_cb function called on each received data record
+ * @param lost_cb function called when record loss has occurred
+ * @param ctx user-provided extra context passed into *sample_cb* and *lost_cb*
+ * @return a new instance of struct perf_buffer on success, NULL on error with
+ * *errno* containing an error code
+ */
LIBBPF_API struct perf_buffer *
perf_buffer__new(int map_fd, size_t page_cnt,
+ perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx,
const struct perf_buffer_opts *opts);
+LIBBPF_API struct perf_buffer *
+perf_buffer__new_v0_6_0(int map_fd, size_t page_cnt,
+ perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx,
+ const struct perf_buffer_opts *opts);
+
+LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use new variant of perf_buffer__new() instead")
+struct perf_buffer *perf_buffer__new_deprecated(int map_fd, size_t page_cnt,
+ const struct perf_buffer_opts *opts);
+
+#define perf_buffer__new(...) ___libbpf_overload(___perf_buffer_new, __VA_ARGS__)
+#define ___perf_buffer_new6(map_fd, page_cnt, sample_cb, lost_cb, ctx, opts) \
+ perf_buffer__new(map_fd, page_cnt, sample_cb, lost_cb, ctx, opts)
+#define ___perf_buffer_new3(map_fd, page_cnt, opts) \
+ perf_buffer__new_deprecated(map_fd, page_cnt, opts)
+
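
Under the new calling convention the callbacks move into the argument list. A self-contained consumer sketch; handle_event/handle_lost are placeholder callbacks, and 8 pages per CPU is an arbitrary choice:

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

static void handle_event(void *ctx, int cpu, void *data, __u32 size)
{
	/* consume one sample; layout is defined by the producing BPF prog */
}

static void handle_lost(void *ctx, int cpu, __u64 cnt)
{
	fprintf(stderr, "lost %llu samples on CPU %d\n",
		(unsigned long long)cnt, cpu);
}

static int consume(int map_fd)
{
	struct perf_buffer *pb;
	int err;

	/* old API: sample_cb/lost_cb/ctx lived in struct perf_buffer_opts */
	pb = perf_buffer__new(map_fd, 8 /* pages per CPU */,
			      handle_event, handle_lost, NULL /* ctx */,
			      NULL /* opts */);
	if (!pb)
		return -errno;
	while ((err = perf_buffer__poll(pb, 100 /* timeout, ms */)) >= 0)
		;
	perf_buffer__free(pb);
	return err;
}
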
enum bpf_perf_event_ret {
LIBBPF_PERF_EVENT_DONE = 0,
LIBBPF_PERF_EVENT_ERROR = -1,
@@ -800,12 +837,21 @@ typedef enum bpf_perf_event_ret
/* raw perf buffer options, giving most power and control */
struct perf_buffer_raw_opts {
- /* perf event attrs passed directly into perf_event_open() */
- struct perf_event_attr *attr;
- /* raw event callback */
- perf_buffer_event_fn event_cb;
- /* ctx is provided to event_cb */
- void *ctx;
+ union {
+ struct {
+ size_t sz;
+ long :0;
+ long :0;
+ };
+ struct { /* DEPRECATED: will be removed in v1.0 */
+ /* perf event attrs passed directly into perf_event_open() */
+ struct perf_event_attr *attr;
+ /* raw event callback */
+ perf_buffer_event_fn event_cb;
+ /* ctx is provided to event_cb */
+ void *ctx;
+ };
+ };
/* if cpu_cnt == 0, open all on all possible CPUs (up to the number of
* max_entries of given PERF_EVENT_ARRAY map)
*/
@@ -815,11 +861,28 @@ struct perf_buffer_raw_opts {
/* if cpu_cnt > 0, map_keys specify map keys to set per-CPU FDs for */
int *map_keys;
};
+#define perf_buffer_raw_opts__last_field map_keys
LIBBPF_API struct perf_buffer *
-perf_buffer__new_raw(int map_fd, size_t page_cnt,
+perf_buffer__new_raw(int map_fd, size_t page_cnt, struct perf_event_attr *attr,
+ perf_buffer_event_fn event_cb, void *ctx,
const struct perf_buffer_raw_opts *opts);
+LIBBPF_API struct perf_buffer *
+perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt, struct perf_event_attr *attr,
+ perf_buffer_event_fn event_cb, void *ctx,
+ const struct perf_buffer_raw_opts *opts);
+
+LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use new variant of perf_buffer__new_raw() instead")
+struct perf_buffer *perf_buffer__new_raw_deprecated(int map_fd, size_t page_cnt,
+ const struct perf_buffer_raw_opts *opts);
+
+#define perf_buffer__new_raw(...) ___libbpf_overload(___perf_buffer_new_raw, __VA_ARGS__)
+#define ___perf_buffer_new_raw6(map_fd, page_cnt, attr, event_cb, ctx, opts) \
+ perf_buffer__new_raw(map_fd, page_cnt, attr, event_cb, ctx, opts)
+#define ___perf_buffer_new_raw3(map_fd, page_cnt, opts) \
+ perf_buffer__new_raw_deprecated(map_fd, page_cnt, opts)
+
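
perf_buffer__new_raw() moves attr/event_cb/ctx out of the opts struct in the same way; the bench_ringbufs conversion later in this patch does exactly this. A fragment-level sketch, with handle_raw_event as a placeholder perf_buffer_event_fn:

	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
		.sample_type = PERF_SAMPLE_RAW,
	};
	struct perf_buffer *pb;

	/* old API: .attr/.event_cb/.ctx lived in perf_buffer_raw_opts */
	pb = perf_buffer__new_raw(map_fd, 8 /* pages */, &attr,
				  handle_raw_event, NULL /* ctx */,
				  NULL /* opts: cpu_cnt/cpus/map_keys */);
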
LIBBPF_API void perf_buffer__free(struct perf_buffer *pb);
LIBBPF_API int perf_buffer__epoll_fd(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms);
@@ -918,12 +981,15 @@ struct bpf_prog_info_linear {
__u8 data[];
};
+LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper")
LIBBPF_API struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays);
+LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper")
LIBBPF_API void
bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear);
+LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper")
LIBBPF_API void
bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 43580eb47740..6a59514a48cf 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -395,10 +395,23 @@ LIBBPF_0.6.0 {
bpf_object__next_program;
bpf_object__prev_map;
bpf_object__prev_program;
+ bpf_prog_load_deprecated;
+ bpf_prog_load;
+ bpf_program__flags;
bpf_program__insn_cnt;
bpf_program__insns;
+ bpf_program__set_extra_flags;
btf__add_btf;
btf__add_decl_tag;
+ btf__add_type_tag;
+ btf__dedup;
+ btf__dedup_deprecated;
btf__raw_data;
btf__type_cnt;
+ btf_dump__new;
+ btf_dump__new_deprecated;
+ perf_buffer__new;
+ perf_buffer__new_deprecated;
+ perf_buffer__new_raw;
+ perf_buffer__new_raw_deprecated;
} LIBBPF_0.5.0;
diff --git a/tools/lib/bpf/libbpf_common.h b/tools/lib/bpf/libbpf_common.h
index aaa1efbf6f51..b21cefc9c3b6 100644
--- a/tools/lib/bpf/libbpf_common.h
+++ b/tools/lib/bpf/libbpf_common.h
@@ -41,6 +41,18 @@
#define __LIBBPF_MARK_DEPRECATED_0_7(X)
#endif
+/* This set of internal macros allows "function overloading" based on the
+ * number of arguments provided by the user, in a backwards-compatible way,
+ * during the transition to libbpf 1.0.
+ * It's an ugly but necessary evil that will be cleaned up when we get to
+ * 1.0. See the bpf_prog_load() overload for an example.
+ */
+#define ___libbpf_cat(A, B) A ## B
+#define ___libbpf_select(NAME, NUM) ___libbpf_cat(NAME, NUM)
+#define ___libbpf_nth(_1, _2, _3, _4, _5, _6, N, ...) N
+#define ___libbpf_cnt(...) ___libbpf_nth(__VA_ARGS__, 6, 5, 4, 3, 2, 1)
+#define ___libbpf_overload(NAME, ...) ___libbpf_select(NAME, ___libbpf_cnt(__VA_ARGS__))(__VA_ARGS__)
+
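
Concretely, ___libbpf_cnt() counts the call's arguments and ___libbpf_select() pastes that count onto the macro family name, so a 2-argument call picks the `2` variant. The expansion for the btf__dedup() overload earlier in this patch, step by step:

/* btf__dedup(btf, &opts)
 *   -> ___libbpf_overload(___btf_dedup, btf, &opts)
 *   -> ___libbpf_select(___btf_dedup, 2)(btf, &opts)
 *   -> ___btf_dedup2(btf, &opts)
 *   -> btf__dedup(btf, &opts)  // the real symbol now: the preprocessor
 *                              // never re-expands a macro recursively
 */
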
/* Helper macro to declare and initialize libbpf options struct
*
* This dance with uninitialized declaration, followed by memset to zero,
@@ -54,7 +66,7 @@
* including any extra padding, it with memset() and then assigns initial
* values provided by users in struct initializer-syntax as varargs.
*/
-#define DECLARE_LIBBPF_OPTS(TYPE, NAME, ...) \
+#define LIBBPF_OPTS(TYPE, NAME, ...) \
struct TYPE NAME = ({ \
memset(&NAME, 0, sizeof(struct TYPE)); \
(struct TYPE) { \
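
With the rename, callers declare option structs exactly as before, just under the shorter name (DECLARE_LIBBPF_OPTS survives as an alias in libbpf_legacy.h, in the hunk further down); a usage sketch:

	/* declares `opts`, zero-fills it (padding included) via memset(),
	 * then applies the designated initializers
	 */
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_level = 2,
		.prog_flags = BPF_F_SLEEPABLE,
	);
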
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index aeb79e3a8ff9..f7ac349650a1 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -73,6 +73,8 @@
BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz)
#define BTF_TYPE_DECL_TAG_ENC(value, type, component_idx) \
BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), type), (component_idx)
+#define BTF_TYPE_TYPE_TAG_ENC(value, type) \
+ BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TYPE_TAG, 0, 0), type)
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
@@ -276,37 +278,6 @@ int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
const char *str_sec, size_t str_len);
-struct bpf_prog_load_params {
- enum bpf_prog_type prog_type;
- enum bpf_attach_type expected_attach_type;
- const char *name;
- const struct bpf_insn *insns;
- size_t insn_cnt;
- const char *license;
- __u32 kern_version;
- __u32 attach_prog_fd;
- __u32 attach_btf_obj_fd;
- __u32 attach_btf_id;
- __u32 prog_ifindex;
- __u32 prog_btf_fd;
- __u32 prog_flags;
-
- __u32 func_info_rec_size;
- const void *func_info;
- __u32 func_info_cnt;
-
- __u32 line_info_rec_size;
- const void *line_info;
- __u32 line_info_cnt;
-
- __u32 log_level;
- char *log_buf;
- size_t log_buf_sz;
- int *fd_array;
-};
-
-int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr);
-
struct bpf_create_map_params {
const char *name;
enum bpf_map_type map_type;
diff --git a/tools/lib/bpf/libbpf_legacy.h b/tools/lib/bpf/libbpf_legacy.h
index 5ba5c9beccfa..bb03c568af7b 100644
--- a/tools/lib/bpf/libbpf_legacy.h
+++ b/tools/lib/bpf/libbpf_legacy.h
@@ -69,6 +69,7 @@ enum libbpf_strict_mode {
LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode);
+#define DECLARE_LIBBPF_OPTS LIBBPF_OPTS
#ifdef __cplusplus
} /* extern "C" */
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index 68f2dbf364aa..02c401e314c7 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -68,21 +68,21 @@ static void
probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex)
{
- struct bpf_load_program_attr xattr = {};
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
int fd;
switch (prog_type) {
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
- xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
+ opts.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
break;
case BPF_PROG_TYPE_CGROUP_SOCKOPT:
- xattr.expected_attach_type = BPF_CGROUP_GETSOCKOPT;
+ opts.expected_attach_type = BPF_CGROUP_GETSOCKOPT;
break;
case BPF_PROG_TYPE_SK_LOOKUP:
- xattr.expected_attach_type = BPF_SK_LOOKUP;
+ opts.expected_attach_type = BPF_SK_LOOKUP;
break;
case BPF_PROG_TYPE_KPROBE:
- xattr.kern_version = get_kernel_version();
+ opts.kern_version = get_kernel_version();
break;
case BPF_PROG_TYPE_UNSPEC:
case BPF_PROG_TYPE_SOCKET_FILTER:
@@ -115,13 +115,11 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
break;
}
- xattr.prog_type = prog_type;
- xattr.insns = insns;
- xattr.insns_cnt = insns_cnt;
- xattr.license = "GPL";
- xattr.prog_ifindex = ifindex;
+ opts.prog_ifindex = ifindex;
+ opts.log_buf = buf;
+ opts.log_size = buf_len;
- fd = bpf_load_program_xattr(&xattr, buf, buf_len);
+	fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, &opts);
if (fd >= 0)
close(fd);
}
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index f677dccdeae4..594b206fa674 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -2650,6 +2650,7 @@ static int emit_elf_data_sec(struct bpf_linker *linker, const char *sec_name,
static int finalize_btf(struct bpf_linker *linker)
{
+ LIBBPF_OPTS(btf_dedup_opts, opts);
struct btf *btf = linker->btf;
const void *raw_data;
int i, j, id, err;
@@ -2686,7 +2687,8 @@ static int finalize_btf(struct bpf_linker *linker)
return err;
}
- err = btf__dedup(linker->btf, linker->btf_ext, NULL);
+ opts.btf_ext = linker->btf_ext;
+ err = btf__dedup(linker->btf, &opts);
if (err) {
pr_warn("BTF dedup failed: %d\n", err);
return err;
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index 81f8fbc85e70..fdb22f5405c9 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -364,7 +364,6 @@ int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
static enum xsk_prog get_xsk_prog(void)
{
enum xsk_prog detected = XSK_PROG_FALLBACK;
- struct bpf_load_program_attr prog_attr;
struct bpf_create_map_attr map_attr;
__u32 size_out, retval, duration;
char data_in = 0, data_out;
@@ -375,7 +374,7 @@ static enum xsk_prog get_xsk_prog(void)
BPF_EMIT_CALL(BPF_FUNC_redirect_map),
BPF_EXIT_INSN(),
};
- int prog_fd, map_fd, ret;
+ int prog_fd, map_fd, ret, insn_cnt = ARRAY_SIZE(insns);
memset(&map_attr, 0, sizeof(map_attr));
map_attr.map_type = BPF_MAP_TYPE_XSKMAP;
@@ -389,13 +388,7 @@ static enum xsk_prog get_xsk_prog(void)
insns[0].imm = map_fd;
- memset(&prog_attr, 0, sizeof(prog_attr));
- prog_attr.prog_type = BPF_PROG_TYPE_XDP;
- prog_attr.insns = insns;
- prog_attr.insns_cnt = ARRAY_SIZE(insns);
- prog_attr.license = "GPL";
-
- prog_fd = bpf_load_program_xattr(&prog_attr, NULL, 0);
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
if (prog_fd < 0) {
close(map_fd);
return detected;
@@ -495,10 +488,13 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
};
struct bpf_insn *progs[] = {prog, prog_redirect_flags};
enum xsk_prog option = get_xsk_prog();
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .log_buf = log_buf,
+ .log_size = log_buf_size,
+ );
- prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, progs[option], insns_cnt[option],
- "LGPL-2.1 or BSD-2-Clause", 0, log_buf,
- log_buf_size);
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "LGPL-2.1 or BSD-2-Clause",
+ progs[option], insns_cnt[option], &opts);
if (prog_fd < 0) {
pr_warn("BPF log buffer:\n%s", log_buf);
return prog_fd;
@@ -725,14 +721,12 @@ static int xsk_link_lookup(int ifindex, __u32 *prog_id, int *link_fd)
static bool xsk_probe_bpf_link(void)
{
- DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
- .flags = XDP_FLAGS_SKB_MODE);
- struct bpf_load_program_attr prog_attr;
+ LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = XDP_FLAGS_SKB_MODE);
struct bpf_insn insns[2] = {
BPF_MOV64_IMM(BPF_REG_0, XDP_PASS),
BPF_EXIT_INSN()
};
- int prog_fd, link_fd = -1;
+ int prog_fd, link_fd = -1, insn_cnt = ARRAY_SIZE(insns);
int ifindex_lo = 1;
bool ret = false;
int err;
@@ -744,13 +738,7 @@ static bool xsk_probe_bpf_link(void)
if (link_fd >= 0)
return true;
- memset(&prog_attr, 0, sizeof(prog_attr));
- prog_attr.prog_type = BPF_PROG_TYPE_XDP;
- prog_attr.insns = insns;
- prog_attr.insns_cnt = ARRAY_SIZE(insns);
- prog_attr.license = "GPL";
-
- prog_fd = bpf_load_program_xattr(&prog_attr, NULL, 0);
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
if (prog_fd < 0)
return ret;
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 62fafbeb4672..5d42db2e129a 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -23,9 +23,8 @@ BPF_GCC ?= $(shell command -v bpf-gcc;)
SAN_CFLAGS ?=
CFLAGS += -g -O0 -rdynamic -Wall $(GENFLAGS) $(SAN_CFLAGS) \
-I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \
- -I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT) \
- -Dbpf_prog_load=bpf_prog_test_load \
- -Dbpf_load_program=bpf_test_load_program
+ -I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT)
+LDFLAGS += $(SAN_CFLAGS)
LDLIBS += -lcap -lelf -lz -lrt -lpthread
# Silence some warnings when compiled with clang
@@ -46,10 +45,8 @@ ifneq ($(BPF_GCC),)
TEST_GEN_PROGS += test_progs-bpf_gcc
endif
-TEST_GEN_FILES = test_lwt_ip_encap.o \
- test_tc_edt.o
-TEST_FILES = xsk_prereqs.sh \
- $(wildcard progs/btf_dump_test_case_*.c)
+TEST_GEN_FILES = test_lwt_ip_encap.o test_tc_edt.o
+TEST_FILES = xsk_prereqs.sh $(wildcard progs/btf_dump_test_case_*.c)
# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
@@ -108,7 +105,10 @@ endif
OVERRIDE_TARGETS := 1
override define CLEAN
$(call msg,CLEAN)
- $(Q)$(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN)
+ $(Q)$(RM) -r $(TEST_GEN_PROGS)
+ $(Q)$(RM) -r $(TEST_GEN_PROGS_EXTENDED)
+ $(Q)$(RM) -r $(TEST_GEN_FILES)
+ $(Q)$(RM) -r $(EXTRA_CLEAN)
$(Q)$(MAKE) -C bpf_testmod clean
$(Q)$(MAKE) docs-clean
endef
@@ -170,7 +170,7 @@ $(OUTPUT)/%:%.c
$(OUTPUT)/urandom_read: urandom_read.c
$(call msg,BINARY,,$@)
- $(Q)$(CC) $(LDFLAGS) -o $@ $< $(LDLIBS) -Wl,--build-id=sha1
+ $(Q)$(CC) $(LDFLAGS) $< $(LDLIBS) -Wl,--build-id=sha1 -o $@
$(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(wildcard bpf_testmod/Makefile bpf_testmod/*.[ch])
$(call msg,MOD,,$@)
@@ -178,10 +178,6 @@ $(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(wildcard bpf_testmod/Makefile bpf_tes
$(Q)$(MAKE) $(submake_extras) -C bpf_testmod
$(Q)cp bpf_testmod/bpf_testmod.ko $@
-$(OUTPUT)/test_stub.o: test_stub.c $(BPFOBJ)
- $(call msg,CC,,$@)
- $(Q)$(CC) -c $(CFLAGS) -o $@ $<
-
DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool
$(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT)
@@ -194,18 +190,24 @@ $(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT)
TEST_GEN_PROGS_EXTENDED += $(DEFAULT_BPFTOOL)
-$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/test_stub.o $(BPFOBJ)
-
-$(OUTPUT)/test_dev_cgroup: cgroup_helpers.c
-$(OUTPUT)/test_skb_cgroup_id_user: cgroup_helpers.c
-$(OUTPUT)/test_sock: cgroup_helpers.c
-$(OUTPUT)/test_sock_addr: cgroup_helpers.c
-$(OUTPUT)/test_sockmap: cgroup_helpers.c
-$(OUTPUT)/test_tcpnotify_user: cgroup_helpers.c trace_helpers.c
-$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c
-$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c
-$(OUTPUT)/test_sock_fields: cgroup_helpers.c
-$(OUTPUT)/test_sysctl: cgroup_helpers.c
+$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ)
+
+$(OUTPUT)/test_dev_cgroup: cgroup_helpers.c testing_helpers.o
+$(OUTPUT)/test_skb_cgroup_id_user: cgroup_helpers.c testing_helpers.o
+$(OUTPUT)/test_sock: cgroup_helpers.c testing_helpers.o
+$(OUTPUT)/test_sock_addr: cgroup_helpers.c testing_helpers.o
+$(OUTPUT)/test_sockmap: cgroup_helpers.c testing_helpers.o
+$(OUTPUT)/test_tcpnotify_user: cgroup_helpers.c trace_helpers.c testing_helpers.o
+$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c testing_helpers.o
+$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c testing_helpers.o
+$(OUTPUT)/test_sock_fields: cgroup_helpers.c testing_helpers.o
+$(OUTPUT)/test_sysctl: cgroup_helpers.c testing_helpers.o
+$(OUTPUT)/test_tag: testing_helpers.o
+$(OUTPUT)/test_lirc_mode2_user: testing_helpers.o
+$(OUTPUT)/xdping: testing_helpers.o
+$(OUTPUT)/flow_dissector_load: testing_helpers.o
+$(OUTPUT)/test_maps: testing_helpers.o
+$(OUTPUT)/test_verifier: testing_helpers.o
BPFTOOL ?= $(DEFAULT_BPFTOOL)
$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
@@ -231,16 +233,16 @@ docs-clean:
prefix= OUTPUT=$(OUTPUT)/ DESTDIR=$(OUTPUT)/ $@
$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
- ../../../include/uapi/linux/bpf.h \
+ $(APIDIR)/linux/bpf.h \
| $(BUILD_DIR)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \
EXTRA_CFLAGS='-g -O0' \
DESTDIR=$(SCRATCH_DIR) prefix= all install_headers
ifneq ($(BPFOBJ),$(HOST_BPFOBJ))
-$(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
- ../../../include/uapi/linux/bpf.h \
- | $(HOST_BUILD_DIR)/libbpf
+$(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
+ $(APIDIR)/linux/bpf.h \
+ | $(HOST_BUILD_DIR)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) \
EXTRA_CFLAGS='-g -O0' \
OUTPUT=$(HOST_BUILD_DIR)/libbpf/ CC=$(HOSTCC) LD=$(HOSTLD) \
@@ -304,12 +306,12 @@ $(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h
# $3 - CFLAGS
define CLANG_BPF_BUILD_RULE
$(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
- $(Q)$(CLANG) $3 -O2 -target bpf -c $1 -o $2 -mcpu=v3
+ $(Q)$(CLANG) $3 -O2 -target bpf -c $1 -mcpu=v3 -o $2
endef
# Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32
define CLANG_NOALU32_BPF_BUILD_RULE
$(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
- $(Q)$(CLANG) $3 -O2 -target bpf -c $1 -o $2 -mcpu=v2
+ $(Q)$(CLANG) $3 -O2 -target bpf -c $1 -mcpu=v2 -o $2
endef
# Build BPF object using GCC
define GCC_BPF_BUILD_RULE
@@ -471,13 +473,12 @@ TRUNNER_TESTS_DIR := prog_tests
TRUNNER_BPF_PROGS_DIR := progs
TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \
network_helpers.c testing_helpers.c \
- btf_helpers.c flow_dissector_load.h
+ btf_helpers.c flow_dissector_load.h
TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
ima_setup.sh \
$(wildcard progs/btf_dump_test_case_*.c)
TRUNNER_BPF_BUILD_RULE := CLANG_BPF_BUILD_RULE
-TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(CLANG_CFLAGS)
-TRUNNER_BPF_CFLAGS += -DENABLE_ATOMICS_TESTS
+TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(CLANG_CFLAGS) -DENABLE_ATOMICS_TESTS
$(eval $(call DEFINE_TEST_RUNNER,test_progs))
# Define test_progs-no_alu32 test runner.
@@ -539,7 +540,7 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o $(OUTPUT)/testing_helpers.o \
$(OUTPUT)/bench_ringbufs.o \
$(OUTPUT)/bench_bloom_filter_map.o
$(call msg,BINARY,,$@)
- $(Q)$(CC) $(LDFLAGS) -o $@ $(filter %.a %.o,$^) $(LDLIBS)
+ $(Q)$(CC) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst
index 5e287e445f75..42ef250c7acc 100644
--- a/tools/testing/selftests/bpf/README.rst
+++ b/tools/testing/selftests/bpf/README.rst
@@ -204,16 +204,17 @@ __ https://reviews.llvm.org/D93563
btf_tag test and Clang version
==============================
-The btf_tag selftest require LLVM support to recognize the btf_decl_tag attribute.
-It was introduced in `Clang 14`__.
+The btf_tag selftest requires LLVM support to recognize the btf_decl_tag and
+btf_type_tag attributes. They were introduced in `Clang 14` [0_, 1_].
-Without it, the btf_tag selftest will be skipped and you will observe:
+Without them, the btf_tag selftest will be skipped and you will observe:
.. code-block:: console
#<test_num> btf_tag:SKIP
-__ https://reviews.llvm.org/D111588
+.. _0: https://reviews.llvm.org/D111588
+.. _1: https://reviews.llvm.org/D111199
Clang dependencies for static linking tests
===========================================
diff --git a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c
index 6eeeed2913e6..5bcb8a8cdeb2 100644
--- a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c
+++ b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c
@@ -63,29 +63,34 @@ static const struct argp_option opts[] = {
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
+ long ret;
+
switch (key) {
case ARG_NR_ENTRIES:
- args.nr_entries = strtol(arg, NULL, 10);
- if (args.nr_entries == 0) {
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > UINT_MAX) {
fprintf(stderr, "Invalid nr_entries count.");
argp_usage(state);
}
+ args.nr_entries = ret;
break;
case ARG_NR_HASH_FUNCS:
- args.nr_hash_funcs = strtol(arg, NULL, 10);
- if (args.nr_hash_funcs == 0 || args.nr_hash_funcs > 15) {
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > 15) {
fprintf(stderr,
"The bloom filter must use 1 to 15 hash functions.");
argp_usage(state);
}
+ args.nr_hash_funcs = ret;
break;
case ARG_VALUE_SIZE:
- args.value_size = strtol(arg, NULL, 10);
- if (args.value_size < 2 || args.value_size > 256) {
+ ret = strtol(arg, NULL, 10);
+ if (ret < 2 || ret > 256) {
fprintf(stderr,
"Invalid value size. Must be between 2 and 256 bytes");
argp_usage(state);
}
+ args.value_size = ret;
break;
default:
return ARGP_ERR_UNKNOWN;
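The rewritten parse_arg() above parses into a wide `long` first, range-checks, and only then assigns to the narrower struct field, so an over-large or negative argument can no longer wrap silently. Below is a minimal standalone sketch of the same idiom; the `parse_u32` helper and the `main` driver are illustrative, not part of the selftest, and it assumes an LP64 target where `long` is wider than `unsigned int` (as the selftest code itself does):

```c
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a decimal argument into an unsigned 32-bit setting.
 * Returns 0 on success, -1 on junk or out-of-range input.
 */
static int parse_u32(const char *arg, unsigned int *out)
{
	char *end;
	long ret;

	ret = strtol(arg, &end, 10);
	if (end == arg || *end != '\0')
		return -1;	/* empty string or trailing garbage */
	if (ret < 1 || ret > UINT_MAX)
		return -1;	/* zero, negative, or too large */
	*out = ret;		/* safe: checked to fit in u32 */
	return 0;
}

int main(int argc, char **argv)
{
	unsigned int nr_entries;

	if (argc < 2 || parse_u32(argv[1], &nr_entries)) {
		fprintf(stderr, "Invalid nr_entries count.\n");
		return 1;
	}
	printf("nr_entries = %u\n", nr_entries);
	return 0;
}
```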
diff --git a/tools/testing/selftests/bpf/benchs/bench_ringbufs.c b/tools/testing/selftests/bpf/benchs/bench_ringbufs.c
index d167bffac679..52d4a2f91dbd 100644
--- a/tools/testing/selftests/bpf/benchs/bench_ringbufs.c
+++ b/tools/testing/selftests/bpf/benchs/bench_ringbufs.c
@@ -394,11 +394,6 @@ static void perfbuf_libbpf_setup()
{
struct perfbuf_libbpf_ctx *ctx = &perfbuf_libbpf_ctx;
struct perf_event_attr attr;
- struct perf_buffer_raw_opts pb_opts = {
- .event_cb = perfbuf_process_sample_raw,
- .ctx = (void *)(long)0,
- .attr = &attr,
- };
struct bpf_link *link;
ctx->skel = perfbuf_setup_skeleton();
@@ -423,7 +418,8 @@ static void perfbuf_libbpf_setup()
}
ctx->perfbuf = perf_buffer__new_raw(bpf_map__fd(ctx->skel->maps.perfbuf),
- args.perfbuf_sz, &pb_opts);
+ args.perfbuf_sz, &attr,
+ perfbuf_process_sample_raw, NULL, NULL);
if (!ctx->perfbuf) {
fprintf(stderr, "failed to create perfbuf\n");
exit(1);
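This hunk tracks libbpf's reworked perf_buffer__new_raw(): the perf_event_attr, event callback, and callback context that used to travel inside struct perf_buffer_raw_opts are now direct parameters, with a trailing opts pointer kept for future extension. A hedged sketch of the new call shape follows; the attr field choices, page count, and helper name are placeholders, not taken from the benchmark:

```c
#include <bpf/libbpf.h>
#include <linux/perf_event.h>

static enum bpf_perf_event_ret
handle_event(void *ctx, int cpu, struct perf_event_header *event)
{
	/* consume one raw event record */
	return LIBBPF_PERF_EVENT_CONT;
}

static struct perf_buffer *open_raw_perfbuf(int map_fd, size_t page_cnt)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
		.sample_type = PERF_SAMPLE_RAW,
	};

	/* attr, event_cb and ctx are now direct parameters; the final
	 * opts pointer is reserved for future fields and may be NULL.
	 */
	return perf_buffer__new_raw(map_fd, page_cnt, &attr,
				    handle_event, /*ctx=*/NULL, /*opts=*/NULL);
}
```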
diff --git a/tools/testing/selftests/bpf/btf_helpers.c b/tools/testing/selftests/bpf/btf_helpers.c
index b5b6b013a245..b5941d514e17 100644
--- a/tools/testing/selftests/bpf/btf_helpers.c
+++ b/tools/testing/selftests/bpf/btf_helpers.c
@@ -25,11 +25,12 @@ static const char * const btf_kind_str_mapping[] = {
[BTF_KIND_DATASEC] = "DATASEC",
[BTF_KIND_FLOAT] = "FLOAT",
[BTF_KIND_DECL_TAG] = "DECL_TAG",
+ [BTF_KIND_TYPE_TAG] = "TYPE_TAG",
};
static const char *btf_kind_str(__u16 kind)
{
- if (kind > BTF_KIND_DECL_TAG)
+ if (kind > BTF_KIND_TYPE_TAG)
return "UNKNOWN";
return btf_kind_str_mapping[kind];
}
@@ -109,6 +110,7 @@ int fprintf_btf_type_raw(FILE *out, const struct btf *btf, __u32 id)
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_TYPEDEF:
+ case BTF_KIND_TYPE_TAG:
fprintf(out, " type_id=%u", t->type);
break;
case BTF_KIND_ARRAY: {
@@ -238,7 +240,6 @@ const char *btf_type_c_dump(const struct btf *btf)
static char buf[16 * 1024];
FILE *buf_file;
struct btf_dump *d = NULL;
- struct btf_dump_opts opts = {};
int err, i;
buf_file = fmemopen(buf, sizeof(buf) - 1, "w");
@@ -247,22 +248,26 @@ const char *btf_type_c_dump(const struct btf *btf)
return NULL;
}
- opts.ctx = buf_file;
- d = btf_dump__new(btf, NULL, &opts, btf_dump_printf);
+ d = btf_dump__new(btf, btf_dump_printf, buf_file, NULL);
if (libbpf_get_error(d)) {
fprintf(stderr, "Failed to create btf_dump instance: %ld\n", libbpf_get_error(d));
- return NULL;
+ goto err_out;
}
for (i = 1; i < btf__type_cnt(btf); i++) {
err = btf_dump__dump_type(d, i);
if (err) {
fprintf(stderr, "Failed to dump type [%d]: %d\n", i, err);
- return NULL;
+ goto err_out;
}
}
+ btf_dump__free(d);
fflush(buf_file);
fclose(buf_file);
return buf;
+err_out:
+ btf_dump__free(d);
+ fclose(buf_file);
+ return NULL;
}
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.h b/tools/testing/selftests/bpf/flow_dissector_load.h
index 9d0acc2fc6cc..f40b585f4e7e 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.h
+++ b/tools/testing/selftests/bpf/flow_dissector_load.h
@@ -4,6 +4,7 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
+#include "testing_helpers.h"
static inline int bpf_flow_load(struct bpf_object **obj,
const char *path,
@@ -18,7 +19,7 @@ static inline int bpf_flow_load(struct bpf_object **obj,
int prog_array_fd;
int ret, fd, i;
- ret = bpf_prog_load(path, BPF_PROG_TYPE_FLOW_DISSECTOR, obj,
+ ret = bpf_prog_test_load(path, BPF_PROG_TYPE_FLOW_DISSECTOR, obj,
prog_fd);
if (ret)
return ret;
diff --git a/tools/testing/selftests/bpf/get_cgroup_id_user.c b/tools/testing/selftests/bpf/get_cgroup_id_user.c
index 99628e1a1e58..3a7b82bd9e94 100644
--- a/tools/testing/selftests/bpf/get_cgroup_id_user.c
+++ b/tools/testing/selftests/bpf/get_cgroup_id_user.c
@@ -19,6 +19,7 @@
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
+#include "testing_helpers.h"
#include "bpf_rlimit.h"
#define CHECK(condition, tag, format...) ({ \
@@ -66,8 +67,8 @@ int main(int argc, char **argv)
if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno))
return 1;
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "bpf_prog_test_load", "err %d errno %d\n", err, errno))
goto cleanup_cgroup_env;
cgidmap_fd = bpf_find_map(__func__, obj, "cg_ids");
diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c
index 5861446d0777..837f67c6bfda 100644
--- a/tools/testing/selftests/bpf/prog_tests/align.c
+++ b/tools/testing/selftests/bpf/prog_tests/align.c
@@ -594,6 +594,12 @@ static int do_test_single(struct bpf_align_test *test)
struct bpf_insn *prog = test->insns;
int prog_type = test->prog_type;
char bpf_vlog_copy[32768];
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .prog_flags = BPF_F_STRICT_ALIGNMENT,
+ .log_buf = bpf_vlog,
+ .log_size = sizeof(bpf_vlog),
+ .log_level = 2,
+ );
const char *line_ptr;
int cur_line = -1;
int prog_len, i;
@@ -601,9 +607,8 @@ static int do_test_single(struct bpf_align_test *test)
int ret;
prog_len = probe_filter_length(prog);
- fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
- prog, prog_len, BPF_F_STRICT_ALIGNMENT,
- "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 2);
+ fd_prog = bpf_prog_load(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
+ prog, prog_len, &opts);
if (fd_prog < 0 && test->result != REJECT) {
printf("Failed to load program.\n");
printf("%s", bpf_vlog);
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 9454331aaf85..3e10abce3e5a 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -699,14 +699,13 @@ static void test_bpf_percpu_hash_map(void)
char buf[64];
void *val;
- val = malloc(8 * bpf_num_possible_cpus());
-
skel = bpf_iter_bpf_percpu_hash_map__open();
if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open",
"skeleton open failed\n"))
return;
skel->rodata->num_cpus = bpf_num_possible_cpus();
+ val = malloc(8 * bpf_num_possible_cpus());
err = bpf_iter_bpf_percpu_hash_map__load(skel);
if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__load",
@@ -770,6 +769,7 @@ free_link:
bpf_link__destroy(link);
out:
bpf_iter_bpf_percpu_hash_map__destroy(skel);
+ free(val);
}
static void test_bpf_array_map(void)
@@ -870,14 +870,13 @@ static void test_bpf_percpu_array_map(void)
void *val;
int len;
- val = malloc(8 * bpf_num_possible_cpus());
-
skel = bpf_iter_bpf_percpu_array_map__open();
if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open",
"skeleton open failed\n"))
return;
skel->rodata->num_cpus = bpf_num_possible_cpus();
+ val = malloc(8 * bpf_num_possible_cpus());
err = bpf_iter_bpf_percpu_array_map__load(skel);
if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__load",
@@ -933,6 +932,7 @@ free_link:
bpf_link__destroy(link);
out:
bpf_iter_bpf_percpu_array_map__destroy(skel);
+ free(val);
}
/* An iterator program deletes all local storage in a map. */
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
index eb8eeebe6935..0a6c5f00abd4 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
@@ -48,7 +48,7 @@ void serial_test_bpf_obj_id(void)
bzero(zeros, sizeof(zeros));
for (i = 0; i < nr_iters; i++) {
now = time(NULL);
- err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT,
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT,
&objs[i], &prog_fds[i]);
/* test_obj_id.o is a dumb prog. It should never fail
* to load.
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index ac596cb06e40..4aa6343dc4c8 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -3939,6 +3939,23 @@ static struct btf_raw_test raw_tests[] = {
.btf_load_err = true,
.err_str = "Invalid component_idx",
},
+{
+ .descr = "type_tag test #1",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [2] */
+ BTF_PTR_ENC(2), /* [3] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+},
}; /* struct btf_raw_test raw_tests[] */
@@ -4046,11 +4063,9 @@ static void *btf_raw_create(const struct btf_header *hdr,
next_str_idx < strs_cnt ? strs_idx[next_str_idx] : NULL;
done:
+ free(strs_idx);
if (err) {
- if (raw_btf)
- free(raw_btf);
- if (strs_idx)
- free(strs_idx);
+ free(raw_btf);
return NULL;
}
return raw_btf;
@@ -6629,7 +6644,7 @@ struct btf_dedup_test {
struct btf_dedup_opts opts;
};
-const struct btf_dedup_test dedup_tests[] = {
+static struct btf_dedup_test dedup_tests[] = {
{
.descr = "dedup: unused strings filtering",
@@ -6649,9 +6664,6 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0int\0long"),
},
- .opts = {
- .dont_resolve_fwds = false,
- },
},
{
.descr = "dedup: strings deduplication",
@@ -6674,9 +6686,6 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0int\0long int"),
},
- .opts = {
- .dont_resolve_fwds = false,
- },
},
{
.descr = "dedup: struct example #1",
@@ -6757,9 +6766,6 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0a\0b\0c\0d\0int\0float\0next\0s"),
},
- .opts = {
- .dont_resolve_fwds = false,
- },
},
{
.descr = "dedup: struct <-> fwd resolution w/ hash collision",
@@ -6802,8 +6808,7 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_STR_SEC("\0s\0x"),
},
.opts = {
- .dont_resolve_fwds = false,
- .dedup_table_size = 1, /* force hash collisions */
+ .force_collisions = true, /* force hash collisions */
},
},
{
@@ -6849,8 +6854,7 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_STR_SEC("\0s\0x"),
},
.opts = {
- .dont_resolve_fwds = false,
- .dedup_table_size = 1, /* force hash collisions */
+ .force_collisions = true, /* force hash collisions */
},
},
{
@@ -6874,15 +6878,16 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_RESTRICT_ENC(8), /* [11] restrict */
BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
- BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 18),
BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */
BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] decl_tag */
BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */
BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 8), /* [18] type_tag */
BTF_END_RAW,
},
- BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q"),
+ BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R"),
},
.expect = {
.raw_types = {
@@ -6903,18 +6908,16 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_RESTRICT_ENC(8), /* [11] restrict */
BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
- BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 18),
BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */
BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] decl_tag */
BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */
BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 8), /* [18] type_tag */
BTF_END_RAW,
},
- BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q"),
- },
- .opts = {
- .dont_resolve_fwds = false,
+ BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R"),
},
},
{
@@ -6967,9 +6970,6 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0int\0some other int\0float"),
},
- .opts = {
- .dont_resolve_fwds = false,
- },
},
{
.descr = "dedup: enum fwd resolution",
@@ -7011,9 +7011,6 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
},
- .opts = {
- .dont_resolve_fwds = false,
- },
},
{
.descr = "dedup: datasec and vars pass-through",
@@ -7056,8 +7053,7 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_STR_SEC("\0.bss\0t"),
},
.opts = {
- .dont_resolve_fwds = false,
- .dedup_table_size = 1
+ .force_collisions = true
},
},
{
@@ -7101,9 +7097,6 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0t\0a1\0a2\0f\0tag"),
},
- .opts = {
- .dont_resolve_fwds = false,
- },
},
{
.descr = "dedup: func/func_param tags",
@@ -7154,9 +7147,6 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0a1\0a2\0f\0tag1\0tag2\0tag3"),
},
- .opts = {
- .dont_resolve_fwds = false,
- },
},
{
.descr = "dedup: struct/struct_member tags",
@@ -7202,9 +7192,6 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"),
},
- .opts = {
- .dont_resolve_fwds = false,
- },
},
{
.descr = "dedup: typedef tags",
@@ -7235,8 +7222,134 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0t\0tag1\0tag2\0tag3"),
},
- .opts = {
- .dont_resolve_fwds = false,
+},
+{
+ .descr = "dedup: btf_type_tag #1",
+ .input = {
+ .raw_types = {
+ /* ptr -> tag2 -> tag1 -> int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ /* ptr -> tag2 -> tag1 -> int */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [5] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 5), /* [6] */
+ BTF_PTR_ENC(6), /* [7] */
+ /* ptr -> tag1 -> int */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [8] */
+ BTF_PTR_ENC(8), /* [9] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag1\0tag2"),
+ },
+ .expect = {
+ .raw_types = {
+ /* ptr -> tag2 -> tag1 -> int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ /* ptr -> tag1 -> int */
+ BTF_PTR_ENC(2), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag1\0tag2"),
+ },
+},
+{
+ .descr = "dedup: btf_type_tag #2",
+ .input = {
+ .raw_types = {
+ /* ptr -> tag2 -> tag1 -> int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ /* ptr -> tag2 -> int */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */
+ BTF_PTR_ENC(5), /* [6] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag1\0tag2"),
+ },
+ .expect = {
+ .raw_types = {
+ /* ptr -> tag2 -> tag1 -> int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ /* ptr -> tag2 -> int */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */
+ BTF_PTR_ENC(5), /* [6] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag1\0tag2"),
+ },
+},
+{
+ .descr = "dedup: btf_type_tag #3",
+ .input = {
+ .raw_types = {
+ /* ptr -> tag2 -> tag1 -> int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ /* ptr -> tag1 -> tag2 -> int */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 5), /* [6] */
+ BTF_PTR_ENC(6), /* [7] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag1\0tag2"),
+ },
+ .expect = {
+ .raw_types = {
+ /* ptr -> tag2 -> tag1 -> int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
+ BTF_PTR_ENC(3), /* [4] */
+ /* ptr -> tag1 -> tag2 -> int */
+ BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 5), /* [6] */
+ BTF_PTR_ENC(6), /* [7] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag1\0tag2"),
+ },
+},
+{
+ .descr = "dedup: btf_type_tag #4",
+ .input = {
+ .raw_types = {
+ /* ptr -> tag1 -> int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_PTR_ENC(2), /* [3] */
+ /* ptr -> tag1 -> long */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [4] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 4), /* [5] */
+ BTF_PTR_ENC(5), /* [6] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag1"),
+ },
+ .expect = {
+ .raw_types = {
+ /* ptr -> tag1 -> int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
+ BTF_PTR_ENC(2), /* [3] */
+ /* ptr -> tag1 -> long */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [4] */
+ BTF_TYPE_TAG_ENC(NAME_NTH(1), 4), /* [5] */
+ BTF_PTR_ENC(5), /* [6] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag1"),
},
},
@@ -7257,6 +7370,7 @@ static int btf_type_size(const struct btf_type *t)
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_FLOAT:
+ case BTF_KIND_TYPE_TAG:
return base_size;
case BTF_KIND_INT:
return base_size + sizeof(__u32);
@@ -7295,7 +7409,7 @@ static void dump_btf_strings(const char *strs, __u32 len)
static void do_test_dedup(unsigned int test_num)
{
- const struct btf_dedup_test *test = &dedup_tests[test_num - 1];
+ struct btf_dedup_test *test = &dedup_tests[test_num - 1];
__u32 test_nr_types, expect_nr_types, test_btf_size, expect_btf_size;
const struct btf_header *test_hdr, *expect_hdr;
struct btf *test_btf = NULL, *expect_btf = NULL;
@@ -7339,7 +7453,8 @@ static void do_test_dedup(unsigned int test_num)
goto done;
}
- err = btf__dedup(test_btf, NULL, &test->opts);
+ test->opts.sz = sizeof(test->opts);
+ err = btf__dedup(test_btf, &test->opts);
if (CHECK(err, "btf_dedup failed errno:%d", err)) {
err = -1;
goto done;
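The dedup tests now pass a sized btf_dedup_opts straight to the two-argument btf__dedup(); the test sets opts.sz by hand only because its array is statically initialized, whereas a normal caller would let LIBBPF_OPTS() fill it in. A minimal sketch with an illustrative wrapper name:

```c
#include <stdbool.h>
#include <bpf/btf.h>

/* Deduplicate @btf; LIBBPF_OPTS() initializes opts.sz so the sized
 * struct is valid without manual setup.
 */
static int dedup_btf(struct btf *btf, bool force_collisions)
{
	LIBBPF_OPTS(btf_dedup_opts, opts,
		.force_collisions = force_collisions,	/* test-only knob */
	);

	return btf__dedup(btf, &opts);
}
```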
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c
index 64554fd33547..9d3b8d7a1537 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c
@@ -92,7 +92,7 @@ struct s2 {\n\
int *f3;\n\
};\n\n", "c_dump");
- err = btf__dedup(btf2, NULL, NULL);
+ err = btf__dedup(btf2, NULL);
if (!ASSERT_OK(err, "btf_dedup"))
goto cleanup;
@@ -186,7 +186,7 @@ static void test_split_fwd_resolve() {
"\t'f1' type_id=7 bits_offset=0\n"
"\t'f2' type_id=9 bits_offset=64");
- err = btf__dedup(btf2, NULL, NULL);
+ err = btf__dedup(btf2, NULL);
if (!ASSERT_OK(err, "btf_dedup"))
goto cleanup;
@@ -283,7 +283,7 @@ static void test_split_struct_duped() {
"[13] STRUCT 's3' size=8 vlen=1\n"
"\t'f1' type_id=12 bits_offset=0");
- err = btf__dedup(btf2, NULL, NULL);
+ err = btf__dedup(btf2, NULL);
if (!ASSERT_OK(err, "btf_dedup"))
goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
index aa76360d8f49..d6272013a5a3 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -13,25 +13,23 @@ static struct btf_dump_test_case {
const char *name;
const char *file;
bool known_ptr_sz;
- struct btf_dump_opts opts;
} btf_dump_test_cases[] = {
- {"btf_dump: syntax", "btf_dump_test_case_syntax", true, {}},
- {"btf_dump: ordering", "btf_dump_test_case_ordering", false, {}},
- {"btf_dump: padding", "btf_dump_test_case_padding", true, {}},
- {"btf_dump: packing", "btf_dump_test_case_packing", true, {}},
- {"btf_dump: bitfields", "btf_dump_test_case_bitfields", true, {}},
- {"btf_dump: multidim", "btf_dump_test_case_multidim", false, {}},
- {"btf_dump: namespacing", "btf_dump_test_case_namespacing", false, {}},
+ {"btf_dump: syntax", "btf_dump_test_case_syntax", true},
+ {"btf_dump: ordering", "btf_dump_test_case_ordering", false},
+ {"btf_dump: padding", "btf_dump_test_case_padding", true},
+ {"btf_dump: packing", "btf_dump_test_case_packing", true},
+ {"btf_dump: bitfields", "btf_dump_test_case_bitfields", true},
+ {"btf_dump: multidim", "btf_dump_test_case_multidim", false},
+ {"btf_dump: namespacing", "btf_dump_test_case_namespacing", false},
};
-static int btf_dump_all_types(const struct btf *btf,
- const struct btf_dump_opts *opts)
+static int btf_dump_all_types(const struct btf *btf, void *ctx)
{
size_t type_cnt = btf__type_cnt(btf);
struct btf_dump *d;
int err = 0, id;
- d = btf_dump__new(btf, NULL, opts, btf_dump_printf);
+ d = btf_dump__new(btf, btf_dump_printf, ctx, NULL);
err = libbpf_get_error(d);
if (err)
return err;
@@ -88,8 +86,7 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
goto done;
}
- t->opts.ctx = f;
- err = btf_dump_all_types(btf, &t->opts);
+ err = btf_dump_all_types(btf, f);
fclose(f);
close(fd);
if (CHECK(err, "btf_dump", "failure during C dumping: %d\n", err)) {
@@ -137,7 +134,6 @@ static void test_btf_dump_incremental(void)
{
struct btf *btf = NULL;
struct btf_dump *d = NULL;
- struct btf_dump_opts opts;
int id, err, i;
dump_buf_file = open_memstream(&dump_buf, &dump_buf_sz);
@@ -146,8 +142,7 @@ static void test_btf_dump_incremental(void)
btf = btf__new_empty();
if (!ASSERT_OK_PTR(btf, "new_empty"))
goto err_out;
- opts.ctx = dump_buf_file;
- d = btf_dump__new(btf, NULL, &opts, btf_dump_printf);
+ d = btf_dump__new(btf, btf_dump_printf, dump_buf_file, NULL);
if (!ASSERT_OK(libbpf_get_error(d), "btf_dump__new"))
goto err_out;
@@ -814,26 +809,28 @@ static void test_btf_datasec(struct btf *btf, struct btf_dump *d, char *str,
static void test_btf_dump_datasec_data(char *str)
{
- struct btf *btf = btf__parse("xdping_kern.o", NULL);
- struct btf_dump_opts opts = { .ctx = str };
+ struct btf *btf;
char license[4] = "GPL";
struct btf_dump *d;
+ btf = btf__parse("xdping_kern.o", NULL);
if (!ASSERT_OK_PTR(btf, "xdping_kern.o BTF not found"))
return;
- d = btf_dump__new(btf, NULL, &opts, btf_dump_snprintf);
+ d = btf_dump__new(btf, btf_dump_snprintf, str, NULL);
if (!ASSERT_OK_PTR(d, "could not create BTF dump"))
- return;
+ goto out;
test_btf_datasec(btf, d, str, "license",
"SEC(\"license\") char[4] _license = (char[4])['G','P','L',];",
license, sizeof(license));
+out:
+ btf_dump__free(d);
+ btf__free(btf);
}
void test_btf_dump() {
char str[STRSIZE];
- struct btf_dump_opts opts = { .ctx = str };
struct btf_dump *d;
struct btf *btf;
int i;
@@ -853,7 +850,7 @@ void test_btf_dump() {
if (!ASSERT_OK_PTR(btf, "no kernel BTF found"))
return;
- d = btf_dump__new(btf, NULL, &opts, btf_dump_snprintf);
+ d = btf_dump__new(btf, btf_dump_snprintf, str, NULL);
if (!ASSERT_OK_PTR(d, "could not create BTF dump"))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_split.c b/tools/testing/selftests/bpf/prog_tests/btf_split.c
index b1ffe61f2aa9..eef1158676ed 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_split.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_split.c
@@ -13,7 +13,6 @@ static void btf_dump_printf(void *ctx, const char *fmt, va_list args)
}
void test_btf_split() {
- struct btf_dump_opts opts;
struct btf_dump *d = NULL;
const struct btf_type *t;
struct btf *btf1, *btf2;
@@ -68,8 +67,7 @@ void test_btf_split() {
dump_buf_file = open_memstream(&dump_buf, &dump_buf_sz);
if (!ASSERT_OK_PTR(dump_buf_file, "dump_memstream"))
return;
- opts.ctx = dump_buf_file;
- d = btf_dump__new(btf2, NULL, &opts, btf_dump_printf);
+ d = btf_dump__new(btf2, btf_dump_printf, dump_buf_file, NULL);
if (!ASSERT_OK_PTR(d, "btf_dump__new"))
goto cleanup;
for (i = 1; i < btf__type_cnt(btf2); i++) {
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_tag.c b/tools/testing/selftests/bpf/prog_tests/btf_tag.c
index 91821f42714d..88d63e23e35f 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_tag.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_tag.c
@@ -1,20 +1,50 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
-#include "tag.skel.h"
+#include "btf_decl_tag.skel.h"
-void test_btf_tag(void)
+/* struct btf_type_tag_test is referenced in btf_type_tag.skel.h */
+struct btf_type_tag_test {
+ int **p;
+};
+#include "btf_type_tag.skel.h"
+
+static void test_btf_decl_tag(void)
+{
+ struct btf_decl_tag *skel;
+
+ skel = btf_decl_tag__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "btf_decl_tag"))
+ return;
+
+ if (skel->rodata->skip_tests) {
+ printf("%s:SKIP: btf_decl_tag attribute not supported", __func__);
+ test__skip();
+ }
+
+ btf_decl_tag__destroy(skel);
+}
+
+static void test_btf_type_tag(void)
{
- struct tag *skel;
+ struct btf_type_tag *skel;
- skel = tag__open_and_load();
- if (!ASSERT_OK_PTR(skel, "btf_tag"))
+ skel = btf_type_tag__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "btf_type_tag"))
return;
if (skel->rodata->skip_tests) {
- printf("%s:SKIP: btf_tag attribute not supported", __func__);
+ printf("%s:SKIP: btf_type_tag attribute not supported", __func__);
test__skip();
}
- tag__destroy(skel);
+ btf_type_tag__destroy(skel);
+}
+
+void test_btf_tag(void)
+{
+ if (test__start_subtest("btf_decl_tag"))
+ test_btf_decl_tag();
+ if (test__start_subtest("btf_type_tag"))
+ test_btf_type_tag();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_write.c b/tools/testing/selftests/bpf/prog_tests/btf_write.c
index b912eeb0b6b4..addf99c05896 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_write.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_write.c
@@ -297,6 +297,16 @@ static void gen_btf(struct btf *btf)
ASSERT_EQ(btf_decl_tag(t)->component_idx, 1, "tag_component_idx");
ASSERT_STREQ(btf_type_raw_dump(btf, 19),
"[19] DECL_TAG 'tag2' type_id=14 component_idx=1", "raw_dump");
+
+ /* TYPE_TAG */
+ id = btf__add_type_tag(btf, "tag1", 1);
+ ASSERT_EQ(id, 20, "tag_id");
+ t = btf__type_by_id(btf, 20);
+ ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag1", "tag_value");
+ ASSERT_EQ(btf_kind(t), BTF_KIND_TYPE_TAG, "tag_kind");
+ ASSERT_EQ(t->type, 1, "tag_type");
+ ASSERT_STREQ(btf_type_raw_dump(btf, 20),
+ "[20] TYPE_TAG 'tag1' type_id=1", "raw_dump");
}
static void test_btf_add()
@@ -337,7 +347,8 @@ static void test_btf_add()
"[17] DATASEC 'datasec1' size=12 vlen=1\n"
"\ttype_id=1 offset=4 size=8",
"[18] DECL_TAG 'tag1' type_id=16 component_idx=-1",
- "[19] DECL_TAG 'tag2' type_id=14 component_idx=1");
+ "[19] DECL_TAG 'tag2' type_id=14 component_idx=1",
+ "[20] TYPE_TAG 'tag1' type_id=1");
btf__free(btf);
}
@@ -359,7 +370,7 @@ static void test_btf_add_btf()
gen_btf(btf2);
id = btf__add_btf(btf1, btf2);
- if (!ASSERT_EQ(id, 20, "id"))
+ if (!ASSERT_EQ(id, 21, "id"))
goto cleanup;
VALIDATE_RAW_BTF(
@@ -391,35 +402,37 @@ static void test_btf_add_btf()
"\ttype_id=1 offset=4 size=8",
"[18] DECL_TAG 'tag1' type_id=16 component_idx=-1",
"[19] DECL_TAG 'tag2' type_id=14 component_idx=1",
+ "[20] TYPE_TAG 'tag1' type_id=1",
/* types appended from the second BTF */
- "[20] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
- "[21] PTR '(anon)' type_id=20",
- "[22] CONST '(anon)' type_id=24",
- "[23] VOLATILE '(anon)' type_id=22",
- "[24] RESTRICT '(anon)' type_id=23",
- "[25] ARRAY '(anon)' type_id=21 index_type_id=20 nr_elems=10",
- "[26] STRUCT 's1' size=8 vlen=2\n"
- "\t'f1' type_id=20 bits_offset=0\n"
- "\t'f2' type_id=20 bits_offset=32 bitfield_size=16",
- "[27] UNION 'u1' size=8 vlen=1\n"
- "\t'f1' type_id=20 bits_offset=0 bitfield_size=16",
- "[28] ENUM 'e1' size=4 vlen=2\n"
+ "[21] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[22] PTR '(anon)' type_id=21",
+ "[23] CONST '(anon)' type_id=25",
+ "[24] VOLATILE '(anon)' type_id=23",
+ "[25] RESTRICT '(anon)' type_id=24",
+ "[26] ARRAY '(anon)' type_id=22 index_type_id=21 nr_elems=10",
+ "[27] STRUCT 's1' size=8 vlen=2\n"
+ "\t'f1' type_id=21 bits_offset=0\n"
+ "\t'f2' type_id=21 bits_offset=32 bitfield_size=16",
+ "[28] UNION 'u1' size=8 vlen=1\n"
+ "\t'f1' type_id=21 bits_offset=0 bitfield_size=16",
+ "[29] ENUM 'e1' size=4 vlen=2\n"
"\t'v1' val=1\n"
"\t'v2' val=2",
- "[29] FWD 'struct_fwd' fwd_kind=struct",
- "[30] FWD 'union_fwd' fwd_kind=union",
- "[31] ENUM 'enum_fwd' size=4 vlen=0",
- "[32] TYPEDEF 'typedef1' type_id=20",
- "[33] FUNC 'func1' type_id=34 linkage=global",
- "[34] FUNC_PROTO '(anon)' ret_type_id=20 vlen=2\n"
- "\t'p1' type_id=20\n"
- "\t'p2' type_id=21",
- "[35] VAR 'var1' type_id=20, linkage=global-alloc",
- "[36] DATASEC 'datasec1' size=12 vlen=1\n"
- "\ttype_id=20 offset=4 size=8",
- "[37] DECL_TAG 'tag1' type_id=35 component_idx=-1",
- "[38] DECL_TAG 'tag2' type_id=33 component_idx=1");
+ "[30] FWD 'struct_fwd' fwd_kind=struct",
+ "[31] FWD 'union_fwd' fwd_kind=union",
+ "[32] ENUM 'enum_fwd' size=4 vlen=0",
+ "[33] TYPEDEF 'typedef1' type_id=21",
+ "[34] FUNC 'func1' type_id=35 linkage=global",
+ "[35] FUNC_PROTO '(anon)' ret_type_id=21 vlen=2\n"
+ "\t'p1' type_id=21\n"
+ "\t'p2' type_id=22",
+ "[36] VAR 'var1' type_id=21, linkage=global-alloc",
+ "[37] DATASEC 'datasec1' size=12 vlen=1\n"
+ "\ttype_id=21 offset=4 size=8",
+ "[38] DECL_TAG 'tag1' type_id=36 component_idx=-1",
+ "[39] DECL_TAG 'tag2' type_id=34 component_idx=1",
+ "[40] TYPE_TAG 'tag1' type_id=21");
cleanup:
btf__free(btf1);
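gen_btf() above exercises the new btf__add_type_tag() writer, which appends a BTF_KIND_TYPE_TAG node referencing an existing type. A small sketch of building a tagged pointer chain with it; the helper name is illustrative:

```c
#include <bpf/btf.h>

/* Build "int *p" where the pointee carries a type tag:
 * ptr -> type_tag('tag1') -> int. Returns the ptr type id or <0.
 */
static int add_tagged_int_ptr(struct btf *btf)
{
	int int_id, tag_id;

	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
	if (int_id < 0)
		return int_id;

	tag_id = btf__add_type_tag(btf, "tag1", int_id);
	if (tag_id < 0)
		return tag_id;

	/* the pointer chains through the tag node, as in the raw tests */
	return btf__add_ptr(btf, tag_id);
}
```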
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c
index 5de485c7370f..858916d11e2e 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c
@@ -16,7 +16,7 @@ static int prog_load(void)
};
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
- return bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
+ return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
index 731bea84d8ed..de9c3e12b0ea 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
@@ -66,7 +66,7 @@ static int prog_load_cnt(int verdict, int val)
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
int ret;
- ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
+ ret = bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c
index 10d3c33821a7..356547e849e2 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c
@@ -18,7 +18,7 @@ static int prog_load(int verdict)
};
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
- return bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
+ return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
index 55ec85ba7375..1041d0c593f6 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
@@ -433,7 +433,7 @@ static int setup_type_id_case_local(struct core_reloc_test_case *test)
static int setup_type_id_case_success(struct core_reloc_test_case *test) {
struct core_reloc_type_id_output *exp = (void *)test->output;
- struct btf *targ_btf = btf__parse(test->btf_src_file, NULL);
+ struct btf *targ_btf;
int err;
err = setup_type_id_case_local(test);
diff --git a/tools/testing/selftests/bpf/prog_tests/exhandler.c b/tools/testing/selftests/bpf/prog_tests/exhandler.c
new file mode 100644
index 000000000000..118bb182ee20
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/exhandler.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021, Oracle and/or its affiliates. */
+
+#include <test_progs.h>
+
+/* Test that verifies exception handling is working. fork()
+ * triggers the task_newtask tracepoint; the new task will have a
+ * NULL task_works pointer, and task->task_works->func would only
+ * be valid if task_works itself were non-NULL.
+ *
+ * So to verify exception handling we want to see both a NULL
+ * task_works and a NULL task_works->func; if we see this we can
+ * conclude that the exception handler ran when we attempted to
+ * dereference task->task_works and zeroed the destination register.
+ */
+#include "exhandler_kern.skel.h"
+
+void test_exhandler(void)
+{
+ int err = 0, duration = 0, status;
+ struct exhandler_kern *skel;
+ pid_t cpid;
+
+ skel = exhandler_kern__open_and_load();
+ if (CHECK(!skel, "skel_load", "skeleton failed: %d\n", err))
+ goto cleanup;
+
+ skel->bss->test_pid = getpid();
+
+ err = exhandler_kern__attach(skel);
+ if (!ASSERT_OK(err, "attach"))
+ goto cleanup;
+ cpid = fork();
+ if (!ASSERT_GT(cpid, -1, "fork failed"))
+ goto cleanup;
+ if (cpid == 0)
+ _exit(0);
+ waitpid(cpid, &status, 0);
+
+ ASSERT_NEQ(skel->bss->exception_triggered, 0, "verify exceptions occurred");
+cleanup:
+ exhandler_kern__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
index 9cff14a23bb7..fdd603ebda28 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
@@ -65,7 +65,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
int err, tgt_fd, i;
struct btf *btf;
- err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
+ err = bpf_prog_test_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
&tgt_obj, &tgt_fd);
if (!ASSERT_OK(err, "tgt_prog_load"))
return;
@@ -224,7 +224,7 @@ static int test_second_attach(struct bpf_object *obj)
if (CHECK(!prog, "find_prog", "prog %s not found\n", prog_name))
return -ENOENT;
- err = bpf_prog_load(tgt_obj_file, BPF_PROG_TYPE_UNSPEC,
+ err = bpf_prog_test_load(tgt_obj_file, BPF_PROG_TYPE_UNSPEC,
&tgt_obj, &tgt_fd);
if (CHECK(err, "second_prog_load", "file %s err %d errno %d\n",
tgt_obj_file, err, errno))
@@ -274,7 +274,7 @@ static void test_fmod_ret_freplace(void)
__u32 duration = 0;
int err, pkt_fd, attach_prog_fd;
- err = bpf_prog_load(tgt_name, BPF_PROG_TYPE_UNSPEC,
+ err = bpf_prog_test_load(tgt_name, BPF_PROG_TYPE_UNSPEC,
&pkt_obj, &pkt_fd);
/* the target prog should load fine */
if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n",
@@ -341,7 +341,7 @@ static void test_obj_load_failure_common(const char *obj_file,
int err, pkt_fd;
__u32 duration = 0;
- err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
+ err = bpf_prog_test_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
&pkt_obj, &pkt_fd);
/* the target prog should load fine */
if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n",
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
index 7c9b62e971f1..e4cede6b4b2d 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
@@ -20,34 +20,33 @@ void test_fexit_stress(void)
BPF_EXIT_INSN(),
};
- struct bpf_load_program_attr load_attr = {
- .prog_type = BPF_PROG_TYPE_TRACING,
- .license = "GPL",
- .insns = trace_program,
- .insns_cnt = sizeof(trace_program) / sizeof(struct bpf_insn),
+ LIBBPF_OPTS(bpf_prog_load_opts, trace_opts,
.expected_attach_type = BPF_TRACE_FEXIT,
- };
+ .log_buf = error,
+ .log_size = sizeof(error),
+ );
const struct bpf_insn skb_program[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
- struct bpf_load_program_attr skb_load_attr = {
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .license = "GPL",
- .insns = skb_program,
- .insns_cnt = sizeof(skb_program) / sizeof(struct bpf_insn),
- };
+ LIBBPF_OPTS(bpf_prog_load_opts, skb_opts,
+ .log_buf = error,
+ .log_size = sizeof(error),
+ );
err = libbpf_find_vmlinux_btf_id("bpf_fentry_test1",
- load_attr.expected_attach_type);
+ trace_opts.expected_attach_type);
if (CHECK(err <= 0, "find_vmlinux_btf_id", "failed: %d\n", err))
goto out;
- load_attr.attach_btf_id = err;
+ trace_opts.attach_btf_id = err;
for (i = 0; i < CNT; i++) {
- fexit_fd[i] = bpf_load_program_xattr(&load_attr, error, sizeof(error));
+ fexit_fd[i] = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL",
+ trace_program,
+ sizeof(trace_program) / sizeof(struct bpf_insn),
+ &trace_opts);
if (CHECK(fexit_fd[i] < 0, "fexit loaded",
"failed: %d errno %d\n", fexit_fd[i], errno))
goto out;
@@ -57,7 +56,9 @@ void test_fexit_stress(void)
goto out;
}
- filter_fd = bpf_load_program_xattr(&skb_load_attr, error, sizeof(error));
+ filter_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
+ skb_program, sizeof(skb_program) / sizeof(struct bpf_insn),
+ &skb_opts);
if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n",
filter_fd, errno))
goto out;
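The stress test resolves the target function's in-kernel BTF id with libbpf_find_vmlinux_btf_id() and feeds it to bpf_prog_load() through opts.attach_btf_id. A condensed sketch of that sequence; the helper name and error handling are illustrative, and the instructions are left to the caller:

```c
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Load a tracing program that will attach at the exit of
 * bpf_fentry_test1.
 */
static int load_fexit_prog(const struct bpf_insn *insns, size_t insn_cnt)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.expected_attach_type = BPF_TRACE_FEXIT,
	);
	int btf_id;

	btf_id = libbpf_find_vmlinux_btf_id("bpf_fentry_test1",
					    BPF_TRACE_FEXIT);
	if (btf_id <= 0)
		return -1;
	opts.attach_btf_id = btf_id;

	return bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL",
			     insns, insn_cnt, &opts);
}
```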
diff --git a/tools/testing/selftests/bpf/prog_tests/find_vma.c b/tools/testing/selftests/bpf/prog_tests/find_vma.c
new file mode 100644
index 000000000000..b74b3c0c555a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/find_vma.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include "find_vma.skel.h"
+#include "find_vma_fail1.skel.h"
+#include "find_vma_fail2.skel.h"
+
+static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret)
+{
+ ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec");
+ ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret");
+ ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret");
+ ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs");
+
+ skel->bss->found_vm_exec = 0;
+ skel->data->find_addr_ret = -1;
+ skel->data->find_zero_ret = -1;
+ skel->bss->d_iname[0] = 0;
+}
+
+static int open_pe(void)
+{
+ struct perf_event_attr attr = {0};
+ int pfd;
+
+ /* create perf event */
+ attr.size = sizeof(attr);
+ attr.type = PERF_TYPE_HARDWARE;
+ attr.config = PERF_COUNT_HW_CPU_CYCLES;
+ attr.freq = 1;
+ attr.sample_freq = 4000;
+ pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
+
+ return pfd >= 0 ? pfd : -errno;
+}
+
+static void test_find_vma_pe(struct find_vma *skel)
+{
+ struct bpf_link *link = NULL;
+ volatile int j = 0;
+ int pfd, i;
+
+ pfd = open_pe();
+ if (pfd < 0) {
+ if (pfd == -ENOENT || pfd == -EOPNOTSUPP) {
+ printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__);
+ test__skip();
+ goto cleanup;
+ }
+ if (!ASSERT_GE(pfd, 0, "perf_event_open"))
+ goto cleanup;
+ }
+
+ link = bpf_program__attach_perf_event(skel->progs.handle_pe, pfd);
+ if (!ASSERT_OK_PTR(link, "attach_perf_event"))
+ goto cleanup;
+
+ for (i = 0; i < 1000000; ++i)
+ ++j;
+
+ test_and_reset_skel(skel, -EBUSY /* in nmi, irq_work is busy */);
+cleanup:
+ bpf_link__destroy(link);
+ close(pfd);
+}
+
+static void test_find_vma_kprobe(struct find_vma *skel)
+{
+ int err;
+
+ err = find_vma__attach(skel);
+	if (!ASSERT_OK(err, "find_vma__attach"))
+ return;
+
+ getpgid(skel->bss->target_pid);
+ test_and_reset_skel(skel, -ENOENT /* could not find vma for ptr 0 */);
+}
+
+static void test_illegal_write_vma(void)
+{
+ struct find_vma_fail1 *skel;
+
+ skel = find_vma_fail1__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "find_vma_fail1__open_and_load"))
+ find_vma_fail1__destroy(skel);
+}
+
+static void test_illegal_write_task(void)
+{
+ struct find_vma_fail2 *skel;
+
+ skel = find_vma_fail2__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "find_vma_fail2__open_and_load"))
+ find_vma_fail2__destroy(skel);
+}
+
+void serial_test_find_vma(void)
+{
+ struct find_vma *skel;
+
+ skel = find_vma__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "find_vma__open_and_load"))
+ return;
+
+ skel->bss->target_pid = getpid();
+ skel->bss->addr = (__u64)(uintptr_t)test_find_vma_pe;
+
+ test_find_vma_pe(skel);
+ usleep(100000); /* allow the irq_work to finish */
+ test_find_vma_kprobe(skel);
+
+ find_vma__destroy(skel);
+ test_illegal_write_vma();
+ test_illegal_write_task();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c
index 6093728497c7..93ac3f28226c 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c
@@ -30,7 +30,7 @@ void serial_test_flow_dissector_load_bytes(void)
/* make sure bpf_skb_load_bytes is not allowed from skb-less context
*/
- fd = bpf_load_program(BPF_PROG_TYPE_FLOW_DISSECTOR, prog,
+ fd = bpf_test_load_program(BPF_PROG_TYPE_FLOW_DISSECTOR, prog,
ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
CHECK(fd < 0,
"flow_dissector-bpf_skb_load_bytes-load",
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c
index f0c6c226aba8..7c79462d2702 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c
@@ -47,9 +47,9 @@ static int load_prog(enum bpf_prog_type type)
};
int fd;
- fd = bpf_load_program(type, prog, ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
+ fd = bpf_test_load_program(type, prog, ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
if (CHECK_FAIL(fd < 0))
- perror("bpf_load_program");
+ perror("bpf_test_load_program");
return fd;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
index 522237aa4470..4184c399d4c6 100644
--- a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
+++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
@@ -85,7 +85,6 @@ void test_get_stack_raw_tp(void)
const char *file_err = "./test_get_stack_rawtp_err.o";
const char *prog_name = "raw_tracepoint/sys_enter";
int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP;
- struct perf_buffer_opts pb_opts = {};
struct perf_buffer *pb = NULL;
struct bpf_link *link = NULL;
struct timespec tv = {0, 10};
@@ -94,11 +93,11 @@ void test_get_stack_raw_tp(void)
struct bpf_map *map;
cpu_set_t cpu_set;
- err = bpf_prog_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+ err = bpf_prog_test_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err >= 0, "prog_load raw tp", "err %d errno %d\n", err, errno))
return;
- err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
return;
@@ -124,8 +123,8 @@ void test_get_stack_raw_tp(void)
if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
goto close_prog;
- pb_opts.sample_cb = get_stack_print_output;
- pb = perf_buffer__new(bpf_map__fd(map), 8, &pb_opts);
+ pb = perf_buffer__new(bpf_map__fd(map), 8, get_stack_print_output,
+ NULL, NULL, NULL);
if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
goto close_prog;
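Like the raw variant earlier, perf_buffer__new() now takes the sample callback, lost-sample callback, and context as direct arguments rather than via perf_buffer_opts. A minimal sketch with the callback body elided and an illustrative page count:

```c
#include <bpf/libbpf.h>

static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* handle one sample record */
}

static struct perf_buffer *open_perfbuf(int map_fd)
{
	/* sample_cb, lost_cb and ctx are direct parameters; the final
	 * opts pointer may be NULL.
	 */
	return perf_buffer__new(map_fd, /*page_cnt=*/8, on_sample,
				/*lost_cb=*/NULL, /*ctx=*/NULL, /*opts=*/NULL);
}
```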
diff --git a/tools/testing/selftests/bpf/prog_tests/global_data.c b/tools/testing/selftests/bpf/prog_tests/global_data.c
index afd8639f9a94..9da131b32e13 100644
--- a/tools/testing/selftests/bpf/prog_tests/global_data.c
+++ b/tools/testing/selftests/bpf/prog_tests/global_data.c
@@ -136,7 +136,7 @@ void test_global_data(void)
struct bpf_object *obj;
int err, prog_fd;
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (CHECK(err, "load program", "error %d loading %s\n", err, file))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/global_func_args.c b/tools/testing/selftests/bpf/prog_tests/global_func_args.c
index 8bcc2869102f..93a2439237b0 100644
--- a/tools/testing/selftests/bpf/prog_tests/global_func_args.c
+++ b/tools/testing/selftests/bpf/prog_tests/global_func_args.c
@@ -44,7 +44,7 @@ void test_global_func_args(void)
struct bpf_object *obj;
int err, prog_fd;
- err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
if (CHECK(err, "load program", "error %d loading %s\n", err, file))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
index 01e51d16c8b8..2a49f8fcde06 100644
--- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
+++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c
@@ -66,7 +66,6 @@ void serial_test_kfree_skb(void)
struct bpf_map *perf_buf_map, *global_data;
struct bpf_program *prog, *fentry, *fexit;
struct bpf_object *obj, *obj2 = NULL;
- struct perf_buffer_opts pb_opts = {};
struct perf_buffer *pb = NULL;
int err, kfree_skb_fd;
bool passed = false;
@@ -74,7 +73,7 @@ void serial_test_kfree_skb(void)
const int zero = 0;
bool test_ok[2];
- err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
+ err = bpf_prog_test_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
&obj, &tattr.prog_fd);
if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
return;
@@ -112,9 +111,8 @@ void serial_test_kfree_skb(void)
goto close_prog;
/* set up perf buffer */
- pb_opts.sample_cb = on_sample;
- pb_opts.ctx = &passed;
- pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts);
+ pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1,
+ on_sample, NULL, &passed, NULL);
if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
goto close_prog;
diff --git a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c
index 8073105548ff..540ef28fabff 100644
--- a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c
+++ b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c
@@ -30,7 +30,7 @@ static void test_l4lb(const char *file)
char buf[128];
u32 *magic = (u32 *)buf;
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c b/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c
index 5a2a689dbb68..4e0b2ec057aa 100644
--- a/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c
+++ b/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c
@@ -27,7 +27,7 @@ void test_load_bytes_relative(void)
if (CHECK_FAIL(server_fd < 0))
goto close_cgroup_fd;
- err = bpf_prog_load("./load_bytes_relative.o", BPF_PROG_TYPE_CGROUP_SKB,
+ err = bpf_prog_test_load("./load_bytes_relative.o", BPF_PROG_TYPE_CGROUP_SKB,
&obj, &prog_fd);
if (CHECK_FAIL(err))
goto close_server_fd;
diff --git a/tools/testing/selftests/bpf/prog_tests/map_lock.c b/tools/testing/selftests/bpf/prog_tests/map_lock.c
index ce17b1ed8709..23d19e9cf26a 100644
--- a/tools/testing/selftests/bpf/prog_tests/map_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/map_lock.c
@@ -53,9 +53,9 @@ void test_map_lock(void)
int err = 0, key = 0, i;
void *ret;
- err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
if (CHECK_FAIL(err)) {
- printf("test_map_lock:bpf_prog_load errno %d\n", errno);
+ printf("test_map_lock:bpf_prog_test_load errno %d\n", errno);
goto close_prog;
}
map_fd[0] = bpf_find_map(__func__, obj, "hash_map");
diff --git a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c
index 7589c03fd26b..eb2feaac81fe 100644
--- a/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c
+++ b/tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c
@@ -204,8 +204,8 @@ static int pass_ack(struct migrate_reuseport_test_case *test_case)
{
int err;
- err = bpf_link__detach(test_case->link);
- if (!ASSERT_OK(err, "bpf_link__detach"))
+ err = bpf_link__destroy(test_case->link);
+ if (!ASSERT_OK(err, "bpf_link__destroy"))
return -1;
test_case->link = NULL;
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c
index 4e32f3586a75..5fc2b3a0711e 100644
--- a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c
+++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c
@@ -47,7 +47,6 @@ void serial_test_perf_buffer(void)
{
int err, on_len, nr_on_cpus = 0, nr_cpus, i, j;
int zero = 0, my_pid = getpid();
- struct perf_buffer_opts pb_opts = {};
struct test_perf_buffer *skel;
cpu_set_t cpu_seen;
struct perf_buffer *pb;
@@ -82,9 +81,8 @@ void serial_test_perf_buffer(void)
goto out_close;
/* set up perf buffer */
- pb_opts.sample_cb = on_sample;
- pb_opts.ctx = &cpu_seen;
- pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, &pb_opts);
+ pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1,
+ on_sample, NULL, &cpu_seen, NULL);
if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
goto out_close;
diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_access.c
index 44b514fabccd..6628710ec3c6 100644
--- a/tools/testing/selftests/bpf/prog_tests/pkt_access.c
+++ b/tools/testing/selftests/bpf/prog_tests/pkt_access.c
@@ -9,7 +9,7 @@ void test_pkt_access(void)
__u32 duration, retval;
int err, prog_fd;
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c
index 939015cd6dba..c9d2d6a1bfcc 100644
--- a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c
+++ b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c
@@ -9,7 +9,7 @@ void test_pkt_md_access(void)
__u32 duration, retval;
int err, prog_fd;
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c
index f47e7b1cb32c..8ccba3ab70ee 100644
--- a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c
@@ -27,7 +27,7 @@ static void test_queue_stack_map_by_type(int type)
else
return;
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c
index 9807336a3016..e2f1445b0e10 100644
--- a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c
+++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c
@@ -18,15 +18,15 @@ void test_raw_tp_writable_reject_nbd_invalid(void)
BPF_EXIT_INSN(),
};
- struct bpf_load_program_attr load_attr = {
- .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
- .license = "GPL v2",
- .insns = program,
- .insns_cnt = sizeof(program) / sizeof(struct bpf_insn),
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
.log_level = 2,
- };
+ .log_buf = error,
+ .log_size = sizeof(error),
+ );
- bpf_fd = bpf_load_program_xattr(&load_attr, error, sizeof(error));
+ bpf_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, NULL, "GPL v2",
+ program, sizeof(program) / sizeof(struct bpf_insn),
+ &opts);
if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable load",
"failed: %d errno %d\n", bpf_fd, errno))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c
index ddefa1192e5d..239baccabccb 100644
--- a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c
+++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c
@@ -17,15 +17,15 @@ void serial_test_raw_tp_writable_test_run(void)
BPF_EXIT_INSN(),
};
- struct bpf_load_program_attr load_attr = {
- .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
- .license = "GPL v2",
- .insns = trace_program,
- .insns_cnt = sizeof(trace_program) / sizeof(struct bpf_insn),
+ LIBBPF_OPTS(bpf_prog_load_opts, trace_opts,
.log_level = 2,
- };
+ .log_buf = error,
+ .log_size = sizeof(error),
+ );
- int bpf_fd = bpf_load_program_xattr(&load_attr, error, sizeof(error));
+ int bpf_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, NULL, "GPL v2",
+ trace_program, sizeof(trace_program) / sizeof(struct bpf_insn),
+ &trace_opts);
if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable loaded",
"failed: %d errno %d\n", bpf_fd, errno))
return;
@@ -35,15 +35,14 @@ void serial_test_raw_tp_writable_test_run(void)
BPF_EXIT_INSN(),
};
- struct bpf_load_program_attr skb_load_attr = {
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .license = "GPL v2",
- .insns = skb_program,
- .insns_cnt = sizeof(skb_program) / sizeof(struct bpf_insn),
- };
+ LIBBPF_OPTS(bpf_prog_load_opts, skb_opts,
+ .log_buf = error,
+ .log_size = sizeof(error),
+ );
- int filter_fd =
- bpf_load_program_xattr(&skb_load_attr, error, sizeof(error));
+ int filter_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL v2",
+ skb_program, sizeof(skb_program) / sizeof(struct bpf_insn),
+ &skb_opts);
if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n",
filter_fd, errno))
goto out_bpffd;
diff --git a/tools/testing/selftests/bpf/prog_tests/signal_pending.c b/tools/testing/selftests/bpf/prog_tests/signal_pending.c
index fdfdcff6cbef..aecfe662c070 100644
--- a/tools/testing/selftests/bpf/prog_tests/signal_pending.c
+++ b/tools/testing/selftests/bpf/prog_tests/signal_pending.c
@@ -22,7 +22,7 @@ static void test_signal_pending_by_type(enum bpf_prog_type prog_type)
prog[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
prog[ARRAY_SIZE(prog) - 1] = BPF_EXIT_INSN();
- prog_fd = bpf_load_program(prog_type, prog, ARRAY_SIZE(prog),
+ prog_fd = bpf_test_load_program(prog_type, prog, ARRAY_SIZE(prog),
"GPL", 0, NULL, 0);
CHECK(prog_fd < 0, "test-run", "errno %d\n", errno);
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
index 6db07401bc49..57846cc7ce36 100644
--- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
+++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
@@ -937,6 +937,37 @@ static void test_drop_on_lookup(struct test_sk_lookup *skel)
.connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { EXT_IP6, INT_PORT },
},
+	/* The lookup program drops on success, i.e. when the ingress
+	 * ifindex was 1.
+	 */
+ {
+ .desc = "TCP IPv4 drop on valid ifindex",
+ .lookup_prog = skel->progs.check_ifindex,
+ .sotype = SOCK_STREAM,
+ .connect_to = { EXT_IP4, EXT_PORT },
+ .listen_at = { EXT_IP4, EXT_PORT },
+ },
+ {
+ .desc = "TCP IPv6 drop on valid ifindex",
+ .lookup_prog = skel->progs.check_ifindex,
+ .sotype = SOCK_STREAM,
+ .connect_to = { EXT_IP6, EXT_PORT },
+ .listen_at = { EXT_IP6, EXT_PORT },
+ },
+ {
+ .desc = "UDP IPv4 drop on valid ifindex",
+ .lookup_prog = skel->progs.check_ifindex,
+ .sotype = SOCK_DGRAM,
+ .connect_to = { EXT_IP4, EXT_PORT },
+ .listen_at = { EXT_IP4, EXT_PORT },
+ },
+ {
+ .desc = "UDP IPv6 drop on valid ifindex",
+ .lookup_prog = skel->progs.check_ifindex,
+ .sotype = SOCK_DGRAM,
+ .connect_to = { EXT_IP6, EXT_PORT },
+ .listen_at = { EXT_IP6, EXT_PORT },
+ },
};
const struct test *t;
diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
index c437e6ba8fe2..b5319ba2ee27 100644
--- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
+++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
@@ -32,7 +32,7 @@ void test_skb_ctx(void)
int err;
int i;
- err = bpf_prog_load("./test_skb_ctx.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
+ err = bpf_prog_test_load("./test_skb_ctx.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
&tattr.prog_fd);
if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
return;
@@ -111,4 +111,6 @@ void test_skb_ctx(void)
"ctx_out_mark",
"skb->mark == %u, expected %d\n",
skb.mark, 10);
+
+ bpf_object__close(obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/skb_helpers.c b/tools/testing/selftests/bpf/prog_tests/skb_helpers.c
index f302ad84a298..6f802a1c0800 100644
--- a/tools/testing/selftests/bpf/prog_tests/skb_helpers.c
+++ b/tools/testing/selftests/bpf/prog_tests/skb_helpers.c
@@ -20,7 +20,7 @@ void test_skb_helpers(void)
struct bpf_object *obj;
int err;
- err = bpf_prog_load("./test_skb_helpers.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
+ err = bpf_prog_test_load("./test_skb_helpers.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
&tattr.prog_fd);
if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt.c b/tools/testing/selftests/bpf/prog_tests/sockopt.c
index 3e8517a8395a..cd09f4c7dd92 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt.c
@@ -852,22 +852,21 @@ static struct sockopt_test {
static int load_prog(const struct bpf_insn *insns,
enum bpf_attach_type expected_attach_type)
{
- struct bpf_load_program_attr attr = {
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCKOPT,
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
.expected_attach_type = expected_attach_type,
- .insns = insns,
- .license = "GPL",
.log_level = 2,
- };
- int fd;
+ .log_buf = bpf_log_buf,
+ .log_size = sizeof(bpf_log_buf),
+ );
+ int fd, insns_cnt = 0;
for (;
- insns[attr.insns_cnt].code != (BPF_JMP | BPF_EXIT);
- attr.insns_cnt++) {
+ insns[insns_cnt].code != (BPF_JMP | BPF_EXIT);
+ insns_cnt++) {
}
- attr.insns_cnt++;
+ insns_cnt++;
- fd = bpf_load_program_xattr(&attr, bpf_log_buf, sizeof(bpf_log_buf));
+ fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCKOPT, NULL, "GPL", insns, insns_cnt, &opts);
if (verbose && fd < 0)
fprintf(stderr, "%s\n", bpf_log_buf);
diff --git a/tools/testing/selftests/bpf/prog_tests/spinlock.c b/tools/testing/selftests/bpf/prog_tests/spinlock.c
index 7577a77a4c4c..6307f5d2b417 100644
--- a/tools/testing/selftests/bpf/prog_tests/spinlock.c
+++ b/tools/testing/selftests/bpf/prog_tests/spinlock.c
@@ -24,9 +24,9 @@ void test_spinlock(void)
int err = 0, i;
void *ret;
- err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
if (CHECK_FAIL(err)) {
- printf("test_spin_lock:bpf_prog_load errno %d\n", errno);
+ printf("test_spin_lock:bpf_prog_test_load errno %d\n", errno);
goto close_prog;
}
for (i = 0; i < 4; i++)
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
index 04b476bd62b9..337493d74ec5 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
@@ -12,7 +12,7 @@ void test_stacktrace_map(void)
struct bpf_object *obj;
struct bpf_link *link;
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
index 4fd30bb651ad..063a14a2060d 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
@@ -12,7 +12,7 @@ void test_stacktrace_map_raw_tp(void)
struct bpf_object *obj;
struct bpf_link *link = NULL;
- err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index 9825f1f7bfcc..5dc0f425bd11 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -16,7 +16,7 @@ static void test_tailcall_1(void)
char prog_name[32];
char buff[128] = {};
- err = bpf_prog_load("tailcall1.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
+ err = bpf_prog_test_load("tailcall1.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
&prog_fd);
if (CHECK_FAIL(err))
return;
@@ -154,7 +154,7 @@ static void test_tailcall_2(void)
char prog_name[32];
char buff[128] = {};
- err = bpf_prog_load("tailcall2.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
+ err = bpf_prog_test_load("tailcall2.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
&prog_fd);
if (CHECK_FAIL(err))
return;
@@ -228,7 +228,7 @@ static void test_tailcall_count(const char *which)
__u32 retval, duration;
char buff[128] = {};
- err = bpf_prog_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
+ err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
&prog_fd);
if (CHECK_FAIL(err))
return;
@@ -324,7 +324,7 @@ static void test_tailcall_4(void)
char buff[128] = {};
char prog_name[32];
- err = bpf_prog_load("tailcall4.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
+ err = bpf_prog_test_load("tailcall4.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
&prog_fd);
if (CHECK_FAIL(err))
return;
@@ -412,7 +412,7 @@ static void test_tailcall_5(void)
char buff[128] = {};
char prog_name[32];
- err = bpf_prog_load("tailcall5.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
+ err = bpf_prog_test_load("tailcall5.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
&prog_fd);
if (CHECK_FAIL(err))
return;
@@ -498,7 +498,7 @@ static void test_tailcall_bpf2bpf_1(void)
__u32 retval, duration;
char prog_name[32];
- err = bpf_prog_load("tailcall_bpf2bpf1.o", BPF_PROG_TYPE_SCHED_CLS,
+ err = bpf_prog_test_load("tailcall_bpf2bpf1.o", BPF_PROG_TYPE_SCHED_CLS,
&obj, &prog_fd);
if (CHECK_FAIL(err))
return;
@@ -582,7 +582,7 @@ static void test_tailcall_bpf2bpf_2(void)
__u32 retval, duration;
char buff[128] = {};
- err = bpf_prog_load("tailcall_bpf2bpf2.o", BPF_PROG_TYPE_SCHED_CLS,
+ err = bpf_prog_test_load("tailcall_bpf2bpf2.o", BPF_PROG_TYPE_SCHED_CLS,
&obj, &prog_fd);
if (CHECK_FAIL(err))
return;
@@ -660,7 +660,7 @@ static void test_tailcall_bpf2bpf_3(void)
__u32 retval, duration;
char prog_name[32];
- err = bpf_prog_load("tailcall_bpf2bpf3.o", BPF_PROG_TYPE_SCHED_CLS,
+ err = bpf_prog_test_load("tailcall_bpf2bpf3.o", BPF_PROG_TYPE_SCHED_CLS,
&obj, &prog_fd);
if (CHECK_FAIL(err))
return;
@@ -757,7 +757,7 @@ static void test_tailcall_bpf2bpf_4(bool noise)
__u32 retval, duration;
char prog_name[32];
- err = bpf_prog_load("tailcall_bpf2bpf4.o", BPF_PROG_TYPE_SCHED_CLS,
+ err = bpf_prog_test_load("tailcall_bpf2bpf4.o", BPF_PROG_TYPE_SCHED_CLS,
&obj, &prog_fd);
if (CHECK_FAIL(err))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c
index 1bdc1d86a50c..17947c9e1d66 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c
+++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c
@@ -11,7 +11,7 @@ void test_task_fd_query_rawtp(void)
__u32 duration = 0;
char buf[256];
- err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c
index 3f131b8fe328..c2a98a7a8dfc 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c
+++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c
@@ -13,8 +13,8 @@ static void test_task_fd_query_tp_core(const char *probe_name,
__u32 duration = 0;
char buf[256];
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "bpf_prog_test_load", "err %d errno %d\n", err, errno))
goto close_prog;
snprintf(buf, sizeof(buf),
diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_estats.c b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c
index 594307dffd13..11bf755be4c9 100644
--- a/tools/testing/selftests/bpf/prog_tests/tcp_estats.c
+++ b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c
@@ -8,7 +8,7 @@ void test_tcp_estats(void)
struct bpf_object *obj;
__u32 duration = 0;
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
CHECK(err, "", "err %d errno %d\n", err, errno);
if (err)
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c
index 8652d0a46c87..39e79291c82b 100644
--- a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c
+++ b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c
@@ -35,7 +35,7 @@ void serial_test_tp_attach_query(void)
query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
for (i = 0; i < num_progs; i++) {
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
&prog_fd[i]);
if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
goto cleanup1;
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp.c b/tools/testing/selftests/bpf/prog_tests/xdp.c
index 48921ff74850..7a7ef9d4e151 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp.c
@@ -16,7 +16,7 @@ void test_xdp(void)
__u32 duration, retval, size;
int err, prog_fd, map_fd;
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
index f529e3c923ae..3f5a17c38be5 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
@@ -10,7 +10,7 @@ static void test_xdp_adjust_tail_shrink(void)
int err, prog_fd;
char buf[128];
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
@@ -38,7 +38,7 @@ static void test_xdp_adjust_tail_grow(void)
__u32 duration, retval, size, expect_sz;
int err, prog_fd;
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
@@ -75,7 +75,7 @@ static void test_xdp_adjust_tail_grow2(void)
.data_size_out = 0, /* Per test */
};
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &tattr.prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &tattr.prog_fd);
if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c
index 4c4057262cd8..c6fa390e3aa1 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c
@@ -16,7 +16,7 @@ void serial_test_xdp_attach(void)
len = sizeof(info);
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj1, &fd1);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj1, &fd1);
if (CHECK_FAIL(err))
return;
err = bpf_obj_get_info_by_fd(fd1, &info, &len);
@@ -24,7 +24,7 @@ void serial_test_xdp_attach(void)
goto out_1;
id1 = info.id;
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj2, &fd2);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj2, &fd2);
if (CHECK_FAIL(err))
goto out_1;
@@ -34,7 +34,7 @@ void serial_test_xdp_attach(void)
goto out_2;
id2 = info.id;
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj3, &fd3);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj3, &fd3);
if (CHECK_FAIL(err))
goto out_2;
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
index 3bd5904b4db5..f99386d1dc4c 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
@@ -49,7 +49,6 @@ void test_xdp_bpf2bpf(void)
struct vip key4 = {.protocol = 6, .family = AF_INET};
struct bpf_program *prog;
struct perf_buffer *pb = NULL;
- struct perf_buffer_opts pb_opts = {};
/* Load XDP program to introspect */
pkt_skel = test_xdp__open_and_load();
@@ -86,10 +85,8 @@ void test_xdp_bpf2bpf(void)
goto out;
/* Set up perf buffer */
- pb_opts.sample_cb = on_sample;
- pb_opts.ctx = &passed;
- pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map),
- 1, &pb_opts);
+ pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map), 1,
+ on_sample, NULL, &passed, NULL);
if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
goto out;
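
Both this hunk and the test_tcpnotify_user.c one further below track the libbpf
0.6 change to perf_buffer__new(): the sample/lost callbacks and the ctx pointer
are now explicit parameters rather than fields of struct perf_buffer_opts. A
sketch of the new signature, with a stand-in map fd and counter:

	#include <bpf/libbpf.h>

	static void on_sample(void *ctx, int cpu, void *data, __u32 size)
	{
		(*(int *)ctx)++;	/* e.g. count delivered samples */
	}

	static struct perf_buffer *open_pb(int map_fd, int *counter)
	{
		/* 8 pages per CPU; lost_cb and opts may both be NULL */
		return perf_buffer__new(map_fd, 8, on_sample, NULL /* lost_cb */,
					counter /* ctx */, NULL /* opts */);
	}
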
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_info.c b/tools/testing/selftests/bpf/prog_tests/xdp_info.c
index 4e2a4fd56f67..abe48e82e1dc 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_info.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_info.c
@@ -29,7 +29,7 @@ void serial_test_xdp_info(void)
/* Setup prog */
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_perf.c b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c
index 7185bee16fe4..15a3900e4370 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_perf.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c
@@ -9,7 +9,7 @@ void test_xdp_perf(void)
char in[128], out[128];
int err, prog_fd;
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (CHECK_FAIL(err))
return;
diff --git a/tools/testing/selftests/bpf/progs/tag.c b/tools/testing/selftests/bpf/progs/btf_decl_tag.c
index 1792f4eda095..c88ccc53529a 100644
--- a/tools/testing/selftests/bpf/progs/tag.c
+++ b/tools/testing/selftests/bpf/progs/btf_decl_tag.c
@@ -4,10 +4,6 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#ifndef __has_attribute
-#define __has_attribute(x) 0
-#endif
-
#if __has_attribute(btf_decl_tag)
#define __tag1 __attribute__((btf_decl_tag("tag1")))
#define __tag2 __attribute__((btf_decl_tag("tag2")))
diff --git a/tools/testing/selftests/bpf/progs/btf_type_tag.c b/tools/testing/selftests/bpf/progs/btf_type_tag.c
new file mode 100644
index 000000000000..1d488da7e920
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf_type_tag.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#if __has_attribute(btf_type_tag)
+#define __tag1 __attribute__((btf_type_tag("tag1")))
+#define __tag2 __attribute__((btf_type_tag("tag2")))
+volatile const bool skip_tests = false;
+#else
+#define __tag1
+#define __tag2
+volatile const bool skip_tests = true;
+#endif
+
+struct btf_type_tag_test {
+ int __tag1 * __tag1 __tag2 *p;
+} g;
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(sub, int x)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/exhandler_kern.c b/tools/testing/selftests/bpf/progs/exhandler_kern.c
new file mode 100644
index 000000000000..f5ca142abf8f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/exhandler_kern.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021, Oracle and/or its affiliates. */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+char _license[] SEC("license") = "GPL";
+
+unsigned int exception_triggered;
+int test_pid;
+
+/* TRACE_EVENT(task_newtask,
+ * TP_PROTO(struct task_struct *p, u64 clone_flags)
+ */
+SEC("tp_btf/task_newtask")
+int BPF_PROG(trace_task_newtask, struct task_struct *task, u64 clone_flags)
+{
+ int pid = bpf_get_current_pid_tgid() >> 32;
+ struct callback_head *work;
+ void *func;
+
+ if (test_pid != pid)
+ return 0;
+
+ /* To verify we hit an exception we dereference task->task_works->func.
+ * If task work has been added,
+ * - task->task_works is non-NULL; and
+ * - task->task_works->func is non-NULL also (the callback function
+ * must be specified for the task work).
+ *
+ * However, for a newly-created task, task->task_works is NULLed,
+ * so we know the exception handler triggered if task_works is
+ * NULL and func is NULL.
+ */
+ work = task->task_works;
+ func = work->func;
+ if (!work && !func)
+ exception_triggered++;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
index 49a84a3a2306..48cd14b43741 100644
--- a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
@@ -73,7 +73,7 @@ int test_subprog2(struct args_subprog2 *ctx)
__builtin_preserve_access_index(&skb->len));
ret = ctx->ret;
- /* bpf_prog_load() loads "test_pkt_access.o" with BPF_F_TEST_RND_HI32
+ /* bpf_prog_test_load() loads "test_pkt_access.o" with BPF_F_TEST_RND_HI32
* which randomizes upper 32 bits after BPF_ALU32 insns.
* Hence after 'w0 <<= 1' upper bits of $rax are random.
* That is expected and correct. Trim them.
diff --git a/tools/testing/selftests/bpf/progs/find_vma.c b/tools/testing/selftests/bpf/progs/find_vma.c
new file mode 100644
index 000000000000..38034fb82530
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/find_vma.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct callback_ctx {
+ int dummy;
+};
+
+#define VM_EXEC 0x00000004
+#define DNAME_INLINE_LEN 32
+
+pid_t target_pid = 0;
+char d_iname[DNAME_INLINE_LEN] = {0};
+__u32 found_vm_exec = 0;
+__u64 addr = 0;
+int find_zero_ret = -1;
+int find_addr_ret = -1;
+
+static long check_vma(struct task_struct *task, struct vm_area_struct *vma,
+ struct callback_ctx *data)
+{
+ if (vma->vm_file)
+ bpf_probe_read_kernel_str(d_iname, DNAME_INLINE_LEN - 1,
+ vma->vm_file->f_path.dentry->d_iname);
+
+ /* check for VM_EXEC */
+ if (vma->vm_flags & VM_EXEC)
+ found_vm_exec = 1;
+
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handle_getpid(void)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct callback_ctx data = {};
+
+ if (task->pid != target_pid)
+ return 0;
+
+ find_addr_ret = bpf_find_vma(task, addr, check_vma, &data, 0);
+
+ /* this should return -ENOENT */
+ find_zero_ret = bpf_find_vma(task, 0, check_vma, &data, 0);
+ return 0;
+}
+
+SEC("perf_event")
+int handle_pe(void)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct callback_ctx data = {};
+
+ if (task->pid != target_pid)
+ return 0;
+
+ find_addr_ret = bpf_find_vma(task, addr, check_vma, &data, 0);
+
+ /* In NMI, this should return -EBUSY, as the previous call is using
+ * the irq_work.
+ */
+ find_zero_ret = bpf_find_vma(task, 0, check_vma, &data, 0);
+ return 0;
+}
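
For reference, the helper the two programs above exercise has the following
contract; the return values match the in-line comments (a summary sketch, not
new code in this patch):

	/*
	 * long bpf_find_vma(struct task_struct *task, u64 addr,
	 *		     void *callback_fn, void *callback_ctx, u64 flags)
	 *
	 * Finds the VMA of @task containing @addr and invokes
	 * callback_fn(task, vma, callback_ctx) with the mm lock held.
	 * flags must currently be 0. Returns 0 on success, -ENOENT when
	 * no VMA covers @addr, and -EBUSY when the lock cannot be taken,
	 * e.g. from NMI context while a previous call's irq_work is
	 * still pending.
	 */
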
diff --git a/tools/testing/selftests/bpf/progs/find_vma_fail1.c b/tools/testing/selftests/bpf/progs/find_vma_fail1.c
new file mode 100644
index 000000000000..b3b326b8e2d1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/find_vma_fail1.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct callback_ctx {
+ int dummy;
+};
+
+static long write_vma(struct task_struct *task, struct vm_area_struct *vma,
+ struct callback_ctx *data)
+{
+ /* writing to vma, which is illegal */
+ vma->vm_flags |= 0x55;
+
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handle_getpid(void)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct callback_ctx data = {};
+
+ bpf_find_vma(task, 0, write_vma, &data, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/find_vma_fail2.c b/tools/testing/selftests/bpf/progs/find_vma_fail2.c
new file mode 100644
index 000000000000..9bcf3203e26b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/find_vma_fail2.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct callback_ctx {
+ int dummy;
+};
+
+static long write_task(struct task_struct *task, struct vm_area_struct *vma,
+ struct callback_ctx *data)
+{
+ /* writing to task, which is illegal */
+ task->mm = NULL;
+
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handle_getpid(void)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ struct callback_ctx data = {};
+
+ bpf_find_vma(task, 0, write_task, &data, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_l4lb.c b/tools/testing/selftests/bpf/progs/test_l4lb.c
index 04fee08863cb..c26057ec46dc 100644
--- a/tools/testing/selftests/bpf/progs/test_l4lb.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb.c
@@ -448,7 +448,7 @@ static __always_inline int process_packet(void *data, __u64 off, void *data_end,
return bpf_redirect(ifindex, 0);
}
-SEC("l4lb-demo")
+SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
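
This and the next several program files rename free-form section names ahead of
libbpf 1.0, which resolves program types only from recognized ELF section
prefixes ("tc", "cgroup/skb", "tp/...", and so on). The pattern, as a
standalone sketch:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	/* SEC("l4lb-demo") no longer maps to a program type; a known
	 * prefix such as "tc" selects BPF_PROG_TYPE_SCHED_CLS.
	 */
	SEC("tc")
	int demo_ingress(struct __sk_buff *skb)
	{
		return 0;	/* TC_ACT_OK: pass the packet */
	}

	char _license[] SEC("license") = "GPL";
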
diff --git a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
index b9e2753f4f91..19e4d2071c60 100644
--- a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
@@ -447,7 +447,7 @@ static __noinline int process_packet(void *data, __u64 off, void *data_end,
return bpf_redirect(ifindex, 0);
}
-SEC("l4lb-demo")
+SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_map_lock.c b/tools/testing/selftests/bpf/progs/test_map_lock.c
index b5c07ae7b68f..acf073db9e8b 100644
--- a/tools/testing/selftests/bpf/progs/test_map_lock.c
+++ b/tools/testing/selftests/bpf/progs/test_map_lock.c
@@ -30,7 +30,7 @@ struct {
__type(value, struct array_elem);
} array_map SEC(".maps");
-SEC("map_lock_demo")
+SEC("cgroup/skb")
int bpf_map_lock_test(struct __sk_buff *skb)
{
struct hmap_elem zero = {}, *val;
diff --git a/tools/testing/selftests/bpf/progs/test_queue_stack_map.h b/tools/testing/selftests/bpf/progs/test_queue_stack_map.h
index 0fcd3ff0e38a..648e8cab7a23 100644
--- a/tools/testing/selftests/bpf/progs/test_queue_stack_map.h
+++ b/tools/testing/selftests/bpf/progs/test_queue_stack_map.h
@@ -24,7 +24,7 @@ struct {
__uint(value_size, sizeof(__u32));
} map_out SEC(".maps");
-SEC("test")
+SEC("tc")
int _test(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
index 19d2465d9442..83b0aaa52ef7 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_lookup.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
@@ -84,6 +84,14 @@ int lookup_drop(struct bpf_sk_lookup *ctx)
return SK_DROP;
}
+SEC("sk_lookup")
+int check_ifindex(struct bpf_sk_lookup *ctx)
+{
+ if (ctx->ingress_ifindex == 1)
+ return SK_DROP;
+ return SK_PASS;
+}
+
SEC("sk_reuseport")
int reuseport_pass(struct sk_reuseport_md *ctx)
{
diff --git a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
index 8e94e5c080aa..6dc1f28fc4b6 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
@@ -68,7 +68,7 @@ static void set_task_info(struct sock *sk)
}
SEC("fentry/inet_csk_listen_start")
-int BPF_PROG(trace_inet_csk_listen_start, struct sock *sk, int backlog)
+int BPF_PROG(trace_inet_csk_listen_start, struct sock *sk)
{
set_task_info(sk);
diff --git a/tools/testing/selftests/bpf/progs/test_skb_ctx.c b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
index 1d61b36e6067..c482110cfc95 100644
--- a/tools/testing/selftests/bpf/progs/test_skb_ctx.c
+++ b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
@@ -5,7 +5,7 @@
char _license[] SEC("license") = "GPL";
-SEC("skb_ctx")
+SEC("tc")
int process(struct __sk_buff *skb)
{
#pragma clang loop unroll(full)
diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock.c b/tools/testing/selftests/bpf/progs/test_spin_lock.c
index 0d31a3b3505f..7e88309d3229 100644
--- a/tools/testing/selftests/bpf/progs/test_spin_lock.c
+++ b/tools/testing/selftests/bpf/progs/test_spin_lock.c
@@ -45,7 +45,7 @@ struct {
#define CREDIT_PER_NS(delta, rate) (((delta) * rate) >> 20)
-SEC("spin_lock_demo")
+SEC("tc")
int bpf_sping_lock_test(struct __sk_buff *skb)
{
volatile int credit = 0, max_credit = 100, pkt_len = 64;
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_estats.c b/tools/testing/selftests/bpf/progs/test_tcp_estats.c
index 2c5c602c6011..e2ae049c2f85 100644
--- a/tools/testing/selftests/bpf/progs/test_tcp_estats.c
+++ b/tools/testing/selftests/bpf/progs/test_tcp_estats.c
@@ -244,7 +244,7 @@ static __always_inline void send_basic_event(struct sock *sk,
bpf_map_update_elem(&ev_record_map, &key, &ev, BPF_ANY);
}
-SEC("dummy_tracepoint")
+SEC("tp/dummy/tracepoint")
int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
{
if (!arg->sock)
diff --git a/tools/testing/selftests/bpf/test_btf.h b/tools/testing/selftests/bpf/test_btf.h
index 32c7a57867da..128989bed8b7 100644
--- a/tools/testing/selftests/bpf/test_btf.h
+++ b/tools/testing/selftests/bpf/test_btf.h
@@ -72,4 +72,7 @@
#define BTF_DECL_TAG_ENC(value, type, component_idx) \
BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), type), (component_idx)
+#define BTF_TYPE_TAG_ENC(value, type) \
+ BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TYPE_TAG, 0, 0), type)
+
#endif /* _TEST_BTF_H */
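
BTF_TYPE_TAG_ENC follows the same shape as the encoders above: a
BTF_KIND_TYPE_TAG entry whose name is the tag string and whose type member
points at the annotated type. An illustrative raw type section, using the
NAME_TBD placeholder style of the BTF tests (string offsets are not real):

	static __u32 raw_types[] = {
		/* [1] int, 32-bit signed */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
		/* [2] type_tag "tag1" applied on top of type [1] */
		BTF_TYPE_TAG_ENC(NAME_TBD, 1),
	};
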
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
index 0cda61da5d39..a63787e7bb1a 100644
--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
+++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
@@ -8,6 +8,7 @@
#include "bpf_rlimit.h"
#include "cgroup_helpers.h"
+#include "testing_helpers.h"
char bpf_log_buf[BPF_LOG_BUF_SIZE];
@@ -66,7 +67,7 @@ int main(int argc, char **argv)
prog[0].imm = percpu_map_fd;
prog[7].imm = map_fd;
- prog_fd = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
+ prog_fd = bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
if (prog_fd < 0) {
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c
index 804dddd97d4c..c299d3452695 100644
--- a/tools/testing/selftests/bpf/test_dev_cgroup.c
+++ b/tools/testing/selftests/bpf/test_dev_cgroup.c
@@ -14,6 +14,7 @@
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
+#include "testing_helpers.h"
#include "bpf_rlimit.h"
#define DEV_CGROUP_PROG "./dev_cgroup.o"
@@ -27,7 +28,7 @@ int main(int argc, char **argv)
int prog_fd, cgroup_fd;
__u32 prog_cnt;
- if (bpf_prog_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE,
+ if (bpf_prog_test_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE,
&obj, &prog_fd)) {
printf("Failed to load DEV_CGROUP program\n");
goto out;
diff --git a/tools/testing/selftests/bpf/test_lirc_mode2_user.c b/tools/testing/selftests/bpf/test_lirc_mode2_user.c
index fb5fd6841ef3..ebf68dce5504 100644
--- a/tools/testing/selftests/bpf/test_lirc_mode2_user.c
+++ b/tools/testing/selftests/bpf/test_lirc_mode2_user.c
@@ -45,6 +45,8 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
+#include "testing_helpers.h"
+
int main(int argc, char **argv)
{
struct bpf_object *obj;
@@ -58,8 +60,8 @@ int main(int argc, char **argv)
return 2;
}
- ret = bpf_prog_load("test_lirc_mode2_kern.o",
- BPF_PROG_TYPE_LIRC_MODE2, &obj, &progfd);
+ ret = bpf_prog_test_load("test_lirc_mode2_kern.o",
+ BPF_PROG_TYPE_LIRC_MODE2, &obj, &progfd);
if (ret) {
printf("Failed to load bpf program\n");
return 1;
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
index 7e9049fa3edf..7f3d1d8460b4 100644
--- a/tools/testing/selftests/bpf/test_lru_map.c
+++ b/tools/testing/selftests/bpf/test_lru_map.c
@@ -42,7 +42,6 @@ static int create_map(int map_type, int map_flags, unsigned int size)
static int bpf_map_lookup_elem_with_ref_bit(int fd, unsigned long long key,
void *value)
{
- struct bpf_load_program_attr prog;
struct bpf_create_map_attr map;
struct bpf_insn insns[] = {
BPF_LD_MAP_VALUE(BPF_REG_9, 0, 0),
@@ -76,13 +75,7 @@ static int bpf_map_lookup_elem_with_ref_bit(int fd, unsigned long long key,
insns[0].imm = mfd;
- memset(&prog, 0, sizeof(prog));
- prog.prog_type = BPF_PROG_TYPE_SCHED_CLS;
- prog.insns = insns;
- prog.insns_cnt = ARRAY_SIZE(insns);
- prog.license = "GPL";
-
- pfd = bpf_load_program_xattr(&prog, NULL, 0);
+ pfd = bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, NULL, "GPL", insns, ARRAY_SIZE(insns), NULL);
if (pfd < 0) {
close(mfd);
return -1;
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index c7a36a9378f8..8b31bc1a801d 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -25,6 +25,7 @@
#include "bpf_util.h"
#include "bpf_rlimit.h"
#include "test_maps.h"
+#include "testing_helpers.h"
#ifndef ENOTSUPP
#define ENOTSUPP 524
@@ -830,21 +831,21 @@ static void test_sockmap(unsigned int tasks, void *data)
}
/* Load SK_SKB program and Attach */
- err = bpf_prog_load(SOCKMAP_PARSE_PROG,
+ err = bpf_prog_test_load(SOCKMAP_PARSE_PROG,
BPF_PROG_TYPE_SK_SKB, &obj, &parse_prog);
if (err) {
printf("Failed to load SK_SKB parse prog\n");
goto out_sockmap;
}
- err = bpf_prog_load(SOCKMAP_TCP_MSG_PROG,
+ err = bpf_prog_test_load(SOCKMAP_TCP_MSG_PROG,
BPF_PROG_TYPE_SK_MSG, &obj, &msg_prog);
if (err) {
printf("Failed to load SK_SKB msg prog\n");
goto out_sockmap;
}
- err = bpf_prog_load(SOCKMAP_VERDICT_PROG,
+ err = bpf_prog_test_load(SOCKMAP_VERDICT_PROG,
BPF_PROG_TYPE_SK_SKB, &obj, &verdict_prog);
if (err) {
printf("Failed to load SK_SKB verdict prog\n");
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index 9613f7538840..e8edd3dd3ec2 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -328,18 +328,17 @@ static size_t probe_prog_length(const struct bpf_insn *fp)
static int load_sock_prog(const struct bpf_insn *prog,
enum bpf_attach_type attach_type)
{
- struct bpf_load_program_attr attr;
- int ret;
-
- memset(&attr, 0, sizeof(struct bpf_load_program_attr));
- attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
- attr.expected_attach_type = attach_type;
- attr.insns = prog;
- attr.insns_cnt = probe_prog_length(attr.insns);
- attr.license = "GPL";
- attr.log_level = 2;
-
- ret = bpf_load_program_xattr(&attr, bpf_log_buf, BPF_LOG_BUF_SIZE);
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+ int ret, insn_cnt;
+
+ insn_cnt = probe_prog_length(prog);
+
+ opts.expected_attach_type = attach_type;
+ opts.log_buf = bpf_log_buf;
+ opts.log_size = BPF_LOG_BUF_SIZE;
+ opts.log_level = 2;
+
+ ret = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", prog, insn_cnt, &opts);
if (verbose && ret < 0)
fprintf(stderr, "%s\n", bpf_log_buf);
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index aa3f185fcb89..05c9e4944c01 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -645,17 +645,14 @@ static int mk_sockaddr(int domain, const char *ip, unsigned short port,
static int load_insns(const struct sock_addr_test *test,
const struct bpf_insn *insns, size_t insns_cnt)
{
- struct bpf_load_program_attr load_attr;
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
int ret;
- memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
- load_attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
- load_attr.expected_attach_type = test->expected_attach_type;
- load_attr.insns = insns;
- load_attr.insns_cnt = insns_cnt;
- load_attr.license = "GPL";
+ opts.expected_attach_type = test->expected_attach_type;
+ opts.log_buf = bpf_log_buf;
+ opts.log_size = BPF_LOG_BUF_SIZE;
- ret = bpf_load_program_xattr(&load_attr, bpf_log_buf, BPF_LOG_BUF_SIZE);
+ ret = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, NULL, "GPL", insns, insns_cnt, &opts);
if (ret < 0 && test->expected_result != LOAD_REJECT) {
log_err(">>> Loading program error.\n"
">>> Verifier output:\n%s\n-------\n", bpf_log_buf);
diff --git a/tools/testing/selftests/bpf/test_stub.c b/tools/testing/selftests/bpf/test_stub.c
deleted file mode 100644
index 47e132726203..000000000000
--- a/tools/testing/selftests/bpf/test_stub.c
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-/* Copyright (C) 2019 Netronome Systems, Inc. */
-
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-#include <string.h>
-
-int extra_prog_load_log_flags = 0;
-
-int bpf_prog_test_load(const char *file, enum bpf_prog_type type,
- struct bpf_object **pobj, int *prog_fd)
-{
- struct bpf_prog_load_attr attr;
-
- memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
- attr.file = file;
- attr.prog_type = type;
- attr.expected_attach_type = 0;
- attr.prog_flags = BPF_F_TEST_RND_HI32;
- attr.log_level = extra_prog_load_log_flags;
-
- return bpf_prog_load_xattr(&attr, pobj, prog_fd);
-}
-
-int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
- size_t insns_cnt, const char *license,
- __u32 kern_version, char *log_buf,
- size_t log_buf_sz)
-{
- struct bpf_load_program_attr load_attr;
-
- memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
- load_attr.prog_type = type;
- load_attr.expected_attach_type = 0;
- load_attr.name = NULL;
- load_attr.insns = insns;
- load_attr.insns_cnt = insns_cnt;
- load_attr.license = license;
- load_attr.kern_version = kern_version;
- load_attr.prog_flags = BPF_F_TEST_RND_HI32;
- load_attr.log_level = extra_prog_load_log_flags;
-
- return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz);
-}
diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/test_sysctl.c
index a3bb6d399daa..4f6cf833b522 100644
--- a/tools/testing/selftests/bpf/test_sysctl.c
+++ b/tools/testing/selftests/bpf/test_sysctl.c
@@ -17,6 +17,7 @@
#include "bpf_rlimit.h"
#include "bpf_util.h"
#include "cgroup_helpers.h"
+#include "testing_helpers.h"
#define CG_PATH "/foo"
#define MAX_INSNS 512
@@ -1435,14 +1436,10 @@ static int load_sysctl_prog_insns(struct sysctl_test *test,
const char *sysctl_path)
{
struct bpf_insn *prog = test->insns;
- struct bpf_load_program_attr attr;
- int ret;
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+ int ret, insn_cnt;
- memset(&attr, 0, sizeof(struct bpf_load_program_attr));
- attr.prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL;
- attr.insns = prog;
- attr.insns_cnt = probe_prog_length(attr.insns);
- attr.license = "GPL";
+ insn_cnt = probe_prog_length(prog);
if (test->fixup_value_insn) {
char buf[128];
@@ -1465,7 +1462,10 @@ static int load_sysctl_prog_insns(struct sysctl_test *test,
return -1;
}
- ret = bpf_load_program_xattr(&attr, bpf_log_buf, BPF_LOG_BUF_SIZE);
+ opts.log_buf = bpf_log_buf;
+ opts.log_size = BPF_LOG_BUF_SIZE;
+
+ ret = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SYSCTL, NULL, "GPL", prog, insn_cnt, &opts);
if (ret < 0 && test->result != LOAD_REJECT) {
log_err(">>> Loading program error.\n"
">>> Verifier output:\n%s\n-------\n", bpf_log_buf);
@@ -1476,15 +1476,10 @@ static int load_sysctl_prog_insns(struct sysctl_test *test,
static int load_sysctl_prog_file(struct sysctl_test *test)
{
- struct bpf_prog_load_attr attr;
struct bpf_object *obj;
int prog_fd;
- memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
- attr.file = test->prog_file;
- attr.prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL;
-
- if (bpf_prog_load_xattr(&attr, &obj, &prog_fd)) {
+ if (bpf_prog_test_load(test->prog_file, BPF_PROG_TYPE_CGROUP_SYSCTL, &obj, &prog_fd)) {
if (test->result != LOAD_REJECT)
log_err(">>> Loading program (%s) error.\n",
test->prog_file);
diff --git a/tools/testing/selftests/bpf/test_tag.c b/tools/testing/selftests/bpf/test_tag.c
index 6272c784ca2a..5c7bea525626 100644
--- a/tools/testing/selftests/bpf/test_tag.c
+++ b/tools/testing/selftests/bpf/test_tag.c
@@ -21,6 +21,7 @@
#include "../../../include/linux/filter.h"
#include "bpf_rlimit.h"
+#include "testing_helpers.h"
static struct bpf_insn prog[BPF_MAXINSNS];
@@ -57,7 +58,7 @@ static int bpf_try_load_prog(int insns, int fd_map,
int fd_prog;
bpf_filler(insns, fd_map);
- fd_prog = bpf_load_program(BPF_PROG_TYPE_SCHED_CLS, prog, insns, "", 0,
+ fd_prog = bpf_test_load_program(BPF_PROG_TYPE_SCHED_CLS, prog, insns, "", 0,
NULL, 0);
assert(fd_prog > 0);
if (fd_map > 0)
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c
index 4a39304cc5a6..4c5114765b23 100644
--- a/tools/testing/selftests/bpf/test_tcpnotify_user.c
+++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c
@@ -25,6 +25,7 @@
#include "test_tcpnotify.h"
#include "trace_helpers.h"
+#include "testing_helpers.h"
#define SOCKET_BUFFER_SIZE (getpagesize() < 8192L ? getpagesize() : 8192L)
@@ -71,7 +72,6 @@ int main(int argc, char **argv)
{
const char *file = "test_tcpnotify_kern.o";
struct bpf_map *perf_map, *global_map;
- struct perf_buffer_opts pb_opts = {};
struct tcpnotify_globals g = {0};
struct perf_buffer *pb = NULL;
const char *cg_path = "/foo";
@@ -92,7 +92,7 @@ int main(int argc, char **argv)
if (cg_fd < 0)
goto err;
- if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) {
+ if (bpf_prog_test_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) {
printf("FAILED: load_bpf_file failed for: %s\n", file);
goto err;
}
@@ -116,8 +116,7 @@ int main(int argc, char **argv)
return -1;
}
- pb_opts.sample_cb = dummyfn;
- pb = perf_buffer__new(bpf_map__fd(perf_map), 8, &pb_opts);
+ pb = perf_buffer__new(bpf_map__fd(perf_map), 8, dummyfn, NULL, NULL, NULL);
if (!pb)
goto err;
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 465ef3f112c0..9fbef396a716 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -499,8 +499,7 @@ static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret)
BPF_EXIT_INSN(),
};
- return bpf_load_program(prog_type, prog,
- ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
+ return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL);
}
static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd,
@@ -515,8 +514,7 @@ static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd,
BPF_EXIT_INSN(),
};
- return bpf_load_program(prog_type, prog,
- ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
+ return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL);
}
static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
@@ -1089,7 +1087,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
int fd_prog, expected_ret, alignment_prevented_execution;
int prog_len, prog_type = test->prog_type;
struct bpf_insn *prog = test->insns;
- struct bpf_load_program_attr attr;
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
int run_errs, run_successes;
int map_fds[MAX_NR_MAPS];
const char *expected_err;
@@ -1129,32 +1127,34 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
test->result_unpriv : test->result;
expected_err = unpriv && test->errstr_unpriv ?
test->errstr_unpriv : test->errstr;
- memset(&attr, 0, sizeof(attr));
- attr.prog_type = prog_type;
- attr.expected_attach_type = test->expected_attach_type;
- attr.insns = prog;
- attr.insns_cnt = prog_len;
- attr.license = "GPL";
+
+ opts.expected_attach_type = test->expected_attach_type;
if (verbose)
- attr.log_level = 1;
+ opts.log_level = 1;
else if (expected_ret == VERBOSE_ACCEPT)
- attr.log_level = 2;
+ opts.log_level = 2;
else
- attr.log_level = 4;
- attr.prog_flags = pflags;
+ opts.log_level = 4;
+ opts.prog_flags = pflags;
if (prog_type == BPF_PROG_TYPE_TRACING && test->kfunc) {
- attr.attach_btf_id = libbpf_find_vmlinux_btf_id(test->kfunc,
- attr.expected_attach_type);
- if (attr.attach_btf_id < 0) {
+ int attach_btf_id;
+
+ attach_btf_id = libbpf_find_vmlinux_btf_id(test->kfunc,
+ opts.expected_attach_type);
+ if (attach_btf_id < 0) {
printf("FAIL\nFailed to find BTF ID for '%s'!\n",
test->kfunc);
(*errors)++;
return;
}
+
+ opts.attach_btf_id = attach_btf_id;
}
- fd_prog = bpf_load_program_xattr(&attr, bpf_vlog, sizeof(bpf_vlog));
+ opts.log_buf = bpf_vlog;
+ opts.log_size = sizeof(bpf_vlog);
+ fd_prog = bpf_prog_load(prog_type, NULL, "GPL", prog, prog_len, &opts);
saved_errno = errno;
/* BPF_PROG_TYPE_TRACING requires more setup and
diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
index 800d503e5cb4..52c2f24e0898 100644
--- a/tools/testing/selftests/bpf/testing_helpers.c
+++ b/tools/testing/selftests/bpf/testing_helpers.c
@@ -1,7 +1,11 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
/* Copyright (C) 2020 Facebook, Inc. */
#include <stdlib.h>
+#include <string.h>
#include <errno.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
#include "testing_helpers.h"
int parse_num_list(const char *s, bool **num_set, int *num_set_len)
@@ -78,3 +82,59 @@ __u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info)
}
return info->prog_id;
}
+
+int extra_prog_load_log_flags = 0;
+
+int bpf_prog_test_load(const char *file, enum bpf_prog_type type,
+ struct bpf_object **pobj, int *prog_fd)
+{
+ struct bpf_object_load_attr attr = {};
+ struct bpf_object *obj;
+ struct bpf_program *prog;
+ int err;
+
+ obj = bpf_object__open(file);
+ if (!obj)
+ return -errno;
+
+ prog = bpf_object__next_program(obj, NULL);
+ if (!prog) {
+ err = -ENOENT;
+ goto err_out;
+ }
+
+ if (type != BPF_PROG_TYPE_UNSPEC)
+ bpf_program__set_type(prog, type);
+
+ bpf_program__set_extra_flags(prog, BPF_F_TEST_RND_HI32);
+
+ attr.obj = obj;
+ attr.log_level = extra_prog_load_log_flags;
+ err = bpf_object__load_xattr(&attr);
+ if (err)
+ goto err_out;
+
+ *pobj = obj;
+ *prog_fd = bpf_program__fd(prog);
+
+ return 0;
+err_out:
+ bpf_object__close(obj);
+ return err;
+}
+
+int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
+ size_t insns_cnt, const char *license,
+ __u32 kern_version, char *log_buf,
+ size_t log_buf_sz)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .kern_version = kern_version,
+ .prog_flags = BPF_F_TEST_RND_HI32,
+ .log_level = extra_prog_load_log_flags,
+ .log_buf = log_buf,
+ .log_size = log_buf_sz,
+ );
+
+ return bpf_prog_load(type, NULL, license, insns, insns_cnt, &opts);
+}
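
The two wrappers keep the selftests' old two-call interface while routing
loads through the current bpf_object / bpf_prog_load paths, and unconditionally
set BPF_F_TEST_RND_HI32 so hi32 randomization stays covered. A typical call
site, mirroring the conversions earlier in this patch (object name
illustrative):

	#include <bpf/libbpf.h>
	#include "testing_helpers.h"

	static int run_one(void)
	{
		struct bpf_object *obj;
		int prog_fd, err;

		err = bpf_prog_test_load("./test_xdp.o", BPF_PROG_TYPE_XDP,
					 &obj, &prog_fd);
		if (err)
			return err;
		/* ... drive prog_fd, e.g. via bpf_prog_test_run() ... */
		bpf_object__close(obj);
		return 0;
	}
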
diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h
index d4f8e749611b..f46ebc476ee8 100644
--- a/tools/testing/selftests/bpf/testing_helpers.h
+++ b/tools/testing/selftests/bpf/testing_helpers.h
@@ -6,3 +6,9 @@
int parse_num_list(const char *s, bool **set, int *set_len);
__u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info);
+int bpf_prog_test_load(const char *file, enum bpf_prog_type type,
+ struct bpf_object **pobj, int *prog_fd);
+int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
+ size_t insns_cnt, const char *license,
+ __u32 kern_version, char *log_buf,
+ size_t log_buf_sz);
diff --git a/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c b/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c
index d78627be060f..a2b006e2fd06 100644
--- a/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c
+++ b/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c
@@ -229,6 +229,24 @@
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_port)),
+ /* 1-byte read from ingress_ifindex field */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_sk_lookup, ingress_ifindex)),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_sk_lookup, ingress_ifindex) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_sk_lookup, ingress_ifindex) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_sk_lookup, ingress_ifindex) + 3),
+ /* 2-byte read from ingress_ifindex field */
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_sk_lookup, ingress_ifindex)),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_sk_lookup, ingress_ifindex) + 2),
+ /* 4-byte read from ingress_ifindex field */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_sk_lookup, ingress_ifindex)),
+
/* 8-byte read from sk field */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, sk)),
@@ -351,6 +369,20 @@
.expected_attach_type = BPF_SK_LOOKUP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
+{
+ "invalid 8-byte read from bpf_sk_lookup ingress_ifindex field",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_sk_lookup, ingress_ifindex)),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+ .expected_attach_type = BPF_SK_LOOKUP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
/* invalid 1,2,4-byte reads from 8-byte fields in bpf_sk_lookup */
{
"invalid 4-byte read from bpf_sk_lookup sk field",
diff --git a/tools/testing/selftests/bpf/xdping.c b/tools/testing/selftests/bpf/xdping.c
index 30f12637f4e4..baa870a759a2 100644
--- a/tools/testing/selftests/bpf/xdping.c
+++ b/tools/testing/selftests/bpf/xdping.c
@@ -22,6 +22,7 @@
#include "bpf/libbpf.h"
#include "xdping.h"
+#include "testing_helpers.h"
static int ifindex;
static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
@@ -173,7 +174,7 @@ int main(int argc, char **argv)
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
- if (bpf_prog_load(filename, BPF_PROG_TYPE_XDP, &obj, &prog_fd)) {
+ if (bpf_prog_test_load(filename, BPF_PROG_TYPE_XDP, &obj, &prog_fd)) {
fprintf(stderr, "load of %s failed\n", filename);
return 1;
}
diff --git a/tools/testing/selftests/bpf/xdpxceiver.c b/tools/testing/selftests/bpf/xdpxceiver.c
index 6c7cf8aadc79..fe7f423b8c3f 100644
--- a/tools/testing/selftests/bpf/xdpxceiver.c
+++ b/tools/testing/selftests/bpf/xdpxceiver.c
@@ -744,7 +744,6 @@ static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *
struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream);
struct xsk_umem_info *umem = xsk->umem;
u32 idx_rx = 0, idx_fq = 0, rcvd, i;
- u32 total = 0;
int ret;
while (pkt) {
@@ -799,7 +798,6 @@ static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *
pthread_mutex_lock(&pacing_mutex);
pkts_in_flight -= rcvd;
- total += rcvd;
if (pkts_in_flight < umem->num_frames)
pthread_cond_signal(&pacing_cond);
pthread_mutex_unlock(&pacing_mutex);
diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
index 3313566ce906..7caa4f0e067d 100755
--- a/tools/testing/selftests/net/fcnal-test.sh
+++ b/tools/testing/selftests/net/fcnal-test.sh
@@ -66,6 +66,10 @@ NSB_LO_IP=172.16.2.2
NSA_LO_IP6=2001:db8:2::1
NSB_LO_IP6=2001:db8:2::2
+# non-local addresses for freebind tests
+NL_IP=172.17.1.1
+NL_IP6=2001:db8:4::1
+
MD5_PW=abc123
MD5_WRONG_PW=abc1234
@@ -316,6 +320,9 @@ addr2str()
${NSB_LO_IP6}) echo "ns-B loopback IPv6";;
${NSB_LINKIP6}|${NSB_LINKIP6}%*) echo "ns-B IPv6 LLA";;
+ ${NL_IP}) echo "nonlocal IP";;
+ ${NL_IP6}) echo "nonlocal IPv6";;
+
${VRF_IP}) echo "VRF IP";;
${VRF_IP6}) echo "VRF IPv6";;
@@ -1768,6 +1775,14 @@ ipv4_addr_bind_novrf()
done
#
+ # raw socket with nonlocal bind
+ #
+ a=${NL_IP}
+ log_start
+ run_cmd nettest -s -R -P icmp -f -l ${a} -I ${NSA_DEV} -b
+ log_test_addr ${a} $? 0 "Raw socket bind to nonlocal address after device bind"
+
+ #
# tcp sockets
#
a=${NSA_IP}
@@ -1816,6 +1831,14 @@ ipv4_addr_bind_vrf()
log_test_addr ${a} $? 1 "Raw socket bind to out of scope address after VRF bind"
#
+ # raw socket with nonlocal bind
+ #
+ a=${NL_IP}
+ log_start
+ run_cmd nettest -s -R -P icmp -f -l ${a} -I ${VRF} -b
+ log_test_addr ${a} $? 0 "Raw socket bind to nonlocal address after VRF bind"
+
+ #
# tcp sockets
#
for a in ${NSA_IP} ${VRF_IP}
@@ -1965,6 +1988,7 @@ ipv4_rt()
a=${NSA_IP}
log_start
+
run_cmd nettest ${varg} -s &
sleep 1
run_cmd nettest ${varg} -d ${NSA_DEV} -r ${a} &
@@ -3403,6 +3427,14 @@ ipv6_addr_bind_novrf()
done
#
+ # raw socket with nonlocal bind
+ #
+ a=${NL_IP6}
+ log_start
+ run_cmd nettest -6 -s -R -P icmp -f -l ${a} -I ${NSA_DEV} -b
+ log_test_addr ${a} $? 0 "Raw socket bind to nonlocal address"
+
+ #
# tcp sockets
#
a=${NSA_IP6}
@@ -3444,6 +3476,14 @@ ipv6_addr_bind_vrf()
log_test_addr ${a} $? 1 "Raw socket bind to invalid local address after vrf bind"
#
+ # raw socket with nonlocal bind
+ #
+ a=${NL_IP6}
+ log_start
+ run_cmd nettest -6 -s -R -P icmp -f -l ${a} -I ${VRF} -b
+ log_test_addr ${a} $? 0 "Raw socket bind to nonlocal address after VRF bind"
+
+ #
# tcp sockets
#
# address on enslaved device is valid for the VRF or device in a VRF
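
The new cases rely on nettest's -f switch (added in the nettest.c hunk below),
which turns on IP_FREEBIND / IPV6_FREEBIND before bind() so a socket can bind
to an address not assigned to any local interface. The userspace pattern,
sketched here for IPv4:

	#include <netinet/in.h>
	#include <sys/socket.h>

	static int bind_nonlocal(int sd, const struct sockaddr_in *sin)
	{
		int one = 1;

		/* must precede bind(); IPPROTO_IP (== SOL_IP) for AF_INET */
		if (setsockopt(sd, IPPROTO_IP, IP_FREEBIND, &one, sizeof(one)) < 0)
			return -1;
		return bind(sd, (const struct sockaddr *)sin, sizeof(*sin));
	}
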
diff --git a/tools/testing/selftests/net/mptcp/config b/tools/testing/selftests/net/mptcp/config
index 0faaccd21447..419e71560fd1 100644
--- a/tools/testing/selftests/net/mptcp/config
+++ b/tools/testing/selftests/net/mptcp/config
@@ -13,5 +13,9 @@ CONFIG_NFT_COUNTER=m
CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XTABLES=m
CONFIG_NETFILTER_XT_MATCH_BPF=m
-CONFIG_NF_TABLES_IPV4=y
-CONFIG_NF_TABLES_IPV6=y
+CONFIG_NF_TABLES_INET=y
+CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SOCKET=m
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index 95e81d557b08..ada9b80774d4 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -75,7 +75,12 @@ struct cfg_cmsg_types {
unsigned int timestampns:1;
};
+struct cfg_sockopt_types {
+ unsigned int transparent:1;
+};
+
static struct cfg_cmsg_types cfg_cmsg_types;
+static struct cfg_sockopt_types cfg_sockopt_types;
static void die_usage(void)
{
@@ -93,6 +98,7 @@ static void die_usage(void)
fprintf(stderr, "\t-u -- check mptcp ulp\n");
fprintf(stderr, "\t-w num -- wait num sec before closing the socket\n");
fprintf(stderr, "\t-c cmsg -- test cmsg type <cmsg>\n");
+ fprintf(stderr, "\t-o option -- test sockopt <option>\n");
fprintf(stderr,
"\t-P [saveWithPeek|saveAfterPeek] -- save data with/after MSG_PEEK form tcp socket\n");
exit(1);
@@ -185,6 +191,22 @@ static void set_mark(int fd, uint32_t mark)
}
}
+static void set_transparent(int fd, int pf)
+{
+ int one = 1;
+
+ switch (pf) {
+ case AF_INET:
+ if (-1 == setsockopt(fd, SOL_IP, IP_TRANSPARENT, &one, sizeof(one)))
+ perror("IP_TRANSPARENT");
+ break;
+ case AF_INET6:
+ if (-1 == setsockopt(fd, IPPROTO_IPV6, IPV6_TRANSPARENT, &one, sizeof(one)))
+ perror("IPV6_TRANSPARENT");
+ break;
+ }
+}
+
static int sock_listen_mptcp(const char * const listenaddr,
const char * const port)
{
@@ -212,6 +234,9 @@ static int sock_listen_mptcp(const char * const listenaddr,
sizeof(one)))
perror("setsockopt");
+ if (cfg_sockopt_types.transparent)
+ set_transparent(sock, pf);
+
if (bind(sock, a->ai_addr, a->ai_addrlen) == 0)
break; /* success */
@@ -944,6 +969,27 @@ static void parse_cmsg_types(const char *type)
exit(1);
}
+static void parse_setsock_options(const char *name)
+{
+ char *next = strchr(name, ',');
+ unsigned int len = 0;
+
+ if (next) {
+ parse_setsock_options(next + 1);
+ len = next - name;
+ } else {
+ len = strlen(name);
+ }
+
+ if (strncmp(name, "TRANSPARENT", len) == 0) {
+ cfg_sockopt_types.transparent = 1;
+ return;
+ }
+
+ fprintf(stderr, "Unrecognized setsockopt option %s\n", name);
+ exit(1);
+}
+
int main_loop(void)
{
int fd;
@@ -1047,7 +1093,7 @@ static void parse_opts(int argc, char **argv)
{
int c;
- while ((c = getopt(argc, argv, "6jr:lp:s:hut:T:m:S:R:w:M:P:c:")) != -1) {
+ while ((c = getopt(argc, argv, "6jr:lp:s:hut:T:m:S:R:w:M:P:c:o:")) != -1) {
switch (c) {
case 'j':
cfg_join = true;
@@ -1108,6 +1154,9 @@ static void parse_opts(int argc, char **argv)
case 'c':
parse_cmsg_types(optarg);
break;
+ case 'o':
+ parse_setsock_options(optarg);
+ break;
}
}
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index 559173a8e387..a4226b608c68 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -671,6 +671,82 @@ run_tests()
run_tests_lo $1 $2 $3 0
}
+run_test_transparent()
+{
+ local connect_addr="$1"
+ local msg="$2"
+
+ local connector_ns="$ns1"
+ local listener_ns="$ns2"
+ local lret=0
+ local r6flag=""
+
+ # skip if we don't want v6
+ if ! $ipv6 && is_v6 "${connect_addr}"; then
+ return 0
+ fi
+
+ip netns exec "$listener_ns" nft -f /dev/stdin <<"EOF"
+flush ruleset
+table inet mangle {
+ chain divert {
+ type filter hook prerouting priority -150;
+
+ meta l4proto tcp socket transparent 1 meta mark set 1 accept
+ tcp dport 20000 tproxy to :20000 meta mark set 1 accept
+ }
+}
+EOF
+ if [ $? -ne 0 ]; then
+ echo "SKIP: $msg, could not load nft ruleset"
+ return
+ fi
+
+ local local_addr
+ if is_v6 "${connect_addr}"; then
+ local_addr="::"
+ r6flag="-6"
+ else
+ local_addr="0.0.0.0"
+ fi
+
+ ip -net "$listener_ns" $r6flag rule add fwmark 1 lookup 100
+ if [ $? -ne 0 ]; then
+ ip netns exec "$listener_ns" nft flush ruleset
+ echo "SKIP: $msg, ip $r6flag rule failed"
+ return
+ fi
+
+ ip -net "$listener_ns" route add local $local_addr/0 dev lo table 100
+ if [ $? -ne 0 ]; then
+ ip netns exec "$listener_ns" nft flush ruleset
+ ip -net "$listener_ns" $r6flag rule del fwmark 1 lookup 100
+ echo "SKIP: $msg, ip route add local $local_addr failed"
+ return
+ fi
+
+ echo "INFO: test $msg"
+
+ TEST_COUNT=10000
+ local extra_args="-o TRANSPARENT"
+ do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP \
+ ${connect_addr} ${local_addr} "${extra_args}"
+ lret=$?
+
+ ip netns exec "$listener_ns" nft flush ruleset
+ ip -net "$listener_ns" $r6flag rule del fwmark 1 lookup 100
+ ip -net "$listener_ns" route del local $local_addr/0 dev lo table 100
+
+ if [ $lret -ne 0 ]; then
+ echo "FAIL: $msg, mptcp connection error" 1>&2
+ ret=$lret
+ return 1
+ fi
+
+ echo "PASS: $msg"
+ return 0
+}
+
run_tests_peekmode()
{
local peekmode="$1"
@@ -794,5 +870,9 @@ run_tests_peekmode "saveWithPeek"
run_tests_peekmode "saveAfterPeek"
stop_if_error "Tests with peek mode have failed"
+# connect to ns4 ip address, ns2 should intercept/proxy
+run_test_transparent 10.0.3.1 "tproxy ipv4"
+run_test_transparent dead:beef:3::1 "tproxy ipv6"
+
display_time
exit $ret
diff --git a/tools/testing/selftests/net/nettest.c b/tools/testing/selftests/net/nettest.c
index b599003eb5ba..d9a6fd2cd9d3 100644
--- a/tools/testing/selftests/net/nettest.c
+++ b/tools/testing/selftests/net/nettest.c
@@ -85,6 +85,7 @@ struct sock_args {
int version; /* AF_INET/AF_INET6 */
int use_setsockopt;
+ int use_freebind;
int use_cmsg;
const char *dev;
const char *server_dev;
@@ -514,6 +515,29 @@ static int set_membership(int sd, uint32_t grp, uint32_t addr, int ifindex)
return 0;
}
+static int set_freebind(int sd, int version)
+{
+ unsigned int one = 1;
+ int rc = 0;
+
+ switch (version) {
+ case AF_INET:
+ if (setsockopt(sd, SOL_IP, IP_FREEBIND, &one, sizeof(one))) {
+ log_err_errno("setsockopt(IP_FREEBIND)");
+ rc = -1;
+ }
+ break;
+ case AF_INET6:
+ if (setsockopt(sd, SOL_IPV6, IPV6_FREEBIND, &one, sizeof(one))) {
+ log_err_errno("setsockopt(IPV6_FREEBIND");
+ rc = -1;
+ }
+ break;
+ }
+
+ return rc;
+}
+
static int set_broadcast(int sd)
{
unsigned int one = 1;
@@ -1419,6 +1443,9 @@ static int lsock_init(struct sock_args *args)
set_unicast_if(sd, args->ifindex, args->version))
goto err;
+ if (args->use_freebind && set_freebind(sd, args->version))
+ goto err;
+
if (bind_socket(sd, args))
goto err;
@@ -1827,7 +1854,7 @@ static int ipc_parent(int cpid, int fd, struct sock_args *args)
return client_status;
}
-#define GETOPT_STR "sr:l:c:p:t:g:P:DRn:M:X:m:d:I:BN:O:SCi6xL:0:1:2:3:Fbq"
+#define GETOPT_STR "sr:l:c:p:t:g:P:DRn:M:X:m:d:I:BN:O:SCi6xL:0:1:2:3:Fbqf"
#define OPT_FORCE_BIND_KEY_IFINDEX 1001
#define OPT_NO_BIND_KEY_IFINDEX 1002
@@ -1864,6 +1891,7 @@ static void print_usage(char *prog)
" -I dev bind socket to given device name - server mode\n"
" -S use setsockopt (IP_UNICAST_IF or IP_MULTICAST_IF)\n"
" to set device binding\n"
+ " -f bind socket with the IP[V6]_FREEBIND option\n"
" -C use cmsg and IP_PKTINFO to specify device binding\n"
"\n"
" -L len send random message of given length\n"
@@ -1999,6 +2027,9 @@ int main(int argc, char *argv[])
case 'S':
args.use_setsockopt = 1;
break;
+ case 'f':
+ args.use_freebind = 1;
+ break;
case 'C':
args.use_cmsg = 1;
break;