Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/3com/3c509.c | 2
-rw-r--r--  drivers/net/ethernet/3com/3c574_cs.c | 14
-rw-r--r--  drivers/net/ethernet/3com/3c589_cs.c | 14
-rw-r--r--  drivers/net/ethernet/3com/typhoon.c | 6
-rw-r--r--  drivers/net/ethernet/8390/ax88796.c | 2
-rw-r--r--  drivers/net/ethernet/8390/axnet_cs.c | 14
-rw-r--r--  drivers/net/ethernet/8390/pcnet_cs.c | 14
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c | 10
-rw-r--r--  drivers/net/ethernet/adi/bfin_mac.c | 6
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.c | 24
-rw-r--r--  drivers/net/ethernet/alteon/acenic.c | 4
-rw-r--r--  drivers/net/ethernet/amd/7990.c | 2
-rw-r--r--  drivers/net/ethernet/amd/a2065.c | 1
-rw-r--r--  drivers/net/ethernet/amd/am79c961a.c | 1
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c | 6
-rw-r--r--  drivers/net/ethernet/amd/ariadne.c | 1
-rw-r--r--  drivers/net/ethernet/amd/atarilance.c | 6
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.c | 1
-rw-r--r--  drivers/net/ethernet/amd/declance.c | 2
-rw-r--r--  drivers/net/ethernet/amd/mvme147.c | 4
-rw-r--r--  drivers/net/ethernet/amd/ni65.c | 2
-rw-r--r--  drivers/net/ethernet/amd/nmclan_cs.c | 14
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 1
-rw-r--r--  drivers/net/ethernet/amd/sun3lance.c | 7
-rw-r--r--  drivers/net/ethernet/amd/sunlance.c | 9
-rw-r--r--  drivers/net/ethernet/apple/macmace.c | 16
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_hw.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 24
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e.h | 3
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 42
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c | 21
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 21
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atlx.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 73
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 84
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 19
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 58
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 375
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 47
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 377
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 91
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 252
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 258
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 356
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 79
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 21
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 351
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 27
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 77
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 126
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h | 9
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/cnic_if.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/sb1250-mac.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 919
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 30
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.c | 2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 17
-rw-r--r--  drivers/net/ethernet/cadence/at91_ether.c | 80
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 84
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 2
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 18
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/sge.c | 9
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 20
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 55
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 819
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 93
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 256
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 64
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 98
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 40
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 75
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 24
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 14
-rw-r--r--  drivers/net/ethernet/cirrus/cs89x0.c | 74
-rw-r--r--  drivers/net/ethernet/cirrus/ep93xx_eth.c | 13
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_dev.c | 4
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_dev.h | 4
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 6
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.c | 3
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 231
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.h | 11
-rw-r--r--  drivers/net/ethernet/dec/tulip/xircom_cb.c | 9
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c | 7
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 14
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 257
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 103
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 177
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h | 9
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 308
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.h | 2
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 22
-rw-r--r--  drivers/net/ethernet/faraday/ftmac100.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/Makefile | 3
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 10
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c (renamed from drivers/net/ethernet/freescale/fec.c) | 288
-rw-r--r--  drivers/net/ethernet/freescale/fec_mpc52xx.c | 16
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 17
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 176
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 8
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 29
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ptp.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_sysfs.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 881
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 24
-rw-r--r--  drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 16
-rw-r--r--  drivers/net/ethernet/i825xx/82596.c | 8
-rw-r--r--  drivers/net/ethernet/i825xx/lib82596.c | 6
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 28
-rw-r--r--  drivers/net/ethernet/ibm/emac/debug.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.c | 9
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 36
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 20
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 47
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.c | 131
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 38
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h | 27
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 21
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 240
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 408
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 11
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c | 37
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 350
-rw-r--r--  drivers/net/ethernet/intel/e1000e/nvm.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/param.c | 62
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 134
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 261
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 50
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_hw.h | 60
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c | 156
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.h | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 124
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.h | 17
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.c | 11
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.h | 52
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c | 27
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c | 261
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_regs.h | 53
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 141
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 354
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_hwmon.c | 29
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 1529
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 61
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 18
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 44
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 110
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 63
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 39
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 186
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 21
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 31
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 20
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 121
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 7
-rw-r--r--  drivers/net/ethernet/jme.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig | 8
-rw-r--r--  drivers/net/ethernet/marvell/Makefile | 2
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 378
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c | 142
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 32
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 27
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 13
-rw-r--r--  drivers/net/ethernet/marvell/sky2.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 204
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_clock.c | 151
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 239
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_resources.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 79
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 51
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 129
-rw-r--r--  drivers/net/ethernet/micrel/ks8695net.c | 20
-rw-r--r--  drivers/net/ethernet/micrel/ks8851.c | 55
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_mll.c | 32
-rw-r--r--  drivers/net/ethernet/microchip/enc28j60.c | 4
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 12
-rw-r--r--  drivers/net/ethernet/natsemi/jazzsonic.c | 12
-rw-r--r--  drivers/net/ethernet/natsemi/macsonic.c | 12
-rw-r--r--  drivers/net/ethernet/natsemi/ns83820.c | 4
-rw-r--r--  drivers/net/ethernet/natsemi/sonic.c | 1
-rw-r--r--  drivers/net/ethernet/natsemi/xtsonic.c | 12
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 10
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 14
-rw-r--r--  drivers/net/ethernet/netx-eth.c | 2
-rw-r--r--  drivers/net/ethernet/nuvoton/w90p910_ether.c | 20
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 64
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 4
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 23
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 5
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 222
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/Makefile | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 129
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 520
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 204
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 107
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 75
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 125
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 63
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 107
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 371
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 263
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 1954
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 1780
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 259
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 70
-rw-r--r--  drivers/net/ethernet/rdc/r6040.c | 12
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 8
-rw-r--r--  drivers/net/ethernet/realtek/8139too.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/atp.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 323
-rw-r--r--  drivers/net/ethernet/renesas/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 438
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 222
-rw-r--r--  drivers/net/ethernet/s6gmac.c | 16
-rw-r--r--  drivers/net/ethernet/seeq/ether3.c | 22
-rw-r--r--  drivers/net/ethernet/seeq/sgiseeq.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 267
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 14
-rw-r--r--  drivers/net/ethernet/sfc/enum.h | 12
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c | 17
-rw-r--r--  drivers/net/ethernet/sfc/filter.c | 249
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 97
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 94
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 116
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 793
-rw-r--r--  drivers/net/ethernet/sfc/siena.c | 25
-rw-r--r--  drivers/net/ethernet/sgi/meth.c | 5
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 41
-rw-r--r--  drivers/net/ethernet/smsc/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/smsc/smc9194.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smc91c92_cs.c | 14
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smsc9420.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 19
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 92
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 199
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/descs.h | 51
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/descs_com.h | 43
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 81
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 168
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 31
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c | 31
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | 30
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 151
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 89
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 40
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 73
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 156
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 148
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1340
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 211
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | 74
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 2
-rw-r--r--  drivers/net/ethernet/sun/sunbmac.c | 4
-rw-r--r--  drivers/net/ethernet/sun/sunhme.c | 13
-rw-r--r--  drivers/net/ethernet/sun/sunqe.c | 5
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c | 17
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 334
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 2
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c | 31
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.h | 2
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 56
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 1
-rw-r--r--  drivers/net/ethernet/ti/tlan.c | 5
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_net.c | 240
-rw-r--r--  drivers/net/ethernet/toshiba/spider_net.c | 7
-rw-r--r--  drivers/net/ethernet/tundra/tsi108_eth.c | 21
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 17
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c | 15
-rw-r--r--  drivers/net/ethernet/wiznet/w5100.c | 4
-rw-r--r--  drivers/net/ethernet/wiznet/w5300.c | 4
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 33
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 25
-rw-r--r--  drivers/net/ethernet/xircom/xirc2ps_cs.c | 17
332 files changed, 20569 insertions(+), 8515 deletions(-)
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index f36ff99fd394..adb4bf5eb4b4 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -306,6 +306,7 @@ static int el3_isa_match(struct device *pdev, unsigned int ndev)
if (!dev)
return -ENOMEM;
+ SET_NETDEV_DEV(dev, pdev);
netdev_boot_setup_check(dev);
if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) {
@@ -595,6 +596,7 @@ static int __init el3_eisa_probe (struct device *device)
return -ENOMEM;
}
+ SET_NETDEV_DEV(dev, device);
netdev_boot_setup_check(dev);
el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA);
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index ffd8de28a76a..6fc994fa4abe 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -1165,16 +1165,4 @@ static struct pcmcia_driver tc574_driver = {
.suspend = tc574_suspend,
.resume = tc574_resume,
};
-
-static int __init init_tc574(void)
-{
- return pcmcia_register_driver(&tc574_driver);
-}
-
-static void __exit exit_tc574(void)
-{
- pcmcia_unregister_driver(&tc574_driver);
-}
-
-module_init(init_tc574);
-module_exit(exit_tc574);
+module_pcmcia_driver(tc574_driver);
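The module_pcmcia_driver() helper used in this and the other PCMCIA conversions below wraps exactly the registration boilerplate being deleted. A sketch of its definition, paraphrased from include/pcmcia/ds.h of this era (it expands through the generic module_driver() macro, which emits the __init/__exit pair):

    #define module_pcmcia_driver(__pcmcia_driver) \
            module_driver(__pcmcia_driver, pcmcia_register_driver, \
                          pcmcia_unregister_driver)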
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index a556c01e011b..078480aaa168 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -928,16 +928,4 @@ static struct pcmcia_driver tc589_driver = {
.suspend = tc589_suspend,
.resume = tc589_resume,
};
-
-static int __init init_tc589(void)
-{
- return pcmcia_register_driver(&tc589_driver);
-}
-
-static void __exit exit_tc589(void)
-{
- pcmcia_unregister_driver(&tc589_driver);
-}
-
-module_init(init_tc589);
-module_exit(exit_tc589);
+module_pcmcia_driver(tc589_driver);
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 27aaaf99e73e..144942f6372b 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -1690,7 +1690,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
skb_checksum_none_assert(new_skb);
if (rx->rxStatus & TYPHOON_RX_VLAN)
- __vlan_hwaccel_put_tag(new_skb,
+ __vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q),
ntohl(rx->vlanTag) & 0xffff);
netif_receive_skb(new_skb);
@@ -2445,9 +2445,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* settings -- so we only allow the user to toggle the TX processing.
*/
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_HW_VLAN_TX;
+ NETIF_F_HW_VLAN_CTAG_TX;
dev->features = dev->hw_features |
- NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM;
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
if(register_netdev(dev) < 0) {
err_msg = "unable to register netdev";
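The typhoon hunks above track the protocol-aware VLAN API rework: the NETIF_F_HW_VLAN_TX/RX feature flags were renamed to their 802.1Q-specific NETIF_F_HW_VLAN_CTAG_* forms, and __vlan_hwaccel_put_tag() gained an explicit protocol argument. A sketch of the new helper, reconstructed from the include/linux/if_vlan.h of that series (the exact body may differ):

    static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
                                                         __be16 vlan_proto,
                                                         u16 vlan_tci)
    {
            /* record the tag for hardware acceleration on this skb */
            skb->vlan_proto = vlan_proto;
            skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
            return skb;
    }

The same htons(ETH_P_8021Q) conversion recurs throughout this diffstat (starfire, acenic, amd8111e, the Atheros drivers, and so on).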
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index cab306a9888e..e1d26433d619 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -828,7 +828,7 @@ static int ax_probe(struct platform_device *pdev)
struct ei_device *ei_local;
struct ax_device *ax;
struct resource *irq, *mem, *mem2;
- resource_size_t mem_size, mem2_size = 0;
+ unsigned long mem_size, mem2_size = 0;
int ret = 0;
dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index e1b3941bd149..d801c1410fb0 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -728,19 +728,7 @@ static struct pcmcia_driver axnet_cs_driver = {
.suspend = axnet_suspend,
.resume = axnet_resume,
};
-
-static int __init init_axnet_cs(void)
-{
- return pcmcia_register_driver(&axnet_cs_driver);
-}
-
-static void __exit exit_axnet_cs(void)
-{
- pcmcia_unregister_driver(&axnet_cs_driver);
-}
-
-module_init(init_axnet_cs);
-module_exit(exit_axnet_cs);
+module_pcmcia_driver(axnet_cs_driver);
/*====================================================================*/
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index de1af0bfed4c..46c5aadaca8e 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -1694,16 +1694,4 @@ static struct pcmcia_driver pcnet_driver = {
.suspend = pcnet_suspend,
.resume = pcnet_resume,
};
-
-static int __init init_pcnet_cs(void)
-{
- return pcmcia_register_driver(&pcnet_driver);
-}
-
-static void __exit exit_pcnet_cs(void)
-{
- pcmcia_unregister_driver(&pcnet_driver);
-}
-
-module_init(init_pcnet_cs);
-module_exit(exit_pcnet_cs);
+module_pcmcia_driver(pcnet_driver);
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 549b77500579..8b04bfc20cfb 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -594,7 +594,8 @@ static const struct ethtool_ops ethtool_ops;
#ifdef VLAN_SUPPORT
-static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int netdev_vlan_rx_add_vid(struct net_device *dev,
+ __be16 proto, u16 vid)
{
struct netdev_private *np = netdev_priv(dev);
@@ -608,7 +609,8 @@ static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
return 0;
}
-static int netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int netdev_vlan_rx_kill_vid(struct net_device *dev,
+ __be16 proto, u16 vid)
{
struct netdev_private *np = netdev_priv(dev);
@@ -702,7 +704,7 @@ static int starfire_init_one(struct pci_dev *pdev,
#endif /* ZEROCOPY */
#ifdef VLAN_SUPPORT
- dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
#endif /* VLAN_RX_KILL_VID */
#ifdef ADDR_64BITS
dev->features |= NETIF_F_HIGHDMA;
@@ -1496,7 +1498,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
printk(KERN_DEBUG " netdev_rx() vlanid = %d\n",
vlid);
}
- __vlan_hwaccel_put_tag(skb, vlid);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid);
}
#endif /* VLAN_SUPPORT */
netif_receive_skb(skb);
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index a175d0be1ae1..ee705771bd2c 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -188,10 +188,9 @@ static int desc_list_init(struct net_device *dev)
/* allocate a new skb for next time receive */
new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
- if (!new_skb) {
- pr_notice("init: low on mem - packet dropped\n");
+ if (!new_skb)
goto init_error;
- }
+
skb_reserve(new_skb, NET_IP_ALIGN);
/* Invidate the data cache of skb->data range when it is write back
* cache. It will prevent overwritting the new data from DMA
@@ -1236,7 +1235,6 @@ static void bfin_mac_rx(struct net_device *dev)
new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
- netdev_notice(dev, "rx: low on mem - packet dropped\n");
dev->stats.rx_dropped++;
goto out;
}
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 0be2195e5034..269295403fc4 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1464,35 +1464,23 @@ static int greth_of_probe(struct platform_device *ofdev)
}
/* Allocate TX descriptor ring in coherent memory */
- greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
- 1024,
- &greth->tx_bd_base_phys,
- GFP_KERNEL);
-
+ greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+ &greth->tx_bd_base_phys,
+ GFP_KERNEL | __GFP_ZERO);
if (!greth->tx_bd_base) {
- if (netif_msg_probe(greth))
- dev_err(&dev->dev, "could not allocate descriptor memory.\n");
err = -ENOMEM;
goto error3;
}
- memset(greth->tx_bd_base, 0, 1024);
-
/* Allocate RX descriptor ring in coherent memory */
- greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
- 1024,
- &greth->rx_bd_base_phys,
- GFP_KERNEL);
-
+ greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+ &greth->rx_bd_base_phys,
+ GFP_KERNEL | __GFP_ZERO);
if (!greth->rx_bd_base) {
- if (netif_msg_probe(greth))
- dev_err(greth->dev, "could not allocate descriptor memory.\n");
err = -ENOMEM;
goto error4;
}
- memset(greth->rx_bd_base, 0, 1024);
-
/* Get MAC address from: module param, OF property or ID prom */
for (i = 0; i < 6; i++) {
if (macaddr[i] != 0)
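The greth conversion relies on __GFP_ZERO: dma_alloc_coherent() then hands back pre-zeroed memory, making both the follow-up memset() and the driver's own out-of-memory message redundant (the core allocator already warns on failure unless __GFP_NOWARN is set). The reduced pattern, with hypothetical names such as RING_BYTES:

    struct greth_bd *ring;
    dma_addr_t ring_dma;

    ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma,
                              GFP_KERNEL | __GFP_ZERO);
    if (!ring)
            return -ENOMEM;         /* allocator has logged the failure */
    /* no memset(ring, 0, RING_BYTES) needed */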
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index c0bc41a784ca..b7894f8af9d1 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -472,7 +472,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
ap->name = pci_name(pdev);
dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->watchdog_timeo = 5*HZ;
@@ -2019,7 +2019,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
/* send it up */
if ((bd_flags & BD_FLG_VLAN_TAG))
- __vlan_hwaccel_put_tag(skb, retdesc->vlan);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), retdesc->vlan);
netif_rx(skb);
dev->stats.rx_packets++;
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 6e722dc37db7..65926a956575 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -318,8 +318,6 @@ static int lance_rx (struct net_device *dev)
struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
if (!skb) {
- printk ("%s: Memory squeeze, deferring packet.\n",
- dev->name);
dev->stats.rx_dropped++;
rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 3789affbc0e5..0866e7627433 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -293,7 +293,6 @@ static int lance_rx(struct net_device *dev)
struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
if (!skb) {
- netdev_warn(dev, "Memory squeeze, deferring packet\n");
dev->stats.rx_dropped++;
rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 60e2b701afe7..9793767996a2 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -528,7 +528,6 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
dev->stats.rx_packets++;
} else {
am_writeword (dev, hdraddr + 2, RMD_OWN);
- printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
break;
}
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 42d4e6ad58a5..8e6b665a6726 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -793,7 +793,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
#if AMD8111E_VLAN_TAG_USED
if (vtag == TT_VLAN_TAGGED){
u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
- __vlan_hwaccel_put_tag(skb, vlan_tag);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
}
#endif
netif_receive_skb(skb);
@@ -1869,7 +1869,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
#if AMD8111E_VLAN_TAG_USED
- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX ;
#endif
lp = netdev_priv(dev);
@@ -1907,7 +1907,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
#if AMD8111E_VLAN_TAG_USED
- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
#endif
/* Probe the external PHY */
amd8111e_probe_ext_phy(dev);
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 98f4522fd17b..c178eb4c8166 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -193,7 +193,6 @@ static int ariadne_rx(struct net_device *dev)
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- netdev_warn(dev, "Memory squeeze, deferring packet\n");
for (i = 0; i < RX_RING_SIZE; i++)
if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
break;
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 84219df72f51..e8d0ef508f48 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -996,8 +996,6 @@ static int lance_rx( struct net_device *dev )
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
- dev->name ));
for( i = 0; i < RX_RING_SIZE; i++ )
if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
RMD1_OWN_CHIP)
@@ -1149,9 +1147,7 @@ static struct net_device *atarilance_dev;
static int __init atarilance_module_init(void)
{
atarilance_dev = atarilance_probe(-1);
- if (IS_ERR(atarilance_dev))
- return PTR_ERR(atarilance_dev);
- return 0;
+ return PTR_RET(atarilance_dev);
}
static void __exit atarilance_module_exit(void)
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index de774d419144..688aede742c7 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -727,7 +727,6 @@ static int au1000_rx(struct net_device *dev)
frmlen -= 4; /* Remove FCS */
skb = netdev_alloc_skb(dev, frmlen + 2);
if (skb == NULL) {
- netdev_err(dev, "Memory squeeze, dropping packet.\n");
dev->stats.rx_dropped++;
continue;
}
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index baca0bd1b393..3d86ffeb4e15 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -607,8 +607,6 @@ static int lance_rx(struct net_device *dev)
skb = netdev_alloc_skb(dev, len + 2);
if (skb == 0) {
- printk("%s: Memory squeeze, deferring packet.\n",
- dev->name);
dev->stats.rx_dropped++;
*rds_ptr(rd, mblength, lp->type) = 0;
*rds_ptr(rd, rmd1, lp->type) =
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 9af3c307862c..a51497c9d2af 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -188,9 +188,7 @@ static struct net_device *dev_mvme147_lance;
int __init init_module(void)
{
dev_mvme147_lance = mvme147lance_probe(-1);
- if (IS_ERR(dev_mvme147_lance))
- return PTR_ERR(dev_mvme147_lance);
- return 0;
+ return PTR_RET(dev_mvme147_lance);
}
void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 013b65108536..26fc0ce0faa3 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -1238,7 +1238,7 @@ MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
int __init init_module(void)
{
dev_ni65 = ni65_probe(-1);
- return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
+ return PTR_RET(dev_ni65);
}
void __exit cleanup_module(void)
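PTR_RET(), adopted in the atarilance, mvme147, ni65, and sun3lance module-init paths, folds the IS_ERR()/PTR_ERR() test into a single call. Its definition in include/linux/err.h is essentially the following (the helper was later renamed PTR_ERR_OR_ZERO):

    static inline int PTR_RET(const void *ptr)
    {
            if (IS_ERR(ptr))
                    return PTR_ERR(ptr);    /* propagate the encoded errno */
            else
                    return 0;               /* valid pointer: success */
    }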
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 9f59bf63514b..d4ed89130c52 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -1508,16 +1508,4 @@ static struct pcmcia_driver nmclan_cs_driver = {
.suspend = nmclan_suspend,
.resume = nmclan_resume,
};
-
-static int __init init_nmclan_cs(void)
-{
- return pcmcia_register_driver(&nmclan_cs_driver);
-}
-
-static void __exit exit_nmclan_cs(void)
-{
- pcmcia_unregister_driver(&nmclan_cs_driver);
-}
-
-module_init(init_nmclan_cs);
-module_exit(exit_nmclan_cs);
+module_pcmcia_driver(nmclan_cs_driver);
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 797f847edf13..ed2130727643 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1166,7 +1166,6 @@ static void pcnet32_rx_entry(struct net_device *dev,
skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
if (skb == NULL) {
- netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n");
dev->stats.rx_dropped++;
return;
}
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 74b3891b6483..4375abe61da1 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -812,9 +812,6 @@ static int lance_rx( struct net_device *dev )
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
- dev->name ));
-
dev->stats.rx_dropped++;
head->msg_length = 0;
head->flag |= RMD1_OWN_CHIP;
@@ -943,9 +940,7 @@ static struct net_device *sun3lance_dev;
int __init init_module(void)
{
sun3lance_dev = sun3lance_probe(-1);
- if (IS_ERR(sun3lance_dev))
- return PTR_ERR(sun3lance_dev);
- return 0;
+ return PTR_RET(sun3lance_dev);
}
void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 6a40290d3727..f47b780892e9 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -536,8 +536,6 @@ static void lance_rx_dvma(struct net_device *dev)
skb = netdev_alloc_skb(dev, len + 2);
if (skb == NULL) {
- printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
- dev->name);
dev->stats.rx_dropped++;
rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
@@ -708,8 +706,6 @@ static void lance_rx_pio(struct net_device *dev)
skb = netdev_alloc_skb(dev, len + 2);
if (skb == NULL) {
- printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
- dev->name);
dev->stats.rx_dropped++;
sbus_writew(0, &rd->mblength);
sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
@@ -1377,10 +1373,9 @@ static int sparc_lance_probe_one(struct platform_device *op,
dma_alloc_coherent(&op->dev,
sizeof(struct lance_init_block),
&lp->init_block_dvma, GFP_ATOMIC);
- if (!lp->init_block_mem) {
- printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n");
+ if (!lp->init_block_mem)
goto fail;
- }
+
lp->pio_buffer = 0;
lp->init_ring = lance_init_ring_dvma;
lp->rx = lance_rx_dvma;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index a206779c68cf..4ce8ceb62205 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -386,20 +386,16 @@ static int mace_open(struct net_device *dev)
/* Allocate the DMA ring buffers */
mp->tx_ring = dma_alloc_coherent(mp->device,
- N_TX_RING * MACE_BUFF_SIZE,
- &mp->tx_ring_phys, GFP_KERNEL);
- if (mp->tx_ring == NULL) {
- printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
+ N_TX_RING * MACE_BUFF_SIZE,
+ &mp->tx_ring_phys, GFP_KERNEL);
+ if (mp->tx_ring == NULL)
goto out1;
- }
mp->rx_ring = dma_alloc_coherent(mp->device,
- N_RX_RING * MACE_BUFF_SIZE,
- &mp->rx_ring_phys, GFP_KERNEL);
- if (mp->rx_ring == NULL) {
- printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
+ N_RX_RING * MACE_BUFF_SIZE,
+ &mp->rx_ring_phys, GFP_KERNEL);
+ if (mp->rx_ring == NULL)
goto out2;
- }
mace_dma_off(dev);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index 21e261ffbe10..3ef7092e3f1c 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -810,7 +810,7 @@ int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc)
if (wufc & AT_WUFC_LNKC) {
wol_ctrl |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) {
- dev_dbg(&pdev->dev, "%s: write phy MII_IER faild.\n",
+ dev_dbg(&pdev->dev, "%s: write phy MII_IER failed.\n",
atl1c_driver_name);
}
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 1f07fc633ab9..0ba900762b13 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -417,7 +417,7 @@ static void atl1c_set_multi(struct net_device *netdev)
static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
{
- if (features & NETIF_F_HW_VLAN_RX) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX) {
/* enable VLAN tag insert/strip */
*mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
} else {
@@ -494,10 +494,10 @@ static netdev_features_t atl1c_fix_features(struct net_device *netdev,
* Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx.
*/
- if (features & NETIF_F_HW_VLAN_RX)
- features |= NETIF_F_HW_VLAN_TX;
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
else
- features &= ~NETIF_F_HW_VLAN_TX;
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
if (netdev->mtu > MAX_TSO_FRAME_SIZE)
features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
@@ -510,7 +510,7 @@ static int atl1c_set_features(struct net_device *netdev,
{
netdev_features_t changed = netdev->features ^ features;
- if (changed & NETIF_F_HW_VLAN_RX)
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
atl1c_vlan_mode(netdev, features);
return 0;
@@ -1809,7 +1809,7 @@ rrs_checked:
AT_TAG_TO_VLAN(rrs->vlan_tag, vlan);
vlan = le16_to_cpu(vlan);
- __vlan_hwaccel_put_tag(skb, vlan);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
}
netif_receive_skb(skb);
@@ -2475,13 +2475,13 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
atl1c_set_ethtool_ops(netdev);
/* TODO: add when ready */
- netdev->hw_features = NETIF_F_SG |
- NETIF_F_HW_CSUM |
- NETIF_F_HW_VLAN_RX |
- NETIF_F_TSO |
+ netdev->hw_features = NETIF_F_SG |
+ NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_TSO |
NETIF_F_TSO6;
- netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_TX;
+ netdev->features = netdev->hw_features |
+ NETIF_F_HW_VLAN_CTAG_TX;
return 0;
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index 829b5ad71d0d..b5fd934585e9 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -186,7 +186,7 @@ struct atl1e_tpd_desc {
/* how about 0x2000 */
#define MAX_TX_BUF_LEN 0x2000
#define MAX_TX_BUF_SHIFT 13
-/*#define MAX_TX_BUF_LEN 0x3000 */
+#define MAX_TSO_SEG_SIZE 0x3c00
/* rrs word 1 bit 0:31 */
#define RRS_RX_CSUM_MASK 0xFFFF
@@ -438,7 +438,6 @@ struct atl1e_adapter {
struct atl1e_hw hw;
struct atl1e_hw_stats hw_stats;
- bool have_msi;
u32 wol;
u16 link_speed;
u16 link_duplex;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 92f4734f860d..0688bb82b442 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -315,7 +315,7 @@ static void atl1e_set_multi(struct net_device *netdev)
static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
{
- if (features & NETIF_F_HW_VLAN_RX) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX) {
/* enable VLAN tag insert/strip */
*mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
} else {
@@ -378,10 +378,10 @@ static netdev_features_t atl1e_fix_features(struct net_device *netdev,
* Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx.
*/
- if (features & NETIF_F_HW_VLAN_RX)
- features |= NETIF_F_HW_VLAN_TX;
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
else
- features &= ~NETIF_F_HW_VLAN_TX;
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features;
}
@@ -391,7 +391,7 @@ static int atl1e_set_features(struct net_device *netdev,
{
netdev_features_t changed = netdev->features ^ features;
- if (changed & NETIF_F_HW_VLAN_RX)
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
atl1e_vlan_mode(netdev, features);
return 0;
@@ -1420,11 +1420,9 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
RRS_PKT_SIZE_MASK) - 4; /* CRC */
skb = netdev_alloc_skb_ip_align(netdev, packet_size);
- if (skb == NULL) {
- netdev_warn(netdev,
- "Memory squeeze, deferring packet\n");
+ if (skb == NULL)
goto skip_pkt;
- }
+
memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
skb_put(skb, packet_size);
skb->protocol = eth_type_trans(skb, netdev);
@@ -1437,7 +1435,7 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
netdev_dbg(netdev,
"RXD VLAN TAG<RRD>=0x%04x\n",
prrs->vtag);
- __vlan_hwaccel_put_tag(skb, vlan_tag);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
}
netif_receive_skb(skb);
@@ -1849,34 +1847,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter)
struct net_device *netdev = adapter->netdev;
free_irq(adapter->pdev->irq, netdev);
-
- if (adapter->have_msi)
- pci_disable_msi(adapter->pdev);
}
static int atl1e_request_irq(struct atl1e_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
- int flags = 0;
int err = 0;
- adapter->have_msi = true;
- err = pci_enable_msi(pdev);
- if (err) {
- netdev_dbg(netdev,
- "Unable to allocate MSI interrupt Error: %d\n", err);
- adapter->have_msi = false;
- }
-
- if (!adapter->have_msi)
- flags |= IRQF_SHARED;
- err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev);
+ err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name,
+ netdev);
if (err) {
netdev_dbg(adapter->netdev,
"Unable to allocate interrupt Error: %d\n", err);
- if (adapter->have_msi)
- pci_disable_msi(pdev);
return err;
}
netdev_dbg(netdev, "atl1e_request_irq OK\n");
@@ -2215,9 +2198,9 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
atl1e_set_ethtool_ops(netdev);
netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
- NETIF_F_HW_VLAN_RX;
+ NETIF_F_HW_VLAN_CTAG_RX;
netdev->features = netdev->hw_features | NETIF_F_LLTX |
- NETIF_F_HW_VLAN_TX;
+ NETIF_F_HW_VLAN_CTAG_TX;
return 0;
}
@@ -2344,6 +2327,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->reset_task, atl1e_reset_task);
INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
+ netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE);
err = register_netdev(netdev);
if (err) {
netdev_err(netdev, "register netdevice failed\n");
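netif_set_gso_max_size() caps how large a GSO segment the stack will hand this device, so the new MAX_TSO_SEG_SIZE bound (0x3c00) keeps atl1e TSO requests within what its DMA engine can describe. The helper itself is a one-line setter in include/linux/netdevice.h, roughly:

    static inline void netif_set_gso_max_size(struct net_device *dev,
                                              unsigned int size)
    {
            dev->gso_max_size = size;       /* per-device GSO segment limit */
    }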
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 5b0d9931c720..fa0915f3999b 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2024,7 +2024,7 @@ rrd_ok:
((rrd->vlan_tag & 7) << 13) |
((rrd->vlan_tag & 8) << 9);
- __vlan_hwaccel_put_tag(skb, vlan_tag);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
}
netif_receive_skb(skb);
@@ -2774,7 +2774,7 @@ static int atl1_close(struct net_device *netdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int atl1_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2876,23 +2876,18 @@ static int atl1_resume(struct device *dev)
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume);
-#define ATL1_PM_OPS (&atl1_pm_ops)
-
-#else
-
-static int atl1_suspend(struct device *dev) { return 0; }
-
-#define ATL1_PM_OPS NULL
-#endif
static void atl1_shutdown(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_PM_SLEEP
atl1_suspend(&pdev->dev);
+#endif
pci_wake_from_d3(pdev, adapter->wol);
pci_set_power_state(pdev, PCI_D3hot);
}
@@ -3023,10 +3018,10 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->features = NETIF_F_HW_CSUM;
netdev->features |= NETIF_F_SG;
- netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
+ netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO |
- NETIF_F_HW_VLAN_RX;
+ NETIF_F_HW_VLAN_CTAG_RX;
/* is this valid? see atl1_setup_mac_ctrl() */
netdev->features |= NETIF_F_RXCSUM;
@@ -3147,7 +3142,7 @@ static struct pci_driver atl1_driver = {
.probe = atl1_probe,
.remove = atl1_remove,
.shutdown = atl1_shutdown,
- .driver.pm = ATL1_PM_OPS,
+ .driver.pm = &atl1_pm_ops,
};
/**
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 1278b47022e0..265ce1b752ed 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -363,7 +363,7 @@ static inline void atl2_irq_disable(struct atl2_adapter *adapter)
static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl)
{
- if (features & NETIF_F_HW_VLAN_RX) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX) {
/* enable VLAN tag insert/strip */
*ctrl |= MAC_CTRL_RMV_VLAN;
} else {
@@ -399,10 +399,10 @@ static netdev_features_t atl2_fix_features(struct net_device *netdev,
* Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx.
*/
- if (features & NETIF_F_HW_VLAN_RX)
- features |= NETIF_F_HW_VLAN_TX;
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
else
- features &= ~NETIF_F_HW_VLAN_TX;
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features;
}
@@ -412,7 +412,7 @@ static int atl2_set_features(struct net_device *netdev,
{
netdev_features_t changed = netdev->features ^ features;
- if (changed & NETIF_F_HW_VLAN_RX)
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
atl2_vlan_mode(netdev, features);
return 0;
@@ -437,9 +437,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
/* alloc new buffer */
skb = netdev_alloc_skb_ip_align(netdev, rx_size);
if (NULL == skb) {
- printk(KERN_WARNING
- "%s: Mem squeeze, deferring packet.\n",
- netdev->name);
/*
* Check that some rx space is free. If not,
* free one and mark stats->rx_dropped++.
@@ -455,7 +452,7 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
((rxd->status.vtag&7) << 13) |
((rxd->status.vtag&8) << 9);
- __vlan_hwaccel_put_tag(skb, vlan_tag);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
}
netif_rx(skb);
netdev->stats.rx_bytes += rx_size;
@@ -890,7 +887,7 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
skb->len-copy_len);
offset = ((u32)(skb->len-copy_len + 3) & ~3);
}
-#ifdef NETIF_F_HW_VLAN_TX
+#ifdef NETIF_F_HW_VLAN_CTAG_TX
if (vlan_tx_tag_present(skb)) {
u16 vlan_tag = vlan_tx_tag_get(skb);
vlan_tag = (vlan_tag << 4) |
@@ -1416,8 +1413,8 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -EIO;
- netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_RX;
- netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
+ netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
/* Init PHY as early as possible due to power saving issue */
atl2_phy_init(&adapter->hw);
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index f82eb1699464..46a622cceee4 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -220,7 +220,7 @@ static void atlx_link_chg_task(struct work_struct *work)
static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl)
{
- if (features & NETIF_F_HW_VLAN_RX) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX) {
/* enable VLAN tag insert/strip */
*ctrl |= MAC_CTRL_RMV_VLAN;
} else {
@@ -257,10 +257,10 @@ static netdev_features_t atlx_fix_features(struct net_device *netdev,
* Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx.
*/
- if (features & NETIF_F_HW_VLAN_RX)
- features |= NETIF_F_HW_VLAN_TX;
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
else
- features &= ~NETIF_F_HW_VLAN_TX;
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features;
}
@@ -270,7 +270,7 @@ static int atlx_set_features(struct net_device *netdev,
{
netdev_features_t changed = netdev->features ^ features;
- if (changed & NETIF_F_HW_VLAN_RX)
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
atlx_vlan_mode(netdev, features);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 7d81e059e811..0b3e23ec37f7 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -862,27 +862,25 @@ static int bcm_enet_open(struct net_device *dev)
/* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!p) {
- dev_err(kdev, "cannot allocate rx ring %u\n", size);
ret = -ENOMEM;
goto out_freeirq_tx;
}
- memset(p, 0, size);
priv->rx_desc_alloc_size = size;
priv->rx_desc_cpu = p;
/* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!p) {
- dev_err(kdev, "cannot allocate tx ring\n");
ret = -ENOMEM;
goto out_free_rx_ring;
}
- memset(p, 0, size);
priv->tx_desc_alloc_size = size;
priv->tx_desc_cpu = p;
@@ -1619,7 +1617,6 @@ static int bcm_enet_probe(struct platform_device *pdev)
struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
struct mii_bus *bus;
const char *clk_name;
- unsigned int iomem_size;
int i, ret;
/* stop if shared driver failed, assume driver->probe will be
@@ -1644,17 +1641,12 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (ret)
goto out;
- iomem_size = resource_size(res_mem);
- if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
- ret = -EBUSY;
- goto out;
- }
-
- priv->base = ioremap(res_mem->start, iomem_size);
+ priv->base = devm_request_and_ioremap(&pdev->dev, res_mem);
if (priv->base == NULL) {
ret = -ENOMEM;
- goto out_release_mem;
+ goto out;
}
+
dev->irq = priv->irq = res_irq->start;
priv->irq_rx = res_irq_rx->start;
priv->irq_tx = res_irq_tx->start;
@@ -1674,9 +1666,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->mac_clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(priv->mac_clk)) {
ret = PTR_ERR(priv->mac_clk);
- goto out_unmap;
+ goto out;
}
- clk_enable(priv->mac_clk);
+ clk_prepare_enable(priv->mac_clk);
/* initialize default and fetch platform data */
priv->rx_ring_size = BCMENET_DEF_RX_DESC;
@@ -1705,7 +1697,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->phy_clk = NULL;
goto out_put_clk_mac;
}
- clk_enable(priv->phy_clk);
+ clk_prepare_enable(priv->phy_clk);
}
/* do minimal hardware init to be able to probe mii bus */
@@ -1733,7 +1725,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
* if a slave is not present on hw */
bus->phy_mask = ~(1 << priv->phy_id);
- bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR,
+ GFP_KERNEL);
if (!bus->irq) {
ret = -ENOMEM;
goto out_free_mdio;
@@ -1794,10 +1787,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
return 0;
out_unregister_mdio:
- if (priv->mii_bus) {
+ if (priv->mii_bus)
mdiobus_unregister(priv->mii_bus);
- kfree(priv->mii_bus->irq);
- }
out_free_mdio:
if (priv->mii_bus)
@@ -1807,19 +1798,13 @@ out_uninit_hw:
/* turn off mdc clock */
enet_writel(priv, 0, ENET_MIISC_REG);
if (priv->phy_clk) {
- clk_disable(priv->phy_clk);
+ clk_disable_unprepare(priv->phy_clk);
clk_put(priv->phy_clk);
}
out_put_clk_mac:
- clk_disable(priv->mac_clk);
+ clk_disable_unprepare(priv->mac_clk);
clk_put(priv->mac_clk);
-
-out_unmap:
- iounmap(priv->base);
-
-out_release_mem:
- release_mem_region(res_mem->start, iomem_size);
out:
free_netdev(dev);
return ret;
@@ -1833,7 +1818,6 @@ static int bcm_enet_remove(struct platform_device *pdev)
{
struct bcm_enet_priv *priv;
struct net_device *dev;
- struct resource *res;
/* stop netdevice */
dev = platform_get_drvdata(pdev);
@@ -1845,7 +1829,6 @@ static int bcm_enet_remove(struct platform_device *pdev)
if (priv->has_phy) {
mdiobus_unregister(priv->mii_bus);
- kfree(priv->mii_bus->irq);
mdiobus_free(priv->mii_bus);
} else {
struct bcm63xx_enet_platform_data *pd;
@@ -1856,17 +1839,12 @@ static int bcm_enet_remove(struct platform_device *pdev)
bcm_enet_mdio_write_mii);
}
- /* release device resources */
- iounmap(priv->base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
/* disable hw block clocks */
if (priv->phy_clk) {
- clk_disable(priv->phy_clk);
+ clk_disable_unprepare(priv->phy_clk);
clk_put(priv->phy_clk);
}
- clk_disable(priv->mac_clk);
+ clk_disable_unprepare(priv->mac_clk);
clk_put(priv->mac_clk);
platform_set_drvdata(pdev, NULL);
@@ -1889,31 +1867,20 @@ struct platform_driver bcm63xx_enet_driver = {
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
struct resource *res;
- unsigned int iomem_size;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- iomem_size = resource_size(res);
- if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
- return -EBUSY;
-
- bcm_enet_shared_base = ioremap(res->start, iomem_size);
- if (!bcm_enet_shared_base) {
- release_mem_region(res->start, iomem_size);
+ bcm_enet_shared_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!bcm_enet_shared_base)
return -ENOMEM;
- }
+
return 0;
}
static int bcm_enet_shared_remove(struct platform_device *pdev)
{
- struct resource *res;
-
- iounmap(bcm_enet_shared_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
return 0;
}
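The bcm63xx_enet probe/remove simplification above comes from device-managed resources: devm_request_and_ioremap() both requests the MMIO region and maps it, and the devres core releases everything when the device unbinds, which is why the iounmap()/release_mem_region() unwind paths can be deleted outright. A minimal sketch of the probe-side idiom (note this helper returned NULL on failure and logged its own error; it was later superseded by devm_ioremap_resource(), which returns ERR_PTR values):

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    priv->base = devm_request_and_ioremap(&pdev->dev, res);
    if (!priv->base)
            return -ENOMEM;
    /* no explicit iounmap()/release_mem_region() on any exit path */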
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index da5f4397f87c..eec0af45b859 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
+#include <linux/phy.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <bcm47xx_nvram.h>
@@ -244,10 +245,8 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
/* Alloc skb */
slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
- if (!slot->skb) {
- bgmac_err(bgmac, "Allocation of skb failed!\n");
+ if (!slot->skb)
return -ENOMEM;
- }
/* Poison - if everything goes fine, hardware will overwrite it */
rx = (struct bgmac_rx_header *)slot->skb->data;
@@ -1313,6 +1312,73 @@ static const struct ethtool_ops bgmac_ethtool_ops = {
};
/**************************************************
+ * MII
+ **************************************************/
+
+static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ return bgmac_phy_read(bus->priv, mii_id, regnum);
+}
+
+static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ return bgmac_phy_write(bus->priv, mii_id, regnum, value);
+}
+
+static int bgmac_mii_register(struct bgmac *bgmac)
+{
+ struct mii_bus *mii_bus;
+ int i, err = 0;
+
+ mii_bus = mdiobus_alloc();
+ if (!mii_bus)
+ return -ENOMEM;
+
+ mii_bus->name = "bgmac mii bus";
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s-%d-%d", "bgmac",
+ bgmac->core->bus->num, bgmac->core->core_unit);

+ mii_bus->priv = bgmac;
+ mii_bus->read = bgmac_mii_read;
+ mii_bus->write = bgmac_mii_write;
+ mii_bus->parent = &bgmac->core->dev;
+ mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
+
+ mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
+ if (!mii_bus->irq) {
+ err = -ENOMEM;
+ goto err_free_bus;
+ }
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ mii_bus->irq[i] = PHY_POLL;
+
+ err = mdiobus_register(mii_bus);
+ if (err) {
+ bgmac_err(bgmac, "Registration of mii bus failed\n");
+ goto err_free_irq;
+ }
+
+ bgmac->mii_bus = mii_bus;
+
+ return err;
+
+err_free_irq:
+ kfree(mii_bus->irq);
+err_free_bus:
+ mdiobus_free(mii_bus);
+ return err;
+}
+
+static void bgmac_mii_unregister(struct bgmac *bgmac)
+{
+ struct mii_bus *mii_bus = bgmac->mii_bus;
+
+ mdiobus_unregister(mii_bus);
+ kfree(mii_bus->irq);
+ mdiobus_free(mii_bus);
+}
+
+/**************************************************
* BCMA bus ops
**************************************************/
@@ -1404,11 +1470,18 @@ static int bgmac_probe(struct bcma_device *core)
if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
+ err = bgmac_mii_register(bgmac);
+ if (err) {
+ bgmac_err(bgmac, "Cannot register MDIO\n");
+ goto err_dma_free;
+ }
+
err = register_netdev(bgmac->net_dev);
if (err) {
bgmac_err(bgmac, "Cannot register net device\n");
err = -ENOTSUPP;
- goto err_dma_free;
+ goto err_mii_unregister;
}
netif_carrier_off(net_dev);
@@ -1417,6 +1490,8 @@ static int bgmac_probe(struct bcma_device *core)
return 0;
+err_mii_unregister:
+ bgmac_mii_unregister(bgmac);
err_dma_free:
bgmac_dma_free(bgmac);
@@ -1433,6 +1508,7 @@ static void bgmac_remove(struct bcma_device *core)
netif_napi_del(&bgmac->napi);
unregister_netdev(bgmac->net_dev);
+ bgmac_mii_unregister(bgmac);
bgmac_dma_free(bgmac);
bcma_set_drvdata(core, NULL);
free_netdev(bgmac->net_dev);
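
With the bus from bgmac_mii_register() in place, PHY access can go through the generic mdiobus accessors instead of driver-private calls. A hedged sketch (example_phy_link_up is hypothetical, not part of bgmac) that polls the PHY's BMSR through such a bus:

#include <linux/phy.h>
#include <linux/mii.h>

static bool example_phy_link_up(struct mii_bus *bus, int phyaddr)
{
	int bmsr = mdiobus_read(bus, phyaddr, MII_BMSR);

	/* negative return is an MDIO error, not a link state */
	return bmsr >= 0 && (bmsr & BMSR_LSTATUS);
}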
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 4ede614c81f8..98d4b5fcc070 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -399,6 +399,7 @@ struct bgmac {
struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */
struct net_device *net_dev;
struct napi_struct napi;
+ struct mii_bus *mii_bus;
/* DMA */
struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 2f0ba8f2fd6c..5d204492c603 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -416,7 +416,7 @@ static int bnx2_unregister_cnic(struct net_device *dev)
return 0;
}
-struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
+static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
@@ -854,12 +854,11 @@ bnx2_alloc_mem(struct bnx2 *bp)
sizeof(struct statistics_block);
status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
- &bp->status_blk_mapping, GFP_KERNEL);
+ &bp->status_blk_mapping,
+ GFP_KERNEL | __GFP_ZERO);
if (status_blk == NULL)
goto alloc_mem_err;
- memset(status_blk, 0, bp->status_stats_size);
-
bnapi = &bp->bnx2_napi[0];
bnapi->status_blk.msi = status_blk;
bnapi->hw_tx_cons_ptr =
@@ -3212,7 +3211,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
}
if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
- __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
skb->protocol = eth_type_trans(skb, bp->dev);
@@ -3554,7 +3553,7 @@ bnx2_set_rx_mode(struct net_device *dev)
rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
- if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
+ if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
if (dev->flags & IFF_PROMISC) {
@@ -7696,7 +7695,7 @@ bnx2_fix_features(struct net_device *dev, netdev_features_t features)
struct bnx2 *bp = netdev_priv(dev);
if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
- features |= NETIF_F_HW_VLAN_RX;
+ features |= NETIF_F_HW_VLAN_CTAG_RX;
return features;
}
@@ -7707,12 +7706,12 @@ bnx2_set_features(struct net_device *dev, netdev_features_t features)
struct bnx2 *bp = netdev_priv(dev);
/* TSO with VLAN tag won't work with current firmware */
- if (features & NETIF_F_HW_VLAN_TX)
+ if (features & NETIF_F_HW_VLAN_CTAG_TX)
dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
else
dev->vlan_features &= ~NETIF_F_ALL_TSO;
- if ((!!(features & NETIF_F_HW_VLAN_RX) !=
+ if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
!!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
netif_running(dev)) {
bnx2_netif_stop(bp, false);
@@ -8552,7 +8551,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
dev->vlan_features = dev->hw_features;
- dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= dev->hw_features;
dev->priv_flags |= IFF_UNICAST_FLT;
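
The bnx2 hunks above track the VLAN API rework of this cycle: the feature flags gain a CTAG (802.1Q) infix and __vlan_hwaccel_put_tag() now takes the tag protocol explicitly, paving the way for 802.1ad. A minimal sketch of the receive-side call as converted here (example_rx_vlan is hypothetical):

#include <linux/if_vlan.h>

/* hypothetical rx completion: attach a hardware-stripped 802.1Q tag */
static void example_rx_vlan(struct sk_buff *skb, u16 vlan_tci)
{
	/* was: __vlan_hwaccel_put_tag(skb, vlan_tci); */
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
}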
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e4605a965084..3dba2a70a00e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -26,8 +26,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.78.02-0"
-#define DRV_MODULE_RELDATE "2013/01/14"
+#define DRV_MODULE_VERSION "1.78.17-0"
+#define DRV_MODULE_RELDATE "2013/04/11"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -492,7 +492,6 @@ enum bnx2x_tpa_mode_t {
struct bnx2x_fastpath {
struct bnx2x *bp; /* parent */
-#define BNX2X_NAPI_WEIGHT 128
struct napi_struct napi;
union host_hc_status_block status_blk;
/* chip independent shortcuts into sb structure */
@@ -613,9 +612,10 @@ struct bnx2x_fastpath {
* START_BD - describes packet
* START_BD (split) - includes unpaged data segment for GSO
* PARSING_BD - for TSO and CSUM data
+ * PARSING_BD2 - for encapsulation data
* Frag BDs - describes pages for frags
*/
-#define BDS_PER_TX_PKT 3
+#define BDS_PER_TX_PKT 4
#define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT)
/* max BDs per tx packet including next pages */
#define MAX_DESC_PER_TX_PKT (MAX_BDS_PER_TX_PKT + \
@@ -730,18 +730,24 @@ struct bnx2x_fastpath {
#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \
skb->csum_offset))
-#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
+#define pbd_tcp_flags(tcp_hdr) (ntohl(tcp_flag_word(tcp_hdr))>>16 & 0xff)
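
pbd_tcp_flags() now takes the TCP header directly so the inner header of an encapsulated frame can be passed as well; the expression itself just isolates header byte 13, the flag byte. A standalone demonstration of the arithmetic (userspace, illustrative values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* bytes 12..15 of a TCP header: doff=5, flags = SYN|ACK (0x12) */
	uint8_t hdr12_15[4] = { 0x50, 0x12, 0xff, 0xff };
	/* assemble the word as ntohl(tcp_flag_word(...)) would */
	uint32_t word = (uint32_t)hdr12_15[0] << 24 |
			(uint32_t)hdr12_15[1] << 16 |
			(uint32_t)hdr12_15[2] << 8  |
			hdr12_15[3];

	/* (word >> 16) & 0xff is exactly header byte 13 */
	printf("flags byte = 0x%02x\n", (unsigned)((word >> 16) & 0xff));
	return 0;
}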
-#define XMIT_PLAIN 0
-#define XMIT_CSUM_V4 0x1
-#define XMIT_CSUM_V6 0x2
-#define XMIT_CSUM_TCP 0x4
-#define XMIT_GSO_V4 0x8
-#define XMIT_GSO_V6 0x10
+#define XMIT_PLAIN 0
+#define XMIT_CSUM_V4 (1 << 0)
+#define XMIT_CSUM_V6 (1 << 1)
+#define XMIT_CSUM_TCP (1 << 2)
+#define XMIT_GSO_V4 (1 << 3)
+#define XMIT_GSO_V6 (1 << 4)
+#define XMIT_CSUM_ENC_V4 (1 << 5)
+#define XMIT_CSUM_ENC_V6 (1 << 6)
+#define XMIT_GSO_ENC_V4 (1 << 7)
+#define XMIT_GSO_ENC_V6 (1 << 8)
-#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6)
-#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6)
+#define XMIT_CSUM_ENC (XMIT_CSUM_ENC_V4 | XMIT_CSUM_ENC_V6)
+#define XMIT_GSO_ENC (XMIT_GSO_ENC_V4 | XMIT_GSO_ENC_V6)
+#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6 | XMIT_CSUM_ENC)
+#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6 | XMIT_GSO_ENC)
/* stuff added to make the code fit 80Col */
#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
@@ -844,6 +850,9 @@ struct bnx2x_common {
#define CHIP_IS_57840_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_VF)
#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
CHIP_IS_57711E(bp))
+#define CHIP_IS_57811xx(bp) (CHIP_IS_57811(bp) || \
+ CHIP_IS_57811_MF(bp) || \
+ CHIP_IS_57811_VF(bp))
#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
CHIP_IS_57712_MF(bp) || \
CHIP_IS_57712_VF(bp))
@@ -853,9 +862,7 @@ struct bnx2x_common {
CHIP_IS_57810(bp) || \
CHIP_IS_57810_MF(bp) || \
CHIP_IS_57810_VF(bp) || \
- CHIP_IS_57811(bp) || \
- CHIP_IS_57811_MF(bp) || \
- CHIP_IS_57811_VF(bp) || \
+ CHIP_IS_57811xx(bp) || \
CHIP_IS_57840(bp) || \
CHIP_IS_57840_MF(bp) || \
CHIP_IS_57840_VF(bp))
@@ -1215,14 +1222,16 @@ enum {
BNX2X_SP_RTNL_ENABLE_SRIOV,
BNX2X_SP_RTNL_VFPF_MCAST,
BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+ BNX2X_SP_RTNL_HYPERVISOR_VLAN,
};
struct bnx2x_prev_path_list {
+ struct list_head list;
u8 bus;
u8 slot;
u8 path;
- struct list_head list;
+ u8 aer;
u8 undi;
};
@@ -1269,6 +1278,8 @@ struct bnx2x {
#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
#ifdef CONFIG_BNX2X_SRIOV
+ /* protects vf2pf mailbox from simultaneous access */
+ struct mutex vf2pf_mutex;
/* vf pf channel mailbox contains request and response buffers */
struct bnx2x_vf_mbx_msg *vf2pf_mbox;
dma_addr_t vf2pf_mbox_mapping;
@@ -1281,6 +1292,8 @@ struct bnx2x {
dma_addr_t pf2vf_bulletin_mapping;
struct pf_vf_bulletin_content old_bulletin;
+
+ u16 requested_nr_virtfn;
#endif /* CONFIG_BNX2X_SRIOV */
struct net_device *dev;
@@ -1944,12 +1957,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
bool is_pf);
-#define BNX2X_ILT_ZALLOC(x, y, size) \
- do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x) \
- memset(x, 0, size); \
- } while (0)
+#define BNX2X_ILT_ZALLOC(x, y, size) \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
+ GFP_KERNEL | __GFP_ZERO)
#define BNX2X_ILT_FREE(x, y, size) \
do { \
@@ -2286,7 +2296,7 @@ static const u32 dmae_reg_go_c[] = {
DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
-void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev);
void bnx2x_notify_link_changed(struct bnx2x *bp);
#define BNX2X_MF_SD_PROTOCOL(bp) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4046f97378c2..b8fbe266ab68 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -451,7 +451,8 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
* Compute number of aggregated segments, and gso_type.
*/
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
- u16 len_on_bd, unsigned int pkt_len)
+ u16 len_on_bd, unsigned int pkt_len,
+ u16 num_of_coalesced_segs)
{
/* TPA aggregation won't have either IP options or TCP options
* other than timestamp or IPv6 extension headers.
@@ -480,8 +481,7 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
* to skb_shinfo(skb)->gso_segs
*/
- NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len,
- skb_shinfo(skb)->gso_size);
+ NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
@@ -537,7 +537,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* This is needed in order to enable forwarding support */
if (frag_size)
bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
- le16_to_cpu(cqe->pkt_len));
+ le16_to_cpu(cqe->pkt_len),
+ le16_to_cpu(cqe->num_of_coalesced_segs));
#ifdef BNX2X_STOP_ON_ERROR
if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
@@ -641,6 +642,14 @@ static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
&iph->saddr, &iph->daddr, 0);
}
+
+static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
+ void (*gro_func)(struct bnx2x*, struct sk_buff*))
+{
+ skb_set_network_header(skb, 0);
+ gro_func(bp, skb);
+ tcp_gro_complete(skb);
+}
#endif
static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
@@ -648,19 +657,17 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
{
#ifdef CONFIG_INET
if (skb_shinfo(skb)->gso_size) {
- skb_set_network_header(skb, 0);
switch (be16_to_cpu(skb->protocol)) {
case ETH_P_IP:
- bnx2x_gro_ip_csum(bp, skb);
+ bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
break;
case ETH_P_IPV6:
- bnx2x_gro_ipv6_csum(bp, skb);
+ bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
break;
default:
- BNX2X_ERR("FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+ BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
be16_to_cpu(skb->protocol));
}
- tcp_gro_complete(skb);
}
#endif
napi_gro_receive(&fp->napi, skb);
@@ -718,7 +725,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
skb, cqe, cqe_idx)) {
if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
- __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
bnx2x_gro_receive(bp, fp, skb);
} else {
DP(NETIF_MSG_RX_STATUS,
@@ -993,7 +1000,7 @@ reuse_rx:
if (le16_to_cpu(cqe_fp->pars_flags.flags) &
PARSING_FLAGS_VLAN)
- __vlan_hwaccel_put_tag(skb,
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(cqe_fp->vlan_tag));
napi_gro_receive(&fp->napi, skb);
@@ -1037,6 +1044,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
DP(NETIF_MSG_INTR,
"got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
fp->index, fp->fw_sb_id, fp->igu_sb_id);
+
bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
#ifdef BNX2X_STOP_ON_ERROR
@@ -1718,7 +1726,7 @@ static int bnx2x_req_irq(struct bnx2x *bp)
return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
}
-static int bnx2x_setup_irqs(struct bnx2x *bp)
+int bnx2x_setup_irqs(struct bnx2x *bp)
{
int rc = 0;
if (bp->flags & USING_MSIX_FLAG &&
@@ -2009,7 +2017,7 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
* Cleans the objects that have internal lists without sending
* ramrods. Should be run when interrupts are disabled.
*/
-static void bnx2x_squeeze_objects(struct bnx2x *bp)
+void bnx2x_squeeze_objects(struct bnx2x *bp)
{
int rc;
unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
@@ -2574,6 +2582,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
}
+ bnx2x_pre_irq_nic_init(bp);
+
/* Connect to IRQs */
rc = bnx2x_setup_irqs(bp);
if (rc) {
@@ -2583,11 +2593,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
LOAD_ERROR_EXIT(bp, load_error2);
}
- /* Setup NIC internals and enable interrupts */
- bnx2x_nic_init(bp, load_code);
-
/* Init per-function objects */
if (IS_PF(bp)) {
+ /* Setup NIC internals and enable interrupts */
+ bnx2x_post_irq_nic_init(bp, load_code);
+
bnx2x_init_bp_objs(bp);
bnx2x_iov_nic_init(bp);
@@ -2614,6 +2624,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
}
+ /* initialize FW coalescing state machines in RAM */
+ bnx2x_update_coalesce(bp);
+
/* setup the leading queue */
rc = bnx2x_setup_leading(bp);
if (rc) {
@@ -2654,7 +2667,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (IS_PF(bp))
rc = bnx2x_set_eth_mac(bp, true);
else /* vf */
- rc = bnx2x_vfpf_set_mac(bp);
+ rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
+ true);
if (rc) {
BNX2X_ERR("Setting Ethernet MAC failed\n");
LOAD_ERROR_EXIT(bp, load_error3);
@@ -2774,7 +2788,7 @@ load_error0:
#endif /* ! BNX2X_STOP_ON_ERROR */
}
-static int bnx2x_drain_tx_queues(struct bnx2x *bp)
+int bnx2x_drain_tx_queues(struct bnx2x *bp)
{
u8 rc = 0, cos, i;
@@ -2923,9 +2937,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bnx2x_free_fp_mem_cnic(bp);
if (IS_PF(bp)) {
- bnx2x_free_mem(bp);
if (CNIC_LOADED(bp))
bnx2x_free_mem_cnic(bp);
+ bnx2x_free_mem(bp);
}
bp->state = BNX2X_STATE_CLOSED;
bp->cnic_loaded = false;
@@ -3086,11 +3100,11 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
* to ease the pain of our fellow microcode engineers
* we use one mapping for both BDs
*/
-static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
- struct bnx2x_fp_txdata *txdata,
- struct sw_tx_bd *tx_buf,
- struct eth_tx_start_bd **tx_bd, u16 hlen,
- u16 bd_prod, int nbd)
+static u16 bnx2x_tx_split(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata,
+ struct sw_tx_bd *tx_buf,
+ struct eth_tx_start_bd **tx_bd, u16 hlen,
+ u16 bd_prod)
{
struct eth_tx_start_bd *h_tx_bd = *tx_bd;
struct eth_tx_bd *d_tx_bd;
@@ -3098,11 +3112,10 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
int old_len = le16_to_cpu(h_tx_bd->nbytes);
/* first fix first BD */
- h_tx_bd->nbd = cpu_to_le16(nbd);
h_tx_bd->nbytes = cpu_to_le16(hlen);
- DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
- h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
+ DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
+ h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
/* now get a new data BD
* (after the pbd) and fill it */
@@ -3131,7 +3144,7 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
-static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
+static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
__sum16 tsum = (__force __sum16) csum;
@@ -3146,30 +3159,47 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
return bswab16(tsum);
}
-static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
+static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
u32 rc;
+ __u8 prot = 0;
+ __be16 protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
- rc = XMIT_PLAIN;
+ return XMIT_PLAIN;
- else {
- if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
- rc = XMIT_CSUM_V6;
- if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- rc |= XMIT_CSUM_TCP;
+ protocol = vlan_get_protocol(skb);
+ if (protocol == htons(ETH_P_IPV6)) {
+ rc = XMIT_CSUM_V6;
+ prot = ipv6_hdr(skb)->nexthdr;
+ } else {
+ rc = XMIT_CSUM_V4;
+ prot = ip_hdr(skb)->protocol;
+ }
+ if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
+ if (inner_ip_hdr(skb)->version == 6) {
+ rc |= XMIT_CSUM_ENC_V6;
+ if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ rc |= XMIT_CSUM_TCP;
} else {
- rc = XMIT_CSUM_V4;
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ rc |= XMIT_CSUM_ENC_V4;
+ if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
rc |= XMIT_CSUM_TCP;
}
}
+ if (prot == IPPROTO_TCP)
+ rc |= XMIT_CSUM_TCP;
- if (skb_is_gso_v6(skb))
- rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
- else if (skb_is_gso(skb))
- rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
+ if (skb_is_gso_v6(skb)) {
+ rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V6;
+ } else if (skb_is_gso(skb)) {
+ rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V4;
+ }
return rc;
}
@@ -3254,14 +3284,23 @@ exit_lbl:
}
#endif
-static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
- u32 xmit_type)
+static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
+ u32 xmit_type)
{
+ struct ipv6hdr *ipv6;
+
*parsing_data |= (skb_shinfo(skb)->gso_size <<
ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
ETH_TX_PARSE_BD_E2_LSO_MSS;
- if ((xmit_type & XMIT_GSO_V6) &&
- (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+
+ if (xmit_type & XMIT_GSO_ENC_V6)
+ ipv6 = inner_ipv6_hdr(skb);
+ else if (xmit_type & XMIT_GSO_V6)
+ ipv6 = ipv6_hdr(skb);
+ else
+ ipv6 = NULL;
+
+ if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
@@ -3272,13 +3311,13 @@ static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
* @pbd: parse BD
* @xmit_type: xmit flags
*/
-static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
- struct eth_tx_parse_bd_e1x *pbd,
- u32 xmit_type)
+static void bnx2x_set_pbd_gso(struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ u32 xmit_type)
{
pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
- pbd->tcp_flags = pbd_tcp_flags(skb);
+ pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
if (xmit_type & XMIT_GSO_V4) {
pbd->ip_id = bswab16(ip_hdr(skb)->id);
@@ -3298,6 +3337,40 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
}
/**
+ * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
+ *
+ * @bp: driver handle
+ * @skb: packet skb
+ * @parsing_data: data to be updated
+ * @xmit_type: xmit flags
+ *
+ * 57712/578xx related, when skb has encapsulation
+ */
+static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
+ u32 *parsing_data, u32 xmit_type)
+{
+ *parsing_data |=
+ ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
+
+ if (xmit_type & XMIT_CSUM_TCP) {
+ *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
+
+ return skb_inner_transport_header(skb) +
+ inner_tcp_hdrlen(skb) - skb->data;
+ }
+
+ /* We support checksum offload for TCP and UDP only.
+ * No need to pass the UDP header length - it's a constant.
+ */
+ return skb_inner_transport_header(skb) +
+ sizeof(struct udphdr) - skb->data;
+}
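
The >> 1 shifts reflect that E2 parsing-BD offsets and lengths are counted in 16-bit words, not bytes. A worked example for a hypothetical GRE-encapsulated TCP frame (constants are illustrative only):

/* hypothetical frame layout, header sizes in bytes */
enum {
	EX_ETH_HLEN	= 14,		/* outer Ethernet	*/
	EX_OUTER_IP	= 20,		/* outer IPv4, no options */
	EX_GRE_HLEN	= 4,		/* basic GRE		*/
	EX_INNER_ETH	= 14,
	EX_INNER_IP	= 20,
	/* inner transport header starts 72 bytes from skb->data */
	EX_INNER_L4_OFF_B = EX_ETH_HLEN + EX_OUTER_IP + EX_GRE_HLEN +
			    EX_INNER_ETH + EX_INNER_IP,	/* = 72	*/
	EX_INNER_L4_OFF_W = EX_INNER_L4_OFF_B >> 1,	/* = 36 words */
};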
+
+/**
* bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
*
* @bp: driver handle
@@ -3305,15 +3378,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
* @parsing_data: data to be updated
* @xmit_type: xmit flags
*
- * 57712 related
+ * 57712/578xx related
*/
-static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
- u32 *parsing_data, u32 xmit_type)
+static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
+ u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
if (xmit_type & XMIT_CSUM_TCP) {
*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
@@ -3328,17 +3401,15 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
}
-static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
- struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
+/* set FW indication according to inner or outer protocols if tunneled */
+static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_start_bd *tx_start_bd,
+ u32 xmit_type)
{
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
- if (xmit_type & XMIT_CSUM_V4)
- tx_start_bd->bd_flags.as_bitfield |=
- ETH_TX_BD_FLAGS_IP_CSUM;
- else
- tx_start_bd->bd_flags.as_bitfield |=
- ETH_TX_BD_FLAGS_IPV6;
+ if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
if (!(xmit_type & XMIT_CSUM_TCP))
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
@@ -3352,9 +3423,9 @@ static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
* @pbd: parse BD to be updated
* @xmit_type: xmit flags
*/
-static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
- struct eth_tx_parse_bd_e1x *pbd,
- u32 xmit_type)
+static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ u32 xmit_type)
{
u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
@@ -3400,6 +3471,75 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
return hlen;
}
+static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
+ struct eth_tx_parse_bd_e2 *pbd_e2,
+ struct eth_tx_parse_2nd_bd *pbd2,
+ u16 *global_data,
+ u32 xmit_type)
+{
+ u16 hlen_w = 0;
+ u8 outerip_off, outerip_len = 0;
+ /* from outer IP to transport */
+ hlen_w = (skb_inner_transport_header(skb) -
+ skb_network_header(skb)) >> 1;
+
+ /* transport len */
+ if (xmit_type & XMIT_CSUM_TCP)
+ hlen_w += inner_tcp_hdrlen(skb) >> 1;
+ else
+ hlen_w += sizeof(struct udphdr) >> 1;
+
+ pbd2->fw_ip_hdr_to_payload_w = hlen_w;
+
+ if (xmit_type & XMIT_CSUM_ENC_V4) {
+ struct iphdr *iph = ip_hdr(skb);
+ pbd2->fw_ip_csum_wo_len_flags_frag =
+ bswab16(csum_fold((~iph->check) -
+ iph->tot_len - iph->frag_off));
+ } else {
+ pbd2->fw_ip_hdr_to_payload_w =
+ hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
+ }
+
+ pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
+
+ pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
+
+ if (xmit_type & XMIT_GSO_V4) {
+ pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
+
+ pbd_e2->data.tunnel_data.pseudo_csum =
+ bswab16(~csum_tcpudp_magic(
+ inner_ip_hdr(skb)->saddr,
+ inner_ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+
+ outerip_len = ip_hdr(skb)->ihl << 1;
+ } else {
+ pbd_e2->data.tunnel_data.pseudo_csum =
+ bswab16(~csum_ipv6_magic(
+ &inner_ipv6_hdr(skb)->saddr,
+ &inner_ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+ }
+
+ outerip_off = (skb_network_header(skb) - skb->data) >> 1;
+
+ *global_data |=
+ outerip_off |
+ (!!(xmit_type & XMIT_CSUM_V6) <<
+ ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
+ (outerip_len <<
+ ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
+ ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+ ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
+
+ if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
+ SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
+ pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
+ }
+}
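
For encapsulated LSO the driver seeds the inner TCP checksum with a length-less pseudo-header sum (csum_tcpudp_magic() with len 0); hardware folds in the per-segment length. A standalone sketch of that arithmetic with made-up addresses (userspace, not driver code):

#include <stdint.h>
#include <stdio.h>

/* fold a 32-bit partial sum and complement, like the kernel's csum_fold */
static uint16_t example_csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint32_t saddr = 0xc0a80001;	/* 192.168.0.1, host order */
	uint32_t daddr = 0xc0a80002;	/* 192.168.0.2 */
	uint32_t sum = (saddr >> 16) + (saddr & 0xffff) +
		       (daddr >> 16) + (daddr & 0xffff) +
		       6 /* IPPROTO_TCP */ + 0 /* length left out */;

	/* ~csum_tcpudp_magic(...): what the driver programs, pre-bswab16 */
	printf("pseudo csum seed = 0x%04x\n",
	       (unsigned)(uint16_t)~example_csum_fold(sum));
	return 0;
}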
+
/* called with netif_tx_lock
* bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
* netif_wake_queue()
@@ -3415,6 +3555,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+ struct eth_tx_parse_2nd_bd *pbd2 = NULL;
u32 pbd_e2_parsing_data = 0;
u16 pkt_prod, bd_prod;
int nbd, txq_index;
@@ -3482,7 +3623,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
mac_type = MULTICAST_ADDRESS;
}
-#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
/* First, check if we need to linearize the skb (due to FW
restrictions). No need to check fragmentation if page size > 8K
(there will be no violation to FW restrictions) */
@@ -3530,12 +3671,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
first_bd = tx_start_bd;
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
- SET_FLAG(tx_start_bd->general_data,
- ETH_TX_START_BD_PARSE_NBDS,
- 0);
- /* header nbd */
- SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
+ /* header nbd: indirectly zero other flags! */
+ tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
/* remember the first BD of the packet */
tx_buf->first_bd = txdata->tx_bd_prod;
@@ -3555,19 +3693,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* when transmitting in a vf, start bd must hold the ethertype
* for fw to enforce it
*/
-#ifndef BNX2X_STOP_ON_ERROR
- if (IS_VF(bp)) {
-#endif
+ if (IS_VF(bp))
tx_start_bd->vlan_or_ethertype =
cpu_to_le16(ntohs(eth->h_proto));
-#ifndef BNX2X_STOP_ON_ERROR
- } else {
+ else
/* used by FW for packet accounting */
tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
- }
-#endif
}
+ nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
+
/* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
@@ -3577,23 +3712,58 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!CHIP_IS_E1x(bp)) {
pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
- /* Set PBD in checksum offload case */
- if (xmit_type & XMIT_CSUM)
+
+ if (xmit_type & XMIT_CSUM_ENC) {
+ u16 global_data = 0;
+
+ /* Set PBD in enc checksum offload case */
+ hlen = bnx2x_set_pbd_csum_enc(bp, skb,
+ &pbd_e2_parsing_data,
+ xmit_type);
+
+ /* turn on 2nd parsing and get a BD */
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+ pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
+
+ memset(pbd2, 0, sizeof(*pbd2));
+
+ pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
+ (skb_inner_network_header(skb) -
+ skb->data) >> 1;
+
+ if (xmit_type & XMIT_GSO_ENC)
+ bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
+ &global_data,
+ xmit_type);
+
+ pbd2->global_data = cpu_to_le16(global_data);
+
+ /* add additional parse BD indication to start BD */
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_PARSE_NBDS, 1);
+ /* set encapsulation flag in start BD */
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_TUNNEL_EXIST, 1);
+ nbd++;
+ } else if (xmit_type & XMIT_CSUM) {
+ /* Set PBD in checksum offload case w/o encapsulation */
hlen = bnx2x_set_pbd_csum_e2(bp, skb,
&pbd_e2_parsing_data,
xmit_type);
+ }
- if (IS_MF_SI(bp) || IS_VF(bp)) {
- /* fill in the MAC addresses in the PBD - for local
- * switching
- */
- bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
- &pbd_e2->src_mac_addr_mid,
- &pbd_e2->src_mac_addr_lo,
+ /* Add the macs to the parsing BD if this is a vf */
+ if (IS_VF(bp)) {
+ /* override GRE parameters in BD */
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
+ &pbd_e2->data.mac_addr.src_mid,
+ &pbd_e2->data.mac_addr.src_lo,
eth->h_source);
- bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
- &pbd_e2->dst_mac_addr_mid,
- &pbd_e2->dst_mac_addr_lo,
+
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
+ &pbd_e2->data.mac_addr.dst_mid,
+ &pbd_e2->data.mac_addr.dst_lo,
eth->h_dest);
}
@@ -3615,14 +3785,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Setup the data pointer of the first BD of the packet */
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
- nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
pkt_size = tx_start_bd->nbytes;
DP(NETIF_MSG_TX_QUEUED,
- "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
+ "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
- le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
+ le16_to_cpu(tx_start_bd->nbytes),
tx_start_bd->bd_flags.as_bitfield,
le16_to_cpu(tx_start_bd->vlan_or_ethertype));
@@ -3635,10 +3804,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
- if (unlikely(skb_headlen(skb) > hlen))
+ if (unlikely(skb_headlen(skb) > hlen)) {
+ nbd++;
bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
&tx_start_bd, hlen,
- bd_prod, ++nbd);
+ bd_prod);
+ }
if (!CHIP_IS_E1x(bp))
bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
xmit_type);
@@ -3728,9 +3899,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (pbd_e2)
DP(NETIF_MSG_TX_QUEUED,
"PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
- pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
- pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
- pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
+ pbd_e2,
+ pbd_e2->data.mac_addr.dst_hi,
+ pbd_e2->data.mac_addr.dst_mid,
+ pbd_e2->data.mac_addr.dst_lo,
+ pbd_e2->data.mac_addr.src_hi,
+ pbd_e2->data.mac_addr.src_mid,
+ pbd_e2->data.mac_addr.src_lo,
pbd_e2->parsing_data);
DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
@@ -4580,11 +4755,11 @@ static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
u32 addr = BAR_CSTRORM_INTMEM +
CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
- u16 flags = REG_RD16(bp, addr);
+ u8 flags = REG_RD8(bp, addr);
/* clear and set */
flags &= ~HC_INDEX_DATA_HC_ENABLED;
flags |= enable_flag;
- REG_WR16(bp, addr, flags);
+ REG_WR8(bp, addr, flags);
DP(NETIF_MSG_IFUP,
"port %x fw_sb_id %d sb_index %d disable %d\n",
port, fw_sb_id, sb_index, disable);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index aee7671ff4c1..151675d66b0d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -50,13 +50,13 @@ extern int int_mode;
} \
} while (0)
-#define BNX2X_PCI_ALLOC(x, y, size) \
- do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- memset((void *)x, 0, size); \
- } while (0)
+#define BNX2X_PCI_ALLOC(x, y, size) \
+do { \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
+ GFP_KERNEL | __GFP_ZERO); \
+ if (x == NULL) \
+ goto alloc_mem_err; \
+} while (0)
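
The change both allocation macros make here is the same: __GFP_ZERO asks dma_alloc_coherent() for zeroed memory directly, so the follow-up memset() the old macros carried can be dropped. A minimal sketch of the call (example_zalloc is a hypothetical wrapper):

#include <linux/dma-mapping.h>

static void *example_zalloc(struct device *dev, size_t size,
			    dma_addr_t *handle)
{
	/* returns zero-filled coherent memory or NULL */
	return dma_alloc_coherent(dev, size, handle,
				  GFP_KERNEL | __GFP_ZERO);
}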
#define BNX2X_ALLOC(x, size) \
do { \
@@ -295,16 +295,29 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
void bnx2x_nic_init_cnic(struct bnx2x *bp);
/**
- * bnx2x_nic_init - init driver internals.
+ * bnx2x_pre_irq_nic_init - init driver internals.
*
* @bp: driver handle
*
* Initializes:
- * - rings
+ * - fastpath object
+ * - fastpath rings
+ * - etc.
+ */
+void bnx2x_pre_irq_nic_init(struct bnx2x *bp);
+
+/**
+ * bnx2x_post_irq_nic_init - init driver internals.
+ *
+ * @bp: driver handle
+ * @load_code: COMMON, PORT or FUNCTION
+ *
+ * Initializes:
* - status blocks
+ * - slowpath rings
* - etc.
*/
-void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
+void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code);
/**
* bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
*
@@ -496,7 +509,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
+int bnx2x_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivi);
int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
+int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
@@ -834,7 +850,7 @@ static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
/* Add NAPI objects */
for_each_rx_queue_cnic(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, BNX2X_NAPI_WEIGHT);
+ bnx2x_poll, NAPI_POLL_WEIGHT);
}
static inline void bnx2x_add_all_napi(struct bnx2x *bp)
@@ -844,7 +860,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
/* Add NAPI objects */
for_each_eth_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, BNX2X_NAPI_WEIGHT);
+ bnx2x_poll, NAPI_POLL_WEIGHT);
}
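
Both NAPI helpers now register with the generic NAPI_POLL_WEIGHT (64) in place of the driver-private weight of 128; weights above 64 have long been discouraged. Registration is otherwise unchanged, as in this minimal sketch:

#include <linux/netdevice.h>

static void example_register_napi(struct net_device *dev,
				  struct napi_struct *napi,
				  int (*poll)(struct napi_struct *, int))
{
	netif_napi_add(dev, napi, poll, NAPI_POLL_WEIGHT);
}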
static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
@@ -970,6 +986,9 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
else /* CHIP_IS_E1X */
start_params->network_cos_mode = FW_WRR;
+ start_params->gre_tunnel_mode = IPGRE_TUNNEL;
+ start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
+
return bnx2x_func_state_change(bp, &func_params);
}
@@ -1396,4 +1415,8 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
*
*/
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
+
+int bnx2x_drain_tx_queues(struct bnx2x *bp);
+void bnx2x_squeeze_objects(struct bnx2x *bp);
+
#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 91ecd6a00d05..4b077a7f16af 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -591,7 +591,7 @@ static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp)
DCBX_READ_REMOTE_MIB);
if (rc) {
- BNX2X_ERR("Faild to read remote mib from FW\n");
+ BNX2X_ERR("Failed to read remote mib from FW\n");
return rc;
}
@@ -619,7 +619,7 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
DCBX_READ_LOCAL_MIB);
if (rc) {
- BNX2X_ERR("Faild to read local mib from FW\n");
+ BNX2X_ERR("Failed to read local mib from FW\n");
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index edfa67adf2f9..ce1a91618677 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1364,11 +1364,27 @@ static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
return rc;
}
+static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf,
+ int buf_size)
+{
+ int rc;
+
+ rc = bnx2x_nvram_read(bp, offset, (u8 *)buf, buf_size);
+
+ if (!rc) {
+ __be32 *be = (__be32 *)buf;
+
+ while ((buf_size -= 4) >= 0)
+ *buf++ = be32_to_cpu(*be++);
+ }
+
+ return rc;
+}
+
static int bnx2x_get_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *eebuf)
{
struct bnx2x *bp = netdev_priv(dev);
- int rc;
if (!netif_running(dev)) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
@@ -1383,9 +1399,7 @@ static int bnx2x_get_eeprom(struct net_device *dev,
/* parameters already validated in ethtool_get_eeprom */
- rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
-
- return rc;
+ return bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
}
static int bnx2x_get_module_eeprom(struct net_device *dev,
@@ -1393,10 +1407,9 @@ static int bnx2x_get_module_eeprom(struct net_device *dev,
u8 *data)
{
struct bnx2x *bp = netdev_priv(dev);
- int rc = 0, phy_idx;
+ int rc = -EINVAL, phy_idx;
u8 *user_data = data;
- int remaining_len = ee->len, xfer_size;
- unsigned int page_off = ee->offset;
+ unsigned int start_addr = ee->offset, xfer_size = 0;
if (!netif_running(dev)) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
@@ -1405,21 +1418,52 @@ static int bnx2x_get_module_eeprom(struct net_device *dev,
}
phy_idx = bnx2x_get_cur_phy_idx(bp);
- bnx2x_acquire_phy_lock(bp);
- while (!rc && remaining_len > 0) {
- xfer_size = (remaining_len > SFP_EEPROM_PAGE_SIZE) ?
- SFP_EEPROM_PAGE_SIZE : remaining_len;
+
+ /* Read A0 section */
+ if (start_addr < ETH_MODULE_SFF_8079_LEN) {
+ /* Limit transfer size to the A0 section boundary */
+ if (start_addr + ee->len > ETH_MODULE_SFF_8079_LEN)
+ xfer_size = ETH_MODULE_SFF_8079_LEN - start_addr;
+ else
+ xfer_size = ee->len;
+ bnx2x_acquire_phy_lock(bp);
rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
&bp->link_params,
- page_off,
+ I2C_DEV_ADDR_A0,
+ start_addr,
xfer_size,
user_data);
- remaining_len -= xfer_size;
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading A0 section\n");
+
+ return -EINVAL;
+ }
user_data += xfer_size;
- page_off += xfer_size;
+ start_addr += xfer_size;
}
- bnx2x_release_phy_lock(bp);
+ /* Read A2 section */
+ if ((start_addr >= ETH_MODULE_SFF_8079_LEN) &&
+ (start_addr < ETH_MODULE_SFF_8472_LEN)) {
+ xfer_size = ee->len - xfer_size;
+ /* Limit transfer size to the A2 section boundary */
+ if (start_addr + xfer_size > ETH_MODULE_SFF_8472_LEN)
+ xfer_size = ETH_MODULE_SFF_8472_LEN - start_addr;
+ start_addr -= ETH_MODULE_SFF_8079_LEN;
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ I2C_DEV_ADDR_A2,
+ start_addr,
+ xfer_size,
+ user_data);
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading A2 section\n");
+ return -EINVAL;
+ }
+ }
return rc;
}
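
The two-branch read above follows the SFF-8472 layout: ethtool presents one 512-byte space in which the first 256 bytes come from the module's A0 device and the remainder from A2, rebased by 256. A small sketch of the mapping (0x50/0x51 are the 7-bit I2C addresses; the driver's I2C_DEV_ADDR_* macros may encode them differently, and example_map_module_offset is hypothetical):

#include <linux/ethtool.h>

static void example_map_module_offset(unsigned int offset,
				      u8 *dev_addr, unsigned int *page_off)
{
	if (offset < ETH_MODULE_SFF_8079_LEN) {	/* first 256 bytes: A0 */
		*dev_addr = 0x50;
		*page_off = offset;
	} else {				/* remainder: A2, rebased */
		*dev_addr = 0x51;
		*page_off = offset - ETH_MODULE_SFF_8079_LEN;
	}
}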
@@ -1427,24 +1471,50 @@ static int bnx2x_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
struct bnx2x *bp = netdev_priv(dev);
- int phy_idx;
+ int phy_idx, rc;
+ u8 sff8472_comp, diag_type;
+
if (!netif_running(dev)) {
- DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"cannot access eeprom when the interface is down\n");
return -EAGAIN;
}
-
phy_idx = bnx2x_get_cur_phy_idx(bp);
- switch (bp->link_params.phy[phy_idx].media_type) {
- case ETH_PHY_SFPP_10G_FIBER:
- case ETH_PHY_SFP_1G_FIBER:
- case ETH_PHY_DA_TWINAX:
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ I2C_DEV_ADDR_A0,
+ SFP_EEPROM_SFF_8472_COMP_ADDR,
+ SFP_EEPROM_SFF_8472_COMP_SIZE,
+ &sff8472_comp);
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading SFF-8472 comp field\n");
+ return -EINVAL;
+ }
+
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ I2C_DEV_ADDR_A0,
+ SFP_EEPROM_DIAG_TYPE_ADDR,
+ SFP_EEPROM_DIAG_TYPE_SIZE,
+ &diag_type);
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading Diag Type field\n");
+ return -EINVAL;
+ }
+
+ if (!sff8472_comp ||
+ (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)) {
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
- return 0;
- default:
- return -EOPNOTSUPP;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
}
+ return 0;
}
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
@@ -1496,9 +1566,8 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
int buf_size)
{
int rc;
- u32 cmd_flags;
- u32 align_offset;
- __be32 val;
+ u32 cmd_flags, align_offset, val;
+ __be32 val_be;
if (offset + buf_size > bp->common.flash_size) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
@@ -1517,16 +1586,16 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
align_offset = (offset & ~0x03);
- rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
+ rc = bnx2x_nvram_read_dword(bp, align_offset, &val_be, cmd_flags);
if (rc == 0) {
- val &= ~(0xff << BYTE_OFFSET(offset));
- val |= (*data_buf << BYTE_OFFSET(offset));
-
/* nvram data is returned as an array of bytes
* convert it back to cpu order
*/
- val = be32_to_cpu(val);
+ val = be32_to_cpu(val_be);
+
+ val &= ~(0xff << BYTE_OFFSET(offset));
+ val |= (*data_buf << BYTE_OFFSET(offset));
rc = bnx2x_nvram_write_dword(bp, align_offset, val,
cmd_flags);
@@ -1820,12 +1889,15 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
bp->link_params.req_flow_ctrl[cfg_idx] =
BNX2X_FLOW_CTRL_AUTO;
}
- bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_NONE;
+ bp->link_params.req_fc_auto_adv = 0;
if (epause->rx_pause)
bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_RX;
if (epause->tx_pause)
bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_TX;
+
+ if (!bp->link_params.req_fc_auto_adv)
+ bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_NONE;
}
DP(BNX2X_MSG_ETHTOOL,
@@ -2526,14 +2598,168 @@ static int bnx2x_test_ext_loopback(struct bnx2x *bp)
return rc;
}
+struct code_entry {
+ u32 sram_start_addr;
+ u32 code_attribute;
+#define CODE_IMAGE_TYPE_MASK 0xf0800003
+#define CODE_IMAGE_VNTAG_PROFILES_DATA 0xd0000003
+#define CODE_IMAGE_LENGTH_MASK 0x007ffffc
+#define CODE_IMAGE_TYPE_EXTENDED_DIR 0xe0000000
+ u32 nvm_start_addr;
+};
+
+#define CODE_ENTRY_MAX 16
+#define CODE_ENTRY_EXTENDED_DIR_IDX 15
+#define MAX_IMAGES_IN_EXTENDED_DIR 64
+#define NVRAM_DIR_OFFSET 0x14
+
+#define EXTENDED_DIR_EXISTS(code) \
+ (((code) & CODE_IMAGE_TYPE_MASK) == CODE_IMAGE_TYPE_EXTENDED_DIR && \
+ ((code) & CODE_IMAGE_LENGTH_MASK) != 0)
+
#define CRC32_RESIDUAL 0xdebb20e3
+#define CRC_BUFF_SIZE 256
+
+static int bnx2x_nvram_crc(struct bnx2x *bp,
+ int offset,
+ int size,
+ u8 *buff)
+{
+ u32 crc = ~0;
+ int rc = 0, done = 0;
+
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "NVRAM CRC from 0x%08x to 0x%08x\n", offset, offset + size);
+
+ while (done < size) {
+ int count = min_t(int, size - done, CRC_BUFF_SIZE);
+
+ rc = bnx2x_nvram_read(bp, offset + done, buff, count);
+
+ if (rc)
+ return rc;
+
+ crc = crc32_le(crc, buff, count);
+ done += count;
+ }
+
+ if (crc != CRC32_RESIDUAL)
+ rc = -EINVAL;
+
+ return rc;
+}
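
bnx2x_nvram_crc() never compares against a stored checksum directly: each image carries its complemented CRC-32 at the end, so running crc32_le over data plus stored CRC always lands on the fixed residual 0xdebb20e3 when the image is intact. A self-contained userspace demonstration (standard reflected CRC-32, poly 0xEDB88320, matching the kernel's crc32_le; little-endian host assumed):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>

static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	uint8_t buf[16] = "nvram image";	/* 12 data bytes */
	/* compute the CRC the way it is stored: init ~0, final complement */
	uint32_t crc = ~crc32_le(~0u, buf, 12);

	memcpy(buf + 12, &crc, 4);	/* append little-endian (LE host) */
	/* checking pass: init ~0, no final complement, data + stored CRC */
	assert(crc32_le(~0u, buf, 16) == 0xdebb20e3);
	puts("residual ok");
	return 0;
}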
+
+static int bnx2x_test_nvram_dir(struct bnx2x *bp,
+ struct code_entry *entry,
+ u8 *buff)
+{
+ size_t size = entry->code_attribute & CODE_IMAGE_LENGTH_MASK;
+ u32 type = entry->code_attribute & CODE_IMAGE_TYPE_MASK;
+ int rc;
+
+ /* Zero-length images and AFEX profiles do not have CRC */
+ if (size == 0 || type == CODE_IMAGE_VNTAG_PROFILES_DATA)
+ return 0;
+
+ rc = bnx2x_nvram_crc(bp, entry->nvm_start_addr, size, buff);
+ if (rc)
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "image %x has failed crc test (rc %d)\n", type, rc);
+
+ return rc;
+}
+
+static int bnx2x_test_dir_entry(struct bnx2x *bp, u32 addr, u8 *buff)
+{
+ int rc;
+ struct code_entry entry;
+
+ rc = bnx2x_nvram_read32(bp, addr, (u32 *)&entry, sizeof(entry));
+ if (rc)
+ return rc;
+
+ return bnx2x_test_nvram_dir(bp, &entry, buff);
+}
+
+static int bnx2x_test_nvram_ext_dirs(struct bnx2x *bp, u8 *buff)
+{
+ u32 cnt, dir_offset = NVRAM_DIR_OFFSET;
+ struct code_entry entry;
+ int i, rc;
+
+ rc = bnx2x_nvram_read32(bp,
+ dir_offset +
+ sizeof(entry) * CODE_ENTRY_EXTENDED_DIR_IDX,
+ (u32 *)&entry, sizeof(entry));
+ if (rc)
+ return rc;
+
+ if (!EXTENDED_DIR_EXISTS(entry.code_attribute))
+ return 0;
+
+ rc = bnx2x_nvram_read32(bp, entry.nvm_start_addr,
+ &cnt, sizeof(u32));
+ if (rc)
+ return rc;
+
+ dir_offset = entry.nvm_start_addr + 8;
+
+ for (i = 0; i < cnt && i < MAX_IMAGES_IN_EXTENDED_DIR; i++) {
+ rc = bnx2x_test_dir_entry(bp, dir_offset +
+ sizeof(struct code_entry) * i,
+ buff);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int bnx2x_test_nvram_dirs(struct bnx2x *bp, u8 *buff)
+{
+ u32 dir_offset = NVRAM_DIR_OFFSET;
+ int i, rc;
+
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "NVRAM DIRS CRC test-set\n");
+
+ for (i = 0; i < CODE_ENTRY_EXTENDED_DIR_IDX; i++) {
+ rc = bnx2x_test_dir_entry(bp, dir_offset +
+ sizeof(struct code_entry) * i,
+ buff);
+ if (rc)
+ return rc;
+ }
+
+ return bnx2x_test_nvram_ext_dirs(bp, buff);
+}
+
+struct crc_pair {
+ int offset;
+ int size;
+};
+
+static int bnx2x_test_nvram_tbl(struct bnx2x *bp,
+ const struct crc_pair *nvram_tbl, u8 *buf)
+{
+ int i;
+
+ for (i = 0; nvram_tbl[i].size; i++) {
+ int rc = bnx2x_nvram_crc(bp, nvram_tbl[i].offset,
+ nvram_tbl[i].size, buf);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "nvram_tbl[%d] has failed crc test (rc %d)\n",
+ i, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
static int bnx2x_test_nvram(struct bnx2x *bp)
{
- static const struct {
- int offset;
- int size;
- } nvram_tbl[] = {
+ const struct crc_pair nvram_tbl[] = {
{ 0, 0x14 }, /* bootstrap */
{ 0x14, 0xec }, /* dir */
{ 0x100, 0x350 }, /* manuf_info */
@@ -2542,30 +2768,33 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
{ 0x708, 0x70 }, /* manuf_key_info */
{ 0, 0 }
};
- __be32 *buf;
- u8 *data;
- int i, rc;
- u32 magic, crc;
+ const struct crc_pair nvram_tbl2[] = {
+ { 0x7e8, 0x350 }, /* manuf_info2 */
+ { 0xb38, 0xf0 }, /* feature_info */
+ { 0, 0 }
+ };
+
+ u8 *buf;
+ int rc;
+ u32 magic;
if (BP_NOMCP(bp))
return 0;
- buf = kmalloc(0x350, GFP_KERNEL);
+ buf = kmalloc(CRC_BUFF_SIZE, GFP_KERNEL);
if (!buf) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "kmalloc failed\n");
rc = -ENOMEM;
goto test_nvram_exit;
}
- data = (u8 *)buf;
- rc = bnx2x_nvram_read(bp, 0, data, 4);
+ rc = bnx2x_nvram_read32(bp, 0, &magic, sizeof(magic));
if (rc) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"magic value read (rc %d)\n", rc);
goto test_nvram_exit;
}
- magic = be32_to_cpu(buf[0]);
if (magic != 0x669955aa) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"wrong magic value (0x%08x)\n", magic);
@@ -2573,25 +2802,26 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
goto test_nvram_exit;
}
- for (i = 0; nvram_tbl[i].size; i++) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "Port 0 CRC test-set\n");
+ rc = bnx2x_test_nvram_tbl(bp, nvram_tbl, buf);
+ if (rc)
+ goto test_nvram_exit;
- rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
- nvram_tbl[i].size);
- if (rc) {
- DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
- "nvram_tbl[%d] read data (rc %d)\n", i, rc);
- goto test_nvram_exit;
- }
+ if (!CHIP_IS_E1x(bp) && !CHIP_IS_57811xx(bp)) {
+ u32 hide = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
+ SHARED_HW_CFG_HIDE_PORT1;
- crc = ether_crc_le(nvram_tbl[i].size, data);
- if (crc != CRC32_RESIDUAL) {
+ if (!hide) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
- "nvram_tbl[%d] wrong crc value (0x%08x)\n", i, crc);
- rc = -ENODEV;
- goto test_nvram_exit;
+ "Port 1 CRC test-set\n");
+ rc = bnx2x_test_nvram_tbl(bp, nvram_tbl2, buf);
+ if (rc)
+ goto test_nvram_exit;
}
}
+ rc = bnx2x_test_nvram_dirs(bp, buf);
+
test_nvram_exit:
kfree(buf);
return rc;
@@ -3232,7 +3462,32 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
};
-void bnx2x_set_ethtool_ops(struct net_device *netdev)
+static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
+ .get_settings = bnx2x_get_settings,
+ .set_settings = bnx2x_set_settings,
+ .get_drvinfo = bnx2x_get_drvinfo,
+ .get_msglevel = bnx2x_get_msglevel,
+ .set_msglevel = bnx2x_set_msglevel,
+ .get_link = bnx2x_get_link,
+ .get_coalesce = bnx2x_get_coalesce,
+ .get_ringparam = bnx2x_get_ringparam,
+ .set_ringparam = bnx2x_set_ringparam,
+ .get_sset_count = bnx2x_get_sset_count,
+ .get_strings = bnx2x_get_strings,
+ .get_ethtool_stats = bnx2x_get_ethtool_stats,
+ .get_rxnfc = bnx2x_get_rxnfc,
+ .set_rxnfc = bnx2x_set_rxnfc,
+ .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
+ .get_rxfh_indir = bnx2x_get_rxfh_indir,
+ .set_rxfh_indir = bnx2x_set_rxfh_indir,
+ .get_channels = bnx2x_get_channels,
+ .set_channels = bnx2x_set_channels,
+};
+
+void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
+ if (IS_PF(bp))
+ SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
+ else /* vf */
+ SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index e5f808377c91..84aecdf06f7a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -30,31 +30,31 @@
* IRO[138].m2) + ((sbId) * IRO[138].m3))
#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[316].base + ((pfId) * IRO[316].m1))
-#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[317].base + ((pfId) * IRO[317].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[318].base + ((pfId) * IRO[318].m1))
#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
- (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
+ (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+ (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+ (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
- (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
+ (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
- (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
+ (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
- (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
+ (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
- (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+ (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[315].base + ((pfId) * IRO[315].m1))
+ (IRO[316].base + ((pfId) * IRO[316].m1))
#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[307].base + ((pfId) * IRO[307].m1))
+ (IRO[308].base + ((pfId) * IRO[308].m1))
#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[306].base + ((pfId) * IRO[306].m1))
+ (IRO[307].base + ((pfId) * IRO[307].m1))
#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[305].base + ((pfId) * IRO[305].m1))
+ (IRO[306].base + ((pfId) * IRO[306].m1))
#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[151].base + ((funcId) * IRO[151].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
@@ -114,7 +114,7 @@
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[268].base + ((pfId) * IRO[268].m1))
#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
- (IRO[277].base + ((pfId) * IRO[277].m1))
+ (IRO[278].base + ((pfId) * IRO[278].m1))
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
(IRO[264].base + ((pfId) * IRO[264].m1))
#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
@@ -136,35 +136,32 @@
#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base)
#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[176].base + ((assertListEntry) * IRO[176].m1))
-#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
- (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \
- IRO[205].m2))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
(IRO[183].base + ((portId) * IRO[183].m1))
#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
- (IRO[318].base + ((pfId) * IRO[318].m1))
+ (IRO[319].base + ((pfId) * IRO[319].m1))
#define USTORM_FUNC_EN_OFFSET(funcId) \
(IRO[178].base + ((funcId) * IRO[178].m1))
#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[282].base + ((pfId) * IRO[282].m1))
-#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[283].base + ((pfId) * IRO[283].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[284].base + ((pfId) * IRO[284].m1))
#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
- (IRO[287].base + ((pfId) * IRO[287].m1))
+ (IRO[288].base + ((pfId) * IRO[288].m1))
#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
- (IRO[284].base + ((pfId) * IRO[284].m1))
+ (IRO[285].base + ((pfId) * IRO[285].m1))
#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[280].base + ((pfId) * IRO[280].m1))
+ (IRO[281].base + ((pfId) * IRO[281].m1))
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[279].base + ((pfId) * IRO[279].m1))
+ (IRO[280].base + ((pfId) * IRO[280].m1))
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[278].base + ((pfId) * IRO[278].m1))
+ (IRO[279].base + ((pfId) * IRO[279].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[281].base + ((pfId) * IRO[281].m1))
+ (IRO[282].base + ((pfId) * IRO[282].m1))
#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
- (IRO[285].base + ((pfId) * IRO[285].m1))
-#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[286].base + ((pfId) * IRO[286].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[287].base + ((pfId) * IRO[287].m1))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
(IRO[182].base + ((pfId) * IRO[182].m1))
#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -190,39 +187,39 @@
#define XSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[47].base + ((funcId) * IRO[47].m1))
#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[295].base + ((pfId) * IRO[295].m1))
+ (IRO[296].base + ((pfId) * IRO[296].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
- (IRO[298].base + ((pfId) * IRO[298].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[299].base + ((pfId) * IRO[299].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[300].base + ((pfId) * IRO[300].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
(IRO[301].base + ((pfId) * IRO[301].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
(IRO[302].base + ((pfId) * IRO[302].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
(IRO[303].base + ((pfId) * IRO[303].m1))
-#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
(IRO[304].base + ((pfId) * IRO[304].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+ (IRO[305].base + ((pfId) * IRO[305].m1))
#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[294].base + ((pfId) * IRO[294].m1))
+ (IRO[295].base + ((pfId) * IRO[295].m1))
#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[293].base + ((pfId) * IRO[293].m1))
+ (IRO[294].base + ((pfId) * IRO[294].m1))
#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[292].base + ((pfId) * IRO[292].m1))
+ (IRO[293].base + ((pfId) * IRO[293].m1))
#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[297].base + ((pfId) * IRO[297].m1))
+ (IRO[298].base + ((pfId) * IRO[298].m1))
#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
- (IRO[296].base + ((pfId) * IRO[296].m1))
+ (IRO[297].base + ((pfId) * IRO[297].m1))
#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
- (IRO[291].base + ((pfId) * IRO[291].m1))
+ (IRO[292].base + ((pfId) * IRO[292].m1))
#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
- (IRO[290].base + ((pfId) * IRO[290].m1))
+ (IRO[291].base + ((pfId) * IRO[291].m1))
#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
- (IRO[289].base + ((pfId) * IRO[289].m1))
+ (IRO[290].base + ((pfId) * IRO[290].m1))
#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
- (IRO[288].base + ((pfId) * IRO[288].m1))
+ (IRO[289].base + ((pfId) * IRO[289].m1))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
(IRO[44].base + ((pfId) * IRO[44].m1))
#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -389,4 +386,8 @@
#define UNDEF_IRO 0x80000000
+/* used for defining the number of FCoE tasks supported per PF */
+#define MAX_FCOE_FUNCS_PER_ENGINE 2
+#define MAX_NUM_FCOE_TASKS_PER_ENGINE 4096
+
#endif /* BNX2X_FW_DEFS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 037860ecc343..12f00a40cdf0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -114,6 +114,10 @@ struct license_key {
#define EPIO_CFG_EPIO30 0x0000001f
#define EPIO_CFG_EPIO31 0x00000020
+struct mac_addr {
+ u32 upper;
+ u32 lower;
+};
struct shared_hw_cfg { /* NVRAM Offset */
/* Up to 16 bytes of NULL-terminated string */
@@ -508,7 +512,22 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED 0x00000000
#define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED 0x00000001
- u32 reserved0[6]; /* 0x178 */
+ /* SFP+ Tx Equalization: NIC recommended and tested value is 0xBEB2.
+ * LOM recommended and tested value is 0xBEB2. Using a different
+ * value means using a value not tested by BRCM.
+ */
+ u32 sfi_tap_values; /* 0x178 */
+ #define PORT_HW_CFG_TX_EQUALIZATION_MASK 0x0000FFFF
+ #define PORT_HW_CFG_TX_EQUALIZATION_SHIFT 0
+
+ /* SFP+ Tx driver broadcast IDRIVER: NIC recommended and tested
+ * value is 0x2. LOM recommended and tested value is 0x2. Using a
+ * different value means using a value not tested by BRCM.
+ */
+ #define PORT_HW_CFG_TX_DRV_BROADCAST_MASK 0x000F0000
+ #define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT 16
+
+ u32 reserved0[5]; /* 0x17c */
u32 aeu_int_mask; /* 0x190 */
@@ -2821,8 +2840,8 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 8
-#define BCM_5710_FW_REVISION_VERSION 2
-#define BCM_5710_FW_ENGINEERING_VERSION 0
+#define BCM_5710_FW_REVISION_VERSION 17
+#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -3513,11 +3532,14 @@ struct client_init_tx_data {
#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2
#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3)
#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3
-#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4)
-#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4
+#define CLIENT_INIT_TX_DATA_RESERVED0 (0xFFF<<4)
+#define CLIENT_INIT_TX_DATA_RESERVED0_SHIFT 4
u8 default_vlan_flg;
u8 force_default_pri_flg;
- __le32 reserved3;
+ u8 tunnel_lso_inc_ip_id;
+ u8 refuse_outband_vlan_flg;
+ u8 tunnel_non_lso_pcsum_location;
+ u8 reserved1;
};
/*
@@ -3551,6 +3573,11 @@ struct client_update_ramrod_data {
__le16 silent_vlan_mask;
u8 silent_vlan_removal_flg;
u8 silent_vlan_change_flg;
+ u8 refuse_outband_vlan_flg;
+ u8 refuse_outband_vlan_change_flg;
+ u8 tx_switching_flg;
+ u8 tx_switching_change_flg;
+ __le32 reserved1;
__le32 echo;
};
@@ -3620,7 +3647,8 @@ struct eth_classify_header {
*/
struct eth_classify_mac_cmd {
struct eth_classify_cmd_header header;
- __le32 reserved0;
+ __le16 reserved0;
+ __le16 inner_mac;
__le16 mac_lsb;
__le16 mac_mid;
__le16 mac_msb;
@@ -3633,7 +3661,8 @@ struct eth_classify_mac_cmd {
*/
struct eth_classify_pair_cmd {
struct eth_classify_cmd_header header;
- __le32 reserved0;
+ __le16 reserved0;
+ __le16 inner_mac;
__le16 mac_lsb;
__le16 mac_mid;
__le16 mac_msb;
@@ -3855,8 +3884,68 @@ struct eth_halt_ramrod_data {
/*
- * Command for setting multicast classification for a client
+ * Destination and source MAC addresses.
+ */
+struct eth_mac_addresses {
+#if defined(__BIG_ENDIAN)
+ __le16 dst_mid;
+ __le16 dst_lo;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_lo;
+ __le16 dst_mid;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 src_lo;
+ __le16 dst_hi;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_hi;
+ __le16 src_lo;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 src_hi;
+ __le16 src_mid;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 src_mid;
+ __le16 src_hi;
+#endif
+};
+
+/* tunneling related data */
+struct eth_tunnel_data {
+#if defined(__BIG_ENDIAN)
+ __le16 dst_mid;
+ __le16 dst_lo;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_lo;
+ __le16 dst_mid;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 reserved0;
+ __le16 dst_hi;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_hi;
+ __le16 reserved0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 reserved1;
+ u8 ip_hdr_start_inner_w;
+ __le16 pseudo_csum;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 pseudo_csum;
+ u8 ip_hdr_start_inner_w;
+ u8 reserved1;
+#endif
+};
+
+/* Union for MAC addresses and for tunneling data.
+ * Considered as tunneling data only if (tunnel_exist == 1).
*/
+union eth_mac_addr_or_tunnel_data {
+ struct eth_mac_addresses mac_addr;
+ struct eth_tunnel_data tunnel_data;
+};
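A minimal sketch (not part of the patch) of how this union is meant to be read: the discriminator is the start BD's tunnel_exist bit (ETH_TX_START_BD_TUNNEL_EXIST, below), and the helper assumes the kernel's pr_info/le16_to_cpu plus the eth_tx_parse_bd_e2 struct further down in this header.

/* Illustrative only: pick the union view by the start BD flag. */
static void dump_parse_bd_e2(const struct eth_tx_parse_bd_e2 *pbd,
			     int tunnel_exist)
{
	if (tunnel_exist)	/* union carries eth_tunnel_data */
		pr_info("inner IP hdr word %u, pseudo csum 0x%04x\n",
			pbd->data.tunnel_data.ip_hdr_start_inner_w,
			le16_to_cpu(pbd->data.tunnel_data.pseudo_csum));
	else			/* union carries eth_mac_addresses */
		pr_info("dst mac msb 0x%04x\n",
			le16_to_cpu(pbd->data.mac_addr.dst_hi));
}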
+
+/* Command for setting multicast classification for a client */
struct eth_multicast_rules_cmd {
u8 cmd_general_data;
#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0)
@@ -3874,7 +3963,6 @@ struct eth_multicast_rules_cmd {
struct regpair reserved3;
};
-
/*
* parameters for multicast classification ramrod
*/
@@ -3883,7 +3971,6 @@ struct eth_multicast_rules_ramrod_data {
struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT];
};
-
/*
* Place holder for ramrods protocol specific data
*/
@@ -3947,11 +4034,14 @@ struct eth_rss_update_ramrod_data {
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6)
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6
#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7)
#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7
u8 rss_result_mask;
u8 rss_mode;
- __le32 __reserved2;
+ __le16 udp_4tuple_dst_port_mask;
+ __le16 udp_4tuple_dst_port_value;
u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];
__le32 rss_key[T_ETH_RSS_KEY];
__le32 echo;
@@ -4115,6 +4205,23 @@ enum eth_tpa_update_command {
MAX_ETH_TPA_UPDATE_COMMAND
};
+/* In case of LSO over an IPv4 tunnel, whether to increment
+ * the IP ID on the external or the internal IP header
+ */
+enum eth_tunnel_lso_inc_ip_id {
+ EXT_HEADER,
+ INT_HEADER,
+ MAX_ETH_TUNNEL_LSO_INC_IP_ID
+};
+
+/* In case a tunnel exists and L4 checksum is offloaded, this selects
+ * the pseudo checksum location: in the packet or on the BD.
+ */
+enum eth_tunnel_non_lso_pcsum_location {
+ PCSUM_ON_PKT,
+ PCSUM_ON_BD,
+ MAX_ETH_TUNNEL_NON_LSO_PCSUM_LOCATION
+};
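A hedged sketch (illustrative, not part of the patch) of how these two enums would feed the tunnel_lso_inc_ip_id and tunnel_non_lso_pcsum_location bytes added to client_init_tx_data earlier in this header:

/* Illustrative only: wire the tunnel enums into client init. */
static void set_tunnel_tx_defaults(struct client_init_tx_data *tx)
{
	/* LSO over an IPv4 tunnel: increment the inner header's IP ID */
	tx->tunnel_lso_inc_ip_id = INT_HEADER;
	/* non-LSO tunnel csum offload: pseudo csum carried on the BD */
	tx->tunnel_non_lso_pcsum_location = PCSUM_ON_BD;
}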
/*
* Tx regular BD structure
@@ -4166,8 +4273,8 @@ struct eth_tx_start_bd {
#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5
-#define ETH_TX_START_BD_RESREVED (0x1<<7)
-#define ETH_TX_START_BD_RESREVED_SHIFT 7
+#define ETH_TX_START_BD_TUNNEL_EXIST (0x1<<7)
+#define ETH_TX_START_BD_TUNNEL_EXIST_SHIFT 7
};
/*
@@ -4216,15 +4323,10 @@ struct eth_tx_parse_bd_e1x {
* Tx parsing BD structure for ETH E2
*/
struct eth_tx_parse_bd_e2 {
- __le16 dst_mac_addr_lo;
- __le16 dst_mac_addr_mid;
- __le16 dst_mac_addr_hi;
- __le16 src_mac_addr_lo;
- __le16 src_mac_addr_mid;
- __le16 src_mac_addr_hi;
+ union eth_mac_addr_or_tunnel_data data;
__le32 parsing_data;
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x7FF<<0)
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W (0x7FF<<0)
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT 0
#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11)
#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11
#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15)
@@ -4236,8 +4338,51 @@ struct eth_tx_parse_bd_e2 {
};
/*
- * The last BD in the BD memory will hold a pointer to the next BD memory
+ * Tx 2nd parsing BD structure for ETH packet
*/
+struct eth_tx_parse_2nd_bd {
+ __le16 global_data;
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5)
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6)
+#define ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST (0x1<<7)
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8
+#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x7<<13)
+#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 13
+ __le16 reserved1;
+ u8 tcp_flags;
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0)
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG (0x1<<1)
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG_SHIFT 1
+#define ETH_TX_PARSE_2ND_BD_RST_FLG (0x1<<2)
+#define ETH_TX_PARSE_2ND_BD_RST_FLG_SHIFT 2
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG (0x1<<3)
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG_SHIFT 3
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_URG_FLG (0x1<<5)
+#define ETH_TX_PARSE_2ND_BD_URG_FLG_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG (0x1<<6)
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7)
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7
+ u8 reserved2;
+ u8 tunnel_udp_hdr_start_w;
+ u8 fw_ip_hdr_to_payload_w;
+ __le16 fw_ip_csum_wo_len_flags_frag;
+ __le16 hw_ip_id;
+ __le32 tcp_send_seq;
+};
+
+/* The last BD in the BD memory will hold a pointer to the next BD memory */
struct eth_tx_next_bd {
__le32 addr_lo;
__le32 addr_hi;
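The comment above describes how fixed-size BD pages chain into one logical ring; a minimal sketch (not part of the patch), assuming the driver's U64_HI/U64_LO helpers:

/* Illustrative only: point the last BD of a page at the next page so
 * hardware walks the pages as a single ring.
 */
static void chain_tx_page(struct eth_tx_next_bd *next_bd,
			  dma_addr_t next_page_mapping)
{
	next_bd->addr_hi = cpu_to_le32(U64_HI(next_page_mapping));
	next_bd->addr_lo = cpu_to_le32(U64_LO(next_page_mapping));
}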
@@ -4252,6 +4397,7 @@ union eth_tx_bd_types {
struct eth_tx_bd reg_bd;
struct eth_tx_parse_bd_e1x parse_bd_e1x;
struct eth_tx_parse_bd_e2 parse_bd_e2;
+ struct eth_tx_parse_2nd_bd parse_2nd_bd;
struct eth_tx_next_bd next_bd;
};
@@ -4663,10 +4809,10 @@ enum common_spqe_cmd_id {
RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
RAMROD_CMD_ID_COMMON_START_TRAFFIC,
RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS,
+ RAMROD_CMD_ID_COMMON_SET_TIMESYNC,
MAX_COMMON_SPQE_CMD_ID
};
-
/*
* Per-protocol connection types
*/
@@ -4863,7 +5009,7 @@ struct vf_flr_event_data {
*/
struct malicious_vf_event_data {
u8 vf_id;
- u8 reserved0;
+ u8 err_id;
u16 reserved1;
u32 reserved2;
u32 reserved3;
@@ -4969,10 +5115,10 @@ enum event_ring_opcode {
EVENT_RING_OPCODE_CLASSIFICATION_RULES,
EVENT_RING_OPCODE_FILTERS_RULES,
EVENT_RING_OPCODE_MULTICAST_RULES,
+ EVENT_RING_OPCODE_SET_TIMESYNC,
MAX_EVENT_RING_OPCODE
};
-
/*
* Modes for fairness algorithm
*/
@@ -5010,14 +5156,18 @@ struct flow_control_configuration {
*/
struct function_start_data {
u8 function_mode;
- u8 reserved;
+ u8 allow_npar_tx_switching;
__le16 sd_vlan_tag;
__le16 vif_id;
u8 path_id;
u8 network_cos_mode;
+ u8 dmae_cmd_id;
+ u8 gre_tunnel_mode;
+ u8 gre_tunnel_rss;
+ u8 nvgre_clss_en;
+ __le16 reserved1[2];
};
-
struct function_update_data {
u8 vif_id_change_flg;
u8 afex_default_vlan_change_flg;
@@ -5027,14 +5177,19 @@ struct function_update_data {
__le16 afex_default_vlan;
u8 allowed_priorities;
u8 network_cos_mode;
+ u8 lb_mode_en_change_flg;
u8 lb_mode_en;
u8 tx_switch_suspend_change_flg;
u8 tx_switch_suspend;
u8 echo;
- __le16 reserved1;
+ u8 reserved1;
+ u8 update_gre_cfg_flg;
+ u8 gre_tunnel_mode;
+ u8 gre_tunnel_rss;
+ u8 nvgre_clss_en;
+ u32 reserved3;
};
-
/*
* FW version stored in the Xstorm RAM
*/
@@ -5061,6 +5216,22 @@ struct fw_version {
#define __FW_VERSION_RESERVED_SHIFT 4
};
+/* GRE RSS Mode */
+enum gre_rss_mode {
+ GRE_OUTER_HEADERS_RSS,
+ GRE_INNER_HEADERS_RSS,
+ NVGRE_KEY_ENTROPY_RSS,
+ MAX_GRE_RSS_MODE
+};
+
+/* GRE Tunnel Mode */
+enum gre_tunnel_type {
+ NO_GRE_TUNNEL,
+ NVGRE_TUNNEL,
+ L2GRE_TUNNEL,
+ IPGRE_TUNNEL,
+ MAX_GRE_TUNNEL_TYPE
+};
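These enums pair with the gre_tunnel_mode/gre_tunnel_rss bytes added to function_start_data above; a hedged sketch (the field semantics here are an assumption for illustration):

/* Illustrative only: request NVGRE with inner-header RSS. */
static void start_func_with_nvgre(struct function_start_data *start)
{
	start->gre_tunnel_mode = NVGRE_TUNNEL;
	start->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
	start->nvgre_clss_en = 1;	/* enable NVGRE classification */
}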
/*
* Dynamic Host-Coalescing - Driver(host) counters
@@ -5224,6 +5395,26 @@ enum ip_ver {
MAX_IP_VER
};
+/*
+ * Malicious VF error ID
+ */
+enum malicious_vf_error_id {
+ VF_PF_CHANNEL_NOT_READY,
+ ETH_ILLEGAL_BD_LENGTHS,
+ ETH_PACKET_TOO_SHORT,
+ ETH_PAYLOAD_TOO_BIG,
+ ETH_ILLEGAL_ETH_TYPE,
+ ETH_ILLEGAL_LSO_HDR_LEN,
+ ETH_TOO_MANY_BDS,
+ ETH_ZERO_HDR_NBDS,
+ ETH_START_BD_NOT_SET,
+ ETH_ILLEGAL_PARSE_NBDS,
+ ETH_IPV6_AND_CHECKSUM,
+ ETH_VLAN_FLG_INCORRECT,
+ ETH_ILLEGAL_LSO_MSS,
+ ETH_TUNNEL_NOT_SUPPORTED,
+ MAX_MALICIOUS_VF_ERROR_ID
+};
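The err_id byte added to malicious_vf_event_data above indexes this enum; a hedged decode helper (illustrative, not part of the patch):

/* Illustrative only: printable names for the new err_id values. */
static const char *malicious_vf_err_str(u8 err_id)
{
	static const char * const names[] = {
		"VF_PF_CHANNEL_NOT_READY", "ETH_ILLEGAL_BD_LENGTHS",
		"ETH_PACKET_TOO_SHORT", "ETH_PAYLOAD_TOO_BIG",
		"ETH_ILLEGAL_ETH_TYPE", "ETH_ILLEGAL_LSO_HDR_LEN",
		"ETH_TOO_MANY_BDS", "ETH_ZERO_HDR_NBDS",
		"ETH_START_BD_NOT_SET", "ETH_ILLEGAL_PARSE_NBDS",
		"ETH_IPV6_AND_CHECKSUM", "ETH_VLAN_FLG_INCORRECT",
		"ETH_ILLEGAL_LSO_MSS", "ETH_TUNNEL_NOT_SUPPORTED",
	};
	return err_id < ARRAY_SIZE(names) ? names[err_id] : "UNKNOWN";
}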
/*
* Multi-function modes
@@ -5368,7 +5559,6 @@ struct protocol_common_spe {
union protocol_common_specific_data data;
};
-
/*
* The send queue element
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 77ebae0ac64a..9d64b988ab34 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -27,6 +27,10 @@
#include "bnx2x.h"
#include "bnx2x_cmn.h"
+typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 dev_addr, u16 addr, u8 byte_cnt,
+ u8 *o_buf, u8);
/********************************************************/
#define ETH_HLEN 14
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
@@ -152,6 +156,7 @@
#define SFP_EEPROM_CON_TYPE_ADDR 0x2
#define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
#define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
+ #define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22
#define SFP_EEPROM_COMP_CODE_ADDR 0x3
@@ -3127,11 +3132,6 @@ static int bnx2x_bsc_read(struct link_params *params,
int rc = 0;
struct bnx2x *bp = params->bp;
- if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) {
- DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid);
- return -EINVAL;
- }
-
if (xfer_cnt > 16) {
DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
xfer_cnt);
@@ -3426,13 +3426,19 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
switch (phy->req_flow_ctrl) {
case BNX2X_FLOW_CTRL_AUTO:
- if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
+ switch (params->req_fc_auto_adv) {
+ case BNX2X_FLOW_CTRL_BOTH:
*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
- else
+ break;
+ case BNX2X_FLOW_CTRL_RX:
+ case BNX2X_FLOW_CTRL_TX:
*ieee_fc |=
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ break;
+ default:
+ break;
+ }
break;
-
case BNX2X_FLOW_CTRL_TX:
*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
break;
@@ -3629,6 +3635,16 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
* init configuration, and set/clear SGMII flag. Internal
* phy init is done purely in phy_init stage.
*/
+#define WC_TX_DRIVER(post2, idriver, ipre) \
+ ((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \
+ (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \
+ (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))
+
+#define WC_TX_FIR(post, main, pre) \
+ ((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \
+ (main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \
+ (pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET))
+
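For reference, the first use below, WC_TX_DRIVER(0x02, 0x06, 0x09), expands to exactly the three shifted fields the open-coded writes used before this patch:

/* WC_TX_DRIVER(0x02, 0x06, 0x09) ==
 *	(0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
 *	(0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
 *	(0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)
 * i.e. post2/idriver/ipre packed into a single register word.
 */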
static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -3728,7 +3744,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
if (((vars->line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
(vars->line_speed == SPEED_1000)) {
- u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
+ u16 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
an_adv |= (1<<5);
/* Enable CL37 1G Parallel Detect */
@@ -3753,20 +3769,13 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
/* Set Transmit PMD settings */
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
- ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ WC_TX_DRIVER(0x02, 0x06, 0x09));
/* Configure the next lane if dual mode */
if (phy->flags & FLAGS_WC_DUAL_MODE)
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1),
- ((0x02 <<
- MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x06 <<
- MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x09 <<
- MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ WC_TX_DRIVER(0x02, 0x06, 0x09));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
0x03f0);
@@ -3909,6 +3918,8 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 misc1_val, tap_val, tx_driver_val, lane, val;
+ u32 cfg_tap_val, tx_drv_brdct, tx_equal;
+
/* Hold rxSeqStart */
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
@@ -3952,23 +3963,33 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
if (is_xfi) {
misc1_val |= 0x5;
- tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
- (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
- (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
- tx_driver_val =
- ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
-
+ tap_val = WC_TX_FIR(0x08, 0x37, 0x00);
+ tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03);
} else {
+ cfg_tap_val = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].
+ sfi_tap_values));
+
+ tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK;
+
+ tx_drv_brdct = (cfg_tap_val &
+ PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >>
+ PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT;
+
misc1_val |= 0x9;
- tap_val = ((0x0f << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
- (0x2b << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
- (0x02 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
- tx_driver_val =
- ((0x03 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+
+ /* TAP values are controlled by nvram, if the value there isn't 0 */
+ if (tx_equal)
+ tap_val = (u16)tx_equal;
+ else
+ tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02);
+
+ if (tx_drv_brdct)
+ tx_driver_val = WC_TX_DRIVER(0x03, (u16)tx_drv_brdct,
+ 0x06);
+ else
+ tx_driver_val = WC_TX_DRIVER(0x03, 0x02, 0x06);
}
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
@@ -4105,15 +4126,11 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
/* Set Transmit PMD settings */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX_FIR_TAP,
- ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
- (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
- (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) |
- MDIO_WC_REG_TX_FIR_TAP_ENABLE));
+ (WC_TX_FIR(0x12, 0x2d, 0x00) |
+ MDIO_WC_REG_TX_FIR_TAP_ENABLE));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
- ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ WC_TX_DRIVER(0x02, 0x02, 0x02));
}
static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
@@ -4750,8 +4767,8 @@ void bnx2x_link_status_update(struct link_params *params,
port_mb[port].link_status));
/* Force link UP in non LOOPBACK_EXT loopback mode(s) */
- if (bp->link_params.loopback_mode != LOOPBACK_NONE &&
- bp->link_params.loopback_mode != LOOPBACK_EXT)
+ if (params->loopback_mode != LOOPBACK_NONE &&
+ params->loopback_mode != LOOPBACK_EXT)
vars->link_status |= LINK_STATUS_LINK_UP;
if (bnx2x_eee_has_cap(params))
@@ -7758,7 +7775,8 @@ static void bnx2x_sfp_set_transmitter(struct link_params *params,
static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+ u8 dev_addr, u16 addr, u8 byte_cnt,
+ u8 *o_buf, u8 is_init)
{
struct bnx2x *bp = params->bp;
u16 val = 0;
@@ -7771,7 +7789,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Set the read command byte count */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
- (byte_cnt | 0xa000));
+ (byte_cnt | (dev_addr << 8)));
/* Set the read command address */
bnx2x_cl45_write(bp, phy,
@@ -7845,6 +7863,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
}
static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
+ u8 dev_addr,
u16 addr, u8 byte_cnt,
u8 *o_buf, u8 is_init)
{
@@ -7869,7 +7888,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
usleep_range(1000, 2000);
bnx2x_warpcore_power_module(params, 1);
}
- rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
+ rc = bnx2x_bsc_read(params, phy, dev_addr, addr32, 0, byte_cnt,
data_array);
} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
@@ -7885,7 +7904,8 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+ u8 dev_addr, u16 addr, u8 byte_cnt,
+ u8 *o_buf, u8 is_init)
{
struct bnx2x *bp = params->bp;
u16 val, i;
@@ -7896,6 +7916,15 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
+ /* Set the 2-wire transfer rate of the SFP+ module EEPROM
+ * to 100kHz, since some DACs (direct attached cables) do
+ * not work at 400kHz.
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
+ ((dev_addr << 8) | 1));
+
/* Need to read from 1.8000 to clear it */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
@@ -7968,26 +7997,44 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
-
int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf)
+ struct link_params *params, u8 dev_addr,
+ u16 addr, u16 byte_cnt, u8 *o_buf)
{
- int rc = -EOPNOTSUPP;
+ int rc = 0;
+ struct bnx2x *bp = params->bp;
+ u8 xfer_size;
+ u8 *user_data = o_buf;
+ read_sfp_module_eeprom_func_p read_func;
+
+ if ((dev_addr != 0xa0) && (dev_addr != 0xa2)) {
+ DP(NETIF_MSG_LINK, "invalid dev_addr 0x%x\n", dev_addr);
+ return -EINVAL;
+ }
+
switch (phy->type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
- break;
+ read_func = bnx2x_8726_read_sfp_module_eeprom;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
- rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
- break;
+ read_func = bnx2x_8727_read_sfp_module_eeprom;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf, 0);
- break;
+ read_func = bnx2x_warpcore_read_sfp_module_eeprom;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ while (!rc && (byte_cnt > 0)) {
+ xfer_size = (byte_cnt > SFP_EEPROM_PAGE_SIZE) ?
+ SFP_EEPROM_PAGE_SIZE : byte_cnt;
+ rc = read_func(phy, params, dev_addr, addr, xfer_size,
+ user_data, 0);
+ byte_cnt -= xfer_size;
+ user_data += xfer_size;
+ addr += xfer_size;
}
return rc;
}
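With the new dev_addr argument and the paging loop above, a caller can pull an arbitrary span from either EEPROM device address in one call; a hedged usage sketch (the 96-byte serial ID length is an assumption for illustration):

/* Illustrative only: read the 96-byte serial ID block from the A0
 * device; the loop above splits it into SFP_EEPROM_PAGE_SIZE (16-byte)
 * transfers.
 */
static int read_sfp_serial_id(struct bnx2x_phy *phy,
			      struct link_params *params, u8 *buf)
{
	return bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0,
					    0, 96, buf);
}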
@@ -8004,6 +8051,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
/* First check for copper cable */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_CON_TYPE_ADDR,
2,
(u8 *)val) != 0) {
@@ -8021,6 +8069,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
*/
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_FC_TX_TECH_ADDR,
1,
&copper_module_type) != 0) {
@@ -8049,20 +8098,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
break;
}
case SFP_EEPROM_CON_TYPE_VAL_LC:
+ case SFP_EEPROM_CON_TYPE_VAL_RJ45:
check_limiting_mode = 1;
if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
SFP_EEPROM_COMP_CODE_LR_MASK |
SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
- DP(NETIF_MSG_LINK, "1G Optic module detected\n");
+ DP(NETIF_MSG_LINK, "1G SFP module detected\n");
gport = params->port;
phy->media_type = ETH_PHY_SFP_1G_FIBER;
- phy->req_line_speed = SPEED_1000;
- if (!CHIP_IS_E1x(bp))
- gport = BP_PATH(bp) + (params->port << 1);
- netdev_err(bp->dev, "Warning: Link speed was forced to 1000Mbps."
- " Current SFP module in port %d is not"
- " compliant with 10G Ethernet\n",
- gport);
+ if (phy->req_line_speed != SPEED_1000) {
+ phy->req_line_speed = SPEED_1000;
+ if (!CHIP_IS_E1x(bp)) {
+ gport = BP_PATH(bp) +
+ (params->port << 1);
+ }
+ netdev_err(bp->dev,
+ "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n",
+ gport);
+ }
} else {
int idx, cfg_idx = 0;
DP(NETIF_MSG_LINK, "10G Optic module detected\n");
@@ -8101,6 +8154,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
u8 options[SFP_EEPROM_OPTIONS_SIZE];
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_OPTIONS_ADDR,
SFP_EEPROM_OPTIONS_SIZE,
options) != 0) {
@@ -8167,6 +8221,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
/* Format the warning message */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_VENDOR_NAME_ADDR,
SFP_EEPROM_VENDOR_NAME_SIZE,
(u8 *)vendor_name))
@@ -8175,6 +8230,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_PART_NO_ADDR,
SFP_EEPROM_PART_NO_SIZE,
(u8 *)vendor_pn))
@@ -8205,12 +8261,13 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
for (timeout = 0; timeout < 60; timeout++) {
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
- rc = bnx2x_warpcore_read_sfp_module_eeprom(phy,
- params, 1,
- 1, &val, 1);
+ rc = bnx2x_warpcore_read_sfp_module_eeprom(
+ phy, params, I2C_DEV_ADDR_A0, 1, 1, &val,
+ 1);
else
- rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1,
- &val);
+ rc = bnx2x_read_sfp_module_eeprom(phy, params,
+ I2C_DEV_ADDR_A0,
+ 1, 1, &val);
if (rc == 0) {
DP(NETIF_MSG_LINK,
"SFP+ module initialization took %d ms\n",
@@ -8219,7 +8276,8 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
}
usleep_range(5000, 10000);
}
- rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val);
+ rc = bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0,
+ 1, 1, &val);
return rc;
}
@@ -8376,15 +8434,6 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
val);
-
- /* Set 2-wire transfer rate of SFP+ module EEPROM
- * to 100Khz since some DACs(direct attached cables) do
- * not work at 400Khz.
- */
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
- 0xa001);
break;
default:
DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
@@ -9528,8 +9577,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
} else {
/* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
/* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- for (i = 0; i < ARRAY_SIZE(reg_set);
- i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad,
reg_set[i].reg, reg_set[i].val);
@@ -10281,7 +10329,8 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
/* Determine if EEE was negotiated */
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
bnx2x_eee_an_resolve(phy, params, vars);
}
@@ -12242,7 +12291,7 @@ static void bnx2x_init_bmac_loopback(struct link_params *params,
bnx2x_xgxs_deassert(params);
- /* set bmac loopback */
+ /* Set bmac loopback */
bnx2x_bmac_enable(params, vars, 1, 1);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
@@ -12261,7 +12310,7 @@ static void bnx2x_init_emac_loopback(struct link_params *params,
vars->phy_flags = PHY_XGXS_FLAG;
bnx2x_xgxs_deassert(params);
- /* set bmac loopback */
+ /* Set bmac loopback */
bnx2x_emac_enable(params, vars, 1);
bnx2x_emac_program(params, vars);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
@@ -12521,6 +12570,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
params->req_line_speed[0], params->req_flow_ctrl[0]);
DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
params->req_line_speed[1], params->req_flow_ctrl[1]);
+ DP(NETIF_MSG_LINK, "req_adv_flow_ctrl 0x%x\n", params->req_fc_auto_adv);
vars->link_status = 0;
vars->phy_link_up = 0;
vars->link_up = 0;
@@ -13437,23 +13487,27 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
{
struct bnx2x *bp = params->bp;
u16 base_page, next_page, not_kr2_device, lane;
- int sigdet = bnx2x_warpcore_get_sigdet(phy, params);
-
- if (!sigdet) {
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
- bnx2x_kr2_recovery(params, vars, phy);
- return;
- }
+ int sigdet;
/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
- * since some switches tend to reinit the AN process and clear the
- * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+ * Since some switches tend to reinit the AN process and clear the
+ * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
* and recovered many times
*/
if (vars->check_kr2_recovery_cnt > 0) {
vars->check_kr2_recovery_cnt--;
return;
}
+
+ sigdet = bnx2x_warpcore_get_sigdet(phy, params);
+ if (!sigdet) {
+ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ bnx2x_kr2_recovery(params, vars, phy);
+ DP(NETIF_MSG_LINK, "No sigdet\n");
+ }
+ return;
+ }
+
lane = bnx2x_get_warpcore_lane(phy, params);
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, lane);
@@ -13465,8 +13519,10 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* CL73 has not begun yet */
if (base_page == 0) {
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
+ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
bnx2x_kr2_recovery(params, vars, phy);
+ DP(NETIF_MSG_LINK, "No BP\n");
+ }
return;
}
@@ -13482,7 +13538,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
if (!not_kr2_device) {
DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
- next_page);
+ next_page);
bnx2x_kr2_recovery(params, vars, phy);
}
return;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 56c2aae4e2c8..4df45234fdc0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -41,6 +41,9 @@
#define SPEED_AUTO_NEG 0
#define SPEED_20000 20000
+#define I2C_DEV_ADDR_A0 0xa0
+#define I2C_DEV_ADDR_A2 0xa2
+
#define SFP_EEPROM_PAGE_SIZE 16
#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
#define SFP_EEPROM_VENDOR_NAME_SIZE 16
@@ -54,6 +57,15 @@
#define SFP_EEPROM_SERIAL_SIZE 16
#define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */
#define SFP_EEPROM_DATE_SIZE 6
+#define SFP_EEPROM_DIAG_TYPE_ADDR 0x5c
+#define SFP_EEPROM_DIAG_TYPE_SIZE 1
+#define SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2)
+#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
+#define SFP_EEPROM_SFF_8472_COMP_SIZE 1
+
+#define SFP_EEPROM_A2_CHECKSUM_RANGE 0x5e
+#define SFP_EEPROM_A2_CC_DMI_ADDR 0x5f
+
#define PWR_FLT_ERR_MSG_LEN 250
#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -420,8 +432,8 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf);
+ struct link_params *params, u8 dev_addr,
+ u16 addr, u16 byte_cnt, u8 *o_buf);
void bnx2x_hw_reset_phy(struct link_params *params);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e81a747ea8ce..b4c9dea93a53 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -75,8 +75,6 @@
#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
-#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
-
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
@@ -2955,14 +2953,16 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
/* tx only connections collect statistics (on the same index as the
- * parent connection). The statistics are zeroed when the parent
- * connection is initialized.
+ * parent connection). The statistics are zeroed when the parent
+ * connection is initialized.
*/
__set_bit(BNX2X_Q_FLG_STATS, &flags);
if (zero_stats)
__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+ __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
+ __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
#ifdef BNX2X_STOP_ON_ERROR
__set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
@@ -3227,16 +3227,29 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
{
struct eth_stats_info *ether_stat =
&bp->slowpath->drv_info_to_mcp.ether_stat;
+ struct bnx2x_vlan_mac_obj *mac_obj =
+ &bp->sp_objs->mac_obj;
+ int i;
strlcpy(ether_stat->version, DRV_MODULE_VERSION,
ETH_STAT_INFO_VERSION_LEN);
- bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
- DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
- ether_stat->mac_local);
-
+ /* Get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
+ * mac_local field of the ether_stat struct. The base address is offset
+ * by 2 bytes because each slot is 8 bytes while a mac address is only
+ * 6 bytes. Likewise, the stride passed to get_n_elements is 2 bytes,
+ * padding each 6-byte mac out to the 8 bytes allocated by the
+ * ether_stat struct, so the macs land in their proper positions (see
+ * the layout sketch after this hunk).
+ */
+ for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
+ memset(ether_stat->mac_local + i, 0,
+ sizeof(ether_stat->mac_local[0]));
+ mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
+ DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+ ether_stat->mac_local + MAC_PAD, MAC_PAD,
+ ETH_ALEN);
ether_stat->mtu_size = bp->dev->mtu;
-
if (bp->dev->features & NETIF_F_RXCSUM)
ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
if (bp->dev->features & NETIF_F_TSO)
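The padding scheme described in the comment above, sketched (assuming MAC_PAD == ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN == 2):

/* Layout sketch, illustrative only:
 *
 *	mac_local slot i = bytes [8*i .. 8*i+7]
 *	  bytes [8*i   .. 8*i+1]  zeroed padding (MAC_PAD)
 *	  bytes [8*i+2 .. 8*i+7]  the 6-byte MAC
 *
 * hence base = mac_local + MAC_PAD, stride = MAC_PAD, and
 * get_n_elements() advances next += stride + size == 8 per entry.
 */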
@@ -3258,8 +3271,7 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
if (!CNIC_LOADED(bp))
return;
- memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
- bp->fip_mac, ETH_ALEN);
+ memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
fcoe_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
@@ -3361,8 +3373,8 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
if (!CNIC_LOADED(bp))
return;
- memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
- bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+ memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
+ ETH_ALEN);
iscsi_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
@@ -4947,7 +4959,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
q);
}
- if (!NO_FCOE(bp)) {
+ if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
fp = &bp->fp[FCOE_IDX(bp)];
queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
@@ -6018,10 +6030,11 @@ void bnx2x_nic_init_cnic(struct bnx2x *bp)
mmiowb();
}
-void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
+void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
{
int i;
+ /* Setup NIC internals and enable interrupts */
for_each_eth_queue(bp, i)
bnx2x_init_eth_fp(bp, i);
@@ -6030,17 +6043,26 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
bnx2x_init_rx_rings(bp);
bnx2x_init_tx_rings(bp);
- if (IS_VF(bp))
+ if (IS_VF(bp)) {
+ bnx2x_memset_stats(bp);
return;
+ }
- /* Initialize MOD_ABS interrupts */
- bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
- bp->common.shmem_base, bp->common.shmem2_base,
- BP_PORT(bp));
+ if (IS_PF(bp)) {
+ /* Initialize MOD_ABS interrupts */
+ bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
+ bp->common.shmem_base,
+ bp->common.shmem2_base, BP_PORT(bp));
+
+ /* initialize the default status block and sp ring */
+ bnx2x_init_def_sb(bp);
+ bnx2x_update_dsb_idx(bp);
+ bnx2x_init_sp_ring(bp);
+ }
+}
- bnx2x_init_def_sb(bp);
- bnx2x_update_dsb_idx(bp);
- bnx2x_init_sp_ring(bp);
+void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
+{
bnx2x_init_eq_ring(bp);
bnx2x_init_internal(bp, load_code);
bnx2x_pf_init(bp);
@@ -6058,12 +6080,7 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
AEU_INPUTS_ATTN_BITS_SPIO5);
}
-/* end of nic init */
-
-/*
- * gzip service functions
- */
-
+/* gzip service functions */
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
@@ -7757,6 +7774,8 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
+ BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
+
bnx2x_iov_free_mem(bp);
}
@@ -7773,7 +7792,7 @@ int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
sizeof(struct
host_hc_status_block_e1x));
- if (CONFIGURE_NIC_MODE(bp))
+ if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
/* allocate searcher T2 table, as it wasn't allocated before */
BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
@@ -7796,7 +7815,7 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
{
int i, allocated, context_size;
- if (!CONFIGURE_NIC_MODE(bp))
+ if (!CONFIGURE_NIC_MODE(bp) && !bp->t2)
/* allocate searcher T2 table */
BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
@@ -7917,8 +7936,6 @@ int bnx2x_del_all_macs(struct bnx2x *bp,
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
{
- unsigned long ramrod_flags = 0;
-
if (is_zero_ether_addr(bp->dev->dev_addr) &&
(IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
@@ -7926,12 +7943,18 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
return 0;
}
- DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
+ if (IS_PF(bp)) {
+ unsigned long ramrod_flags = 0;
- __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- /* Eth MAC is set on RSS leading client (fp[0]) */
- return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
- set, BNX2X_ETH_MAC, &ramrod_flags);
+ DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
+ &bp->sp_objs->mac_obj, set,
+ BNX2X_ETH_MAC, &ramrod_flags);
+ } else { /* vf */
+ return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
+ bp->fp->index, true);
+ }
}
int bnx2x_setup_leading(struct bnx2x *bp)
@@ -9525,6 +9548,10 @@ sp_rtnl_not_reset:
bnx2x_vfpf_storm_rx_mode(bp);
}
+ if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ &bp->sp_rtnl_state))
+ bnx2x_pf_set_vfs_vlan(bp);
+
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
*/
@@ -9532,8 +9559,10 @@ sp_rtnl_not_reset:
/* enable SR-IOV if applicable */
if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
- &bp->sp_rtnl_state))
+ &bp->sp_rtnl_state)) {
+ bnx2x_disable_sriov(bp);
bnx2x_enable_sriov(bp);
+ }
}
static void bnx2x_period_task(struct work_struct *work)
@@ -9701,6 +9730,31 @@ static struct bnx2x_prev_path_list *
return NULL;
}
+static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
+{
+ struct bnx2x_prev_path_list *tmp_list;
+ int rc;
+
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Received %d when tried to take lock\n", rc);
+ return rc;
+ }
+
+ tmp_list = bnx2x_prev_path_get_entry(bp);
+ if (tmp_list) {
+ tmp_list->aer = 1;
+ rc = 0;
+ } else {
+ BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n",
+ BP_PATH(bp));
+ }
+
+ up(&bnx2x_prev_sem);
+
+ return rc;
+}
+
static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
{
struct bnx2x_prev_path_list *tmp_list;
@@ -9709,14 +9763,15 @@ static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
if (down_trylock(&bnx2x_prev_sem))
return false;
- list_for_each_entry(tmp_list, &bnx2x_prev_list, list) {
- if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
- bp->pdev->bus->number == tmp_list->bus &&
- BP_PATH(bp) == tmp_list->path) {
+ tmp_list = bnx2x_prev_path_get_entry(bp);
+ if (tmp_list) {
+ if (tmp_list->aer) {
+ DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
+ BP_PATH(bp));
+ } else {
rc = true;
BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
BP_PATH(bp));
- break;
}
}
@@ -9730,6 +9785,28 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
struct bnx2x_prev_path_list *tmp_list;
int rc;
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Received %d when tried to take lock\n", rc);
+ return rc;
+ }
+
+ /* Check whether the entry for this path already exists */
+ tmp_list = bnx2x_prev_path_get_entry(bp);
+ if (tmp_list) {
+ if (!tmp_list->aer) {
+ BNX2X_ERR("Re-Marking the path.\n");
+ } else {
+ DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
+ BP_PATH(bp));
+ tmp_list->aer = 0;
+ }
+ up(&bnx2x_prev_sem);
+ return 0;
+ }
+ up(&bnx2x_prev_sem);
+
+ /* Create an entry for this path and add it */
tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
if (!tmp_list) {
BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
@@ -9739,6 +9816,7 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
tmp_list->bus = bp->pdev->bus->number;
tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
tmp_list->path = BP_PATH(bp);
+ tmp_list->aer = 0;
tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
rc = down_interruptible(&bnx2x_prev_sem);
@@ -9746,8 +9824,8 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
BNX2X_ERR("Received %d when tried to take lock\n", rc);
kfree(tmp_list);
} else {
- BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n",
- BP_PATH(bp));
+ DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
+ BP_PATH(bp));
list_add(&tmp_list->list, &bnx2x_prev_list);
up(&bnx2x_prev_sem);
}
@@ -9878,6 +9956,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
}
}
+ if (!CHIP_IS_E1x(bp))
+ /* block FW from writing to host */
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+
/* wait until BRB is empty */
tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
while (timer_count) {
@@ -9986,6 +10068,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
}
do {
+ int aer = 0;
/* Lock MCP using an unload request */
fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
if (!fw) {
@@ -9994,7 +10077,18 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
break;
}
- if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
+ rc);
+ } else {
+ /* If Path is marked by EEH, ignore unload status */
+ aer = !!(bnx2x_prev_path_get_entry(bp) &&
+ bnx2x_prev_path_get_entry(bp)->aer);
+ up(&bnx2x_prev_sem);
+ }
+
+ if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
rc = bnx2x_prev_unload_common(bp);
break;
}
@@ -10034,8 +10128,12 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
id = ((val & 0xffff) << 16);
val = REG_RD(bp, MISC_REG_CHIP_REV);
id |= ((val & 0xf) << 12);
- val = REG_RD(bp, MISC_REG_CHIP_METAL);
- id |= ((val & 0xff) << 4);
+
+ /* Metal is read from PCI regs, but we can't access addresses >= 0x400
+ * through the configuration space (so we need to REG_RD)
+ */
+ val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
+ id |= (((val >> 24) & 0xf) << 4);
val = REG_RD(bp, MISC_REG_BOND_ID);
id |= (val & 0xf);
bp->common.chip_id = id;
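A hedged decode of the chip_id word composed above (bit positions read off the shifts in this hunk; the helper is illustrative, not part of the patch):

/* Illustrative only: unpack bp->common.chip_id. */
static void decode_chip_id(u32 id)
{
	u32 num = id >> 16;		/* MISC_REG_CHIP_NUM */
	u32 rev = (id >> 12) & 0xf;	/* MISC_REG_CHIP_REV */
	u32 metal = (id >> 4) & 0xf;	/* PCI_ID_VAL3 bits 27:24 */
	u32 bond = id & 0xf;		/* MISC_REG_BOND_ID */

	pr_info("chip %x rev %x metal %x bond %x\n", num, rev, metal, bond);
}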
@@ -10571,10 +10669,12 @@ static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->link_params.speed_cap_mask[0] =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].speed_capability_mask);
+ dev_info.port_hw_config[port].speed_capability_mask) &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
bp->link_params.speed_cap_mask[1] =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].speed_capability_mask2);
+ dev_info.port_hw_config[port].speed_capability_mask2) &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
bp->port.link_config[0] =
SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
@@ -10699,6 +10799,12 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
+ /* Calculate the number of maximum allowed FCoE tasks */
+ bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
+ if (IS_MF(bp) || CHIP_MODE_IS_4_PORT(bp))
+ bp->cnic_eth_dev.max_fcoe_exchanges /=
+ MAX_FCOE_FUNCS_PER_ENGINE;
+
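Worked example for the cap computed above: with MAX_NUM_FCOE_TASKS_PER_ENGINE == 4096 and MAX_FCOE_FUNCS_PER_ENGINE == 2 (both added to bnx2x_fw_defs.h in this patch), an MF or 4-port device ends up with 4096 / 2 == 2048 exchanges per function, while a single-function 2-port device keeps the full 4096.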
/* Read the WWN: */
if (!IS_MF(bp)) {
/* Port info */
@@ -10812,14 +10918,12 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
}
}
- if (IS_MF_STORAGE_SD(bp))
- /* Zero primary MAC configuration */
- memset(bp->dev->dev_addr, 0, ETH_ALEN);
-
- if (IS_MF_FCOE_AFEX(bp) || IS_MF_FCOE_SD(bp))
- /* use FIP MAC as primary MAC */
+ /* If this is a storage-only interface, use SAN mac as
+ * primary MAC. Notice that for SD this is already the case,
+ * as the SAN mac was copied from the primary MAC.
+ */
+ if (IS_MF_FCOE_AFEX(bp))
memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
-
} else {
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
iscsi_mac_upper);
@@ -11056,6 +11160,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
} else
BNX2X_DEV_INFO("illegal OV for SD\n");
break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
+ bp->mf_config[vn] = 0;
+ break;
default:
/* Unknown configuration: reset mf_config */
bp->mf_config[vn] = 0;
@@ -11402,26 +11509,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
* net_device service functions
*/
-static int bnx2x_open_epilog(struct bnx2x *bp)
-{
- /* Enable sriov via delayed work. This must be done via delayed work
- * because it causes the probe of the vf devices to be run, which invoke
- * register_netdevice which must have rtnl lock taken. As we are holding
- * the lock right now, that could only work if the probe would not take
- * the lock. However, as the probe of the vf may be called from other
- * contexts as well (such as passthrough to vm failes) it can't assume
- * the lock is being held for it. Using delayed work here allows the
- * probe code to simply take the lock (i.e. wait for it to be released
- * if it is being held).
- */
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
-
- return 0;
-}
-
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
@@ -11791,6 +11878,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_setup_tc = bnx2x_setup_tc,
#ifdef CONFIG_BNX2X_SRIOV
.ndo_set_vf_mac = bnx2x_set_vf_mac,
+ .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
+ .ndo_get_vf_config = bnx2x_get_vf_config,
#endif
#ifdef NETDEV_FCOE_WWNN
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
@@ -11953,19 +12042,26 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &bnx2x_netdev_ops;
- bnx2x_set_ethtool_ops(dev);
+ bnx2x_set_ethtool_ops(bp, dev);
dev->priv_flags |= IFF_UNICAST_FLT;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
- NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
+ NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
+ if (!CHIP_IS_E1x(bp)) {
+ dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+ dev->hw_enc_features =
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+ }
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
- dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
+ dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
if (bp->flags & USING_DAC_FLAG)
dev->features |= NETIF_F_HIGHDMA;
@@ -12447,7 +12543,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
* l2 connections.
*/
if (IS_VF(bp)) {
- bnx2x_vf_map_doorbells(bp);
+ bp->doorbells = bnx2x_vf_doorbells(bp);
rc = bnx2x_vf_pci_alloc(bp);
if (rc)
goto init_one_exit;
@@ -12475,13 +12571,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
goto init_one_exit;
}
- /* Enable SRIOV if capability found in configuration space.
- * Once the generic SR-IOV framework makes it in from the
- * pci tree this will be revised, to allow dynamic control
- * over the number of VFs. Right now, change the num of vfs
- * param below to enable SR-IOV.
- */
- rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/);
+ /* Enable SRIOV if capability found in configuration space */
+ rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
if (rc)
goto init_one_exit;
@@ -12493,16 +12584,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
if (CHIP_IS_E1x(bp))
bp->flags |= NO_FCOE_FLAG;
- /* disable FCOE for 57840 device, until FW supports it */
- switch (ent->driver_data) {
- case BCM57840_O:
- case BCM57840_4_10:
- case BCM57840_2_20:
- case BCM57840_MFO:
- case BCM57840_MF:
- bp->flags |= NO_FCOE_FLAG;
- }
-
/* Set bp->num_queues for MSI-X mode*/
bnx2x_set_num_queues(bp);
@@ -12636,9 +12717,7 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
- int i;
-
- bp->state = BNX2X_STATE_ERROR;
+ bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
bp->rx_mode = BNX2X_RX_MODE_NONE;
@@ -12647,29 +12726,21 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
/* Stop Tx */
bnx2x_tx_disable(bp);
-
- bnx2x_netif_stop(bp, 0);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
if (CNIC_LOADED(bp))
bnx2x_del_all_napi_cnic(bp);
+ netdev_reset_tc(bp->dev);
del_timer_sync(&bp->timer);
+ cancel_delayed_work(&bp->sp_task);
+ cancel_delayed_work(&bp->period_task);
- bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
- /* Release IRQs */
- bnx2x_free_irq(bp);
-
- /* Free SKBs, SGEs, TPA pool and driver internals */
- bnx2x_free_skbs(bp);
-
- for_each_rx_queue(bp, i)
- bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-
- bnx2x_free_mem(bp);
+ spin_lock_bh(&bp->stats_lock);
+ bp->stats_state = STATS_STATE_DISABLED;
+ spin_unlock_bh(&bp->stats_lock);
- bp->state = BNX2X_STATE_CLOSED;
+ bnx2x_save_statistics(bp);
netif_carrier_off(bp->dev);
@@ -12705,6 +12776,8 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
+ BNX2X_ERR("IO error detected\n");
+
netif_device_detach(dev);
if (state == pci_channel_io_perm_failure) {
@@ -12715,6 +12788,8 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
if (netif_running(dev))
bnx2x_eeh_nic_unload(bp);
+ bnx2x_prev_path_mark_eeh(bp);
+
pci_disable_device(pdev);
rtnl_unlock();
@@ -12733,9 +12808,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2x *bp = netdev_priv(dev);
+ int i;
rtnl_lock();
-
+ BNX2X_ERR("IO slot reset initializing...\n");
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset\n");
@@ -12745,10 +12821,47 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
+ pci_save_state(pdev);
if (netif_running(dev))
bnx2x_set_power_state(bp, PCI_D0);
+ if (netif_running(dev)) {
+ BNX2X_ERR("IO slot reset --> driver unload\n");
+ if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ u32 v;
+
+ v = SHMEM2_RD(bp,
+ drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+ v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
+ }
+ bnx2x_drain_tx_queues(bp);
+ bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
+ bnx2x_netif_stop(bp, 1);
+ bnx2x_free_irq(bp);
+
+ /* Report UNLOAD_DONE to MCP */
+ bnx2x_send_unload_done(bp, true);
+
+ bp->sp_state = 0;
+ bp->port.pmf = 0;
+
+ bnx2x_prev_unload(bp);
+
+ /* We should have reset the engine, so it's fair to
+ * assume the FW will no longer write to the bnx2x driver.
+ */
+ bnx2x_squeeze_objects(bp);
+ bnx2x_free_skbs(bp);
+ for_each_rx_queue(bp, i)
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+ bnx2x_free_fp_mem(bp);
+ bnx2x_free_mem(bp);
+
+ bp->state = BNX2X_STATE_CLOSED;
+ }
+
rtnl_unlock();
return PCI_ERS_RESULT_RECOVERED;
@@ -12775,6 +12888,9 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
bnx2x_eeh_recover(bp);
+ bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+
if (netif_running(dev))
bnx2x_nic_load(bp, LOAD_NORMAL);
@@ -12797,6 +12913,9 @@ static struct pci_driver bnx2x_pci_driver = {
.suspend = bnx2x_suspend,
.resume = bnx2x_resume,
.err_handler = &bnx2x_err_handler,
+#ifdef CONFIG_BNX2X_SRIOV
+ .sriov_configure = bnx2x_sriov_configure,
+#endif
};
static int __init bnx2x_init(void)
@@ -13354,6 +13473,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
RCU_INIT_POINTER(bp->cnic_ops, NULL);
mutex_unlock(&bp->cnic_mutex);
synchronize_rcu();
+ bp->cnic_enabled = false;
kfree(bp->cnic_kwq);
bp->cnic_kwq = NULL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 791eb2d53011..d22bc40091ec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1491,10 +1491,6 @@
/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1
Port. */
#define MISC_REG_BOND_ID 0xa400
-/* [R 8] These bits indicate the metal revision of the chip. This value
- starts at 0x00 for each all-layer tape-out and increments by one for each
- tape-out. */
-#define MISC_REG_CHIP_METAL 0xa404
/* [R 16] These bits indicate the part number for the chip. */
#define MISC_REG_CHIP_NUM 0xa408
/* [R 4] These bits indicate the base revision of the chip. This value
@@ -6331,6 +6327,8 @@
#define PCI_PM_DATA_B 0x414
#define PCI_ID_VAL1 0x434
#define PCI_ID_VAL2 0x438
+#define PCI_ID_VAL3 0x43c
+
#define GRC_CONFIG_REG_PF_INIT_VF 0x624
#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf
/* First VF_NUM for PF is encoded in this register.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 7306416bc90d..32a9609cc98b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -30,8 +30,6 @@
#define BNX2X_MAX_EMUL_MULTI 16
-#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
-
/**** Exe Queue interfaces ****/
/**
@@ -444,30 +442,21 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
}
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
- int n, u8 *buf)
+ int n, u8 *base, u8 stride, u8 size)
{
struct bnx2x_vlan_mac_registry_elem *pos;
- u8 *next = buf;
+ u8 *next = base;
int counter = 0;
/* traverse list */
list_for_each_entry(pos, &o->head, link) {
if (counter < n) {
- /* place leading zeroes in buffer */
- memset(next, 0, MAC_LEADING_ZERO_CNT);
-
- /* place mac after leading zeroes*/
- memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
- ETH_ALEN);
-
- /* calculate address of next element and
- * advance counter
- */
+ memcpy(next, &pos->u, size);
counter++;
- next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
+ DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
+ counter, next);
+ next += stride + size;
- DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
- counter, next, pos->u.mac.mac);
}
}
return counter * ETH_ALEN;
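
The reworked copy contract above: each registry element contributes size bytes, then the cursor advances by stride + size, so callers can leave padding between entries (stride 0 packs them back to back, as the get_vf_config hunk later in this patch does for a single MAC). A minimal standalone sketch, using hypothetical user-space stand-ins for the driver types:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
	unsigned char macs[2][ETH_ALEN] = {
		{ 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc },
		{ 0x00, 0x10, 0x18, 0xdd, 0xee, 0xff },
	};
	unsigned char out[2 * (ETH_ALEN + 2)] = { 0 };
	unsigned char *next = out;
	int i, stride = 2, size = ETH_ALEN;

	for (i = 0; i < 2; i++) {
		memcpy(next, macs[i], size);	/* copy one element */
		next += stride + size;		/* skip caller-chosen padding */
	}
	printf("copied %d bytes of MAC data\n", 2 * size);
	return 0;
}
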
@@ -487,7 +476,8 @@ static int bnx2x_check_mac_add(struct bnx2x *bp,
/* Check if a requested MAC already exists */
list_for_each_entry(pos, &o->head, link)
- if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+ if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
+ (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return -EEXIST;
return 0;
@@ -520,7 +510,9 @@ static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
(!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)))
+ ETH_ALEN)) &&
+ (data->vlan_mac.is_inner_mac ==
+ pos->u.vlan_mac.is_inner_mac))
return -EEXIST;
return 0;
@@ -538,7 +530,8 @@ static struct bnx2x_vlan_mac_registry_elem *
DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
list_for_each_entry(pos, &o->head, link)
- if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+ if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
+ (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return pos;
return NULL;
@@ -573,7 +566,9 @@ static struct bnx2x_vlan_mac_registry_elem *
list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
(!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)))
+ ETH_ALEN)) &&
+ (data->vlan_mac.is_inner_mac ==
+ pos->u.vlan_mac.is_inner_mac))
return pos;
return NULL;
@@ -770,6 +765,8 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
&rule_entry->mac.mac_mid,
&rule_entry->mac.mac_lsb, mac);
+ rule_entry->mac.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
/* MOVE: Add a rule that will add this MAC to the target Queue */
if (cmd == BNX2X_VLAN_MAC_MOVE) {
@@ -786,6 +783,9 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
&rule_entry->mac.mac_mid,
&rule_entry->mac.mac_lsb, mac);
+ rule_entry->mac.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.
+ u.mac.is_inner_mac);
}
/* Set the ramrod data header */
@@ -974,7 +974,8 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
&rule_entry->pair.mac_mid,
&rule_entry->pair.mac_lsb, mac);
-
+ rule_entry->pair.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
/* MOVE: Add a rule that will add this MAC to the target Queue */
if (cmd == BNX2X_VLAN_MAC_MOVE) {
rule_entry++;
@@ -991,6 +992,9 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
&rule_entry->pair.mac_mid,
&rule_entry->pair.mac_lsb, mac);
+ rule_entry->pair.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.u.
+ vlan_mac.is_inner_mac);
}
/* Set the ramrod data header */
@@ -1854,6 +1858,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
return rc;
}
list_del(&exeq_pos->link);
+ bnx2x_exe_queue_free_elem(bp, exeq_pos);
}
}
@@ -2012,6 +2017,7 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
vlan_obj->check_move = bnx2x_check_move;
vlan_obj->ramrod_cmd =
RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+ vlan_obj->get_n_elements = bnx2x_get_n_elements;
/* Exe Queue */
bnx2x_exe_queue_init(bp,
@@ -4426,6 +4432,12 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
tx_data->force_default_pri_flg =
test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
+ tx_data->tunnel_lso_inc_ip_id =
+ test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
+ tx_data->tunnel_non_lso_pcsum_location =
+ test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
+ PCSUM_ON_BD;
+
tx_data->tx_status_block_id = params->fw_sb_id;
tx_data->tx_sb_index_number = params->sb_cq_index;
tx_data->tss_leading_client_id = params->tss_leading_cl_id;
@@ -5669,17 +5681,18 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
memset(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data with provided parameters */
- rdata->function_mode = (u8)start_params->mf_mode;
- rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
- rdata->path_id = BP_PATH(bp);
- rdata->network_cos_mode = start_params->network_cos_mode;
-
- /*
- * No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ rdata->function_mode = (u8)start_params->mf_mode;
+ rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
+ rdata->path_id = BP_PATH(bp);
+ rdata->network_cos_mode = start_params->network_cos_mode;
+ rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
+ rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
+
+ /* No need for an explicit memory barrier here as long we would
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read and we will have to put a full memory barrier there
+ * (inside bnx2x_sp_post()).
*/
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index ff907609b9fc..43c00bc84a08 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -100,6 +100,7 @@ struct bnx2x_raw_obj {
/************************* VLAN-MAC commands related parameters ***************/
struct bnx2x_mac_ramrod_data {
u8 mac[ETH_ALEN];
+ u8 is_inner_mac;
};
struct bnx2x_vlan_ramrod_data {
@@ -108,6 +109,7 @@ struct bnx2x_vlan_ramrod_data {
struct bnx2x_vlan_mac_ramrod_data {
u8 mac[ETH_ALEN];
+ u8 is_inner_mac;
u16 vlan;
};
@@ -313,8 +315,9 @@ struct bnx2x_vlan_mac_obj {
*
* @return number of copied bytes
*/
- int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
- int n, u8 *buf);
+ int (*get_n_elements)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o, int n, u8 *base,
+ u8 stride, u8 size);
/**
* Checks if ADD-ramrod with the given params may be performed.
@@ -824,7 +827,9 @@ enum {
BNX2X_Q_FLG_TX_SEC,
BNX2X_Q_FLG_ANTI_SPOOF,
BNX2X_Q_FLG_SILENT_VLAN_REM,
- BNX2X_Q_FLG_FORCE_DEFAULT_PRI
+ BNX2X_Q_FLG_FORCE_DEFAULT_PRI,
+ BNX2X_Q_FLG_PCSUM_ON_PKT,
+ BNX2X_Q_FLG_TUN_INC_INNER_IP_ID
};
/* Queue type options: queue type may be a compination of below. */
@@ -842,6 +847,7 @@ enum bnx2x_q_type {
#define BNX2X_MULTI_TX_COS_E3B0 3
#define BNX2X_MULTI_TX_COS 3 /* Maximum possible */
+#define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
struct bnx2x_queue_init_params {
struct {
@@ -1118,6 +1124,15 @@ struct bnx2x_func_start_params {
/* Function cos mode */
u8 network_cos_mode;
+
+ /* NVGRE classification enablement */
+ u8 nvgre_clss_en;
+
+ /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
+ u8 gre_tunnel_mode;
+
+ /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */
+ u8 gre_tunnel_rss;
};
struct bnx2x_func_switch_update_params {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6adfa2093581..2ce7c7471367 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -20,7 +20,9 @@
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
+#include "bnx2x_sp.h"
#include <linux/crc32.h>
+#include <linux/if_vlan.h>
/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
@@ -555,8 +557,7 @@ static int bnx2x_vfop_config_list(struct bnx2x *bp,
rc = bnx2x_config_vlan_mac(bp, vlan_mac);
if (rc >= 0) {
cnt += pos->add ? 1 : -1;
- list_del(&pos->link);
- list_add(&pos->link, &rollback_list);
+ list_move(&pos->link, &rollback_list);
rc = 0;
} else if (rc == -EEXIST) {
rc = 0;
@@ -958,6 +959,12 @@ op_err:
BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
case BNX2X_VFOP_QSETUP_DONE:
+ vf->cfg_flags |= VF_CFG_VLAN;
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
bnx2x_vfop_end(bp, vf, vfop);
return;
default:
@@ -1459,7 +1466,6 @@ static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
return bnx2x_is_pcie_pending(dev);
unknown_dev:
- BNX2X_ERR("Unknown device\n");
return false;
}
@@ -1926,20 +1932,22 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
/* SRIOV can be enabled only with MSIX */
if (int_mode_param == BNX2X_INT_MODE_MSI ||
- int_mode_param == BNX2X_INT_MODE_INTX)
+ int_mode_param == BNX2X_INT_MODE_INTX) {
BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
+ return 0;
+ }
err = -EIO;
/* verify ari is enabled */
if (!bnx2x_ari_enabled(bp->pdev)) {
- BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
- return err;
+ BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
+ return 0;
}
/* verify igu is in normal mode */
if (CHIP_INT_MODE_IS_BC(bp)) {
BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
- return err;
+ return 0;
}
/* allocate the vfs database */
@@ -1964,8 +1972,10 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
if (iov->total == 0)
goto failed;
- /* calculate the actual number of VFs */
- iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
+ iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
+
+ DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
+ num_vfs_param, iov->nr_virtfn);
/* allocate the vf array */
bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
@@ -2378,8 +2388,8 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
goto get_vf;
case EVENT_RING_OPCODE_MALICIOUS_VF:
abs_vfid = elem->message.data.malicious_vf_event.vf_id;
- DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
- abs_vfid);
+ DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
+ abs_vfid, elem->message.data.malicious_vf_event.err_id);
goto get_vf;
default:
return 1;
@@ -2436,8 +2446,8 @@ get_vf:
/* Do nothing for now */
break;
case EVENT_RING_OPCODE_MALICIOUS_VF:
- DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
- vf->abs_vfid);
+ DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n",
+ abs_vfid, elem->message.data.malicious_vf_event.err_id);
/* Do nothing for now */
break;
}
@@ -3012,21 +3022,138 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf->op_current = CHANNEL_TLV_NONE;
}
-void bnx2x_enable_sriov(struct bnx2x *bp)
+int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
- int rc = 0;
- /* disbale sriov in case it is still enabled */
+ struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
+
+ DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
+ num_vfs_param, BNX2X_NR_VIRTFN(bp));
+
+ /* HW channel is only operational when PF is up */
+ if (bp->state != BNX2X_STATE_OPEN) {
+ BNX2X_ERR("VF num configurtion via sysfs not supported while PF is down");
+ return -EINVAL;
+ }
+
+ /* we are always bound by the total_vfs in the configuration space */
+ if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
+ BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
+ num_vfs_param, BNX2X_NR_VIRTFN(bp));
+ num_vfs_param = BNX2X_NR_VIRTFN(bp);
+ }
+
+ bp->requested_nr_virtfn = num_vfs_param;
+ if (num_vfs_param == 0) {
+ pci_disable_sriov(dev);
+ return 0;
+ } else {
+ return bnx2x_enable_sriov(bp);
+ }
+}
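
bnx2x_sriov_configure() implements the PCI core's sriov_configure contract: it is invoked when sriov_numvfs is written in sysfs and must return the number of VFs actually enabled, 0 when disabling, or a negative errno. A standalone sketch of just the clamping rule applied above (illustrative names, not driver symbols):

#include <stdio.h>

/* hypothetical model of the bound applied by bnx2x_sriov_configure() */
static int clamp_num_vfs(int requested, int total_vfs)
{
	if (requested > total_vfs)
		requested = total_vfs;	/* bounded by SR-IOV config space */
	return requested;		/* 0 means "disable" */
}

int main(void)
{
	printf("%d\n", clamp_num_vfs(64, 16));	/* prints 16 */
	return 0;
}
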
+
+int bnx2x_enable_sriov(struct bnx2x *bp)
+{
+ int rc = 0, req_vfs = bp->requested_nr_virtfn;
+
+ rc = pci_enable_sriov(bp->pdev, req_vfs);
+ if (rc) {
+ BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
+ return rc;
+ }
+ DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
+ return req_vfs;
+}
+
+void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
+{
+ int vfidx;
+ struct pf_vf_bulletin_content *bulletin;
+
+ DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
+ for_each_vf(bp, vfidx) {
+ bulletin = BP_VF_BULLETIN(bp, vfidx);
+ if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
+ bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
+ }
+}
+
+void bnx2x_disable_sriov(struct bnx2x *bp)
+{
pci_disable_sriov(bp->pdev);
- DP(BNX2X_MSG_IOV, "sriov disabled\n");
+}
+
+static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
+ struct bnx2x_virtf *vf)
+{
+ if (!IS_SRIOV(bp)) {
+ BNX2X_ERR("vf ndo called though sriov is disabled\n");
+ return -EINVAL;
+ }
+
+ if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
+ BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
+ vfidx, BNX2X_NR_VIRTFN(bp));
+ return -EINVAL;
+ }
+
+ if (!vf) {
+ BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
+ vfidx);
+ return -EINVAL;
+ }
- /* enable sriov */
- DP(BNX2X_MSG_IOV, "vf num (%d)\n", (bp->vfdb->sriov.nr_virtfn));
- rc = pci_enable_sriov(bp->pdev, (bp->vfdb->sriov.nr_virtfn));
+ return 0;
+}
+
+int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
+ struct ifla_vf_info *ivi)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
+ struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+ struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ int rc;
+
+ /* sanity */
+ rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
if (rc)
- BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
- else
- DP(BNX2X_MSG_IOV, "sriov enabled\n");
+ return rc;
+ if (!mac_obj || !vlan_obj || !bulletin) {
+ BNX2X_ERR("VF partially initialized\n");
+ return -EINVAL;
+ }
+
+ ivi->vf = vfidx;
+ ivi->qos = 0;
+ ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
+ ivi->spoofchk = 1; /* always enabled */
+ if (vf->state == VF_ENABLED) {
+ /* mac and vlan are in vlan_mac objects */
+ mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
+ 0, ETH_ALEN);
+ vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
+ 0, VLAN_HLEN);
+ } else {
+ /* mac */
+ if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
+ /* mac configured by ndo so its in bulletin board */
+ memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
+ else
+ /* function has not been loaded yet. Show mac as 0s */
+ memset(&ivi->mac, 0, ETH_ALEN);
+
+ /* vlan */
+ if (bulletin->valid_bitmap & (1 << VLAN_VALID))
+ /* vlan configured by ndo so its in bulletin board */
+ memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
+ else
+ /* function has not been loaded yet. Show vlans as 0s */
+ memset(&ivi->vlan, 0, VLAN_HLEN);
+ }
+
+ return 0;
}
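
The three VF ndos in this file follow the standard net_device_ops signatures; the actual hookup lives in bnx2x_main.c and is not part of this hunk, but presumably takes the shape sketched here:

/* sketch only -- the real ops table is in bnx2x_main.c */
static const struct net_device_ops bnx2x_netdev_ops = {
	/* ... existing ops ... */
	.ndo_set_vf_mac		= bnx2x_set_vf_mac,
	.ndo_set_vf_vlan	= bnx2x_set_vf_vlan,
	.ndo_get_vf_config	= bnx2x_get_vf_config,
};
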
/* New mac for VF. Consider these cases:
@@ -3044,23 +3171,19 @@ void bnx2x_enable_sriov(struct bnx2x *bp)
* VF to configure any mac for itself except for this mac. In case of a race
* where the VF fails to see the new post on its bulletin board before sending a
* mac configuration request, the PF will simply fail the request and VF can try
- * again after consulting its bulletin board
+ * again after consulting its bulletin board.
*/
-int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
+int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
struct bnx2x *bp = netdev_priv(dev);
- int rc, q_logical_state, vfidx = queue;
+ int rc, q_logical_state;
struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
- /* if SRIOV is disabled there is nothing to do (and somewhere, someone
- * has erred).
- */
- if (!IS_SRIOV(bp)) {
- BNX2X_ERR("bnx2x_set_vf_mac called though sriov is disabled\n");
- return -EINVAL;
- }
-
+ /* sanity */
+ rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ if (rc)
+ return rc;
if (!is_valid_ether_addr(mac)) {
BNX2X_ERR("mac address invalid\n");
return -EINVAL;
@@ -3085,7 +3208,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
if (vf->state == VF_ENABLED &&
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the mac in device on this vf's queue */
- unsigned long flags = 0;
+ unsigned long ramrod_flags = 0;
struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
/* must lock vfpf channel to protect against vf flows */
@@ -3106,14 +3229,133 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
}
/* configure the new mac to device */
- __set_bit(RAMROD_COMP_WAIT, &flags);
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
- BNX2X_ETH_MAC, &flags);
+ BNX2X_ETH_MAC, &ramrod_flags);
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
}
- return rc;
+ return 0;
+}
+
+int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc, q_logical_state;
+ struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+ /* sanity */
+ rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ if (rc)
+ return rc;
+
+ if (vlan > 4095) {
+ BNX2X_ERR("illegal vlan value %d\n", vlan);
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
+ vfidx, vlan, 0);
+
+ /* update PF's copy of the VF's bulletin. No point in posting the vlan
+ * to the VF since it doesn't have anything to do with it. But it is
+ * useful to store it here in case the VF is not up yet, so we can
+ * configure the vlan later when it comes up.
+ */
+ bulletin->valid_bitmap |= 1 << VLAN_VALID;
+ bulletin->vlan = vlan;
+
+ /* is vf initialized and queue set up? */
+ q_logical_state =
+ bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+ if (vf->state == VF_ENABLED &&
+ q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+ /* configure the vlan in device on this vf's queue */
+ unsigned long ramrod_flags = 0;
+ unsigned long vlan_mac_flags = 0;
+ struct bnx2x_vlan_mac_obj *vlan_obj =
+ &bnx2x_vfq(vf, 0, vlan_obj);
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+ struct bnx2x_queue_state_params q_params = {NULL};
+ struct bnx2x_queue_update_params *update_params;
+
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+ /* must lock vfpf channel to protect against vf flows */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+
+ /* remove existing vlans */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc) {
+ BNX2X_ERR("failed to delete vlans\n");
+ return -EINVAL;
+ }
+
+ /* send queue update ramrod to configure default vlan and silent
+ * vlan removal
+ */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ &update_params->update_flags);
+
+ if (vlan == 0) {
+ /* if vlan is 0 then we want to leave the VF traffic
+ * untagged, and leave the incoming traffic untouched
+ * (i.e. do not remove any vlan tags).
+ */
+ __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ } else {
+ /* configure the new vlan to device */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ ramrod_param.vlan_mac_obj = vlan_obj;
+ ramrod_param.ramrod_flags = ramrod_flags;
+ ramrod_param.user_req.u.vlan.vlan = vlan;
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ if (rc) {
+ BNX2X_ERR("failed to configure vlan\n");
+ return -EINVAL;
+ }
+
+ /* configure default vlan to vf queue and set silent
+ * vlan removal (the vf remains unaware of this vlan).
+ */
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ update_params->def_vlan = vlan;
+ }
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure default VLAN\n");
+ return rc;
+ }
+
+ /* clear the flag indicating that this VF needs its vlan
+ * (will only be set if the HV configured the vlan before the VF was
+ * up, and we were called because the VF came up later).
+ */
+ vf->cfg_flags &= ~VF_CFG_VLAN;
+
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+ }
+ return 0;
}
/* crc is the first field in the bulletin board. compute the crc over the
@@ -3165,20 +3407,26 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
}
+ /* the vlan in bulletin board is valid and is new */
+ if (bulletin.valid_bitmap & 1 << VLAN_VALID)
+ memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);
+
/* copy new bulletin board to bp */
bp->old_bulletin = bulletin;
return PFVF_BULLETIN_UPDATED;
}
-void bnx2x_vf_map_doorbells(struct bnx2x *bp)
+void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
/* vf doorbells are embedded within the regview */
- bp->doorbells = bp->regview + PXP_VF_ADDR_DB_START;
+ return bp->regview + PXP_VF_ADDR_DB_START;
}
int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
+ mutex_init(&bp->vf2pf_mutex);
+
/* allocate vf2pf mailbox for vf to pf channel */
BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
sizeof(struct bnx2x_vf_mbx_msg));
@@ -3196,3 +3444,26 @@ alloc_mem_err:
sizeof(union pf_vf_bulletin));
return -ENOMEM;
}
+
+int bnx2x_open_epilog(struct bnx2x *bp)
+{
+ /* Enable sriov via delayed work. This must be done via delayed work
+ * because it causes the probe of the vf devices to be run, which
+ * invokes register_netdevice, which requires the rtnl lock. As we are
+ * holding the lock right now, that could only work if the probe would
+ * not take the lock. However, as the probe of the vf may be called
+ * from other contexts as well (such as when passthrough to a VM
+ * fails) it can't assume the lock is being held for it. Using delayed
+ * work here allows the probe code to simply take the lock (i.e. wait
+ * for it to be released if it is being held). We only want to do this
+ * if the number of VFs was set before the PF driver was loaded.
+ */
+ if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ }
+
+ return 0;
+}
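
The worker side of this handshake is not shown in this hunk; presumably the sp_rtnl task in bnx2x_main.c consumes the bit roughly as sketched below, which is what lets the VF probe take rtnl without deadlocking against the open path:

/* sketch of the assumed bnx2x_sp_rtnl_task() fragment */
if (test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state))
	bnx2x_enable_sriov(bp);
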
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index b4050173add9..d67ddc554c0f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -193,6 +193,7 @@ struct bnx2x_virtf {
#define VF_CFG_TPA 0x0004
#define VF_CFG_INT_SIMD 0x0008
#define VF_CACHE_LINE 0x0010
+#define VF_CFG_VLAN 0x0020
u8 state;
#define VF_FREE 0 /* VF ready to be acquired holds no resc */
@@ -712,6 +713,7 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
u16 length);
void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
u16 type, u16 length);
+void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv);
void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list);
bool bnx2x_tlv_supported(u16 tlvtype);
@@ -731,7 +733,7 @@ int bnx2x_vfpf_init(struct bnx2x *bp);
void bnx2x_vfpf_close_vf(struct bnx2x *bp);
int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx);
int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
-int bnx2x_vfpf_set_mac(struct bnx2x *bp);
+int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
int bnx2x_vfpf_set_mcast(struct net_device *dev);
int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
@@ -750,13 +752,17 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
}
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
-void bnx2x_vf_map_doorbells(struct bnx2x *bp);
+void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
int bnx2x_vf_pci_alloc(struct bnx2x *bp);
-void bnx2x_enable_sriov(struct bnx2x *bp);
+int bnx2x_enable_sriov(struct bnx2x *bp);
+void bnx2x_disable_sriov(struct bnx2x *bp);
static inline int bnx2x_vf_headroom(struct bnx2x *bp)
{
return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
}
+void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
+int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
+int bnx2x_open_epilog(struct bnx2x *bp);
#else /* CONFIG_BNX2X_SRIOV */
@@ -779,7 +785,8 @@ static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {}
static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
int num_vfs_param) {return 0; }
static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {}
-static inline void bnx2x_enable_sriov(struct bnx2x *bp) {}
+static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_disable_sriov(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
u8 tx_count, u8 rx_count) {return 0; }
static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
@@ -787,7 +794,8 @@ static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; }
static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
-static inline int bnx2x_vfpf_set_mac(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
+ u8 vf_qid, bool set) {return 0; }
static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; }
static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; }
static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; }
@@ -802,8 +810,15 @@ static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp
return PFVF_BULLETIN_UNCHANGED;
}
-static inline int bnx2x_vf_map_doorbells(struct bnx2x *bp) {return 0; }
+static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
+{
+ return NULL;
+}
+
static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
+static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
+static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; }
#endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 4397f8b76f2e..2ca3d94fcec2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1547,11 +1547,51 @@ static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
}
}
+void bnx2x_memset_stats(struct bnx2x *bp)
+{
+ int i;
+
+ /* function stats */
+ for_each_queue(bp, i) {
+ struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
+
+ memset(&fp_stats->old_tclient, 0,
+ sizeof(fp_stats->old_tclient));
+ memset(&fp_stats->old_uclient, 0,
+ sizeof(fp_stats->old_uclient));
+ memset(&fp_stats->old_xclient, 0,
+ sizeof(fp_stats->old_xclient));
+ if (bp->stats_init) {
+ memset(&fp_stats->eth_q_stats, 0,
+ sizeof(fp_stats->eth_q_stats));
+ memset(&fp_stats->eth_q_stats_old, 0,
+ sizeof(fp_stats->eth_q_stats_old));
+ }
+ }
+
+ memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
+
+ if (bp->stats_init) {
+ memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
+ memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
+ memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
+ memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
+ memset(&bp->func_stats, 0, sizeof(bp->func_stats));
+ }
+
+ bp->stats_state = STATS_STATE_DISABLED;
+
+ if (bp->port.pmf && bp->port.port_stx)
+ bnx2x_port_stats_base_init(bp);
+
+ /* mark the end of statistics initialization */
+ bp->stats_init = false;
+}
+
void bnx2x_stats_init(struct bnx2x *bp)
{
int /*abs*/port = BP_PORT(bp);
int mb_idx = BP_FW_MB_IDX(bp);
- int i;
bp->stats_pending = 0;
bp->executer_idx = 0;
@@ -1587,36 +1627,11 @@ void bnx2x_stats_init(struct bnx2x *bp)
&(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
}
- /* function stats */
- for_each_queue(bp, i) {
- struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
-
- memset(&fp_stats->old_tclient, 0,
- sizeof(fp_stats->old_tclient));
- memset(&fp_stats->old_uclient, 0,
- sizeof(fp_stats->old_uclient));
- memset(&fp_stats->old_xclient, 0,
- sizeof(fp_stats->old_xclient));
- if (bp->stats_init) {
- memset(&fp_stats->eth_q_stats, 0,
- sizeof(fp_stats->eth_q_stats));
- memset(&fp_stats->eth_q_stats_old, 0,
- sizeof(fp_stats->eth_q_stats_old));
- }
- }
-
/* Prepare statistics ramrod data */
bnx2x_prep_fw_stats_req(bp);
- memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
+ /* Clean SP from previous statistics */
if (bp->stats_init) {
- memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
- memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
- memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
- memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
- memset(&bp->func_stats, 0, sizeof(bp->func_stats));
-
- /* Clean SP from previous statistics */
if (bp->func_stx) {
memset(bnx2x_sp(bp, func_stats), 0,
sizeof(struct host_func_stats));
@@ -1626,13 +1641,7 @@ void bnx2x_stats_init(struct bnx2x *bp)
}
}
- bp->stats_state = STATS_STATE_DISABLED;
-
- if (bp->port.pmf && bp->port.port_stx)
- bnx2x_port_stats_base_init(bp);
-
- /* mark the end of statistics initializiation */
- bp->stats_init = false;
+ bnx2x_memset_stats(bp);
}
void bnx2x_save_statistics(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 198f6f1c9ad5..d117f472816c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -540,8 +540,8 @@ struct bnx2x_fw_port_stats_old {
/* forward */
struct bnx2x;
+void bnx2x_memset_stats(struct bnx2x *bp);
void bnx2x_stats_init(struct bnx2x *bp);
-
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 531eebf40d60..928b074d7d80 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -36,6 +36,8 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
u16 type, u16 length)
{
+ mutex_lock(&bp->vf2pf_mutex);
+
DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
type);
@@ -49,6 +51,15 @@ void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
}
+/* releases the mailbox */
+void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
+{
+ DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
+ first_tlv->tl.type);
+
+ mutex_unlock(&bp->vf2pf_mutex);
+}
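
With the mailbox mutex taken in prep and dropped in finalize, every request path in this file must now funnel its exits through finalize rather than returning directly; the canonical shape, sketched from the hunks that follow:

/* sketch: canonical shape of a vf2pf request after this patch */
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));

rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
if (rc)
	goto out;		/* never return with the mailbox held */
/* ... check resp->hdr.status ... */
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);
	return rc;
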
+
/* list the types and lengths of the tlvs on the buffer */
void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
@@ -181,8 +192,10 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* clear mailbox and prep first tlv */
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));
- if (bnx2x_get_vf_id(bp, &vf_id))
- return -EAGAIN;
+ if (bnx2x_get_vf_id(bp, &vf_id)) {
+ rc = -EAGAIN;
+ goto out;
+ }
req->vfdev_info.vf_id = vf_id;
req->vfdev_info.vf_os = 0;
@@ -213,7 +226,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* PF timeout */
if (rc)
- return rc;
+ goto out;
/* copy acquire response from buffer to bp */
memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));
@@ -253,7 +266,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* PF reports error */
BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
bp->acquire_resp.hdr.status);
- return -EAGAIN;
+ rc = -EAGAIN;
+ goto out;
}
}
@@ -279,20 +293,24 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
bp->acquire_resp.resc.current_mac_addr,
ETH_ALEN);
- return 0;
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+ return rc;
}
int bnx2x_vfpf_release(struct bnx2x *bp)
{
struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- u32 rc = 0, vf_id;
+ u32 rc, vf_id;
/* clear mailbox and prep first tlv */
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
- if (bnx2x_get_vf_id(bp, &vf_id))
- return -EAGAIN;
+ if (bnx2x_get_vf_id(bp, &vf_id)) {
+ rc = -EAGAIN;
+ goto out;
+ }
req->vf_id = vf_id;
@@ -308,7 +326,8 @@ int bnx2x_vfpf_release(struct bnx2x *bp)
if (rc)
/* PF timeout */
- return rc;
+ goto out;
+
if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
/* PF released us */
DP(BNX2X_MSG_SP, "vf released\n");
@@ -316,10 +335,13 @@ int bnx2x_vfpf_release(struct bnx2x *bp)
/* PF reports error */
BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n",
resp->hdr.status);
- return -EAGAIN;
+ rc = -EAGAIN;
+ goto out;
}
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
- return 0;
+ return rc;
}
/* Tell PF about SB addresses */
@@ -350,16 +372,20 @@ int bnx2x_vfpf_init(struct bnx2x *bp)
rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
if (rc)
- return rc;
+ goto out;
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
resp->hdr.status);
- return -EAGAIN;
+ rc = -EAGAIN;
+ goto out;
}
DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
- return 0;
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
}
/* CLOSE VF - opposite to INIT_VF */
@@ -380,6 +406,9 @@ void bnx2x_vfpf_close_vf(struct bnx2x *bp)
for_each_queue(bp, i)
bnx2x_vfpf_teardown_queue(bp, i);
+ /* remove mac */
+ bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);
+
/* clear mailbox and prep first tlv */
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));
@@ -401,6 +430,8 @@ void bnx2x_vfpf_close_vf(struct bnx2x *bp)
BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
resp->hdr.status);
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
free_irq:
/* Disable HW interrupts, NAPI */
bnx2x_netif_stop(bp, 0);
@@ -435,7 +466,6 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
/* calculate queue flags */
flags |= VFPF_QUEUE_FLG_STATS;
flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
- flags |= IS_MF_SD(bp) ? VFPF_QUEUE_FLG_OV : 0;
flags |= VFPF_QUEUE_FLG_VLAN;
DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
@@ -486,8 +516,11 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
fp_idx, resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
+
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
return rc;
}
@@ -515,41 +548,46 @@ int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
if (rc) {
BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
rc);
- return rc;
+ goto out;
}
/* PF failed the transaction */
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
- return 0;
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+ return rc;
}
/* request pf to add a mac for the vf */
-int bnx2x_vfpf_set_mac(struct bnx2x *bp)
+int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
{
struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- int rc;
+ struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
+ int rc = 0;
/* clear mailbox and prep first tlv */
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
sizeof(*req));
req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
- req->vf_qid = 0;
+ req->vf_qid = vf_qid;
req->n_mac_vlan_filters = 1;
- req->filters[0].flags =
- VFPF_Q_FILTER_DEST_MAC_VALID | VFPF_Q_FILTER_SET_MAC;
+
+ req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
+ if (set)
+ req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;
/* sample bulletin board for new mac */
bnx2x_sample_bulletin(bp);
/* copy mac from device to request */
- memcpy(req->filters[0].mac, bp->dev->dev_addr, ETH_ALEN);
+ memcpy(req->filters[0].mac, addr, ETH_ALEN);
/* add list termination tlv */
bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
@@ -562,7 +600,7 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp)
rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
if (rc) {
BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
- return rc;
+ goto out;
}
/* failure may mean PF was configured with a new mac for us */
@@ -570,6 +608,9 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp)
DP(BNX2X_MSG_IOV,
"vfpf SET MAC failed. Check bulletin board for new posts\n");
+ /* copy mac from bulletin to device */
+ memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
+
/* check if bulletin board was updated */
if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
/* copy mac from device to request */
@@ -587,8 +628,10 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp)
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
return 0;
}
@@ -643,14 +686,16 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
if (rc) {
BNX2X_ERR("Sending a message failed: %d\n", rc);
- return rc;
+ goto out;
}
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
return 0;
}
@@ -689,7 +734,8 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
break;
default:
BNX2X_ERR("BAD rx mode (%d)\n", mode);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
@@ -708,8 +754,10 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
return rc;
}
@@ -1004,7 +1052,7 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
}
/* convert MBX queue-flags to standard SP queue-flags */
-static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
+static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
unsigned long *sp_q_flags)
{
if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
@@ -1015,8 +1063,6 @@ static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
__set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
__set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
- if (mbx_q_flags & VFPF_QUEUE_FLG_OV)
- __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
__set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
@@ -1025,6 +1071,10 @@ static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
+
+ /* outer vlan removal is set according to the PF's multi function mode */
+ if (IS_MF_SD(bp))
+ __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
}
static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -1075,11 +1125,11 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
init_p->tx.hc_rate = setup_q->txq.hc_rate;
init_p->tx.sb_cq_index = setup_q->txq.sb_index;
- bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
&init_p->tx.flags);
/* tx setup - flags */
- bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
&setup_p->flags);
/* tx setup - general, nothing */
@@ -1107,11 +1157,11 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* rx init */
init_p->rx.hc_rate = setup_q->rxq.hc_rate;
init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
- bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
&init_p->rx.flags);
/* rx setup - flags */
- bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
&setup_p->flags);
/* rx setup - general */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index bfc80baec00d..41708faab575 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -328,9 +328,15 @@ struct pf_vf_bulletin_content {
#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address
* is available for it
*/
+#define VLAN_VALID 1 /* when set, the vf should not access
+ * the vfpf channel
+ */
u8 mac[ETH_ALEN];
- u8 padding[2];
+ u8 mac_padding[2];
+
+ u16 vlan;
+ u8 vlan_padding[6];
};
union pf_vf_bulletin {
@@ -353,6 +359,7 @@ enum channel_tlvs {
CHANNEL_TLV_LIST_END,
CHANNEL_TLV_FLR,
CHANNEL_TLV_PF_SET_MAC,
+ CHANNEL_TLV_PF_SET_VLAN,
CHANNEL_TLV_MAX
};
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 149a3a038491..40649a8bf390 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5544,8 +5544,10 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
- if (CNIC_SUPPORTS_FCOE(cp))
+ if (CNIC_SUPPORTS_FCOE(cp)) {
cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
+ cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
+ }
if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 0c9367a0f57d..ec9bb9ad4bb3 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -195,6 +195,7 @@ struct cnic_eth_dev {
u32 max_fcoe_conn;
u32 max_rdma_conn;
u32 fcoe_init_cid;
+ u32 max_fcoe_exchanges;
u32 fcoe_wwn_port_name_hi;
u32 fcoe_wwn_port_name_lo;
u32 fcoe_wwn_node_name_hi;
@@ -313,6 +314,8 @@ struct cnic_dev {
int max_fcoe_conn;
int max_rdma_conn;
+ int max_fcoe_exchanges;
+
union drv_info_to_mcp *stats_addr;
struct fcoe_capabilities *fcoe_cap;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index e9b35da375cb..e80bfb60c3ef 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -831,11 +831,8 @@ static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
SMP_CACHE_BYTES * 2 +
NET_IP_ALIGN);
- if (sb_new == NULL) {
- pr_info("%s: sk_buff allocation failed\n",
- d->sbdma_eth->sbm_dev->name);
+ if (sb_new == NULL)
return -ENOBUFS;
- }
sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 67d2663b3974..728d42ab2a76 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 130
+#define TG3_MIN_NUM 131
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "February 14, 2013"
+#define DRV_MODULE_RELDATE "April 09, 2013"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -212,6 +212,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
#define FIRMWARE_TG3 "tigon/tg3.bin"
+#define FIRMWARE_TG357766 "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
@@ -1873,6 +1874,20 @@ static void tg3_link_report(struct tg3 *tp)
tp->link_up = netif_carrier_ok(tp->dev);
}
+static u32 tg3_decode_flowctrl_1000T(u32 adv)
+{
+ u32 flowctrl = 0;
+
+ if (adv & ADVERTISE_PAUSE_CAP) {
+ flowctrl |= FLOW_CTRL_RX;
+ if (!(adv & ADVERTISE_PAUSE_ASYM))
+ flowctrl |= FLOW_CTRL_TX;
+ } else if (adv & ADVERTISE_PAUSE_ASYM)
+ flowctrl |= FLOW_CTRL_TX;
+
+ return flowctrl;
+}
+
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
u16 miireg;
@@ -1889,6 +1904,20 @@ static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
return miireg;
}
+static u32 tg3_decode_flowctrl_1000X(u32 adv)
+{
+ u32 flowctrl = 0;
+
+ if (adv & ADVERTISE_1000XPAUSE) {
+ flowctrl |= FLOW_CTRL_RX;
+ if (!(adv & ADVERTISE_1000XPSE_ASYM))
+ flowctrl |= FLOW_CTRL_TX;
+ } else if (adv & ADVERTISE_1000XPSE_ASYM)
+ flowctrl |= FLOW_CTRL_TX;
+
+ return flowctrl;
+}
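
Both decode helpers implement the usual 802.3 pause truth table: PAUSE alone yields symmetric RX+TX, PAUSE plus ASYM yields RX only, and ASYM alone yields TX only. A standalone sketch, with illustrative bit values standing in for the mii.h constants:

#include <stdio.h>

#define PAUSE_CAP  0x1	/* stand-in for ADVERTISE_PAUSE_CAP / _1000XPAUSE */
#define PAUSE_ASYM 0x2	/* stand-in for the asymmetric-pause bit */
#define FLOW_RX    0x1
#define FLOW_TX    0x2

static unsigned int decode(unsigned int adv)
{
	unsigned int fc = 0;

	if (adv & PAUSE_CAP) {
		fc |= FLOW_RX;
		if (!(adv & PAUSE_ASYM))
			fc |= FLOW_TX;
	} else if (adv & PAUSE_ASYM) {
		fc |= FLOW_TX;
	}
	return fc;
}

int main(void)
{
	/* prints 3 1 2: RX|TX, RX only, TX only */
	printf("%u %u %u\n", decode(PAUSE_CAP),
	       decode(PAUSE_CAP | PAUSE_ASYM), decode(PAUSE_ASYM));
	return 0;
}
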
+
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
u8 cap = 0;
@@ -2199,7 +2228,7 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
-static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
+static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
u32 phy;
@@ -2291,7 +2320,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
tg3_phy_toggle_auxctl_smdsp(tp, false);
}
-static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
+static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
u32 val;
@@ -2301,7 +2330,7 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
tp->setlpicnt = 0;
if (tp->link_config.autoneg == AUTONEG_ENABLE &&
- current_link_up == 1 &&
+ current_link_up &&
tp->link_config.active_duplex == DUPLEX_FULL &&
(tp->link_config.active_speed == SPEED_100 ||
tp->link_config.active_speed == SPEED_1000)) {
@@ -2323,7 +2352,7 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
}
if (!tp->setlpicnt) {
- if (current_link_up == 1 &&
+ if (current_link_up &&
!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
tg3_phy_toggle_auxctl_smdsp(tp, false);
@@ -2530,6 +2559,13 @@ static void tg3_carrier_off(struct tg3 *tp)
tp->link_up = false;
}
+static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
+{
+ if (tg3_flag(tp, ENABLE_ASF))
+ netdev_warn(tp->dev,
+ "Management side-band traffic will be interrupted during phy settings change\n");
+}
+
/* This will reset the tigon3 PHY if there is no valid
* link unless the FORCE argument is non-zero.
*/
@@ -2669,7 +2705,7 @@ out:
if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
tg3_phydsp_write(tp, 0xffb, 0x4000);
- tg3_phy_toggle_automdix(tp, 1);
+ tg3_phy_toggle_automdix(tp, true);
tg3_phy_set_wirespeed(tp);
return 0;
}
@@ -2925,6 +2961,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
u32 val;
+ if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
+ return;
+
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
if (tg3_asic_rev(tp) == ASIC_REV_5704) {
u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
@@ -3448,11 +3487,58 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
#define TX_CPU_SCRATCH_SIZE 0x04000
/* tp->lock is held. */
-static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
+static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
int i;
+ const int iters = 10000;
+
+ for (i = 0; i < iters; i++) {
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
+ if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
+ break;
+ }
+
+ return (i == iters) ? -EBUSY : 0;
+}
+
+/* tp->lock is held. */
+static int tg3_rxcpu_pause(struct tg3 *tp)
+{
+ int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
+
+ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+ tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
+ udelay(10);
+
+ return rc;
+}
+
+/* tp->lock is held. */
+static int tg3_txcpu_pause(struct tg3 *tp)
+{
+ return tg3_pause_cpu(tp, TX_CPU_BASE);
+}
+
+/* tp->lock is held. */
+static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
+{
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32_f(cpu_base + CPU_MODE, 0x00000000);
+}
- BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
+/* tp->lock is held. */
+static void tg3_rxcpu_resume(struct tg3 *tp)
+{
+ tg3_resume_cpu(tp, RX_CPU_BASE);
+}
+
+/* tp->lock is held. */
+static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
+{
+ int rc;
+
+ BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
if (tg3_asic_rev(tp) == ASIC_REV_5906) {
u32 val = tr32(GRC_VCPU_EXT_CTRL);
@@ -3460,17 +3546,8 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
return 0;
}
- if (offset == RX_CPU_BASE) {
- for (i = 0; i < 10000; i++) {
- tw32(offset + CPU_STATE, 0xffffffff);
- tw32(offset + CPU_MODE, CPU_MODE_HALT);
- if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
- break;
- }
-
- tw32(offset + CPU_STATE, 0xffffffff);
- tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
- udelay(10);
+ if (cpu_base == RX_CPU_BASE) {
+ rc = tg3_rxcpu_pause(tp);
} else {
/*
* There is only an Rx CPU for the 5750 derivative in the
@@ -3479,17 +3556,12 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
if (tg3_flag(tp, IS_SSB_CORE))
return 0;
- for (i = 0; i < 10000; i++) {
- tw32(offset + CPU_STATE, 0xffffffff);
- tw32(offset + CPU_MODE, CPU_MODE_HALT);
- if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
- break;
- }
+ rc = tg3_txcpu_pause(tp);
}
- if (i >= 10000) {
+ if (rc) {
netdev_err(tp->dev, "%s timed out, %s CPU\n",
- __func__, offset == RX_CPU_BASE ? "RX" : "TX");
+ __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
return -ENODEV;
}
@@ -3499,19 +3571,41 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
return 0;
}
-struct fw_info {
- unsigned int fw_base;
- unsigned int fw_len;
- const __be32 *fw_data;
-};
+static int tg3_fw_data_len(struct tg3 *tp,
+ const struct tg3_firmware_hdr *fw_hdr)
+{
+ int fw_len;
+
+ /* Non-fragmented firmware has one firmware header followed by a
+ * contiguous chunk of data to be written. The length field in that
+ * header is not the length of data to be written but the complete
+ * length of the bss. The data length is determined based on
+ * tp->fw->size minus headers.
+ *
+ * Fragmented firmware has a main header followed by multiple
+ * fragments. Each fragment is identical to non-fragmented firmware,
+ * with a firmware header followed by a contiguous chunk of data. In
+ * the main header, the length field is unused and set to 0xffffffff.
+ * In each fragment header the length is the entire size of that
+ * fragment, i.e. fragment data + header length. Data length is
+ * therefore the length field in the header minus TG3_FW_HDR_LEN.
+ */
+ if (tp->fw_len == 0xffffffff)
+ fw_len = be32_to_cpu(fw_hdr->len);
+ else
+ fw_len = tp->fw->size;
+
+ return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
+}
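
To make the two layouts concrete, here is a standalone sketch of the fragment walk performed by the loader below; the struct and sizes are illustrative stand-ins for tg3_firmware_hdr and TG3_FW_HDR_LEN, and byte-order handling is omitted:

#include <stdio.h>
#include <string.h>

struct fw_hdr {			/* stand-in for tg3_firmware_hdr */
	unsigned int version;	/* unused in fragment headers */
	unsigned int base_addr;
	unsigned int len;	/* fragment: header + data; main: ~0 */
};
#define FW_HDR_LEN ((unsigned int)sizeof(struct fw_hdr))

int main(void)
{
	/* main header plus two fragments of 8 data bytes each */
	unsigned char blob[FW_HDR_LEN + 2 * (FW_HDR_LEN + 8)];
	struct fw_hdr *hdr = (struct fw_hdr *)blob;
	unsigned int total = sizeof(blob);

	memset(blob, 0, sizeof(blob));
	hdr[0].len = 0xffffffffu;	/* main header: length unused */
	((struct fw_hdr *)(blob + FW_HDR_LEN))->len = FW_HDR_LEN + 8;
	((struct fw_hdr *)(blob + 2 * FW_HDR_LEN + 8))->len = FW_HDR_LEN + 8;

	/* skip the main header, then walk fragments as the loader does */
	total -= FW_HDR_LEN;
	hdr++;
	while (total > 0) {
		printf("fragment: %u data bytes\n", hdr->len - FW_HDR_LEN);
		total -= hdr->len;
		hdr = (struct fw_hdr *)((unsigned char *)hdr + hdr->len);
	}
	return 0;
}
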
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
u32 cpu_scratch_base, int cpu_scratch_size,
- struct fw_info *info)
+ const struct tg3_firmware_hdr *fw_hdr)
{
- int err, lock_err, i;
+ int err, i;
void (*write_op)(struct tg3 *, u32, u32);
+ int total_len = tp->fw->size;
if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
netdev_err(tp->dev,
@@ -3520,30 +3614,49 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
return -EINVAL;
}
- if (tg3_flag(tp, 5705_PLUS))
+ if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
write_op = tg3_write_mem;
else
write_op = tg3_write_indirect_reg32;
- /* It is possible that bootcode is still loading at this point.
- * Get the nvram lock first before halting the cpu.
- */
- lock_err = tg3_nvram_lock(tp);
- err = tg3_halt_cpu(tp, cpu_base);
- if (!lock_err)
- tg3_nvram_unlock(tp);
- if (err)
- goto out;
+ if (tg3_asic_rev(tp) != ASIC_REV_57766) {
+ /* It is possible that bootcode is still loading at this point.
+ * Get the nvram lock first before halting the cpu.
+ */
+ int lock_err = tg3_nvram_lock(tp);
+ err = tg3_halt_cpu(tp, cpu_base);
+ if (!lock_err)
+ tg3_nvram_unlock(tp);
+ if (err)
+ goto out;
- for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
- write_op(tp, cpu_scratch_base + i, 0);
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
- for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
- write_op(tp, (cpu_scratch_base +
- (info->fw_base & 0xffff) +
- (i * sizeof(u32))),
- be32_to_cpu(info->fw_data[i]));
+ for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
+ write_op(tp, cpu_scratch_base + i, 0);
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE,
+ tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
+ } else {
+ /* Subtract additional main header for fragmented firmware and
+ * advance to the first fragment
+ */
+ total_len -= TG3_FW_HDR_LEN;
+ fw_hdr++;
+ }
+
+ do {
+ u32 *fw_data = (u32 *)(fw_hdr + 1);
+ for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
+ write_op(tp, cpu_scratch_base +
+ (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
+ (i * sizeof(u32)),
+ be32_to_cpu(fw_data[i]));
+
+ total_len -= be32_to_cpu(fw_hdr->len);
+
+ /* Advance to next fragment */
+ fw_hdr = (struct tg3_firmware_hdr *)
+ ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
+ } while (total_len > 0);
err = 0;
@@ -3552,13 +3665,33 @@ out:
}
/* tp->lock is held. */
+static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
+{
+ int i;
+ const int iters = 5;
+
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32_f(cpu_base + CPU_PC, pc);
+
+ for (i = 0; i < iters; i++) {
+ if (tr32(cpu_base + CPU_PC) == pc)
+ break;
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
+ tw32_f(cpu_base + CPU_PC, pc);
+ udelay(1000);
+ }
+
+ return (i == iters) ? -EBUSY : 0;
+}
+
+/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
- struct fw_info info;
- const __be32 *fw_data;
- int err, i;
+ const struct tg3_firmware_hdr *fw_hdr;
+ int err;
- fw_data = (void *)tp->fw->data;
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
/* Firmware blob starts with version numbers, followed by
start address and length. We are setting complete length.
@@ -3566,60 +3699,117 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
Remainder is the blob to be loaded contiguously
from start address. */
- info.fw_base = be32_to_cpu(fw_data[1]);
- info.fw_len = tp->fw->size - 12;
- info.fw_data = &fw_data[3];
-
err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
- &info);
+ fw_hdr);
if (err)
return err;
err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
- &info);
+ fw_hdr);
if (err)
return err;
/* Now startup only the RX cpu. */
- tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
- tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
-
- for (i = 0; i < 5; i++) {
- if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
- break;
- tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
- tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
- tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
- udelay(1000);
- }
- if (i >= 5) {
+ err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
+ be32_to_cpu(fw_hdr->base_addr));
+ if (err) {
netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
"should be %08x\n", __func__,
- tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
+ tr32(RX_CPU_BASE + CPU_PC),
+ be32_to_cpu(fw_hdr->base_addr));
return -ENODEV;
}
- tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
- tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
+
+ tg3_rxcpu_resume(tp);
return 0;
}
+static int tg3_validate_rxcpu_state(struct tg3 *tp)
+{
+ const int iters = 1000;
+ int i;
+ u32 val;
+
+ /* Wait for the boot code to complete initialization and enter the
+ * service loop. It is then safe to download service patches.
+ */
+ for (i = 0; i < iters; i++) {
+ if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
+ break;
+
+ udelay(10);
+ }
+
+ if (i == iters) {
+ netdev_err(tp->dev, "Boot code not ready for service patches\n");
+ return -EBUSY;
+ }
+
+ val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
+ if (val & 0xff) {
+ netdev_warn(tp->dev,
+ "Other patches exist. Not downloading EEE patch\n");
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+/* tp->lock is held. */
+static void tg3_load_57766_firmware(struct tg3 *tp)
+{
+ struct tg3_firmware_hdr *fw_hdr;
+
+ if (!tg3_flag(tp, NO_NVRAM))
+ return;
+
+ if (tg3_validate_rxcpu_state(tp))
+ return;
+
+ if (!tp->fw)
+ return;
+
+ /* This firmware blob has a different format from older firmware
+ * releases, as described below. The main difference is that the
+ * data to be written is fragmented across non-contiguous locations.
+ *
+ * The blob begins with a firmware header identical to that of other
+ * firmware, consisting of version, base address and length. The
+ * length field of this leading header is unused and set to
+ * 0xffffffff.
+ *
+ * This is followed by a series of firmware fragments, each of which
+ * has the same layout as a non-fragmented image: a firmware header
+ * followed by the data for that fragment. The version field of each
+ * fragment header is unused.
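+ *
+ * A sketch of the resulting layout, assuming two fragments
+ * (fragment count and lengths here are illustrative only):
+ *
+ *   hdr:    { version, base_addr, len = 0xffffffff }
+ *   frag 0: { unused, base_addr0, len0 } followed by data
+ *   frag 1: { unused, base_addr1, len1 } followed by data
+ *
+ * Each fragment's len covers the fragment header plus its data, so
+ * the loader advances by len bytes to reach the next header.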
+ */
+
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
+ if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
+ return;
+
+ if (tg3_rxcpu_pause(tp))
+ return;
+
+ /* tg3_load_firmware_cpu() will always succeed for the 57766 */
+ tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
+
+ tg3_rxcpu_resume(tp);
+}
+
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
- struct fw_info info;
- const __be32 *fw_data;
+ const struct tg3_firmware_hdr *fw_hdr;
unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
- int err, i;
+ int err;
- if (tg3_flag(tp, HW_TSO_1) ||
- tg3_flag(tp, HW_TSO_2) ||
- tg3_flag(tp, HW_TSO_3))
+ if (!tg3_flag(tp, FW_TSO))
return 0;
- fw_data = (void *)tp->fw->data;
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
/* Firmware blob starts with version numbers, followed by
start address and length. We are setting complete length.
@@ -3627,10 +3817,7 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
Remainder is the blob to be loaded contiguously
from start address. */
- info.fw_base = be32_to_cpu(fw_data[1]);
cpu_scratch_size = tp->fw_len;
- info.fw_len = tp->fw->size - 12;
- info.fw_data = &fw_data[3];
if (tg3_asic_rev(tp) == ASIC_REV_5705) {
cpu_base = RX_CPU_BASE;
@@ -3643,36 +3830,28 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
err = tg3_load_firmware_cpu(tp, cpu_base,
cpu_scratch_base, cpu_scratch_size,
- &info);
+ fw_hdr);
if (err)
return err;
/* Now startup the cpu. */
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32_f(cpu_base + CPU_PC, info.fw_base);
-
- for (i = 0; i < 5; i++) {
- if (tr32(cpu_base + CPU_PC) == info.fw_base)
- break;
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
- tw32_f(cpu_base + CPU_PC, info.fw_base);
- udelay(1000);
- }
- if (i >= 5) {
+ err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
+ be32_to_cpu(fw_hdr->base_addr));
+ if (err) {
netdev_err(tp->dev,
"%s fails to set CPU PC, is %08x should be %08x\n",
- __func__, tr32(cpu_base + CPU_PC), info.fw_base);
+ __func__, tr32(cpu_base + CPU_PC),
+ be32_to_cpu(fw_hdr->base_addr));
return -ENODEV;
}
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32_f(cpu_base + CPU_MODE, 0x00000000);
+
+ tg3_resume_cpu(tp, cpu_base);
return 0;
}
/* tp->lock is held. */
-static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
+static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
{
u32 addr_high, addr_low;
int i;
@@ -3735,7 +3914,7 @@ static int tg3_power_up(struct tg3 *tp)
return err;
}
-static int tg3_setup_phy(struct tg3 *, int);
+static int tg3_setup_phy(struct tg3 *, bool);
static int tg3_power_down_prepare(struct tg3 *tp)
{
@@ -3807,7 +3986,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
- tg3_setup_phy(tp, 0);
+ tg3_setup_phy(tp, false);
}
if (tg3_asic_rev(tp) == ASIC_REV_5906) {
@@ -3848,7 +4027,13 @@ static int tg3_power_down_prepare(struct tg3 *tp)
if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
mac_mode = MAC_MODE_PORT_MODE_GMII;
- else
+ else if (tp->phy_flags &
+ TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
+ if (tp->link_config.active_speed == SPEED_1000)
+ mac_mode = MAC_MODE_PORT_MODE_GMII;
+ else
+ mac_mode = MAC_MODE_PORT_MODE_MII;
+ } else
mac_mode = MAC_MODE_PORT_MODE_MII;
mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
@@ -4102,12 +4287,16 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
u32 adv, fc;
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
+ !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
adv = ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full;
if (tg3_flag(tp, WOL_SPEED_100MB))
adv |= ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full;
+ if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
+ adv |= ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full;
fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
} else {
@@ -4121,6 +4310,15 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
tg3_phy_autoneg_cfg(tp, adv, fc);
+ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
+ (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
+ /* Normally during power down we want to autonegotiate
+ * the lowest possible speed for WOL. However, to avoid
+ * link flap, we leave it untouched.
+ */
+ return;
+ }
+
tg3_writephy(tp, MII_BMCR,
BMCR_ANENABLE | BMCR_ANRESTART);
} else {
@@ -4177,6 +4375,103 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
}
}
+static int tg3_phy_pull_config(struct tg3 *tp)
+{
+ int err;
+ u32 val;
+
+ err = tg3_readphy(tp, MII_BMCR, &val);
+ if (err)
+ goto done;
+
+ if (!(val & BMCR_ANENABLE)) {
+ tp->link_config.autoneg = AUTONEG_DISABLE;
+ tp->link_config.advertising = 0;
+ tg3_flag_clear(tp, PAUSE_AUTONEG);
+
+ err = -EIO;
+
+ switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
+ case 0:
+ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+ goto done;
+
+ tp->link_config.speed = SPEED_10;
+ break;
+ case BMCR_SPEED100:
+ if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+ goto done;
+
+ tp->link_config.speed = SPEED_100;
+ break;
+ case BMCR_SPEED1000:
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ tp->link_config.speed = SPEED_1000;
+ break;
+ }
+ /* Fall through */
+ default:
+ goto done;
+ }
+
+ if (val & BMCR_FULLDPLX)
+ tp->link_config.duplex = DUPLEX_FULL;
+ else
+ tp->link_config.duplex = DUPLEX_HALF;
+
+ tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
+
+ err = 0;
+ goto done;
+ }
+
+ tp->link_config.autoneg = AUTONEG_ENABLE;
+ tp->link_config.advertising = ADVERTISED_Autoneg;
+ tg3_flag_set(tp, PAUSE_AUTONEG);
+
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+ u32 adv;
+
+ err = tg3_readphy(tp, MII_ADVERTISE, &val);
+ if (err)
+ goto done;
+
+ adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
+ tp->link_config.advertising |= adv | ADVERTISED_TP;
+
+ tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
+ } else {
+ tp->link_config.advertising |= ADVERTISED_FIBRE;
+ }
+
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ u32 adv;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+ err = tg3_readphy(tp, MII_CTRL1000, &val);
+ if (err)
+ goto done;
+
+ adv = mii_ctrl1000_to_ethtool_adv_t(val);
+ } else {
+ err = tg3_readphy(tp, MII_ADVERTISE, &val);
+ if (err)
+ goto done;
+
+ adv = tg3_decode_flowctrl_1000X(val);
+ tp->link_config.flowctrl = adv;
+
+ val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
+ adv = mii_adv_to_ethtool_adv_x(val);
+ }
+
+ tp->link_config.advertising |= adv;
+ }
+
+done:
+ return err;
+}
+
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
int err;
@@ -4196,6 +4491,32 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
return err;
}
+static bool tg3_phy_eee_config_ok(struct tg3 *tp)
+{
+ u32 val;
+ u32 tgtadv = 0;
+ u32 advertising = tp->link_config.advertising;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
+ return true;
+
+ if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
+ return false;
+
+ val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
+
+ if (advertising & ADVERTISED_100baseT_Full)
+ tgtadv |= MDIO_AN_EEE_ADV_100TX;
+ if (advertising & ADVERTISED_1000baseT_Full)
+ tgtadv |= MDIO_AN_EEE_ADV_1000T;
+
+ if (val != tgtadv)
+ return false;
+
+ return true;
+}
+
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
u32 advmsk, tgtadv, advertising;
@@ -4262,7 +4583,7 @@ static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
return true;
}
-static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
+static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
{
if (curr_link_up != tp->link_up) {
if (curr_link_up) {
@@ -4280,23 +4601,28 @@ static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
return false;
}
-static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
+static void tg3_clear_mac_status(struct tg3 *tp)
{
- int current_link_up;
+ tw32(MAC_EVENT, 0);
+
+ tw32_f(MAC_STATUS,
+ MAC_STATUS_SYNC_CHANGED |
+ MAC_STATUS_CFG_CHANGED |
+ MAC_STATUS_MI_COMPLETION |
+ MAC_STATUS_LNKSTATE_CHANGED);
+ udelay(40);
+}
+
+static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
+{
+ bool current_link_up;
u32 bmsr, val;
u32 lcl_adv, rmt_adv;
u16 current_speed;
u8 current_duplex;
int i, err;
- tw32(MAC_EVENT, 0);
-
- tw32_f(MAC_STATUS,
- (MAC_STATUS_SYNC_CHANGED |
- MAC_STATUS_CFG_CHANGED |
- MAC_STATUS_MI_COMPLETION |
- MAC_STATUS_LNKSTATE_CHANGED));
- udelay(40);
+ tg3_clear_mac_status(tp);
if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
tw32_f(MAC_MI_MODE,
@@ -4316,7 +4642,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
tg3_readphy(tp, MII_BMSR, &bmsr);
if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
!(bmsr & BMSR_LSTATUS))
- force_reset = 1;
+ force_reset = true;
}
if (force_reset)
tg3_phy_reset(tp);
@@ -4380,7 +4706,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
}
- current_link_up = 0;
+ current_link_up = false;
current_speed = SPEED_UNKNOWN;
current_duplex = DUPLEX_UNKNOWN;
tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
@@ -4439,21 +4765,31 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
tp->link_config.active_duplex = current_duplex;
if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+ bool eee_config_ok = tg3_phy_eee_config_ok(tp);
+
if ((bmcr & BMCR_ANENABLE) &&
+ eee_config_ok &&
tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
- current_link_up = 1;
+ current_link_up = true;
+
+ /* Changes to EEE settings take effect only after a PHY
+ * reset. If we have skipped a reset because Link Flap
+ * Avoidance is enabled, do it now.
+ */
+ if (!eee_config_ok &&
+ (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
+ !force_reset)
+ tg3_phy_reset(tp);
} else {
if (!(bmcr & BMCR_ANENABLE) &&
tp->link_config.speed == current_speed &&
- tp->link_config.duplex == current_duplex &&
- tp->link_config.flowctrl ==
- tp->link_config.active_flowctrl) {
- current_link_up = 1;
+ tp->link_config.duplex == current_duplex) {
+ current_link_up = true;
}
}
- if (current_link_up == 1 &&
+ if (current_link_up &&
tp->link_config.active_duplex == DUPLEX_FULL) {
u32 reg, bit;
@@ -4473,11 +4809,11 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
}
relink:
- if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+ if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
tg3_phy_copper_begin(tp);
if (tg3_flag(tp, ROBOSWITCH)) {
- current_link_up = 1;
+ current_link_up = true;
/* FIXME: when BCM5325 switch is used use 100 MBit/s */
current_speed = SPEED_1000;
current_duplex = DUPLEX_FULL;
@@ -4488,11 +4824,11 @@ relink:
tg3_readphy(tp, MII_BMSR, &bmsr);
if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
- current_link_up = 1;
+ current_link_up = true;
}
tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
- if (current_link_up == 1) {
+ if (current_link_up) {
if (tp->link_config.active_speed == SPEED_100 ||
tp->link_config.active_speed == SPEED_10)
tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
@@ -4528,7 +4864,7 @@ relink:
tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
if (tg3_asic_rev(tp) == ASIC_REV_5700) {
- if (current_link_up == 1 &&
+ if (current_link_up &&
tg3_5700_link_polarity(tp, tp->link_config.active_speed))
tp->mac_mode |= MAC_MODE_LINK_POLARITY;
else
@@ -4559,7 +4895,7 @@ relink:
udelay(40);
if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
- current_link_up == 1 &&
+ current_link_up &&
tp->link_config.active_speed == SPEED_1000 &&
(tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
udelay(120);
@@ -4999,19 +5335,19 @@ static void tg3_init_bcm8002(struct tg3 *tp)
tg3_writephy(tp, 0x10, 0x8011);
}
-static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
+static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
u16 flowctrl;
+ bool current_link_up;
u32 sg_dig_ctrl, sg_dig_status;
u32 serdes_cfg, expected_sg_dig_ctrl;
int workaround, port_a;
- int current_link_up;
serdes_cfg = 0;
expected_sg_dig_ctrl = 0;
workaround = 0;
port_a = 1;
- current_link_up = 0;
+ current_link_up = false;
if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
@@ -5042,7 +5378,7 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
}
if (mac_status & MAC_STATUS_PCS_SYNCED) {
tg3_setup_flow_control(tp, 0, 0);
- current_link_up = 1;
+ current_link_up = true;
}
goto out;
}
@@ -5063,7 +5399,7 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
MAC_STATUS_RCVD_CFG)) ==
MAC_STATUS_PCS_SYNCED)) {
tp->serdes_counter--;
- current_link_up = 1;
+ current_link_up = true;
goto out;
}
restart_autoneg:
@@ -5098,7 +5434,7 @@ restart_autoneg:
mii_adv_to_ethtool_adv_x(remote_adv);
tg3_setup_flow_control(tp, local_adv, remote_adv);
- current_link_up = 1;
+ current_link_up = true;
tp->serdes_counter = 0;
tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
@@ -5126,7 +5462,7 @@ restart_autoneg:
if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
!(mac_status & MAC_STATUS_RCVD_CFG)) {
tg3_setup_flow_control(tp, 0, 0);
- current_link_up = 1;
+ current_link_up = true;
tp->phy_flags |=
TG3_PHYFLG_PARALLEL_DETECT;
tp->serdes_counter =
@@ -5144,9 +5480,9 @@ out:
return current_link_up;
}
-static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
+static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
- int current_link_up = 0;
+ bool current_link_up = false;
if (!(mac_status & MAC_STATUS_PCS_SYNCED))
goto out;
@@ -5173,7 +5509,7 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
tg3_setup_flow_control(tp, local_adv, remote_adv);
- current_link_up = 1;
+ current_link_up = true;
}
for (i = 0; i < 30; i++) {
udelay(20);
@@ -5188,15 +5524,15 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
}
mac_status = tr32(MAC_STATUS);
- if (current_link_up == 0 &&
+ if (!current_link_up &&
(mac_status & MAC_STATUS_PCS_SYNCED) &&
!(mac_status & MAC_STATUS_RCVD_CFG))
- current_link_up = 1;
+ current_link_up = true;
} else {
tg3_setup_flow_control(tp, 0, 0);
/* Forcing 1000FD link up. */
- current_link_up = 1;
+ current_link_up = true;
tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
udelay(40);
@@ -5209,13 +5545,13 @@ out:
return current_link_up;
}
-static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
+static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
u32 orig_pause_cfg;
u16 orig_active_speed;
u8 orig_active_duplex;
u32 mac_status;
- int current_link_up;
+ bool current_link_up;
int i;
orig_pause_cfg = tp->link_config.active_flowctrl;
@@ -5252,7 +5588,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
udelay(40);
- current_link_up = 0;
+ current_link_up = false;
tp->link_config.rmt_adv = 0;
mac_status = tr32(MAC_STATUS);
@@ -5277,7 +5613,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
mac_status = tr32(MAC_STATUS);
if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
- current_link_up = 0;
+ current_link_up = false;
if (tp->link_config.autoneg == AUTONEG_ENABLE &&
tp->serdes_counter == 0) {
tw32_f(MAC_MODE, (tp->mac_mode |
@@ -5287,7 +5623,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
}
}
- if (current_link_up == 1) {
+ if (current_link_up) {
tp->link_config.active_speed = SPEED_1000;
tp->link_config.active_duplex = DUPLEX_FULL;
tw32(MAC_LED_CTRL, (tp->led_ctrl |
@@ -5312,33 +5648,63 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
return 0;
}
-static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
+static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
{
- int current_link_up, err = 0;
+ int err = 0;
u32 bmsr, bmcr;
- u16 current_speed;
- u8 current_duplex;
- u32 local_adv, remote_adv;
+ u16 current_speed = SPEED_UNKNOWN;
+ u8 current_duplex = DUPLEX_UNKNOWN;
+ bool current_link_up = false;
+ u32 local_adv, remote_adv, sgsr;
+
+ if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720) &&
+ !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
+ (sgsr & SERDES_TG3_SGMII_MODE)) {
+
+ if (force_reset)
+ tg3_phy_reset(tp);
+
+ tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
+
+ if (!(sgsr & SERDES_TG3_LINK_UP)) {
+ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ } else {
+ current_link_up = true;
+ if (sgsr & SERDES_TG3_SPEED_1000) {
+ current_speed = SPEED_1000;
+ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ } else if (sgsr & SERDES_TG3_SPEED_100) {
+ current_speed = SPEED_100;
+ tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+ } else {
+ current_speed = SPEED_10;
+ tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+ }
+
+ if (sgsr & SERDES_TG3_FULL_DUPLEX)
+ current_duplex = DUPLEX_FULL;
+ else
+ current_duplex = DUPLEX_HALF;
+ }
+
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
+
+ tg3_clear_mac_status(tp);
+
+ goto fiber_setup_done;
+ }
tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
tw32_f(MAC_MODE, tp->mac_mode);
udelay(40);
- tw32(MAC_EVENT, 0);
-
- tw32_f(MAC_STATUS,
- (MAC_STATUS_SYNC_CHANGED |
- MAC_STATUS_CFG_CHANGED |
- MAC_STATUS_MI_COMPLETION |
- MAC_STATUS_LNKSTATE_CHANGED));
- udelay(40);
+ tg3_clear_mac_status(tp);
if (force_reset)
tg3_phy_reset(tp);
- current_link_up = 0;
- current_speed = SPEED_UNKNOWN;
- current_duplex = DUPLEX_UNKNOWN;
tp->link_config.rmt_adv = 0;
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
@@ -5424,7 +5790,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
if (bmsr & BMSR_LSTATUS) {
current_speed = SPEED_1000;
- current_link_up = 1;
+ current_link_up = true;
if (bmcr & BMCR_FULLDPLX)
current_duplex = DUPLEX_FULL;
else
@@ -5451,12 +5817,13 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
} else if (!tg3_flag(tp, 5780_CLASS)) {
/* Link is up via parallel detect */
} else {
- current_link_up = 0;
+ current_link_up = false;
}
}
}
- if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
+fiber_setup_done:
+ if (current_link_up && current_duplex == DUPLEX_FULL)
tg3_setup_flow_control(tp, local_adv, remote_adv);
tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
@@ -5535,7 +5902,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
}
}
-static int tg3_setup_phy(struct tg3 *tp, int force_reset)
+static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
u32 val;
int err;
@@ -5625,10 +5992,13 @@ static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ SOF_TIMESTAMPING_SOFTWARE;
+
+ if (tg3_flag(tp, PTP_CAPABLE)) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ }
if (tp->ptp_clock)
info->phc_index = ptp_clock_index(tp->ptp_clock);
@@ -6348,7 +6718,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
if (desc->type_flags & RXD_FLAG_VLAN &&
!(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
- __vlan_hwaccel_put_tag(skb,
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
desc->err_vlan & RXD_VLAN_MASK);
napi_gro_receive(&tnapi->napi, skb);
@@ -6436,7 +6806,7 @@ static void tg3_poll_link(struct tg3 *tp)
MAC_STATUS_LNKSTATE_CHANGED));
udelay(40);
} else
- tg3_setup_phy(tp, 0);
+ tg3_setup_phy(tp, false);
spin_unlock(&tp->lock);
}
}
@@ -7533,7 +7903,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
u32 val, bmcr, mac_mode, ptest = 0;
tg3_phy_toggle_apd(tp, false);
- tg3_phy_toggle_automdix(tp, 0);
+ tg3_phy_toggle_automdix(tp, false);
if (extlpbk && tg3_phy_set_extloopbk(tp))
return -EIO;
@@ -7641,7 +8011,7 @@ static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
spin_lock_bh(&tp->lock);
tg3_mac_loopback(tp, false);
/* Force link status check */
- tg3_setup_phy(tp, 1);
+ tg3_setup_phy(tp, true);
spin_unlock_bh(&tp->lock);
netdev_info(dev, "Internal MAC loopback mode disabled.\n");
}
@@ -8039,11 +8409,9 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
TG3_RX_RCB_RING_BYTES(tp),
&tnapi->rx_rcb_mapping,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!tnapi->rx_rcb)
goto err_out;
-
- memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
}
return 0;
@@ -8093,12 +8461,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
sizeof(struct tg3_hw_stats),
&tp->stats_mapping,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!tp->hw_stats)
goto err_out;
- memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
-
for (i = 0; i < tp->irq_cnt; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
struct tg3_hw_status *sblk;
@@ -8106,11 +8472,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
TG3_HW_STATUS_SIZE,
&tnapi->status_mapping,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!tnapi->hw_status)
goto err_out;
- memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
sblk = tnapi->hw_status;
if (tg3_flag(tp, ENABLE_RSS)) {
@@ -8157,7 +8522,7 @@ err_out:
/* To stop a block, clear the enable bit and poll till it
* clears. tp->lock is held.
*/
-static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
+static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
{
unsigned int i;
u32 val;
@@ -8201,7 +8566,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int
}
/* tp->lock is held. */
-static int tg3_abort_hw(struct tg3 *tp, int silent)
+static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
int i, err;
@@ -8561,6 +8926,9 @@ static int tg3_chip_reset(struct tg3 *tp)
/* Reprobe ASF enable state. */
tg3_flag_clear(tp, ENABLE_ASF);
+ tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
+ TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
+
tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
if (val == NIC_SRAM_DATA_SIG_MAGIC) {
@@ -8572,6 +8940,12 @@ static int tg3_chip_reset(struct tg3 *tp)
tp->last_event_jiffies = jiffies;
if (tg3_flag(tp, 5750_PLUS))
tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
+
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
+ if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
+ tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
+ if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
+ tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
}
}
@@ -8582,7 +8956,7 @@ static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
/* tp->lock is held. */
-static int tg3_halt(struct tg3 *tp, int kind, int silent)
+static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
int err;
@@ -8593,7 +8967,7 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
tg3_abort_hw(tp, silent);
err = tg3_chip_reset(tp);
- __tg3_set_mac_addr(tp, 0);
+ __tg3_set_mac_addr(tp, false);
tg3_write_sig_legacy(tp, kind);
tg3_write_sig_post_reset(tp, kind);
@@ -8617,7 +8991,8 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
struct tg3 *tp = netdev_priv(dev);
struct sockaddr *addr = p;
- int err = 0, skip_mac_1 = 0;
+ int err = 0;
+ bool skip_mac_1 = false;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -8638,7 +9013,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
/* Skip MAC addr 1 if ASF is using it. */
if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
!(addr1_high == 0 && addr1_low == 0))
- skip_mac_1 = 1;
+ skip_mac_1 = true;
}
spin_lock_bh(&tp->lock);
__tg3_set_mac_addr(tp, skip_mac_1);
@@ -9057,7 +9432,7 @@ static void tg3_rss_write_indir_tbl(struct tg3 *tp)
}
/* tp->lock is held. */
-static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
+static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
u32 val, rdmac_mode;
int i, err, limit;
@@ -9106,6 +9481,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
+ if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
+ !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
+ tg3_phy_pull_config(tp);
+ tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
+ }
+
if (reset_phy)
tg3_phy_reset(tp);
@@ -9444,7 +9825,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tg3_rings_reset(tp);
/* Initialize MAC address and backoff seed. */
- __tg3_set_mac_addr(tp, 0);
+ __tg3_set_mac_addr(tp, false);
/* MTU + ethernet header + FCS + optional VLAN tag */
tw32(MAC_RX_MTU_SIZE,
@@ -9781,6 +10162,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
return err;
}
+ if (tg3_asic_rev(tp) == ASIC_REV_57766) {
+ /* Ignore any errors for the firmware download. If download
+ * fails, the device will operate with EEE disabled.
+ */
+ tg3_load_57766_firmware(tp);
+ }
+
if (tg3_flag(tp, TSO_CAPABLE)) {
err = tg3_load_tso_firmware(tp);
if (err)
@@ -9888,7 +10276,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
- err = tg3_setup_phy(tp, 0);
+ err = tg3_setup_phy(tp, false);
if (err)
return err;
@@ -9968,7 +10356,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Called at device open time to get the chip ready for
* packet processing. Invoked with tp->lock held.
*/
-static int tg3_init_hw(struct tg3 *tp, int reset_phy)
+static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
tg3_switch_clocks(tp);
@@ -10229,7 +10617,7 @@ static void tg3_timer(unsigned long __opaque)
phy_event = 1;
if (phy_event)
- tg3_setup_phy(tp, 0);
+ tg3_setup_phy(tp, false);
} else if (tg3_flag(tp, POLL_SERDES)) {
u32 mac_stat = tr32(MAC_STATUS);
int need_setup = 0;
@@ -10252,7 +10640,7 @@ static void tg3_timer(unsigned long __opaque)
tw32_f(MAC_MODE, tp->mac_mode);
udelay(40);
}
- tg3_setup_phy(tp, 0);
+ tg3_setup_phy(tp, false);
}
} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
tg3_flag(tp, 5780_CLASS)) {
@@ -10338,7 +10726,7 @@ static void tg3_timer_stop(struct tg3 *tp)
/* Restart hardware after configuration changes, self-test, etc.
* Invoked with tp->lock held.
*/
-static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
+static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
__releases(tp->lock)
__acquires(tp->lock)
{
@@ -10388,7 +10776,7 @@ static void tg3_reset_task(struct work_struct *work)
}
tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
- err = tg3_init_hw(tp, 1);
+ err = tg3_init_hw(tp, true);
if (err)
goto out;
@@ -10558,7 +10946,7 @@ static int tg3_test_msi(struct tg3 *tp)
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- err = tg3_init_hw(tp, 1);
+ err = tg3_init_hw(tp, true);
tg3_full_unlock(tp);
@@ -10570,7 +10958,7 @@ static int tg3_test_msi(struct tg3 *tp)
static int tg3_request_firmware(struct tg3 *tp)
{
- const __be32 *fw_data;
+ const struct tg3_firmware_hdr *fw_hdr;
if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
@@ -10578,15 +10966,15 @@ static int tg3_request_firmware(struct tg3 *tp)
return -ENOENT;
}
- fw_data = (void *)tp->fw->data;
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
/* Firmware blob starts with version numbers, followed by
* start address and _full_ length including BSS sections
 * (which must be longer than the actual data, of course).
*/
- tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
- if (tp->fw_len < (tp->fw->size - 12)) {
+ tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
+ if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
tp->fw_len, tp->fw_needed);
release_firmware(tp->fw);
@@ -10885,7 +11273,15 @@ static int tg3_open(struct net_device *dev)
if (tp->fw_needed) {
err = tg3_request_firmware(tp);
- if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
+ if (tg3_asic_rev(tp) == ASIC_REV_57766) {
+ if (err) {
+ netdev_warn(tp->dev, "EEE capability disabled\n");
+ tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
+ } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
+ netdev_warn(tp->dev, "EEE capability restored\n");
+ tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
+ }
+ } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
if (err)
return err;
} else if (err) {
@@ -10910,7 +11306,9 @@ static int tg3_open(struct net_device *dev)
tg3_full_unlock(tp);
- err = tg3_start(tp, true, true, true);
+ err = tg3_start(tp,
+ !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
+ true, true);
if (err) {
tg3_frob_aux_power(tp, false);
pci_set_power_state(tp->pdev, PCI_D3hot);
@@ -11416,8 +11814,12 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
tp->link_config.duplex = cmd->duplex;
}
+ tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
+
+ tg3_warn_mgmt_link_flap(tp);
+
if (netif_running(dev))
- tg3_setup_phy(tp, 1);
+ tg3_setup_phy(tp, true);
tg3_full_unlock(tp);
@@ -11494,6 +11896,8 @@ static int tg3_nway_reset(struct net_device *dev)
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
return -EINVAL;
+ tg3_warn_mgmt_link_flap(tp);
+
if (tg3_flag(tp, USE_PHYLIB)) {
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
@@ -11571,7 +11975,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- err = tg3_restart_hw(tp, 1);
+ err = tg3_restart_hw(tp, false);
if (!err)
tg3_netif_start(tp);
}
@@ -11606,6 +12010,9 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
struct tg3 *tp = netdev_priv(dev);
int err = 0;
+ if (tp->link_config.autoneg == AUTONEG_ENABLE)
+ tg3_warn_mgmt_link_flap(tp);
+
if (tg3_flag(tp, USE_PHYLIB)) {
u32 newadv;
struct phy_device *phydev;
@@ -11692,7 +12099,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- err = tg3_restart_hw(tp, 1);
+ err = tg3_restart_hw(tp, false);
if (!err)
tg3_netif_start(tp);
}
@@ -11700,6 +12107,8 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
tg3_full_unlock(tp);
}
+ tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
+
return err;
}
@@ -12760,7 +13169,7 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
goto done;
}
- err = tg3_reset_hw(tp, 1);
+ err = tg3_reset_hw(tp, true);
if (err) {
data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
@@ -12927,7 +13336,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
if (netif_running(dev)) {
tg3_flag_set(tp, INIT_COMPLETE);
- err2 = tg3_restart_hw(tp, 1);
+ err2 = tg3_restart_hw(tp, true);
if (!err2)
tg3_netif_start(tp);
}
@@ -13244,7 +13653,8 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
struct tg3 *tp = netdev_priv(dev);
- int err, reset_phy = 0;
+ int err;
+ bool reset_phy = false;
if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
return -EINVAL;
@@ -13271,7 +13681,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
* breaks all requests to 256 bytes.
*/
if (tg3_asic_rev(tp) == ASIC_REV_57766)
- reset_phy = 1;
+ reset_phy = true;
err = tg3_restart_hw(tp, reset_phy);
@@ -13837,6 +14247,12 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
case FLASH_5762_EEPROM_LD:
nvmpinstrp = FLASH_5720_EEPROM_LD;
break;
+ case FLASH_5720VENDOR_M_ST_M45PE20:
+ /* This pinstrap supports multiple sizes, so force it
+ * to read the actual size from location 0xf0.
+ */
+ nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
+ break;
}
}
@@ -14289,14 +14705,18 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
(cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
- if (tg3_flag(tp, PCI_EXPRESS) &&
- tg3_asic_rev(tp) != ASIC_REV_5785 &&
- !tg3_flag(tp, 57765_PLUS)) {
+ if (tg3_flag(tp, PCI_EXPRESS)) {
u32 cfg3;
tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
- if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
+ if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
+ !tg3_flag(tp, 57765_PLUS) &&
+ (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
tg3_flag_set(tp, ASPM_WORKAROUND);
+ if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
+ tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
+ if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
+ tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
}
if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
@@ -14450,6 +14870,12 @@ static int tg3_phy_probe(struct tg3 *tp)
}
}
+ if (!tg3_flag(tp, ENABLE_ASF) &&
+ !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
+ !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
+ tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
+ TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
+
if (tg3_flag(tp, USE_PHYLIB))
return tg3_phy_init(tp);
@@ -14515,6 +14941,7 @@ static int tg3_phy_probe(struct tg3 *tp)
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
(tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_57766 ||
tg3_asic_rev(tp) == ASIC_REV_5762 ||
(tg3_asic_rev(tp) == ASIC_REV_5717 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
@@ -14524,7 +14951,8 @@ static int tg3_phy_probe(struct tg3 *tp)
tg3_phy_init_link_config(tp);
- if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
+ if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
+ !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
!tg3_flag(tp, ENABLE_APE) &&
!tg3_flag(tp, ENABLE_ASF)) {
u32 bmsr, dummy;
@@ -14604,8 +15032,11 @@ static void tg3_read_vpd(struct tg3 *tp)
if (j + len > block_end)
goto partno;
- memcpy(tp->fw_ver, &vpd_data[j], len);
- strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
+ if (len >= sizeof(tp->fw_ver))
+ len = sizeof(tp->fw_ver) - 1;
+ memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
+ snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
+ &vpd_data[j]);
}
partno:
@@ -15297,7 +15728,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
tg3_asic_rev(tp) != ASIC_REV_5701 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
- tg3_flag_set(tp, TSO_BUG);
+ tg3_flag_set(tp, FW_TSO);
+ tg3_flag_set(tp, TSO_BUG);
if (tg3_asic_rev(tp) == ASIC_REV_5705)
tp->fw_needed = FIRMWARE_TG3TSO5;
else
@@ -15308,7 +15740,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3) ||
- tp->fw_needed) {
+ tg3_flag(tp, FW_TSO)) {
/* For firmware TSO, assume ASF is disabled.
* We'll disable TSO later if we discover ASF
* is enabled in tg3_get_eeprom_hw_cfg().
@@ -15323,6 +15755,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
tp->fw_needed = FIRMWARE_TG3;
+ if (tg3_asic_rev(tp) == ASIC_REV_57766)
+ tp->fw_needed = FIRMWARE_TG357766;
+
tp->irq_max = 1;
if (tg3_flag(tp, 5750_PLUS)) {
@@ -15595,7 +16030,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
*/
tg3_get_eeprom_hw_cfg(tp);
- if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
+ if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
tg3_flag_clear(tp, TSO_CAPABLE);
tg3_flag_clear(tp, TSO_BUG);
tp->fw_needed = NULL;
@@ -15783,6 +16218,11 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
udelay(50);
tg3_nvram_init(tp);
+ /* If the device has an NVRAM, no need to load patch firmware */
+ if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
+ !tg3_flag(tp, NO_NVRAM))
+ tp->fw_needed = NULL;
+
grc_misc_cfg = tr32(GRC_MISC_CFG);
grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
@@ -16141,7 +16581,7 @@ out:
}
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
- int size, int to_device)
+ int size, bool to_device)
{
struct tg3_internal_buffer_desc test_desc;
u32 sram_dma_descs;
@@ -16341,7 +16781,7 @@ static int tg3_test_dma(struct tg3 *tp)
p[i] = i;
/* Send the buffer to the chip. */
- ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
+ ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
if (ret) {
dev_err(&tp->pdev->dev,
"%s: Buffer write failed. err = %d\n",
@@ -16364,7 +16804,7 @@ static int tg3_test_dma(struct tg3 *tp)
}
#endif
/* Now read it back. */
- ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
+ ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
if (ret) {
dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
"err = %d\n", __func__, ret);
@@ -16760,7 +17200,7 @@ static int tg3_init_one(struct pci_dev *pdev,
tg3_init_bufmgr_config(tp);
- features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
/* 5700 B0 chips do not support checksumming correctly due
* to hardware bugs.
@@ -17045,7 +17485,7 @@ static int tg3_suspend(struct device *device)
tg3_full_lock(tp, 0);
tg3_flag_set(tp, INIT_COMPLETE);
- err2 = tg3_restart_hw(tp, 1);
+ err2 = tg3_restart_hw(tp, true);
if (err2)
goto out;
@@ -17079,7 +17519,8 @@ static int tg3_resume(struct device *device)
tg3_full_lock(tp, 0);
tg3_flag_set(tp, INIT_COMPLETE);
- err = tg3_restart_hw(tp, 1);
+ err = tg3_restart_hw(tp,
+ !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
if (err)
goto out;
@@ -17095,15 +17536,9 @@ out:
return err;
}
+#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
-#define TG3_PM_OPS (&tg3_pm_ops)
-
-#else
-
-#define TG3_PM_OPS NULL
-
-#endif /* CONFIG_PM_SLEEP */
/**
* tg3_io_error_detected - called when PCI error is detected
@@ -17218,7 +17653,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
tg3_full_lock(tp, 0);
tg3_flag_set(tp, INIT_COMPLETE);
- err = tg3_restart_hw(tp, 1);
+ err = tg3_restart_hw(tp, true);
if (err) {
tg3_full_unlock(tp);
netdev_err(netdev, "Cannot restart hardware after reset.\n");
@@ -17251,7 +17686,7 @@ static struct pci_driver tg3_driver = {
.probe = tg3_init_one,
.remove = tg3_remove_one,
.err_handler = &tg3_err_handler,
- .driver.pm = TG3_PM_OPS,
+ .driver.pm = &tg3_pm_ops,
};
static int __init tg3_init(void)
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 8d7d4c2ab5d6..9b2d3ac2474a 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2198,6 +2198,8 @@
#define NIC_SRAM_DATA_CFG_3 0x00000d3c
#define NIC_SRAM_ASPM_DEBOUNCE 0x00000002
+#define NIC_SRAM_LNK_FLAP_AVOID 0x00400000
+#define NIC_SRAM_1G_ON_VAUX_OK 0x00800000
#define NIC_SRAM_DATA_CFG_4 0x00000d60
#define NIC_SRAM_GMII_MODE 0x00000002
@@ -2222,6 +2224,12 @@
#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000
#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000
+#define TG3_SRAM_RXCPU_SCRATCH_BASE_57766 0x00030000
+#define TG3_SRAM_RXCPU_SCRATCH_SIZE_57766 0x00010000
+#define TG3_57766_FW_BASE_ADDR 0x00030000
+#define TG3_57766_FW_HANDSHAKE 0x0003fccc
+#define TG3_SBROM_IN_SERVICE_LOOP 0x51
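+/* tg3_validate_rxcpu_state() polls RX_CPU_HWBKPT for
+ * TG3_SBROM_IN_SERVICE_LOOP before a service-patch download, and
+ * treats a nonzero low byte of TG3_57766_FW_HANDSHAKE as another
+ * patch already being present.
+ */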
+
#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128
#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64
#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32
@@ -2365,6 +2373,13 @@
#define MII_TG3_FET_SHDW_AUXSTAT2 0x1b
#define MII_TG3_FET_SHDW_AUXSTAT2_APD 0x0020
+/* Serdes PHY Register Definitions */
+#define SERDES_TG3_1000X_STATUS 0x14
+#define SERDES_TG3_SGMII_MODE 0x0001
+#define SERDES_TG3_LINK_UP 0x0002
+#define SERDES_TG3_FULL_DUPLEX 0x0004
+#define SERDES_TG3_SPEED_100 0x0008
+#define SERDES_TG3_SPEED_1000 0x0010
/* APE registers. Accessible through BAR1 */
#define TG3_APE_GPIO_MSG 0x0008
@@ -3009,17 +3024,18 @@ enum TG3_FLAGS {
TG3_FLAG_JUMBO_CAPABLE,
TG3_FLAG_CHIP_RESETTING,
TG3_FLAG_INIT_COMPLETE,
- TG3_FLAG_TSO_BUG,
TG3_FLAG_MAX_RXPEND_64,
- TG3_FLAG_TSO_CAPABLE,
TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */
TG3_FLAG_ASF_NEW_HANDSHAKE,
TG3_FLAG_HW_AUTONEG,
TG3_FLAG_IS_NIC,
TG3_FLAG_FLASH,
+ TG3_FLAG_FW_TSO,
TG3_FLAG_HW_TSO_1,
TG3_FLAG_HW_TSO_2,
TG3_FLAG_HW_TSO_3,
+ TG3_FLAG_TSO_CAPABLE,
+ TG3_FLAG_TSO_BUG,
TG3_FLAG_ICH_WORKAROUND,
TG3_FLAG_1SHOT_MSI,
TG3_FLAG_NO_FWARE_REPORTED,
@@ -3064,6 +3080,13 @@ enum TG3_FLAGS {
TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
};
+struct tg3_firmware_hdr {
+ __be32 version; /* unused for fragments */
+ __be32 base_addr;
+ __be32 len;
+};
+#define TG3_FW_HDR_LEN (sizeof(struct tg3_firmware_hdr))
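+/* A minimal sketch of pulling the header fields out of a loaded blob;
+ * the fields are stored big endian, so be32_to_cpu() is required:
+ *
+ *	const struct tg3_firmware_hdr *fw_hdr =
+ *		(struct tg3_firmware_hdr *)tp->fw->data;
+ *	u32 base = be32_to_cpu(fw_hdr->base_addr);
+ *	u32 len  = be32_to_cpu(fw_hdr->len);
+ */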
+
struct tg3 {
/* begin "general, frequently-used members" cacheline section */
@@ -3267,6 +3290,7 @@ struct tg3 {
#define TG3_PHYFLG_IS_LOW_POWER 0x00000001
#define TG3_PHYFLG_IS_CONNECTED 0x00000002
#define TG3_PHYFLG_USE_MI_INTERRUPT 0x00000004
+#define TG3_PHYFLG_USER_CONFIGURED 0x00000008
#define TG3_PHYFLG_PHY_SERDES 0x00000010
#define TG3_PHYFLG_MII_SERDES 0x00000020
#define TG3_PHYFLG_ANY_SERDES (TG3_PHYFLG_PHY_SERDES | \
@@ -3284,6 +3308,8 @@ struct tg3 {
#define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000
#define TG3_PHYFLG_PARALLEL_DETECT 0x00020000
#define TG3_PHYFLG_EEE_CAP 0x00040000
+#define TG3_PHYFLG_1G_ON_VAUX_OK 0x00080000
+#define TG3_PHYFLG_KEEP_LINK_ON_PWRDN 0x00100000
#define TG3_PHYFLG_MDIX_STATE 0x00200000
u32 led_ctrl;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 3227fdde521b..f2b73ffa9122 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -76,7 +76,7 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
-static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
+static void bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 7cce42dc2f20..ce4a030d3d0c 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -610,7 +610,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
rcb->rxq->rx_bytes += length;
if (flags & BNA_CQ_EF_VLAN)
- __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
napi_gro_frags(&rx_ctrl->napi);
@@ -1264,9 +1264,8 @@ bnad_mem_alloc(struct bnad *bnad,
mem_info->mdl[i].len = mem_info->len;
mem_info->mdl[i].kva =
dma_alloc_coherent(&bnad->pcidev->dev,
- mem_info->len, &dma_pa,
- GFP_KERNEL);
-
+ mem_info->len, &dma_pa,
+ GFP_KERNEL);
if (mem_info->mdl[i].kva == NULL)
goto err_return;
@@ -3069,8 +3068,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
}
static int
-bnad_vlan_rx_add_vid(struct net_device *netdev,
- unsigned short vid)
+bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct bnad *bnad = netdev_priv(netdev);
unsigned long flags;
@@ -3091,8 +3089,7 @@ bnad_vlan_rx_add_vid(struct net_device *netdev,
}
static int
-bnad_vlan_rx_kill_vid(struct net_device *netdev,
- unsigned short vid)
+bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct bnad *bnad = netdev_priv(netdev);
unsigned long flags;
@@ -3171,14 +3168,14 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX;
netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
netdev->features |= netdev->hw_features |
- NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
if (using_dac)
netdev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 3becdb2deb46..cc9a185f0abb 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -47,22 +47,19 @@ static int at91ether_start(struct net_device *dev)
int i;
lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
- MAX_RX_DESCR * sizeof(struct macb_dma_desc),
- &lp->rx_ring_dma, GFP_KERNEL);
- if (!lp->rx_ring) {
- netdev_err(dev, "unable to alloc rx ring DMA buffer\n");
+ (MAX_RX_DESCR *
+ sizeof(struct macb_dma_desc)),
+ &lp->rx_ring_dma, GFP_KERNEL);
+ if (!lp->rx_ring)
return -ENOMEM;
- }
lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
- MAX_RX_DESCR * MAX_RBUFF_SZ,
- &lp->rx_buffers_dma, GFP_KERNEL);
+ MAX_RX_DESCR * MAX_RBUFF_SZ,
+ &lp->rx_buffers_dma, GFP_KERNEL);
if (!lp->rx_buffers) {
- netdev_err(dev, "unable to alloc rx data DMA buffer\n");
-
dma_free_coherent(&lp->pdev->dev,
- MAX_RX_DESCR * sizeof(struct macb_dma_desc),
- lp->rx_ring, lp->rx_ring_dma);
+ MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+ lp->rx_ring, lp->rx_ring_dma);
lp->rx_ring = NULL;
return -ENOMEM;
}
@@ -209,7 +206,6 @@ static void at91ether_rx(struct net_device *dev)
netif_rx(skb);
} else {
lp->stats.rx_dropped++;
- netdev_notice(dev, "Memory squeeze, dropping packet.\n");
}
if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
@@ -303,42 +299,7 @@ static const struct of_device_id at91ether_dt_ids[] = {
{ .compatible = "cdns,emac" },
{ /* sentinel */ }
};
-
MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
-
-static int at91ether_get_phy_mode_dt(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
-
- if (np)
- return of_get_phy_mode(np);
-
- return -ENODEV;
-}
-
-static int at91ether_get_hwaddr_dt(struct macb *bp)
-{
- struct device_node *np = bp->pdev->dev.of_node;
-
- if (np) {
- const char *mac = of_get_mac_address(np);
- if (mac) {
- memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
- return 0;
- }
- }
-
- return -ENODEV;
-}
-#else
-static int at91ether_get_phy_mode_dt(struct platform_device *pdev)
-{
- return -ENODEV;
-}
-static int at91ether_get_hwaddr_dt(struct macb *bp)
-{
- return -ENODEV;
-}
#endif
/* Detect MAC & PHY and perform ethernet interface initialization */
@@ -352,6 +313,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
struct macb *lp;
int res;
u32 reg;
+ const char *mac;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs)
@@ -403,11 +365,13 @@ static int __init at91ether_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- res = at91ether_get_hwaddr_dt(lp);
- if (res < 0)
+ mac = of_get_mac_address(pdev->dev.of_node);
+ if (mac)
+ memcpy(lp->dev->dev_addr, mac, ETH_ALEN);
+ else
macb_get_hwaddr(lp);
- res = at91ether_get_phy_mode_dt(pdev);
+ res = of_get_phy_mode(pdev->dev.of_node);
if (res < 0) {
if (board_data && board_data->is_rmii)
lp->phy_interface = PHY_INTERFACE_MODE_RMII;
@@ -430,7 +394,8 @@ static int __init at91ether_probe(struct platform_device *pdev)
if (res)
goto err_disable_clock;
- if (macb_mii_init(lp) != 0)
+ res = macb_mii_init(lp);
+ if (res)
goto err_out_unregister_netdev;
/* will be enabled in open() */
@@ -519,18 +484,7 @@ static struct platform_driver at91ether_driver = {
},
};
-static int __init at91ether_init(void)
-{
- return platform_driver_probe(&at91ether_driver, at91ether_probe);
-}
-
-static void __exit at91ether_exit(void)
-{
- platform_driver_unregister(&at91ether_driver);
-}
-
-module_init(at91ether_init)
-module_exit(at91ether_exit)
+module_platform_driver_probe(at91ether_driver, at91ether_probe);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 79039439bfdc..6be513deb17f 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -485,6 +485,8 @@ static void macb_tx_interrupt(struct macb *bp)
status = macb_readl(bp, TSR);
macb_writel(bp, TSR, status);
+ macb_writel(bp, ISR, MACB_BIT(TCOMP));
+
netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
(unsigned long)status);
@@ -736,6 +738,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
* now.
*/
macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
+ macb_writel(bp, ISR, MACB_BIT(RCOMP));
if (napi_schedule_prep(&bp->napi)) {
netdev_vdbg(bp->dev, "scheduling RX softirq\n");
@@ -1054,6 +1057,7 @@ static void macb_configure_dma(struct macb *bp)
dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64);
dmacfg |= GEM_BF(FBLDO, 16);
dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
+ dmacfg &= ~GEM_BIT(ENDIA);
gem_writel(bp, DMACFG, dmacfg);
}
}
@@ -1472,41 +1476,7 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,gem" },
{ /* sentinel */ }
};
-
MODULE_DEVICE_TABLE(of, macb_dt_ids);
-
-static int macb_get_phy_mode_dt(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
-
- if (np)
- return of_get_phy_mode(np);
-
- return -ENODEV;
-}
-
-static int macb_get_hwaddr_dt(struct macb *bp)
-{
- struct device_node *np = bp->pdev->dev.of_node;
- if (np) {
- const char *mac = of_get_mac_address(np);
- if (mac) {
- memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
- return 0;
- }
- }
-
- return -ENODEV;
-}
-#else
-static int macb_get_phy_mode_dt(struct platform_device *pdev)
-{
- return -ENODEV;
-}
-static int macb_get_hwaddr_dt(struct macb *bp)
-{
- return -ENODEV;
-}
#endif
static int __init macb_probe(struct platform_device *pdev)
@@ -1519,6 +1489,7 @@ static int __init macb_probe(struct platform_device *pdev)
u32 config;
int err = -ENXIO;
struct pinctrl *pinctrl;
+ const char *mac;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
@@ -1557,14 +1528,14 @@ static int __init macb_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get macb_clk\n");
goto err_out_free_dev;
}
- clk_enable(bp->pclk);
+ clk_prepare_enable(bp->pclk);
bp->hclk = clk_get(&pdev->dev, "hclk");
if (IS_ERR(bp->hclk)) {
dev_err(&pdev->dev, "failed to get hclk\n");
goto err_out_put_pclk;
}
- clk_enable(bp->hclk);
+ clk_prepare_enable(bp->hclk);
bp->regs = ioremap(regs->start, resource_size(regs));
if (!bp->regs) {
@@ -1592,11 +1563,13 @@ static int __init macb_probe(struct platform_device *pdev)
config |= macb_dbw(bp);
macb_writel(bp, NCFGR, config);
- err = macb_get_hwaddr_dt(bp);
- if (err < 0)
+ mac = of_get_mac_address(pdev->dev.of_node);
+ if (mac)
+ memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
+ else
macb_get_hwaddr(bp);
- err = macb_get_phy_mode_dt(pdev);
+ err = of_get_phy_mode(pdev->dev.of_node);
if (err < 0) {
pdata = pdev->dev.platform_data;
if (pdata && pdata->is_rmii)
@@ -1629,9 +1602,9 @@ static int __init macb_probe(struct platform_device *pdev)
goto err_out_free_irq;
}
- if (macb_mii_init(bp) != 0) {
+ err = macb_mii_init(bp);
+ if (err)
goto err_out_unregister_netdev;
- }
platform_set_drvdata(pdev, dev);
@@ -1654,9 +1627,9 @@ err_out_free_irq:
err_out_iounmap:
iounmap(bp->regs);
err_out_disable_clocks:
- clk_disable(bp->hclk);
+ clk_disable_unprepare(bp->hclk);
clk_put(bp->hclk);
- clk_disable(bp->pclk);
+ clk_disable_unprepare(bp->pclk);
err_out_put_pclk:
clk_put(bp->pclk);
err_out_free_dev:
@@ -1683,9 +1656,9 @@ static int __exit macb_remove(struct platform_device *pdev)
unregister_netdev(dev);
free_irq(dev->irq, dev);
iounmap(bp->regs);
- clk_disable(bp->hclk);
+ clk_disable_unprepare(bp->hclk);
clk_put(bp->hclk);
- clk_disable(bp->pclk);
+ clk_disable_unprepare(bp->pclk);
clk_put(bp->pclk);
free_netdev(dev);
platform_set_drvdata(pdev, NULL);
@@ -1703,8 +1676,8 @@ static int macb_suspend(struct platform_device *pdev, pm_message_t state)
netif_carrier_off(netdev);
netif_device_detach(netdev);
- clk_disable(bp->hclk);
- clk_disable(bp->pclk);
+ clk_disable_unprepare(bp->hclk);
+ clk_disable_unprepare(bp->pclk);
return 0;
}
@@ -1714,8 +1687,8 @@ static int macb_resume(struct platform_device *pdev)
struct net_device *netdev = platform_get_drvdata(pdev);
struct macb *bp = netdev_priv(netdev);
- clk_enable(bp->pclk);
- clk_enable(bp->hclk);
+ clk_prepare_enable(bp->pclk);
+ clk_prepare_enable(bp->hclk);
netif_device_attach(netdev);
@@ -1737,18 +1710,7 @@ static struct platform_driver macb_driver = {
},
};
-static int __init macb_init(void)
-{
- return platform_driver_probe(&macb_driver, macb_probe);
-}
-
-static void __exit macb_exit(void)
-{
- platform_driver_unregister(&macb_driver);
-}
-
-module_init(macb_init);
-module_exit(macb_exit);
+module_platform_driver_probe(macb_driver, macb_probe);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 570908b93578..993d70380688 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -173,6 +173,8 @@
/* Bitfields in DMACFG. */
#define GEM_FBLDO_OFFSET 0
#define GEM_FBLDO_SIZE 5
+#define GEM_ENDIA_OFFSET 7
+#define GEM_ENDIA_SIZE 1
#define GEM_RXBMS_OFFSET 8
#define GEM_RXBMS_SIZE 2
#define GEM_TXPBMS_OFFSET 10
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index a170065b5973..4a1f2fa812ab 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -163,6 +163,7 @@
#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
/* XGMAC_INT_STAT reg */
+#define XGMAC_INT_STAT_PMTIM 0x00800000 /* PMT Interrupt Mask */
#define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */
#define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */
@@ -960,6 +961,9 @@ static int xgmac_hw_init(struct net_device *dev)
writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+ /* Mask power mgt interrupt */
+ writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
+
/* XGMAC requires AXI bus init. This is a 'magic number' for now */
writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
@@ -1141,6 +1145,9 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
struct sk_buff *skb;
int frame_len;
+ if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ))
+ break;
+
entry = priv->rx_tail;
p = priv->dma_rx + entry;
if (desc_get_owner(p))
@@ -1475,7 +1482,7 @@ static int xgmac_set_features(struct net_device *dev, netdev_features_t features
u32 ctrl;
struct xgmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = priv->base;
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (!(changed & NETIF_F_RXCSUM))
return 0;
@@ -1825,7 +1832,7 @@ static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
unsigned int pmt = 0;
if (mode & WAKE_MAGIC)
- pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
+ pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
if (mode & WAKE_UCAST)
pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
@@ -1879,12 +1886,9 @@ static int xgmac_resume(struct device *dev)
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);
-#define XGMAC_PM_OPS (&xgmac_pm_ops)
-#else
-#define XGMAC_PM_OPS NULL
-#endif /* CONFIG_PM_SLEEP */
static const struct of_device_id xgmac_of_match[] = {
{ .compatible = "calxeda,hb-xgmac", },
@@ -1899,7 +1903,7 @@ static struct platform_driver xgmac_driver = {
},
.probe = xgmac_probe,
.remove = xgmac_remove,
- .driver.pm = XGMAC_PM_OPS,
+ .driver.pm = &xgmac_pm_ops,
};
module_platform_driver(xgmac_driver);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 20d2085f61c5..9624cfe7df57 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -856,10 +856,10 @@ static netdev_features_t t1_fix_features(struct net_device *dev,
* Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx.
*/
- if (features & NETIF_F_HW_VLAN_RX)
- features |= NETIF_F_HW_VLAN_TX;
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
else
- features &= ~NETIF_F_HW_VLAN_TX;
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features;
}
@@ -869,7 +869,7 @@ static int t1_set_features(struct net_device *dev, netdev_features_t features)
netdev_features_t changed = dev->features ^ features;
struct adapter *adapter = dev->ml_priv;
- if (changed & NETIF_F_HW_VLAN_RX)
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
t1_vlan_mode(adapter, features);
return 0;
@@ -1085,8 +1085,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->features |= NETIF_F_HIGHDMA;
if (vlan_tso_capable(adapter)) {
netdev->features |=
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- netdev->hw_features |= NETIF_F_HW_VLAN_RX;
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
/* T204: disable TSO */
if (!(is_T2(adapter)) || bi->port_number != 4) {
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 482976925154..8061fb0ef7ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -734,7 +734,7 @@ void t1_vlan_mode(struct adapter *adapter, netdev_features_t features)
{
struct sge *sge = adapter->sge;
- if (features & NETIF_F_HW_VLAN_RX)
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
sge->sge_control |= F_VLAN_XTRACT;
else
sge->sge_control &= ~F_VLAN_XTRACT;
@@ -835,7 +835,7 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
struct sk_buff *skb;
dma_addr_t mapping;
- skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
+ skb = dev_alloc_skb(q->rx_buffer_size);
if (!skb)
break;
@@ -1046,11 +1046,10 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev,
const struct freelQ_ce *ce = &fl->centries[fl->cidx];
if (len < copybreak) {
- skb = alloc_skb(len + 2, GFP_ATOMIC);
+ skb = netdev_alloc_skb_ip_align(NULL, len);
if (!skb)
goto use_orig_buf;
- skb_reserve(skb, 2); /* align IP header */
skb_put(skb, len);
pci_dma_sync_single_for_cpu(pdev,
dma_unmap_addr(ce, dma_addr),
@@ -1387,7 +1386,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
if (p->vlan_valid) {
st->vlan_xtract++;
- __vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
}
netif_receive_skb(skb);
}
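/* Editor's note: as the hunks above show, __vlan_hwaccel_put_tag() now takes
 * the VLAN tag protocol explicitly. A minimal usage sketch (helper name
 * hypothetical), tagging a received skb with an 802.1Q CTAG:
 */
static void rx_put_ctag(struct sk_buff *skb, u16 tci)
{
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
}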
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 2b5e62193cea..71497e835f42 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1181,14 +1181,15 @@ static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
if (adapter->params.rev > 0) {
t3_set_vlan_accel(adapter, 1 << pi->port_id,
- features & NETIF_F_HW_VLAN_RX);
+ features & NETIF_F_HW_VLAN_CTAG_RX);
} else {
/* single control for all ports */
- unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX;
+ unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;
for_each_port(adapter, i)
have_vlans |=
- adapter->port[i]->features & NETIF_F_HW_VLAN_RX;
+ adapter->port[i]->features &
+ NETIF_F_HW_VLAN_CTAG_RX;
t3_set_vlan_accel(adapter, 1, have_vlans);
}
@@ -2563,10 +2564,10 @@ static netdev_features_t cxgb_fix_features(struct net_device *dev,
* Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx.
*/
- if (features & NETIF_F_HW_VLAN_RX)
- features |= NETIF_F_HW_VLAN_TX;
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
else
- features &= ~NETIF_F_HW_VLAN_TX;
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features;
}
@@ -2575,7 +2576,7 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
netdev_features_t changed = dev->features ^ features;
- if (changed & NETIF_F_HW_VLAN_RX)
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
cxgb_vlan_mode(dev, features);
return 0;
@@ -3288,8 +3289,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len - 1;
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;
- netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_TX;
+ NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->features |= netdev->hw_features |
+ NETIF_F_HW_VLAN_CTAG_TX;
netdev->vlan_features |= netdev->features & VLAN_FEAT;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 4232767862b5..0c96e5fe99cc 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -185,7 +185,7 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
rcu_read_lock();
if (vlan && vlan != VLAN_VID_MASK) {
- dev = __vlan_find_dev_deep(dev, vlan);
+ dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), vlan);
} else if (netif_is_bond_slave(dev)) {
struct net_device *upper_dev;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 9d67eb794c4b..f12e6b85a653 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2030,7 +2030,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
if (p->vlan_valid) {
qs->port_stats[SGE_PSTAT_VLANEX]++;
- __vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
}
if (rq->polling) {
if (lro)
@@ -2132,7 +2132,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
if (cpl->vlan_valid) {
qs->port_stats[SGE_PSTAT_VLANEX]++;
- __vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan));
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
}
napi_gro_frags(&qs->napi);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 6db997c78a5f..681804b30a3f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -54,6 +54,10 @@
#define FW_VERSION_MINOR 1
#define FW_VERSION_MICRO 0
+#define FW_VERSION_MAJOR_T5 0
+#define FW_VERSION_MINOR_T5 0
+#define FW_VERSION_MICRO_T5 0
+
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
enum {
@@ -66,7 +70,9 @@ enum {
enum {
MEM_EDC0,
MEM_EDC1,
- MEM_MC
+ MEM_MC,
+ MEM_MC0 = MEM_MC,
+ MEM_MC1
};
enum {
@@ -74,8 +80,10 @@ enum {
MEMWIN0_BASE = 0x1b800,
MEMWIN1_APERTURE = 32768,
MEMWIN1_BASE = 0x28000,
+ MEMWIN1_BASE_T5 = 0x52000,
MEMWIN2_APERTURE = 65536,
MEMWIN2_BASE = 0x30000,
+ MEMWIN2_BASE_T5 = 0x54000,
};
enum dev_master {
@@ -431,6 +439,7 @@ struct sge_txq {
spinlock_t db_lock;
int db_disabled;
unsigned short db_pidx;
+ u64 udb;
};
struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
@@ -504,13 +513,44 @@ struct sge {
struct l2t_data;
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+
+#define CHELSIO_T4 0x4
+#define CHELSIO_T5 0x5
+
+enum chip_type {
+ T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0),
+ T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
+ T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
+ T4_FIRST_REV = T4_A1,
+ T4_LAST_REV = T4_A3,
+
+ T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+ T5_FIRST_REV = T5_A1,
+ T5_LAST_REV = T5_A1,
+};
+
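/* Editor's demo (stand-alone user-space illustration, not part of this
 * patch): the packed chip code round-trips through the accessor macros
 * defined above; the macro bodies are copied verbatim from this header.
 */
#include <assert.h>

#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)

int main(void)
{
	int t5_a1 = CHELSIO_CHIP_CODE(0x5, 0);		/* == 0x50 */

	assert(CHELSIO_CHIP_VERSION(t5_a1) == 0x5);	/* CHELSIO_T5 */
	assert(CHELSIO_CHIP_RELEASE(t5_a1) == 0x0);	/* revision A1 */
	return 0;
}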
+#ifdef CONFIG_PCI_IOV
+
+/* T4 supports SRIOV on PF0-3 and T5 on PF0-7. However, the Serial
+ * Configuration initialization for T5 only has SR-IOV functionality enabled
+ * on PF0-3 in order to simplify everything.
+ */
+#define NUM_OF_PF_WITH_SRIOV 4
+
+#endif
+
struct adapter {
void __iomem *regs;
+ void __iomem *bar2;
struct pci_dev *pdev;
struct device *pdev_dev;
unsigned int mbox;
unsigned int fn;
unsigned int flags;
+ enum chip_type chip;
int msg_enable;
@@ -673,6 +713,16 @@ enum {
VLAN_REWRITE
};
+static inline int is_t5(enum chip_type chip)
+{
+ return (chip >= T5_FIRST_REV && chip <= T5_LAST_REV);
+}
+
+static inline int is_t4(enum chip_type chip)
+{
+ return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV);
+}
+
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
return readl(adap->regs + reg_addr);
@@ -858,7 +908,8 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
unsigned int flags);
-int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
+int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
+ u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index e707e31abd81..c59ec3ddaa66 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -68,8 +68,8 @@
#include "t4fw_api.h"
#include "l2t.h"
-#define DRV_VERSION "1.3.0-ko"
-#define DRV_DESC "Chelsio T4 Network Driver"
+#define DRV_VERSION "2.0.0-ko"
+#define DRV_DESC "Chelsio T4/T5 Network Driver"
/*
* Max interrupt hold-off timer value in us. Queues fall back to this value
@@ -229,11 +229,51 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
CH_DEVICE(0x440a, 4),
CH_DEVICE(0x440d, 4),
CH_DEVICE(0x440e, 4),
+ CH_DEVICE(0x5001, 4),
+ CH_DEVICE(0x5002, 4),
+ CH_DEVICE(0x5003, 4),
+ CH_DEVICE(0x5004, 4),
+ CH_DEVICE(0x5005, 4),
+ CH_DEVICE(0x5006, 4),
+ CH_DEVICE(0x5007, 4),
+ CH_DEVICE(0x5008, 4),
+ CH_DEVICE(0x5009, 4),
+ CH_DEVICE(0x500A, 4),
+ CH_DEVICE(0x500B, 4),
+ CH_DEVICE(0x500C, 4),
+ CH_DEVICE(0x500D, 4),
+ CH_DEVICE(0x500E, 4),
+ CH_DEVICE(0x500F, 4),
+ CH_DEVICE(0x5010, 4),
+ CH_DEVICE(0x5011, 4),
+ CH_DEVICE(0x5012, 4),
+ CH_DEVICE(0x5013, 4),
+ CH_DEVICE(0x5401, 4),
+ CH_DEVICE(0x5402, 4),
+ CH_DEVICE(0x5403, 4),
+ CH_DEVICE(0x5404, 4),
+ CH_DEVICE(0x5405, 4),
+ CH_DEVICE(0x5406, 4),
+ CH_DEVICE(0x5407, 4),
+ CH_DEVICE(0x5408, 4),
+ CH_DEVICE(0x5409, 4),
+ CH_DEVICE(0x540A, 4),
+ CH_DEVICE(0x540B, 4),
+ CH_DEVICE(0x540C, 4),
+ CH_DEVICE(0x540D, 4),
+ CH_DEVICE(0x540E, 4),
+ CH_DEVICE(0x540F, 4),
+ CH_DEVICE(0x5410, 4),
+ CH_DEVICE(0x5411, 4),
+ CH_DEVICE(0x5412, 4),
+ CH_DEVICE(0x5413, 4),
{ 0, }
};
#define FW_FNAME "cxgb4/t4fw.bin"
+#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW_CFNAME "cxgb4/t4-config.txt"
+#define FW5_CFNAME "cxgb4/t5-config.txt"
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
@@ -241,6 +281,7 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);
+MODULE_FIRMWARE(FW5_FNAME);
/*
* Normally we're willing to become the firmware's Master PF but will be happy
@@ -319,7 +360,10 @@ static bool vf_acls;
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
-static unsigned int num_vf[4];
+/* Configure the number of PCI-E Virtual Functions which are to be instantiated
+ * on SR-IOV Capable Physical Functions.
+ */
+static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
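/* Editor's usage example (values hypothetical): instantiate four VFs on PF0
 * and two on PF2 at module load time, leaving PF1 and PF3 with none:
 *
 *	modprobe cxgb4 num_vf=4,0,2,0
 */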
@@ -515,7 +559,7 @@ static int link_start(struct net_device *dev)
* that step explicitly.
*/
ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
- !!(dev->features & NETIF_F_HW_VLAN_RX), true);
+ !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
if (ret == 0) {
ret = t4_change_mac(pi->adapter, mb, pi->viid,
pi->xact_addr_filt, dev->dev_addr, true,
@@ -601,6 +645,21 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
u8 opcode = ((const struct rss_header *)rsp)->opcode;
rsp++; /* skip RSS header */
+
+ /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
+ */
+ if (unlikely(opcode == CPL_FW4_MSG &&
+ ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
+ rsp++;
+ opcode = ((const struct rss_header *)rsp)->opcode;
+ rsp++;
+ if (opcode != CPL_SGE_EGR_UPDATE) {
+ dev_err(q->adap->pdev_dev,
+ "unexpected FW4/CPL %#x on FW event queue\n", opcode);
+ goto out;
+ }
+ }
+
if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
const struct cpl_sge_egr_update *p = (void *)rsp;
unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
@@ -635,6 +694,7 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
} else
dev_err(q->adap->pdev_dev,
"unexpected CPL %#x on FW event queue\n", opcode);
+out:
return 0;
}
@@ -652,6 +712,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
{
struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
+ /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
+ */
+ if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
+ ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
+ rsp += 2;
+
if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
rxq->stats.nomem++;
return -1;
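/* Editor's sketch (assumption: each header occupies one __be64 slot in the
 * response descriptor) of the unwrap rule both handlers above apply to
 * firmware-encapsulated CPL messages:
 */
static const __be64 *skip_fw4_encap(const __be64 *rsp)
{
	const struct rss_header *rss = (const void *)rsp;
	const struct cpl_fw4_msg *fw4 = (const void *)(rsp + 1);

	if (rss->opcode == CPL_FW4_MSG && fw4->type == FW_TYPE_RSSCPL)
		return rsp + 2;	/* skip outer RSS header + FW4 wrapper */
	return rsp;
}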
@@ -1002,21 +1068,36 @@ freeout: t4_free_sge_resources(adap);
static int upgrade_fw(struct adapter *adap)
{
int ret;
- u32 vers;
+ u32 vers, exp_major;
const struct fw_hdr *hdr;
const struct firmware *fw;
struct device *dev = adap->pdev_dev;
+ char *fw_file_name;
- ret = request_firmware(&fw, FW_FNAME, dev);
+ switch (CHELSIO_CHIP_VERSION(adap->chip)) {
+ case CHELSIO_T4:
+ fw_file_name = FW_FNAME;
+ exp_major = FW_VERSION_MAJOR;
+ break;
+ case CHELSIO_T5:
+ fw_file_name = FW5_FNAME;
+ exp_major = FW_VERSION_MAJOR_T5;
+ break;
+ default:
+ dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
+ return -EINVAL;
+ }
+
+ ret = request_firmware(&fw, fw_file_name, dev);
if (ret < 0) {
- dev_err(dev, "unable to load firmware image " FW_FNAME
- ", error %d\n", ret);
+ dev_err(dev, "unable to load firmware image %s, error %d\n",
+ fw_file_name, ret);
return ret;
}
hdr = (const struct fw_hdr *)fw->data;
vers = ntohl(hdr->fw_ver);
- if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
+ if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
ret = -EINVAL; /* wrong major version, won't do */
goto out;
}
@@ -1024,18 +1105,15 @@ static int upgrade_fw(struct adapter *adap)
/*
* If the flash FW is unusable or we found something newer, load it.
*/
- if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
+ if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
vers > adap->params.fw_vers) {
dev_info(dev, "upgrading firmware ...\n");
ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
/*force=*/false);
if (!ret)
- dev_info(dev, "firmware successfully upgraded to "
- FW_FNAME " (%d.%d.%d.%d)\n",
- FW_HDR_FW_VER_MAJOR_GET(vers),
- FW_HDR_FW_VER_MINOR_GET(vers),
- FW_HDR_FW_VER_MICRO_GET(vers),
- FW_HDR_FW_VER_BUILD_GET(vers));
+ dev_info(dev,
+ "firmware upgraded to version %pI4 from %s\n",
+ &hdr->fw_ver, fw_file_name);
else
dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
} else {
@@ -1308,6 +1386,8 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
"VLANinsertions ",
"GROpackets ",
"GROmerged ",
+ "WriteCoalSuccess ",
+ "WriteCoalFail ",
};
static int get_sset_count(struct net_device *dev, int sset)
@@ -1321,10 +1401,15 @@ static int get_sset_count(struct net_device *dev, int sset)
}
#define T4_REGMAP_SIZE (160 * 1024)
+#define T5_REGMAP_SIZE (332 * 1024)
static int get_regs_len(struct net_device *dev)
{
- return T4_REGMAP_SIZE;
+ struct adapter *adap = netdev2adap(dev);
+ if (is_t4(adap->chip))
+ return T4_REGMAP_SIZE;
+ else
+ return T5_REGMAP_SIZE;
}
static int get_eeprom_len(struct net_device *dev)
@@ -1398,11 +1483,25 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
+ u32 val1, val2;
t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
data += sizeof(struct port_stats) / sizeof(u64);
collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
+ data += sizeof(struct queue_port_stats) / sizeof(u64);
+ if (!is_t4(adapter->chip)) {
+ t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
+ val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
+ val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
+ *data = val1 - val2;
+ data++;
+ *data = val2;
+ data++;
+ } else {
+ memset(data, 0, 2 * sizeof(u64));
+ *data += 2;
+ }
}
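/* Editor's sketch of the arithmetic above (helper name hypothetical),
 * assuming SGE_STAT_MATCH counts the doorbells that missed coalescing:
 */
static void t5_wc_stats(u32 total, u32 match, u64 *success, u64 *fail)
{
	*success = total - match;	/* "WriteCoalSuccess" */
	*fail = match;			/* "WriteCoalFail" */
}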
/*
@@ -1413,7 +1512,8 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
*/
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
- return 4 | (ap->params.rev << 10) | (1 << 16);
+ return CHELSIO_CHIP_VERSION(ap->chip) |
+ (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16);
}
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
@@ -1428,7 +1528,7 @@ static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *buf)
{
- static const unsigned int reg_ranges[] = {
+ static const unsigned int t4_reg_ranges[] = {
0x1008, 0x1108,
0x1180, 0x11b4,
0x11fc, 0x123c,
@@ -1648,13 +1748,452 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
0x27e00, 0x27e04
};
+ static const unsigned int t5_reg_ranges[] = {
+ 0x1008, 0x1148,
+ 0x1180, 0x11b4,
+ 0x11fc, 0x123c,
+ 0x1280, 0x173c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x3028,
+ 0x3060, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35ec, 0x35ec,
+ 0x3600, 0x5624,
+ 0x56cc, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x58bc,
+ 0x5940, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a9c,
+ 0x5b9c, 0x5bfc,
+ 0x6000, 0x6040,
+ 0x6058, 0x614c,
+ 0x7700, 0x7798,
+ 0x77c0, 0x78fc,
+ 0x7b00, 0x7c54,
+ 0x7d00, 0x7efc,
+ 0x8dc0, 0x8de0,
+ 0x8df8, 0x8e84,
+ 0x8ea0, 0x8f84,
+ 0x8fc0, 0x90f8,
+ 0x9400, 0x9470,
+ 0x9600, 0x96f4,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0xa020,
+ 0xd004, 0xd03c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0x11088,
+ 0x1109c, 0x1117c,
+ 0x11190, 0x11204,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x19124,
+ 0x19150, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x19290,
+ 0x193f8, 0x19474,
+ 0x19490, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c60,
+ 0x19c94, 0x19e10,
+ 0x19e50, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fe4,
+ 0x1a000, 0x1a06c,
+ 0x1a0b0, 0x1a120,
+ 0x1a128, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30030,
+ 0x30100, 0x30144,
+ 0x30190, 0x301d0,
+ 0x30200, 0x30318,
+ 0x30400, 0x3052c,
+ 0x30540, 0x3061c,
+ 0x30800, 0x30834,
+ 0x308c0, 0x30908,
+ 0x30910, 0x309ac,
+ 0x30a00, 0x30a04,
+ 0x30a0c, 0x30a2c,
+ 0x30a44, 0x30a50,
+ 0x30a74, 0x30c24,
+ 0x30d08, 0x30d14,
+ 0x30d1c, 0x30d20,
+ 0x30d3c, 0x30d50,
+ 0x31200, 0x3120c,
+ 0x31220, 0x31220,
+ 0x31240, 0x31240,
+ 0x31600, 0x31600,
+ 0x31608, 0x3160c,
+ 0x31a00, 0x31a1c,
+ 0x31e04, 0x31e20,
+ 0x31e38, 0x31e3c,
+ 0x31e80, 0x31e80,
+ 0x31e88, 0x31ea8,
+ 0x31eb0, 0x31eb4,
+ 0x31ec8, 0x31ed4,
+ 0x31fb8, 0x32004,
+ 0x32208, 0x3223c,
+ 0x32600, 0x32630,
+ 0x32a00, 0x32abc,
+ 0x32b00, 0x32b70,
+ 0x33000, 0x33048,
+ 0x33060, 0x3309c,
+ 0x330f0, 0x33148,
+ 0x33160, 0x3319c,
+ 0x331f0, 0x332e4,
+ 0x332f8, 0x333e4,
+ 0x333f8, 0x33448,
+ 0x33460, 0x3349c,
+ 0x334f0, 0x33548,
+ 0x33560, 0x3359c,
+ 0x335f0, 0x336e4,
+ 0x336f8, 0x337e4,
+ 0x337f8, 0x337fc,
+ 0x33814, 0x33814,
+ 0x3382c, 0x3382c,
+ 0x33880, 0x3388c,
+ 0x338e8, 0x338ec,
+ 0x33900, 0x33948,
+ 0x33960, 0x3399c,
+ 0x339f0, 0x33ae4,
+ 0x33af8, 0x33b10,
+ 0x33b28, 0x33b28,
+ 0x33b3c, 0x33b50,
+ 0x33bf0, 0x33c10,
+ 0x33c28, 0x33c28,
+ 0x33c3c, 0x33c50,
+ 0x33cf0, 0x33cfc,
+ 0x34000, 0x34030,
+ 0x34100, 0x34144,
+ 0x34190, 0x341d0,
+ 0x34200, 0x34318,
+ 0x34400, 0x3452c,
+ 0x34540, 0x3461c,
+ 0x34800, 0x34834,
+ 0x348c0, 0x34908,
+ 0x34910, 0x349ac,
+ 0x34a00, 0x34a04,
+ 0x34a0c, 0x34a2c,
+ 0x34a44, 0x34a50,
+ 0x34a74, 0x34c24,
+ 0x34d08, 0x34d14,
+ 0x34d1c, 0x34d20,
+ 0x34d3c, 0x34d50,
+ 0x35200, 0x3520c,
+ 0x35220, 0x35220,
+ 0x35240, 0x35240,
+ 0x35600, 0x35600,
+ 0x35608, 0x3560c,
+ 0x35a00, 0x35a1c,
+ 0x35e04, 0x35e20,
+ 0x35e38, 0x35e3c,
+ 0x35e80, 0x35e80,
+ 0x35e88, 0x35ea8,
+ 0x35eb0, 0x35eb4,
+ 0x35ec8, 0x35ed4,
+ 0x35fb8, 0x36004,
+ 0x36208, 0x3623c,
+ 0x36600, 0x36630,
+ 0x36a00, 0x36abc,
+ 0x36b00, 0x36b70,
+ 0x37000, 0x37048,
+ 0x37060, 0x3709c,
+ 0x370f0, 0x37148,
+ 0x37160, 0x3719c,
+ 0x371f0, 0x372e4,
+ 0x372f8, 0x373e4,
+ 0x373f8, 0x37448,
+ 0x37460, 0x3749c,
+ 0x374f0, 0x37548,
+ 0x37560, 0x3759c,
+ 0x375f0, 0x376e4,
+ 0x376f8, 0x377e4,
+ 0x377f8, 0x377fc,
+ 0x37814, 0x37814,
+ 0x3782c, 0x3782c,
+ 0x37880, 0x3788c,
+ 0x378e8, 0x378ec,
+ 0x37900, 0x37948,
+ 0x37960, 0x3799c,
+ 0x379f0, 0x37ae4,
+ 0x37af8, 0x37b10,
+ 0x37b28, 0x37b28,
+ 0x37b3c, 0x37b50,
+ 0x37bf0, 0x37c10,
+ 0x37c28, 0x37c28,
+ 0x37c3c, 0x37c50,
+ 0x37cf0, 0x37cfc,
+ 0x38000, 0x38030,
+ 0x38100, 0x38144,
+ 0x38190, 0x381d0,
+ 0x38200, 0x38318,
+ 0x38400, 0x3852c,
+ 0x38540, 0x3861c,
+ 0x38800, 0x38834,
+ 0x388c0, 0x38908,
+ 0x38910, 0x389ac,
+ 0x38a00, 0x38a04,
+ 0x38a0c, 0x38a2c,
+ 0x38a44, 0x38a50,
+ 0x38a74, 0x38c24,
+ 0x38d08, 0x38d14,
+ 0x38d1c, 0x38d20,
+ 0x38d3c, 0x38d50,
+ 0x39200, 0x3920c,
+ 0x39220, 0x39220,
+ 0x39240, 0x39240,
+ 0x39600, 0x39600,
+ 0x39608, 0x3960c,
+ 0x39a00, 0x39a1c,
+ 0x39e04, 0x39e20,
+ 0x39e38, 0x39e3c,
+ 0x39e80, 0x39e80,
+ 0x39e88, 0x39ea8,
+ 0x39eb0, 0x39eb4,
+ 0x39ec8, 0x39ed4,
+ 0x39fb8, 0x3a004,
+ 0x3a208, 0x3a23c,
+ 0x3a600, 0x3a630,
+ 0x3aa00, 0x3aabc,
+ 0x3ab00, 0x3ab70,
+ 0x3b000, 0x3b048,
+ 0x3b060, 0x3b09c,
+ 0x3b0f0, 0x3b148,
+ 0x3b160, 0x3b19c,
+ 0x3b1f0, 0x3b2e4,
+ 0x3b2f8, 0x3b3e4,
+ 0x3b3f8, 0x3b448,
+ 0x3b460, 0x3b49c,
+ 0x3b4f0, 0x3b548,
+ 0x3b560, 0x3b59c,
+ 0x3b5f0, 0x3b6e4,
+ 0x3b6f8, 0x3b7e4,
+ 0x3b7f8, 0x3b7fc,
+ 0x3b814, 0x3b814,
+ 0x3b82c, 0x3b82c,
+ 0x3b880, 0x3b88c,
+ 0x3b8e8, 0x3b8ec,
+ 0x3b900, 0x3b948,
+ 0x3b960, 0x3b99c,
+ 0x3b9f0, 0x3bae4,
+ 0x3baf8, 0x3bb10,
+ 0x3bb28, 0x3bb28,
+ 0x3bb3c, 0x3bb50,
+ 0x3bbf0, 0x3bc10,
+ 0x3bc28, 0x3bc28,
+ 0x3bc3c, 0x3bc50,
+ 0x3bcf0, 0x3bcfc,
+ 0x3c000, 0x3c030,
+ 0x3c100, 0x3c144,
+ 0x3c190, 0x3c1d0,
+ 0x3c200, 0x3c318,
+ 0x3c400, 0x3c52c,
+ 0x3c540, 0x3c61c,
+ 0x3c800, 0x3c834,
+ 0x3c8c0, 0x3c908,
+ 0x3c910, 0x3c9ac,
+ 0x3ca00, 0x3ca04,
+ 0x3ca0c, 0x3ca2c,
+ 0x3ca44, 0x3ca50,
+ 0x3ca74, 0x3cc24,
+ 0x3cd08, 0x3cd14,
+ 0x3cd1c, 0x3cd20,
+ 0x3cd3c, 0x3cd50,
+ 0x3d200, 0x3d20c,
+ 0x3d220, 0x3d220,
+ 0x3d240, 0x3d240,
+ 0x3d600, 0x3d600,
+ 0x3d608, 0x3d60c,
+ 0x3da00, 0x3da1c,
+ 0x3de04, 0x3de20,
+ 0x3de38, 0x3de3c,
+ 0x3de80, 0x3de80,
+ 0x3de88, 0x3dea8,
+ 0x3deb0, 0x3deb4,
+ 0x3dec8, 0x3ded4,
+ 0x3dfb8, 0x3e004,
+ 0x3e208, 0x3e23c,
+ 0x3e600, 0x3e630,
+ 0x3ea00, 0x3eabc,
+ 0x3eb00, 0x3eb70,
+ 0x3f000, 0x3f048,
+ 0x3f060, 0x3f09c,
+ 0x3f0f0, 0x3f148,
+ 0x3f160, 0x3f19c,
+ 0x3f1f0, 0x3f2e4,
+ 0x3f2f8, 0x3f3e4,
+ 0x3f3f8, 0x3f448,
+ 0x3f460, 0x3f49c,
+ 0x3f4f0, 0x3f548,
+ 0x3f560, 0x3f59c,
+ 0x3f5f0, 0x3f6e4,
+ 0x3f6f8, 0x3f7e4,
+ 0x3f7f8, 0x3f7fc,
+ 0x3f814, 0x3f814,
+ 0x3f82c, 0x3f82c,
+ 0x3f880, 0x3f88c,
+ 0x3f8e8, 0x3f8ec,
+ 0x3f900, 0x3f948,
+ 0x3f960, 0x3f99c,
+ 0x3f9f0, 0x3fae4,
+ 0x3faf8, 0x3fb10,
+ 0x3fb28, 0x3fb28,
+ 0x3fb3c, 0x3fb50,
+ 0x3fbf0, 0x3fc10,
+ 0x3fc28, 0x3fc28,
+ 0x3fc3c, 0x3fc50,
+ 0x3fcf0, 0x3fcfc,
+ 0x40000, 0x4000c,
+ 0x40040, 0x40068,
+ 0x40080, 0x40144,
+ 0x40180, 0x4018c,
+ 0x40200, 0x40298,
+ 0x402ac, 0x4033c,
+ 0x403f8, 0x403fc,
+ 0x41300, 0x413c4,
+ 0x41400, 0x4141c,
+ 0x41480, 0x414d0,
+ 0x44000, 0x44078,
+ 0x440c0, 0x44278,
+ 0x442c0, 0x44478,
+ 0x444c0, 0x44678,
+ 0x446c0, 0x44878,
+ 0x448c0, 0x449fc,
+ 0x45000, 0x45068,
+ 0x45080, 0x45084,
+ 0x450a0, 0x450b0,
+ 0x45200, 0x45268,
+ 0x45280, 0x45284,
+ 0x452a0, 0x452b0,
+ 0x460c0, 0x460e4,
+ 0x47000, 0x4708c,
+ 0x47200, 0x47250,
+ 0x47400, 0x47420,
+ 0x47600, 0x47618,
+ 0x47800, 0x47814,
+ 0x48000, 0x4800c,
+ 0x48040, 0x48068,
+ 0x48080, 0x48144,
+ 0x48180, 0x4818c,
+ 0x48200, 0x48298,
+ 0x482ac, 0x4833c,
+ 0x483f8, 0x483fc,
+ 0x49300, 0x493c4,
+ 0x49400, 0x4941c,
+ 0x49480, 0x494d0,
+ 0x4c000, 0x4c078,
+ 0x4c0c0, 0x4c278,
+ 0x4c2c0, 0x4c478,
+ 0x4c4c0, 0x4c678,
+ 0x4c6c0, 0x4c878,
+ 0x4c8c0, 0x4c9fc,
+ 0x4d000, 0x4d068,
+ 0x4d080, 0x4d084,
+ 0x4d0a0, 0x4d0b0,
+ 0x4d200, 0x4d268,
+ 0x4d280, 0x4d284,
+ 0x4d2a0, 0x4d2b0,
+ 0x4e0c0, 0x4e0e4,
+ 0x4f000, 0x4f08c,
+ 0x4f200, 0x4f250,
+ 0x4f400, 0x4f420,
+ 0x4f600, 0x4f618,
+ 0x4f800, 0x4f814,
+ 0x50000, 0x500cc,
+ 0x50400, 0x50400,
+ 0x50800, 0x508cc,
+ 0x50c00, 0x50c00,
+ 0x51000, 0x5101c,
+ 0x51300, 0x51308,
+ };
+
int i;
struct adapter *ap = netdev2adap(dev);
+ static const unsigned int *reg_ranges;
+ int arr_size = 0, buf_size = 0;
+
+ if (is_t4(ap->chip)) {
+ reg_ranges = &t4_reg_ranges[0];
+ arr_size = ARRAY_SIZE(t4_reg_ranges);
+ buf_size = T4_REGMAP_SIZE;
+ } else {
+ reg_ranges = &t5_reg_ranges[0];
+ arr_size = ARRAY_SIZE(t5_reg_ranges);
+ buf_size = T5_REGMAP_SIZE;
+ }
regs->version = mk_adap_vers(ap);
- memset(buf, 0, T4_REGMAP_SIZE);
- for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
+ memset(buf, 0, buf_size);
+ for (i = 0; i < arr_size; i += 2)
reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}
@@ -2205,14 +2744,14 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
netdev_features_t changed = dev->features ^ features;
int err;
- if (!(changed & NETIF_F_HW_VLAN_RX))
+ if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
return 0;
err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
-1, -1, -1,
- !!(features & NETIF_F_HW_VLAN_RX), true);
+ !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
if (unlikely(err))
- dev->features = features ^ NETIF_F_HW_VLAN_RX;
+ dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
return err;
}
@@ -2363,8 +2902,8 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
int ret, ofst;
__be32 data[16];
- if (mem == MEM_MC)
- ret = t4_mc_read(adap, pos, data, NULL);
+ if ((mem == MEM_MC) || (mem == MEM_MC1))
+ ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
else
ret = t4_edc_read(adap, mem, pos, data, NULL);
if (ret)
@@ -2405,18 +2944,37 @@ static void add_debugfs_mem(struct adapter *adap, const char *name,
static int setup_debugfs(struct adapter *adap)
{
int i;
+ u32 size;
if (IS_ERR_OR_NULL(adap->debugfs_root))
return -1;
i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
- if (i & EDRAM0_ENABLE)
- add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
- if (i & EDRAM1_ENABLE)
- add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
- if (i & EXT_MEM_ENABLE)
- add_debugfs_mem(adap, "mc", MEM_MC,
- EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
+ if (i & EDRAM0_ENABLE) {
+ size = t4_read_reg(adap, MA_EDRAM0_BAR);
+ add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
+ }
+ if (i & EDRAM1_ENABLE) {
+ size = t4_read_reg(adap, MA_EDRAM1_BAR);
+ add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
+ }
+ if (is_t4(adap->chip)) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
+ if (i & EXT_MEM_ENABLE)
+ add_debugfs_mem(adap, "mc", MEM_MC,
+ EXT_MEM_SIZE_GET(size));
+ } else {
+ if (i & EXT_MEM_ENABLE) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
+ add_debugfs_mem(adap, "mc0", MEM_MC0,
+ EXT_MEM_SIZE_GET(size));
+ }
+ if (i & EXT_MEM1_ENABLE) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
+ add_debugfs_mem(adap, "mc1", MEM_MC1,
+ EXT_MEM_SIZE_GET(size));
+ }
+ }
if (adap->l2t)
debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
&t4_l2t_fops);
@@ -2747,10 +3305,18 @@ EXPORT_SYMBOL(cxgb4_port_chan);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
struct adapter *adap = netdev2adap(dev);
- u32 v;
+ u32 v1, v2, lp_count, hp_count;
- v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
- return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v);
+ v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
+ v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
+ if (is_t4(adap->chip)) {
+ lp_count = G_LP_COUNT(v1);
+ hp_count = G_HP_COUNT(v1);
+ } else {
+ lp_count = G_LP_COUNT_T5(v1);
+ hp_count = G_HP_COUNT_T5(v2);
+ }
+ return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);
@@ -2853,6 +3419,25 @@ out:
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
+void cxgb4_disable_db_coalescing(struct net_device *dev)
+{
+ struct adapter *adap;
+
+ adap = netdev2adap(dev);
+ t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
+ F_NOCOALESCE);
+}
+EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
+
+void cxgb4_enable_db_coalescing(struct net_device *dev)
+{
+ struct adapter *adap;
+
+ adap = netdev2adap(dev);
+ t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
+}
+EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
+
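/* Editor's usage sketch (caller hypothetical): an upper-layer driver that
 * cannot tolerate held-back doorbells brackets its critical section with
 * the two exports added above.
 */
static void post_urgent_work(struct net_device *dev)
{
	cxgb4_disable_db_coalescing(dev);
	/* ... ring doorbells that must reach the chip immediately ... */
	cxgb4_enable_db_coalescing(dev);
}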
static struct pci_driver cxgb4_driver;
static void check_neigh_update(struct neighbour *neigh)
@@ -2888,14 +3473,23 @@ static struct notifier_block cxgb4_netevent_nb = {
static void drain_db_fifo(struct adapter *adap, int usecs)
{
- u32 v;
+ u32 v1, v2, lp_count, hp_count;
do {
+ v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
+ v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
+ if (is_t4(adap->chip)) {
+ lp_count = G_LP_COUNT(v1);
+ hp_count = G_HP_COUNT(v1);
+ } else {
+ lp_count = G_LP_COUNT_T5(v1);
+ hp_count = G_HP_COUNT_T5(v2);
+ }
+
+ if (lp_count == 0 && hp_count == 0)
+ break;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(usecs_to_jiffies(usecs));
- v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
- if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
- break;
} while (1);
}
@@ -3004,24 +3598,62 @@ static void process_db_drop(struct work_struct *work)
adap = container_of(work, struct adapter, db_drop_task);
+ if (is_t4(adap->chip)) {
+ disable_dbs(adap);
+ notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
+ drain_db_fifo(adap, 1);
+ recover_all_queues(adap);
+ enable_dbs(adap);
+ } else {
+ u32 dropped_db = t4_read_reg(adap, 0x010ac);
+ u16 qid = (dropped_db >> 15) & 0x1ffff;
+ u16 pidx_inc = dropped_db & 0x1fff;
+ unsigned int s_qpp;
+ unsigned short udb_density;
+ unsigned long qpshift;
+ int page;
+ u32 udb;
+
+ dev_warn(adap->pdev_dev,
+ "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
+ dropped_db, qid,
+ (dropped_db >> 14) & 1,
+ (dropped_db >> 13) & 1,
+ pidx_inc);
+
+ drain_db_fifo(adap, 1);
+
+ s_qpp = QUEUESPERPAGEPF1 * adap->fn;
+ udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
+ SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
+ qpshift = PAGE_SHIFT - ilog2(udb_density);
+ udb = qid << qpshift;
+ udb &= PAGE_MASK;
+ page = udb / PAGE_SIZE;
+ udb += (qid - (page * udb_density)) * 128;
+
+ writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
+
+ /* Re-enable BAR2 WC */
+ t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
+ }
+
t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
- disable_dbs(adap);
- notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
- drain_db_fifo(adap, 1);
- recover_all_queues(adap);
- enable_dbs(adap);
}
void t4_db_full(struct adapter *adap)
{
- t4_set_reg_field(adap, SGE_INT_ENABLE3,
- DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
- queue_work(workq, &adap->db_full_task);
+ if (is_t4(adap->chip)) {
+ t4_set_reg_field(adap, SGE_INT_ENABLE3,
+ DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
+ queue_work(workq, &adap->db_full_task);
+ }
}
void t4_db_dropped(struct adapter *adap)
{
- queue_work(workq, &adap->db_drop_task);
+ if (is_t4(adap->chip))
+ queue_work(workq, &adap->db_drop_task);
}
static void uld_attach(struct adapter *adap, unsigned int uld)
@@ -3566,17 +4198,27 @@ void t4_fatal_err(struct adapter *adap)
static void setup_memwin(struct adapter *adap)
{
- u32 bar0;
+ u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
+ if (is_t4(adap->chip)) {
+ mem_win0_base = bar0 + MEMWIN0_BASE;
+ mem_win1_base = bar0 + MEMWIN1_BASE;
+ mem_win2_base = bar0 + MEMWIN2_BASE;
+ } else {
+ /* For T5, only the relative offset inside the PCIe BAR is passed */
+ mem_win0_base = MEMWIN0_BASE;
+ mem_win1_base = MEMWIN1_BASE_T5;
+ mem_win2_base = MEMWIN2_BASE_T5;
+ }
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
- (bar0 + MEMWIN0_BASE) | BIR(0) |
+ mem_win0_base | BIR(0) |
WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
- (bar0 + MEMWIN1_BASE) | BIR(0) |
+ mem_win1_base | BIR(0) |
WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
- (bar0 + MEMWIN2_BASE) | BIR(0) |
+ mem_win2_base | BIR(0) |
WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
}
@@ -3745,6 +4387,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
unsigned long mtype = 0, maddr = 0;
u32 finiver, finicsum, cfcsum;
int ret, using_flash;
+ char *fw_config_file, fw_config_file_path[256];
/*
* Reset device if necessary.
@@ -3761,7 +4404,21 @@ static int adap_init0_config(struct adapter *adapter, int reset)
* then use that. Otherwise, use the configuration file stored
* in the adapter flash ...
*/
- ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev);
+ switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
+ case CHELSIO_T4:
+ fw_config_file = FW_CFNAME;
+ break;
+ case CHELSIO_T5:
+ fw_config_file = FW5_CFNAME;
+ break;
+ default:
+ dev_err(adapter->pdev_dev, "Device %d is not supported\n",
+ adapter->pdev->device);
+ ret = -EINVAL;
+ goto bye;
+ }
+
+ ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
if (ret < 0) {
using_flash = 1;
mtype = FW_MEMTYPE_CF_FLASH;
@@ -3877,6 +4534,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
if (ret < 0)
goto bye;
+ sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
/*
* Return successfully and note that we're operating with parameters
* not supplied by the driver, rather than from hard-wired
@@ -3887,7 +4545,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
"Configuration File %s, version %#x, computed checksum %#x\n",
(using_flash
? "in device FLASH"
- : "/lib/firmware/" FW_CFNAME),
+ : fw_config_file_path),
finiver, cfcsum);
return 0;
@@ -4354,6 +5012,15 @@ static int adap_init0(struct adapter *adap)
adap->tids.aftid_end = val[1];
}
+ /* If we're running on newer firmware, let it know that we're
+ * prepared to deal with encapsulated CPL messages. Older
+ * firmware won't understand this and we'll just get
+ * unencapsulated messages ...
+ */
+ params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
+ val[0] = 1;
+ (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
+
/*
* Get device capabilities so we can determine what resources we need
* to manage.
@@ -4814,7 +5481,8 @@ static void print_port_info(const struct net_device *dev)
sprintf(bufp, "BASE-%s", base[pi->port_type]);
netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
- adap->params.vpd.id, adap->params.rev, buf,
+ adap->params.vpd.id,
+ CHELSIO_CHIP_RELEASE(adap->params.rev), buf,
is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
@@ -4854,10 +5522,11 @@ static void free_some_resources(struct adapter *adapter)
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
+#define SEGMENT_SIZE 128
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- int func, i, err;
+ int func, i, err, s_qpp, qpp, num_seg;
struct port_info *pi;
bool highdma = false;
struct adapter *adapter = NULL;
@@ -4934,7 +5603,34 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = t4_prep_adapter(adapter);
if (err)
- goto out_unmap_bar;
+ goto out_unmap_bar0;
+
+ if (!is_t4(adapter->chip)) {
+ s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
+ qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
+ SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
+ num_seg = PAGE_SIZE / SEGMENT_SIZE;
+
+ /* Each segment is 128B in size. Write coalescing is enabled only
+ * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for the
+ * queue is less than the number of segments that can be
+ * accommodated in a page.
+ */
+ if (qpp > num_seg) {
+ dev_err(&pdev->dev,
+ "Incorrect number of egress queues per page\n");
+ err = -EINVAL;
+ goto out_unmap_bar0;
+ }
+ adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ if (!adapter->bar2) {
+ dev_err(&pdev->dev, "cannot map device bar2 region\n");
+ err = -ENOMEM;
+ goto out_unmap_bar0;
+ }
+ }
+
setup_memwin(adapter);
err = adap_init0(adapter);
setup_memwin_rdma(adapter);
@@ -4963,7 +5659,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_RXHASH |
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (highdma)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
@@ -5063,6 +5759,9 @@ sriov:
out_free_dev:
free_some_resources(adapter);
out_unmap_bar:
+ if (!is_t4(adapter->chip))
+ iounmap(adapter->bar2);
+ out_unmap_bar0:
iounmap(adapter->regs);
out_free_adapter:
kfree(adapter);
@@ -5113,6 +5812,8 @@ static void remove_one(struct pci_dev *pdev)
free_some_resources(adapter);
iounmap(adapter->regs);
+ if (!is_t4(adapter->chip))
+ iounmap(adapter->bar2);
kfree(adapter);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index e2bbc7f3e2de..4faf4d067ee7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -269,4 +269,7 @@ struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
unsigned int skb_len, unsigned int pull_len);
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
int cxgb4_flush_eq_cache(struct net_device *dev);
+void cxgb4_disable_db_coalescing(struct net_device *dev);
+void cxgb4_enable_db_coalescing(struct net_device *dev);
+
#endif /* !__CXGB4_OFLD_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index fe9a2ea3588b..2bfbb206b35a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -506,10 +506,14 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
+ u32 val;
if (q->pend_cred >= 8) {
+ val = PIDX(q->pend_cred / 8);
+ if (!is_t4(adap->chip))
+ val |= DBTYPE(1);
wmb();
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
- QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
+ QID(q->cntxt_id) | val);
q->pend_cred &= 7;
}
}
@@ -812,6 +816,22 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
*end = 0;
}
+/* This function copies a 64-byte coalesced work request to the memory
+ * mapped BAR2 space (user-space writes).
+ * For a coalesced WR, the SGE fetches the data from its FIFO instead
+ * of from the host.
+ */
+static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
+{
+ int count = 8;
+
+ while (count) {
+ writeq(*src, dst);
+ src++;
+ dst++;
+ count--;
+ }
+}
+
/**
* ring_tx_db - check and potentially ring a Tx queue's doorbell
* @adap: the adapter
@@ -822,11 +842,25 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
*/
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
+ unsigned int *wr, index;
+
wmb(); /* write descriptors before telling HW */
spin_lock(&q->db_lock);
if (!q->db_disabled) {
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
- QID(q->cntxt_id) | PIDX(n));
+ if (is_t4(adap->chip)) {
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+ QID(q->cntxt_id) | PIDX(n));
+ } else {
+ if (n == 1) {
+ index = q->pidx ? (q->pidx - 1) : (q->size - 1);
+ wr = (unsigned int *)&q->desc[index];
+ cxgb_pio_copy((u64 __iomem *)
+ (adap->bar2 + q->udb + 64),
+ (u64 *)wr);
+ } else
+ writel(n, adap->bar2 + q->udb + 8);
+ wmb();
+ }
}
q->db_pidx = q->pidx;
spin_unlock(&q->db_lock);
@@ -1555,7 +1589,6 @@ static noinline int handle_trace_pkt(struct adapter *adap,
const struct pkt_gl *gl)
{
struct sk_buff *skb;
- struct cpl_trace_pkt *p;
skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
if (unlikely(!skb)) {
@@ -1563,8 +1596,11 @@ static noinline int handle_trace_pkt(struct adapter *adap,
return 0;
}
- p = (struct cpl_trace_pkt *)skb->data;
- __skb_pull(skb, sizeof(*p));
+ if (is_t4(adap->chip))
+ __skb_pull(skb, sizeof(struct cpl_trace_pkt));
+ else
+ __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
+
skb_reset_mac_header(skb);
skb->protocol = htons(0xffff);
skb->dev = adap->port[0];
@@ -1597,7 +1633,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
if (unlikely(pkt->vlan_ex)) {
- __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
rxq->stats.vlan_ex++;
}
ret = napi_gro_frags(&rxq->rspq.napi);
@@ -1625,8 +1661,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct cpl_rx_pkt *pkt;
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
struct sge *s = &q->adap->sge;
+ int cpl_trace_pkt = is_t4(q->adap->chip) ?
+ CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
- if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
+ if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
return handle_trace_pkt(q->adap, si);
pkt = (const struct cpl_rx_pkt *)rsp;
@@ -1667,7 +1705,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
skb_checksum_none_assert(skb);
if (unlikely(pkt->vlan_ex)) {
- __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
rxq->stats.vlan_ex++;
}
netif_receive_skb(skb);
@@ -2143,11 +2181,27 @@ err:
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
+ q->cntxt_id = id;
+ if (!is_t4(adap->chip)) {
+ unsigned int s_qpp;
+ unsigned short udb_density;
+ unsigned long qpshift;
+ int page;
+
+ s_qpp = QUEUESPERPAGEPF1 * adap->fn;
+ udb_density = 1 << QUEUESPERPAGEPF0_GET((t4_read_reg(adap,
+ SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp));
+ qpshift = PAGE_SHIFT - ilog2(udb_density);
+ q->udb = q->cntxt_id << qpshift;
+ q->udb &= PAGE_MASK;
+ page = q->udb / PAGE_SIZE;
+ q->udb += (q->cntxt_id - (page * udb_density)) * 128;
+ }
+
q->in_use = 0;
q->cidx = q->pidx = 0;
q->stops = q->restarts = 0;
q->stat = (void *)&q->desc[q->size];
- q->cntxt_id = id;
spin_lock_init(&q->db_lock);
adap->sge.egr_map[id - adap->sge.egr_start] = q;
}
@@ -2587,11 +2641,20 @@ static int t4_sge_init_hard(struct adapter *adap)
* Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
* and generate an interrupt when this occurs so we can recover.
*/
- t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
- V_HP_INT_THRESH(M_HP_INT_THRESH) |
- V_LP_INT_THRESH(M_LP_INT_THRESH),
- V_HP_INT_THRESH(dbfifo_int_thresh) |
- V_LP_INT_THRESH(dbfifo_int_thresh));
+ if (is_t4(adap->chip)) {
+ t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
+ V_HP_INT_THRESH(M_HP_INT_THRESH) |
+ V_LP_INT_THRESH(M_LP_INT_THRESH),
+ V_HP_INT_THRESH(dbfifo_int_thresh) |
+ V_LP_INT_THRESH(dbfifo_int_thresh));
+ } else {
+ t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
+ V_LP_INT_THRESH_T5(M_LP_INT_THRESH_T5),
+ V_LP_INT_THRESH_T5(dbfifo_int_thresh));
+ t4_set_reg_field(adap, SGE_DBFIFO_STATUS2,
+ V_HP_INT_THRESH_T5(M_HP_INT_THRESH_T5),
+ V_HP_INT_THRESH_T5(dbfifo_int_thresh));
+ }
t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
F_ENABLE_DROP);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 8049268ce0f2..d02d4e8c4417 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -282,6 +282,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
* t4_mc_read - read from MC through backdoor accesses
* @adap: the adapter
* @addr: address of first byte requested
+ * @idx: which MC to access
* @data: 64 bytes of data containing the requested address
* @ecc: where to store the corresponding 64-bit ECC word
*
@@ -289,22 +290,38 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
* that covers the requested address @addr. If @parity is not %NULL it
* is assigned the 64-bit ECC word for the read data.
*/
-int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
+int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
int i;
+ u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
+ u32 mc_bist_status_rdata, mc_bist_data_pattern;
+
+ if (is_t4(adap->chip)) {
+ mc_bist_cmd = MC_BIST_CMD;
+ mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
+ mc_bist_cmd_len = MC_BIST_CMD_LEN;
+ mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
+ mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
+ } else {
+ mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
+ mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
+ mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
+ mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
+ mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
+ }
- if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
+ if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
return -EBUSY;
- t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
- t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
- t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
- t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
+ t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
+ t4_write_reg(adap, mc_bist_cmd_len, 64);
+ t4_write_reg(adap, mc_bist_data_pattern, 0xc);
+ t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
BIST_CMD_GAP(1));
- i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
+ i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
if (i)
return i;
-#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)
for (i = 15; i >= 0; i--)
*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
@@ -329,20 +346,39 @@ int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
int i;
+ u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
+ u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
+
+ if (is_t4(adap->chip)) {
+ edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
+ edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
+ edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
+ edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
+ idx);
+ edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
+ idx);
+ } else {
+ edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
+ edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
+ edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
+ edc_bist_cmd_data_pattern =
+ EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
+ edc_bist_status_rdata =
+ EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
+ }
- idx *= EDC_STRIDE;
- if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
+ if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
return -EBUSY;
- t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
- t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
- t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
- t4_write_reg(adap, EDC_BIST_CMD + idx,
+ t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
+ t4_write_reg(adap, edc_bist_cmd_len, 64);
+ t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
+ t4_write_reg(adap, edc_bist_cmd,
BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
- i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
+ i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
if (i)
return i;
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))
for (i = 15; i >= 0; i--)
*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
@@ -366,6 +402,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
{
int i;
+ u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn);
/*
* Setup offset into PCIE memory window. Address must be a
@@ -374,7 +411,7 @@ static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
* values.)
*/
t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
- addr & ~(MEMWIN0_APERTURE - 1));
+ (addr & ~(MEMWIN0_APERTURE - 1)) | win_pf);
t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
/* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */
@@ -410,6 +447,7 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
__be32 *buf, int dir)
{
u32 pos, start, end, offset, memoffset;
+ u32 edc_size, mc_size;
int ret = 0;
__be32 *data;
@@ -423,13 +461,21 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
if (!data)
return -ENOMEM;
- /*
- * Offset into the region of memory which is being accessed
+ /* Offset into the region of memory which is being accessed
* MEM_EDC0 = 0
* MEM_EDC1 = 1
- * MEM_MC = 2
+ * MEM_MC = 2 -- T4
+ * MEM_MC0 = 2 -- For T5
+ * MEM_MC1 = 3 -- For T5
*/
- memoffset = (mtype * (5 * 1024 * 1024));
+ edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR));
+ if (mtype != MEM_MC1)
+ memoffset = (mtype * (edc_size * 1024 * 1024));
+ else {
+ mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap,
+ MA_EXT_MEMORY_BAR));
+ memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+ }
/* Determine the PCIE_MEM_ACCESS_OFFSET */
addr = addr + memoffset;
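/* Editor's worked example of the offset rule above (sizes assumed for
 * illustration): with 5 MB EDC rams, MEM_EDC0 starts at 0, MEM_EDC1 at
 * 5 MB and MEM_MC0 at 10 MB; on T5, MEM_MC1 then starts at
 * 2 * edc_size + mc0_size megabytes.
 */
static u32 mem_region_offset_mb(int mtype, u32 edc_size_mb, u32 mc0_size_mb)
{
	if (mtype != MEM_MC1)
		return mtype * edc_size_mb;	/* EDC0=0, EDC1=1, MC0=2 */
	return 2 * edc_size_mb + mc0_size_mb;
}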
@@ -497,9 +543,9 @@ int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
}
#define EEPROM_STAT_ADDR 0x7bfc
-#define VPD_LEN 512
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
+#define VPD_LEN 1024
/**
* t4_seeprom_wp - enable/disable EEPROM write protection
@@ -856,6 +902,7 @@ int t4_check_fw_version(struct adapter *adapter)
{
u32 api_vers[2];
int ret, major, minor, micro;
+ int exp_major, exp_minor, exp_micro;
ret = get_fw_version(adapter, &adapter->params.fw_vers);
if (!ret)
@@ -870,17 +917,35 @@ int t4_check_fw_version(struct adapter *adapter)
major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
+
+ switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
+ case CHELSIO_T4:
+ exp_major = FW_VERSION_MAJOR;
+ exp_minor = FW_VERSION_MINOR;
+ exp_micro = FW_VERSION_MICRO;
+ break;
+ case CHELSIO_T5:
+ exp_major = FW_VERSION_MAJOR_T5;
+ exp_minor = FW_VERSION_MINOR_T5;
+ exp_micro = FW_VERSION_MICRO_T5;
+ break;
+ default:
+ dev_err(adapter->pdev_dev, "Unsupported chip type, %x\n",
+ adapter->chip);
+ return -EINVAL;
+ }
+
memcpy(adapter->params.api_vers, api_vers,
sizeof(adapter->params.api_vers));
- if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
+ if (major != exp_major) { /* major mismatch - fail */
dev_err(adapter->pdev_dev,
"card FW has major version %u, driver wants %u\n",
- major, FW_VERSION_MAJOR);
+ major, exp_major);
return -EINVAL;
}
- if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
+ if (minor == exp_minor && micro == exp_micro)
return 0; /* perfect match */
/* Minor/micro version mismatch. Report it but often it's OK. */
@@ -1246,6 +1311,45 @@ static void pcie_intr_handler(struct adapter *adapter)
{ 0 }
};
+ static struct intr_info t5_pcie_intr_info[] = {
+ { MSTGRPPERR, "Master Response Read Queue parity error",
+ -1, 1 },
+ { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
+ { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
+ { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
+ -1, 1 },
+ { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
+ -1, 1 },
+ { TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
+ { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
+ { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
+ { DREQWRPERR, "PCI DMA channel write request parity error",
+ -1, 1 },
+ { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
+ { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR, "PCI FID parity error", -1, 1 },
+ { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
+ { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
+ { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
+ { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
+ -1, 1 },
+ { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
+ { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
+ { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
+ { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
+ { READRSPERR, "Outbound read error", -1, 0 },
+ { 0 }
+ };
+
int fat;
fat = t4_handle_intr_status(adapter,
@@ -1254,7 +1358,10 @@ static void pcie_intr_handler(struct adapter *adapter)
t4_handle_intr_status(adapter,
PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
pcie_port_intr_info) +
- t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
+ t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+ is_t4(adapter->chip) ?
+ pcie_intr_info : t5_pcie_intr_info);
+
if (fat)
t4_fatal_err(adapter);
}
@@ -1664,7 +1771,14 @@ static void ncsi_intr_handler(struct adapter *adap)
*/
static void xgmac_intr_handler(struct adapter *adap, int port)
{
- u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
+ u32 v, int_cause_reg;
+
+ if (is_t4(adap->chip))
+ int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
+ else
+ int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
+
+ v = t4_read_reg(adap, int_cause_reg);
v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
if (!v)
@@ -2126,7 +2240,9 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
u32 bgmap = get_mps_bg_map(adap, idx);
#define GET_STAT(name) \
- t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
+ t4_read_reg64(adap, \
+ (is_t4(adap->chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
+ T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
p->tx_octets = GET_STAT(TX_PORT_BYTES);
@@ -2205,14 +2321,26 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
const u8 *addr)
{
+ u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
+
+ if (is_t4(adap->chip)) {
+ mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
+ mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
+ port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
+ } else {
+ mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
+ mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
+ port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
+ }
+
if (addr) {
- t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
+ t4_write_reg(adap, mag_id_reg_l,
(addr[2] << 24) | (addr[3] << 16) |
(addr[4] << 8) | addr[5]);
- t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
+ t4_write_reg(adap, mag_id_reg_h,
(addr[0] << 8) | addr[1]);
}
- t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
+ t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
addr ? MAGICEN : 0);
}
@@ -2235,16 +2363,23 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
int i;
+ u32 port_cfg_reg;
+
+ if (is_t4(adap->chip))
+ port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
+ else
+ port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
if (!enable) {
- t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
- PATEN, 0);
+ t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
return 0;
}
if (map > 0xff)
return -EINVAL;
-#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
+#define EPIO_REG(name) \
+ (is_t4(adap->chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
+ T5_PORT_REG(port, MAC_PORT_EPIO_##name))
t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
t4_write_reg(adap, EPIO_REG(DATA2), mask1);
@@ -2322,24 +2457,24 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
* @addr: address of first byte requested aligned on 32b.
* @data: len bytes to hold the data read
* @len: amount of data to read from window. Must be <=
- * MEMWIN0_APERATURE after adjusting for 16B alignment
- * requirements of the the memory window.
+ * MEMWIN0_APERTURE after adjusting for the memory window's
+ * alignment requirements (16B on T4, 128B on T5).
*
* Read len bytes of data from MC starting at @addr.
*/
int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
{
- int i;
- int off;
+ int i, off;
+ u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn);
- /*
- * Align on a 16B boundary.
+ /* Align on a 2KB boundary.
*/
- off = addr & 15;
+ off = addr & (MEMWIN0_APERTURE - 1);
if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
return -EINVAL;
- t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15);
+ t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
+ (addr & ~(MEMWIN0_APERTURE - 1)) | win_pf);
t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
for (i = 0; i < len; i += 4)
@@ -3162,6 +3297,9 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
int i, ret;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p;
+ unsigned int max_naddr = is_t4(adap->chip) ?
+ NUM_MPS_CLS_SRAM_L_INSTANCES :
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
if (naddr > 7)
return -EINVAL;
@@ -3187,8 +3325,8 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
if (idx)
- idx[i] = index >= NEXACT_MAC ? 0xffff : index;
- if (index < NEXACT_MAC)
+ idx[i] = index >= max_naddr ? 0xffff : index;
+ if (index < max_naddr)
ret++;
else if (hash)
*hash |= (1ULL << hash_mac_addr(addr[i]));
@@ -3221,6 +3359,9 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
int ret, mode;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p = c.u.exact;
+ unsigned int max_mac_addr = is_t4(adap->chip) ?
+ NUM_MPS_CLS_SRAM_L_INSTANCES :
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
if (idx < 0) /* new allocation */
idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
@@ -3238,7 +3379,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret == 0) {
ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
- if (ret >= NEXACT_MAC)
+ if (ret >= max_mac_addr)
ret = -ENOMEM;
}
return ret;
@@ -3547,7 +3688,8 @@ static int get_flash_params(struct adapter *adap)
*/
int t4_prep_adapter(struct adapter *adapter)
{
- int ret;
+ int ret, ver;
+ uint16_t device_id;
ret = t4_wait_dev_ready(adapter);
if (ret < 0)
@@ -3562,6 +3704,28 @@ int t4_prep_adapter(struct adapter *adapter)
return ret;
}
+ /* Retrieve adapter's device ID
+ */
+ pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
+ ver = device_id >> 12;
+ switch (ver) {
+ case CHELSIO_T4:
+ adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4,
+ adapter->params.rev);
+ break;
+ case CHELSIO_T5:
+ adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5,
+ adapter->params.rev);
+ break;
+ default:
+ dev_err(adapter->pdev_dev, "Device %d is not supported\n",
+ device_id);
+ return -EINVAL;
+ }
+
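+
+A worked example of the decode above (illustrative): the chip generation lives in the top nibble of the PCI device ID, so T4 parts sit in the 0x4xxx range and T5 parts in 0x5xxx. The IDs below are taken from the cxgb4vf PCI table later in this patch.
+
+#include <assert.h>
+#include <stdint.h>
+
+#define CHELSIO_T4 0x4
+#define CHELSIO_T5 0x5
+
+int main(void)
+{
+	uint16_t t4_id = 0x480d; /* T480-cr  */
+	uint16_t t5_id = 0x5800; /* T580-dbg */
+
+	assert((t4_id >> 12) == CHELSIO_T4);
+	assert((t5_id >> 12) == CHELSIO_T5);
+	return 0;
+}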
+ /* Reassign the updated revision field */
+ adapter->params.rev = adapter->chip;
+
init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index f534ed7e10e9..1d1623be9f1e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -47,7 +47,6 @@ enum {
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
- NEXACT_MAC = 336, /* # of exact MAC address filters */
L2T_SIZE = 4096, /* # of L2T entries */
MBOX_LEN = 64, /* mailbox size in bytes */
TRACE_LEN = 112, /* length of trace data and mask */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 261d17703adc..01d484441200 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -74,6 +74,7 @@ enum {
CPL_PASS_ESTABLISH = 0x41,
CPL_RX_DATA_DDP = 0x42,
CPL_PASS_ACCEPT_REQ = 0x44,
+ CPL_TRACE_PKT_T5 = 0x48,
CPL_RDMA_READ_REQ = 0x60,
@@ -157,6 +158,7 @@ union opcode_tid {
};
#define CPL_OPCODE(x) ((x) << 24)
+#define G_CPL_OPCODE(x) (((x) >> 24) & 0xFF)
#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid))
#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF)
@@ -287,6 +289,23 @@ struct cpl_act_open_req {
__be32 opt2;
};
+#define S_FILTER_TUPLE 24
+#define M_FILTER_TUPLE 0xFFFFFFFFFF
+#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
+#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE)
+struct cpl_t5_act_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be64 opt0;
+ __be32 rsvd;
+ __be32 opt2;
+ __be64 params;
+};
+
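+
+The FILTER_TUPLE field above is 40 bits wide (M_FILTER_TUPLE is 0xFFFFFFFFFF) at bit offset 24 of the 64-bit params word of cpl_t5_act_open_req, so the shifts must be done in 64-bit arithmetic. An illustrative round-trip check follows; the ULL suffix is added here only to keep the user-space sketch self-contained.
+
+#include <assert.h>
+#include <stdint.h>
+
+#define S_FILTER_TUPLE 24
+#define M_FILTER_TUPLE 0xFFFFFFFFFFULL
+#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
+#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE)
+
+int main(void)
+{
+	uint64_t tuple = 0x123456789aULL;        /* any 40-bit value */
+	uint64_t params = V_FILTER_TUPLE(tuple); /* packed field */
+
+	assert(G_FILTER_TUPLE(params) == tuple); /* round-trips cleanly */
+	return 0;
+}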
struct cpl_act_open_req6 {
WR_HDR;
union opcode_tid ot;
@@ -566,6 +585,11 @@ struct cpl_rx_pkt {
#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN)
#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN)
+#define S_RX_T5_ETHHDR_LEN 0
+#define M_RX_T5_ETHHDR_LEN 0x3F
+#define V_RX_T5_ETHHDR_LEN(x) ((x) << S_RX_T5_ETHHDR_LEN)
+#define G_RX_T5_ETHHDR_LEN(x) (((x) >> S_RX_T5_ETHHDR_LEN) & M_RX_T5_ETHHDR_LEN)
+
#define S_RX_MACIDX 8
#define M_RX_MACIDX 0x1FF
#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX)
@@ -612,6 +636,28 @@ struct cpl_trace_pkt {
__be64 tstamp;
};
+struct cpl_t5_trace_pkt {
+ __u8 opcode;
+ __u8 intf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 runt:4;
+ __u8 filter_hit:4;
+ __u8:6;
+ __u8 err:1;
+ __u8 trunc:1;
+#else
+ __u8 filter_hit:4;
+ __u8 runt:4;
+ __u8 trunc:1;
+ __u8 err:1;
+ __u8:6;
+#endif
+ __be16 rsvd;
+ __be16 len;
+ __be64 tstamp;
+ __be64 rsvd1;
+};
+
struct cpl_l2t_write_req {
WR_HDR;
union opcode_tid ot;
@@ -643,6 +689,15 @@ struct cpl_sge_egr_update {
__be16 pidx;
};
+/* cpl_fw*.type values */
+enum {
+ FW_TYPE_CMD_RPL = 0,
+ FW_TYPE_WR_RPL = 1,
+ FW_TYPE_CQE = 2,
+ FW_TYPE_OFLD_CONNECTION_WR_RPL = 3,
+ FW_TYPE_RSSCPL = 4,
+};
+
struct cpl_fw4_pld {
u8 opcode;
u8 rsvd0[3];
@@ -692,6 +747,7 @@ enum {
FW6_TYPE_WR_RPL = 1,
FW6_TYPE_CQE = 2,
FW6_TYPE_OFLD_CONNECTION_WR_RPL = 3,
+ FW6_TYPE_RSSCPL = FW_TYPE_RSSCPL,
};
struct cpl_fw6_msg_ofld_connection_wr_rpl {
@@ -742,4 +798,12 @@ struct ulp_mem_io {
#define ULP_MEMIO_LOCK(x) ((x) << 31)
};
+#define S_T5_ULP_MEMIO_IMM 23
+#define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM)
+#define F_T5_ULP_MEMIO_IMM V_T5_ULP_MEMIO_IMM(1U)
+
+#define S_T5_ULP_MEMIO_ORDER 22
+#define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER)
+#define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U)
+
#endif /* __T4_MSG_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 83ec5f7844ac..ef146c0ba481 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -68,9 +68,14 @@
#define QID_SHIFT 15
#define QID(x) ((x) << QID_SHIFT)
#define DBPRIO(x) ((x) << 14)
+#define DBTYPE(x) ((x) << 13)
#define PIDX_MASK 0x00003fffU
#define PIDX_SHIFT 0
#define PIDX(x) ((x) << PIDX_SHIFT)
+#define S_PIDX_T5 0
+#define M_PIDX_T5 0x1fffU
+#define PIDX_T5(x) (((x) >> S_PIDX_T5) & M_PIDX_T5)
+
#define SGE_PF_GTS 0x4
#define INGRESSQID_MASK 0xffff0000U
@@ -152,6 +157,8 @@
#define QUEUESPERPAGEPF0_MASK 0x0000000fU
#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
+#define QUEUESPERPAGEPF1 4
+
#define SGE_INT_CAUSE1 0x1024
#define SGE_INT_CAUSE2 0x1030
#define SGE_INT_CAUSE3 0x103c
@@ -234,6 +241,10 @@
#define SGE_DOORBELL_CONTROL 0x10a8
#define ENABLE_DROP (1 << 13)
+#define S_NOCOALESCE 26
+#define V_NOCOALESCE(x) ((x) << S_NOCOALESCE)
+#define F_NOCOALESCE V_NOCOALESCE(1U)
+
#define SGE_TIMER_VALUE_0_AND_1 0x10b8
#define TIMERVALUE0_MASK 0xffff0000U
#define TIMERVALUE0_SHIFT 16
@@ -272,17 +283,36 @@
#define S_HP_INT_THRESH 28
#define M_HP_INT_THRESH 0xfU
#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
+#define S_LP_INT_THRESH_T5 18
+#define V_LP_INT_THRESH_T5(x) ((x) << S_LP_INT_THRESH_T5)
+#define M_LP_COUNT_T5 0x3ffffU
+#define G_LP_COUNT_T5(x) (((x) >> S_LP_COUNT) & M_LP_COUNT_T5)
#define M_HP_COUNT 0x7ffU
#define S_HP_COUNT 16
#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
#define S_LP_INT_THRESH 12
#define M_LP_INT_THRESH 0xfU
+#define M_LP_INT_THRESH_T5 0xfffU
#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
#define M_LP_COUNT 0x7ffU
#define S_LP_COUNT 0
#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
#define A_SGE_DBFIFO_STATUS 0x10a4
+#define SGE_STAT_TOTAL 0x10e4
+#define SGE_STAT_MATCH 0x10e8
+
+#define SGE_STAT_CFG 0x10ec
+#define S_STATSOURCE_T5 9
+#define STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5)
+
+#define SGE_DBFIFO_STATUS2 0x1118
+#define M_HP_COUNT_T5 0x3ffU
+#define G_HP_COUNT_T5(x) ((x) & M_HP_COUNT_T5)
+#define S_HP_INT_THRESH_T5 10
+#define M_HP_INT_THRESH_T5 0xfU
+#define V_HP_INT_THRESH_T5(x) ((x) << S_HP_INT_THRESH_T5)
+
#define S_ENABLE_DROP 13
#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
#define F_ENABLE_DROP V_ENABLE_DROP(1U)
@@ -331,8 +361,27 @@
#define MSIADDRHPERR 0x00000002U
#define MSIADDRLPERR 0x00000001U
+#define READRSPERR 0x20000000U
+#define TRGT1GRPPERR 0x10000000U
+#define IPSOTPERR 0x08000000U
+#define IPRXDATAGRPPERR 0x02000000U
+#define IPRXHDRGRPPERR 0x01000000U
+#define MAGRPPERR 0x00400000U
+#define VFIDPERR 0x00200000U
+#define HREQWRPERR 0x00010000U
+#define DREQWRPERR 0x00002000U
+#define MSTTAGQPERR 0x00000400U
+#define PIOREQGRPPERR 0x00000100U
+#define PIOCPLGRPPERR 0x00000080U
+#define MSIXSTIPERR 0x00000004U
+#define MSTTIMEOUTPERR 0x00000002U
+#define MSTGRPPERR 0x00000001U
+
#define PCIE_NONFAT_ERR 0x3010
#define PCIE_MEM_ACCESS_BASE_WIN 0x3068
+#define S_PCIEOFST 10
+#define M_PCIEOFST 0x3fffffU
+#define GET_PCIEOFST(x) (((x) >> S_PCIEOFST) & M_PCIEOFST)
#define PCIEOFST_MASK 0xfffffc00U
#define BIR_MASK 0x00000300U
#define BIR_SHIFT 8
@@ -342,6 +391,9 @@
#define WINDOW(x) ((x) << WINDOW_SHIFT)
#define PCIE_MEM_ACCESS_OFFSET 0x306c
+#define S_PFNUM 0
+#define V_PFNUM(x) ((x) << S_PFNUM)
+
#define PCIE_FW 0x30b8
#define PCIE_FW_ERR 0x80000000U
#define PCIE_FW_INIT 0x40000000U
@@ -407,12 +459,18 @@
#define MC_BIST_STATUS_RDATA 0x7688
+#define MA_EDRAM0_BAR 0x77c0
+#define MA_EDRAM1_BAR 0x77c4
+#define EDRAM_SIZE_MASK 0xfffU
+#define EDRAM_SIZE_GET(x) ((x) & EDRAM_SIZE_MASK)
+
#define MA_EXT_MEMORY_BAR 0x77c8
#define EXT_MEM_SIZE_MASK 0x00000fffU
#define EXT_MEM_SIZE_SHIFT 0
#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT)
#define MA_TARGET_MEM_ENABLE 0x77d8
+#define EXT_MEM1_ENABLE 0x00000010U
#define EXT_MEM_ENABLE 0x00000004U
#define EDRAM1_ENABLE 0x00000002U
#define EDRAM0_ENABLE 0x00000001U
@@ -431,6 +489,7 @@
#define MA_PCIE_FW 0x30b8
#define MA_PARITY_ERROR_STATUS 0x77f4
+#define MA_EXT_MEMORY1_BAR 0x7808
#define EDC_0_BASE_ADDR 0x7900
#define EDC_BIST_CMD 0x7904
@@ -801,6 +860,15 @@
#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
+#define MAC_PORT_CFG2 0x818
+#define MAC_PORT_MAGIC_MACID_LO 0x824
+#define MAC_PORT_MAGIC_MACID_HI 0x828
+#define MAC_PORT_EPIO_DATA0 0x8c0
+#define MAC_PORT_EPIO_DATA1 0x8c4
+#define MAC_PORT_EPIO_DATA2 0x8c8
+#define MAC_PORT_EPIO_DATA3 0x8cc
+#define MAC_PORT_EPIO_OP 0x8d0
+
#define MPS_CMN_CTL 0x9000
#define NUMPORTS_MASK 0x00000003U
#define NUMPORTS_SHIFT 0
@@ -1063,6 +1131,7 @@
#define ADDRESS_SHIFT 0
#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
+#define MAC_PORT_INT_CAUSE 0x8dc
#define XGMAC_PORT_INT_CAUSE 0x10dc
#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28
@@ -1101,4 +1170,33 @@
#define V_PORT(x) ((x) << S_PORT)
#define F_PORT V_PORT(1U)
+#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
+#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
+
+#define T5_PORT0_BASE 0x30000
+#define T5_PORT_STRIDE 0x4000
+#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE)
+#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg))
+
+#define MC_0_BASE_ADDR 0x40000
+#define MC_1_BASE_ADDR 0x48000
+#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
+#define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
+
+#define MC_P_BIST_CMD 0x41400
+#define MC_P_BIST_CMD_ADDR 0x41404
+#define MC_P_BIST_CMD_LEN 0x41408
+#define MC_P_BIST_DATA_PATTERN 0x4140c
+#define MC_P_BIST_STATUS_RDATA 0x41488
+#define EDC_T50_BASE_ADDR 0x50000
+#define EDC_H_BIST_CMD 0x50004
+#define EDC_H_BIST_CMD_ADDR 0x50008
+#define EDC_H_BIST_CMD_LEN 0x5000c
+#define EDC_H_BIST_DATA_PATTERN 0x50010
+#define EDC_H_BIST_STATUS_RDATA 0x50028
+
+#define EDC_T51_BASE_ADDR 0x50800
+#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
+#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
+
#endif /* __T4_REGS_H */
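
To make the new T5 port layout concrete (illustrative arithmetic, not part of the patch): T5 moves the per-port MAC registers into 0x4000-byte blocks starting at 0x30000, so MAC_PORT_CFG2 (offset 0x818) on port 2 resolves to 0x30000 + 2 * 0x4000 + 0x818 = 0x38818.

#include <assert.h>

#define MAC_PORT_CFG2 0x818

#define T5_PORT0_BASE 0x30000
#define T5_PORT_STRIDE 0x4000
#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE)
#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg))

int main(void)
{
	assert(T5_PORT_REG(0, MAC_PORT_CFG2) == 0x30818);
	assert(T5_PORT_REG(2, MAC_PORT_CFG2) == 0x38818);
	return 0;
}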
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index a0dcccd846c9..d1c755f78aaf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -574,7 +574,7 @@ struct fw_eth_tx_pkt_vm_wr {
__be16 vlantci;
};
-#define FW_CMD_MAX_TIMEOUT 3000
+#define FW_CMD_MAX_TIMEOUT 10000
/*
* If a host driver does a HELLO and discovers that there's already a MASTER
@@ -973,7 +973,9 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B,
FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C,
FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D,
- FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E
+ FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E,
+ FW_PARAMS_PARAM_PFVF_ETHOFLD_END = 0x30,
+ FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31
};
/*
@@ -1758,6 +1760,25 @@ enum fw_port_module_type {
FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK
};
+enum fw_port_mod_sub_type {
+ FW_PORT_MOD_SUB_TYPE_NA,
+ FW_PORT_MOD_SUB_TYPE_MV88E114X = 0x1,
+ FW_PORT_MOD_SUB_TYPE_TN8022 = 0x2,
+ FW_PORT_MOD_SUB_TYPE_AQ1202 = 0x3,
+ FW_PORT_MOD_SUB_TYPE_88x3120 = 0x4,
+ FW_PORT_MOD_SUB_TYPE_BCM84834 = 0x5,
+ FW_PORT_MOD_SUB_TYPE_BT_VSC8634 = 0x8,
+
+ /* The following will never be in the VPD. They are TWINAX cable
+ * lengths decoded from SFP+ module i2c PROMs. These should
+ * almost certainly go somewhere else ...
+ */
+ FW_PORT_MOD_SUB_TYPE_TWINAX_1 = 0x9,
+ FW_PORT_MOD_SUB_TYPE_TWINAX_3 = 0xA,
+ FW_PORT_MOD_SUB_TYPE_TWINAX_5 = 0xB,
+ FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC,
+};
+
/* port stats */
#define FW_NUM_PORT_STATS 50
#define FW_NUM_PORT_TX_STATS 23
@@ -2123,11 +2144,11 @@ struct fw_hdr {
u8 intfver_ri;
u8 intfver_iscsipdu;
u8 intfver_iscsi;
+ u8 intfver_fcoepdu;
u8 intfver_fcoe;
- u8 reserved2;
+ __u32 reserved2;
__u32 reserved3;
__u32 reserved4;
- __u32 reserved5;
__be32 flags;
__be32 reserved6[23];
};
@@ -2137,6 +2158,17 @@ struct fw_hdr {
#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff)
+enum fw_hdr_intfver {
+ FW_HDR_INTFVER_NIC = 0x00,
+ FW_HDR_INTFVER_VNIC = 0x00,
+ FW_HDR_INTFVER_OFLD = 0x00,
+ FW_HDR_INTFVER_RI = 0x00,
+ FW_HDR_INTFVER_ISCSIPDU = 0x00,
+ FW_HDR_INTFVER_ISCSI = 0x00,
+ FW_HDR_INTFVER_FCOEPDU = 0x00,
+ FW_HDR_INTFVER_FCOE = 0x00,
+};
+
enum fw_hdr_flags {
FW_HDR_FLAGS_RESET_HALT = 0x00000001,
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 68eaa9c88c7d..be5c7ef6ca93 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -344,6 +344,7 @@ struct adapter {
unsigned long registered_device_map;
unsigned long open_device_map;
unsigned long flags;
+ enum chip_type chip;
struct adapter_params params;
/* queue and interrupt resources */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 56b46ab2d4c5..40c22e7de15c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -54,8 +54,8 @@
/*
* Generic information about the driver.
*/
-#define DRV_VERSION "1.0.0"
-#define DRV_DESC "Chelsio T4 Virtual Function (VF) Network Driver"
+#define DRV_VERSION "2.0.0-ko"
+#define DRV_DESC "Chelsio T4/T5 Virtual Function (VF) Network Driver"
/*
* Module Parameters.
@@ -409,6 +409,20 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
break;
}
+ case CPL_FW4_MSG: {
+ /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
+ */
+ const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
+ opcode = G_CPL_OPCODE(ntohl(p->opcode_qid));
+ if (opcode != CPL_SGE_EGR_UPDATE) {
+ dev_err(adapter->pdev_dev,
+ "unexpected FW4/CPL %#x on FW event queue\n",
+ opcode);
+ break;
+ }
+ cpl = (void *)p;
+ /*FALLTHROUGH*/
+ }
+
case CPL_SGE_EGR_UPDATE: {
/*
* We've received an Egress Queue Status Update message. We
@@ -1050,7 +1064,7 @@ static inline unsigned int mk_adap_vers(const struct adapter *adapter)
/*
* Chip version 4, revision 0x3f (cxgb4vf).
*/
- return 4 | (0x3f << 10);
+ return CHELSIO_CHIP_VERSION(adapter->chip) | (0x3f << 10);
}
/*
@@ -1100,10 +1114,10 @@ static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
* Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx.
*/
- if (features & NETIF_F_HW_VLAN_RX)
- features |= NETIF_F_HW_VLAN_TX;
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
else
- features &= ~NETIF_F_HW_VLAN_TX;
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features;
}
@@ -1114,9 +1128,9 @@ static int cxgb4vf_set_features(struct net_device *dev,
struct port_info *pi = netdev_priv(dev);
netdev_features_t changed = dev->features ^ features;
- if (changed & NETIF_F_HW_VLAN_RX)
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
- features & NETIF_F_HW_VLAN_TX, 0);
+ features & NETIF_F_HW_VLAN_CTAG_TX, 0);
return 0;
}
@@ -2072,6 +2086,7 @@ static int adap_init0(struct adapter *adapter)
struct sge *s = &adapter->sge;
unsigned int ethqsets;
int err;
+ u32 param, val = 0;
/*
* Wait for the device to become ready before proceeding ...
@@ -2099,6 +2114,15 @@ static int adap_init0(struct adapter *adapter)
return err;
}
+ switch (adapter->pdev->device >> 12) {
+ case CHELSIO_T4:
+ adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
+ break;
+ case CHELSIO_T5:
+ adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, 0);
+ break;
+ }
+
/*
* Grab basic operational parameters. These will predominantly have
* been set up by the Physical Function Driver or will be hard coded
@@ -2144,6 +2168,16 @@ static int adap_init0(struct adapter *adapter)
return err;
}
+ /* If we're running on newer firmware, let it know that we're
+ * prepared to deal with encapsulated CPL messages. Older
+ * firmware won't understand this and we'll just get
+ * unencapsulated messages ...
+ */
+ param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
+ val = 1;
+ (void) t4vf_set_params(adapter, 1, &param, &val);
+
/*
* Retrieve our RX interrupt holdoff timer values and counter
* threshold values from the SGE parameters.
@@ -2614,11 +2648,12 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM;
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HIGHDMA;
- netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_TX;
+ netdev->features = netdev->hw_features |
+ NETIF_F_HW_VLAN_CTAG_TX;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
@@ -2888,6 +2923,26 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
CH_DEVICE(0x480a, 0), /* T404-bt */
CH_DEVICE(0x480d, 0), /* T480-cr */
CH_DEVICE(0x480e, 0), /* T440-lp-cr */
+ CH_DEVICE(0x5800, 0), /* T580-dbg */
+ CH_DEVICE(0x5801, 0), /* T520-cr */
+ CH_DEVICE(0x5802, 0), /* T522-cr */
+ CH_DEVICE(0x5803, 0), /* T540-cr */
+ CH_DEVICE(0x5804, 0), /* T520-bch */
+ CH_DEVICE(0x5805, 0), /* T540-bch */
+ CH_DEVICE(0x5806, 0), /* T540-ch */
+ CH_DEVICE(0x5807, 0), /* T520-so */
+ CH_DEVICE(0x5808, 0), /* T520-cx */
+ CH_DEVICE(0x5809, 0), /* T520-bt */
+ CH_DEVICE(0x580a, 0), /* T504-bt */
+ CH_DEVICE(0x580b, 0), /* T520-sr */
+ CH_DEVICE(0x580c, 0), /* T504-bt */
+ CH_DEVICE(0x580d, 0), /* T580-cr */
+ CH_DEVICE(0x580e, 0), /* T540-lp-cr */
+ CH_DEVICE(0x580f, 0), /* Amsterdam */
+ CH_DEVICE(0x5810, 0), /* T580-lp-cr */
+ CH_DEVICE(0x5811, 0), /* T520-lp-cr */
+ CH_DEVICE(0x5812, 0), /* T560-cr */
+ CH_DEVICE(0x5813, 0), /* T580-cr */
{ 0, }
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 9488032d6d2d..df296af20bd5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -528,17 +528,21 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
*/
static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
{
+ u32 val;
+
/*
* The SGE keeps track of its Producer and Consumer Indices in terms
* of Egress Queue Units so we can only tell it about integral numbers
* of multiples of Free List Entries per Egress Queue Units ...
*/
if (fl->pend_cred >= FL_PER_EQ_UNIT) {
+ val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
+ if (!is_t4(adapter->chip))
+ val |= DBTYPE(1);
wmb();
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
DBPRIO(1) |
- QID(fl->cntxt_id) |
- PIDX(fl->pend_cred / FL_PER_EQ_UNIT));
+ QID(fl->cntxt_id) | val);
fl->pend_cred %= FL_PER_EQ_UNIT;
}
}
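
A worked example for the doorbell value assembled above (illustrative; the FL_PER_EQ_UNIT value of 8 is an assumption for the example): with 70 pending credits the driver advances PIDX by 70 / 8 = 8 egress-queue units and carries the remaining 6 credits in fl->pend_cred, and on non-T4 chips it additionally sets the DBTYPE bit, exactly as the branch above does. The shift positions match the kdoorbell definitions in t4_regs.h.

#include <assert.h>
#include <stdint.h>

#define PIDX(x)   ((x) << 0)
#define DBTYPE(x) ((x) << 13)
#define DBPRIO(x) ((x) << 14)
#define QID(x)    ((x) << 15)

int main(void)
{
	unsigned int fl_per_eq_unit = 8; /* assumed for the example */
	unsigned int pend_cred = 70;
	unsigned int cntxt_id = 3;

	uint32_t val = PIDX(pend_cred / fl_per_eq_unit); /* 8 EQ units */
	val |= DBTYPE(1);                                /* T5 path    */
	val |= DBPRIO(1) | QID(cntxt_id);

	assert((val & 0x1fff) == 8);             /* PIDX field (M_PIDX_T5) */
	assert(pend_cred % fl_per_eq_unit == 6); /* credits left pending   */
	return 0;
}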
@@ -1478,7 +1482,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb_record_rx_queue(skb, rxq->rspq.idx);
if (pkt->vlan_ex) {
- __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
+ __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
+ be16_to_cpu(pkt->vlan));
rxq->stats.vlan_ex++;
}
ret = napi_gro_frags(&rxq->rspq.napi);
@@ -1547,7 +1552,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
if (pkt->vlan_ex) {
rxq->stats.vlan_ex++;
- __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan));
}
netif_receive_skb(skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 283f9d0d37fd..53cbfed21d0b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -38,6 +38,25 @@
#include "../cxgb4/t4fw_api.h"
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+
+#define CHELSIO_T4 0x4
+#define CHELSIO_T5 0x5
+
+enum chip_type {
+ T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0),
+ T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
+ T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
+ T4_FIRST_REV = T4_A1,
+ T4_LAST_REV = T4_A3,
+
+ T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+ T5_FIRST_REV = T5_A1,
+ T5_LAST_REV = T5_A1,
+};
+
/*
* The "len16" field of a Firmware Command Structure ...
*/
@@ -232,6 +251,11 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
}
+static inline int is_t4(enum chip_type chip)
+{
+ return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV);
+}
+
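+
+The encoding above packs the 4-bit chip generation over the 4-bit revision, so the whole T4 family occupies codes 0x40 to 0x42 and T5_A1 is 0x50; is_t4() is then a plain range check. A self-contained check of the arithmetic (illustrative):
+
+#include <assert.h>
+
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+
+#define CHELSIO_T4 0x4
+#define CHELSIO_T5 0x5
+
+int main(void)
+{
+	int t4_a2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1);
+	int t5_a1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0);
+
+	assert(t4_a2 == 0x41 && t5_a1 == 0x50);
+	assert(CHELSIO_CHIP_VERSION(t5_a1) == CHELSIO_T5);
+	assert(CHELSIO_CHIP_RELEASE(t4_a2) == 1);
+	return 0;
+}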
int t4vf_wait_dev_ready(struct adapter *);
int t4vf_port_init(struct adapter *, int);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 7127c7b9efde..9f96dc3bb112 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -1027,8 +1027,11 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
unsigned nfilters = 0;
unsigned int rem = naddr;
struct fw_vi_mac_cmd cmd, rpl;
+ unsigned int max_naddr = is_t4(adapter->chip) ?
+ NUM_MPS_CLS_SRAM_L_INSTANCES :
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
- if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
+ if (naddr > max_naddr)
return -EINVAL;
for (offset = 0; offset < naddr; /**/) {
@@ -1069,10 +1072,10 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
if (idx)
idx[offset+i] =
- (index >= FW_CLS_TCAM_NUM_ENTRIES
+ (index >= max_naddr
? 0xffff
: index);
- if (index < FW_CLS_TCAM_NUM_ENTRIES)
+ if (index < max_naddr)
nfilters++;
else if (hash)
*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
@@ -1118,6 +1121,9 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
struct fw_vi_mac_exact *p = &cmd.u.exact[0];
size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
u.exact[1]), 16);
+ unsigned int max_naddr = is_t4(adapter->chip) ?
+ NUM_MPS_CLS_SRAM_L_INSTANCES :
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
/*
* If this is a new allocation, determine whether it should be
@@ -1140,7 +1146,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
if (ret == 0) {
p = &rpl.u.exact[0];
ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
- if (ret >= FW_CLS_TCAM_NUM_ENTRIES)
+ if (ret >= max_naddr)
ret = -ENOMEM;
}
return ret;
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 138446957786..19f642a45f40 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -101,23 +101,6 @@ static char version[] __initdata =
* them to system IRQ numbers. This mapping is card specific and is set to
* the configuration of the Cirrus Eval board for this chip.
*/
-#if defined(CONFIG_MACH_IXDP2351)
-#define CS89x0_NONISA_IRQ
-static unsigned int netcard_portlist[] __used __initdata = {
- IXDP2351_VIRT_CS8900_BASE, 0
-};
-static unsigned int cs8900_irq_map[] = {
- IRQ_IXDP2351_CS8900, 0, 0, 0
-};
-#elif defined(CONFIG_ARCH_IXDP2X01)
-#define CS89x0_NONISA_IRQ
-static unsigned int netcard_portlist[] __used __initdata = {
- IXDP2X01_CS8900_VIRT_BASE, 0
-};
-static unsigned int cs8900_irq_map[] = {
- IRQ_IXDP2X01_CS8900, 0, 0, 0
-};
-#else
#ifndef CONFIG_CS89x0_PLATFORM
static unsigned int netcard_portlist[] __used __initdata = {
0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240,
@@ -127,7 +110,6 @@ static unsigned int cs8900_irq_map[] = {
10, 11, 12, 5
};
#endif
-#endif
#if DEBUGGING
static unsigned int net_debug = DEBUGGING;
@@ -210,32 +192,6 @@ static int __init media_fn(char *str)
__setup("cs89x0_media=", media_fn);
#endif
-#if defined(CONFIG_MACH_IXDP2351)
-static u16
-readword(unsigned long base_addr, int portno)
-{
- return __raw_readw(base_addr + (portno << 1));
-}
-
-static void
-writeword(unsigned long base_addr, int portno, u16 value)
-{
- __raw_writew(value, base_addr + (portno << 1));
-}
-#elif defined(CONFIG_ARCH_IXDP2X01)
-static u16
-readword(unsigned long base_addr, int portno)
-{
- return __raw_readl(base_addr + (portno << 1));
-}
-
-static void
-writeword(unsigned long base_addr, int portno, u16 value)
-{
- __raw_writel(value, base_addr + (portno << 1));
-}
-#endif
-
static void readwords(struct net_local *lp, int portno, void *buf, int length)
{
u8 *buf8 = (u8 *)buf;
@@ -478,9 +434,6 @@ dma_rx(struct net_device *dev)
/* Malloc up new buffer. */
skb = netdev_alloc_skb(dev, length + 2);
if (skb == NULL) {
- /* I don't think we want to do this to a stressed system */
- cs89_dbg(0, err, "%s: Memory squeeze, dropping packet\n",
- dev->name);
dev->stats.rx_dropped++;
/* AKPM: advance bp to the next frame */
@@ -731,9 +684,6 @@ net_rx(struct net_device *dev)
/* Malloc up new buffer. */
skb = netdev_alloc_skb(dev, length + 2);
if (skb == NULL) {
-#if 0 /* Again, this seems a cruel thing to do */
- pr_warn("%s: Memory squeeze, dropping packet\n", dev->name);
-#endif
dev->stats.rx_dropped++;
return;
}
@@ -908,7 +858,7 @@ net_open(struct net_device *dev)
goto bad_out;
}
} else {
-#if !defined(CS89x0_NONISA_IRQ) && !defined(CONFIG_CS89x0_PLATFORM)
+#if !defined(CONFIG_CS89x0_PLATFORM)
if (((1 << dev->irq) & lp->irq_map) == 0) {
pr_err("%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
dev->name, dev->irq, lp->irq_map);
@@ -1321,9 +1271,7 @@ static const struct net_device_ops net_ops = {
static void __init reset_chip(struct net_device *dev)
{
#if !defined(CONFIG_MACH_MX31ADS)
-#if !defined(CS89x0_NONISA_IRQ)
struct net_local *lp = netdev_priv(dev);
-#endif /* CS89x0_NONISA_IRQ */
int reset_start_time;
writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
@@ -1331,7 +1279,6 @@ static void __init reset_chip(struct net_device *dev)
/* wait 30 ms */
msleep(30);
-#if !defined(CS89x0_NONISA_IRQ)
if (lp->chip_type != CS8900) {
/* Hardware problem requires PNP registers to be reconfigured after a reset */
iowrite16(PP_CS8920_ISAINT, lp->virt_addr + ADD_PORT);
@@ -1344,7 +1291,6 @@ static void __init reset_chip(struct net_device *dev)
iowrite8((dev->mem_start >> 8) & 0xff,
lp->virt_addr + DATA_PORT + 1);
}
-#endif /* CS89x0_NONISA_IRQ */
/* Wait until the chip is reset */
reset_start_time = jiffies;
@@ -1579,9 +1525,6 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
i = lp->isa_config & INT_NO_MASK;
#ifndef CONFIG_CS89x0_PLATFORM
if (lp->chip_type == CS8900) {
-#ifdef CS89x0_NONISA_IRQ
- i = cs8900_irq_map[0];
-#else
/* Translate the IRQ using the IRQ mapping table. */
if (i >= ARRAY_SIZE(cs8900_irq_map))
pr_err("invalid ISA interrupt number %d\n", i);
@@ -1599,7 +1542,6 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
lp->irq_map = ((irq_map_buff[0] >> 8) |
(irq_map_buff[1] << 8));
}
-#endif
}
#endif
if (!dev->irq)
@@ -1978,18 +1920,6 @@ static struct platform_driver cs89x0_driver = {
.remove = cs89x0_platform_remove,
};
-static int __init cs89x0_init(void)
-{
- return platform_driver_probe(&cs89x0_driver, cs89x0_platform_probe);
-}
-
-module_init(cs89x0_init);
-
-static void __exit cs89x0_cleanup(void)
-{
- platform_driver_unregister(&cs89x0_driver);
-}
-
-module_exit(cs89x0_cleanup);
+module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
#endif /* CONFIG_CS89x0_PLATFORM */
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 354cbb78ed50..67b0388b6e68 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -887,18 +887,7 @@ static struct platform_driver ep93xx_eth_driver = {
},
};
-static int __init ep93xx_eth_init_module(void)
-{
- printk(KERN_INFO DRV_MODULE_NAME " version " DRV_MODULE_VERSION " loading\n");
- return platform_driver_register(&ep93xx_eth_driver);
-}
-
-static void __exit ep93xx_eth_cleanup_module(void)
-{
- platform_driver_unregister(&ep93xx_eth_driver);
-}
+module_platform_driver(ep93xx_eth_driver);
-module_init(ep93xx_eth_init_module);
-module_exit(ep93xx_eth_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-eth");
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index bf0fc56dba19..4b6e5695b263 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -212,7 +212,7 @@ int enic_dev_deinit_done(struct enic *enic, int *status)
}
/* rtnl lock is held */
-int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct enic *enic = netdev_priv(netdev);
int err;
@@ -225,7 +225,7 @@ int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
}
/* rtnl lock is held */
-int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct enic *enic = netdev_priv(netdev);
int err;
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index da1cba3c410e..08bded051b93 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -46,8 +46,8 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
int broadcast, int promisc, int allmulti);
int enic_dev_add_addr(struct enic *enic, u8 *addr);
int enic_dev_del_addr(struct enic *enic, u8 *addr);
-int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
+int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
int enic_dev_notify_unset(struct enic *enic);
int enic_dev_hang_notify(struct enic *enic);
int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index ec1a233622c6..635f55992d7e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1300,7 +1300,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
}
if (vlan_stripped)
- __vlan_hwaccel_put_tag(skb, vlan_tci);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
if (netdev->features & NETIF_F_GRO)
napi_gro_receive(&enic->napi[q_number], skb);
@@ -2496,9 +2496,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->watchdog_timeo = 2 * HZ;
netdev->ethtool_ops = &enic_ethtool_ops;
- netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (ENIC_SETTING(enic, LOOP)) {
- netdev->features &= ~NETIF_F_HW_VLAN_TX;
+ netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
enic->loop_enable = 1;
enic->loop_tag = enic->config.loop_tag;
dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 605b22283be1..97455c573db5 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -308,6 +308,9 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
if (status & STAT_ERROR) {
err = (int)readq(&devcmd->args[0]);
+ if (err == ERR_EINVAL &&
+ cmd == CMD_CAPABILITY)
+ return err;
if (err != ERR_ECMDUNKNOWN ||
cmd != CMD_CAPABILITY)
pr_err("Error %d devcmd %d\n",
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 8cdf02503d13..9105465b2a1a 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -257,6 +257,107 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
tmp = readl(reg);
}
+/*
+ * Sleep with msleep(), or busy-wait with mdelay() if we are
+ * suspending and thus cannot schedule.
+ */
+static void dm9000_msleep(board_info_t *db, unsigned int ms)
+{
+ if (db->in_suspend)
+ mdelay(ms);
+ else
+ msleep(ms);
+}
+
+/* Read a word from phyxcer */
+static int
+dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
+{
+ board_info_t *db = netdev_priv(dev);
+ unsigned long flags;
+ unsigned int reg_save;
+ int ret;
+
+ mutex_lock(&db->addr_lock);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ /* Save previous register address */
+ reg_save = readb(db->io_addr);
+
+ /* Fill the phyxcer register into REG_0C */
+ iow(db, DM9000_EPAR, DM9000_PHY | reg);
+
+ /* Issue phyxcer read command */
+ iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
+
+ writeb(reg_save, db->io_addr);
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ dm9000_msleep(db, 1); /* Wait read complete */
+
+ spin_lock_irqsave(&db->lock, flags);
+ reg_save = readb(db->io_addr);
+
+ iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
+
+ /* The read data keeps on REG_0D & REG_0E */
+ ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
+
+ /* restore the previous address */
+ writeb(reg_save, db->io_addr);
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ mutex_unlock(&db->addr_lock);
+
+ dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
+ return ret;
+}
+
+/* Write a word to phyxcer */
+static void
+dm9000_phy_write(struct net_device *dev,
+ int phyaddr_unused, int reg, int value)
+{
+ board_info_t *db = netdev_priv(dev);
+ unsigned long flags;
+ unsigned long reg_save;
+
+ dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
+ mutex_lock(&db->addr_lock);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ /* Save previous register address */
+ reg_save = readb(db->io_addr);
+
+ /* Fill the phyxcer register into REG_0C */
+ iow(db, DM9000_EPAR, DM9000_PHY | reg);
+
+ /* Fill the written data into REG_0D & REG_0E */
+ iow(db, DM9000_EPDRL, value);
+ iow(db, DM9000_EPDRH, value >> 8);
+
+ /* Issue phyxcer write command */
+ iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
+
+ writeb(reg_save, db->io_addr);
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ dm9000_msleep(db, 1); /* Wait write complete */
+
+ spin_lock_irqsave(&db->lock, flags);
+ reg_save = readb(db->io_addr);
+
+ iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
+
+ /* restore the previous address */
+ writeb(reg_save, db->io_addr);
+
+ spin_unlock_irqrestore(&db->lock, flags);
+ mutex_unlock(&db->addr_lock);
+}
+
/* dm9000_set_io
*
* select the specified set of io routines to use with the
@@ -795,6 +896,9 @@ dm9000_init_dm9000(struct net_device *dev)
iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
+ dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
+ dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */
+
ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
/* if wol is needed, then always set NCR_WAKEEN otherwise we end
@@ -1201,109 +1305,6 @@ dm9000_open(struct net_device *dev)
return 0;
}
-/*
- * Sleep, either by using msleep() or if we are suspending, then
- * use mdelay() to sleep.
- */
-static void dm9000_msleep(board_info_t *db, unsigned int ms)
-{
- if (db->in_suspend)
- mdelay(ms);
- else
- msleep(ms);
-}
-
-/*
- * Read a word from phyxcer
- */
-static int
-dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
-{
- board_info_t *db = netdev_priv(dev);
- unsigned long flags;
- unsigned int reg_save;
- int ret;
-
- mutex_lock(&db->addr_lock);
-
- spin_lock_irqsave(&db->lock,flags);
-
- /* Save previous register address */
- reg_save = readb(db->io_addr);
-
- /* Fill the phyxcer register into REG_0C */
- iow(db, DM9000_EPAR, DM9000_PHY | reg);
-
- iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */
-
- writeb(reg_save, db->io_addr);
- spin_unlock_irqrestore(&db->lock,flags);
-
- dm9000_msleep(db, 1); /* Wait read complete */
-
- spin_lock_irqsave(&db->lock,flags);
- reg_save = readb(db->io_addr);
-
- iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
-
- /* The read data keeps on REG_0D & REG_0E */
- ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
-
- /* restore the previous address */
- writeb(reg_save, db->io_addr);
- spin_unlock_irqrestore(&db->lock,flags);
-
- mutex_unlock(&db->addr_lock);
-
- dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
- return ret;
-}
-
-/*
- * Write a word to phyxcer
- */
-static void
-dm9000_phy_write(struct net_device *dev,
- int phyaddr_unused, int reg, int value)
-{
- board_info_t *db = netdev_priv(dev);
- unsigned long flags;
- unsigned long reg_save;
-
- dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
- mutex_lock(&db->addr_lock);
-
- spin_lock_irqsave(&db->lock,flags);
-
- /* Save previous register address */
- reg_save = readb(db->io_addr);
-
- /* Fill the phyxcer register into REG_0C */
- iow(db, DM9000_EPAR, DM9000_PHY | reg);
-
- /* Fill the written data into REG_0D & REG_0E */
- iow(db, DM9000_EPDRL, value);
- iow(db, DM9000_EPDRH, value >> 8);
-
- iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */
-
- writeb(reg_save, db->io_addr);
- spin_unlock_irqrestore(&db->lock, flags);
-
- dm9000_msleep(db, 1); /* Wait write complete */
-
- spin_lock_irqsave(&db->lock,flags);
- reg_save = readb(db->io_addr);
-
- iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
-
- /* restore the previous address */
- writeb(reg_save, db->io_addr);
-
- spin_unlock_irqrestore(&db->lock, flags);
- mutex_unlock(&db->addr_lock);
-}
-
static void
dm9000_shutdown(struct net_device *dev)
{
@@ -1502,7 +1503,12 @@ dm9000_probe(struct platform_device *pdev)
db->flags |= DM9000_PLATF_SIMPLE_PHY;
#endif
- dm9000_reset(db);
+ /* Instead of calling dm9000_reset(db) here, issue the reset with
+ * the NCR_MAC_LBK bit set: loopback mode is needed to keep the
+ * DM9000 FIFO stable during the probe stage.
+ */
+
+ iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
/* try multiple times, DM9000 sometimes gets the read wrong */
for (i = 0; i < 8; i++) {
@@ -1687,22 +1693,7 @@ static struct platform_driver dm9000_driver = {
.remove = dm9000_drv_remove,
};
-static int __init
-dm9000_init(void)
-{
- printk(KERN_INFO "%s Ethernet Driver, V%s\n", CARDNAME, DRV_VERSION);
-
- return platform_driver_register(&dm9000_driver);
-}
-
-static void __exit
-dm9000_cleanup(void)
-{
- platform_driver_unregister(&dm9000_driver);
-}
-
-module_init(dm9000_init);
-module_exit(dm9000_cleanup);
+module_platform_driver(dm9000_driver);
MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
MODULE_DESCRIPTION("Davicom DM9000 network driver");
diff --git a/drivers/net/ethernet/davicom/dm9000.h b/drivers/net/ethernet/davicom/dm9000.h
index 55688bd1a3ef..9ce058adabab 100644
--- a/drivers/net/ethernet/davicom/dm9000.h
+++ b/drivers/net/ethernet/davicom/dm9000.h
@@ -69,7 +69,9 @@
#define NCR_WAKEEN (1<<6)
#define NCR_FCOL (1<<4)
#define NCR_FDX (1<<3)
-#define NCR_LBK (3<<1)
+
+#define NCR_RESERVED (3<<1)
+#define NCR_MAC_LBK (1<<1)
#define NCR_RST (1<<0)
#define NSR_SPEED (1<<7)
@@ -167,5 +169,12 @@
#define ISR_LNKCHNG (1<<5)
#define ISR_UNDERRUN (1<<4)
+/* Davicom MII registers.
+ */
+
+#define MII_DM_DSPCR 0x1b /* DSP Control Register */
+
+#define DSPCR_INIT_PARAM 0xE100 /* DSP init parameter */
+
#endif /* _DM9000X_H_ */
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 88feced9a629..cdbcd1643141 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -236,17 +236,14 @@ static int xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
private->rx_buffer = dma_alloc_coherent(d, 8192,
&private->rx_dma_handle,
GFP_KERNEL);
- if (private->rx_buffer == NULL) {
- pr_err("%s: no memory for rx buffer\n", __func__);
+ if (private->rx_buffer == NULL)
goto rx_buf_fail;
- }
+
private->tx_buffer = dma_alloc_coherent(d, 8192,
&private->tx_dma_handle,
GFP_KERNEL);
- if (private->tx_buffer == NULL) {
- pr_err("%s: no memory for tx buffer\n", __func__);
+ if (private->tx_buffer == NULL)
goto tx_buf_fail;
- }
SET_NETDEV_DEV(dev, &pdev->dev);
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 110d26f4c602..afa8e3af2c4d 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -580,12 +580,9 @@ alloc_list (struct net_device *dev)
skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
np->rx_skbuff[i] = skb;
- if (skb == NULL) {
- printk (KERN_ERR
- "%s: alloc_list: allocate Rx buffer error! ",
- dev->name);
+ if (skb == NULL)
break;
- }
+
/* Rubicon now supports 40 bits of addressing space. */
np->rx_ring[i].fraginfo =
cpu_to_le64 ( pci_map_single (
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 29aff55f2eea..234ce6f07544 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -214,6 +214,7 @@ struct be_tx_stats {
};
struct be_tx_obj {
+ u32 db_offset;
struct be_queue_info q;
struct be_queue_info cq;
/* Remember the skbs that were transmitted */
@@ -292,7 +293,7 @@ struct be_drv_stats {
u32 rx_in_range_errors;
u32 rx_out_range_errors;
u32 rx_frame_too_long;
- u32 rx_address_mismatch_drops;
+ u32 rx_address_filtered;
u32 rx_dropped_too_small;
u32 rx_dropped_too_short;
u32 rx_dropped_header_too_small;
@@ -328,6 +329,7 @@ enum vf_state {
#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
#define BE_UC_PMAC_COUNT 30
#define BE_VF_UC_PMAC_COUNT 2
+#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11)
struct phy_info {
u8 transceiver;
@@ -434,6 +436,8 @@ struct be_adapter {
u8 wol_cap;
bool wol;
u32 uc_macs; /* Count of secondary UC MAC programmed */
+ u16 asic_rev;
+ u16 qnq_vid;
u32 msg_enable;
int be_get_temp_freq;
u16 max_mcast_mac;
@@ -445,6 +449,7 @@ struct be_adapter {
u16 max_event_queues;
u32 if_cap_flags;
u8 pf_number;
+ u64 rss_flags;
};
#define be_physfn(adapter) (!adapter->virtfn)
@@ -648,6 +653,11 @@ static inline bool be_is_wol_excluded(struct be_adapter *adapter)
}
}
+static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
+{
+ return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
+}
+
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
u16 num_popped);
extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 3c9b4f12e3e5..25d3290b8cac 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -263,6 +263,27 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
}
}
+static void be_async_dbg_evt_process(struct be_adapter *adapter,
+ u32 trailer, struct be_mcc_compl *cmp)
+{
+ u8 event_type = 0;
+ struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
+
+ event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
+ ASYNC_TRAILER_EVENT_TYPE_MASK;
+
+ switch (event_type) {
+ case ASYNC_DEBUG_EVENT_TYPE_QNQ:
+ if (evt->valid)
+ adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
+ adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
+ break;
+ default:
+ dev_warn(&adapter->pdev->dev, "Unknown debug event\n");
+ break;
+ }
+}
+
static inline bool is_link_state_evt(u32 trailer)
{
return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
@@ -277,6 +298,13 @@ static inline bool is_grp5_evt(u32 trailer)
ASYNC_EVENT_CODE_GRP_5);
}
+static inline bool is_dbg_evt(u32 trailer)
+{
+ return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+ ASYNC_TRAILER_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_QNQ);
+}
+
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
@@ -325,6 +353,9 @@ int be_process_mcc(struct be_adapter *adapter)
else if (is_grp5_evt(compl->flags))
be_async_grp5_evt_process(adapter,
compl->flags, compl);
+ else if (is_dbg_evt(compl->flags))
+ be_async_dbg_evt_process(adapter,
+ compl->flags, compl);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
status = be_mcc_compl_process(adapter, compl);
atomic_dec(&mcc_obj->q.used);
@@ -687,10 +718,8 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
if (!mccq->created)
return NULL;
- if (atomic_read(&mccq->used) >= mccq->len) {
- dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
+ if (atomic_read(&mccq->used) >= mccq->len)
return NULL;
- }
wrb = queue_head_node(mccq);
queue_head_inc(mccq);
@@ -1022,6 +1051,7 @@ int be_cmd_mccq_ext_create(struct be_adapter *adapter,
/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
+ req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -1095,15 +1125,14 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
return status;
}
-int be_cmd_txq_create(struct be_adapter *adapter,
- struct be_queue_info *txq,
- struct be_queue_info *cq)
+int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_eth_tx_create *req;
+ struct be_queue_info *txq = &txo->q;
+ struct be_queue_info *cq = &txo->cq;
struct be_dma_mem *q_mem = &txq->dma_mem;
- void *ctxt;
- int status;
+ int status, ver = 0;
spin_lock_bh(&adapter->mcc_lock);
@@ -1114,34 +1143,37 @@ int be_cmd_txq_create(struct be_adapter *adapter,
}
req = embedded_payload(wrb);
- ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);
if (lancer_chip(adapter)) {
req->hdr.version = 1;
- AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
- adapter->if_handle);
+ req->if_id = cpu_to_le16(adapter->if_handle);
+ } else if (BEx_chip(adapter)) {
+ if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
+ req->hdr.version = 2;
+ } else { /* For SH */
+ req->hdr.version = 2;
}
req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
req->ulp_num = BE_ULP1_NUM;
req->type = BE_ETH_TX_RING_TYPE_STANDARD;
-
- AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
- be_encoded_q_len(txq->len));
- AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
-
- be_dws_cpu_to_le(ctxt, sizeof(req->context));
-
+ req->cq_id = cpu_to_le16(cq->id);
+ req->queue_size = be_encoded_q_len(txq->len);
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+ ver = req->hdr.version;
+
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
txq->id = le16_to_cpu(resp->cid);
+ if (ver == 2)
+ txo->db_offset = le32_to_cpu(resp->db_offset);
+ else
+ txo->db_offset = DB_TXULP1_OFFSET;
txq->created = true;
}
@@ -1834,7 +1866,7 @@ err:
/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
- u32 *mode, u32 *caps)
+ u32 *mode, u32 *caps, u16 *asic_rev)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_query_fw_cfg *req;
@@ -1855,6 +1887,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
*port_num = le32_to_cpu(resp->phys_port);
*mode = le32_to_cpu(resp->function_mode);
*caps = le32_to_cpu(resp->function_caps);
+ *asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
}
mutex_unlock(&adapter->mbox_lock);
@@ -1897,7 +1930,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
return status;
}
-int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
+int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
+ u32 rss_hash_opts, u16 table_size)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_rss_config *req;
@@ -1916,16 +1950,12 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
req->if_id = cpu_to_le32(adapter->if_handle);
- req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
- RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);
+ req->enable_rss = cpu_to_le16(rss_hash_opts);
+ req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
- if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
+ if (lancer_chip(adapter) || skyhawk_chip(adapter))
req->hdr.version = 1;
- req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
- RSS_ENABLE_UDP_IPV6);
- }
- req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
memcpy(req->cpu_table, rsstable, table_size);
memcpy(req->hash, myhash, sizeof(myhash));
be_dws_cpu_to_le(req->hash, sizeof(req->hash));
@@ -2343,7 +2373,6 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_seeprom_read *req;
- struct be_sge *sge;
int status;
spin_lock_bh(&adapter->mcc_lock);
@@ -2354,7 +2383,6 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
goto err;
}
req = nonemb_cmd->va;
- sge = nonembedded_sgl(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
@@ -2461,6 +2489,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
struct mgmt_controller_attrib *attribs;
struct be_dma_mem attribs_cmd;
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
@@ -2468,12 +2499,10 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
if (!attribs_cmd.va) {
dev_err(&adapter->pdev->dev,
"Memory allocation failure\n");
- return -ENOMEM;
+ status = -ENOMEM;
+ goto err;
}
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
-
wrb = wrb_from_mbox(adapter);
if (!wrb) {
status = -EBUSY;
@@ -2493,8 +2522,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
err:
mutex_unlock(&adapter->mbox_lock);
- pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
- attribs_cmd.dma);
+ if (attribs_cmd.va)
+ pci_free_consistent(adapter->pdev, attribs_cmd.size,
+ attribs_cmd.va, attribs_cmd.dma);
return status;
}
@@ -2667,10 +2697,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
cmd.size = sizeof(struct be_cmd_req_set_mac_list);
cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
&cmd.dma, GFP_KERNEL);
- if (!cmd.va) {
- dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ if (!cmd.va)
return -ENOMEM;
- }
spin_lock_bh(&adapter->mcc_lock);
@@ -2794,6 +2822,9 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
CMD_SUBSYSTEM_ETH))
return -EPERM;
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
@@ -2801,12 +2832,10 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
if (!cmd.va) {
dev_err(&adapter->pdev->dev,
"Memory allocation failure\n");
- return -ENOMEM;
+ status = -ENOMEM;
+ goto err;
}
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
-
wrb = wrb_from_mbox(adapter);
if (!wrb) {
status = -EBUSY;
@@ -2837,7 +2866,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
}
err:
mutex_unlock(&adapter->mbox_lock);
- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+ if (cmd.va)
+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
return status;
}
@@ -2942,14 +2972,15 @@ static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
int i;
for (i = 0; i < desc_count; i++) {
- desc->desc_len = RESOURCE_DESC_SIZE;
+ desc->desc_len = desc->desc_len ? : RESOURCE_DESC_SIZE;
if (((void *)desc + desc->desc_len) >
(void *)(buf + max_buf_size)) {
desc = NULL;
break;
}
- if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_ID)
+ if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
+ desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
break;
desc = (void *)desc + desc->desc_len;
@@ -2969,16 +3000,18 @@ int be_cmd_get_func_config(struct be_adapter *adapter)
int status;
struct be_dma_mem cmd;
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_get_func_config);
cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
&cmd.dma);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
- return -ENOMEM;
+ status = -ENOMEM;
+ goto err;
}
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
wrb = wrb_from_mbox(adapter);
if (!wrb) {
@@ -2992,6 +3025,9 @@ int be_cmd_get_func_config(struct be_adapter *adapter)
OPCODE_COMMON_GET_FUNC_CONFIG,
cmd.size, wrb, &cmd);
+ if (skyhawk_chip(adapter))
+ req->hdr.version = 1;
+
status = be_mbox_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_get_func_config *resp = cmd.va;
@@ -3018,28 +3054,46 @@ int be_cmd_get_func_config(struct be_adapter *adapter)
}
err:
mutex_unlock(&adapter->mbox_lock);
- pci_free_consistent(adapter->pdev, cmd.size,
- cmd.va, cmd.dma);
+ if (cmd.va)
+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
return status;
}
- /* Uses sync mcc */
-int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
- u8 domain)
+/* Uses mbox */
+int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
+ u8 domain, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_profile_config *req;
int status;
- struct be_dma_mem cmd;
- memset(&cmd, 0, sizeof(struct be_dma_mem));
- cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
- if (!cmd.va) {
- dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
- return -ENOMEM;
- }
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+ wrb = wrb_from_mbox(adapter);
+
+ req = cmd->va;
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_PROFILE_CONFIG,
+ cmd->size, wrb, cmd);
+
+ req->type = ACTIVE_PROFILE_TYPE;
+ req->hdr.domain = domain;
+ if (!lancer_chip(adapter))
+ req->hdr.version = 1;
+
+ status = be_mbox_notify_wait(adapter);
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
+ u8 domain, struct be_dma_mem *cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_profile_config *req;
+ int status;
spin_lock_bh(&adapter->mcc_lock);
@@ -3049,16 +3103,47 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
goto err;
}
- req = cmd.va;
-
+ req = cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_GET_PROFILE_CONFIG,
- cmd.size, wrb, &cmd);
+ cmd->size, wrb, cmd);
req->type = ACTIVE_PROFILE_TYPE;
req->hdr.domain = domain;
+ if (!lancer_chip(adapter))
+ req->hdr.version = 1;
status = be_mcc_notify_wait(adapter);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses sync mcc if MCCQ is already created, otherwise uses mbox */
+int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
+ u16 *txq_count, u8 domain)
+{
+ struct be_queue_info *mccq = &adapter->mcc_obj.q;
+ struct be_dma_mem cmd;
+ int status;
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ if (!lancer_chip(adapter))
+ cmd.size = sizeof(struct be_cmd_resp_get_profile_config_v1);
+ else
+ cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
+ &cmd.dma);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ return -ENOMEM;
+ }
+
+ if (!mccq->created)
+ status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
+ else
+ status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
if (!status) {
struct be_cmd_resp_get_profile_config *resp = cmd.va;
u32 desc_count = le32_to_cpu(resp->desc_count);
@@ -3071,12 +3156,15 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
status = -EINVAL;
goto err;
}
- *cap_flags = le32_to_cpu(desc->cap_flags);
+ if (cap_flags)
+ *cap_flags = le32_to_cpu(desc->cap_flags);
+ if (txq_count)
+ *txq_count = le32_to_cpu(desc->txq_count);
}
err:
- spin_unlock_bh(&adapter->mcc_lock);
- pci_free_consistent(adapter->pdev, cmd.size,
- cmd.va, cmd.dma);
+ if (cmd.va)
+ pci_free_consistent(adapter->pdev, cmd.size,
+ cmd.va, cmd.dma);
return status;
}
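The split above exists because early in initialization (and during error recovery) the MCC queue may not have been created yet, so the command must go over the bootstrap mailbox instead. A stub-based sketch of the dispatch, with placeholder names rather than the driver's API:

#include <stdbool.h>

struct ctx { bool mccq_created; };

static int send_via_mbox(struct ctx *c) { (void)c; return 0; }
static int send_via_mccq(struct ctx *c) { (void)c; return 0; }

static int get_profile_config(struct ctx *c)
{
	/* before the MCC queue exists, fall back to the mailbox */
	return c->mccq_created ? send_via_mccq(c) : send_via_mbox(c);
}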
@@ -3105,7 +3193,7 @@ int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
req->hdr.domain = domain;
req->desc_count = cpu_to_le32(1);
- req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_ID;
+ req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
req->nic_desc.pf_num = adapter->pf_number;
@@ -3202,6 +3290,31 @@ err:
return status;
}
+int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_intr_set *req;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
+ wrb, NULL);
+
+ req->intr_enabled = intr_enable;
+
+ status = be_mbox_notify_wait(adapter);
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 96970860c915..a855668e0cc5 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -84,6 +84,9 @@ struct be_mcc_compl {
#define ASYNC_EVENT_QOS_SPEED 0x1
#define ASYNC_EVENT_COS_PRIORITY 0x2
#define ASYNC_EVENT_PVID_STATE 0x3
+#define ASYNC_EVENT_CODE_QNQ 0x6
+#define ASYNC_DEBUG_EVENT_TYPE_QNQ 1
+
struct be_async_event_trailer {
u32 code;
};
@@ -144,6 +147,16 @@ struct be_async_event_grp5_pvid_state {
struct be_async_event_trailer trailer;
} __packed;
+/* async event indicating outer VLAN tag in QnQ */
+struct be_async_event_qnq {
+ u8 valid; /* Indicates if outer VLAN is valid */
+ u8 rsvd0;
+ u16 vlan_tag;
+ u32 event_tag;
+ u8 rsvd1[4];
+ struct be_async_event_trailer trailer;
+} __packed;
+
struct be_mcc_mailbox {
struct be_mcc_wrb wrb;
struct be_mcc_compl compl;
@@ -188,6 +201,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_BEACON_STATE 70
#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
#define OPCODE_COMMON_GET_PORT_NAME 77
+#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89
#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
@@ -473,46 +487,27 @@ struct be_cmd_resp_mcc_create {
#define BE_ETH_TX_RING_TYPE_STANDARD 2
#define BE_ULP1_NUM 1
-/* Pseudo amap definition in which each bit of the actual structure is defined
- * as a byte: used to calculate offset/shift/mask of each field */
-struct amap_tx_context {
- u8 if_id[16]; /* dword 0 */
- u8 tx_ring_size[4]; /* dword 0 */
- u8 rsvd1[26]; /* dword 0 */
- u8 pci_func_id[8]; /* dword 1 */
- u8 rsvd2[9]; /* dword 1 */
- u8 ctx_valid; /* dword 1 */
- u8 cq_id_send[16]; /* dword 2 */
- u8 rsvd3[16]; /* dword 2 */
- u8 rsvd4[32]; /* dword 3 */
- u8 rsvd5[32]; /* dword 4 */
- u8 rsvd6[32]; /* dword 5 */
- u8 rsvd7[32]; /* dword 6 */
- u8 rsvd8[32]; /* dword 7 */
- u8 rsvd9[32]; /* dword 8 */
- u8 rsvd10[32]; /* dword 9 */
- u8 rsvd11[32]; /* dword 10 */
- u8 rsvd12[32]; /* dword 11 */
- u8 rsvd13[32]; /* dword 12 */
- u8 rsvd14[32]; /* dword 13 */
- u8 rsvd15[32]; /* dword 14 */
- u8 rsvd16[32]; /* dword 15 */
-} __packed;
-
struct be_cmd_req_eth_tx_create {
struct be_cmd_req_hdr hdr;
u8 num_pages;
u8 ulp_num;
- u8 type;
- u8 bound_port;
- u8 context[sizeof(struct amap_tx_context) / 8];
+ u16 type;
+ u16 if_id;
+ u8 queue_size;
+ u8 rsvd0;
+ u32 rsvd1;
+ u16 cq_id;
+ u16 rsvd2;
+ u32 rsvd3[13];
struct phys_addr pages[8];
} __packed;
struct be_cmd_resp_eth_tx_create {
struct be_cmd_resp_hdr hdr;
u16 cid;
- u16 rsvd0;
+ u16 rid;
+ u32 db_offset;
+ u32 rsvd0[4];
} __packed;
/******************** Create RxQ ***************************/
@@ -608,8 +603,8 @@ struct be_port_rxf_stats_v0 {
u32 rx_in_range_errors; /* dword 10*/
u32 rx_out_range_errors; /* dword 11*/
u32 rx_frame_too_long; /* dword 12*/
- u32 rx_address_mismatch_drops; /* dword 13*/
- u32 rx_vlan_mismatch_drops; /* dword 14*/
+ u32 rx_address_filtered; /* dword 13*/
+ u32 rx_vlan_filtered; /* dword 14*/
u32 rx_dropped_too_small; /* dword 15*/
u32 rx_dropped_too_short; /* dword 16*/
u32 rx_dropped_header_too_small; /* dword 17*/
@@ -815,8 +810,8 @@ struct lancer_pport_stats {
u32 rx_control_frames_unknown_opcode_hi;
u32 rx_in_range_errors;
u32 rx_out_of_range_errors;
- u32 rx_address_mismatch_drops;
- u32 rx_vlan_mismatch_drops;
+ u32 rx_address_filtered;
+ u32 rx_vlan_filtered;
u32 rx_dropped_too_small;
u32 rx_dropped_too_short;
u32 rx_dropped_header_too_small;
@@ -1066,7 +1061,6 @@ struct be_cmd_resp_modify_eq_delay {
} __packed;
/******************** Get FW Config *******************/
-#define BE_FUNCTION_CAPS_RSS 0x2
/* The HW can come up in either of the following multi-channel modes
* based on the skew/IPL.
*/
@@ -1109,6 +1103,9 @@ struct be_cmd_resp_query_fw_cfg {
#define RSS_ENABLE_UDP_IPV4 0x10
#define RSS_ENABLE_UDP_IPV6 0x20
+#define L3_RSS_FLAGS (RXH_IP_DST | RXH_IP_SRC)
+#define L4_RSS_FLAGS (RXH_L4_B_0_1 | RXH_L4_B_2_3)
+
struct be_cmd_req_rss_config {
struct be_cmd_req_hdr hdr;
u32 if_id;
@@ -1592,7 +1589,7 @@ struct be_port_rxf_stats_v1 {
u32 rx_in_range_errors;
u32 rx_out_range_errors;
u32 rx_frame_too_long;
- u32 rx_address_mismatch_drops;
+ u32 rx_address_filtered;
u32 rx_dropped_too_small;
u32 rx_dropped_too_short;
u32 rx_dropped_header_too_small;
@@ -1706,9 +1703,11 @@ struct be_cmd_req_set_ext_fat_caps {
struct be_fat_conf_params set_params;
};
-#define RESOURCE_DESC_SIZE 72
-#define NIC_RESOURCE_DESC_TYPE_ID 0x41
+#define RESOURCE_DESC_SIZE 88
+#define NIC_RESOURCE_DESC_TYPE_V0 0x41
+#define NIC_RESOURCE_DESC_TYPE_V1 0x51
#define MAX_RESOURCE_DESC 4
+#define MAX_RESOURCE_DESC_V1 32
/* QOS unit number */
#define QUN 4
@@ -1755,7 +1754,7 @@ struct be_cmd_req_get_func_config {
};
struct be_cmd_resp_get_func_config {
- struct be_cmd_req_hdr hdr;
+ struct be_cmd_resp_hdr hdr;
u32 desc_count;
u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
};
@@ -1774,6 +1773,12 @@ struct be_cmd_resp_get_profile_config {
u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
};
+struct be_cmd_resp_get_profile_config_v1 {
+ struct be_cmd_req_hdr hdr;
+ u32 desc_count;
+ u8 func_param[MAX_RESOURCE_DESC_V1 * RESOURCE_DESC_SIZE];
+};
+
struct be_cmd_req_set_profile_config {
struct be_cmd_req_hdr hdr;
u32 rsvd;
@@ -1791,6 +1796,12 @@ struct be_cmd_enable_disable_vf {
u8 rsvd[3];
};
+struct be_cmd_req_intr_set {
+ struct be_cmd_req_hdr hdr;
+ u8 intr_enabled;
+ u8 rsvd[3];
+};
+
static inline bool check_privilege(struct be_adapter *adapter, u32 flags)
{
return flags & adapter->cmd_privileges ? true : false;
@@ -1834,8 +1845,7 @@ extern int be_cmd_mccq_create(struct be_adapter *adapter,
struct be_queue_info *mccq,
struct be_queue_info *cq);
extern int be_cmd_txq_create(struct be_adapter *adapter,
- struct be_queue_info *txq,
- struct be_queue_info *cq);
+ struct be_tx_obj *txo);
extern int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_queue_info *rxq, u16 cq_id,
u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
@@ -1862,11 +1872,11 @@ extern int be_cmd_set_flow_control(struct be_adapter *adapter,
u32 tx_fc, u32 rx_fc);
extern int be_cmd_get_flow_control(struct be_adapter *adapter,
u32 *tx_fc, u32 *rx_fc);
-extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
- u32 *port_num, u32 *function_mode, u32 *function_caps);
+extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
+ u32 *function_mode, u32 *function_caps, u16 *asic_rev);
extern int be_cmd_reset_function(struct be_adapter *adapter);
extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
- u16 table_size);
+ u32 rss_hash_opts, u16 table_size);
extern int be_process_mcc(struct be_adapter *adapter);
extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
u8 port_num, u8 beacon, u8 status, u8 state);
@@ -1931,10 +1941,11 @@ extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
extern int be_cmd_get_func_config(struct be_adapter *adapter);
extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
- u8 domain);
+ u16 *txq_count, u8 domain);
extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
u8 domain);
extern int be_cmd_get_if_id(struct be_adapter *adapter,
struct be_vf_cfg *vf_cfg, int vf_num);
extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
+extern int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 76b302f30c87..5733cde88e2c 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -54,7 +54,7 @@ static const struct be_ethtool_stat et_stats[] = {
/* Received packets dropped when they don't pass the unicast or
* multicast address filtering.
*/
- {DRVSTAT_INFO(rx_address_mismatch_drops)},
+ {DRVSTAT_INFO(rx_address_filtered)},
/* Received packets dropped when IP packet length field is less than
* the IP header length field.
*/
@@ -680,7 +680,8 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (be_is_wol_supported(adapter)) {
wol->supported |= WAKE_MAGIC;
- wol->wolopts |= WAKE_MAGIC;
+ if (adapter->wol)
+ wol->wolopts |= WAKE_MAGIC;
} else
wol->wolopts = 0;
memset(&wol->sopass, 0, sizeof(wol->sopass));
@@ -719,10 +720,8 @@ be_test_ddr_dma(struct be_adapter *adapter)
ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
&ddrdma_cmd.dma, GFP_KERNEL);
- if (!ddrdma_cmd.va) {
- dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
+ if (!ddrdma_cmd.va)
return -ENOMEM;
- }
for (i = 0; i < 2; i++) {
ret = be_cmd_ddr_dma_test(adapter, pattern[i],
@@ -757,6 +756,12 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
int status;
u8 link_status = 0;
+ if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
+ dev_err(&adapter->pdev->dev, "Self test not supported\n");
+ test->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+
memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
if (test->flags & ETH_TEST_FL_OFFLINE) {
@@ -845,11 +850,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
&eeprom_cmd.dma, GFP_KERNEL);
- if (!eeprom_cmd.va) {
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure. Could not read eeprom\n");
+ if (!eeprom_cmd.va)
return -ENOMEM;
- }
status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
@@ -939,6 +941,159 @@ static void be_set_msg_level(struct net_device *netdev, u32 level)
return;
}
+static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
+{
+ u64 data = 0;
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ if (adapter->rss_flags & RSS_ENABLE_IPV4)
+ data |= RXH_IP_DST | RXH_IP_SRC;
+ if (adapter->rss_flags & RSS_ENABLE_TCP_IPV4)
+ data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (adapter->rss_flags & RSS_ENABLE_IPV4)
+ data |= RXH_IP_DST | RXH_IP_SRC;
+ if (adapter->rss_flags & RSS_ENABLE_UDP_IPV4)
+ data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case TCP_V6_FLOW:
+ if (adapter->rss_flags & RSS_ENABLE_IPV6)
+ data |= RXH_IP_DST | RXH_IP_SRC;
+ if (adapter->rss_flags & RSS_ENABLE_TCP_IPV6)
+ data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V6_FLOW:
+ if (adapter->rss_flags & RSS_ENABLE_IPV6)
+ data |= RXH_IP_DST | RXH_IP_SRC;
+ if (adapter->rss_flags & RSS_ENABLE_UDP_IPV6)
+ data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ }
+
+ return data;
+}
+
+static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (!be_multi_rxq(adapter)) {
+ dev_info(&adapter->pdev->dev,
+ "ethtool::get_rxnfc: RX flow hashing is disabled\n");
+ return -EINVAL;
+ }
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXFH:
+ cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
+ break;
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_qs - 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int be_set_rss_hash_opts(struct be_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct be_rx_obj *rxo;
+ int status = 0, i, j;
+ u8 rsstable[128];
+ u32 rss_flags = adapter->rss_flags;
+
+ if (cmd->data != L3_RSS_FLAGS &&
+ cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
+ return -EINVAL;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ if (cmd->data == L3_RSS_FLAGS)
+ rss_flags &= ~RSS_ENABLE_TCP_IPV4;
+ else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+ rss_flags |= RSS_ENABLE_IPV4 |
+ RSS_ENABLE_TCP_IPV4;
+ break;
+ case TCP_V6_FLOW:
+ if (cmd->data == L3_RSS_FLAGS)
+ rss_flags &= ~RSS_ENABLE_TCP_IPV6;
+ else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+ rss_flags |= RSS_ENABLE_IPV6 |
+ RSS_ENABLE_TCP_IPV6;
+ break;
+ case UDP_V4_FLOW:
+ if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
+ BEx_chip(adapter))
+ return -EINVAL;
+
+ if (cmd->data == L3_RSS_FLAGS)
+ rss_flags &= ~RSS_ENABLE_UDP_IPV4;
+ else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+ rss_flags |= RSS_ENABLE_IPV4 |
+ RSS_ENABLE_UDP_IPV4;
+ break;
+ case UDP_V6_FLOW:
+ if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
+ BEx_chip(adapter))
+ return -EINVAL;
+
+ if (cmd->data == L3_RSS_FLAGS)
+ rss_flags &= ~RSS_ENABLE_UDP_IPV6;
+ else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
+ rss_flags |= RSS_ENABLE_IPV6 |
+ RSS_ENABLE_UDP_IPV6;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rss_flags == adapter->rss_flags)
+ return status;
+
+ if (be_multi_rxq(adapter)) {
+ for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+ for_all_rss_queues(adapter, rxo, i) {
+ if ((j + i) >= 128)
+ break;
+ rsstable[j + i] = rxo->rss_id;
+ }
+ }
+ }
+ status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128);
+ if (!status)
+ adapter->rss_flags = rss_flags;
+
+ return status;
+}
+
+static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
+
+ if (!be_multi_rxq(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "ethtool::set_rxnfc: RX flow hashing is disabled\n");
+ return -EINVAL;
+ }
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ status = be_set_rss_hash_opts(adapter, cmd);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return status;
+}
+
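Both the hunk above and be_rx_qs_create() spread the RSS queue ids round-robin over all 128 indirection-table entries. A self-contained sketch of that fill (names hypothetical):

#include <stdio.h>

#define RSS_TABLE_LEN 128

static void fill_rss_table(unsigned char *table, int num_rss_qs,
			   const unsigned char *rss_ids)
{
	int i, j;

	for (j = 0; j < RSS_TABLE_LEN; j += num_rss_qs)
		for (i = 0; i < num_rss_qs && j + i < RSS_TABLE_LEN; i++)
			table[j + i] = rss_ids[i];
}

int main(void)
{
	unsigned char ids[4] = { 10, 11, 12, 13 };
	unsigned char table[RSS_TABLE_LEN];

	fill_rss_table(table, 4, ids);
	/* prints: 10 11 12 13 10 */
	printf("%u %u %u %u %u\n", table[0], table[1], table[2],
	       table[3], table[4]);
	return 0;
}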
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
@@ -962,4 +1117,6 @@ const struct ethtool_ops be_ethtool_ops = {
.get_regs = be_get_regs,
.flash_device = be_do_flash,
.self_test = be_self_test,
+ .get_rxnfc = be_get_rxnfc,
+ .set_rxnfc = be_set_rxnfc,
};
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 62dc220695f7..3c1099b47f2a 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -72,6 +72,10 @@
*/
#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
+/********* PCI Function Capability *********/
+#define BE_FUNCTION_CAPS_RSS 0x2
+#define BE_FUNCTION_CAPS_SUPER_NIC 0x40
+
/********* Power management (WOL) **********/
#define PCICFG_PM_CONTROL_OFFSET 0x44
#define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */
@@ -495,7 +499,8 @@ struct flash_file_hdr_g3 {
u32 antidote;
u32 num_imgs;
u8 build[24];
- u8 rsvd[32];
+ u8 asic_type_rev;
+ u8 rsvd[31];
};
struct flash_section_hdr {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 08e54f3d288b..4babc8a4a543 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -146,20 +146,16 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
q->entry_size = entry_size;
mem->size = len * entry_size;
mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!mem->va)
return -ENOMEM;
- memset(mem->va, 0, mem->size);
return 0;
}
-static void be_intr_set(struct be_adapter *adapter, bool enable)
+static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
u32 reg, enabled;
- if (adapter->eeh_error)
- return;
-
pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
&reg);
enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
@@ -175,6 +171,22 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
+static void be_intr_set(struct be_adapter *adapter, bool enable)
+{
+ int status = 0;
+
+ /* On lancer, interrupts can't be controlled via this register */
+ if (lancer_chip(adapter))
+ return;
+
+ if (adapter->eeh_error)
+ return;
+
+ status = be_cmd_intr_set(adapter, enable);
+ if (status)
+ be_reg_intr_set(adapter, enable);
+}
+
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
u32 val = 0;
@@ -185,14 +197,15 @@ static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
-static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
+static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
+ u16 posted)
{
u32 val = 0;
- val |= qid & DB_TXULP_RING_ID_MASK;
+ val |= txo->q.id & DB_TXULP_RING_ID_MASK;
val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
wmb();
- iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
+ iowrite32(val, adapter->db + txo->db_offset);
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
@@ -340,9 +353,9 @@ static void populate_be_v0_stats(struct be_adapter *adapter)
drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
drvs->rx_dropped_header_too_small =
port_stats->rx_dropped_header_too_small;
- drvs->rx_address_mismatch_drops =
- port_stats->rx_address_mismatch_drops +
- port_stats->rx_vlan_mismatch_drops;
+ drvs->rx_address_filtered =
+ port_stats->rx_address_filtered +
+ port_stats->rx_vlan_filtered;
drvs->rx_alignment_symbol_errors =
port_stats->rx_alignment_symbol_errors;
@@ -391,7 +404,7 @@ static void populate_be_v1_stats(struct be_adapter *adapter)
port_stats->rx_dropped_header_too_small;
drvs->rx_input_fifo_overflow_drop =
port_stats->rx_input_fifo_overflow_drop;
- drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
+ drvs->rx_address_filtered = port_stats->rx_address_filtered;
drvs->rx_alignment_symbol_errors =
port_stats->rx_alignment_symbol_errors;
drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
@@ -432,9 +445,9 @@ static void populate_lancer_stats(struct be_adapter *adapter)
drvs->rx_dropped_header_too_small =
pport_stats->rx_dropped_header_too_small;
drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
- drvs->rx_address_mismatch_drops =
- pport_stats->rx_address_mismatch_drops +
- pport_stats->rx_vlan_mismatch_drops;
+ drvs->rx_address_filtered =
+ pport_stats->rx_address_filtered +
+ pport_stats->rx_vlan_filtered;
drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
@@ -626,13 +639,8 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
return vlan_tag;
}
-static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
-{
- return vlan_tx_tag_present(skb) || adapter->pvid;
-}
-
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
- struct sk_buff *skb, u32 wrb_cnt, u32 len)
+ struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
u16 vlan_tag;
@@ -659,8 +667,9 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
}
+ /* To skip HW VLAN tagging: evt = 1, compl = 0 */
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
@@ -683,7 +692,8 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
}
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
- struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
+ struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
+ bool skip_hw_vlan)
{
dma_addr_t busaddr;
int i, copied = 0;
@@ -732,7 +742,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
queue_head_inc(txq);
}
- wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
+ wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
be_dws_cpu_to_le(hdr, sizeof(*hdr));
return copied;
@@ -749,7 +759,8 @@ dma_err:
}
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ bool *skip_hw_vlan)
{
u16 vlan_tag = 0;
@@ -759,13 +770,72 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
if (vlan_tx_tag_present(skb)) {
vlan_tag = be_get_tx_vlan_tag(adapter, skb);
- __vlan_put_tag(skb, vlan_tag);
+ skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ if (skb)
+ skb->vlan_tci = 0;
+ }
+
+ if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
+ if (!vlan_tag)
+ vlan_tag = adapter->pvid;
+ if (skip_hw_vlan)
+ *skip_hw_vlan = true;
+ }
+
+ if (vlan_tag) {
+ skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ if (unlikely(!skb))
+ return skb;
+
skb->vlan_tci = 0;
}
+ /* Insert the outer VLAN, if any */
+ if (adapter->qnq_vid) {
+ vlan_tag = adapter->qnq_vid;
+ skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ if (unlikely(!skb))
+ return skb;
+ if (skip_hw_vlan)
+ *skip_hw_vlan = true;
+ }
+
return skb;
}
+static bool be_ipv6_exthdr_check(struct sk_buff *skb)
+{
+ struct ethhdr *eh = (struct ethhdr *)skb->data;
+ u16 offset = ETH_HLEN;
+
+ if (eh->h_proto == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
+
+ offset += sizeof(struct ipv6hdr);
+ if (ip6h->nexthdr != NEXTHDR_TCP &&
+ ip6h->nexthdr != NEXTHDR_UDP) {
+ struct ipv6_opt_hdr *ehdr =
+ (struct ipv6_opt_hdr *) (skb->data + offset);
+
+ /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
+ if (ehdr->hdrlen == 0xff)
+ return true;
+ }
+ }
+ return false;
+}
+
+static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
+{
+ return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
+}
+
+static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
+{
+ return BE3_chip(adapter) &&
+ be_ipv6_exthdr_check(skb);
+}
+
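be_ipv6_exthdr_check() flags frames whose first IPv6 option header carries an hdrlen byte of 0xff, the pattern that can stall BE3 when HW VLAN tagging is on. A standalone version of the same probe over a raw Ethernet frame, with the constants restated locally for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define ETH_HLEN_	14
#define ETH_P_IPV6_	0x86DD
#define NEXTHDR_TCP_	6
#define NEXTHDR_UDP_	17
#define IPV6_HLEN_	40

static bool ipv6_exthdr_check(const uint8_t *frame, size_t len)
{
	uint16_t proto;
	uint8_t nexthdr;

	if (len < ETH_HLEN_ + IPV6_HLEN_ + 2)
		return false;
	proto = (frame[12] << 8) | frame[13];	/* EtherType */
	if (proto != ETH_P_IPV6_)
		return false;
	nexthdr = frame[ETH_HLEN_ + 6];		/* ipv6hdr->nexthdr */
	if (nexthdr == NEXTHDR_TCP_ || nexthdr == NEXTHDR_UDP_)
		return false;
	/* offending pkt: 2nd byte of the first option header is 0xff */
	return frame[ETH_HLEN_ + IPV6_HLEN_ + 1] == 0xff;
}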
static netdev_tx_t be_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
@@ -776,33 +846,64 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
u32 wrb_cnt = 0, copied = 0;
u32 start = txq->head, eth_hdr_len;
bool dummy_wrb, stopped = false;
+ bool skip_hw_vlan = false;
+ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
VLAN_ETH_HLEN : ETH_HLEN;
- /* HW has a bug which considers padding bytes as legal
- * and modifies the IPv4 hdr's 'tot_len' field
+ /* For padded packets, BE HW modifies tot_len field in IP header
+ * incorrectly when VLAN tag is inserted by HW.
*/
- if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
- is_ipv4_pkt(skb)) {
+ if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
ip = (struct iphdr *)ip_hdr(skb);
pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
}
+ /* If vlan tag is already inlined in the packet, skip HW VLAN
+ * tagging in UMC mode
+ */
+ if ((adapter->function_mode & UMC_ENABLED) &&
+ veh->h_vlan_proto == htons(ETH_P_8021Q))
+ skip_hw_vlan = true;
+
/* HW has a bug wherein it will calculate CSUM for VLAN
* pkts even though it is disabled.
* Manually insert VLAN in pkt.
*/
if (skb->ip_summed != CHECKSUM_PARTIAL &&
- be_vlan_tag_chk(adapter, skb)) {
- skb = be_insert_vlan_in_pkt(adapter, skb);
+ vlan_tx_tag_present(skb)) {
+ skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
+ if (unlikely(!skb))
+ goto tx_drop;
+ }
+
+ /* HW may lock up when VLAN HW tagging is requested on
+ * certain ipv6 packets. Drop such pkts if the HW workaround to
+ * skip HW tagging is not enabled by FW.
+ */
+ if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
+ (adapter->pvid || adapter->qnq_vid) &&
+ !qnq_async_evt_rcvd(adapter)))
+ goto tx_drop;
+
+ /* Manual VLAN tag insertion to prevent ASIC lockup
+ * when the ASIC inserts a VLAN tag into certain ipv6
+ * packets. Insert VLAN tags in the driver, and set the
+ * event, completion and vlan bits accordingly in the
+ * Tx WRB.
+ */
+ if (be_ipv6_tx_stall_chk(adapter, skb) &&
+ be_vlan_tag_tx_chk(adapter, skb)) {
+ skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
if (unlikely(!skb))
goto tx_drop;
}
wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
- copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
+ copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
+ skip_hw_vlan);
if (copied) {
int gso_segs = skb_shinfo(skb)->gso_segs;
@@ -821,7 +922,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
stopped = true;
}
- be_txq_notify(adapter, txq->id, wrb_cnt);
+ be_txq_notify(adapter, txo, wrb_cnt);
be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
} else {
@@ -890,7 +991,7 @@ set_vlan_promisc:
return status;
}
-static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
+static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
@@ -916,7 +1017,7 @@ ret:
return status;
}
-static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
+static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
@@ -1371,7 +1472,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo,
if (rxcp->vlanf)
- __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
netif_receive_skb(skb);
}
@@ -1427,7 +1528,7 @@ void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
skb->rxhash = rxcp->rss_hash;
if (rxcp->vlanf)
- __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
napi_gro_frags(napi);
}
@@ -1957,7 +2058,7 @@ static int be_tx_qs_create(struct be_adapter *adapter)
if (status)
return status;
- status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
+ status = be_cmd_txq_create(adapter, txo);
if (status)
return status;
}
@@ -2435,9 +2536,6 @@ static int be_close(struct net_device *netdev)
be_roce_dev_close(adapter);
- if (!lancer_chip(adapter))
- be_intr_set(adapter, false);
-
for_all_evt_queues(adapter, eqo, i)
napi_disable(&eqo->napi);
@@ -2499,9 +2597,19 @@ static int be_rx_qs_create(struct be_adapter *adapter)
rsstable[j + i] = rxo->rss_id;
}
}
- rc = be_cmd_rss_config(adapter, rsstable, 128);
- if (rc)
+ adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
+ RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
+
+ if (!BEx_chip(adapter))
+ adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
+ RSS_ENABLE_UDP_IPV6;
+
+ rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
+ 128);
+ if (rc) {
+ adapter->rss_flags = 0;
return rc;
+ }
}
/* First time posting */
@@ -2525,9 +2633,6 @@ static int be_open(struct net_device *netdev)
be_irq_register(adapter);
- if (!lancer_chip(adapter))
- be_intr_set(adapter, true);
-
for_all_rx_queues(adapter, rxo, i)
be_cq_notify(adapter, rxo->cq.id, true, 0);
@@ -2562,10 +2667,9 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (cmd.va == NULL)
return -1;
- memset(cmd.va, 0, cmd.size);
if (enable) {
status = pci_write_config_dword(adapter->pdev,
@@ -2713,7 +2817,8 @@ static int be_vfs_if_create(struct be_adapter *adapter)
for_all_vfs(adapter, vf_cfg, vf) {
if (!BE3_chip(adapter))
- be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
+ be_cmd_get_profile_config(adapter, &cap_flags,
+ NULL, vf + 1);
/* If a FW profile exists, then cap_flags are updated */
en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
@@ -2877,11 +2982,14 @@ static void be_get_resources(struct be_adapter *adapter)
u16 dev_num_vfs;
int pos, status;
bool profile_present = false;
+ u16 txq_count = 0;
if (!BEx_chip(adapter)) {
status = be_cmd_get_func_config(adapter);
if (!status)
profile_present = true;
+ } else if (BE3_chip(adapter) && be_physfn(adapter)) {
+ be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
}
if (profile_present) {
@@ -2919,7 +3027,9 @@ static void be_get_resources(struct be_adapter *adapter)
adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
adapter->max_mcast_mac = BE_MAX_MC;
- adapter->max_tx_queues = MAX_TX_QS;
+ adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
+ adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
+ MAX_TX_QS);
adapter->max_rss_queues = (adapter->be3_native) ?
BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
adapter->max_event_queues = BE3_MAX_RSS_QS;
@@ -2953,7 +3063,8 @@ static int be_get_config(struct be_adapter *adapter)
status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
&adapter->function_mode,
- &adapter->function_caps);
+ &adapter->function_caps,
+ &adapter->asic_rev);
if (status)
goto err;
@@ -3214,7 +3325,7 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
return 0;
}
-/* For BE2 and BE3 */
+/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
const struct firmware *fw,
struct be_dma_mem *flash_cmd,
@@ -3457,11 +3568,9 @@ static int lancer_fw_download(struct be_adapter *adapter,
flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
+ LANCER_FW_DOWNLOAD_CHUNK;
flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
- &flash_cmd.dma, GFP_KERNEL);
+ &flash_cmd.dma, GFP_KERNEL);
if (!flash_cmd.va) {
status = -ENOMEM;
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure while flashing\n");
goto lancer_fw_exit;
}
@@ -3529,18 +3638,22 @@ lancer_fw_exit:
#define UFI_TYPE2 2
#define UFI_TYPE3 3
+#define UFI_TYPE3R 10
#define UFI_TYPE4 4
static int be_get_ufi_type(struct be_adapter *adapter,
- struct flash_file_hdr_g2 *fhdr)
+ struct flash_file_hdr_g3 *fhdr)
{
if (fhdr == NULL)
goto be_get_ufi_exit;
if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
return UFI_TYPE4;
- else if (BE3_chip(adapter) && fhdr->build[0] == '3')
- return UFI_TYPE3;
- else if (BE2_chip(adapter) && fhdr->build[0] == '2')
+ else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
+ if (fhdr->asic_type_rev == 0x10)
+ return UFI_TYPE3R;
+ else
+ return UFI_TYPE3;
+ } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
return UFI_TYPE2;
be_get_ufi_exit:
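With BE3-R sharing the '3' build digit, the selection now also keys off asic_type_rev. An illustrative-only mapping of the same decision (plain ints stand in for the UFI_TYPE* macros):

enum chip { CHIP_BE2, CHIP_BE3, CHIP_SKYHAWK };

static int ufi_type(enum chip chip, char build0, unsigned int asic_type_rev)
{
	if (chip == CHIP_SKYHAWK && build0 == '4')
		return 4;				/* UFI_TYPE4 */
	if (chip == CHIP_BE3 && build0 == '3')
		return asic_type_rev == 0x10 ? 10 : 3;	/* 3R vs plain 3 */
	if (chip == CHIP_BE2 && build0 == '2')
		return 2;				/* UFI_TYPE2 */
	return -1;					/* unknown image */
}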
@@ -3551,7 +3664,6 @@ be_get_ufi_exit:
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
- struct flash_file_hdr_g2 *fhdr;
struct flash_file_hdr_g3 *fhdr3;
struct image_hdr *img_hdr_ptr = NULL;
struct be_dma_mem flash_cmd;
@@ -3563,29 +3675,41 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
&flash_cmd.dma, GFP_KERNEL);
if (!flash_cmd.va) {
status = -ENOMEM;
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure while flashing\n");
goto be_fw_exit;
}
p = fw->data;
- fhdr = (struct flash_file_hdr_g2 *)p;
+ fhdr3 = (struct flash_file_hdr_g3 *)p;
- ufi_type = be_get_ufi_type(adapter, fhdr);
+ ufi_type = be_get_ufi_type(adapter, fhdr3);
- fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
num_imgs = le32_to_cpu(fhdr3->num_imgs);
for (i = 0; i < num_imgs; i++) {
img_hdr_ptr = (struct image_hdr *)(fw->data +
(sizeof(struct flash_file_hdr_g3) +
i * sizeof(struct image_hdr)));
if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
- if (ufi_type == UFI_TYPE4)
+ switch (ufi_type) {
+ case UFI_TYPE4:
status = be_flash_skyhawk(adapter, fw,
&flash_cmd, num_imgs);
- else if (ufi_type == UFI_TYPE3)
+ break;
+ case UFI_TYPE3R:
status = be_flash_BEx(adapter, fw, &flash_cmd,
num_imgs);
+ break;
+ case UFI_TYPE3:
+ /* Do not flash this ufi on BE3-R cards */
+ if (adapter->asic_rev < 0x10)
+ status = be_flash_BEx(adapter, fw,
+ &flash_cmd,
+ num_imgs);
+ else {
+ status = -1;
+ dev_err(&adapter->pdev->dev,
+ "Can't load BE3 UFI on BE3R\n");
+ }
+ }
}
}
@@ -3662,12 +3786,12 @@ static void be_netdev_init(struct net_device *netdev)
netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_TX;
+ NETIF_F_HW_VLAN_CTAG_TX;
if (be_multi_rxq(adapter))
netdev->hw_features |= NETIF_F_RXHASH;
netdev->features |= netdev->hw_features |
- NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -3791,12 +3915,13 @@ static int be_ctrl_init(struct be_adapter *adapter)
rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
- &rx_filter->dma, GFP_KERNEL);
+ &rx_filter->dma,
+ GFP_KERNEL | __GFP_ZERO);
if (rx_filter->va == NULL) {
status = -ENOMEM;
goto free_mbox;
}
- memset(rx_filter->va, 0, rx_filter->size);
+
mutex_init(&adapter->mbox_lock);
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
@@ -3838,10 +3963,9 @@ static int be_stats_init(struct be_adapter *adapter)
cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (cmd->va == NULL)
return -1;
- memset(cmd->va, 0, cmd->size);
return 0;
}
@@ -3853,6 +3977,7 @@ static void be_remove(struct pci_dev *pdev)
return;
be_roce_dev_remove(adapter);
+ be_intr_set(adapter, false);
cancel_delayed_work_sync(&adapter->func_recovery_work);
@@ -4107,6 +4232,11 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!status) {
+ status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (status < 0) {
+ dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
+ goto free_netdev;
+ }
netdev->features |= NETIF_F_HIGHDMA;
} else {
status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
@@ -4131,22 +4261,22 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
goto ctrl_clean;
}
- /* tell fw we're ready to fire cmds */
- status = be_cmd_fw_init(adapter);
- if (status)
- goto ctrl_clean;
-
if (be_reset_required(adapter)) {
status = be_cmd_reset_function(adapter);
if (status)
goto ctrl_clean;
+
+ /* Wait for interrupts to quiesce after an FLR */
+ msleep(100);
}
- /* The INTR bit may be set in the card when probed by a kdump kernel
- * after a crash.
- */
- if (!lancer_chip(adapter))
- be_intr_set(adapter, false);
+ /* Allow interrupts for other ULPs running on NIC function */
+ be_intr_set(adapter, true);
+
+ /* tell fw we're ready to fire cmds */
+ status = be_cmd_fw_init(adapter);
+ if (status)
+ goto ctrl_clean;
status = be_stats_init(adapter);
if (status)
@@ -4357,12 +4487,12 @@ static void be_eeh_resume(struct pci_dev *pdev)
pci_save_state(pdev);
- /* tell fw we're ready to fire cmds */
- status = be_cmd_fw_init(adapter);
+ status = be_cmd_reset_function(adapter);
if (status)
goto err;
- status = be_cmd_reset_function(adapter);
+ /* tell fw we're ready to fire cmds */
+ status = be_cmd_fw_init(adapter);
if (status)
goto err;
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 55d32aa0a093..f3d126dcc104 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index db4ea8081c07..276572998463 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 7c361d1db94c..21b85fb7d05f 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -780,12 +780,11 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
priv->descs = dma_alloc_coherent(priv->dev,
sizeof(struct ftgmac100_descs),
- &priv->descs_dma_addr, GFP_KERNEL);
+ &priv->descs_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
if (!priv->descs)
return -ENOMEM;
- memset(priv->descs, 0, sizeof(struct ftgmac100_descs));
-
/* initialize RX ring */
ftgmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
@@ -1350,22 +1349,7 @@ static struct platform_driver ftgmac100_driver = {
},
};
-/******************************************************************************
- * initialization / finalization
- *****************************************************************************/
-static int __init ftgmac100_init(void)
-{
- pr_info("Loading version " DRV_VERSION " ...\n");
- return platform_driver_register(&ftgmac100_driver);
-}
-
-static void __exit ftgmac100_exit(void)
-{
- platform_driver_unregister(&ftgmac100_driver);
-}
-
-module_init(ftgmac100_init);
-module_exit(ftgmac100_exit);
+module_platform_driver(ftgmac100_driver);
MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
MODULE_DESCRIPTION("FTGMAC100 driver");
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index b5ea8fbd8a76..a6eda8d83138 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -732,13 +732,13 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv)
{
int i;
- priv->descs = dma_alloc_coherent(priv->dev, sizeof(struct ftmac100_descs),
- &priv->descs_dma_addr, GFP_KERNEL);
+ priv->descs = dma_alloc_coherent(priv->dev,
+ sizeof(struct ftmac100_descs),
+ &priv->descs_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
if (!priv->descs)
return -ENOMEM;
- memset(priv->descs, 0, sizeof(struct ftmac100_descs));
-
/* initialize RX ring */
ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index b7d58fe6f531..549ce13b92ac 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -2,7 +2,8 @@
# Makefile for the Freescale network device drivers.
#
-obj-$(CONFIG_FEC) += fec.o fec_ptp.o
+obj-$(CONFIG_FEC) += fec.o
+fec-objs := fec_main.o fec_ptp.o
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index eb4372962839..d44f65bac1d4 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -52,6 +52,7 @@
#define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */
#define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */
#define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */
+#define FEC_RACC 0x1C4 /* Receive Accelerator function */
#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
@@ -164,9 +165,11 @@ struct bufdesc_ex {
#define BD_ENET_TX_CSL ((ushort)0x0001)
#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */
-/*enhanced buffer desciptor control/status used by Ethernet transmit*/
+/*enhanced buffer descriptor control/status used by Ethernet transmit*/
#define BD_ENET_TX_INT 0x40000000
#define BD_ENET_TX_TS 0x20000000
+#define BD_ENET_TX_PINS 0x10000000
+#define BD_ENET_TX_IINS 0x08000000
/* This device has up to three irqs on some platforms */
@@ -190,6 +193,10 @@ struct bufdesc_ex {
#define BD_ENET_RX_INT 0x00800000
#define BD_ENET_RX_PTP ((ushort)0x0400)
+#define BD_ENET_RX_ICE 0x00000020
+#define BD_ENET_RX_PCR 0x00000010
+#define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
+#define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
* tx_bd_base always point to the base of the buffer descriptors. The
@@ -247,6 +254,7 @@ struct fec_enet_private {
int pause_flag;
struct napi_struct napi;
+ int csum_flags;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec_main.c
index 911d0253dbb2..b9748f14ea78 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -29,12 +29,17 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
@@ -53,11 +58,6 @@
#include <asm/cacheflush.h>
-#ifndef CONFIG_ARM
-#include <asm/coldfire.h>
-#include <asm/mcfsim.h>
-#endif
-
#include "fec.h"
#if defined(CONFIG_ARM)
@@ -107,6 +107,9 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX,
}, {
+ .name = "mvf-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC,
+ }, {
/* sentinel */
}
};
@@ -117,6 +120,7 @@ enum imx_fec_type {
IMX27_FEC, /* runs on i.mx27/35/51 */
IMX28_FEC,
IMX6Q_FEC,
+ MVF_FEC,
};
static const struct of_device_id fec_dt_ids[] = {
@@ -124,6 +128,7 @@ static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
+ { .compatible = "fsl,mvf-fec", .data = &fec_devtype[MVF_FEC], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -177,6 +182,11 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define PKT_MINBUF_SIZE 64
#define PKT_MAXBLR_SIZE 1520
+/* FEC receive acceleration */
+#define FEC_RACC_IPDIS (1 << 1)
+#define FEC_RACC_PRODIS (1 << 2)
+#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
+
/*
* The 5270/5271/5280/5282/532x RX control register also contains maximum frame
* size bits. Other FEC hardware does not, so we need to take that into
@@ -237,6 +247,21 @@ static void *swap_buffer(void *bufaddr, int len)
return bufaddr;
}
+static int
+fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
+{
+ /* Only run for packets requiring a checksum. */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (unlikely(skb_cow_head(skb, 0)))
+ return -1;
+
+ *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
+
+ return 0;
+}
+
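For CHECKSUM_PARTIAL packets the stack leaves the L4 checksum field to be filled in later; this controller computes it in hardware but expects the field pre-zeroed, which is what the helper above does via csum_start/csum_offset. A standalone illustration on a plain byte buffer:

#include <stdint.h>
#include <string.h>

static void clear_l4_csum(uint8_t *frame, unsigned int csum_start,
			  unsigned int csum_offset)
{
	uint16_t zero = 0;

	/* HW computes the checksum itself; it must start from zero */
	memcpy(frame + csum_start + csum_offset, &zero, sizeof(zero));
}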
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
@@ -249,7 +274,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
unsigned int index;
if (!fep->link) {
- /* Link is down or autonegotiation is in progress. */
+ /* Link is down or auto-negotiation is in progress. */
return NETDEV_TX_BUSY;
}
@@ -262,10 +287,16 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Oops. All transmit buffers are full. Bail out.
* This should not happen, since ndev->tbusy should be set.
*/
- printk("%s: tx queue full!.\n", ndev->name);
+ netdev_err(ndev, "tx queue full!\n");
return NETDEV_TX_BUSY;
}
+ /* Protocol checksum off-load for TCP and UDP. */
+ if (fec_enet_clear_csum(skb, ndev)) {
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
/* Clear all of the status flags */
status &= ~BD_ENET_TX_STATS;
@@ -322,8 +353,14 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
} else {
-
ebdp->cbd_esc = BD_ENET_TX_INT;
+
+ /* Enable protocol checksum flags
+ * We do not bother with the IP Checksum bits as they
+ * are done by the kernel
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ ebdp->cbd_esc |= BD_ENET_TX_PINS;
}
}
/* If this was the last BD in the ring, start at the beginning again. */
@@ -345,6 +382,53 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
+/* Init RX & TX buffer descriptors
+ */
+static void fec_enet_bd_init(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ struct bufdesc *bdp;
+ unsigned int i;
+
+ /* Initialize the receive buffer descriptors. */
+ bdp = fep->rx_bd_base;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ if (bdp->cbd_bufaddr)
+ bdp->cbd_sc = BD_ENET_RX_EMPTY;
+ else
+ bdp->cbd_sc = 0;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ }
+
+ /* Set the last buffer to wrap */
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ fep->cur_rx = fep->rx_bd_base;
+
+ /* ...and the same for transmit */
+ bdp = fep->tx_bd_base;
+ fep->cur_tx = bdp;
+ for (i = 0; i < TX_RING_SIZE; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ bdp->cbd_sc = 0;
+ if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
+ dev_kfree_skb_any(fep->tx_skbuff[i]);
+ fep->tx_skbuff[i] = NULL;
+ }
+ bdp->cbd_bufaddr = 0;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ }
+
+ /* Set the last buffer to wrap */
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp->cbd_sc |= BD_SC_WRAP;
+ fep->dirty_tx = bdp;
+}
+
/* This function is called to start or restart the FEC during a link
* change. This only happens when switching between half and full
* duplex.
@@ -356,6 +440,7 @@ fec_restart(struct net_device *ndev, int duplex)
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
int i;
+ u32 val;
u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04;
u32 ecntl = 0x2; /* ETHEREN */
@@ -388,6 +473,8 @@ fec_restart(struct net_device *ndev, int duplex)
/* Set maximum receive buffer size. */
writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
+ fec_enet_bd_init(ndev);
+
/* Set receive and transmit descriptor base. */
writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
if (fep->bufdesc_ex)
@@ -397,7 +484,6 @@ fec_restart(struct net_device *ndev, int duplex)
writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
- fep->cur_rx = fep->rx_bd_base;
for (i = 0; i <= TX_RING_MOD_MASK; i++) {
if (fep->tx_skbuff[i]) {
@@ -421,6 +507,14 @@ fec_restart(struct net_device *ndev, int duplex)
/* Set MII speed */
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+ /* set RX checksum */
+ val = readl(fep->hwp + FEC_RACC);
+ if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
+ val |= FEC_RACC_OPTIONS;
+ else
+ val &= ~FEC_RACC_OPTIONS;
+ writel(val, fep->hwp + FEC_RACC);
+
/*
* The phy interface and speed need to get configured
* differently on enet-mac.
@@ -478,7 +572,7 @@ fec_restart(struct net_device *ndev, int duplex)
fep->phy_dev && fep->phy_dev->pause)) {
rcntl |= FEC_ENET_FCE;
- /* set FIFO thresh hold parameter to reduce overrun */
+ /* set FIFO threshold parameter to reduce overrun */
writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
@@ -526,7 +620,7 @@ fec_stop(struct net_device *ndev)
writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
udelay(10);
if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
- printk("fec_stop : Graceful transmit stop did not complete !\n");
+ netdev_err(ndev, "Graceful transmit stop did not complete!\n");
}
/* Whack a reset. We should wait for this. */
@@ -624,7 +718,7 @@ fec_enet_tx(struct net_device *ndev)
}
if (status & BD_ENET_TX_READY)
- printk("HEY! Enet xmit interrupt and TX_READY.\n");
+ netdev_err(ndev, "HEY! Enet xmit interrupt and TX_READY\n");
/* Deferred means some collisions occurred during transmit,
* but we eventually sent the packet OK.
@@ -692,7 +786,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
* the last indicator should be set.
*/
if ((status & BD_ENET_RX_LAST) == 0)
- printk("FEC ENET: rcv is not +last\n");
+ netdev_err(ndev, "rcv is not +last\n");
if (!fep->opened)
goto rx_processing_done;
@@ -743,8 +837,6 @@ fec_enet_rx(struct net_device *ndev, int budget)
skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);
if (unlikely(!skb)) {
- printk("%s: Memory squeeze, dropping packet.\n",
- ndev->name);
ndev->stats.rx_dropped++;
} else {
skb_reserve(skb, NET_IP_ALIGN);
@@ -768,6 +860,18 @@ fec_enet_rx(struct net_device *ndev, int budget)
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}
+ if (fep->bufdesc_ex &&
+ (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
+ struct bufdesc_ex *ebdp =
+ (struct bufdesc_ex *)bdp;
+ if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
+ /* don't check it */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb_checksum_none_assert(skb);
+ }
+ }
+
if (!skb_defer_rx_timestamp(skb))
napi_gro_receive(&fep->napi, skb);
}
@@ -868,7 +972,6 @@ static void fec_get_mac(struct net_device *ndev)
*/
iap = macaddr;
-#ifdef CONFIG_OF
/*
* 2) from device tree data
*/
@@ -880,7 +983,6 @@ static void fec_get_mac(struct net_device *ndev)
iap = (unsigned char *) mac;
}
}
-#endif
/*
* 3) from flash or fuse (via platform data)
@@ -954,6 +1056,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
} else {
if (fep->link) {
fec_stop(ndev);
+ fep->link = phy_dev->link;
status_change = 1;
}
}
@@ -983,7 +1086,7 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
usecs_to_jiffies(FEC_MII_TIMEOUT));
if (time_left == 0) {
fep->mii_timeout = 1;
- printk(KERN_ERR "FEC: MDIO read timeout\n");
+ netdev_err(fep->netdev, "MDIO read timeout\n");
return -ETIMEDOUT;
}
@@ -1011,7 +1114,7 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
usecs_to_jiffies(FEC_MII_TIMEOUT));
if (time_left == 0) {
fep->mii_timeout = 1;
- printk(KERN_ERR "FEC: MDIO write timeout\n");
+ netdev_err(fep->netdev, "MDIO write timeout\n");
return -ETIMEDOUT;
}
@@ -1051,9 +1154,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
}
if (phy_id >= PHY_MAX_ADDR) {
- printk(KERN_INFO
- "%s: no PHY, assuming direct connection to switch\n",
- ndev->name);
+ netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
phy_id = 0;
}
@@ -1062,7 +1163,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
fep->phy_interface);
if (IS_ERR(phy_dev)) {
- printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
+ netdev_err(ndev, "could not attach to PHY\n");
return PTR_ERR(phy_dev);
}
@@ -1080,11 +1181,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
fep->link = 0;
fep->full_duplex = 0;
- printk(KERN_INFO
- "%s: Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
- ndev->name,
- fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
- fep->phy_dev->irq);
+ netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
+ fep->phy_dev->irq);
return 0;
}
@@ -1394,7 +1493,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
if (fep->bufdesc_ex) {
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
- ebdp->cbd_esc = BD_ENET_RX_INT;
+ ebdp->cbd_esc = BD_ENET_TX_INT;
}
bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
@@ -1559,7 +1658,7 @@ fec_set_mac_address(struct net_device *ndev, void *p)
* Polled functionality used by netconsole and others in non interrupt mode
*
*/
-void fec_poll_controller(struct net_device *dev)
+static void fec_poll_controller(struct net_device *dev)
{
int i;
struct fec_enet_private *fep = netdev_priv(dev);
@@ -1574,6 +1673,33 @@ void fec_poll_controller(struct net_device *dev)
}
#endif
+static int fec_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct fec_enet_private *fep = netdev_priv(netdev);
+ netdev_features_t changed = features ^ netdev->features;
+
+ netdev->features = features;
+
+ /* Receive checksum has been changed */
+ if (changed & NETIF_F_RXCSUM) {
+ if (features & NETIF_F_RXCSUM)
+ fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+ else
+ fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
+
+ if (netif_running(netdev)) {
+ fec_stop(netdev);
+ fec_restart(netdev, fep->phy_dev->duplex);
+ netif_wake_queue(netdev);
+ } else {
+ fec_restart(netdev, fep->phy_dev->duplex);
+ }
+ }
+
+ return 0;
+}
+
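
fec_set_features() is reached through .ndo_set_features below whenever userspace toggles an offload, e.g. `ethtool -K eth0 rx off`. The XOR against netdev->features isolates what actually changed, and the restart is needed because the FEC checksum engine is programmed in fec_restart(). The general driver pattern, sketched with hypothetical foo_* names:

	static int foo_set_features(struct net_device *dev,
				    netdev_features_t features)
	{
		netdev_features_t changed = dev->features ^ features;

		dev->features = features;
		if (changed & NETIF_F_RXCSUM)
			/* reprogram the NIC to match the new RXCSUM state */
			foo_hw_set_rx_csum(dev, features & NETIF_F_RXCSUM);
		return 0;
	}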
static const struct net_device_ops fec_netdev_ops = {
.ndo_open = fec_enet_open,
.ndo_stop = fec_enet_close,
@@ -1587,6 +1713,7 @@ static const struct net_device_ops fec_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = fec_poll_controller,
#endif
+ .ndo_set_features = fec_set_features,
};
/*
@@ -1597,17 +1724,14 @@ static int fec_enet_init(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct bufdesc *cbd_base;
- struct bufdesc *bdp;
- unsigned int i;
/* Allocate memory for buffer descriptors. */
cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
- GFP_KERNEL);
- if (!cbd_base) {
- printk("FEC: allocate descriptor memory failed?\n");
+ GFP_KERNEL);
+ if (!cbd_base)
return -ENOMEM;
- }
+ memset(cbd_base, 0, PAGE_SIZE);
spin_lock_init(&fep->hw_lock);
fep->netdev = ndev;
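
A hedged aside: the dma_alloc_coherent() + memset() pair above could equally be written as one zeroed allocation, assuming dma_zalloc_coherent() is available in this tree:

	cbd_base = dma_zalloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
				       GFP_KERNEL);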
@@ -1631,34 +1755,12 @@ static int fec_enet_init(struct net_device *ndev)
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
- /* Initialize the receive buffer descriptors. */
- bdp = fep->rx_bd_base;
- for (i = 0; i < RX_RING_SIZE; i++) {
-
- /* Initialize the BD for every fragment in the page. */
- bdp->cbd_sc = 0;
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
- }
-
- /* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
- bdp->cbd_sc |= BD_SC_WRAP;
-
- /* ...and the same for transmit */
- bdp = fep->tx_bd_base;
- fep->cur_tx = bdp;
- for (i = 0; i < TX_RING_SIZE; i++) {
-
- /* Initialize the BD for every fragment in the page. */
- bdp->cbd_sc = 0;
- bdp->cbd_bufaddr = 0;
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
- }
-
- /* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
- bdp->cbd_sc |= BD_SC_WRAP;
- fep->dirty_tx = bdp;
+ /* enable hw accelerator */
+ ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+ | NETIF_F_RXCSUM);
+ ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+ | NETIF_F_RXCSUM);
+ fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
fec_restart(ndev, 0);
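
The distinction matters here: ndev->features is the set currently enabled, while ndev->hw_features is the set userspace may toggle through ethtool. Advertising NETIF_F_RXCSUM in both turns it on by default and makes the fec_set_features() path above reachable. The ring-initialization loops removed here are presumably redundant because fec_restart(), called just below, (re)initializes the descriptor rings.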
@@ -1666,16 +1768,6 @@ static int fec_enet_init(struct net_device *ndev)
}
#ifdef CONFIG_OF
-static int fec_get_phy_mode_dt(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
-
- if (np)
- return of_get_phy_mode(np);
-
- return -ENODEV;
-}
-
static void fec_reset_phy(struct platform_device *pdev)
{
int err, phy_reset;
@@ -1704,11 +1796,6 @@ static void fec_reset_phy(struct platform_device *pdev)
gpio_set_value(phy_reset, 1);
}
#else /* CONFIG_OF */
-static int fec_get_phy_mode_dt(struct platform_device *pdev)
-{
- return -ENODEV;
-}
-
static void fec_reset_phy(struct platform_device *pdev)
{
/*
@@ -1739,16 +1826,10 @@ fec_probe(struct platform_device *pdev)
if (!r)
return -ENXIO;
- r = request_mem_region(r->start, resource_size(r), pdev->name);
- if (!r)
- return -EBUSY;
-
/* Init network device */
ndev = alloc_etherdev(sizeof(struct fec_enet_private));
- if (!ndev) {
- ret = -ENOMEM;
- goto failed_alloc_etherdev;
- }
+ if (!ndev)
+ return -ENOMEM;
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -1760,7 +1841,7 @@ fec_probe(struct platform_device *pdev)
(pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
- fep->hwp = ioremap(r->start, resource_size(r));
+ fep->hwp = devm_request_and_ioremap(&pdev->dev, r);
fep->pdev = pdev;
fep->dev_id = dev_id++;
@@ -1773,7 +1854,7 @@ fec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
- ret = fec_get_phy_mode_dt(pdev);
+ ret = of_get_phy_mode(pdev->dev.of_node);
if (ret < 0) {
pdata = pdev->dev.platform_data;
if (pdata)
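
of_get_phy_mode() can now be called directly because its !CONFIG_OF stub in <linux/of_net.h> already returns a negative errno, which is all the removed fec_get_phy_mode_dt() wrapper added. On failure the probe falls back to platform data, roughly:

	ret = of_get_phy_mode(pdev->dev.of_node);
	if (ret < 0) {	/* no DT node or no phy-mode property */
		pdata = pdev->dev.platform_data;
		fep->phy_interface = pdata ? pdata->phy
					   : PHY_INTERFACE_MODE_MII;
	} else {
		fep->phy_interface = ret;
	}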
@@ -1863,6 +1944,9 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_register;
+ if (fep->bufdesc_ex && fep->ptp_clock)
+ netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
+
return 0;
failed_register:
@@ -1882,11 +1966,8 @@ failed_regulator:
clk_disable_unprepare(fep->clk_ptp);
failed_pin:
failed_clk:
- iounmap(fep->hwp);
failed_ioremap:
free_netdev(ndev);
-failed_alloc_etherdev:
- release_mem_region(r->start, resource_size(r));
return ret;
}
@@ -1896,7 +1977,6 @@ fec_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
- struct resource *r;
int i;
unregister_netdev(ndev);
@@ -1912,19 +1992,14 @@ fec_drv_remove(struct platform_device *pdev)
if (irq > 0)
free_irq(irq, ndev);
}
- iounmap(fep->hwp);
free_netdev(ndev);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- BUG_ON(!r);
- release_mem_region(r->start, resource_size(r));
-
platform_set_drvdata(pdev, NULL);
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int
fec_suspend(struct device *dev)
{
@@ -1956,24 +2031,15 @@ fec_resume(struct device *dev)
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
-static const struct dev_pm_ops fec_pm_ops = {
- .suspend = fec_suspend,
- .resume = fec_resume,
- .freeze = fec_suspend,
- .thaw = fec_resume,
- .poweroff = fec_suspend,
- .restore = fec_resume,
-};
-#endif
+static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
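
SIMPLE_DEV_PM_OPS covers exactly the six system-sleep callbacks the removed table filled in by hand, and defines an empty table when CONFIG_PM_SLEEP is off, which is also why the #ifdef around .pm below can go. With CONFIG_PM_SLEEP enabled it expands to roughly:

	static const struct dev_pm_ops fec_pm_ops = {
		.suspend  = fec_suspend,  .resume  = fec_resume,
		.freeze   = fec_suspend,  .thaw    = fec_resume,
		.poweroff = fec_suspend,  .restore = fec_resume,
	};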
static struct platform_driver fec_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &fec_pm_ops,
-#endif
.of_match_table = fec_dt_ids,
},
.id_table = fec_devtype,
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 77943a6a1b8c..9bc15e2365bb 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -14,6 +14,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/dma-mapping.h>
#include <linux/module.h>
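
Defining pr_fmt before the includes makes every pr_*() call in this file prefix its output with the module name; <linux/printk.h> only provides a fallback definition, so the file-local one wins:

	/* printk.h: #define pr_err(fmt, ...) \
	 *               printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
	 * With pr_fmt as above, pr_err("Can not init SDMA tasks\n")
	 * prints "fec_mpc52xx: Can not init SDMA tasks", replacing the
	 * hand-rolled KERN_ERR DRIVER_NAME strings removed below.
	 */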
@@ -858,13 +860,11 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* Reserve FEC control zone */
rv = of_address_to_resource(np, 0, &mem);
if (rv) {
- printk(KERN_ERR DRIVER_NAME ": "
- "Error while parsing device node resource\n" );
+ pr_err("Error while parsing device node resource\n");
goto err_netdev;
}
if (resource_size(&mem) < sizeof(struct mpc52xx_fec)) {
- printk(KERN_ERR DRIVER_NAME
- " - invalid resource size (%lx < %x), check mpc52xx_devices.c\n",
+ pr_err("invalid resource size (%lx < %x), check mpc52xx_devices.c\n",
(unsigned long)resource_size(&mem),
sizeof(struct mpc52xx_fec));
rv = -EINVAL;
@@ -902,7 +902,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
priv->tx_dmatsk = bcom_fec_tx_init(FEC_TX_NUM_BD, tx_fifo);
if (!priv->rx_dmatsk || !priv->tx_dmatsk) {
- printk(KERN_ERR DRIVER_NAME ": Can not init SDMA tasks\n" );
+ pr_err("Can not init SDMA tasks\n");
rv = -ENOMEM;
goto err_rx_tx_dmatsk;
}
@@ -982,8 +982,8 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* We're done ! */
dev_set_drvdata(&op->dev, ndev);
- printk(KERN_INFO "%s: %s MAC %pM\n",
- ndev->name, op->dev.of_node->full_name, ndev->dev_addr);
+ netdev_info(ndev, "%s MAC %pM\n",
+ op->dev.of_node->full_name, ndev->dev_addr);
return 0;
@@ -1094,7 +1094,7 @@ mpc52xx_fec_init(void)
int ret;
ret = platform_driver_register(&mpc52xx_fec_mdio_driver);
if (ret) {
- printk(KERN_ERR DRIVER_NAME ": failed to register mdio driver\n");
+ pr_err("failed to register mdio driver\n");
return ret;
}
#endif
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 0d8df400a479..25fc960cbf0e 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -17,6 +17,8 @@
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -128,7 +130,6 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}
-EXPORT_SYMBOL(fec_ptp_start_cyclecounter);
/**
* fec_ptp_adjfreq - adjust ptp cycle frequency
@@ -319,7 +320,6 @@ int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
-EXPORT_SYMBOL(fec_ptp_ioctl);
/**
* fec_time_keep - call timecounter_read every second to avoid timer overrun
@@ -381,8 +381,5 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
if (IS_ERR(fep->ptp_clock)) {
fep->ptp_clock = NULL;
pr_err("ptp_clock_register failed\n");
- } else {
- pr_info("registered PHC device on %s\n", ndev->name);
}
}
-EXPORT_SYMBOL(fec_ptp_init);
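
The EXPORT_SYMBOL lines can go because fec_ptp.o is linked into the same module as the core FEC driver, so no cross-module references remain. The "registered PHC device" message also moves to fec_probe() (see the fec.c hunk above), likely because ndev->name is not final until register_netdev() has run.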
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 46df28893c10..edc120094c34 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -177,8 +177,6 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
received++;
netif_receive_skb(skb);
} else {
- dev_warn(fep->dev,
- "Memory squeeze, dropping packet.\n");
fep->stats.rx_dropped++;
skbn = skb;
}
@@ -309,8 +307,6 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
received++;
netif_rx(skb);
} else {
- dev_warn(fep->dev,
- "Memory squeeze, dropping packet.\n");
fep->stats.rx_dropped++;
skbn = skb;
}
@@ -505,11 +501,9 @@ void fs_init_bds(struct net_device *dev)
*/
for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
- if (skb == NULL) {
- dev_warn(fep->dev,
- "Memory squeeze, unable to allocate skb\n");
+ if (skb == NULL)
break;
- }
+
skb_align(skb, ENET_RX_ALIGN);
fep->rx_skbuff[i] = skb;
CBDW_BUFADDR(bdp,
@@ -593,13 +587,8 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
/* Alloc new skb */
new_skb = netdev_alloc_skb(dev, skb->len + 4);
- if (!new_skb) {
- if (net_ratelimit()) {
- dev_warn(fep->dev,
- "Memory squeeze, dropping tx packet.\n");
- }
+ if (!new_skb)
return NULL;
- }
/* Make sure new skb is properly aligned */
skb_align(new_skb, 4);
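
These "Memory squeeze" messages are dropped because a failed skb allocation already triggers the allocator's own out-of-memory warning and stack dump, so the per-driver text only duplicated it; the rx paths still account the event in rx_dropped.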
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d2c5441d1bf0..2375a01715a0 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -132,7 +132,7 @@ static int gfar_poll(struct napi_struct *napi, int budget);
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
-static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
+static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
int amount_pull, struct napi_struct *napi);
void gfar_halt(struct net_device *dev);
@@ -245,14 +245,13 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
/* Allocate memory for the buffer descriptors */
vaddr = dma_alloc_coherent(dev,
- sizeof(struct txbd8) * priv->total_tx_ring_size +
- sizeof(struct rxbd8) * priv->total_rx_ring_size,
- &addr, GFP_KERNEL);
- if (!vaddr) {
- netif_err(priv, ifup, ndev,
- "Could not allocate buffer descriptors!\n");
+ (priv->total_tx_ring_size *
+ sizeof(struct txbd8)) +
+ (priv->total_rx_ring_size *
+ sizeof(struct rxbd8)),
+ &addr, GFP_KERNEL);
+ if (!vaddr)
return -ENOMEM;
- }
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
@@ -342,7 +341,7 @@ static void gfar_init_mac(struct net_device *ndev)
gfar_init_tx_rx_base(priv);
/* Configure the coalescing support */
- gfar_configure_coalescing(priv, 0xFF, 0xFF);
+ gfar_configure_coalescing_all(priv);
/* set this when rx hw offload (TOE) functions are being used */
priv->uses_rxfcb = 0;
@@ -387,7 +386,7 @@ static void gfar_init_mac(struct net_device *ndev)
priv->uses_rxfcb = 1;
}
- if (ndev->features & NETIF_F_HW_VLAN_RX) {
+ if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
priv->uses_rxfcb = 1;
}
@@ -691,7 +690,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
}
for (i = 0; i < priv->num_tx_queues; i++)
- priv->tx_queue[i] = NULL;
+ priv->tx_queue[i] = NULL;
for (i = 0; i < priv->num_rx_queues; i++)
priv->rx_queue[i] = NULL;
@@ -1051,8 +1050,9 @@ static int gfar_probe(struct platform_device *ofdev)
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
- dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- dev->features |= NETIF_F_HW_VLAN_RX;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
@@ -1817,25 +1817,15 @@ void gfar_start(struct net_device *dev)
dev->trans_start = jiffies; /* prevent tx timeout */
}
-void gfar_configure_coalescing(struct gfar_private *priv,
+static void gfar_configure_coalescing(struct gfar_private *priv,
unsigned long tx_mask, unsigned long rx_mask)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 __iomem *baddr;
- int i = 0;
-
- /* Backward compatible case ---- even if we enable
- * multiple queues, there's only single reg to program
- */
- gfar_write(&regs->txic, 0);
- if (likely(priv->tx_queue[0]->txcoalescing))
- gfar_write(&regs->txic, priv->tx_queue[0]->txic);
-
- gfar_write(&regs->rxic, 0);
- if (unlikely(priv->rx_queue[0]->rxcoalescing))
- gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
if (priv->mode == MQ_MG_MODE) {
+ int i = 0;
+
baddr = &regs->txic0;
for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
gfar_write(baddr + i, 0);
@@ -1849,9 +1839,25 @@ void gfar_configure_coalescing(struct gfar_private *priv,
if (likely(priv->rx_queue[i]->rxcoalescing))
gfar_write(baddr + i, priv->rx_queue[i]->rxic);
}
+ } else {
+ /* Backward compatible case -- even if we enable
+ * multiple queues, there's only single reg to program
+ */
+ gfar_write(&regs->txic, 0);
+ if (likely(priv->tx_queue[0]->txcoalescing))
+ gfar_write(&regs->txic, priv->tx_queue[0]->txic);
+
+ gfar_write(&regs->rxic, 0);
+ if (unlikely(priv->rx_queue[0]->rxcoalescing))
+ gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
}
}
+void gfar_configure_coalescing_all(struct gfar_private *priv)
+{
+ gfar_configure_coalescing(priv, 0xFF, 0xFF);
+}
+
static int register_grp_irqs(struct gfar_priv_grp *grp)
{
struct gfar_private *priv = grp->priv;
@@ -1941,7 +1947,7 @@ int startup_gfar(struct net_device *ndev)
phy_start(priv->phydev);
- gfar_configure_coalescing(priv, 0xFF, 0xFF);
+ gfar_configure_coalescing_all(priv);
return 0;
@@ -2343,7 +2349,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
local_irq_save(flags);
lock_rx_qs(priv);
- if (features & NETIF_F_HW_VLAN_TX) {
+ if (features & NETIF_F_HW_VLAN_CTAG_TX) {
/* Enable VLAN tag insertion */
tempval = gfar_read(&regs->tctrl);
tempval |= TCTRL_VLINS;
@@ -2355,7 +2361,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
gfar_write(&regs->tctrl, tempval);
}
- if (features & NETIF_F_HW_VLAN_RX) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX) {
/* Enable VLAN tag extraction */
tempval = gfar_read(&regs->rctrl);
tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
@@ -2469,12 +2475,11 @@ static void gfar_align_skb(struct sk_buff *skb)
}
/* Interrupt Handler for Transmit complete */
-static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
+static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
struct net_device *dev = tx_queue->dev;
struct netdev_queue *txq;
struct gfar_private *priv = netdev_priv(dev);
- struct gfar_priv_rx_q *rx_queue = NULL;
struct txbd8 *bdp, *next = NULL;
struct txbd8 *lbdp = NULL;
struct txbd8 *base = tx_queue->tx_bd_base;
@@ -2489,7 +2494,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
u32 lstatus;
size_t buflen;
- rx_queue = priv->rx_queue[tqi];
txq = netdev_get_tx_queue(dev, tqi);
bdp = tx_queue->dirty_tx;
skb_dirtytx = tx_queue->skb_dirtytx;
@@ -2571,8 +2575,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
tx_queue->dirty_tx = bdp;
netdev_tx_completed_queue(txq, howmany, bytes_sent);
-
- return howmany;
}
static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
@@ -2694,8 +2696,6 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
struct gfar_private *priv = netdev_priv(dev);
struct rxfcb *fcb = NULL;
- gro_result_t ret;
-
/* fcb is at the beginning if exists */
fcb = (struct rxfcb *)skb->data;
@@ -2725,19 +2725,17 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* Tell the skb what kind of packet this is */
skb->protocol = eth_type_trans(skb, dev);
- /* There's need to check for NETIF_F_HW_VLAN_RX here.
+ /* There's a need to check for NETIF_F_HW_VLAN_CTAG_RX here.
* Even if vlan rx accel is disabled, on some chips
* RXFCB_VLN is pseudo randomly set.
*/
- if (dev->features & NETIF_F_HW_VLAN_RX &&
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
fcb->flags & RXFCB_VLN)
- __vlan_hwaccel_put_tag(skb, fcb->vlctl);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);
/* Send the packet up the stack */
- ret = napi_gro_receive(napi, skb);
+ napi_gro_receive(napi, skb);
- if (unlikely(GRO_DROP == ret))
- atomic64_inc(&priv->extra_stats.kernel_dropped);
}
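
Two API changes meet in this hunk: the VLAN feature flags gain a CTAG infix (NETIF_F_HW_VLAN_RX becomes NETIF_F_HW_VLAN_CTAG_RX) to make room for 802.1ad S-tag offloads, and __vlan_hwaccel_put_tag() now takes the tag protocol explicitly, in network byte order:

	/* old: __vlan_hwaccel_put_tag(skb, fcb->vlctl); */
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);

gianfar's hardware only strips C-tags, hence the fixed ETH_P_8021Q.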
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
@@ -2835,62 +2833,82 @@ static int gfar_poll(struct napi_struct *napi, int budget)
struct gfar __iomem *regs = gfargrp->regs;
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
- int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
- int tx_cleaned = 0, i, left_over_budget = budget;
- unsigned long serviced_queues = 0;
- int num_queues = 0;
-
- num_queues = gfargrp->num_rx_queues;
- budget_per_queue = budget/num_queues;
+ int work_done = 0, work_done_per_q = 0;
+ int i, budget_per_q = 0;
+ int has_tx_work;
+ unsigned long rstat_rxf;
+ int num_act_queues;
/* Clear IEVENT, so interrupts aren't called again
* because of the packets that have already arrived
*/
gfar_write(&regs->ievent, IEVENT_RTX_MASK);
- while (num_queues && left_over_budget) {
- budget_per_queue = left_over_budget/num_queues;
- left_over_budget = 0;
+ rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
+
+ num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
+ if (num_act_queues)
+ budget_per_q = budget/num_act_queues;
+
+ while (1) {
+ has_tx_work = 0;
+ for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
+ tx_queue = priv->tx_queue[i];
+ /* run Tx cleanup to completion */
+ if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
+ gfar_clean_tx_ring(tx_queue);
+ has_tx_work = 1;
+ }
+ }
for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
- if (test_bit(i, &serviced_queues))
+ /* skip queue if not active */
+ if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
continue;
+
rx_queue = priv->rx_queue[i];
- tx_queue = priv->tx_queue[rx_queue->qindex];
-
- tx_cleaned += gfar_clean_tx_ring(tx_queue);
- rx_cleaned_per_queue =
- gfar_clean_rx_ring(rx_queue, budget_per_queue);
- rx_cleaned += rx_cleaned_per_queue;
- if (rx_cleaned_per_queue < budget_per_queue) {
- left_over_budget = left_over_budget +
- (budget_per_queue -
- rx_cleaned_per_queue);
- set_bit(i, &serviced_queues);
- num_queues--;
+ work_done_per_q =
+ gfar_clean_rx_ring(rx_queue, budget_per_q);
+ work_done += work_done_per_q;
+
+ /* finished processing this queue */
+ if (work_done_per_q < budget_per_q) {
+ /* clear active queue hw indication */
+ gfar_write(&regs->rstat,
+ RSTAT_CLEAR_RXF0 >> i);
+ rstat_rxf &= ~(RSTAT_CLEAR_RXF0 >> i);
+ num_act_queues--;
+
+ if (!num_act_queues)
+ break;
+ /* recompute budget per Rx queue */
+ budget_per_q =
+ (budget - work_done) / num_act_queues;
}
}
- }
- if (tx_cleaned)
- return budget;
+ if (work_done >= budget)
+ break;
- if (rx_cleaned < budget) {
- napi_complete(napi);
+ if (!num_act_queues && !has_tx_work) {
- /* Clear the halt bit in RSTAT */
- gfar_write(&regs->rstat, gfargrp->rstat);
+ napi_complete(napi);
- gfar_write(&regs->imask, IMASK_DEFAULT);
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&regs->rstat, gfargrp->rstat);
- /* If we are coalescing interrupts, update the timer
- * Otherwise, clear it
- */
- gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
- gfargrp->tx_bit_map);
+ gfar_write(&regs->imask, IMASK_DEFAULT);
+
+ /* If we are coalescing interrupts, update the timer
+ * Otherwise, clear it
+ */
+ gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
+ gfargrp->tx_bit_map);
+ break;
+ }
}
- return rx_cleaned;
+ return work_done;
}
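
The reworked gfar_poll() first runs Tx cleanup to completion for every queue in the group, then splits the NAPI budget evenly across only the Rx queues that RSTAT marks active, re-spreading leftover budget as queues finish. Condensed, using the names from the hunk (illustrative):

	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
	budget_per_q = num_act_queues ? budget / num_act_queues : 0;

	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
			continue;	/* no pending frames on queue i */
		work_done += gfar_clean_rx_ring(priv->rx_queue[i],
						budget_per_q);
		/* queues that finish under budget return their share
		 * to the remaining active queues */
	}

NAPI is completed only when no Rx queue is active and no Tx work was found; otherwise the poll returns with the budget consumed and is rescheduled.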
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 63a28d294e20..04b552cd419d 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -291,7 +291,9 @@ extern const char gfar_driver_version[];
#define RCTRL_PADDING(x) ((x << 16) & RCTRL_PAL_MASK)
-#define RSTAT_CLEAR_RHALT 0x00800000
+#define RSTAT_CLEAR_RHALT 0x00800000
+#define RSTAT_CLEAR_RXF0 0x00000080
+#define RSTAT_RXF_MASK 0x000000ff
#define TCTRL_IPCSEN 0x00004000
#define TCTRL_TUCSEN 0x00002000
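
These two new definitions back the polling rework above: RSTAT_RXF_MASK covers the per-queue RXF status bits, so bitmap_weight() over it counts the Rx queues with pending frames, and (RSTAT_CLEAR_RXF0 >> i) selects queue i's bit both for testing and for writing back to RSTAT to acknowledge it.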
@@ -627,7 +629,6 @@ struct rmon_mib
};
struct gfar_extra_stats {
- atomic64_t kernel_dropped;
atomic64_t rx_large;
atomic64_t rx_short;
atomic64_t rx_nonoctet;
@@ -1180,8 +1181,7 @@ extern void stop_gfar(struct net_device *dev);
extern void gfar_halt(struct net_device *dev);
extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
int enable, u32 regnum, u32 read);
-extern void gfar_configure_coalescing(struct gfar_private *priv,
- unsigned long tx_mask, unsigned long rx_mask);
+extern void gfar_configure_coalescing_all(struct gfar_private *priv);
void gfar_init_sysfs(struct net_device *dev);
int gfar_set_features(struct net_device *dev, netdev_features_t features);
extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 75e89acf4912..21cd88124ca9 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -66,7 +66,6 @@ static void gfar_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo);
static const char stat_gstrings[][ETH_GSTRING_LEN] = {
- "rx-dropped-by-kernel",
"rx-large-frame-errors",
"rx-short-frame-errors",
"rx-non-octet-errors",
@@ -390,14 +389,14 @@ static int gfar_scoalesce(struct net_device *dev,
/* Check the bounds of the values */
if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
- pr_info("Coalescing is limited to %d microseconds\n",
- GFAR_MAX_COAL_USECS);
+ netdev_info(dev, "Coalescing is limited to %d microseconds\n",
+ GFAR_MAX_COAL_USECS);
return -EINVAL;
}
if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
- pr_info("Coalescing is limited to %d frames\n",
- GFAR_MAX_COAL_FRAMES);
+ netdev_info(dev, "Coalescing is limited to %d frames\n",
+ GFAR_MAX_COAL_FRAMES);
return -EINVAL;
}
@@ -419,14 +418,14 @@ static int gfar_scoalesce(struct net_device *dev,
/* Check the bounds of the values */
if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
- pr_info("Coalescing is limited to %d microseconds\n",
- GFAR_MAX_COAL_USECS);
+ netdev_info(dev, "Coalescing is limited to %d microseconds\n",
+ GFAR_MAX_COAL_USECS);
return -EINVAL;
}
if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
- pr_info("Coalescing is limited to %d frames\n",
- GFAR_MAX_COAL_FRAMES);
+ netdev_info(dev, "Coalescing is limited to %d frames\n",
+ GFAR_MAX_COAL_FRAMES);
return -EINVAL;
}
@@ -436,7 +435,7 @@ static int gfar_scoalesce(struct net_device *dev,
gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
}
- gfar_configure_coalescing(priv, 0xFF, 0xFF);
+ gfar_configure_coalescing_all(priv);
return 0;
}
@@ -543,7 +542,7 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
int err = 0, i = 0;
netdev_features_t changed = dev->features ^ features;
- if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
+ if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
gfar_vlan_mode(dev, features);
if (!(changed & NETIF_F_RXCSUM))
@@ -736,7 +735,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP;
break;
default:
- pr_err("Right now this class is not supported\n");
+ netdev_err(priv->ndev,
+ "Right now this class is not supported\n");
ret = 0;
goto err;
}
@@ -752,7 +752,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
}
if (i == MAX_FILER_IDX + 1) {
- pr_err("No parse rule found, can't create hash rules\n");
+ netdev_err(priv->ndev,
+ "No parse rule found, can't create hash rules\n");
ret = 0;
goto err;
}
@@ -1569,7 +1570,7 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
gfar_cluster_filer(tab);
gfar_optimize_filer_masks(tab);
- pr_debug("\n\tSummary:\n"
+ pr_debug("\tSummary:\n"
"\tData on hardware: %d\n"
"\tCompression rate: %d%%\n",
tab->index, 100 - (100 * tab->index) / i);
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 2e5daee0438a..576e4b858fce 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -17,6 +17,9 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/device.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
@@ -127,7 +130,6 @@ struct gianfar_ptp_registers {
#define DRIVER "gianfar_ptp"
#define DEFAULT_CKSEL 1
-#define N_ALARM 1 /* first alarm is used internally to reset fipers */
#define N_EXT_TS 2
#define REG_SIZE sizeof(struct gianfar_ptp_registers)
@@ -410,7 +412,7 @@ static struct ptp_clock_info ptp_gianfar_caps = {
.owner = THIS_MODULE,
.name = "gianfar clock",
.max_adj = 512000,
- .n_alarm = N_ALARM,
+ .n_alarm = 0,
.n_ext_ts = N_EXT_TS,
.n_per_out = 0,
.pps = 1,
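
n_alarm drops to 0 because, per the removed N_ALARM comment, the first hardware alarm is consumed internally to reset the fipers, so no alarm can honestly be offered to userspace through the PTP clock API.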
diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c
index cd14a4d449c2..acb55af7e3f3 100644
--- a/drivers/net/ethernet/freescale/gianfar_sysfs.c
+++ b/drivers/net/ethernet/freescale/gianfar_sysfs.c
@@ -337,5 +337,5 @@ void gfar_init_sysfs(struct net_device *dev)
rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve);
rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve_off);
if (rc)
- dev_err(&dev->dev, "Error creating gianfar sysfs files.\n");
+ dev_err(&dev->dev, "Error creating gianfar sysfs files\n");
}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 0a70bb55d1b0..e04c59818f60 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -12,6 +12,9 @@
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
@@ -50,12 +53,6 @@
#define ugeth_dbg(format, arg...) \
ugeth_printk(KERN_DEBUG , format , ## arg)
-#define ugeth_err(format, arg...) \
- ugeth_printk(KERN_ERR , format , ## arg)
-#define ugeth_info(format, arg...) \
- ugeth_printk(KERN_INFO , format , ## arg)
-#define ugeth_warn(format, arg...) \
- ugeth_printk(KERN_WARNING , format , ## arg)
#ifdef UGETH_VERBOSE_DEBUG
#define ugeth_vdbg ugeth_dbg
@@ -281,7 +278,7 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
for (i = 0; i < num_entries; i++) {
if ((snum = qe_get_snum()) < 0) {
if (netif_msg_ifup(ugeth))
- ugeth_err("fill_init_enet_entries: Can not get SNUM.");
+ pr_err("Can not get SNUM\n");
return snum;
}
if ((i == 0) && skip_page_for_first_entry)
@@ -292,7 +289,7 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
qe_muram_alloc(thread_size, thread_alignment);
if (IS_ERR_VALUE(init_enet_offset)) {
if (netif_msg_ifup(ugeth))
- ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory.");
+ pr_err("Can not allocate DPRAM memory\n");
qe_put_snum((u8) snum);
return -ENOMEM;
}
@@ -365,10 +362,9 @@ static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
init_enet_offset =
(in_be32(p_start) &
ENET_INIT_PARAM_PTR_MASK);
- ugeth_info("Init enet entry %d:", i);
- ugeth_info("Base address: 0x%08x",
- (u32)
- qe_muram_addr(init_enet_offset));
+ pr_info("Init enet entry %d:\n", i);
+ pr_info("Base address: 0x%08x\n",
+ (u32)qe_muram_addr(init_enet_offset));
mem_disp(qe_muram_addr(init_enet_offset),
thread_size);
}
@@ -396,8 +392,8 @@ static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
{
struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
- if (!(paddr_num < NUM_OF_PADDRS)) {
- ugeth_warn("%s: Illagel paddr_num.", __func__);
+ if (paddr_num >= NUM_OF_PADDRS) {
+ pr_warn("%s: Invalid paddr_num: %u\n", __func__, paddr_num);
return -EINVAL;
}
@@ -573,7 +569,7 @@ static void dump_bds(struct ucc_geth_private *ugeth)
length =
(ugeth->ug_info->bdRingLenTx[i] *
sizeof(struct qe_bd));
- ugeth_info("TX BDs[%d]", i);
+ pr_info("TX BDs[%d]\n", i);
mem_disp(ugeth->p_tx_bd_ring[i], length);
}
}
@@ -582,7 +578,7 @@ static void dump_bds(struct ucc_geth_private *ugeth)
length =
(ugeth->ug_info->bdRingLenRx[i] *
sizeof(struct qe_bd));
- ugeth_info("RX BDs[%d]", i);
+ pr_info("RX BDs[%d]\n", i);
mem_disp(ugeth->p_rx_bd_ring[i], length);
}
}
@@ -592,93 +588,93 @@ static void dump_regs(struct ucc_geth_private *ugeth)
{
int i;
- ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num + 1);
- ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);
-
- ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->maccfg1,
- in_be32(&ugeth->ug_regs->maccfg1));
- ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->maccfg2,
- in_be32(&ugeth->ug_regs->maccfg2));
- ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->ipgifg,
- in_be32(&ugeth->ug_regs->ipgifg));
- ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->hafdup,
- in_be32(&ugeth->ug_regs->hafdup));
- ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->ifctl,
- in_be32(&ugeth->ug_regs->ifctl));
- ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->ifstat,
- in_be32(&ugeth->ug_regs->ifstat));
- ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->macstnaddr1,
- in_be32(&ugeth->ug_regs->macstnaddr1));
- ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->macstnaddr2,
- in_be32(&ugeth->ug_regs->macstnaddr2));
- ugeth_info("uempr : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->uempr,
- in_be32(&ugeth->ug_regs->uempr));
- ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->utbipar,
- in_be32(&ugeth->ug_regs->utbipar));
- ugeth_info("uescr : addr - 0x%08x, val - 0x%04x",
- (u32) & ugeth->ug_regs->uescr,
- in_be16(&ugeth->ug_regs->uescr));
- ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->tx64,
- in_be32(&ugeth->ug_regs->tx64));
- ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->tx127,
- in_be32(&ugeth->ug_regs->tx127));
- ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->tx255,
- in_be32(&ugeth->ug_regs->tx255));
- ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->rx64,
- in_be32(&ugeth->ug_regs->rx64));
- ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->rx127,
- in_be32(&ugeth->ug_regs->rx127));
- ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->rx255,
- in_be32(&ugeth->ug_regs->rx255));
- ugeth_info("txok : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->txok,
- in_be32(&ugeth->ug_regs->txok));
- ugeth_info("txcf : addr - 0x%08x, val - 0x%04x",
- (u32) & ugeth->ug_regs->txcf,
- in_be16(&ugeth->ug_regs->txcf));
- ugeth_info("tmca : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->tmca,
- in_be32(&ugeth->ug_regs->tmca));
- ugeth_info("tbca : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->tbca,
- in_be32(&ugeth->ug_regs->tbca));
- ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->rxfok,
- in_be32(&ugeth->ug_regs->rxfok));
- ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->rxbok,
- in_be32(&ugeth->ug_regs->rxbok));
- ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->rbyt,
- in_be32(&ugeth->ug_regs->rbyt));
- ugeth_info("rmca : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->rmca,
- in_be32(&ugeth->ug_regs->rmca));
- ugeth_info("rbca : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->rbca,
- in_be32(&ugeth->ug_regs->rbca));
- ugeth_info("scar : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->scar,
- in_be32(&ugeth->ug_regs->scar));
- ugeth_info("scam : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->ug_regs->scam,
- in_be32(&ugeth->ug_regs->scam));
+ pr_info("UCC%d Geth registers:\n", ugeth->ug_info->uf_info.ucc_num + 1);
+ pr_info("Base address: 0x%08x\n", (u32)ugeth->ug_regs);
+
+ pr_info("maccfg1 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->maccfg1,
+ in_be32(&ugeth->ug_regs->maccfg1));
+ pr_info("maccfg2 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->maccfg2,
+ in_be32(&ugeth->ug_regs->maccfg2));
+ pr_info("ipgifg : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->ipgifg,
+ in_be32(&ugeth->ug_regs->ipgifg));
+ pr_info("hafdup : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->hafdup,
+ in_be32(&ugeth->ug_regs->hafdup));
+ pr_info("ifctl : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->ifctl,
+ in_be32(&ugeth->ug_regs->ifctl));
+ pr_info("ifstat : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->ifstat,
+ in_be32(&ugeth->ug_regs->ifstat));
+ pr_info("macstnaddr1: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->macstnaddr1,
+ in_be32(&ugeth->ug_regs->macstnaddr1));
+ pr_info("macstnaddr2: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->macstnaddr2,
+ in_be32(&ugeth->ug_regs->macstnaddr2));
+ pr_info("uempr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->uempr,
+ in_be32(&ugeth->ug_regs->uempr));
+ pr_info("utbipar : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->utbipar,
+ in_be32(&ugeth->ug_regs->utbipar));
+ pr_info("uescr : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->ug_regs->uescr,
+ in_be16(&ugeth->ug_regs->uescr));
+ pr_info("tx64 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->tx64,
+ in_be32(&ugeth->ug_regs->tx64));
+ pr_info("tx127 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->tx127,
+ in_be32(&ugeth->ug_regs->tx127));
+ pr_info("tx255 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->tx255,
+ in_be32(&ugeth->ug_regs->tx255));
+ pr_info("rx64 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rx64,
+ in_be32(&ugeth->ug_regs->rx64));
+ pr_info("rx127 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rx127,
+ in_be32(&ugeth->ug_regs->rx127));
+ pr_info("rx255 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rx255,
+ in_be32(&ugeth->ug_regs->rx255));
+ pr_info("txok : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->txok,
+ in_be32(&ugeth->ug_regs->txok));
+ pr_info("txcf : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->ug_regs->txcf,
+ in_be16(&ugeth->ug_regs->txcf));
+ pr_info("tmca : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->tmca,
+ in_be32(&ugeth->ug_regs->tmca));
+ pr_info("tbca : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->tbca,
+ in_be32(&ugeth->ug_regs->tbca));
+ pr_info("rxfok : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rxfok,
+ in_be32(&ugeth->ug_regs->rxfok));
+ pr_info("rxbok : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rxbok,
+ in_be32(&ugeth->ug_regs->rxbok));
+ pr_info("rbyt : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rbyt,
+ in_be32(&ugeth->ug_regs->rbyt));
+ pr_info("rmca : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rmca,
+ in_be32(&ugeth->ug_regs->rmca));
+ pr_info("rbca : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rbca,
+ in_be32(&ugeth->ug_regs->rbca));
+ pr_info("scar : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->scar,
+ in_be32(&ugeth->ug_regs->scar));
+ pr_info("scam : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->scam,
+ in_be32(&ugeth->ug_regs->scam));
if (ugeth->p_thread_data_tx) {
int numThreadsTxNumerical;
@@ -703,13 +699,13 @@ static void dump_regs(struct ucc_geth_private *ugeth)
break;
}
- ugeth_info("Thread data TXs:");
- ugeth_info("Base address: 0x%08x",
- (u32) ugeth->p_thread_data_tx);
+ pr_info("Thread data TXs:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)ugeth->p_thread_data_tx);
for (i = 0; i < numThreadsTxNumerical; i++) {
- ugeth_info("Thread data TX[%d]:", i);
- ugeth_info("Base address: 0x%08x",
- (u32) & ugeth->p_thread_data_tx[i]);
+ pr_info("Thread data TX[%d]:\n", i);
+ pr_info("Base address: 0x%08x\n",
+ (u32)&ugeth->p_thread_data_tx[i]);
mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
sizeof(struct ucc_geth_thread_data_tx));
}
@@ -737,270 +733,260 @@ static void dump_regs(struct ucc_geth_private *ugeth)
break;
}
- ugeth_info("Thread data RX:");
- ugeth_info("Base address: 0x%08x",
- (u32) ugeth->p_thread_data_rx);
+ pr_info("Thread data RX:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)ugeth->p_thread_data_rx);
for (i = 0; i < numThreadsRxNumerical; i++) {
- ugeth_info("Thread data RX[%d]:", i);
- ugeth_info("Base address: 0x%08x",
- (u32) & ugeth->p_thread_data_rx[i]);
+ pr_info("Thread data RX[%d]:\n", i);
+ pr_info("Base address: 0x%08x\n",
+ (u32)&ugeth->p_thread_data_rx[i]);
mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
sizeof(struct ucc_geth_thread_data_rx));
}
}
if (ugeth->p_exf_glbl_param) {
- ugeth_info("EXF global param:");
- ugeth_info("Base address: 0x%08x",
- (u32) ugeth->p_exf_glbl_param);
+ pr_info("EXF global param:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)ugeth->p_exf_glbl_param);
mem_disp((u8 *) ugeth->p_exf_glbl_param,
sizeof(*ugeth->p_exf_glbl_param));
}
if (ugeth->p_tx_glbl_pram) {
- ugeth_info("TX global param:");
- ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
- ugeth_info("temoder : addr - 0x%08x, val - 0x%04x",
- (u32) & ugeth->p_tx_glbl_pram->temoder,
- in_be16(&ugeth->p_tx_glbl_pram->temoder));
- ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_tx_glbl_pram->sqptr,
- in_be32(&ugeth->p_tx_glbl_pram->sqptr));
- ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer,
- in_be32(&ugeth->p_tx_glbl_pram->
- schedulerbasepointer));
- ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr,
- in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
- ugeth_info("tstate : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_tx_glbl_pram->tstate,
- in_be32(&ugeth->p_tx_glbl_pram->tstate));
- ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x",
- (u32) & ugeth->p_tx_glbl_pram->iphoffset[0],
- ugeth->p_tx_glbl_pram->iphoffset[0]);
- ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x",
- (u32) & ugeth->p_tx_glbl_pram->iphoffset[1],
- ugeth->p_tx_glbl_pram->iphoffset[1]);
- ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x",
- (u32) & ugeth->p_tx_glbl_pram->iphoffset[2],
- ugeth->p_tx_glbl_pram->iphoffset[2]);
- ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x",
- (u32) & ugeth->p_tx_glbl_pram->iphoffset[3],
- ugeth->p_tx_glbl_pram->iphoffset[3]);
- ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x",
- (u32) & ugeth->p_tx_glbl_pram->iphoffset[4],
- ugeth->p_tx_glbl_pram->iphoffset[4]);
- ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x",
- (u32) & ugeth->p_tx_glbl_pram->iphoffset[5],
- ugeth->p_tx_glbl_pram->iphoffset[5]);
- ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x",
- (u32) & ugeth->p_tx_glbl_pram->iphoffset[6],
- ugeth->p_tx_glbl_pram->iphoffset[6]);
- ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x",
- (u32) & ugeth->p_tx_glbl_pram->iphoffset[7],
- ugeth->p_tx_glbl_pram->iphoffset[7]);
- ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_tx_glbl_pram->vtagtable[0],
- in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
- ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_tx_glbl_pram->vtagtable[1],
- in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
- ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_tx_glbl_pram->vtagtable[2],
- in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
- ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_tx_glbl_pram->vtagtable[3],
- in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
- ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_tx_glbl_pram->vtagtable[4],
- in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
- ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_tx_glbl_pram->vtagtable[5],
- in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
- ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_tx_glbl_pram->vtagtable[6],
- in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
- ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_tx_glbl_pram->vtagtable[7],
- in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
- ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_tx_glbl_pram->tqptr,
- in_be32(&ugeth->p_tx_glbl_pram->tqptr));
+ pr_info("TX global param:\n");
+ pr_info("Base address: 0x%08x\n", (u32)ugeth->p_tx_glbl_pram);
+ pr_info("temoder : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_tx_glbl_pram->temoder,
+ in_be16(&ugeth->p_tx_glbl_pram->temoder));
+ pr_info("sqptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->sqptr,
+ in_be32(&ugeth->p_tx_glbl_pram->sqptr));
+ pr_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->schedulerbasepointer,
+ in_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer));
+ pr_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->txrmonbaseptr,
+ in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
+ pr_info("tstate : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->tstate,
+ in_be32(&ugeth->p_tx_glbl_pram->tstate));
+ pr_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[0],
+ ugeth->p_tx_glbl_pram->iphoffset[0]);
+ pr_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[1],
+ ugeth->p_tx_glbl_pram->iphoffset[1]);
+ pr_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[2],
+ ugeth->p_tx_glbl_pram->iphoffset[2]);
+ pr_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[3],
+ ugeth->p_tx_glbl_pram->iphoffset[3]);
+ pr_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[4],
+ ugeth->p_tx_glbl_pram->iphoffset[4]);
+ pr_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[5],
+ ugeth->p_tx_glbl_pram->iphoffset[5]);
+ pr_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[6],
+ ugeth->p_tx_glbl_pram->iphoffset[6]);
+ pr_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[7],
+ ugeth->p_tx_glbl_pram->iphoffset[7]);
+ pr_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[0],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
+ pr_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[1],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
+ pr_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[2],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
+ pr_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[3],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
+ pr_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[4],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
+ pr_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[5],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
+ pr_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[6],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
+ pr_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[7],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
+ pr_info("tqptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->tqptr,
+ in_be32(&ugeth->p_tx_glbl_pram->tqptr));
}
if (ugeth->p_rx_glbl_pram) {
- ugeth_info("RX global param:");
- ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
- ugeth_info("remoder : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_glbl_pram->remoder,
- in_be32(&ugeth->p_rx_glbl_pram->remoder));
- ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_glbl_pram->rqptr,
- in_be32(&ugeth->p_rx_glbl_pram->rqptr));
- ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x",
- (u32) & ugeth->p_rx_glbl_pram->typeorlen,
- in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
- ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x",
- (u32) & ugeth->p_rx_glbl_pram->rxgstpack,
- ugeth->p_rx_glbl_pram->rxgstpack);
- ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr,
- in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
- ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr,
- in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
- ugeth_info("rstate : addr - 0x%08x, val - 0x%02x",
- (u32) & ugeth->p_rx_glbl_pram->rstate,
- ugeth->p_rx_glbl_pram->rstate);
- ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x",
- (u32) & ugeth->p_rx_glbl_pram->mrblr,
- in_be16(&ugeth->p_rx_glbl_pram->mrblr));
- ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_glbl_pram->rbdqptr,
- in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
- ugeth_info("mflr : addr - 0x%08x, val - 0x%04x",
- (u32) & ugeth->p_rx_glbl_pram->mflr,
- in_be16(&ugeth->p_rx_glbl_pram->mflr));
- ugeth_info("minflr : addr - 0x%08x, val - 0x%04x",
- (u32) & ugeth->p_rx_glbl_pram->minflr,
- in_be16(&ugeth->p_rx_glbl_pram->minflr));
- ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x",
- (u32) & ugeth->p_rx_glbl_pram->maxd1,
- in_be16(&ugeth->p_rx_glbl_pram->maxd1));
- ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x",
- (u32) & ugeth->p_rx_glbl_pram->maxd2,
- in_be16(&ugeth->p_rx_glbl_pram->maxd2));
- ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_glbl_pram->ecamptr,
- in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
- ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_glbl_pram->l2qt,
- in_be32(&ugeth->p_rx_glbl_pram->l2qt));
- ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_glbl_pram->l3qt[0],
- in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
- ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_glbl_pram->l3qt[1],
- in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
- ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_glbl_pram->l3qt[2],
- in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
- ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_glbl_pram->l3qt[3],
- in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
- ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_glbl_pram->l3qt[4],
- in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
- ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_glbl_pram->l3qt[5],
- in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
- ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_glbl_pram->l3qt[6],
- in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
- ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_glbl_pram->l3qt[7],
- in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
- ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x",
- (u32) & ugeth->p_rx_glbl_pram->vlantype,
- in_be16(&ugeth->p_rx_glbl_pram->vlantype));
- ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x",
- (u32) & ugeth->p_rx_glbl_pram->vlantci,
- in_be16(&ugeth->p_rx_glbl_pram->vlantci));
+ pr_info("RX global param:\n");
+ pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_glbl_pram);
+ pr_info("remoder : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->remoder,
+ in_be32(&ugeth->p_rx_glbl_pram->remoder));
+ pr_info("rqptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->rqptr,
+ in_be32(&ugeth->p_rx_glbl_pram->rqptr));
+ pr_info("typeorlen : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->typeorlen,
+ in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
+ pr_info("rxgstpack : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_rx_glbl_pram->rxgstpack,
+ ugeth->p_rx_glbl_pram->rxgstpack);
+ pr_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->rxrmonbaseptr,
+ in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
+ pr_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->intcoalescingptr,
+ in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
+ pr_info("rstate : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_rx_glbl_pram->rstate,
+ ugeth->p_rx_glbl_pram->rstate);
+ pr_info("mrblr : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->mrblr,
+ in_be16(&ugeth->p_rx_glbl_pram->mrblr));
+ pr_info("rbdqptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->rbdqptr,
+ in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
+ pr_info("mflr : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->mflr,
+ in_be16(&ugeth->p_rx_glbl_pram->mflr));
+ pr_info("minflr : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->minflr,
+ in_be16(&ugeth->p_rx_glbl_pram->minflr));
+ pr_info("maxd1 : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->maxd1,
+ in_be16(&ugeth->p_rx_glbl_pram->maxd1));
+ pr_info("maxd2 : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->maxd2,
+ in_be16(&ugeth->p_rx_glbl_pram->maxd2));
+ pr_info("ecamptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->ecamptr,
+ in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
+ pr_info("l2qt : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l2qt,
+ in_be32(&ugeth->p_rx_glbl_pram->l2qt));
+ pr_info("l3qt[0] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[0],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
+ pr_info("l3qt[1] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[1],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
+ pr_info("l3qt[2] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[2],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
+ pr_info("l3qt[3] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[3],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
+ pr_info("l3qt[4] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[4],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
+ pr_info("l3qt[5] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[5],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
+ pr_info("l3qt[6] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[6],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
+ pr_info("l3qt[7] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[7],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
+ pr_info("vlantype : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->vlantype,
+ in_be16(&ugeth->p_rx_glbl_pram->vlantype));
+ pr_info("vlantci : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->vlantci,
+ in_be16(&ugeth->p_rx_glbl_pram->vlantci));
for (i = 0; i < 64; i++)
- ugeth_info
- ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
- i,
- (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i],
- ugeth->p_rx_glbl_pram->addressfiltering[i]);
- ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam,
- in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
+ pr_info("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x\n",
+ i,
+ (u32)&ugeth->p_rx_glbl_pram->addressfiltering[i],
+ ugeth->p_rx_glbl_pram->addressfiltering[i]);
+ pr_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->exfGlobalParam,
+ in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
}
if (ugeth->p_send_q_mem_reg) {
- ugeth_info("Send Q memory registers:");
- ugeth_info("Base address: 0x%08x",
- (u32) ugeth->p_send_q_mem_reg);
+ pr_info("Send Q memory registers:\n");
+ pr_info("Base address: 0x%08x\n", (u32)ugeth->p_send_q_mem_reg);
for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
- ugeth_info("SQQD[%d]:", i);
- ugeth_info("Base address: 0x%08x",
- (u32) & ugeth->p_send_q_mem_reg->sqqd[i]);
+ pr_info("SQQD[%d]:\n", i);
+ pr_info("Base address: 0x%08x\n",
+ (u32)&ugeth->p_send_q_mem_reg->sqqd[i]);
mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
sizeof(struct ucc_geth_send_queue_qd));
}
}
if (ugeth->p_scheduler) {
- ugeth_info("Scheduler:");
- ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
+ pr_info("Scheduler:\n");
+ pr_info("Base address: 0x%08x\n", (u32)ugeth->p_scheduler);
mem_disp((u8 *) ugeth->p_scheduler,
sizeof(*ugeth->p_scheduler));
}
if (ugeth->p_tx_fw_statistics_pram) {
- ugeth_info("TX FW statistics pram:");
- ugeth_info("Base address: 0x%08x",
- (u32) ugeth->p_tx_fw_statistics_pram);
+ pr_info("TX FW statistics pram:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)ugeth->p_tx_fw_statistics_pram);
mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
sizeof(*ugeth->p_tx_fw_statistics_pram));
}
if (ugeth->p_rx_fw_statistics_pram) {
- ugeth_info("RX FW statistics pram:");
- ugeth_info("Base address: 0x%08x",
- (u32) ugeth->p_rx_fw_statistics_pram);
+ pr_info("RX FW statistics pram:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)ugeth->p_rx_fw_statistics_pram);
mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
sizeof(*ugeth->p_rx_fw_statistics_pram));
}
if (ugeth->p_rx_irq_coalescing_tbl) {
- ugeth_info("RX IRQ coalescing tables:");
- ugeth_info("Base address: 0x%08x",
- (u32) ugeth->p_rx_irq_coalescing_tbl);
+ pr_info("RX IRQ coalescing tables:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)ugeth->p_rx_irq_coalescing_tbl);
for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
- ugeth_info("RX IRQ coalescing table entry[%d]:", i);
- ugeth_info("Base address: 0x%08x",
- (u32) & ugeth->p_rx_irq_coalescing_tbl->
- coalescingentry[i]);
- ugeth_info
- ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_irq_coalescing_tbl->
- coalescingentry[i].interruptcoalescingmaxvalue,
- in_be32(&ugeth->p_rx_irq_coalescing_tbl->
- coalescingentry[i].
- interruptcoalescingmaxvalue));
- ugeth_info
- ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_irq_coalescing_tbl->
- coalescingentry[i].interruptcoalescingcounter,
- in_be32(&ugeth->p_rx_irq_coalescing_tbl->
- coalescingentry[i].
- interruptcoalescingcounter));
+ pr_info("RX IRQ coalescing table entry[%d]:\n", i);
+ pr_info("Base address: 0x%08x\n",
+ (u32)&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i]);
+ pr_info("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].interruptcoalescingmaxvalue,
+ in_be32(&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].
+ interruptcoalescingmaxvalue));
+ pr_info("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].interruptcoalescingcounter,
+ in_be32(&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].
+ interruptcoalescingcounter));
}
}
if (ugeth->p_rx_bd_qs_tbl) {
- ugeth_info("RX BD QS tables:");
- ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
+ pr_info("RX BD QS tables:\n");
+ pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_bd_qs_tbl);
for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
- ugeth_info("RX BD QS table[%d]:", i);
- ugeth_info("Base address: 0x%08x",
- (u32) & ugeth->p_rx_bd_qs_tbl[i]);
- ugeth_info
- ("bdbaseptr : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
- in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
- ugeth_info
- ("bdptr : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr,
- in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
- ugeth_info
- ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
- in_be32(&ugeth->p_rx_bd_qs_tbl[i].
- externalbdbaseptr));
- ugeth_info
- ("externalbdptr : addr - 0x%08x, val - 0x%08x",
- (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
- in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
- ugeth_info("ucode RX Prefetched BDs:");
- ugeth_info("Base address: 0x%08x",
- (u32)
- qe_muram_addr(in_be32
- (&ugeth->p_rx_bd_qs_tbl[i].
- bdbaseptr)));
+ pr_info("RX BD QS table[%d]:\n", i);
+ pr_info("Base address: 0x%08x\n",
+ (u32)&ugeth->p_rx_bd_qs_tbl[i]);
+ pr_info("bdbaseptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
+ pr_info("bdptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_bd_qs_tbl[i].bdptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
+ pr_info("externalbdbaseptr: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].
+ externalbdbaseptr));
+ pr_info("externalbdptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
+ pr_info("ucode RX Prefetched BDs:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)qe_muram_addr(in_be32
+ (&ugeth->p_rx_bd_qs_tbl[i].
+ bdbaseptr)));
mem_disp((u8 *)
qe_muram_addr(in_be32
(&ugeth->p_rx_bd_qs_tbl[i].
@@ -1010,9 +996,9 @@ static void dump_regs(struct ucc_geth_private *ugeth)
}
if (ugeth->p_init_enet_param_shadow) {
int size;
- ugeth_info("Init enet param shadow:");
- ugeth_info("Base address: 0x%08x",
- (u32) ugeth->p_init_enet_param_shadow);
+ pr_info("Init enet param shadow:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32) ugeth->p_init_enet_param_shadow);
mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
sizeof(*ugeth->p_init_enet_param_shadow));
@@ -1392,12 +1378,11 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
struct phy_device *tbiphy;
if (!ug_info->tbi_node)
- ugeth_warn("TBI mode requires that the device "
- "tree specify a tbi-handle\n");
+ pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
tbiphy = of_phy_find_device(ug_info->tbi_node);
if (!tbiphy)
- ugeth_warn("Could not get TBI device\n");
+ pr_warn("Could not get TBI device\n");
value = phy_read(tbiphy, ENET_TBI_MII_CR);
value &= ~0x1000; /* Turn off autonegotiation */
@@ -1409,8 +1394,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
if (ret_val != 0) {
if (netif_msg_probe(ugeth))
- ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.",
- __func__);
+ pr_err("Preamble length must be between 3 and 7 inclusive\n");
return ret_val;
}
@@ -1520,7 +1504,7 @@ static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
/* check if the UCC number is in range. */
if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
if (netif_msg_probe(ugeth))
- ugeth_err("%s: ucc_num out of range.", __func__);
+ pr_err("ucc_num out of range\n");
return -EINVAL;
}
@@ -1549,7 +1533,7 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
/* check if the UCC number is in range. */
if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
if (netif_msg_probe(ugeth))
- ugeth_err("%s: ucc_num out of range.", __func__);
+ pr_err("ucc_num out of range\n");
return -EINVAL;
}
@@ -1648,7 +1632,7 @@ static void adjust_link(struct net_device *dev)
break;
default:
if (netif_msg_link(ugeth))
- ugeth_warn(
- "%s: Ack! Speed (%d) is not 10/100/1000!",
- dev->name, phydev->speed);
+ pr_warn("%s: Ack! Speed (%d) is not 10/100/1000!\n",
+ dev->name, phydev->speed);
break;
@@ -2103,8 +2087,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
(uf_info->bd_mem_part == MEM_PART_MURAM))) {
if (netif_msg_probe(ugeth))
- ugeth_err("%s: Bad memory partition value.",
- __func__);
+ pr_err("Bad memory partition value\n");
return -EINVAL;
}
@@ -2114,9 +2097,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
(ug_info->bdRingLenRx[i] %
UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
if (netif_msg_probe(ugeth))
- ugeth_err
- ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.",
- __func__);
+ pr_err("Rx BD ring length must be multiple of 4, no smaller than 8\n");
return -EINVAL;
}
}
@@ -2125,9 +2106,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
for (i = 0; i < ug_info->numQueuesTx; i++) {
if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
if (netif_msg_probe(ugeth))
- ugeth_err
- ("%s: Tx BD ring length must be no smaller than 2.",
- __func__);
+ pr_err("Tx BD ring length must be no smaller than 2\n");
return -EINVAL;
}
}
@@ -2136,23 +2115,21 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
if ((uf_info->max_rx_buf_length == 0) ||
(uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
if (netif_msg_probe(ugeth))
- ugeth_err
- ("%s: max_rx_buf_length must be non-zero multiple of 128.",
- __func__);
+ pr_err("max_rx_buf_length must be non-zero multiple of 128\n");
return -EINVAL;
}
/* num Tx queues */
if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
if (netif_msg_probe(ugeth))
- ugeth_err("%s: number of tx queues too large.", __func__);
+ pr_err("number of tx queues too large\n");
return -EINVAL;
}
/* num Rx queues */
if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
if (netif_msg_probe(ugeth))
- ugeth_err("%s: number of rx queues too large.", __func__);
+ pr_err("number of rx queues too large\n");
return -EINVAL;
}
@@ -2160,10 +2137,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
if (netif_msg_probe(ugeth))
- ugeth_err
- ("%s: VLAN priority table entry must not be"
- " larger than number of Rx queues.",
- __func__);
+ pr_err("VLAN priority table entry must not be larger than number of Rx queues\n");
return -EINVAL;
}
}
@@ -2172,18 +2146,14 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
if (netif_msg_probe(ugeth))
- ugeth_err
- ("%s: IP priority table entry must not be"
- " larger than number of Rx queues.",
- __func__);
+ pr_err("IP priority table entry must not be larger than number of Rx queues\n");
return -EINVAL;
}
}
if (ug_info->cam && !ug_info->ecamptr) {
if (netif_msg_probe(ugeth))
- ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
- __func__);
+ pr_err("If cam mode is chosen, must supply cam ptr\n");
return -EINVAL;
}
@@ -2191,9 +2161,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
UCC_GETH_NUM_OF_STATION_ADDRESSES_1) &&
ug_info->rxExtendedFiltering) {
if (netif_msg_probe(ugeth))
- ugeth_err("%s: Number of station addresses greater than 1 "
- "not allowed in extended parsing mode.",
- __func__);
+ pr_err("Number of station addresses greater than 1 not allowed in extended parsing mode\n");
return -EINVAL;
}
@@ -2207,7 +2175,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
/* Initialize the general fast UCC block. */
if (ucc_fast_init(uf_info, &ugeth->uccf)) {
if (netif_msg_probe(ugeth))
- ugeth_err("%s: Failed to init uccf.", __func__);
+ pr_err("Failed to init uccf\n");
return -ENOMEM;
}
@@ -2222,7 +2190,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs));
if (!ugeth->ug_regs) {
if (netif_msg_probe(ugeth))
- ugeth_err("%s: Failed to ioremap regs.", __func__);
+ pr_err("Failed to ioremap regs\n");
return -ENOMEM;
}
@@ -2273,9 +2241,7 @@ static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
}
if (!ugeth->p_tx_bd_ring[j]) {
if (netif_msg_ifup(ugeth))
- ugeth_err
- ("%s: Can not allocate memory for Tx bd rings.",
- __func__);
+ pr_err("Can not allocate memory for Tx bd rings\n");
return -ENOMEM;
}
/* Zero unused end of bd ring, according to spec */
@@ -2293,8 +2259,7 @@ static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
if (ugeth->tx_skbuff[j] == NULL) {
if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Could not allocate tx_skbuff",
- __func__);
+ pr_err("Could not allocate tx_skbuff\n");
return -ENOMEM;
}
@@ -2353,9 +2318,7 @@ static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
}
if (!ugeth->p_rx_bd_ring[j]) {
if (netif_msg_ifup(ugeth))
- ugeth_err
- ("%s: Can not allocate memory for Rx bd rings.",
- __func__);
+ pr_err("Can not allocate memory for Rx bd rings\n");
return -ENOMEM;
}
}
@@ -2369,8 +2332,7 @@ static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
if (ugeth->rx_skbuff[j] == NULL) {
if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Could not allocate rx_skbuff",
- __func__);
+ pr_err("Could not allocate rx_skbuff\n");
return -ENOMEM;
}
@@ -2438,8 +2400,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
break;
default:
if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Bad number of Rx threads value.",
- __func__);
+ pr_err("Bad number of Rx threads value\n");
return -EINVAL;
break;
}
@@ -2462,8 +2423,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
break;
default:
if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Bad number of Tx threads value.",
- __func__);
+ pr_err("Bad number of Tx threads value\n");
return -EINVAL;
break;
}
@@ -2512,8 +2472,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
&ug_regs->ipgifg);
if (ret_val != 0) {
if (netif_msg_ifup(ugeth))
- ugeth_err("%s: IPGIFG initialization parameter too large.",
- __func__);
+ pr_err("IPGIFG initialization parameter too large\n");
return ret_val;
}
@@ -2529,8 +2488,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
&ug_regs->hafdup);
if (ret_val != 0) {
if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Half Duplex initialization parameter too large.",
- __func__);
+ pr_err("Half Duplex initialization parameter too large\n");
return ret_val;
}
@@ -2567,9 +2525,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
if (netif_msg_ifup(ugeth))
- ugeth_err
- ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
- __func__);
+ pr_err("Can not allocate DPRAM memory for p_tx_glbl_pram\n");
return -ENOMEM;
}
ugeth->p_tx_glbl_pram =
@@ -2589,9 +2545,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
UCC_GETH_THREAD_DATA_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) {
if (netif_msg_ifup(ugeth))
- ugeth_err
- ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
- __func__);
+ pr_err("Can not allocate DPRAM memory for p_thread_data_tx\n");
return -ENOMEM;
}
@@ -2618,9 +2572,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
if (netif_msg_ifup(ugeth))
- ugeth_err
- ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
- __func__);
+ pr_err("Can not allocate DPRAM memory for p_send_q_mem_reg\n");
return -ENOMEM;
}
@@ -2661,9 +2613,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
UCC_GETH_SCHEDULER_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->scheduler_offset)) {
if (netif_msg_ifup(ugeth))
- ugeth_err
- ("%s: Can not allocate DPRAM memory for p_scheduler.",
- __func__);
+ pr_err("Can not allocate DPRAM memory for p_scheduler\n");
return -ENOMEM;
}
@@ -2710,10 +2660,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
UCC_GETH_TX_STATISTICS_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) {
if (netif_msg_ifup(ugeth))
- ugeth_err
- ("%s: Can not allocate DPRAM memory for"
- " p_tx_fw_statistics_pram.",
- __func__);
+ pr_err("Can not allocate DPRAM memory for p_tx_fw_statistics_pram\n");
return -ENOMEM;
}
ugeth->p_tx_fw_statistics_pram =
@@ -2750,9 +2697,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
if (netif_msg_ifup(ugeth))
- ugeth_err
- ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
- __func__);
+ pr_err("Can not allocate DPRAM memory for p_rx_glbl_pram\n");
return -ENOMEM;
}
ugeth->p_rx_glbl_pram =
@@ -2771,9 +2716,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
UCC_GETH_THREAD_DATA_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) {
if (netif_msg_ifup(ugeth))
- ugeth_err
- ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
- __func__);
+ pr_err("Can not allocate DPRAM memory for p_thread_data_rx\n");
return -ENOMEM;
}
@@ -2794,9 +2737,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
UCC_GETH_RX_STATISTICS_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) {
if (netif_msg_ifup(ugeth))
- ugeth_err
- ("%s: Can not allocate DPRAM memory for"
- " p_rx_fw_statistics_pram.", __func__);
+ pr_err("Can not allocate DPRAM memory for p_rx_fw_statistics_pram\n");
return -ENOMEM;
}
ugeth->p_rx_fw_statistics_pram =
@@ -2816,9 +2757,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
+ 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
if (netif_msg_ifup(ugeth))
- ugeth_err
- ("%s: Can not allocate DPRAM memory for"
- " p_rx_irq_coalescing_tbl.", __func__);
+ pr_err("Can not allocate DPRAM memory for p_rx_irq_coalescing_tbl\n");
return -ENOMEM;
}
@@ -2884,9 +2823,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) {
if (netif_msg_ifup(ugeth))
- ugeth_err
- ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
- __func__);
+ pr_err("Can not allocate DPRAM memory for p_rx_bd_qs_tbl\n");
return -ENOMEM;
}
@@ -2961,8 +2898,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
if (ug_info->rxExtendedFiltering) {
if (!ug_info->extendedFilteringChainPointer) {
if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Null Extended Filtering Chain Pointer.",
- __func__);
+ pr_err("Null Extended Filtering Chain Pointer\n");
return -EINVAL;
}
@@ -2973,9 +2909,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) {
if (netif_msg_ifup(ugeth))
- ugeth_err
- ("%s: Can not allocate DPRAM memory for"
- " p_exf_glbl_param.", __func__);
+ pr_err("Can not allocate DPRAM memory for p_exf_glbl_param\n");
return -ENOMEM;
}
@@ -3020,9 +2954,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
if (!(ugeth->p_init_enet_param_shadow =
kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
if (netif_msg_ifup(ugeth))
- ugeth_err
- ("%s: Can not allocate memory for"
- " p_UccInitEnetParamShadows.", __func__);
+ pr_err("Can not allocate memory for p_UccInitEnetParamShadows\n");
return -ENOMEM;
}
/* Zero out *p_init_enet_param_shadow */
@@ -3055,8 +2987,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
(ug_info->largestexternallookupkeysize !=
QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Invalid largest External Lookup Key Size.",
- __func__);
+ pr_err("Invalid largest External Lookup Key Size\n");
return -EINVAL;
}
ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
@@ -3081,8 +3012,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
, size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
ug_info->riscRx, 1)) != 0) {
if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
- __func__);
+ pr_err("Can not fill p_init_enet_param_shadow\n");
return ret_val;
}
@@ -3096,8 +3026,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
ug_info->riscTx, 0)) != 0) {
if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
- __func__);
+ pr_err("Can not fill p_init_enet_param_shadow\n");
return ret_val;
}
@@ -3105,8 +3034,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
for (i = 0; i < ug_info->numQueuesRx; i++) {
if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Can not fill Rx bds with buffers.",
- __func__);
+ pr_err("Can not fill Rx bds with buffers\n");
return ret_val;
}
}
@@ -3115,9 +3043,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
if (IS_ERR_VALUE(init_enet_pram_offset)) {
if (netif_msg_ifup(ugeth))
- ugeth_err
- ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
- __func__);
+ pr_err("Can not allocate DPRAM memory for p_init_enet_pram\n");
return -ENOMEM;
}
p_init_enet_pram =
@@ -3266,8 +3192,8 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
(!(bd_status & (R_F | R_L))) ||
(bd_status & R_ERRORS_FATAL)) {
if (netif_msg_rx_err(ugeth))
- ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
- __func__, __LINE__, (u32) skb);
+ pr_err("%d: ERROR!!! skb - 0x%08x\n",
+ __LINE__, (u32)skb);
dev_kfree_skb(skb);
ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
@@ -3290,7 +3216,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
skb = get_new_skb(ugeth, bd);
if (!skb) {
if (netif_msg_rx_err(ugeth))
- ugeth_warn("%s: No Rx Data Buffer", __func__);
+ pr_warn("No Rx Data Buffer\n");
dev->stats.rx_dropped++;
break;
}
@@ -3481,25 +3407,19 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
err = ucc_struct_init(ugeth);
if (err) {
- if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Cannot configure internal struct, "
- "aborting.", dev->name);
+ netif_err(ugeth, ifup, dev, "Cannot configure internal struct, aborting\n");
goto err;
}
err = ucc_geth_startup(ugeth);
if (err) {
- if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Cannot configure net device, aborting.",
- dev->name);
+ netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
goto err;
}
err = adjust_enet_interface(ugeth);
if (err) {
- if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Cannot configure net device, aborting.",
- dev->name);
+ netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
goto err;
}
@@ -3516,8 +3436,7 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
if (err) {
- if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
+ netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n");
goto err;
}
@@ -3538,35 +3457,27 @@ static int ucc_geth_open(struct net_device *dev)
/* Test station address */
if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
- if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Multicast address used for station "
- "address - is this what you wanted?",
- __func__);
+ netif_err(ugeth, ifup, dev,
+ "Multicast address used for station address - is this what you wanted?\n");
return -EINVAL;
}
err = init_phy(dev);
if (err) {
- if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Cannot initialize PHY, aborting.",
- dev->name);
+ netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n");
return err;
}
err = ucc_geth_init_mac(ugeth);
if (err) {
- if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Cannot initialize MAC, aborting.",
- dev->name);
+ netif_err(ugeth, ifup, dev, "Cannot initialize MAC, aborting\n");
goto err;
}
err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler,
0, "UCC Geth", dev);
if (err) {
- if (netif_msg_ifup(ugeth))
- ugeth_err("%s: Cannot get IRQ for net device, aborting.",
- dev->name);
+ netif_err(ugeth, ifup, dev, "Cannot get IRQ for net device, aborting\n");
goto err;
}
@@ -3704,8 +3615,7 @@ static int ucc_geth_resume(struct platform_device *ofdev)
err = ucc_geth_init_mac(ugeth);
if (err) {
- ugeth_err("%s: Cannot initialize MAC, aborting.",
- ndev->name);
+ netdev_err(ndev, "Cannot initialize MAC, aborting\n");
return err;
}
}
@@ -3825,8 +3735,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
ug_info = &ugeth_info[ucc_num];
if (ug_info == NULL) {
if (netif_msg_probe(&debug))
- ugeth_err("%s: [%d] Missing additional data!",
- __func__, ucc_num);
+ pr_err("[%d] Missing additional data!\n", ucc_num);
return -ENODEV;
}
@@ -3837,8 +3746,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
ug_info->uf_info.rx_clock = qe_clock_source(sprop);
if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) ||
(ug_info->uf_info.rx_clock > QE_CLK24)) {
- printk(KERN_ERR
- "ucc_geth: invalid rx-clock-name property\n");
+ pr_err("invalid rx-clock-name property\n");
return -EINVAL;
}
} else {
@@ -3846,13 +3754,11 @@ static int ucc_geth_probe(struct platform_device* ofdev)
if (!prop) {
/* If both rx-clock-name and rx-clock are missing,
we want to tell people to use rx-clock-name. */
- printk(KERN_ERR
- "ucc_geth: missing rx-clock-name property\n");
+ pr_err("missing rx-clock-name property\n");
return -EINVAL;
}
if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
- printk(KERN_ERR
- "ucc_geth: invalid rx-clock propperty\n");
+ pr_err("invalid rx-clock propperty\n");
return -EINVAL;
}
ug_info->uf_info.rx_clock = *prop;
@@ -3863,20 +3769,17 @@ static int ucc_geth_probe(struct platform_device* ofdev)
ug_info->uf_info.tx_clock = qe_clock_source(sprop);
if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) ||
(ug_info->uf_info.tx_clock > QE_CLK24)) {
- printk(KERN_ERR
- "ucc_geth: invalid tx-clock-name property\n");
+ pr_err("invalid tx-clock-name property\n");
return -EINVAL;
}
} else {
prop = of_get_property(np, "tx-clock", NULL);
if (!prop) {
- printk(KERN_ERR
- "ucc_geth: missing tx-clock-name property\n");
+ pr_err("missing tx-clock-name property\n");
return -EINVAL;
}
if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
- printk(KERN_ERR
- "ucc_geth: invalid tx-clock property\n");
+ pr_err("invalid tx-clock property\n");
return -EINVAL;
}
ug_info->uf_info.tx_clock = *prop;
@@ -3949,7 +3852,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
}
if (netif_msg_probe(&debug))
- printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
+ pr_info("UCC%1d at 0x%8x (irq = %d)\n",
ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
ug_info->uf_info.irq);
@@ -3988,8 +3891,8 @@ static int ucc_geth_probe(struct platform_device* ofdev)
err = register_netdev(dev);
if (err) {
if (netif_msg_probe(ugeth))
- ugeth_err("%s: Cannot register net device, aborting.",
- dev->name);
+ pr_err("%s: Cannot register net device, aborting\n",
+ dev->name);
free_netdev(dev);
return err;
}
@@ -4047,7 +3950,7 @@ static int __init ucc_geth_init(void)
int i, ret;
if (netif_msg_drv(&debug))
- printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
+ pr_info(DRV_DESC "\n");
for (i = 0; i < 8; i++)
memcpy(&(ugeth_info[i]), &ugeth_primary_info,
sizeof(ugeth_primary_info));
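[Editor's note, not part of the patch] The dropped "ucc_geth: " and "%s:"/__func__ prefixes are not lost information: conversions like the one above normally pair the pr_*() calls with a pr_fmt() definition at the top of the file, which stamps every message with the module name automatically. A minimal sketch of that pattern (the module body here is hypothetical, not taken from this patch):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede the includes */

#include <linux/module.h>
#include <linux/printk.h>

static int __init pr_fmt_demo_init(void)
{
	pr_info("loaded\n");		/* logs "<modname>: loaded" */
	pr_err("bad parameter\n");	/* logs "<modname>: bad parameter" */
	return 0;
}

static void __exit pr_fmt_demo_exit(void)
{
}

module_init(pr_fmt_demo_init);
module_exit(pr_fmt_demo_exit);
MODULE_LICENSE("GPL");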
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 1ebf7128ec04..e79aaf9ae52a 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -38,7 +38,7 @@
#include "ucc_geth.h"
-static char hw_stat_gstrings[][ETH_GSTRING_LEN] = {
+static const char hw_stat_gstrings[][ETH_GSTRING_LEN] = {
"tx-64-frames",
"tx-65-127-frames",
"tx-128-255-frames",
@@ -59,7 +59,7 @@ static char hw_stat_gstrings[][ETH_GSTRING_LEN] = {
"rx-dropped-frames",
};
-static char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
+static const char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
"tx-single-collision",
"tx-multiple-collision",
"tx-late-collsion",
@@ -74,7 +74,7 @@ static char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
"tx-jumbo-frames",
};
-static char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
+static const char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
"rx-crc-errors",
"rx-alignment-errors",
"rx-in-range-length-errors",
@@ -160,8 +160,7 @@ uec_set_pauseparam(struct net_device *netdev,
if (ugeth->phydev->autoneg) {
if (netif_running(netdev)) {
/* FIXME: automatically restart */
- printk(KERN_INFO
- "Please re-open the interface.\n");
+ netdev_info(netdev, "Please re-open the interface\n");
}
} else {
struct ucc_geth_info *ug_info = ugeth->ug_info;
@@ -240,18 +239,18 @@ uec_set_ringparam(struct net_device *netdev,
int queue = 0, ret = 0;
if (ring->rx_pending < UCC_GETH_RX_BD_RING_SIZE_MIN) {
- printk("%s: RxBD ring size must be no smaller than %d.\n",
- netdev->name, UCC_GETH_RX_BD_RING_SIZE_MIN);
+ netdev_info(netdev, "RxBD ring size must be no smaller than %d\n",
+ UCC_GETH_RX_BD_RING_SIZE_MIN);
return -EINVAL;
}
if (ring->rx_pending % UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT) {
- printk("%s: RxBD ring size must be multiple of %d.\n",
- netdev->name, UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT);
+ netdev_info(netdev, "RxBD ring size must be multiple of %d\n",
+ UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT);
return -EINVAL;
}
if (ring->tx_pending < UCC_GETH_TX_BD_RING_SIZE_MIN) {
- printk("%s: TxBD ring size must be no smaller than %d.\n",
- netdev->name, UCC_GETH_TX_BD_RING_SIZE_MIN);
+ netdev_info(netdev, "TxBD ring size must be no smaller than %d\n",
+ UCC_GETH_TX_BD_RING_SIZE_MIN);
return -EINVAL;
}
@@ -260,8 +259,7 @@ uec_set_ringparam(struct net_device *netdev,
if (netif_running(netdev)) {
/* FIXME: restart automatically */
- printk(KERN_INFO
- "Please re-open the interface.\n");
+ netdev_info(netdev, "Please re-open the interface\n");
}
return ret;
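[Editor's note] The ethtool conversions above swap bare printk("%s: ...", netdev->name, ...) for netdev_info(), which derives the prefix from the net_device itself (driver name, bus id, interface name). A hypothetical helper showing the call shape:

#include <linux/netdevice.h>

static void report_ring_minimum(struct net_device *netdev, int min_size)
{
	/* emits e.g. "ucc_geth soc:ethernet eth0: RxBD ring size ..." */
	netdev_info(netdev, "RxBD ring size must be no smaller than %d\n",
		    min_size);
}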
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 2418faf2251a..ef46b58cb4e9 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -705,19 +705,7 @@ static struct pcmcia_driver fmvj18x_cs_driver = {
.suspend = fmvj18x_suspend,
.resume = fmvj18x_resume,
};
-
-static int __init init_fmvj18x_cs(void)
-{
- return pcmcia_register_driver(&fmvj18x_cs_driver);
-}
-
-static void __exit exit_fmvj18x_cs(void)
-{
- pcmcia_unregister_driver(&fmvj18x_cs_driver);
-}
-
-module_init(init_fmvj18x_cs);
-module_exit(exit_fmvj18x_cs);
+module_pcmcia_driver(fmvj18x_cs_driver);
/*====================================================================*/
@@ -1003,8 +991,6 @@ static void fjn_rx(struct net_device *dev)
}
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- netdev_notice(dev, "Memory squeeze, dropping packet (len %d)\n",
- pkt_len);
outb(F_SKP_PKT, ioaddr + RX_SKIP);
dev->stats.rx_dropped++;
break;
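[Editor's note] module_pcmcia_driver() generates exactly the init/exit boilerplate deleted above. A hypothetical skeleton of its use:

#include <linux/module.h>
#include <pcmcia/ds.h>

static struct pcmcia_driver example_cs_driver = {
	.owner	= THIS_MODULE,
	.name	= "example_cs",
	/* .probe/.remove/.id_table as usual */
};

/* expands to module_init()/module_exit() stubs that call
 * pcmcia_register_driver()/pcmcia_unregister_driver() */
module_pcmcia_driver(example_cs_driver);
MODULE_LICENSE("GPL");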
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index 1c54e229e3cc..e38816145395 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -798,16 +798,14 @@ static inline int i596_rx(struct net_device *dev)
#ifdef __mc68000__
cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
#endif
- }
- else
+ } else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
+ }
memory_squeeze:
if (skb == NULL) {
/* XXX tulip.c can defer packets here!! */
- printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
- }
- else {
+ } else {
if (!rx_in_place) {
/* 16 byte align the data fields */
skb_reserve(skb, 2);
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index f045ea4dc514..d653bac4cfc4 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -715,14 +715,12 @@ static inline int i596_rx(struct net_device *dev)
rbd->v_data = newskb->data;
rbd->b_data = SWAP32(dma_addr);
DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
- } else
+ } else {
skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+ }
memory_squeeze:
if (skb == NULL) {
/* XXX tulip.c can defer packets here!! */
- printk(KERN_ERR
- "%s: i596_rx Memory squeeze, dropping packet.\n",
- dev->name);
dev->stats.rx_dropped++;
} else {
if (!rx_in_place) {
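[Editor's note] The deleted "memory squeeze" printks across these drivers follow one rule: the skb/page allocators already log a warning on failure, so a per-driver message only duplicates it. The surviving pattern, sketched with a hypothetical helper:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *rx_alloc_quiet(struct net_device *dev, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len);

	if (!skb)
		dev->stats.rx_dropped++;	/* core mm already warned */
	return skb;
}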
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 328f47c92e26..90ea0b1673ca 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -402,7 +402,6 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
skb_arr_rq1[index] = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
if (!skb_arr_rq1[index]) {
- netdev_info(dev, "Unable to allocate enough skb in the array\n");
pr->rq1_skba.os_skbs = fill_wqes - i;
break;
}
@@ -432,10 +431,8 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
for (i = 0; i < nr_rq1a; i++) {
skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
- if (!skb_arr_rq1[i]) {
- netdev_info(dev, "Not enough memory to allocate skb array\n");
+ if (!skb_arr_rq1[i])
break;
- }
}
/* Ring doorbell */
ehea_update_rq1a(pr->qp, i - 1);
@@ -695,10 +692,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
skb = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
- if (!skb) {
- netdev_err(dev, "Not enough memory to allocate skb\n");
+ if (!skb)
break;
- }
}
skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
cqe->num_bytes_transfered - 4);
@@ -730,7 +725,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
processed_bytes += skb->len;
if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
- __vlan_hwaccel_put_tag(skb, cqe->vlan_tag);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ cqe->vlan_tag);
napi_gro_receive(&pr->napi, skb);
} else {
@@ -2115,7 +2111,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
@@ -2153,7 +2149,7 @@ out:
return err;
}
-static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
@@ -3025,12 +3021,12 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
dev->netdev_ops = &ehea_netdev_ops;
ehea_set_ethtool_ops(dev);
- dev->hw_features = NETIF_F_SG | NETIF_F_TSO
- | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX;
- dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
- | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
- | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
- | NETIF_F_RXCSUM;
+ dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
+ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO |
+ NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
NETIF_F_IP_CSUM;
dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
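[Editor's note] The ehea changes track a tree-wide API change: the VLAN acceleration callbacks and __vlan_hwaccel_put_tag() now carry the VLAN protocol, and the feature flags are renamed NETIF_F_HW_VLAN_CTAG_* to make room for 802.1ad. A sketch of the new call shapes (the function names are illustrative, not from the patch):

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static int example_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
				   u16 vid)
{
	/* proto is htons(ETH_P_8021Q) for C-tags; program the HW filter */
	return 0;
}

static void example_tag_rx_skb(struct sk_buff *skb, u16 vid)
{
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}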
diff --git a/drivers/net/ethernet/ibm/emac/debug.c b/drivers/net/ethernet/ibm/emac/debug.c
index b16b4828b64d..a559f326bf63 100644
--- a/drivers/net/ethernet/ibm/emac/debug.c
+++ b/drivers/net/ethernet/ibm/emac/debug.c
@@ -245,7 +245,7 @@ static void emac_sysrq_handler(int key)
static struct sysrq_key_op emac_sysrq_op = {
.handler = emac_sysrq_handler,
- .help_msg = "emaC",
+ .help_msg = "emac(c)",
.action_msg = "Show EMAC(s) status",
};
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 1f7ecf57181e..610ed223d1db 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -637,17 +637,12 @@ static int mal_probe(struct platform_device *ofdev)
bd_size = sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
NUM_RX_BUFF * mal->num_rx_chans);
- mal->bd_virt =
- dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
- GFP_KERNEL);
+ mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (mal->bd_virt == NULL) {
- printk(KERN_ERR
- "mal%d: out of memory allocating RX/TX descriptors!\n",
- index);
err = -ENOMEM;
goto fail_unmap;
}
- memset(mal->bd_virt, 0, bd_size);
for (i = 0; i < mal->num_tx_chans; ++i)
set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
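[Editor's note] The mal.c hunk replaces dma_alloc_coherent() + memset() with a single zeroed allocation. A sketch under the same assumption the patch relies on (__GFP_ZERO honoured by the coherent allocator):

#include <linux/dma-mapping.h>

static void *alloc_bd_ring(struct device *dev, size_t size, dma_addr_t *dma)
{
	/* returns memory already cleared; no separate memset() needed */
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL | __GFP_ZERO);
}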
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index c859771a9902..302d59401065 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -556,11 +556,9 @@ static int ibmveth_open(struct net_device *netdev)
adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
rxq_entries;
adapter->rx_queue.queue_addr =
- dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
- &adapter->rx_queue.queue_dma, GFP_KERNEL);
-
+ dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
+ &adapter->rx_queue.queue_dma, GFP_KERNEL);
if (!adapter->rx_queue.queue_addr) {
- netdev_err(netdev, "unable to allocate rx queue pages\n");
rc = -ENOMEM;
goto err_out;
}
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index ec800b093e7e..d2bea3f07c73 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -870,7 +870,7 @@ err_unlock:
}
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
- void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
+ int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
struct cb *cb;
unsigned long flags;
@@ -888,10 +888,13 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
nic->cbs_avail--;
cb->skb = skb;
+ err = cb_prepare(nic, cb, skb);
+ if (err)
+ goto err_unlock;
+
if (unlikely(!nic->cbs_avail))
err = -ENOSPC;
- cb_prepare(nic, cb, skb);
/* Order is important otherwise we'll be in a race with h/w:
* set S-bit in current first, then clear S-bit in previous. */
@@ -1091,7 +1094,7 @@ static void e100_get_defaults(struct nic *nic)
nic->mii.mdio_write = mdio_write;
}
-static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct config *config = &cb->u.config;
u8 *c = (u8 *)config;
@@ -1181,6 +1184,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
"[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
+ return 0;
}
/*************************************************************************
@@ -1331,7 +1335,7 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
return fw;
}
-static void e100_setup_ucode(struct nic *nic, struct cb *cb,
+static int e100_setup_ucode(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
const struct firmware *fw = (void *)skb;
@@ -1358,6 +1362,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb,
cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
cb->command = cpu_to_le16(cb_ucode | cb_el);
+ return 0;
}
static inline int e100_load_ucode_wait(struct nic *nic)
@@ -1400,18 +1405,20 @@ static inline int e100_load_ucode_wait(struct nic *nic)
return err;
}
-static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
+static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
cb->command = cpu_to_le16(cb_iaaddr);
memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
+ return 0;
}
-static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
cb->command = cpu_to_le16(cb_dump);
cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
offsetof(struct mem, dump_buf));
+ return 0;
}
static int e100_phy_check_without_mii(struct nic *nic)
@@ -1581,7 +1588,7 @@ static int e100_hw_init(struct nic *nic)
return 0;
}
-static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct net_device *netdev = nic->netdev;
struct netdev_hw_addr *ha;
@@ -1596,6 +1603,7 @@ static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
ETH_ALEN);
}
+ return 0;
}
static void e100_set_multicast_list(struct net_device *netdev)
@@ -1756,11 +1764,18 @@ static void e100_watchdog(unsigned long data)
round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}
-static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
+static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
+ dma_addr_t dma_addr;
cb->command = nic->tx_command;
+ dma_addr = pci_map_single(nic->pdev,
+ skb->data, skb->len, PCI_DMA_TODEVICE);
+ /* If we can't map the skb, have the upper layer try later */
+ if (pci_dma_mapping_error(nic->pdev, dma_addr))
+ return -ENOMEM;
+
/*
* Use the last 4 bytes of the SKB payload packet as the CRC, used for
* testing, ie sending frames with bad CRC.
@@ -1777,11 +1792,10 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
cb->u.tcb.tcb_byte_count = 0;
cb->u.tcb.threshold = nic->tx_threshold;
cb->u.tcb.tbd_count = 1;
- cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
- skb->data, skb->len, PCI_DMA_TODEVICE));
- /* check for mapping failure? */
+ cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
skb_tx_timestamp(skb);
+ return 0;
}
static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
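[Editor's note] The e100 rework changes the cb_prepare callbacks from void to int precisely so a DMA mapping failure can propagate instead of being committed to hardware. The core pattern, as a hypothetical helper:

#include <linux/pci.h>

static int map_tx_buffer(struct pci_dev *pdev, void *data, size_t len,
			 dma_addr_t *mapped)
{
	dma_addr_t addr = pci_map_single(pdev, data, len, PCI_DMA_TODEVICE);

	/* if we can't map the buffer, let the upper layer retry later */
	if (pci_dma_mapping_error(pdev, addr))
		return -ENOMEM;
	*mapped = addr;
	return 0;
}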
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 43462d596a4e..82a967c95598 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1020,12 +1020,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!txdr->desc) {
ret_val = 2;
goto err_nomem;
}
- memset(txdr->desc, 0, txdr->size);
txdr->next_to_use = txdr->next_to_clean = 0;
ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF));
@@ -1053,6 +1052,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->buffer_info[i].dma =
dma_map_single(&pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) {
+ ret_val = 4;
+ goto err_nomem;
+ }
tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
tx_desc->lower.data = cpu_to_le32(skb->len);
tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
@@ -1069,18 +1072,17 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),
GFP_KERNEL);
if (!rxdr->buffer_info) {
- ret_val = 4;
+ ret_val = 5;
goto err_nomem;
}
rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!rxdr->desc) {
- ret_val = 5;
+ ret_val = 6;
goto err_nomem;
}
- memset(rxdr->desc, 0, rxdr->size);
rxdr->next_to_use = rxdr->next_to_clean = 0;
rctl = er32(RCTL);
@@ -1101,7 +1103,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
if (!skb) {
- ret_val = 6;
+ ret_val = 7;
goto err_nomem;
}
skb_reserve(skb, NET_IP_ALIGN);
@@ -1110,6 +1112,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->buffer_info[i].dma =
dma_map_single(&pdev->dev, skb->data,
E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) {
+ ret_val = 8;
+ goto err_nomem;
+ }
rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
memset(skb->data, 0x00, skb->len);
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 8502c625dbef..59ad007dd5aa 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -166,8 +166,10 @@ static void e1000_vlan_mode(struct net_device *netdev,
netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
bool filter_on);
-static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static int e1000_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto, u16 vid);
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
#ifdef CONFIG_PM
@@ -333,7 +335,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
if (!test_bit(vid, adapter->active_vlans)) {
if (hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
- e1000_vlan_rx_add_vid(netdev, vid);
+ e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
adapter->mng_vlan_id = vid;
} else {
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -341,7 +343,8 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
(vid != old_vid) &&
!test_bit(old_vid, adapter->active_vlans))
- e1000_vlan_rx_kill_vid(netdev, old_vid);
+ e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
+ old_vid);
} else {
adapter->mng_vlan_id = vid;
}
@@ -809,10 +812,10 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev,
/* Since there is no support for separate Rx/Tx vlan accel
* enable/disable make sure Tx flag is always in same state as Rx.
*/
- if (features & NETIF_F_HW_VLAN_RX)
- features |= NETIF_F_HW_VLAN_TX;
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
else
- features &= ~NETIF_F_HW_VLAN_TX;
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features;
}
@@ -823,7 +826,7 @@ static int e1000_set_features(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
netdev_features_t changed = features ^ netdev->features;
- if (changed & NETIF_F_HW_VLAN_RX)
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
e1000_vlan_mode(netdev, features);
if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
@@ -1058,9 +1061,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->mac_type >= e1000_82543) {
netdev->hw_features = NETIF_F_SG |
NETIF_F_HW_CSUM |
- NETIF_F_HW_VLAN_RX;
- netdev->features = NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_FILTER;
+ NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
}
if ((hw->mac_type >= e1000_82544) &&
@@ -1457,7 +1460,8 @@ static int e1000_close(struct net_device *netdev)
if ((hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
!test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
- e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+ e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
+ adapter->mng_vlan_id);
}
return 0;
@@ -1516,8 +1520,6 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
if (!txdr->desc) {
setup_tx_desc_die:
vfree(txdr->buffer_info);
- e_err(probe, "Unable to allocate memory for the Tx descriptor "
- "ring\n");
return -ENOMEM;
}
@@ -1707,10 +1709,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
GFP_KERNEL);
-
if (!rxdr->desc) {
- e_err(probe, "Unable to allocate memory for the Rx descriptor "
- "ring\n");
setup_rx_desc_die:
vfree(rxdr->buffer_info);
return -ENOMEM;
@@ -1729,8 +1728,6 @@ setup_rx_desc_die:
if (!rxdr->desc) {
dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
olddma);
- e_err(probe, "Unable to allocate memory for the Rx "
- "descriptor ring\n");
goto setup_rx_desc_die;
}
@@ -4006,7 +4003,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
if (status & E1000_RXD_STAT_VP) {
u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
- __vlan_hwaccel_put_tag(skb, vid);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
napi_gro_receive(&adapter->napi, skb);
}
@@ -4792,7 +4789,7 @@ static void __e1000_vlan_mode(struct e1000_adapter *adapter,
u32 ctrl;
ctrl = er32(CTRL);
- if (features & NETIF_F_HW_VLAN_RX) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX) {
/* enable VLAN tag insert/strip */
ctrl |= E1000_CTRL_VME;
} else {
@@ -4844,7 +4841,8 @@ static void e1000_vlan_mode(struct net_device *netdev,
e1000_irq_enable(adapter);
}
-static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4869,7 +4867,8 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
return 0;
}
-static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4903,7 +4902,7 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
e1000_vlan_filter_on_off(adapter, true);
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
- e1000_vlan_rx_add_vid(adapter->netdev, vid);
+ e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index e0991388664c..b71c8502a2b3 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -37,7 +37,9 @@
* "index + 5".
*/
static const u16 e1000_gg82563_cable_length_table[] = {
- 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
+ 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF
+};
+
#define GG82563_CABLE_LENGTH_TABLE_SIZE \
ARRAY_SIZE(e1000_gg82563_cable_length_table)
@@ -116,7 +118,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
nvm->type = e1000_nvm_eeprom_spi;
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
- E1000_EECD_SIZE_EX_SHIFT);
+ E1000_EECD_SIZE_EX_SHIFT);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
@@ -393,7 +395,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
* before the device has completed the "Page Select" MDI
* transaction. So we wait 200us after each MDI command...
*/
- udelay(200);
+ usleep_range(200, 400);
/* ...and verify the command was successful. */
ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
@@ -403,17 +405,17 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
return -E1000_ERR_PHY;
}
- udelay(200);
+ usleep_range(200, 400);
ret_val = e1000e_read_phy_reg_mdic(hw,
- MAX_PHY_REG_ADDRESS & offset,
- data);
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
- udelay(200);
+ usleep_range(200, 400);
} else {
ret_val = e1000e_read_phy_reg_mdic(hw,
- MAX_PHY_REG_ADDRESS & offset,
- data);
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
}
e1000_release_phy_80003es2lan(hw);
@@ -462,7 +464,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
* before the device has completed the "Page Select" MDI
* transaction. So we wait 200us after each MDI command...
*/
- udelay(200);
+ usleep_range(200, 400);
/* ...and verify the command was successful. */
ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
@@ -472,17 +474,17 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
return -E1000_ERR_PHY;
}
- udelay(200);
+ usleep_range(200, 400);
ret_val = e1000e_write_phy_reg_mdic(hw,
- MAX_PHY_REG_ADDRESS & offset,
- data);
+ MAX_PHY_REG_ADDRESS &
+ offset, data);
- udelay(200);
+ usleep_range(200, 400);
} else {
ret_val = e1000e_write_phy_reg_mdic(hw,
- MAX_PHY_REG_ADDRESS & offset,
- data);
+ MAX_PHY_REG_ADDRESS &
+ offset, data);
}
e1000_release_phy_80003es2lan(hw);
@@ -580,7 +582,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
e_dbg("Waiting for forced speed/duplex link on GG82563 phy.\n");
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
+ 100000, &link);
if (ret_val)
return ret_val;
@@ -595,7 +597,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
/* Try once more */
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
+ 100000, &link);
if (ret_val)
return ret_val;
}
@@ -666,14 +668,12 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
s32 ret_val;
if (hw->phy.media_type == e1000_media_type_copper) {
- ret_val = e1000e_get_speed_and_duplex_copper(hw,
- speed,
- duplex);
+ ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
hw->phy.ops.cfg_on_link_up(hw);
} else {
ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw,
- speed,
- duplex);
+ speed,
+ duplex);
}
return ret_val;
@@ -754,9 +754,9 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
/* Initialize identification LED */
ret_val = mac->ops.id_led_init(hw);
+ /* An error is not fatal and we should not stop init due to this */
if (ret_val)
e_dbg("Error initializing identification LED\n");
- /* This is not fatal and we should not stop init due to this */
/* Disabling VLAN filtering */
e_dbg("Initializing the IEEE VLAN\n");
@@ -784,14 +784,14 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
/* Set the transmit descriptor write-back policy */
reg_data = er32(TXDCTL(0));
- reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
ew32(TXDCTL(0), reg_data);
/* ...for both queues. */
reg_data = er32(TXDCTL(1));
- reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
ew32(TXDCTL(1), reg_data);
/* Enable retransmit on late collisions */
@@ -818,13 +818,12 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
/* default to true to enable the MDIC W/A */
hw->dev_spec.e80003es2lan.mdic_wa_enable = true;
- ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET >>
- E1000_KMRNCTRLSTA_OFFSET_SHIFT,
- &i);
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >>
+ E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i);
if (!ret_val) {
if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
- E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
+ E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
hw->dev_spec.e80003es2lan.mdic_wa_enable = false;
}
@@ -891,7 +890,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
- u32 ctrl_ext;
+ u32 reg;
u16 data;
ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
@@ -954,22 +953,19 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
}
/* Bypass Rx and Tx FIFO's */
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
- E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
- E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
+ reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL;
+ data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
+ E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
if (ret_val)
return ret_val;
- ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
- &data);
+ reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE;
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data);
if (ret_val)
return ret_val;
data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
- data);
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
if (ret_val)
return ret_val;
@@ -982,9 +978,9 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- ctrl_ext = er32(CTRL_EXT);
- ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
- ew32(CTRL_EXT, ctrl_ext);
+ reg = er32(CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
+ ew32(CTRL_EXT, reg);
ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
if (ret_val)
@@ -1049,27 +1045,29 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
* polling the phy; this fixes erroneous timeouts at 10Mbps.
*/
ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
- 0xFFFF);
+ 0xFFFF);
if (ret_val)
return ret_val;
ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
- &reg_data);
+ &reg_data);
if (ret_val)
return ret_val;
reg_data |= 0x3F;
ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
- reg_data);
+ reg_data);
if (ret_val)
return ret_val;
- ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
- &reg_data);
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+ &reg_data);
if (ret_val)
return ret_val;
reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
- reg_data);
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+ reg_data);
if (ret_val)
return ret_val;
@@ -1096,7 +1094,7 @@ static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
if (hw->phy.media_type == e1000_media_type_copper) {
ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
- &duplex);
+ &duplex);
if (ret_val)
return ret_val;
@@ -1125,9 +1123,10 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
u16 reg_data, reg_data2;
reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
- reg_data);
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
if (ret_val)
return ret_val;
@@ -1171,9 +1170,10 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
u32 i = 0;
reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
- reg_data);
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
if (ret_val)
return ret_val;
@@ -1220,7 +1220,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
return ret_val;
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();
@@ -1255,7 +1255,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
return ret_val;
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | data;
+ E1000_KMRNCTRLSTA_OFFSET) | data;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();
@@ -1419,4 +1419,3 @@ const struct e1000_info e1000_es2_info = {
.phy_ops = &es2_phy_ops,
.nvm_ops = &es2_nvm_ops,
};
-
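[Editor's note] The udelay(200) -> usleep_range(200, 400) conversions here, and in the 82571.c hunks that follow, apply the usual rule for sleepable context: a ranged sleep yields the CPU and lets the timer core coalesce wakeups, where udelay() busy-spins. Sketch:

#include <linux/delay.h>

static void mdi_settle(void)
{
	usleep_range(200, 400);		/* was udelay(200); context may sleep */
}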
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 2faffbde179e..7380442a3829 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -184,7 +184,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
default:
nvm->type = e1000_nvm_eeprom_spi;
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
- E1000_EECD_SIZE_EX_SHIFT);
+ E1000_EECD_SIZE_EX_SHIFT);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
@@ -437,7 +437,7 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
return ret_val;
phy->id = (u32)(phy_id << 16);
- udelay(20);
+ usleep_range(20, 40);
ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
if (ret_val)
return ret_val;
@@ -482,7 +482,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
if (!(swsm & E1000_SWSM_SMBI))
break;
- udelay(50);
+ usleep_range(50, 100);
i++;
}
@@ -499,7 +499,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
if (er32(SWSM) & E1000_SWSM_SWESMBI)
break;
- udelay(50);
+ usleep_range(50, 100);
}
if (i == fw_timeout) {
@@ -526,6 +526,7 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
ew32(SWSM, swsm);
}
+
/**
* e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
* @hw: pointer to the HW structure
@@ -846,9 +847,9 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
}
for (i = 0; i < words; i++) {
- eewr = (data[i] << E1000_NVM_RW_REG_DATA) |
- ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
- E1000_NVM_RW_REG_START;
+ eewr = ((data[i] << E1000_NVM_RW_REG_DATA) |
+ ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
+ E1000_NVM_RW_REG_START);
ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
if (ret_val)
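
The regrouped expression packs three EEWR fields into one register write; a worked example with hypothetical values, using the shifts from the defines.h hunk later in this patch (data at bit 16, address at bit 2, start at bit 0):

/* Writing word value 0xBEEF to NVM word 3:
 *   eewr = (0xBEEF << E1000_NVM_RW_REG_DATA)   -> 0xBEEF0000
 *        | (3      << E1000_NVM_RW_ADDR_SHIFT) -> 0x0000000C
 *        | E1000_NVM_RW_REG_START              -> 0x00000001
 *        = 0xBEEF000D
 * The poll that follows waits for the DONE bit before the next word.
 */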
@@ -875,8 +876,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
s32 timeout = PHY_CFG_TIMEOUT;
while (timeout) {
- if (er32(EEMNGCTL) &
- E1000_NVM_CFG_DONE_PORT_0)
+ if (er32(EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0)
break;
usleep_range(1000, 2000);
timeout--;
@@ -1022,7 +1022,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
}
if (hw->nvm.type == e1000_nvm_flash_hw) {
- udelay(10);
+ usleep_range(10, 20);
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_EE_RST;
ew32(CTRL_EXT, ctrl_ext);
@@ -1095,9 +1095,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
/* Initialize identification LED */
ret_val = mac->ops.id_led_init(hw);
+ /* An error is not fatal and we should not stop init due to this */
if (ret_val)
e_dbg("Error initializing identification LED\n");
- /* This is not fatal and we should not stop init due to this */
/* Disabling VLAN filtering */
e_dbg("Initializing the IEEE VLAN\n");
@@ -1122,9 +1122,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
/* Set the transmit descriptor write-back policy */
reg_data = er32(TXDCTL(0));
- reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB |
- E1000_TXDCTL_COUNT_DESC;
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
ew32(TXDCTL(0), reg_data);
/* ...for both queues. */
@@ -1140,9 +1139,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
break;
default:
reg_data = er32(TXDCTL(1));
- reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB |
- E1000_TXDCTL_COUNT_DESC;
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB |
+ E1000_TXDCTL_COUNT_DESC);
ew32(TXDCTL(1), reg_data);
break;
}
@@ -1530,7 +1529,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
status = er32(STATUS);
er32(RXCW);
/* SYNCH bit and IV bit are sticky */
- udelay(10);
+ usleep_range(10, 20);
rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
@@ -1633,7 +1632,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
* the IV bit and restart Autoneg
*/
for (i = 0; i < AN_RETRY_COUNT; i++) {
- udelay(10);
+ usleep_range(10, 20);
rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_SYNCH) &&
(rxcw & E1000_RXCW_C))
@@ -2066,4 +2065,3 @@ const struct e1000_info e1000_82583_info = {
.phy_ops = &e82_phy_ops_bm,
.nvm_ops = &e82571_nvm_ops,
};
-
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
index 85cb1a3b7cd4..08e24dc3dc0e 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.h
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -44,6 +44,8 @@
#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */
#define E1000_EIAC_MASK_82574 0x01F00000
+#define E1000_IVAR_INT_ALLOC_VALID 0x8
+
/* Manageability Operation Mode mask */
#define E1000_NVM_INIT_CTRL2_MNGM 0x6000
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index fc3a4fe1ac71..351c94a0cf74 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -66,7 +66,7 @@
#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
#define E1000_CTRL_EXT_EIAME 0x01000000
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
-#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
+#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
#define E1000_CTRL_EXT_LSECCK 0x00001000
#define E1000_CTRL_EXT_PHYPDEN 0x00100000
@@ -216,6 +216,8 @@
#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
#define E1000_CTRL_RST 0x04000000 /* Global reset */
#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
@@ -234,17 +236,17 @@
#define E1000_STATUS_FUNC_SHIFT 2
#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
-#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master Req status */
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
-
#define ADVERTISE_10_HALF 0x0001
#define ADVERTISE_10_FULL 0x0002
#define ADVERTISE_100_HALF 0x0004
@@ -311,6 +313,7 @@
/* SerDes Control */
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410
/* Receive Checksum Control */
#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
@@ -400,7 +403,8 @@
#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
-#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
+/* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_INT_ASSERTED 0x80000000
#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
@@ -583,13 +587,13 @@
#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
-#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */
-#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
-#define E1000_NVM_RW_REG_START 1 /* Start operation */
-#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
-#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
-#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
-#define E1000_FLASH_UPDATES 2000
+#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM r/w regs */
+#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START 1 /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define E1000_NVM_POLL_WRITE 1 /* Flag for polling write complete */
+#define E1000_NVM_POLL_READ 0 /* Flag for polling read complete */
+#define E1000_FLASH_UPDATES 2000
/* NVM Word Offsets */
#define NVM_COMPAT 0x0003
@@ -785,6 +789,7 @@
GG82563_REG(194, 18) /* Inband Control */
/* MDI Control */
+#define E1000_MDIC_REG_MASK 0x001F0000
#define E1000_MDIC_REG_SHIFT 16
#define E1000_MDIC_PHY_SHIFT 21
#define E1000_MDIC_OP_WRITE 0x04000000
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index fcc758138b8a..82f1c84282db 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -46,6 +46,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_classify.h>
#include <linux/mii.h>
+#include <linux/mdio.h>
#include "hw.h"
struct e1000_info;
@@ -61,7 +62,6 @@ struct e1000_info;
#define e_notice(format, arg...) \
netdev_notice(adapter->netdev, format, ## arg)
-
/* Interrupt modes, as used by the IntMode parameter */
#define E1000E_INT_MODE_LEGACY 0
#define E1000E_INT_MODE_MSI 1
@@ -239,9 +239,8 @@ struct e1000_adapter {
u16 tx_itr;
u16 rx_itr;
- /* Tx */
- struct e1000_ring *tx_ring /* One per active queue */
- ____cacheline_aligned_in_smp;
+ /* Tx - one ring per active queue */
+ struct e1000_ring *tx_ring ____cacheline_aligned_in_smp;
u32 tx_fifo_limit;
struct napi_struct napi;
@@ -352,6 +351,8 @@ struct e1000_adapter {
struct timecounter tc;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
+
+ u16 eee_advert;
};
struct e1000_info {
@@ -487,8 +488,8 @@ extern int e1000e_setup_tx_resources(struct e1000_ring *ring);
extern void e1000e_free_rx_resources(struct e1000_ring *ring);
extern void e1000e_free_tx_resources(struct e1000_ring *ring);
extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64
- *stats);
+ struct rtnl_link_stats64
+ *stats);
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
@@ -558,12 +559,14 @@ static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
return hw->nvm.ops.update(hw);
}
-static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
return hw->nvm.ops.read(hw, offset, words, data);
}
-static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
return hw->nvm.ops.write(hw, offset, words, data);
}
@@ -597,7 +600,7 @@ static inline s32 __ew32_prepare(struct e1000_hw *hw)
s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
- udelay(50);
+ usleep_range(50, 100);
return i;
}
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index f91a8f3f9d48..7c8ca658d553 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -35,12 +35,11 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
-#include <linux/mdio.h>
#include <linux/pm_runtime.h>
#include "e1000.h"
-enum {NETDEV_STATS, E1000_STATS};
+enum { NETDEV_STATS, E1000_STATS };
struct e1000_stats {
char stat_string[ETH_GSTRING_LEN];
@@ -121,6 +120,7 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
"Interrupt test (offline)", "Loopback test (offline)",
"Link test (on/offline)"
};
+
#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
static int e1000_get_settings(struct net_device *netdev,
@@ -197,8 +197,7 @@ static int e1000_get_settings(struct net_device *netdev,
/* MDI-X => 2; MDI => 1; Invalid => 0 */
if ((hw->phy.media_type == e1000_media_type_copper) &&
netif_carrier_ok(netdev))
- ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
- ETH_TP_MDI;
+ ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : ETH_TP_MDI;
else
ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
@@ -224,8 +223,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
/* Fiber NICs only allow 1000 Mb/s full duplex */
if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
- spd != SPEED_1000 &&
- dplx != DUPLEX_FULL) {
+ (spd != SPEED_1000) && (dplx != DUPLEX_FULL)) {
goto err_inval;
}
@@ -298,12 +296,10 @@ static int e1000_set_settings(struct net_device *netdev,
hw->mac.autoneg = 1;
if (hw->phy.media_type == e1000_media_type_fiber)
hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg;
+ ADVERTISED_FIBRE | ADVERTISED_Autoneg;
else
hw->phy.autoneg_advertised = ecmd->advertising |
- ADVERTISED_TP |
- ADVERTISED_Autoneg;
+ ADVERTISED_TP | ADVERTISED_Autoneg;
ecmd->advertising = hw->phy.autoneg_advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = e1000_fc_default;
@@ -346,7 +342,7 @@ static void e1000_get_pauseparam(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw;
pause->autoneg =
- (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+ (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
if (hw->fc.current_mode == e1000_fc_rx_pause) {
pause->rx_pause = 1;
@@ -435,7 +431,7 @@ static void e1000_get_regs(struct net_device *netdev,
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
- adapter->pdev->device;
+ adapter->pdev->device;
regs_buff[0] = er32(CTRL);
regs_buff[1] = er32(STATUS);
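
For reference, the version word packs a format byte, the PCI revision and the device ID; with hypothetical IDs:

/* Format byte 1, PCI revision 0x02, device ID 0x10D3 (illustrative):
 *   regs->version = (1 << 24) | (0x02 << 16) | 0x10D3 = 0x010210D3
 */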
@@ -503,8 +499,8 @@ static int e1000_get_eeprom(struct net_device *netdev,
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_buff = kmalloc(sizeof(u16) *
- (last_word - first_word + 1), GFP_KERNEL);
+ eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
+ GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
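
The byte-granular ethtool request is mapped onto 16-bit NVM words before reading; a worked example of the mapping:

/* Hypothetical request: offset = 5, len = 4 (bytes 5..8):
 *   first_word = 5 >> 1            = 2
 *   last_word  = (5 + 4 - 1) >> 1  = 4
 * so three words (2..4) are read and the requested bytes are sliced
 * out of the word buffer afterwards.
 */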
@@ -515,7 +511,7 @@ static int e1000_get_eeprom(struct net_device *netdev,
} else {
for (i = 0; i < last_word - first_word + 1; i++) {
ret_val = e1000_read_nvm(hw, first_word + i, 1,
- &eeprom_buff[i]);
+ &eeprom_buff[i]);
if (ret_val)
break;
}
@@ -553,7 +549,8 @@ static int e1000_set_eeprom(struct net_device *netdev,
if (eeprom->len == 0)
return -EOPNOTSUPP;
- if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16)))
+ if (eeprom->magic !=
+ (adapter->pdev->vendor | (adapter->pdev->device << 16)))
return -EFAULT;
if (adapter->flags & FLAG_READ_ONLY_NVM)
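
The reflowed magic check still encodes vendor and device the same way; e.g. for a hypothetical Intel part:

/* vendor 0x8086, device 0x10D3 (illustrative) -> ethtool must pass:
 *   magic = 0x8086 | (0x10D3 << 16) = 0x10D38086
 */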
@@ -579,7 +576,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
/* need read/modify/write of last changed EEPROM word */
/* only the first byte of the word is being modified */
ret_val = e1000_read_nvm(hw, last_word, 1,
- &eeprom_buff[last_word - first_word]);
+ &eeprom_buff[last_word - first_word]);
if (ret_val)
goto out;
@@ -618,8 +615,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, e1000e_driver_name,
- sizeof(drvinfo->driver));
+ strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, e1000e_driver_version,
sizeof(drvinfo->version));
@@ -627,10 +623,10 @@ static void e1000_get_drvinfo(struct net_device *netdev,
* PCI-E controllers
*/
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d-%d",
- (adapter->eeprom_vers & 0xF000) >> 12,
- (adapter->eeprom_vers & 0x0FF0) >> 4,
- (adapter->eeprom_vers & 0x000F));
+ "%d.%d-%d",
+ (adapter->eeprom_vers & 0xF000) >> 12,
+ (adapter->eeprom_vers & 0x0FF0) >> 4,
+ (adapter->eeprom_vers & 0x000F));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
@@ -756,7 +752,8 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
{
u32 pat, val;
static const u32 test[] = {
- 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+ };
for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
(test[pat] & write));
@@ -786,6 +783,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
}
return 0;
}
+
#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
do { \
if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
@@ -813,16 +811,16 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
u32 wlock_mac = 0;
/* The status register is Read Only, so a write should fail.
- * Some bits that get toggled are ignored.
+ * Some bits that get toggled are ignored. There are several bits
+ * on newer hardware that are r/w.
*/
switch (mac->type) {
- /* there are several bits on newer hardware that are r/w */
case e1000_82571:
case e1000_82572:
case e1000_80003es2lan:
toggle = 0x7FFFF3FF;
break;
- default:
+ default:
toggle = 0x7FFFF033;
break;
}
@@ -928,7 +926,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
}
/* If the checksum is not correct, return an error; otherwise the test passed */
- if ((checksum != (u16) NVM_SUM) && !(*data))
+ if ((checksum != (u16)NVM_SUM) && !(*data))
*data = 2;
return *data;
@@ -936,7 +934,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
static irqreturn_t e1000_test_intr(int __always_unused irq, void *data)
{
- struct net_device *netdev = (struct net_device *) data;
+ struct net_device *netdev = (struct net_device *)data;
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -969,8 +967,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
netdev)) {
shared_int = 0;
- } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
- netdev->name, netdev)) {
+ } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, netdev->name,
+ netdev)) {
*data = 1;
ret_val = -1;
goto out;
@@ -1080,28 +1078,33 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
struct e1000_ring *tx_ring = &adapter->test_tx_ring;
struct e1000_ring *rx_ring = &adapter->test_rx_ring;
struct pci_dev *pdev = adapter->pdev;
+ struct e1000_buffer *buffer_info;
int i;
if (tx_ring->desc && tx_ring->buffer_info) {
for (i = 0; i < tx_ring->count; i++) {
- if (tx_ring->buffer_info[i].dma)
+ buffer_info = &tx_ring->buffer_info[i];
+
+ if (buffer_info->dma)
dma_unmap_single(&pdev->dev,
- tx_ring->buffer_info[i].dma,
- tx_ring->buffer_info[i].length,
- DMA_TO_DEVICE);
- if (tx_ring->buffer_info[i].skb)
- dev_kfree_skb(tx_ring->buffer_info[i].skb);
+ buffer_info->dma,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ if (buffer_info->skb)
+ dev_kfree_skb(buffer_info->skb);
}
}
if (rx_ring->desc && rx_ring->buffer_info) {
for (i = 0; i < rx_ring->count; i++) {
- if (rx_ring->buffer_info[i].dma)
+ buffer_info = &rx_ring->buffer_info[i];
+
+ if (buffer_info->dma)
dma_unmap_single(&pdev->dev,
- rx_ring->buffer_info[i].dma,
- 2048, DMA_FROM_DEVICE);
- if (rx_ring->buffer_info[i].skb)
- dev_kfree_skb(rx_ring->buffer_info[i].skb);
+ buffer_info->dma,
+ 2048, DMA_FROM_DEVICE);
+ if (buffer_info->skb)
+ dev_kfree_skb(buffer_info->skb);
}
}
@@ -1138,8 +1141,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->count = E1000_DEFAULT_TXD;
tx_ring->buffer_info = kcalloc(tx_ring->count,
- sizeof(struct e1000_buffer),
- GFP_KERNEL);
+ sizeof(struct e1000_buffer), GFP_KERNEL);
if (!tx_ring->buffer_info) {
ret_val = 1;
goto err_nomem;
@@ -1156,8 +1158,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- ew32(TDBAL(0), ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
- ew32(TDBAH(0), ((u64) tx_ring->dma >> 32));
+ ew32(TDBAL(0), ((u64)tx_ring->dma & 0x00000000FFFFFFFF));
+ ew32(TDBAH(0), ((u64)tx_ring->dma >> 32));
ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc));
ew32(TDH(0), 0);
ew32(TDT(0), 0);
@@ -1179,8 +1181,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[i].length = skb->len;
tx_ring->buffer_info[i].dma =
- dma_map_single(&pdev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev,
tx_ring->buffer_info[i].dma)) {
ret_val = 4;
@@ -1200,8 +1202,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rx_ring->count = E1000_DEFAULT_RXD;
rx_ring->buffer_info = kcalloc(rx_ring->count,
- sizeof(struct e1000_buffer),
- GFP_KERNEL);
+ sizeof(struct e1000_buffer), GFP_KERNEL);
if (!rx_ring->buffer_info) {
ret_val = 5;
goto err_nomem;
@@ -1220,16 +1221,16 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rctl = er32(RCTL);
if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
ew32(RCTL, rctl & ~E1000_RCTL_EN);
- ew32(RDBAL(0), ((u64) rx_ring->dma & 0xFFFFFFFF));
- ew32(RDBAH(0), ((u64) rx_ring->dma >> 32));
+ ew32(RDBAL(0), ((u64)rx_ring->dma & 0xFFFFFFFF));
+ ew32(RDBAH(0), ((u64)rx_ring->dma >> 32));
ew32(RDLEN(0), rx_ring->size);
ew32(RDH(0), 0);
ew32(RDT(0), 0);
rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
- E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
- E1000_RCTL_SBP | E1000_RCTL_SECRC |
- E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
- (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+ E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
+ E1000_RCTL_SBP | E1000_RCTL_SECRC |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
ew32(RCTL, rctl);
for (i = 0; i < rx_ring->count; i++) {
@@ -1244,8 +1245,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
skb_reserve(skb, NET_IP_ALIGN);
rx_ring->buffer_info[i].skb = skb;
rx_ring->buffer_info[i].dma =
- dma_map_single(&pdev->dev, skb->data, 2048,
- DMA_FROM_DEVICE);
+ dma_map_single(&pdev->dev, skb->data, 2048,
+ DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev,
rx_ring->buffer_info[i].dma)) {
ret_val = 8;
@@ -1296,7 +1297,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
ew32(CTRL, ctrl_reg);
e1e_flush();
- udelay(500);
+ usleep_range(500, 1000);
return 0;
}
@@ -1322,7 +1323,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
/* Assert SW reset for above settings to take effect */
hw->phy.ops.commit(hw);
- mdelay(1);
+ usleep_range(1000, 2000);
/* Force Full Duplex */
e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C);
@@ -1363,7 +1364,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
/* force 1000, set loopback */
e1e_wphy(hw, MII_BMCR, 0x4140);
- mdelay(250);
+ msleep(250);
/* Now set up the MAC to the same speed/duplex as the PHY. */
ctrl_reg = er32(CTRL);
@@ -1395,7 +1396,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
if (hw->phy.type == e1000_phy_m88)
e1000_phy_disable_receiver(adapter);
- udelay(500);
+ usleep_range(500, 1000);
return 0;
}
@@ -1431,8 +1432,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
/* special write to serdes control register to enable SerDes analog
* loopback
*/
-#define E1000_SERDES_LB_ON 0x410
- ew32(SCTL, E1000_SERDES_LB_ON);
+ ew32(SCTL, E1000_SCTL_ENABLE_SERDES_LOOPBACK);
e1e_flush();
usleep_range(10000, 20000);
@@ -1526,8 +1526,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
case e1000_82572:
if (hw->phy.media_type == e1000_media_type_fiber ||
hw->phy.media_type == e1000_media_type_internal_serdes) {
-#define E1000_SERDES_LB_OFF 0x400
- ew32(SCTL, E1000_SERDES_LB_OFF);
+ ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
e1e_flush();
usleep_range(10000, 20000);
break;
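
The named defines carry the same values the removed local macros did; as far as the defines show, enable and disable differ in a single bit:

/* E1000_SCTL_DISABLE_SERDES_LOOPBACK = 0x0400
 * E1000_SCTL_ENABLE_SERDES_LOOPBACK  = 0x0410
 * Only bit 4 differs, presumably the analog-loopback enable within
 * the SerDes control register.
 */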
@@ -1564,7 +1563,7 @@ static int e1000_check_lbtest_frame(struct sk_buff *skb,
frame_size &= ~1;
if (*(skb->data + 3) == 0xFF)
if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
- (*(skb->data + frame_size / 2 + 12) == 0xAF))
+ (*(skb->data + frame_size / 2 + 12) == 0xAF))
return 0;
return 13;
}
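
For the 1024-byte frames this test uses, the markers checked above sit at fixed offsets, planted by the matching frame builder in this file:

/* Expected layout for frame_size = 1024:
 *   data[3]   == 0xFF   (from the initial 0xFF fill)
 *   data[522] == 0xBE   (frame_size / 2 + 10)
 *   data[524] == 0xAF   (frame_size / 2 + 12)
 * Any mismatch yields error code 13.
 */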
@@ -1575,6 +1574,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
struct e1000_ring *rx_ring = &adapter->test_rx_ring;
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
+ struct e1000_buffer *buffer_info;
int i, j, k, l;
int lc;
int good_cnt;
@@ -1595,14 +1595,17 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
k = 0;
l = 0;
- for (j = 0; j <= lc; j++) { /* loop count loop */
- for (i = 0; i < 64; i++) { /* send the packets */
- e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
- 1024);
+ /* loop count loop */
+ for (j = 0; j <= lc; j++) {
+ /* send the packets */
+ for (i = 0; i < 64; i++) {
+ buffer_info = &tx_ring->buffer_info[k];
+
+ e1000_create_lbtest_frame(buffer_info->skb, 1024);
dma_sync_single_for_device(&pdev->dev,
- tx_ring->buffer_info[k].dma,
- tx_ring->buffer_info[k].length,
- DMA_TO_DEVICE);
+ buffer_info->dma,
+ buffer_info->length,
+ DMA_TO_DEVICE);
k++;
if (k == tx_ring->count)
k = 0;
@@ -1612,13 +1615,16 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
msleep(200);
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
- do { /* receive the sent packets */
+ /* receive the sent packets */
+ do {
+ buffer_info = &rx_ring->buffer_info[l];
+
dma_sync_single_for_cpu(&pdev->dev,
- rx_ring->buffer_info[l].dma, 2048,
- DMA_FROM_DEVICE);
+ buffer_info->dma, 2048,
+ DMA_FROM_DEVICE);
- ret_val = e1000_check_lbtest_frame(
- rx_ring->buffer_info[l].skb, 1024);
+ ret_val = e1000_check_lbtest_frame(buffer_info->skb,
+ 1024);
if (!ret_val)
good_cnt++;
l++;
@@ -1637,7 +1643,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
ret_val = 14; /* error code for timeout */
break;
}
- } /* end loop count loop */
+ }
return ret_val;
}
@@ -1696,7 +1702,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
/* On some Phy/switch combinations, link establishment
* can take a few seconds more than expected.
*/
- msleep(5000);
+ msleep_interruptible(5000);
if (!(er32(STATUS) & E1000_STATUS_LU))
*data = 1;
@@ -1980,12 +1986,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
switch (e1000_gstrings_stats[i].type) {
case NETDEV_STATS:
- p = (char *) &net_stats +
- e1000_gstrings_stats[i].stat_offset;
+ p = (char *)&net_stats +
+ e1000_gstrings_stats[i].stat_offset;
break;
case E1000_STATS:
- p = (char *) adapter +
- e1000_gstrings_stats[i].stat_offset;
+ p = (char *)adapter +
+ e1000_gstrings_stats[i].stat_offset;
break;
default:
data[i] = 0;
@@ -1993,7 +1999,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
}
data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
}
@@ -2069,23 +2075,20 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u16 cap_addr, adv_addr, lpa_addr, pcs_stat_addr, phy_data, lpi_ctrl;
- u32 status, ret_val;
+ u16 cap_addr, lpa_addr, pcs_stat_addr, phy_data;
+ u32 ret_val;
- if (!(adapter->flags & FLAG_IS_ICH) ||
- !(adapter->flags2 & FLAG2_HAS_EEE))
+ if (!(adapter->flags2 & FLAG2_HAS_EEE))
return -EOPNOTSUPP;
switch (hw->phy.type) {
case e1000_phy_82579:
cap_addr = I82579_EEE_CAPABILITY;
- adv_addr = I82579_EEE_ADVERTISEMENT;
lpa_addr = I82579_EEE_LP_ABILITY;
pcs_stat_addr = I82579_EEE_PCS_STATUS;
break;
case e1000_phy_i217:
cap_addr = I217_EEE_CAPABILITY;
- adv_addr = I217_EEE_ADVERTISEMENT;
lpa_addr = I217_EEE_LP_ABILITY;
pcs_stat_addr = I217_EEE_PCS_STATUS;
break;
@@ -2104,10 +2107,7 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data);
/* EEE Advertised */
- ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &phy_data);
- if (ret_val)
- goto release;
- edata->advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+ edata->advertised = mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
/* EEE Link Partner Advertised */
ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data);
@@ -2125,25 +2125,11 @@ release:
if (ret_val)
return -ENODATA;
- e1e_rphy(hw, I82579_LPI_CTRL, &lpi_ctrl);
- status = er32(STATUS);
-
/* Result of the EEE auto negotiation - there is no register that
* has the status of the EEE negotiation so do a best-guess based
- * on whether both Tx and Rx LPI indications have been received or
- * base it on the link speed, the EEE advertised speeds on both ends
- * and the speeds on which EEE is enabled locally.
+ * on whether Tx or Rx LPI indications have been received.
*/
- if (((phy_data & E1000_EEE_TX_LPI_RCVD) &&
- (phy_data & E1000_EEE_RX_LPI_RCVD)) ||
- ((status & E1000_STATUS_SPEED_100) &&
- (edata->advertised & ADVERTISED_100baseT_Full) &&
- (edata->lp_advertised & ADVERTISED_100baseT_Full) &&
- (lpi_ctrl & I82579_LPI_CTRL_100_ENABLE)) ||
- ((status & E1000_STATUS_SPEED_1000) &&
- (edata->advertised & ADVERTISED_1000baseT_Full) &&
- (edata->lp_advertised & ADVERTISED_1000baseT_Full) &&
- (lpi_ctrl & I82579_LPI_CTRL_1000_ENABLE)))
+ if (phy_data & (E1000_EEE_TX_LPI_RCVD | E1000_EEE_RX_LPI_RCVD))
edata->eee_active = true;
edata->eee_enabled = !hw->dev_spec.ich8lan.eee_disable;
@@ -2160,19 +2146,10 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
struct ethtool_eee eee_curr;
s32 ret_val;
- if (!(adapter->flags & FLAG_IS_ICH) ||
- !(adapter->flags2 & FLAG2_HAS_EEE))
- return -EOPNOTSUPP;
-
ret_val = e1000e_get_eee(netdev, &eee_curr);
if (ret_val)
return ret_val;
- if (eee_curr.advertised != edata->advertised) {
- e_err("Setting EEE advertisement is not supported\n");
- return -EINVAL;
- }
-
if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
e_err("Setting EEE tx-lpi is not supported\n");
return -EINVAL;
@@ -2183,16 +2160,21 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EINVAL;
}
- if (hw->dev_spec.ich8lan.eee_disable != !edata->eee_enabled) {
- hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
-
- /* reset the link */
- if (netif_running(netdev))
- e1000e_reinit_locked(adapter);
- else
- e1000e_reset(adapter);
+ if (edata->advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
+ e_err("EEE advertisement supports only 100TX and/or 1000T full-duplex\n");
+ return -EINVAL;
}
+ adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+
+ hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
+
+ /* reset the link */
+ if (netif_running(netdev))
+ e1000e_reinit_locked(adapter);
+ else
+ e1000e_reset(adapter);
+
return 0;
}
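
The advertisement now round-trips through the mdio.h helpers (hence the new <linux/mdio.h> include in e1000.h); a minimal sketch, assuming <linux/ethtool.h> and <linux/mdio.h> are in scope:

/* Round-trip sketch; both helpers are from <linux/mdio.h>. */
static u32 eee_advert_roundtrip(void)
{
	u16 adv = ethtool_adv_to_mmd_eee_adv_t(ADVERTISED_100baseT_Full |
					       ADVERTISED_1000baseT_Full);

	return mmd_eee_adv_to_ethtool_adv_t(adv); /* same two bits back */
}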
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 1e6b889aee87..84850f7a23e4 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -167,7 +167,7 @@ enum e1000_1000t_rx_status {
e1000_1000t_rx_status_undefined = 0xFF
};
-enum e1000_rev_polarity{
+enum e1000_rev_polarity {
e1000_rev_polarity_normal = 0,
e1000_rev_polarity_reversed,
e1000_rev_polarity_undefined = 0xFF
@@ -545,7 +545,7 @@ struct e1000_mac_info {
u16 mta_reg_count;
/* Maximum size of the MTA register table in all supported adapters */
- #define MAX_MTA_REG 128
+#define MAX_MTA_REG 128
u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 121a865c7fbd..ad9d8f2dd868 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -61,15 +61,15 @@
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
struct ich8_hsfsts {
- u16 flcdone :1; /* bit 0 Flash Cycle Done */
- u16 flcerr :1; /* bit 1 Flash Cycle Error */
- u16 dael :1; /* bit 2 Direct Access error Log */
- u16 berasesz :2; /* bit 4:3 Sector Erase Size */
- u16 flcinprog :1; /* bit 5 flash cycle in Progress */
- u16 reserved1 :2; /* bit 13:6 Reserved */
- u16 reserved2 :6; /* bit 13:6 Reserved */
- u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
- u16 flockdn :1; /* bit 15 Flash Config Lock-Down */
+ u16 flcdone:1; /* bit 0 Flash Cycle Done */
+ u16 flcerr:1; /* bit 1 Flash Cycle Error */
+ u16 dael:1; /* bit 2 Direct Access error Log */
+ u16 berasesz:2; /* bit 4:3 Sector Erase Size */
+ u16 flcinprog:1; /* bit 5 flash cycle in Progress */
+ u16 reserved1:2; /* bit 7:6 Reserved */
+ u16 reserved2:6; /* bit 13:8 Reserved */
+ u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
+ u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
} hsf_status;
u16 regval;
};
@@ -78,11 +78,11 @@ union ich8_hws_flash_status {
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
struct ich8_hsflctl {
- u16 flcgo :1; /* 0 Flash Cycle Go */
- u16 flcycle :2; /* 2:1 Flash Cycle */
- u16 reserved :5; /* 7:3 Reserved */
- u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
- u16 flockdn :6; /* 15:10 Reserved */
+ u16 flcgo:1; /* 0 Flash Cycle Go */
+ u16 flcycle:2; /* 2:1 Flash Cycle */
+ u16 reserved:5; /* 7:3 Reserved */
+ u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
+ u16 flockdn:6; /* 15:10 Reserved */
} hsf_ctrl;
u16 regval;
};
@@ -90,10 +90,10 @@ union ich8_hws_flash_ctrl {
/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
struct ich8_flracc {
- u32 grra :8; /* 0:7 GbE region Read Access */
- u32 grwa :8; /* 8:15 GbE region Write Access */
- u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
- u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
+ u32 grra:8; /* 0:7 GbE region Read Access */
+ u32 grwa:8; /* 8:15 GbE region Write Access */
+ u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
+ u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
} hsf_flregacc;
u16 regval;
};
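
The unions let the flash helpers switch between the raw 16-bit register value and named fields without shifting by hand. A short access sketch (hypothetical helper) using the driver's flash accessors; the error bits clear on writing 1:

static void e1000_clear_flash_error(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;

	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);	/* read once */
	if (hsfsts.hsf_status.flcerr)			/* cycle error? */
		/* write the value back: set error bits clear on write-1 */
		ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
}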
@@ -142,6 +142,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
+static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
@@ -312,7 +313,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
ew32(CTRL, mac_reg);
e1e_flush();
- udelay(10);
+ usleep_range(10, 20);
mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
ew32(CTRL, mac_reg);
e1e_flush();
@@ -548,8 +549,8 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
/* find total size of the NVM, then cut in half since the total
* size represents two separate NVM banks.
*/
- nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
- << FLASH_SECTOR_ADDR_SHIFT;
+ nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
+ << FLASH_SECTOR_ADDR_SHIFT);
nvm->flash_bank_size /= 2;
/* Adjust to word count */
nvm->flash_bank_size /= sizeof(u16);
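
A worked pass through the bank-size computation, with hypothetical sector addresses and assuming the driver's 4 KB sector shift (FLASH_SECTOR_ADDR_SHIFT = 12):

/* sector_base_addr = 0x00, sector_end_addr = 0x10:
 *   total = (0x10 - 0x00) << 12 = 65536 bytes
 *   bank  = 65536 / 2           = 32768 bytes  (two NVM banks)
 *   words = 32768 / sizeof(u16) = 16384 words per bank
 */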
@@ -636,6 +637,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
if (mac->type == e1000_pch_lpt) {
mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
mac->ops.rar_set = e1000_rar_set_pch_lpt;
+ mac->ops.setup_physical_interface =
+ e1000_setup_copper_link_pch_lpt;
}
/* Enable PCS Lock-loss workaround for ICH8 */
@@ -692,7 +695,7 @@ s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
*
* Assumes the SW/FW/HW Semaphore is already acquired.
**/
-static s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
+s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
{
return __e1000_access_emi_reg_locked(hw, addr, &data, false);
}
@@ -709,11 +712,22 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
s32 ret_val;
- u16 lpi_ctrl;
+ u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
- if ((hw->phy.type != e1000_phy_82579) &&
- (hw->phy.type != e1000_phy_i217))
+ switch (hw->phy.type) {
+ case e1000_phy_82579:
+ lpa = I82579_EEE_LP_ABILITY;
+ pcs_status = I82579_EEE_PCS_STATUS;
+ adv_addr = I82579_EEE_ADVERTISEMENT;
+ break;
+ case e1000_phy_i217:
+ lpa = I217_EEE_LP_ABILITY;
+ pcs_status = I217_EEE_PCS_STATUS;
+ adv_addr = I217_EEE_ADVERTISEMENT;
+ break;
+ default:
return 0;
+ }
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
@@ -728,34 +742,24 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
/* Enable EEE if not disabled by user */
if (!dev_spec->eee_disable) {
- u16 lpa, pcs_status, data;
-
/* Save off link partner's EEE ability */
- switch (hw->phy.type) {
- case e1000_phy_82579:
- lpa = I82579_EEE_LP_ABILITY;
- pcs_status = I82579_EEE_PCS_STATUS;
- break;
- case e1000_phy_i217:
- lpa = I217_EEE_LP_ABILITY;
- pcs_status = I217_EEE_PCS_STATUS;
- break;
- default:
- ret_val = -E1000_ERR_PHY;
- goto release;
- }
ret_val = e1000_read_emi_reg_locked(hw, lpa,
&dev_spec->eee_lp_ability);
if (ret_val)
goto release;
+ /* Read EEE advertisement */
+ ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
+ if (ret_val)
+ goto release;
+
/* Enable EEE only for speeds in which the link partner is
- * EEE capable.
+ * EEE capable and for which we advertise EEE.
*/
- if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
+ if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
- if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
+ if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
e1e_rphy_locked(hw, MII_LPA, &data);
if (data & LPA_100FULL)
lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
@@ -767,13 +771,13 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
dev_spec->eee_lp_ability &=
~I82579_EEE_100_SUPPORTED;
}
-
- /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
- ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
- if (ret_val)
- goto release;
}
+ /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
+ ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
+ if (ret_val)
+ goto release;
+
ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
release:
hw->phy.ops.release(hw);
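
With the new gating, a speed's LPI enable requires the bit in both the local advertisement and the partner ability; a concrete example using the MMD 7.60/7.61 bit values from ich8lan.h:

/* Hypothetical: we advertise 100TX only, the partner both speeds:
 *   adv = 0x0002 (100TX), lpa = 0x0006 (100TX | 1000T)
 *   adv & lpa & I82579_EEE_1000_SUPPORTED -> 0, 1000 LPI stays off
 *   adv & lpa & I82579_EEE_100_SUPPORTED  -> set, 100 LPI may be
 *   enabled, still subject to the LPA_100FULL resolution check.
 */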
@@ -835,6 +839,94 @@ release:
}
/**
+ * e1000_platform_pm_pch_lpt - Set platform power management values
+ * @hw: pointer to the HW structure
+ * @link: bool indicating link status
+ *
+ * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
+ * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
+ * when link is up (which must not exceed the maximum latency supported
+ * by the platform), otherwise specify there is no LTR requirement.
+ * Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop
+ * latencies in the LTR Extended Capability Structure in the PCIe Extended
+ * Capability register set, on this device LTR is set by writing the
+ * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
+ * set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
+ * message to the PMC.
+ **/
+static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
+{
+ u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
+ link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
+ u16 lat_enc = 0; /* latency encoded */
+
+ if (link) {
+ u16 speed, duplex, scale = 0;
+ u16 max_snoop, max_nosnoop;
+ u16 max_ltr_enc; /* max LTR latency encoded */
+ s64 lat_ns; /* latency (ns) */
+ s64 value;
+ u32 rxa;
+
+ if (!hw->adapter->max_frame_size) {
+ e_dbg("max_frame_size not set.\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
+ if (!speed) {
+ e_dbg("Speed not set.\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Rx Packet Buffer Allocation size (KB) */
+ rxa = er32(PBA) & E1000_PBA_RXA_MASK;
+
+ /* Determine the maximum latency tolerated by the device.
+ *
+ * Per the PCIe spec, the tolerated latencies are encoded as
+ * a 3-bit encoded scale (only 0-5 are valid) multiplied by
+ * a 10-bit value (0-1023) to provide a range from 1 ns to
+ * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
+ * 1=2^5ns, 2=2^10ns,...5=2^25ns.
+ */
+ lat_ns = ((s64)rxa * 1024 -
+ (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000;
+ if (lat_ns < 0)
+ lat_ns = 0;
+ else
+ do_div(lat_ns, speed);
+
+ value = lat_ns;
+ while (value > PCI_LTR_VALUE_MASK) {
+ scale++;
+ value = DIV_ROUND_UP(value, (1 << 5));
+ }
+ if (scale > E1000_LTRV_SCALE_MAX) {
+ e_dbg("Invalid LTR latency scale %d\n", scale);
+ return -E1000_ERR_CONFIG;
+ }
+ lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value);
+
+ /* Determine the maximum latency tolerated by the platform */
+ pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT,
+ &max_snoop);
+ pci_read_config_word(hw->adapter->pdev,
+ E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
+ max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
+
+ if (lat_enc > max_ltr_enc)
+ lat_enc = max_ltr_enc;
+ }
+
+ /* Set Snoop and No-Snoop latencies the same */
+ reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
+ ew32(LTRV, reg);
+
+ return 0;
+}
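
A worked pass through the encoding above, with hypothetical link parameters:

/* rxa = 26 KB, max_frame_size = 1522, speed = 1000 Mb/s:
 *   lat_ns = (26 * 1024 - 2 * 1522) * 8 * 1000 / 1000 = 188640
 * 188640 exceeds PCI_LTR_VALUE_MASK (1023), so scale by 2^5 twice:
 *   DIV_ROUND_UP(188640, 32) = 5895
 *   DIV_ROUND_UP(5895, 32)   = 185    (scale = 2)
 *   lat_enc = (2 << PCI_LTR_SCALE_SHIFT) | 185 = 0x8B9
 * i.e. the device tolerates 185 * 2^10 ns, roughly 189.4 us.
 */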
+
+/**
* e1000_check_for_copper_link_ich8lan - Check for link (Copper)
* @hw: pointer to the HW structure
*
@@ -871,6 +963,34 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
return ret_val;
}
+ /* When connected at 10Mbps half-duplex, 82579 parts are excessively
+ * aggressive, resulting in many collisions. To avoid this, increase
+ * the IPG and reduce Rx latency in the PHY.
+ */
+ if ((hw->mac.type == e1000_pch2lan) && link) {
+ u32 reg;
+ reg = er32(STATUS);
+ if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
+ reg = er32(TIPG);
+ reg &= ~E1000_TIPG_IPGT_MASK;
+ reg |= 0xFF;
+ ew32(TIPG, reg);
+
+ /* Reduce Rx latency in analog PHY */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val =
+ e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0);
+
+ hw->phy.ops.release(hw);
+
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
/* Work-around I218 hang issue */
if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
(hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
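
The 10/half test in this hunk relies on the SPEED field reading as zero at 10 Mb/s; a decoding sketch (hypothetical helper) using the new E1000_STATUS_SPEED_MASK, with FD as bit 0 of STATUS:

static bool e1000_link_is_10_half(struct e1000_hw *hw)
{
	u32 status = er32(STATUS);	/* SPEED = bits 7:6, FD = bit 0 */

	/* 00b in SPEED selects 10 Mb/s; FD clear means half duplex */
	return !(status & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK));
}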
@@ -879,6 +999,15 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
return ret_val;
}
+ if (hw->mac.type == e1000_pch_lpt) {
+ /* Set platform power management values for
+ * Latency Tolerance Reporting (LTR)
+ */
+ ret_val = e1000_platform_pm_pch_lpt(hw, link);
+ if (ret_val)
+ return ret_val;
+ }
+
/* Clear link partner's EEE ability */
hw->dev_spec.ich8lan.eee_lp_ability = 0;
@@ -1002,10 +1131,6 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
- /* Disable EEE by default until IEEE802.3az spec is finalized */
- if (adapter->flags2 & FLAG2_HAS_EEE)
- adapter->hw.dev_spec.ich8lan.eee_disable = true;
-
return 0;
}
@@ -1134,9 +1259,9 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
u32 fwsm;
fwsm = er32(FWSM);
- return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
- ((fwsm & E1000_FWSM_MODE_MASK) ==
- (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+ return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
}
/**
@@ -1153,7 +1278,7 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
fwsm = er32(FWSM);
return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
- (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+ (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}
/**
@@ -1440,8 +1565,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
word_addr = (u16)(cnf_base_addr << 1);
for (i = 0; i < cnf_size; i++) {
- ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
- &reg_data);
+ ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data);
if (ret_val)
goto release;
@@ -1501,13 +1625,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
if (ret_val)
goto release;
- status_reg &= BM_CS_STATUS_LINK_UP |
- BM_CS_STATUS_RESOLVED |
- BM_CS_STATUS_SPEED_MASK;
+ status_reg &= (BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK);
if (status_reg == (BM_CS_STATUS_LINK_UP |
- BM_CS_STATUS_RESOLVED |
- BM_CS_STATUS_SPEED_1000))
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_1000))
k1_enable = false;
}
@@ -1516,13 +1640,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
if (ret_val)
goto release;
- status_reg &= HV_M_STATUS_LINK_UP |
- HV_M_STATUS_AUTONEG_COMPLETE |
- HV_M_STATUS_SPEED_MASK;
+ status_reg &= (HV_M_STATUS_LINK_UP |
+ HV_M_STATUS_AUTONEG_COMPLETE |
+ HV_M_STATUS_SPEED_MASK);
if (status_reg == (HV_M_STATUS_LINK_UP |
- HV_M_STATUS_AUTONEG_COMPLETE |
- HV_M_STATUS_SPEED_1000))
+ HV_M_STATUS_AUTONEG_COMPLETE |
+ HV_M_STATUS_SPEED_1000))
k1_enable = false;
}
@@ -1579,7 +1703,7 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
if (ret_val)
return ret_val;
- udelay(20);
+ usleep_range(20, 40);
ctrl_ext = er32(CTRL_EXT);
ctrl_reg = er32(CTRL);
@@ -1589,11 +1713,11 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
e1e_flush();
- udelay(20);
+ usleep_range(20, 40);
ew32(CTRL, ctrl_reg);
ew32(CTRL_EXT, ctrl_ext);
e1e_flush();
- udelay(20);
+ usleep_range(20, 40);
return 0;
}
@@ -1667,7 +1791,6 @@ release:
return ret_val;
}
-
/**
* e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
* @hw: pointer to the HW structure
@@ -1834,7 +1957,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
* SHRAL/H) and initial CRC values to the MAC
*/
for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
- u8 mac_addr[ETH_ALEN] = {0};
+ u8 mac_addr[ETH_ALEN] = { 0 };
u32 addr_high, addr_low;
addr_high = er32(RAH(i));
@@ -1865,8 +1988,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
ew32(RCTL, mac_reg);
ret_val = e1000e_read_kmrn_reg(hw,
- E1000_KMRNCTRLSTA_CTRL_OFFSET,
- &data);
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
if (ret_val)
return ret_val;
ret_val = e1000e_write_kmrn_reg(hw,
@@ -1875,8 +1998,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
ret_val = e1000e_read_kmrn_reg(hw,
- E1000_KMRNCTRLSTA_HD_CTRL,
- &data);
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
if (ret_val)
return ret_val;
data &= ~(0xF << 8);
@@ -1923,8 +2046,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
ew32(RCTL, mac_reg);
ret_val = e1000e_read_kmrn_reg(hw,
- E1000_KMRNCTRLSTA_CTRL_OFFSET,
- &data);
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
if (ret_val)
return ret_val;
ret_val = e1000e_write_kmrn_reg(hw,
@@ -1933,8 +2056,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
ret_val = e1000e_read_kmrn_reg(hw,
- E1000_KMRNCTRLSTA_HD_CTRL,
- &data);
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
if (ret_val)
return ret_val;
data &= ~(0xF << 8);
@@ -2100,7 +2223,7 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
do {
data = er32(STATUS);
data &= E1000_STATUS_LAN_INIT_DONE;
- udelay(100);
+ usleep_range(100, 200);
} while ((!data) && --loop);
/* If basic configuration is incomplete before the above loop
@@ -2445,7 +2568,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
/* Check bank 0 */
ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
- &sig_byte);
+ &sig_byte);
if (ret_val)
return ret_val;
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
@@ -2456,8 +2579,8 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
/* Check bank 1 */
ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
- bank1_offset,
- &sig_byte);
+ bank1_offset,
+ &sig_byte);
if (ret_val)
return ret_val;
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
@@ -2510,8 +2633,8 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
ret_val = 0;
for (i = 0; i < words; i++) {
- if (dev_spec->shadow_ram[offset+i].modified) {
- data[i] = dev_spec->shadow_ram[offset+i].value;
+ if (dev_spec->shadow_ram[offset + i].modified) {
+ data[i] = dev_spec->shadow_ram[offset + i].value;
} else {
ret_val = e1000_read_flash_word_ich8lan(hw,
act_offset + i,
@@ -2696,8 +2819,8 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
return -E1000_ERR_NVM;
- flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
- hw->nvm.flash_base_addr;
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
do {
udelay(1);
@@ -2714,8 +2837,9 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
- ret_val = e1000_flash_cycle_ich8lan(hw,
- ICH_FLASH_READ_COMMAND_TIMEOUT);
+ ret_val =
+ e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_READ_COMMAND_TIMEOUT);
/* Check if FCERR is set to 1; if so, clear it
* and try the whole sequence a few more times, else
@@ -2774,8 +2898,8 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
nvm->ops.acquire(hw);
for (i = 0; i < words; i++) {
- dev_spec->shadow_ram[offset+i].modified = true;
- dev_spec->shadow_ram[offset+i].value = data[i];
+ dev_spec->shadow_ram[offset + i].modified = true;
+ dev_spec->shadow_ram[offset + i].value = data[i];
}
nvm->ops.release(hw);
@@ -2844,8 +2968,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
data = dev_spec->shadow_ram[i].value;
} else {
ret_val = e1000_read_flash_word_ich8lan(hw, i +
- old_bank_offset,
- &data);
+ old_bank_offset,
+ &data);
if (ret_val)
break;
}
@@ -2863,7 +2987,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
/* Convert offset to bytes. */
act_offset = (i + new_bank_offset) << 1;
- udelay(100);
+ usleep_range(100, 200);
/* Write the bytes to the new bank. */
ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
act_offset,
@@ -2871,10 +2995,10 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
if (ret_val)
break;
- udelay(100);
+ usleep_range(100, 200);
ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
- act_offset + 1,
- (u8)(data >> 8));
+ act_offset + 1,
+ (u8)(data >> 8));
if (ret_val)
break;
}
@@ -3050,8 +3174,8 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
offset > ICH_FLASH_LINEAR_ADDR_MASK)
return -E1000_ERR_NVM;
- flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
- hw->nvm.flash_base_addr;
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
do {
udelay(1);
@@ -3062,7 +3186,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
- hsflctl.hsf_ctrl.fldbcount = size -1;
+ hsflctl.hsf_ctrl.fldbcount = size - 1;
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
@@ -3078,8 +3202,9 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
/* Check if FCERR is set to 1; if so, clear it
* and try the whole sequence a few more times, else done
*/
- ret_val = e1000_flash_cycle_ich8lan(hw,
- ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+ ret_val =
+ e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_WRITE_COMMAND_TIMEOUT);
if (!ret_val)
break;
@@ -3138,7 +3263,7 @@ static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
for (program_retries = 0; program_retries < 100; program_retries++) {
e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
- udelay(100);
+ usleep_range(100, 200);
ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
if (!ret_val)
break;
@@ -3209,8 +3334,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
flash_linear_addr = hw->nvm.flash_base_addr;
flash_linear_addr += (bank) ? flash_bank_size : 0;
- for (j = 0; j < iteration ; j++) {
+ for (j = 0; j < iteration; j++) {
do {
+ u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
+
/* Steps */
ret_val = e1000_flash_cycle_init_ich8lan(hw);
if (ret_val)
@@ -3230,8 +3357,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
flash_linear_addr += (j * sector_size);
ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
- ret_val = e1000_flash_cycle_ich8lan(hw,
- ICH_FLASH_ERASE_COMMAND_TIMEOUT);
+ ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
if (!ret_val)
break;
@@ -3270,8 +3396,7 @@ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
return ret_val;
}
- if (*data == ID_LED_RESERVED_0000 ||
- *data == ID_LED_RESERVED_FFFF)
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
*data = ID_LED_DEFAULT_ICH8LAN;
return 0;
@@ -3511,9 +3636,9 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
/* Initialize identification LED */
ret_val = mac->ops.id_led_init(hw);
+ /* An error is not fatal and we should not stop init due to this */
if (ret_val)
e_dbg("Error initializing identification LED\n");
- /* This is not fatal and we should not stop init due to this */
/* Setup the receive address. */
e1000e_init_rx_addrs(hw, mac->rar_entry_count);
@@ -3541,16 +3666,16 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
/* Set the transmit descriptor write-back policy for both queues */
txdctl = er32(TXDCTL(0));
- txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB;
- txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
- E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+ txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB);
+ txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
+ E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
ew32(TXDCTL(0), txdctl);
txdctl = er32(TXDCTL(1));
- txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB;
- txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
- E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+ txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB);
+ txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
+ E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
ew32(TXDCTL(1), txdctl);
/* ICH8 has opposite polarity of no_snoop bits.
@@ -3559,7 +3684,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
if (mac->type == e1000_ich8lan)
snoop = PCIE_ICH8_SNOOP_ALL;
else
- snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
+ snoop = (u32)~(PCIE_NO_SNOOP_ALL);
e1000e_set_pcie_no_snoop(hw, snoop);
ctrl_ext = er32(CTRL_EXT);
@@ -3575,6 +3700,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
return ret_val;
}
+
/**
* e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
* @hw: pointer to the HW structure
@@ -3686,8 +3812,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
*/
hw->fc.current_mode = hw->fc.requested_mode;
- e_dbg("After fix-ups FlowControl is now = %x\n",
- hw->fc.current_mode);
+ e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
/* Continue to configure the copper link. */
ret_val = hw->mac.ops.setup_physical_interface(hw);
@@ -3737,12 +3862,12 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- &reg_data);
+ &reg_data);
if (ret_val)
return ret_val;
reg_data |= 0x3F;
ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- reg_data);
+ reg_data);
if (ret_val)
return ret_val;
@@ -3760,7 +3885,6 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
break;
case e1000_phy_82577:
case e1000_phy_82579:
- case e1000_phy_i217:
ret_val = e1000_copper_link_setup_82577(hw);
if (ret_val)
return ret_val;
@@ -3796,6 +3920,31 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
}
/**
+ * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY specific link setup function and then calls the
+ * generic setup_copper_link to finish configuring the link for
+ * Lynxpoint PCH devices
+ **/
+static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ew32(CTRL, ctrl);
+
+ ret_val = e1000_copper_link_setup_82577(hw);
+ if (ret_val)
+ return ret_val;
+
+ return e1000e_setup_copper_link(hw);
+}
+
+/**
* e1000_get_link_up_info_ich8lan - Get current link speed and duplex
* @hw: pointer to the HW structure
* @speed: pointer to store current link speed
@@ -3815,8 +3964,7 @@ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
return ret_val;
if ((hw->mac.type == e1000_ich8lan) &&
- (hw->phy.type == e1000_phy_igp_3) &&
- (*speed == SPEED_1000)) {
+ (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
}
@@ -3899,7 +4047,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
* /disabled - false).
**/
void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
- bool state)
+ bool state)
{
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
@@ -3981,12 +4129,12 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
return;
ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
- &reg_data);
+ &reg_data);
if (ret_val)
return;
reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
- reg_data);
+ reg_data);
if (ret_val)
return;
reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 8bf4655c2e17..80034a2b297c 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -211,7 +211,8 @@
#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */
#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */
#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
-#define I82579_EEE_PCS_STATUS 0x182D /* IEEE MMD Register 3.1 >> 8 */
+#define I82579_RX_CONFIG 0x3412 /* Receive configuration */
+#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */
#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */
#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */
#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */
@@ -249,13 +250,6 @@
/* Proprietary Latency Tolerance Reporting PCI Capability */
#define E1000_PCI_LTR_CAP_LPT 0xA8
-/* OBFF Control & Threshold Defines */
-#define E1000_SVCR_OFF_EN 0x00000001
-#define E1000_SVCR_OFF_MASKINT 0x00001000
-#define E1000_SVCR_OFF_TIMER_MASK 0xFFFF0000
-#define E1000_SVCR_OFF_TIMER_SHIFT 16
-#define E1000_SVT_OFF_HWM_MASK 0x0000001F
-
void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
bool state);
@@ -267,4 +261,5 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
+s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data);
#endif /* _E1000E_ICH8LAN_H_ */
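The header now exports e1000_write_emi_reg_locked() alongside the existing read accessor. Both sides go through the PHY's EMI address/data window; a minimal sketch of that pattern, assuming the I82579_EMI_ADDR/I82579_EMI_DATA pair this driver uses for its EMI helpers (illustrative, not the exact implementation):

	/* EMI registers are reached indirectly: latch the address, then
	 * access the data register. Caller must hold the PHY lock.
	 */
	static s32 emi_access_sketch(struct e1000_hw *hw, u16 addr,
				     u16 *data, bool read)
	{
		s32 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, addr);

		if (ret_val)
			return ret_val;
		return read ? e1e_rphy_locked(hw, I82579_EMI_DATA, data) :
			      e1e_wphy_locked(hw, I82579_EMI_DATA, *data);
	}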
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index b78e02174601..2480c1091873 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -596,7 +596,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
* serdes media type.
*/
/* SYNCH bit and IV bit are sticky. */
- udelay(10);
+ usleep_range(10, 20);
rxcw = er32(RXCW);
if (rxcw & E1000_RXCW_SYNCH) {
if (!(rxcw & E1000_RXCW_IV)) {
@@ -613,7 +613,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
status = er32(STATUS);
if (status & E1000_STATUS_LU) {
/* SYNCH bit and IV bit are sticky, so reread rxcw. */
- udelay(10);
+ usleep_range(10, 20);
rxcw = er32(RXCW);
if (rxcw & E1000_RXCW_SYNCH) {
if (!(rxcw & E1000_RXCW_IV)) {
@@ -1382,7 +1382,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
if (!(swsm & E1000_SWSM_SMBI))
break;
- udelay(50);
+ usleep_range(50, 100);
i++;
}
@@ -1400,7 +1400,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
if (er32(SWSM) & E1000_SWSM_SWESMBI)
break;
- udelay(50);
+ usleep_range(50, 100);
}
if (i == timeout) {
@@ -1600,15 +1600,28 @@ s32 e1000e_blink_led_generic(struct e1000_hw *hw)
ledctl_blink = E1000_LEDCTL_LED0_BLINK |
(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
} else {
- /* set the blink bit for each LED that's "on" (0x0E)
- * in ledctl_mode2
+ /* Set the blink bit for each LED that's "on" (0x0E)
+ * (or "off" if inverted) in ledctl_mode2. The blink
+ * logic in hardware only works when mode is set to "on"
+ * so it must be changed accordingly when the mode is
+ * "off" and inverted.
*/
ledctl_blink = hw->mac.ledctl_mode2;
- for (i = 0; i < 4; i++)
- if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
- E1000_LEDCTL_MODE_LED_ON)
- ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
- (i * 8));
+ for (i = 0; i < 32; i += 8) {
+ u32 mode = (hw->mac.ledctl_mode2 >> i) &
+ E1000_LEDCTL_LED0_MODE_MASK;
+ u32 led_default = hw->mac.ledctl_default >> i;
+
+ if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
+ (mode == E1000_LEDCTL_MODE_LED_ON)) ||
+ ((led_default & E1000_LEDCTL_LED0_IVRT) &&
+ (mode == E1000_LEDCTL_MODE_LED_OFF))) {
+ ledctl_blink &=
+ ~(E1000_LEDCTL_LED0_MODE_MASK << i);
+ ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_MODE_LED_ON) << i;
+ }
+ }
}
ew32(LEDCTL, ledctl_blink);
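A worked example of the rewritten loop, assuming the usual LEDCTL layout (4-bit mode plus IVRT at 0x40 and BLINK at 0x80 within each per-LED byte): an inverted LED configured "off" in ledctl_mode2 must be re-programmed to "on" for the hardware blink logic to operate.

	/* Say one LED byte of ledctl_default/ledctl_mode2 is 0x4F: IVRT set,
	 * mode = LED_OFF (0xF). The loop then computes:
	 *   0x4F & ~E1000_LEDCTL_LED0_MODE_MASK          -> 0x40
	 *   0x40 | E1000_LEDCTL_LED0_BLINK | LED_ON (0xE) -> 0xCE
	 * i.e. mode forced to "on", blink bit set, inversion preserved.
	 */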
@@ -1712,7 +1725,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
while (timeout) {
if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
break;
- udelay(100);
+ usleep_range(100, 200);
timeout--;
}
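The udelay() to usleep_range() conversions in this file follow the kernel's delay guidance: these paths run in process context, where a busy-wait only burns CPU, while usleep_range() sleeps on an hrtimer and gives the scheduler a coalescing window. Roughly:

	/* Rule of thumb (Documentation/timers/timers-howto.txt):
	 *   atomic context           -> udelay(us)             (busy-wait)
	 *   process ctx, 10us-20ms   -> usleep_range(min, max)
	 *   process ctx, > 20ms      -> msleep(ms)
	 */
	usleep_range(50, 100);	/* e.g. SMBI poll: >= 50us, up to 100us slack */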
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 948b86ffa4f0..a27e3bcc3249 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -55,7 +55,7 @@
#define DRV_EXTRAVERSION "-k"
-#define DRV_VERSION "2.2.14" DRV_EXTRAVERSION
+#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -219,9 +219,8 @@ static void e1000e_dump(struct e1000_adapter *adapter)
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state trans_start last_rx\n");
- pr_info("%-15s %016lX %016lX %016lX\n",
- netdev->name, netdev->state, netdev->trans_start,
- netdev->last_rx);
+ pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
+ netdev->state, netdev->trans_start, netdev->last_rx);
}
/* Print Registers */
@@ -555,7 +554,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
skb->protocol = eth_type_trans(skb, netdev);
if (staterr & E1000_RXD_STAT_VP)
- __vlan_hwaccel_put_tag(skb, tag);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
napi_gro_receive(&adapter->napi, skb);
}
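The htons(ETH_P_8021Q) argument reflects the protocol-aware VLAN acceleration API: tag offload now carries the VLAN ethertype so 802.1ad (QinQ) can be told apart from plain 802.1Q. A sketch of the converted call shapes, as used throughout this patch:

	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

	/* and the ndo VLAN filter hooks gain a __be16 proto parameter:
	 *   int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
	 *                              __be16 proto, u16 vid);
	 *   int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
	 *                               __be16 proto, u16 vid);
	 */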
@@ -755,8 +754,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
cpu_to_le64(ps_page->dma);
}
- skb = __netdev_alloc_skb_ip_align(netdev,
- adapter->rx_ps_bsize0,
+ skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
gfp);
if (!skb) {
@@ -848,11 +846,16 @@ check_page:
}
}
- if (!buffer_info->dma)
+ if (!buffer_info->dma) {
buffer_info->dma = dma_map_page(&pdev->dev,
- buffer_info->page, 0,
- PAGE_SIZE,
+ buffer_info->page, 0,
+ PAGE_SIZE,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+ }
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
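The added dma_mapping_error() check matters because dma_map_page() can legitimately fail (IOMMU exhaustion, SWIOTLB pressure), and a failed mapping must never be posted to the descriptor ring. The canonical pattern, sketched out of context:

	dma_addr_t dma = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
				      DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		adapter->alloc_rx_buff_failed++;	/* count and retry later */
		break;	/* leave the ring entry unfilled, never a bogus addr */
	}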
@@ -937,10 +940,8 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
cleaned = true;
cleaned_count++;
- dma_unmap_single(&pdev->dev,
- buffer_info->dma,
- adapter->rx_buffer_len,
- DMA_FROM_DEVICE);
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_buffer_len, DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->wb.upper.length);
@@ -1068,8 +1069,8 @@ static void e1000_put_txbuf(struct e1000_ring *tx_ring,
static void e1000_print_hw_hang(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
- struct e1000_adapter,
- print_hang_task);
+ struct e1000_adapter,
+ print_hang_task);
struct net_device *netdev = adapter->netdev;
struct e1000_ring *tx_ring = adapter->tx_ring;
unsigned int i = tx_ring->next_to_clean;
@@ -1082,8 +1083,7 @@ static void e1000_print_hw_hang(struct work_struct *work)
if (test_bit(__E1000_DOWN, &adapter->state))
return;
- if (!adapter->tx_hang_recheck &&
- (adapter->flags2 & FLAG2_DMA_BURST)) {
+ if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
/* May be blocked on write-back; flush pending descriptor
* writebacks to memory and detect again
*/
@@ -1125,19 +1125,10 @@ static void e1000_print_hw_hang(struct work_struct *work)
"PHY 1000BASE-T Status <%x>\n"
"PHY Extended Status <%x>\n"
"PCI Status <%x>\n",
- readl(tx_ring->head),
- readl(tx_ring->tail),
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- tx_ring->buffer_info[eop].time_stamp,
- eop,
- jiffies,
- eop_desc->upper.fields.status,
- er32(STATUS),
- phy_status,
- phy_1000t_status,
- phy_ext_status,
- pci_status);
+ readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
+ tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
+ eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
+ phy_status, phy_1000t_status, phy_ext_status, pci_status);
/* Suggest workaround for known h/w issue */
if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
@@ -1430,7 +1421,7 @@ copydone:
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
if (rx_desc->wb.upper.header_status &
- cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
+ cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
adapter->rx_hdr_split++;
e1000_receive_skb(adapter, netdev, skb, staterr,
@@ -1468,7 +1459,7 @@ next_desc:
* e1000_consume_page - helper function
**/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
- u16 length)
+ u16 length)
{
bi->page = NULL;
skb->len += length;
@@ -1495,7 +1486,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
unsigned int i;
int cleaned_count = 0;
bool cleaned = false;
- unsigned int total_rx_bytes=0, total_rx_packets=0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ struct skb_shared_info *shinfo;
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
@@ -1541,7 +1533,6 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
rx_ring->rx_skb_top = NULL;
goto next_desc;
}
-
#define rxtop (rx_ring->rx_skb_top)
if (!(staterr & E1000_RXD_STAT_EOP)) {
/* this descriptor is only the beginning (or middle) */
@@ -1549,12 +1540,13 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
/* this is the beginning of a chain */
rxtop = skb;
skb_fill_page_desc(rxtop, 0, buffer_info->page,
- 0, length);
+ 0, length);
} else {
/* this is the middle of a chain */
- skb_fill_page_desc(rxtop,
- skb_shinfo(rxtop)->nr_frags,
- buffer_info->page, 0, length);
+ shinfo = skb_shinfo(rxtop);
+ skb_fill_page_desc(rxtop, shinfo->nr_frags,
+ buffer_info->page, 0,
+ length);
/* re-use the skb, only consumed the page */
buffer_info->skb = skb;
}
@@ -1563,9 +1555,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
} else {
if (rxtop) {
/* end of the chain */
- skb_fill_page_desc(rxtop,
- skb_shinfo(rxtop)->nr_frags,
- buffer_info->page, 0, length);
+ shinfo = skb_shinfo(rxtop);
+ skb_fill_page_desc(rxtop, shinfo->nr_frags,
+ buffer_info->page, 0,
+ length);
/* re-use the current skb, we only consumed the
* page
*/
@@ -1590,10 +1583,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
skb_put(skb, length);
} else {
skb_fill_page_desc(skb, 0,
- buffer_info->page, 0,
- length);
+ buffer_info->page, 0,
+ length);
e1000_consume_page(buffer_info, skb,
- length);
+ length);
}
}
}
@@ -1666,8 +1659,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
dma_unmap_page(&pdev->dev, buffer_info->dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
+ PAGE_SIZE, DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0,
@@ -1720,7 +1712,8 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
static void e1000e_downshift_workaround(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
- struct e1000_adapter, downshift_task);
+ struct e1000_adapter,
+ downshift_task);
if (test_bit(__E1000_DOWN, &adapter->state))
return;
@@ -1913,7 +1906,6 @@ static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
struct e1000_hw *hw = &adapter->hw;
struct e1000_ring *tx_ring = adapter->tx_ring;
-
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
@@ -1970,7 +1962,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
ew32(RFCTL, rfctl);
}
-#define E1000_IVAR_INT_ALLOC_VALID 0x8
/* Configure Rx vector */
rx_ring->ims_val = E1000_IMS_RXQ0;
adapter->eiac_mask |= rx_ring->ims_val;
@@ -2045,8 +2036,9 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_HAS_MSIX) {
adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
adapter->msix_entries = kcalloc(adapter->num_vectors,
- sizeof(struct msix_entry),
- GFP_KERNEL);
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
if (adapter->msix_entries) {
for (i = 0; i < adapter->num_vectors; i++)
adapter->msix_entries[i].entry = i;
@@ -2490,7 +2482,7 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
switch (itr_setting) {
case lowest_latency:
/* handle TSO and jumbo frames */
- if (bytes/packets > 8000)
+ if (bytes / packets > 8000)
retval = bulk_latency;
else if ((packets < 5) && (bytes > 512))
retval = low_latency;
@@ -2498,13 +2490,13 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
case low_latency: /* 50 usec aka 20000 ints/s */
if (bytes > 10000) {
/* this if handles the TSO accounting */
- if (bytes/packets > 8000)
+ if (bytes / packets > 8000)
retval = bulk_latency;
- else if ((packets < 10) || ((bytes/packets) > 1200))
+ else if ((packets < 10) || ((bytes / packets) > 1200))
retval = bulk_latency;
else if ((packets > 35))
retval = lowest_latency;
- } else if (bytes/packets > 2000) {
+ } else if (bytes / packets > 2000) {
retval = bulk_latency;
} else if (packets <= 2 && bytes < 512) {
retval = lowest_latency;
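A worked example of the heuristics above: in the low_latency state, 20 packets totalling 180000 bytes give 9000 bytes/packet, which exceeds the 8000-byte TSO threshold and demotes to bulk_latency; 40 packets totalling 12000 bytes (300 bytes/packet) clear every demotion test and, being more than 35 packets, promote to lowest_latency.

	/* e1000_update_itr(low_latency, 20, 180000)
	 *   bytes > 10000, 180000/20 == 9000 > 8000   -> bulk_latency
	 * e1000_update_itr(low_latency, 40, 12000)
	 *   bytes > 10000, 300 B/pkt, packets > 35    -> lowest_latency
	 */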
@@ -2556,8 +2548,8 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
current_itr = max(adapter->rx_itr, adapter->tx_itr);
- switch (current_itr) {
/* counts and packets in update_itr are dependent on these numbers */
+ switch (current_itr) {
case lowest_latency:
new_itr = 70000;
break;
@@ -2578,8 +2570,7 @@ set_itr_now:
* increasing
*/
new_itr = new_itr > adapter->itr ?
- min(adapter->itr + (new_itr >> 2), new_itr) :
- new_itr;
+ min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
adapter->itr = new_itr;
adapter->rx_ring->itr_val = new_itr;
if (adapter->msix_entries)
@@ -2681,7 +2672,8 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
return work_done;
}
-static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -2706,7 +2698,8 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
return 0;
}
-static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -2750,7 +2743,8 @@ static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
ew32(RCTL, rctl);
if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
- e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+ e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
+ adapter->mng_vlan_id);
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
}
}
@@ -2810,24 +2804,23 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
u16 vid = adapter->hw.mng_cookie.vlan_id;
u16 old_vid = adapter->mng_vlan_id;
- if (adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
- e1000_vlan_rx_add_vid(netdev, vid);
+ if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+ e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
adapter->mng_vlan_id = vid;
}
if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
- e1000_vlan_rx_kill_vid(netdev, old_vid);
+ e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid);
}
static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
u16 vid;
- e1000_vlan_rx_add_vid(adapter->netdev, 0);
+ e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
- e1000_vlan_rx_add_vid(adapter->netdev, vid);
+ e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
@@ -3002,8 +2995,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
rctl = er32(RCTL);
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
- E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
- (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
/* Do not Store bad packets */
rctl &= ~E1000_RCTL_SBP;
@@ -3089,19 +3082,17 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
/* Enable Packet split descriptors */
rctl |= E1000_RCTL_DTYP_PS;
- psrctl |= adapter->rx_ps_bsize0 >>
- E1000_PSRCTL_BSIZE0_SHIFT;
+ psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT;
switch (adapter->rx_ps_pages) {
case 3:
- psrctl |= PAGE_SIZE <<
- E1000_PSRCTL_BSIZE3_SHIFT;
+ psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
+ /* fall-through */
case 2:
- psrctl |= PAGE_SIZE <<
- E1000_PSRCTL_BSIZE2_SHIFT;
+ psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
+ /* fall-through */
case 1:
- psrctl |= PAGE_SIZE >>
- E1000_PSRCTL_BSIZE1_SHIFT;
+ psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
break;
}
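The new fall-through comments make explicit that these case bodies are cumulative by design: with rx_ps_pages == 3, execution runs through all three labels, so all three page-buffer sizes get programmed. Equivalently:

	/* rx_ps_pages == 3 expands to: */
	psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
	psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
	psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;	/* note: >> */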
@@ -3275,7 +3266,7 @@ static int e1000e_write_mc_addr_list(struct net_device *netdev)
/* update_mc_addr_list expects a packed array of only addresses. */
i = 0;
netdev_for_each_mc_addr(ha, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
@@ -3385,7 +3376,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
ew32(RCTL, rctl);
- if (netdev->features & NETIF_F_HW_VLAN_RX)
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
e1000e_vlan_strip_enable(adapter);
else
e1000e_vlan_strip_disable(adapter);
@@ -3752,8 +3743,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
* but don't include ethernet FCS because hardware appends it
*/
min_tx_space = (adapter->max_frame_size +
- sizeof(struct e1000_tx_desc) -
- ETH_FCS_LEN) * 2;
+ sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2;
min_tx_space = ALIGN(min_tx_space, 1024);
min_tx_space >>= 10;
/* software strips receive CRC, so leave room for it */
@@ -3856,13 +3846,13 @@ void e1000e_reset(struct e1000_adapter *adapter)
if ((adapter->max_frame_size * 2) > (pba << 10)) {
if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
dev_info(&adapter->pdev->dev,
- "Interrupt Throttle Rate turned off\n");
+ "Interrupt Throttle Rate off\n");
adapter->flags2 |= FLAG2_DISABLE_AIM;
e1000e_write_itr(adapter, 0);
}
} else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
dev_info(&adapter->pdev->dev,
- "Interrupt Throttle Rate turned on\n");
+ "Interrupt Throttle Rate on\n");
adapter->flags2 &= ~FLAG2_DISABLE_AIM;
adapter->itr = 20000;
e1000e_write_itr(adapter, adapter->itr);
@@ -3893,6 +3883,38 @@ void e1000e_reset(struct e1000_adapter *adapter)
/* initialize systim and reset the ns time counter */
e1000e_config_hwtstamp(adapter);
+ /* Set EEE advertisement as appropriate */
+ if (adapter->flags2 & FLAG2_HAS_EEE) {
+ s32 ret_val;
+ u16 adv_addr;
+
+ switch (hw->phy.type) {
+ case e1000_phy_82579:
+ adv_addr = I82579_EEE_ADVERTISEMENT;
+ break;
+ case e1000_phy_i217:
+ adv_addr = I217_EEE_ADVERTISEMENT;
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "Invalid PHY type setting EEE advertisement\n");
+ return;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ dev_err(&adapter->pdev->dev,
+ "EEE advertisement - unable to acquire PHY\n");
+ return;
+ }
+
+ e1000_write_emi_reg_locked(hw, adv_addr,
+ hw->dev_spec.ich8lan.eee_disable ?
+ 0 : adapter->eee_advert);
+
+ hw->phy.ops.release(hw);
+ }
+
if (!netif_running(adapter->netdev) &&
!test_bit(__E1000_TESTING, &adapter->state)) {
e1000_power_down_phy(adapter);
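The adapter->eee_advert value written above is seeded in e1000_probe() further down with MDIO_EEE_100TX | MDIO_EEE_1000T, the standard clause-45 EEE advertisement bits (MMD 7, register 60). For reference (values per include/uapi/linux/mdio.h; illustrative):

	/*   MDIO_EEE_100TX = 0x0002   - advertise 100BASE-TX EEE
	 *   MDIO_EEE_1000T = 0x0004   - advertise 1000BASE-T EEE
	 * When eee_disable is set, 0 is written instead, advertising nothing.
	 */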
@@ -3994,6 +4016,8 @@ void e1000e_down(struct e1000_adapter *adapter)
e1000_irq_disable(adapter);
+ napi_synchronize(&adapter->napi);
+
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
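The napi_synchronize() added above makes e1000e_down() wait out any in-flight poll before timers and rings are torn down; correspondingly, e1000_close() below is reordered so napi_disable() only runs after the IRQ is freed. On SMP the helper is roughly:

	/* Approximate behavior, per linux/netdevice.h (sketch): */
	static inline void napi_synchronize_sketch(const struct napi_struct *n)
	{
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	}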
@@ -4261,8 +4285,7 @@ static int e1000_open(struct net_device *netdev)
e1000e_power_up_phy(adapter);
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
- if ((adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
+ if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
e1000_update_mng_vlan(adapter);
/* DMA latency requirement to workaround jumbo issue */
@@ -4351,12 +4374,13 @@ static int e1000_close(struct net_device *netdev)
pm_runtime_get_sync(&pdev->dev);
- napi_disable(&adapter->napi);
-
if (!test_bit(__E1000_DOWN, &adapter->state)) {
e1000e_down(adapter);
e1000_free_irq(adapter);
}
+
+ napi_disable(&adapter->napi);
+
e1000_power_down_phy(adapter);
e1000e_free_tx_resources(adapter->tx_ring);
@@ -4365,9 +4389,9 @@ static int e1000_close(struct net_device *netdev)
/* kill manageability vlan ID if supported, but not if a vlan with
* the same ID is registered on the host OS (let 8021q kill it)
*/
- if (adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
- e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+ if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
+ e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
+ adapter->mng_vlan_id);
/* If AMT is enabled, let the firmware know that the network
* interface is now closed
@@ -4382,6 +4406,7 @@ static int e1000_close(struct net_device *netdev)
return 0;
}
+
/**
* e1000_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
@@ -4432,7 +4457,8 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
static void e1000e_update_phy_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
- struct e1000_adapter, update_phy_task);
+ struct e1000_adapter,
+ update_phy_task);
if (test_bit(__E1000_DOWN, &adapter->state))
return;
@@ -4449,7 +4475,7 @@ static void e1000e_update_phy_task(struct work_struct *work)
**/
static void e1000_update_phy_info(unsigned long data)
{
- struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+ struct e1000_adapter *adapter = (struct e1000_adapter *)data;
if (test_bit(__E1000_DOWN, &adapter->state))
return;
@@ -4616,18 +4642,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
* our own version based on RUC and ROC
*/
netdev->stats.rx_errors = adapter->stats.rxerrc +
- adapter->stats.crcerrs + adapter->stats.algnerrc +
- adapter->stats.ruc + adapter->stats.roc +
- adapter->stats.cexterr;
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
netdev->stats.rx_length_errors = adapter->stats.ruc +
- adapter->stats.roc;
+ adapter->stats.roc;
netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
netdev->stats.rx_missed_errors = adapter->stats.mpc;
/* Tx Errors */
- netdev->stats.tx_errors = adapter->stats.ecol +
- adapter->stats.latecol;
+ netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol;
netdev->stats.tx_aborted_errors = adapter->stats.ecol;
netdev->stats.tx_window_errors = adapter->stats.latecol;
netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
@@ -4785,7 +4809,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
**/
static void e1000_watchdog(unsigned long data)
{
- struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+ struct e1000_adapter *adapter = (struct e1000_adapter *)data;
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
@@ -4796,7 +4820,8 @@ static void e1000_watchdog(unsigned long data)
static void e1000_watchdog_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
- struct e1000_adapter, watchdog_task);
+ struct e1000_adapter,
+ watchdog_task);
struct net_device *netdev = adapter->netdev;
struct e1000_mac_info *mac = &adapter->hw.mac;
struct e1000_phy_info *phy = &adapter->hw.phy;
@@ -4830,8 +4855,8 @@ static void e1000_watchdog_task(struct work_struct *work)
/* update snapshot of PHY registers on LSC */
e1000_phy_read_status(adapter);
mac->ops.get_link_up_info(&adapter->hw,
- &adapter->link_speed,
- &adapter->link_duplex);
+ &adapter->link_speed,
+ &adapter->link_duplex);
e1000_print_link_info(adapter);
/* check if SmartSpeed worked */
@@ -4944,7 +4969,7 @@ static void e1000_watchdog_task(struct work_struct *work)
adapter->flags |= FLAG_RESTART_NOW;
else
pm_schedule_suspend(netdev->dev.parent,
- LINK_TIMEOUT);
+ LINK_TIMEOUT);
}
}
@@ -4979,8 +5004,8 @@ link_up:
*/
u32 goc = (adapter->gotc + adapter->gorc) / 10000;
u32 dif = (adapter->gotc > adapter->gorc ?
- adapter->gotc - adapter->gorc :
- adapter->gorc - adapter->gotc) / 10000;
+ adapter->gotc - adapter->gorc :
+ adapter->gorc - adapter->gotc) / 10000;
u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
e1000e_write_itr(adapter, itr);
@@ -5059,14 +5084,14 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
iph->tot_len = 0;
iph->check = 0;
tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- 0, IPPROTO_TCP, 0);
+ 0, IPPROTO_TCP, 0);
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
ipcse = 0;
}
ipcss = skb_network_offset(skb);
@@ -5075,7 +5100,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
- E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
+ E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
i = tx_ring->next_to_use;
context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
@@ -5145,8 +5170,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
context_desc->lower_setup.ip_config = 0;
context_desc->upper_setup.tcp_fields.tucss = css;
- context_desc->upper_setup.tcp_fields.tucso =
- css + skb->csum_offset;
+ context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
context_desc->upper_setup.tcp_fields.tucse = 0;
context_desc->tcp_seg_setup.data = 0;
context_desc->cmd_and_length = cpu_to_le32(cmd_len);
@@ -5219,7 +5243,8 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
- offset, size, DMA_TO_DEVICE);
+ offset, size,
+ DMA_TO_DEVICE);
buffer_info->mapped_as_page = true;
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
@@ -5268,7 +5293,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
if (tx_flags & E1000_TX_FLAGS_TSO) {
txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
- E1000_TXD_CMD_TSE;
+ E1000_TXD_CMD_TSE;
txd_upper |= E1000_TXD_POPTS_TXSM << 8;
if (tx_flags & E1000_TX_FLAGS_IPV4)
@@ -5299,8 +5324,8 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
buffer_info = &tx_ring->buffer_info[i];
tx_desc = E1000_TX_DESC(*tx_ring, i);
tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
- tx_desc->lower.data =
- cpu_to_le32(txd_lower | buffer_info->length);
+ tx_desc->lower.data = cpu_to_le32(txd_lower |
+ buffer_info->length);
tx_desc->upper.data = cpu_to_le32(txd_upper);
i++;
@@ -5350,11 +5375,11 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
return 0;
- if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
+ if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
return 0;
{
- const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
+ const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
struct udphdr *udp;
if (ip->protocol != IPPROTO_UDP)
@@ -5579,7 +5604,7 @@ static void e1000_reset_task(struct work_struct *work)
* Returns the address of the device statistics structure.
**/
struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+ struct rtnl_link_stats64 *stats)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -5600,18 +5625,15 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
* our own version based on RUC and ROC
*/
stats->rx_errors = adapter->stats.rxerrc +
- adapter->stats.crcerrs + adapter->stats.algnerrc +
- adapter->stats.ruc + adapter->stats.roc +
- adapter->stats.cexterr;
- stats->rx_length_errors = adapter->stats.ruc +
- adapter->stats.roc;
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
+ stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc;
stats->rx_crc_errors = adapter->stats.crcerrs;
stats->rx_frame_errors = adapter->stats.algnerrc;
stats->rx_missed_errors = adapter->stats.mpc;
/* Tx Errors */
- stats->tx_errors = adapter->stats.ecol +
- adapter->stats.latecol;
+ stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol;
stats->tx_aborted_errors = adapter->stats.ecol;
stats->tx_window_errors = adapter->stats.latecol;
stats->tx_carrier_errors = adapter->stats.tncrs;
@@ -5680,9 +5702,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
/* adjust allocation if LPE protects us, and we aren't using SBP */
if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
- (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
+ (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
- + ETH_FCS_LEN;
+ + ETH_FCS_LEN;
if (netif_running(netdev))
e1000e_up(adapter);
@@ -5861,7 +5883,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
phy_reg &= ~(BM_RCTL_MO_MASK);
if (mac_reg & E1000_RCTL_MO_3)
phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
- << BM_RCTL_MO_SHIFT);
+ << BM_RCTL_MO_SHIFT);
if (mac_reg & E1000_RCTL_BAM)
phy_reg |= BM_RCTL_BAM;
if (mac_reg & E1000_RCTL_PMCF)
@@ -5930,10 +5952,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
}
ctrl = er32(CTRL);
- /* advertise wake from D3Cold */
- #define E1000_CTRL_ADVD3WUC 0x00100000
- /* phy power management enable */
- #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
ctrl |= E1000_CTRL_ADVD3WUC;
if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
@@ -5977,8 +5995,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
*/
e1000e_release_hw_control(adapter);
- pci_clear_master(pdev);
-
/* The pci-e switch on some quad port adapters will report a
* correctable error when the MAC transitions from D0 to D3. To
* prevent this we need to mask off the correctable errors on the
@@ -6077,24 +6093,24 @@ static int __e1000_resume(struct pci_dev *pdev)
e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
if (phy_data) {
e_info("PHY Wakeup cause - %s\n",
- phy_data & E1000_WUS_EX ? "Unicast Packet" :
- phy_data & E1000_WUS_MC ? "Multicast Packet" :
- phy_data & E1000_WUS_BC ? "Broadcast Packet" :
- phy_data & E1000_WUS_MAG ? "Magic Packet" :
- phy_data & E1000_WUS_LNKC ?
- "Link Status Change" : "other");
+ phy_data & E1000_WUS_EX ? "Unicast Packet" :
+ phy_data & E1000_WUS_MC ? "Multicast Packet" :
+ phy_data & E1000_WUS_BC ? "Broadcast Packet" :
+ phy_data & E1000_WUS_MAG ? "Magic Packet" :
+ phy_data & E1000_WUS_LNKC ?
+ "Link Status Change" : "other");
}
e1e_wphy(&adapter->hw, BM_WUS, ~0);
} else {
u32 wus = er32(WUS);
if (wus) {
e_info("MAC Wakeup cause - %s\n",
- wus & E1000_WUS_EX ? "Unicast Packet" :
- wus & E1000_WUS_MC ? "Multicast Packet" :
- wus & E1000_WUS_BC ? "Broadcast Packet" :
- wus & E1000_WUS_MAG ? "Magic Packet" :
- wus & E1000_WUS_LNKC ? "Link Status Change" :
- "other");
+ wus & E1000_WUS_EX ? "Unicast Packet" :
+ wus & E1000_WUS_MC ? "Multicast Packet" :
+ wus & E1000_WUS_BC ? "Broadcast Packet" :
+ wus & E1000_WUS_MAG ? "Magic Packet" :
+ wus & E1000_WUS_LNKC ? "Link Status Change" :
+ "other");
}
ew32(WUS, ~0);
}
@@ -6369,7 +6385,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
e_info("(PCI Express:2.5GT/s:%s) %pM\n",
/* bus width */
((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
- "Width x1"),
+ "Width x1"),
/* MAC address */
netdev->dev_addr);
e_info("Intel(R) PRO/%s Network Connection\n",
@@ -6409,7 +6425,7 @@ static int e1000_set_features(struct net_device *netdev,
if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
adapter->flags |= FLAG_TSO_FORCE;
- if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX |
+ if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
NETIF_F_RXALL)))
return 0;
@@ -6479,7 +6495,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
resource_size_t flash_start, flash_len;
static int cards_found;
u16 aspm_disable_flag = 0;
- int i, err, pci_using_dac;
+ int bars, i, err, pci_using_dac;
u16 eeprom_data = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
@@ -6506,15 +6522,16 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
goto err_dma;
}
}
}
- err = pci_request_selected_regions_exclusive(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM),
- e1000e_driver_name);
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ err = pci_request_selected_regions_exclusive(pdev, bars,
+ e1000e_driver_name);
if (err)
goto err_pci_reg;
@@ -6567,6 +6584,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_flashmap;
}
+ /* Set default EEE advertisement */
+ if (adapter->flags2 & FLAG2_HAS_EEE)
+ adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
+
/* construct the net_device struct */
netdev->netdev_ops = &e1000e_netdev_ops;
e1000e_set_ethtool_ops(netdev);
@@ -6615,8 +6636,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Set initial default active device features */
netdev->features = (NETIF_F_SG |
- NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_RXHASH |
@@ -6630,7 +6651,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_RXALL;
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
- netdev->features |= NETIF_F_HW_VLAN_FILTER;
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->vlan_features |= (NETIF_F_SG |
NETIF_F_TSO |
@@ -6683,11 +6704,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
init_timer(&adapter->watchdog_timer);
adapter->watchdog_timer.function = e1000_watchdog;
- adapter->watchdog_timer.data = (unsigned long) adapter;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
init_timer(&adapter->phy_info_timer);
adapter->phy_info_timer.function = e1000_update_phy_info;
- adapter->phy_info_timer.data = (unsigned long) adapter;
+ adapter->phy_info_timer.data = (unsigned long)adapter;
INIT_WORK(&adapter->reset_task, e1000_reset_task);
INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
@@ -6795,7 +6816,7 @@ err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -6865,7 +6886,7 @@ static void e1000_remove(struct pci_dev *pdev)
if (adapter->hw.flash_address)
iounmap(adapter->hw.flash_address);
pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_select_bars(pdev, IORESOURCE_MEM));
free_netdev(netdev);
@@ -6886,7 +6907,8 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
+ board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
@@ -6962,8 +6984,8 @@ MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
#ifdef CONFIG_PM
static const struct dev_pm_ops e1000_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
- SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
- e1000_runtime_resume, e1000_idle)
+ SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
+ e1000_idle)
};
#endif
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index 84fecc268162..44ddc0a0ee0e 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -630,7 +630,7 @@ void e1000e_reload_nvm_generic(struct e1000_hw *hw)
{
u32 ctrl_ext;
- udelay(10);
+ usleep_range(10, 20);
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_EE_RST;
ew32(CTRL_EXT, ctrl_ext);
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 98da75dff936..c16bd75b6caa 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -45,7 +45,7 @@
unsigned int copybreak = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
- "Maximum size of packet that is copied to a new buffer on receive");
+ "Maximum size of packet that is copied to a new buffer on receive");
/* All parameters are treated the same, as an integer array of values.
* This macro just reduces the need to repeat the same declaration code
@@ -143,7 +143,8 @@ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
*
* Default Value: 1 (enabled)
*/
-E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
+E1000_PARAM(WriteProtectNVM,
+ "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
/* Enable CRC Stripping
*
@@ -160,13 +161,18 @@ struct e1000_option {
const char *err;
int def;
union {
- struct { /* range_option info */
+ /* range_option info */
+ struct {
int min;
int max;
} r;
- struct { /* list_option info */
+ /* list_option info */
+ struct {
int nr;
- struct e1000_opt_list { int i; char *str; } *p;
+ struct e1000_opt_list {
+ int i;
+ char *str;
+ } *p;
} l;
} arg;
};
@@ -246,7 +252,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
"Using defaults for all values\n");
}
- { /* Transmit Interrupt Delay */
+ /* Transmit Interrupt Delay */
+ {
static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Interrupt Delay",
@@ -265,7 +272,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
adapter->tx_int_delay = opt.def;
}
}
- { /* Transmit Absolute Interrupt Delay */
+ /* Transmit Absolute Interrupt Delay */
+ {
static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Absolute Interrupt Delay",
@@ -284,7 +292,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
adapter->tx_abs_int_delay = opt.def;
}
}
- { /* Receive Interrupt Delay */
+ /* Receive Interrupt Delay */
+ {
static struct e1000_option opt = {
.type = range_option,
.name = "Receive Interrupt Delay",
@@ -303,7 +312,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
adapter->rx_int_delay = opt.def;
}
}
- { /* Receive Absolute Interrupt Delay */
+ /* Receive Absolute Interrupt Delay */
+ {
static const struct e1000_option opt = {
.type = range_option,
.name = "Receive Absolute Interrupt Delay",
@@ -322,7 +332,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
adapter->rx_abs_int_delay = opt.def;
}
}
- { /* Interrupt Throttling Rate */
+ /* Interrupt Throttling Rate */
+ {
static const struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Throttling Rate (ints/sec)",
@@ -392,7 +403,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
break;
}
}
- { /* Interrupt Mode */
+ /* Interrupt Mode */
+ {
static struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Mode",
@@ -435,7 +447,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
kfree(opt.err);
#endif
}
- { /* Smart Power Down */
+ /* Smart Power Down */
+ {
static const struct e1000_option opt = {
.type = enable_option,
.name = "PHY Smart Power Down",
@@ -450,7 +463,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
adapter->flags |= FLAG_SMART_POWER_DOWN;
}
}
- { /* CRC Stripping */
+ /* CRC Stripping */
+ {
static const struct e1000_option opt = {
.type = enable_option,
.name = "CRC Stripping",
@@ -470,27 +484,28 @@ void e1000e_check_options(struct e1000_adapter *adapter)
adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING;
}
}
- { /* Kumeran Lock Loss Workaround */
+ /* Kumeran Lock Loss Workaround */
+ {
static const struct e1000_option opt = {
.type = enable_option,
.name = "Kumeran Lock Loss Workaround",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
+ bool enabled = opt.def;
if (num_KumeranLockLoss > bd) {
unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
- if (hw->mac.type == e1000_ich8lan)
- e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
- kmrn_lock_loss);
- } else {
- if (hw->mac.type == e1000_ich8lan)
- e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
- opt.def);
+ enabled = kmrn_lock_loss;
}
+
+ if (hw->mac.type == e1000_ich8lan)
+ e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
+ enabled);
}
- { /* Write-protect NVM */
+ /* Write-protect NVM */
+ {
static const struct e1000_option opt = {
.type = enable_option,
.name = "Write-protect NVM",
@@ -500,7 +515,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_IS_ICH) {
if (num_WriteProtectNVM > bd) {
- unsigned int write_protect_nvm = WriteProtectNVM[bd];
+ unsigned int write_protect_nvm =
+ WriteProtectNVM[bd];
e1000_validate_option(&write_protect_nvm, &opt,
adapter);
if (write_protect_nvm)
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 0930c136aa31..59c76a6815a0 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -37,7 +37,9 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
/* Cable length tables */
static const u16 e1000_m88_cable_length_table[] = {
- 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED
+};
+
#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
ARRAY_SIZE(e1000_m88_cable_length_table)
@@ -49,7 +51,9 @@ static const u16 e1000_igp_2_cable_length_table[] = {
66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
- 124};
+ 124
+};
+
#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
ARRAY_SIZE(e1000_igp_2_cable_length_table)
@@ -67,8 +71,7 @@ s32 e1000e_check_reset_block_generic(struct e1000_hw *hw)
manc = er32(MANC);
- return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
- E1000_BLK_PHY_RESET : 0;
+ return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0;
}
/**
@@ -94,7 +97,7 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
return ret_val;
phy->id = (u32)(phy_id << 16);
- udelay(20);
+ usleep_range(20, 40);
ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
if (ret_val)
return ret_val;
@@ -175,7 +178,13 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
e_dbg("MDI Error\n");
return -E1000_ERR_PHY;
}
- *data = (u16) mdic;
+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+ e_dbg("MDI Read offset error - requested %d, returned %d\n",
+ offset,
+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+ return -E1000_ERR_PHY;
+ }
+ *data = (u16)mdic;
/* Allow some time after each MDIC transaction to avoid
* reading duplicate data in the next MDIC transaction.
@@ -233,6 +242,12 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
e_dbg("MDI Error\n");
return -E1000_ERR_PHY;
}
+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+ e_dbg("MDI Write offset error - requested %d, returned %d\n",
+ offset,
+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+ return -E1000_ERR_PHY;
+ }
/* Allow some time after each MDIC transaction to avoid
* reading duplicate data in the next MDIC transaction.
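The readback checks added to both MDIC paths work because MDIC echoes the register offset of the transaction that actually completed, so a mismatch indicates a stale or corrupted MDI completion. The assumed layout (standard e1000 definitions; illustrative):

	/* MDIC: bits 15:0 data, 20:16 register offset, 25:21 PHY address,
	 *       bit 28 ready, bit 30 error.
	 * Hence ((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT)
	 * must equal the offset that was requested.
	 */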
@@ -324,7 +339,7 @@ s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
* semaphores before exiting.
**/
static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
- bool locked)
+ bool locked)
{
s32 ret_val = 0;
@@ -391,7 +406,7 @@ s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
* at the offset. Release any acquired semaphores before exiting.
**/
static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
- bool locked)
+ bool locked)
{
s32 ret_val = 0;
@@ -410,8 +425,7 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
(u16)offset);
if (!ret_val)
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS &
- offset,
- data);
+ offset, data);
if (!locked)
hw->phy.ops.release(hw);
@@ -458,7 +472,7 @@ s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
* Release any acquired semaphores before exiting.
**/
static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
- bool locked)
+ bool locked)
{
u32 kmrnctrlsta;
@@ -531,7 +545,7 @@ s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
* before exiting.
**/
static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
- bool locked)
+ bool locked)
{
u32 kmrnctrlsta;
@@ -772,8 +786,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
phy_data |= M88E1000_EPSCR_TX_CLK_25;
- if ((phy->revision == 2) &&
- (phy->id == M88E1111_I_PHY_ID)) {
+ if ((phy->revision == 2) && (phy->id == M88E1111_I_PHY_ID)) {
/* 82573L PHY - set the downshift counter to 5x. */
phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
@@ -1296,7 +1309,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
e_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
+ 100000, &link);
if (ret_val)
return ret_val;
@@ -1319,7 +1332,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
/* Try once more */
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
+ 100000, &link);
if (ret_val)
return ret_val;
}
@@ -1609,9 +1622,9 @@ s32 e1000_check_polarity_m88(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data);
if (!ret_val)
- phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
return ret_val;
}
@@ -1653,9 +1666,9 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, offset, &data);
if (!ret_val)
- phy->cable_polarity = (data & mask)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
return ret_val;
}
@@ -1685,9 +1698,9 @@ s32 e1000_check_polarity_ife(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, offset, &phy_data);
if (!ret_val)
- phy->cable_polarity = (phy_data & mask)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((phy_data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
return ret_val;
}
@@ -1733,7 +1746,7 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
* Polls the PHY status register for link, 'iterations' number of times.
**/
s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
- u32 usec_interval, bool *success)
+ u32 usec_interval, bool *success)
{
s32 ret_val = 0;
u16 i, phy_status;
@@ -1756,7 +1769,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
if (phy_status & BMSR_LSTATUS)
break;
if (usec_interval >= 1000)
- mdelay(usec_interval/1000);
+ mdelay(usec_interval / 1000);
else
udelay(usec_interval);
}
@@ -1791,8 +1804,8 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT);
if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
return -E1000_ERR_PHY;
@@ -1824,10 +1837,10 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
u16 cur_agc_index, max_agc_index = 0;
u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
- IGP02E1000_PHY_AGC_A,
- IGP02E1000_PHY_AGC_B,
- IGP02E1000_PHY_AGC_C,
- IGP02E1000_PHY_AGC_D
+ IGP02E1000_PHY_AGC_A,
+ IGP02E1000_PHY_AGC_B,
+ IGP02E1000_PHY_AGC_C,
+ IGP02E1000_PHY_AGC_D
};
/* Read the AGC registers for all channels */
@@ -1841,8 +1854,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
* that can be put into the lookup table to obtain the
* approximate cable length.
*/
- cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
- IGP02E1000_AGC_LENGTH_MASK;
+ cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+ IGP02E1000_AGC_LENGTH_MASK);
/* Array index bound check. */
if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
@@ -1865,8 +1878,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
/* Calculate cable length with the error range of +/- 10 meters. */
- phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
- (agc_value - IGP02E1000_AGC_RANGE) : 0;
+ phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+ (agc_value - IGP02E1000_AGC_RANGE) : 0);
phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
@@ -2040,9 +2053,9 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
return ret_val;
} else {
/* Polarity is forced */
- phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
}
ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
@@ -2119,7 +2132,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
ew32(CTRL, ctrl);
e1e_flush();
- udelay(150);
+ usleep_range(150, 300);
phy->ops.release(hw);
@@ -2375,13 +2388,13 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
/* Page is shifted left, PHY expects (page x 32) */
ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
- (page << page_shift));
+ (page << page_shift));
if (ret_val)
goto release;
}
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
- data);
+ data);
release:
hw->phy.ops.release(hw);
@@ -2433,13 +2446,13 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
/* Page is shifted left, PHY expects (page x 32) */
ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
- (page << page_shift));
+ (page << page_shift));
if (ret_val)
goto release;
}
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
- data);
+ data);
release:
hw->phy.ops.release(hw);
return ret_val;
@@ -2674,7 +2687,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
if (read) {
/* Read the Wakeup register page value using opcode 0x12 */
ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
- data);
+ data);
} else {
/* Write the Wakeup register page value using opcode 0x12 */
ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
@@ -2763,7 +2776,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
if (page > 0 && page < HV_INTC_FC_PAGE_START) {
ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
- data, true);
+ data, true);
goto out;
}
@@ -2786,8 +2799,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
page << IGP_PAGE_SHIFT, reg);
- ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
- data);
+ ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data);
out:
if (!locked)
hw->phy.ops.release(hw);
@@ -2871,7 +2883,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
if (page > 0 && page < HV_INTC_FC_PAGE_START) {
ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
- &data, false);
+ &data, false);
goto out;
}
@@ -2910,7 +2922,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
page << IGP_PAGE_SHIFT, reg);
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
- data);
+ data);
out:
if (!locked)
@@ -2988,15 +3000,15 @@ static u32 e1000_get_phy_addr_for_hv_page(u32 page)
* These accesses done with PHY address 2 and without using pages.
**/
static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
- u16 *data, bool read)
+ u16 *data, bool read)
{
s32 ret_val;
u32 addr_reg;
u32 data_reg;
/* This takes care of the difference with desktop vs mobile phy */
- addr_reg = (hw->phy.type == e1000_phy_82578) ?
- I82578_ADDR_REG : I82577_ADDR_REG;
+ addr_reg = ((hw->phy.type == e1000_phy_82578) ?
+ I82578_ADDR_REG : I82577_ADDR_REG);
data_reg = addr_reg + 1;
/* All operations in this function are phy address 2 */
@@ -3050,8 +3062,8 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- data &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
- BM_CS_STATUS_SPEED_MASK;
+ data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK);
if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
BM_CS_STATUS_SPEED_1000))
@@ -3086,9 +3098,9 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
if (!ret_val)
- phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
return ret_val;
}
@@ -3215,8 +3227,8 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
- I82577_DSTATUS_CABLE_LENGTH_SHIFT;
+ length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+ I82577_DSTATUS_CABLE_LENGTH_SHIFT);
if (length == E1000_CABLE_LENGTH_UNDEFINED)
return -E1000_ERR_PHY;
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index b477fa53ec94..065f8c80d4f2 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -145,8 +145,7 @@ static int e1000e_phc_settime(struct ptp_clock_info *ptp,
unsigned long flags;
u64 ns;
- ns = ts->tv_sec * NSEC_PER_SEC;
- ns += ts->tv_nsec;
+ ns = timespec_to_ns(ts);
/* reset the timecounter */
spin_lock_irqsave(&adapter->systim_lock, flags);
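timespec_to_ns() replaces the open-coded seconds-to-nanoseconds arithmetic with the standard helper; same math, one well-typed place:

	/* From include/linux/time.h: */
	static inline s64 timespec_to_ns(const struct timespec *ts)
	{
		return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
	}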
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 12b1d8480808..ff6a17cb1362 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -100,6 +100,7 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
break;
case e1000_82580:
case e1000_i350:
+ case e1000_i354:
case e1000_i210:
case e1000_i211:
reg = rd32(E1000_MDICNFG);
@@ -149,6 +150,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82580:
case e1000_i350:
+ case e1000_i354:
phy->ops.read_reg = igb_read_phy_reg_82580;
phy->ops.write_reg = igb_write_phy_reg_82580;
break;
@@ -174,13 +176,14 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
/* Verify phy id and set remaining function pointers */
switch (phy->id) {
+ case M88E1545_E_PHY_ID:
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
case M88E1111_I_PHY_ID:
phy->type = e1000_phy_m88;
+ phy->ops.check_polarity = igb_check_polarity_m88;
phy->ops.get_phy_info = igb_get_phy_info_m88;
- if (phy->id == I347AT4_E_PHY_ID ||
- phy->id == M88E1112_E_PHY_ID)
+ if (phy->id != M88E1111_I_PHY_ID)
phy->ops.get_cable_length =
igb_get_cable_length_m88_gen2;
else
@@ -227,7 +230,7 @@ out:
* igb_init_nvm_params_82575 - Init NVM func ptrs.
* @hw: pointer to the HW structure
**/
-s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
+static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 eecd = rd32(E1000_EECD);
@@ -287,6 +290,7 @@ s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
nvm->ops.read = igb_read_nvm_spi;
nvm->ops.write = igb_write_nvm_spi;
break;
+ case e1000_i354:
case e1000_i350:
nvm->ops.validate = igb_validate_nvm_checksum_i350;
nvm->ops.update = igb_update_nvm_checksum_i350;
@@ -352,6 +356,7 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
break;
case e1000_i350:
+ case e1000_i354:
mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
break;
default:
@@ -384,6 +389,9 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
dev_spec->eee_disable = false;
else
dev_spec->eee_disable = true;
+ /* Allow a single clear of the SW semaphore on I210 and newer */
+ if (mac->type >= e1000_i210)
+ dev_spec->clear_semaphore_once = true;
/* physical interface link setup */
mac->ops.setup_physical_interface =
(hw->phy.media_type == e1000_media_type_copper)
@@ -435,8 +443,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
mac->type = e1000_i350;
break;
case E1000_DEV_ID_I210_COPPER:
- case E1000_DEV_ID_I210_COPPER_OEM1:
- case E1000_DEV_ID_I210_COPPER_IT:
case E1000_DEV_ID_I210_FIBER:
case E1000_DEV_ID_I210_SERDES:
case E1000_DEV_ID_I210_SGMII:
@@ -445,14 +451,18 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
case E1000_DEV_ID_I211_COPPER:
mac->type = e1000_i211;
break;
+ case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
+ case E1000_DEV_ID_I354_SGMII:
+ case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
+ mac->type = e1000_i354;
+ break;
default:
return -E1000_ERR_MAC_INIT;
break;
}
/* Set media type */
- /*
- * The 82575 uses bits 22:23 for link mode. The mode can be changed
+ /* The 82575 uses bits 22:23 for link mode. The mode can be changed
* based on the EEPROM. We cannot rely upon device ID. There
* is no distinguishable difference between fiber and internal
* SerDes mode on the 82575. There can be an external PHY attached
@@ -621,8 +631,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
u32 ctrl_ext;
u32 mdic;
- /*
- * For SGMII PHYs, we try the list of possible addresses until
+ /* For SGMII PHYs, we try the list of possible addresses until
* we find one that works. For non-SGMII PHYs
* (e.g. integrated copper PHYs), an address of 1 should
* work. The result of this function should mean phy->phy_addr
@@ -644,6 +653,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
break;
case e1000_82580:
case e1000_i350:
+ case e1000_i354:
case e1000_i210:
case e1000_i211:
mdic = rd32(E1000_MDICNFG);
@@ -665,8 +675,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
wrfl();
msleep(300);
- /*
- * The address field in the I2CCMD register is 3 bits and 0 is invalid.
+ /* The address field in the I2CCMD register is 3 bits and 0 is invalid.
* Therefore, we need to test 1-7
*/
for (phy->addr = 1; phy->addr < 8; phy->addr++) {
@@ -674,8 +683,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
if (ret_val == 0) {
hw_dbg("Vendor ID 0x%08X read at address %u\n",
phy_id, phy->addr);
- /*
- * At the time of this writing, The M88 part is
+ /* At the time of this writing, the M88 part is
* the only supported SGMII PHY product.
*/
if (phy_id == M88_VENDOR)
@@ -711,15 +719,13 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
{
s32 ret_val;
- /*
- * This isn't a true "hard" reset, but is the only reset
+ /* This isn't a true "hard" reset, but is the only reset
* available to us at this time.
*/
hw_dbg("Soft resetting SGMII attached PHY...\n");
- /*
- * SFP documentation requires the following to configure the SPF module
+ /* SFP documentation requires the following to configure the SFP module
* to work on SGMII. No further documentation is given.
*/
ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
@@ -774,8 +780,7 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
data &= ~IGP02E1000_PM_D0_LPLU;
ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
data);
- /*
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained.
@@ -838,8 +843,7 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
} else {
data &= ~E1000_82580_PM_D0_LPLU;
- /*
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained.
@@ -867,7 +871,7 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
* During driver activity, SmartSpeed should be enabled so performance is
* maintained.
**/
-s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
+static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val = 0;
@@ -877,8 +881,7 @@ s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
if (!active) {
data &= ~E1000_82580_PM_D3_LPLU;
- /*
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained.
@@ -964,8 +967,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
if (!(swfw_sync & (fwmask | swmask)))
break;
- /*
- * Firmware currently using resource (fwmask)
+ /* Firmware currently using resource (fwmask)
* or other software thread using resource (swmask)
*/
igb_put_hw_semaphore(hw);
@@ -1065,8 +1067,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
if (hw->phy.media_type != e1000_media_type_copper) {
ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
&duplex);
- /*
- * Use this flag to determine if link needs to be checked or
+ /* Use this flag to determine if link needs to be checked or
* not. If we have link clear the flag so that we do not
* continue to check for link.
*/
@@ -1135,15 +1136,13 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
*speed = 0;
*duplex = 0;
- /*
- * Read the PCS Status register for link state. For non-copper mode,
+ /* Read the PCS Status register for link state. For non-copper mode,
* the status register is not accurate. The PCS status register is
* used instead.
*/
pcs = rd32(E1000_PCS_LSTAT);
- /*
- * The link up bit determines when link is up on autoneg. The sync ok
+ /* The link up bit determines when link is up on autoneg. The sync ok
* gets set once both sides sync up and agree upon link. Stable link
* can be determined by checking for both link up and link sync ok
*/
@@ -1214,8 +1213,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
u32 ctrl, icr;
s32 ret_val;
- /*
- * Prevent the PCI-E bus from sticking if there is no TLP connection
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = igb_disable_pcie_master(hw);
@@ -1244,8 +1242,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
ret_val = igb_get_auto_rd_done(hw);
if (ret_val) {
- /*
- * When auto config read does not complete, do not
+ /* When auto config read does not complete, do not
* return with an error. This can happen in situations
* where there is no eeprom and prevents getting link.
*/
@@ -1287,7 +1284,7 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
/* Disabling VLAN filtering */
hw_dbg("Initializing the IEEE VLAN\n");
- if (hw->mac.type == e1000_i350)
+ if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
igb_clear_vfta_i350(hw);
else
igb_clear_vfta(hw);
@@ -1308,8 +1305,7 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
/* Setup link and flow control */
ret_val = igb_setup_link(hw);
- /*
- * Clear all of the statistics registers (clear on read). It is
+ /* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
* is no link.
@@ -1364,6 +1360,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
switch (hw->phy.id) {
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
+ case M88E1545_E_PHY_ID:
case I210_I_PHY_ID:
ret_val = igb_copper_link_setup_m88_gen2(hw);
break;
@@ -1412,17 +1409,17 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
return ret_val;
- /*
- * On the 82575, SerDes loopback mode persists until it is
+ /* On the 82575, SerDes loopback mode persists until it is
* explicitly turned off or a power cycle is performed. A read to
* the register does not indicate its status. Therefore, we ensure
* loopback mode is disabled during initialization.
*/
wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
- /* power on the sfp cage if present */
+ /* power on the sfp cage if present and turn on I2C */
ctrl_ext = rd32(E1000_CTRL_EXT);
ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
wr32(E1000_CTRL_EXT, ctrl_ext);
ctrl_reg = rd32(E1000_CTRL);
@@ -1466,8 +1463,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
pcs_autoneg = false;
}
- /*
- * non-SGMII modes only supports a speed of 1000/Full for the
+ /* non-SGMII modes only support a speed of 1000/Full for the
* link so it is best to just force the MAC and let the pcs
* link either autoneg or be forced to 1000/Full
*/
@@ -1481,8 +1477,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
wr32(E1000_CTRL, ctrl_reg);
- /*
- * New SerDes mode allows for forcing speed or autonegotiating speed
+ /* New SerDes mode allows for forcing speed or autonegotiating speed
* at 1gb. Autoneg should be default set by most drivers. This is the
* mode that will be compatible with older link partners and switches.
* However, both are supported by the hardware and some drivers/tools.
@@ -1592,8 +1587,7 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
{
s32 ret_val = 0;
- /*
- * If there's an alternate MAC address place it in RAR0
+ /* If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
* address.
*/
@@ -1777,8 +1771,7 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
goto out;
- /*
- * if capababilities version is type 1 we can write the
+ /* if capabilities version is type 1 we can write the
* timeout of 10ms to 200ms through the GCR register
*/
if (!(gcr & E1000_GCR_CAP_VER2)) {
@@ -1786,8 +1779,7 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
goto out;
}
- /*
- * for version 2 capabilities we need to write the config space
+ /* for version 2 capabilities we need to write the config space
* directly in order to set the completion timeout value for
* 16ms to 55ms
*/
@@ -1825,6 +1817,7 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
reg_offset = E1000_DTXSWC;
break;
case e1000_i350:
+ case e1000_i354:
reg_offset = E1000_TXSWC;
break;
default:
@@ -1866,6 +1859,7 @@ void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
wr32(E1000_DTXSWC, dtxswc);
break;
+ case e1000_i354:
case e1000_i350:
dtxswc = rd32(E1000_TXSWC);
if (enable)
@@ -1879,7 +1873,6 @@ void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
break;
}
-
}
/**
@@ -1914,7 +1907,6 @@ static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
{
s32 ret_val;
-
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
goto out;
@@ -2016,8 +2008,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
/* Get current control state. */
ctrl = rd32(E1000_CTRL);
- /*
- * Prevent the PCI-E bus from sticking if there is no TLP connection
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = igb_disable_pcie_master(hw);
@@ -2052,18 +2043,13 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
ret_val = igb_get_auto_rd_done(hw);
if (ret_val) {
- /*
- * When auto config read does not complete, do not
+ /* When auto config read does not complete, do not
* return with an error. This can happen in situations
* where there is no eeprom and prevents getting link.
*/
hw_dbg("Auto Read Done did not complete\n");
}
- /* If EEPROM is not present, run manual init scripts */
- if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
- igb_reset_init_script_82575(hw);
-
/* clear global device reset status bit */
wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);
@@ -2197,7 +2183,8 @@ static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
/* if checksums compatibility bit is set validate checksums
- * for all 4 ports. */
+ * for all 4 ports.
+ */
eeprom_regions_count = 4;
}
@@ -2309,6 +2296,41 @@ out:
}
/**
+ * __igb_access_emi_reg - Read/write EMI register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: pointer to value to read/write from/to the EMI address
+ * @read: boolean flag to indicate read or write
+ **/
+static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
+ u16 *data, bool read)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
+ if (ret_val)
+ return ret_val;
+
+ if (read)
+ ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
+ else
+ ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
+
+ return ret_val;
+}
+
+/**
+ * igb_read_emi_reg - Read Extended Management Interface register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: value to be read from the EMI address
+ **/
+s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
+{
+ return __igb_access_emi_reg(hw, addr, data, true);
+}
+
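+
+ Editor's note: __igb_access_emi_reg() above is the usual indirect-register idiom — program the target address into E1000_EMIADD once, then move data through E1000_EMIDATA in either direction. A minimal userspace sketch of the idiom against a simulated register pair (all names below are illustrative, not driver APIs):

#include <stdint.h>
#include <stdio.h>

/* Simulated indirect-access window: one address register, one data
 * register, backed by a small EMI register file. */
static uint16_t emi_regs[256];
static uint16_t emi_addr;

static void write_emiadd(uint16_t addr) { emi_addr = addr; }
static void write_emidata(uint16_t val) { emi_regs[emi_addr & 0xFF] = val; }
static uint16_t read_emidata(void) { return emi_regs[emi_addr & 0xFF]; }

/* Same shape as __igb_access_emi_reg(): program the address once,
 * then touch the data register for either direction. */
static void emi_access(uint16_t addr, uint16_t *data, int read)
{
	write_emiadd(addr);
	if (read)
		*data = read_emidata();
	else
		write_emidata(*data);
}

int main(void)
{
	uint16_t v = 0xBEEF;

	emi_access(0x10, &v, 0);	/* write */
	v = 0;
	emi_access(0x10, &v, 1);	/* read back */
	printf("0x%04X\n", v);		/* 0xBEEF */
	return 0;
}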
+/**
* igb_set_eee_i350 - Enable/disable EEE support
* @hw: pointer to the HW structure
*
@@ -2338,7 +2360,6 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
hw_dbg("LPI Clock Stop Bit should not be set!\n");
-
} else {
ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
E1000_IPCNFG_EEE_100M_AN);
@@ -2355,6 +2376,108 @@ out:
return ret_val;
}
+/**
+ * igb_set_eee_i354 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ *
+ * Enable/disable EEE legacy mode based on setting in dev_spec structure.
+ *
+ **/
+s32 igb_set_eee_i354(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+ u16 phy_data;
+
+ if ((hw->phy.media_type != e1000_media_type_copper) ||
+ (phy->id != M88E1545_E_PHY_ID))
+ goto out;
+
+ if (!hw->dev_spec._82575.eee_disable) {
+ /* Switch to PHY page 18. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 18);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1545_EEE_CTRL_1,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data |= E1000_M88E1545_EEE_CTRL_1_MS;
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1545_EEE_CTRL_1,
+ phy_data);
+ if (ret_val)
+ goto out;
+
+ /* Return the PHY to page 0. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 0);
+ if (ret_val)
+ goto out;
+
+ /* Turn on EEE advertisement. */
+ ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data |= E1000_EEE_ADV_100_SUPPORTED |
+ E1000_EEE_ADV_1000_SUPPORTED;
+ ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ phy_data);
+ } else {
+ /* Turn off EEE advertisement. */
+ ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
+ E1000_EEE_ADV_1000_SUPPORTED);
+ ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ phy_data);
+ }
+
+out:
+ return ret_val;
+}
+
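+
+ Editor's note: igb_set_eee_i354() above drives a paged M88 PHY — the page is selected by writing register 0x16 (E1000_M88E1545_PAGE_ADDR), the EEE control register is read-modify-written on page 18, and page 0 is restored afterward. A minimal sketch of the paging idiom against a simulated register file (the layout below is illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_ADDR_REG 0x16	/* mirrors E1000_M88E1545_PAGE_ADDR */

static uint16_t page;		/* currently selected page */
static uint16_t regs[32][32];	/* [page][reg], simulated */

static void phy_write(uint8_t reg, uint16_t val)
{
	if (reg == PAGE_ADDR_REG)
		page = val & 0x1F;	/* page select is a register itself */
	else
		regs[page][reg] = val;
}

static uint16_t phy_read(uint8_t reg)
{
	return regs[page][reg];
}

int main(void)
{
	uint16_t ctrl;

	/* Same shape as igb_set_eee_i354(): select page 18,
	 * read-modify-write EEE_CTRL_1 (register 0), restore page 0. */
	phy_write(PAGE_ADDR_REG, 18);
	ctrl = phy_read(0) | 0x0001;	/* set the master/slave bit */
	phy_write(0, ctrl);
	phy_write(PAGE_ADDR_REG, 0);

	phy_write(PAGE_ADDR_REG, 18);
	printf("page 18 reg 0 = 0x%04X\n", phy_read(0));	/* 0x0001 */
	return 0;
}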
+/**
+ * igb_get_eee_status_i354 - Get EEE status
+ * @hw: pointer to the HW structure
+ * @status: EEE status
+ *
+ * Infer EEE status from whether Tx or Rx LPI indications have been
+ * received.
+ **/
+s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+ u16 phy_data;
+
+ /* Check if EEE is supported on this device. */
+ if ((hw->phy.media_type != e1000_media_type_copper) ||
+ (phy->id != M88E1545_E_PHY_ID))
+ goto out;
+
+ ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
+ E1000_PCS_STATUS_DEV_I354,
+ &phy_data);
+ if (ret_val)
+ goto out;
+
+ *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
+ E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
+
+out:
+ return ret_val;
+}
+
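+
+ Editor's note: igb_get_eee_status_i354() above reduces the PCS status word to a boolean — EEE counts as active if an LPI indication was seen in either direction. A tiny standalone sketch of that reduction (the constants mirror the defines added in e1000_defines.h below):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCS_STATUS_TX_LPI_RCVD 0x0800	/* mirrors E1000_PCS_STATUS_TX_LPI_RCVD */
#define PCS_STATUS_RX_LPI_RCVD 0x0400	/* mirrors E1000_PCS_STATUS_RX_LPI_RCVD */

int main(void)
{
	uint16_t phy_data = 0x0400;	/* only Rx LPI observed */

	/* Same test as igb_get_eee_status_i354(). */
	bool status = phy_data & (PCS_STATUS_TX_LPI_RCVD |
				  PCS_STATUS_RX_LPI_RCVD) ? true : false;

	printf("%s\n", status ? "EEE active" : "EEE idle");
	return 0;
}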
static const u8 e1000_emc_temp_data[4] = {
E1000_EMC_INTERNAL_DATA,
E1000_EMC_DIODE1_DATA,
@@ -2368,11 +2491,12 @@ static const u8 e1000_emc_therm_limit[4] = {
E1000_EMC_DIODE3_THERM_LIMIT
};
-/* igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
+/**
+ * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
* @hw: pointer to hardware structure
*
* Updates the temperatures in mac.thermal_sensor_data
- */
+ **/
s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
{
s32 status = E1000_SUCCESS;
@@ -2420,12 +2544,13 @@ s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
return status;
}
-/* igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
+/**
+ * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
* @hw: pointer to hardware structure
*
* Sets the thermal sensor thresholds according to the NVM map
* and save off the threshold and location values into mac.thermal_sensor_data
- */
+ **/
s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
{
s32 status = E1000_SUCCESS;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 73ab41f0e032..74a1506b4235 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -263,7 +263,9 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int);
void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
u16 igb_rxpbs_adjust_82580(u32 data);
+s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data);
s32 igb_set_eee_i350(struct e1000_hw *);
+s32 igb_set_eee_i354(struct e1000_hw *);
s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *);
s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw);
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 7e13337d3b9d..31a0f82cc650 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -138,8 +138,7 @@
#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
-/*
- * Use byte values for the following shift parameters
+/* Use byte values for the following shift parameters
* Usage:
* psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
* E1000_PSRCTL_BSIZE0_MASK) |
@@ -237,11 +236,14 @@
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000
/* BMC external code execution disabled */
+#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */
+#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */
/* Constants used to interpret the masked PCI-X bus speed. */
#define SPEED_10 10
#define SPEED_100 100
#define SPEED_1000 1000
+#define SPEED_2500 2500
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
@@ -382,8 +384,7 @@
#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
/* TCP Timer */
-/*
- * This defines the bits that are set in the Interrupt Mask
+/* This defines the bits that are set in the Interrupt Mask
* Set/Read Register. Each bit is documented below:
* o RXT0 = Receiver Timer Interrupt (ring 0)
* o TXDW = Transmit Descriptor Written Back
@@ -440,8 +441,7 @@
#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
/* Receive Address */
-/*
- * Number of high/low register pairs in the RAR. The RAR (Receive Address
+/* Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor.
* Technically, we have 16 spots. However, we reserve one of these spots
* (RAR[15]) for our directed address used by controllers with
@@ -760,8 +760,7 @@
#define MAX_PHY_MULTI_PAGE_REG 0xF
/* Bit definitions for valid PHY IDs. */
-/*
- * I = Integrated
+/* I = Integrated
* E = External
*/
#define M88E1111_I_PHY_ID 0x01410CC0
@@ -772,6 +771,7 @@
#define I350_I_PHY_ID 0x015403B0
#define M88_VENDOR 0x0141
#define I210_I_PHY_ID 0x01410C00
+#define M88E1545_E_PHY_ID 0x01410EA0
/* M88E1000 Specific Registers */
#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
@@ -791,8 +791,7 @@
#define M88E1000_PSCR_AUTO_X_1000T 0x0040
/* Auto crossover enabled all speeds */
#define M88E1000_PSCR_AUTO_X_MODE 0x0060
-/*
- * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold
+/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold
* 0=Normal 10BASE-T Rx Threshold)
*/
/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
@@ -802,8 +801,7 @@
#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
-/*
- * 0 = <50M
+/* 0 = <50M
* 1 = 50-80M
* 2 = 80-110M
* 3 = 110-140M
@@ -816,20 +814,17 @@
#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
/* M88E1000 Extended PHY Specific Control Register */
-/*
- * 1 = Lost lock detect enabled.
+/* 1 = Lost lock detect enabled.
* Will assert lost lock and bring
* link down if idle not seen
* within 1ms in 1000BASE-T
*/
-/*
- * Number of times we will attempt to autonegotiate before downshifting if we
+/* Number of times we will attempt to autonegotiate before downshifting if we
* are the master
*/
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
-/*
- * Number of times we will attempt to autonegotiate before downshifting if we
+/* Number of times we will attempt to autonegotiate before downshifting if we
* are the slave
*/
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
@@ -844,8 +839,7 @@
/* i347-AT4 Extended PHY Specific Control Register */
-/*
- * Number of times we will attempt to autonegotiate before downshifting if we
+/* Number of times we will attempt to autonegotiate before downshifting if we
* are the master
*/
#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
@@ -895,6 +889,22 @@
#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */
#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */
+#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */
+#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */
+#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */
+#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
+#define E1000_M88E1545_PAGE_ADDR 0x16 /* Page Offset Register */
+#define E1000_M88E1545_EEE_CTRL_1 0x0
+#define E1000_M88E1545_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
+#define E1000_EEE_ADV_DEV_I354 7
+#define E1000_EEE_ADV_ADDR_I354 60
+#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
+#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */
+#define E1000_PCS_STATUS_DEV_I354 3
+#define E1000_PCS_STATUS_ADDR_I354 1
+#define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */
+#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400
+#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800
/* SerDes Control */
#define E1000_GEN_CTL_READY 0x80000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 0d5cf9c63d0d..488abb24a54f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -38,38 +38,39 @@
struct e1000_hw;
-#define E1000_DEV_ID_82576 0x10C9
-#define E1000_DEV_ID_82576_FIBER 0x10E6
-#define E1000_DEV_ID_82576_SERDES 0x10E7
-#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
-#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526
-#define E1000_DEV_ID_82576_NS 0x150A
-#define E1000_DEV_ID_82576_NS_SERDES 0x1518
-#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
-#define E1000_DEV_ID_82575EB_COPPER 0x10A7
-#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
-#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
-#define E1000_DEV_ID_82580_COPPER 0x150E
-#define E1000_DEV_ID_82580_FIBER 0x150F
-#define E1000_DEV_ID_82580_SERDES 0x1510
-#define E1000_DEV_ID_82580_SGMII 0x1511
-#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
-#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
-#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
-#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
-#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
-#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
-#define E1000_DEV_ID_I350_COPPER 0x1521
-#define E1000_DEV_ID_I350_FIBER 0x1522
-#define E1000_DEV_ID_I350_SERDES 0x1523
-#define E1000_DEV_ID_I350_SGMII 0x1524
+#define E1000_DEV_ID_82576 0x10C9
+#define E1000_DEV_ID_82576_FIBER 0x10E6
+#define E1000_DEV_ID_82576_SERDES 0x10E7
+#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
+#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526
+#define E1000_DEV_ID_82576_NS 0x150A
+#define E1000_DEV_ID_82576_NS_SERDES 0x1518
+#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
+#define E1000_DEV_ID_82575EB_COPPER 0x10A7
+#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
+#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
+#define E1000_DEV_ID_82580_COPPER 0x150E
+#define E1000_DEV_ID_82580_FIBER 0x150F
+#define E1000_DEV_ID_82580_SERDES 0x1510
+#define E1000_DEV_ID_82580_SGMII 0x1511
+#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
+#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
+#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
+#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
+#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
+#define E1000_DEV_ID_I350_COPPER 0x1521
+#define E1000_DEV_ID_I350_FIBER 0x1522
+#define E1000_DEV_ID_I350_SERDES 0x1523
+#define E1000_DEV_ID_I350_SGMII 0x1524
#define E1000_DEV_ID_I210_COPPER 0x1533
-#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534
-#define E1000_DEV_ID_I210_COPPER_IT 0x1535
#define E1000_DEV_ID_I210_FIBER 0x1536
#define E1000_DEV_ID_I210_SERDES 0x1537
#define E1000_DEV_ID_I210_SGMII 0x1538
#define E1000_DEV_ID_I211_COPPER 0x1539
+#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
+#define E1000_DEV_ID_I354_SGMII 0x1F41
+#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45
#define E1000_REVISION_2 2
#define E1000_REVISION_4 4
@@ -90,6 +91,7 @@ enum e1000_mac_type {
e1000_82576,
e1000_82580,
e1000_i350,
+ e1000_i354,
e1000_i210,
e1000_i211,
e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
@@ -98,7 +100,8 @@ enum e1000_mac_type {
enum e1000_media_type {
e1000_media_type_unknown = 0,
e1000_media_type_copper = 1,
- e1000_media_type_internal_serdes = 2,
+ e1000_media_type_fiber = 2,
+ e1000_media_type_internal_serdes = 3,
e1000_num_media_types
};
@@ -524,6 +527,7 @@ struct e1000_dev_spec_82575 {
bool sgmii_active;
bool global_device_reset;
bool eee_disable;
+ bool clear_semaphore_once;
};
struct e1000_hw {
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 6a42344f24f1..ddb3cf51b9b9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -44,10 +44,42 @@
static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
{
u32 swsm;
- s32 ret_val = E1000_SUCCESS;
s32 timeout = hw->nvm.word_size + 1;
s32 i = 0;
+ /* Get the SW semaphore */
+ while (i < timeout) {
+ swsm = rd32(E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ udelay(50);
+ i++;
+ }
+
+ if (i == timeout) {
+ /* In rare circumstances, the SW semaphore may already be held
+ * unintentionally. Clear the semaphore once before giving up.
+ */
+ if (hw->dev_spec._82575.clear_semaphore_once) {
+ hw->dev_spec._82575.clear_semaphore_once = false;
+ igb_put_hw_semaphore(hw);
+ for (i = 0; i < timeout; i++) {
+ swsm = rd32(E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ udelay(50);
+ }
+ }
+
+ /* If we do not have the semaphore here, we have to give up. */
+ if (i == timeout) {
+ hw_dbg("Driver can't access device - SMBI bit is set.\n");
+ return -E1000_ERR_NVM;
+ }
+ }
+
/* Get the FW semaphore. */
for (i = 0; i < timeout; i++) {
swsm = rd32(E1000_SWSM);
@@ -64,12 +96,10 @@ static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
/* Release semaphores */
igb_put_hw_semaphore(hw);
hw_dbg("Driver can't access the NVM\n");
- ret_val = -E1000_ERR_NVM;
- goto out;
+ return -E1000_ERR_NVM;
}
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
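
Editor's note: the rewritten igb_get_hw_semaphore_i210() above polls the SMBI bit with a timeout and, when clear_semaphore_once is set, allows exactly one forced release of a stale semaphore before retrying. A simplified userspace sketch of that one-shot recovery pattern (the state variables are stand-ins, not the hardware registers):

#include <stdbool.h>
#include <stdio.h>

static bool smbi_taken = true;	/* simulate a stale semaphore */
static bool clear_once = true;	/* mirrors clear_semaphore_once */

static bool poll_smbi(int timeout)
{
	for (int i = 0; i < timeout; i++)
		if (!smbi_taken)
			return true;
	return false;
}

int main(void)
{
	if (!poll_smbi(100) && clear_once) {
		/* One-shot recovery, as in igb_get_hw_semaphore_i210():
		 * force-release the stale semaphore and poll again. */
		clear_once = false;
		smbi_taken = false;	/* stands in for igb_put_hw_semaphore() */
		if (!poll_smbi(100))
			return 1;	/* give up: SMBI still set */
	}
	printf("semaphore acquired\n");
	return 0;
}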
/**
@@ -99,23 +129,6 @@ void igb_release_nvm_i210(struct e1000_hw *hw)
}
/**
- * igb_put_hw_semaphore_i210 - Release hardware semaphore
- * @hw: pointer to the HW structure
- *
- * Release hardware semaphore used to access the PHY or NVM
- */
-static void igb_put_hw_semaphore_i210(struct e1000_hw *hw)
-{
- u32 swsm;
-
- swsm = rd32(E1000_SWSM);
-
- swsm &= ~E1000_SWSM_SWESMBI;
-
- wr32(E1000_SWSM, swsm);
-}
-
-/**
* igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
@@ -138,13 +151,11 @@ s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
}
swfw_sync = rd32(E1000_SW_FW_SYNC);
- if (!(swfw_sync & fwmask))
+ if (!(swfw_sync & (fwmask | swmask)))
break;
- /*
- * Firmware currently using resource (fwmask)
- */
- igb_put_hw_semaphore_i210(hw);
+ /* Firmware currently using resource (fwmask) */
+ igb_put_hw_semaphore(hw);
mdelay(5);
i++;
}
@@ -158,7 +169,7 @@ s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
swfw_sync |= swmask;
wr32(E1000_SW_FW_SYNC, swfw_sync);
- igb_put_hw_semaphore_i210(hw);
+ igb_put_hw_semaphore(hw);
out:
return ret_val;
}
@@ -182,7 +193,7 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
swfw_sync &= ~mask;
wr32(E1000_SW_FW_SYNC, swfw_sync);
- igb_put_hw_semaphore_i210(hw);
+ igb_put_hw_semaphore(hw);
}
/**
@@ -203,7 +214,8 @@ s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
/* We cannot hold synchronization semaphores for too long,
* because of the forceful takeover procedure. However, it is more efficient
- * to read in bursts than synchronizing access for each word. */
+ * to read in bursts than synchronizing access for each word.
+ */
for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
E1000_EERD_EEWR_MAX_COUNT : (words - i);
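
Editor's note: the loop above splits an NVM read of `words` words into bursts of at most E1000_EERD_EEWR_MAX_COUNT so the synchronization semaphore is never held for too long. A standalone sketch of the chunking arithmetic (the 512-word burst limit is illustrative):

#include <stdio.h>

#define EERD_EEWR_MAX_COUNT 512	/* illustrative burst limit */

int main(void)
{
	unsigned int words = 1100, i, count;

	/* Same chunking as igb_read_nvm_srrd_i210(): full bursts first,
	 * then one short tail burst for the remainder. */
	for (i = 0; i < words; i += EERD_EEWR_MAX_COUNT) {
		count = (words - i) / EERD_EEWR_MAX_COUNT > 0 ?
			EERD_EEWR_MAX_COUNT : (words - i);
		printf("burst at offset %u: %u words\n", i, count);
	}
	return 0;	/* 512, 512, 76 */
}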
@@ -242,8 +254,7 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
u32 attempts = 100000;
s32 ret_val = E1000_SUCCESS;
- /*
- * A check for invalid values: offset too large, too many words,
+ /* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -294,7 +305,7 @@ out:
*
* If error code is returned, data and Shadow RAM may be inconsistent - buffer
* partially written.
- */
+ **/
s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
@@ -326,7 +337,7 @@ s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
/**
* igb_read_nvm_i211 - Read NVM wrapper function for I211
* @hw: pointer to the HW structure
- * @address: the word address (aka eeprom offset) to read
+ * @words: number of words to read
* @data: pointer to the data read
*
* Wrapper function to return data formerly found in the NVM.
@@ -549,8 +560,7 @@ s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
- /*
- * Replace the read function with semaphore grabbing with
+ /* Replace the read function with semaphore grabbing with
* the one that skips this for a while.
* We have semaphore taken already here.
*/
@@ -570,7 +580,6 @@ s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
return status;
}
-
/**
* igb_update_nvm_checksum_i210 - Update EEPROM checksum
* @hw: pointer to the HW structure
@@ -585,8 +594,7 @@ s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
u16 checksum = 0;
u16 i, nvm_data;
- /*
- * Read the first word from the EEPROM. If this times out or fails, do
+ /* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
* EEPROM read fails
*/
@@ -597,8 +605,7 @@ s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
}
if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
- /*
- * Do not use hw->nvm.ops.write, hw->nvm.ops.read
+ /* Do not use hw->nvm.ops.write, hw->nvm.ops.read
* because we do not want to take the synchronization
* semaphores twice here.
*/
@@ -635,7 +642,7 @@ out:
* igb_pool_flash_update_done_i210 - Poll FLUDONE status.
* @hw: pointer to the HW structure
*
- */
+ **/
static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
{
s32 ret_val = -E1000_ERR_NVM;
@@ -714,3 +721,68 @@ s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
out:
return ret_val;
}
+
+/**
+ * __igb_access_xmdio_reg - Read/write XMDIO register
+ * @hw: pointer to the HW structure
+ * @address: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: pointer to value to read/write from/to the XMDIO address
+ * @read: boolean flag to indicate read or write
+ **/
+static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
+ u8 dev_addr, u16 *data, bool read)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
+ dev_addr);
+ if (ret_val)
+ return ret_val;
+
+ if (read)
+ ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
+ else
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
+ if (ret_val)
+ return ret_val;
+
+ /* Reset the device address back to 0 */
+ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
+ if (ret_val)
+ return ret_val;
+
+ return ret_val;
+}
+
+/**
+ * igb_read_xmdio_reg - Read XMDIO register
+ * @hw: pointer to the HW structure
+ * @addr: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: value to be read from the EMI address
+ **/
+s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
+{
+ return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
+}
+
+/**
+ * igb_write_xmdio_reg - Write XMDIO register
+ * @hw: pointer to the HW structure
+ * @addr: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: value to be written to the XMDIO address
+ **/
+s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
+{
+ return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
+}
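
Editor's note: __igb_access_xmdio_reg() above implements Clause-45 MMD access through the Clause-22 MMDAC/MMDAAD window — select device and register address, flip MMDAC to its data function (E1000_MMDAC_FUNC_DATA), move the data, then reset MMDAC to 0. A userspace sketch of the same sequence against a simulated PHY (register widths and layout below are illustrative):

#include <stdint.h>
#include <stdio.h>

#define MMDAC_FUNC_DATA 0x4000	/* mirrors E1000_MMDAC_FUNC_DATA */

/* Simulated MMD state: per-device address pointer + register file. */
static uint16_t mmd_ptr[32];
static uint16_t mmd_regs[32][128];
static uint16_t mmdac;		/* function/device-address register */

static void write_mmdac(uint16_t v) { mmdac = v; }

/* MMDAAD carries the register address in the address phase and the
 * data in the data phase, selected by the MMDAC function bit. */
static void write_mmdaad(uint16_t v)
{
	uint8_t dev = mmdac & 0x1F;

	if (mmdac & MMDAC_FUNC_DATA)
		mmd_regs[dev][mmd_ptr[dev] & 0x7F] = v;
	else
		mmd_ptr[dev] = v;
}

static uint16_t read_mmdaad(void)
{
	uint8_t dev = mmdac & 0x1F;

	if (mmdac & MMDAC_FUNC_DATA)
		return mmd_regs[dev][mmd_ptr[dev] & 0x7F];
	return mmd_ptr[dev];	/* address phase reads back the pointer */
}

/* Same sequence as __igb_access_xmdio_reg(). */
static uint16_t xmdio_read(uint8_t dev, uint16_t addr)
{
	uint16_t val;

	write_mmdac(dev);			/* address phase: select device */
	write_mmdaad(addr);			/* latch the register address */
	write_mmdac(MMDAC_FUNC_DATA | dev);	/* switch to data phase */
	val = read_mmdaad();
	write_mmdac(0);				/* reset, as the driver does */
	return val;
}

int main(void)
{
	mmd_regs[7][60] = 0x0006;	/* pretend EEE adv bits are set */
	printf("0x%04X\n", xmdio_read(7, 60));	/* 0x0006 */
	return 0;
}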
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index e4e1a73b7c75..bfc08e05c907 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -45,6 +45,10 @@ extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data);
extern s32 igb_read_invm_version(struct e1000_hw *hw,
struct e1000_fw_version *invm_ver);
+extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
+ u16 *data);
+extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
+ u16 data);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index a5c7200b9a71..2559d70a2321 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -214,7 +214,7 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
else
vfta &= ~mask;
}
- if (hw->mac.type == e1000_i350)
+ if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
igb_write_vfta_i350(hw, index, vfta);
else
igb_write_vfta(hw, index, vfta);
@@ -230,8 +230,8 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
* Checks the nvm for an alternate MAC address. An alternate MAC address
* can be setup by pre-boot software and must be treated like a permanent
* address and must override the actual permanent MAC address. If an
- * alternate MAC address is fopund it is saved in the hw struct and
- * prgrammed into RAR0 and the cuntion returns success, otherwise the
+ * alternate MAC address is found it is saved in the hw struct and
+ * programmed into RAR0 and the function returns success, otherwise the
* function returns an error.
**/
s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
@@ -241,8 +241,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
u16 offset, nvm_alt_mac_addr_offset, nvm_data;
u8 alt_mac_addr[ETH_ALEN];
- /*
- * Alternate MAC address is handled by the option ROM for 82580
+ /* Alternate MAC address is handled by the option ROM for 82580
* and newer. SW support not required.
*/
if (hw->mac.type >= e1000_82580)
@@ -285,8 +284,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
goto out;
}
- /*
- * We have a valid alternate MAC address, and we want to treat it the
+ /* We have a valid alternate MAC address, and we want to treat it the
* same as the normal permanent MAC address stored by the HW into the
* RAR. Do this by mapping this address into RAR0.
*/
@@ -309,8 +307,7 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
- /*
- * HW expects these in little endian so we reverse the byte order
+ /* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
*/
rar_low = ((u32) addr[0] |
@@ -323,8 +320,7 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
if (rar_low || rar_high)
rar_high |= E1000_RAH_AV;
- /*
- * Some bridges will combine consecutive 32-bit writes into
+ /* Some bridges will combine consecutive 32-bit writes into
* a single burst write, which will malfunction on some parts.
* The flushes avoid this.
*/
@@ -348,8 +344,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
{
u32 hash_bit, hash_reg, mta;
- /*
- * The MTA is a register array of 32-bit registers. It is
+ /* The MTA is a register array of 32-bit registers. It is
* treated like an array of (32*mta_reg_count) bits. We want to
* set bit BitArray[hash_value]. So we figure out what register
* the bit is in, read it, OR in the new bit, then write
@@ -386,15 +381,13 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
/* Register count multiplied by bits per register */
hash_mask = (hw->mac.mta_reg_count * 32) - 1;
- /*
- * For a mc_filter_type of 0, bit_shift is the number of left-shifts
+ /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
* where 0xFF would still fall within the hash mask.
*/
while (hash_mask >> bit_shift != 0xFF)
bit_shift++;
- /*
- * The portion of the address that is used for the hash table
+ /* The portion of the address that is used for the hash table
* is determined by the mc_filter_type setting.
* The algorithm is such that there is a total of 8 bits of shifting.
* The bit_shift for a mc_filter_type of 0 represents the number of
@@ -536,8 +529,7 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw)
s32 ret_val;
bool link;
- /*
- * We only want to go out to the PHY registers to see if Auto-Neg
+ /* We only want to go out to the PHY registers to see if Auto-Neg
* has completed and/or if our link status has changed. The
* get_link_status flag is set upon receiving a Link Status
* Change or Rx Sequence Error interrupt.
@@ -547,8 +539,7 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw)
goto out;
}
- /*
- * First we want to see if the MII Status Register reports
+ /* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
* of the PHY.
*/
@@ -561,14 +552,12 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw)
mac->get_link_status = false;
- /*
- * Check if there was DownShift, must be checked
+ /* Check if there was DownShift, must be checked
* immediately after link-up
*/
igb_check_downshift(hw);
- /*
- * If we are forcing speed/duplex, then we simply return since
+ /* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
*/
if (!mac->autoneg) {
@@ -576,15 +565,13 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw)
goto out;
}
- /*
- * Auto-Neg is enabled. Auto Speed Detection takes care
+ /* Auto-Neg is enabled. Auto Speed Detection takes care
* of MAC speed/duplex configuration. So we only need to
* configure Collision Distance in the MAC.
*/
igb_config_collision_dist(hw);
- /*
- * Configure Flow Control now that Auto-Neg has completed.
+ /* Configure Flow Control now that Auto-Neg has completed.
* First, we need to restore the desired flow control
* settings because we may have had to re-autoneg with a
* different link partner.
@@ -611,15 +598,13 @@ s32 igb_setup_link(struct e1000_hw *hw)
{
s32 ret_val = 0;
- /*
- * In the case of the phy reset being blocked, we already have a link.
+ /* In the case of the phy reset being blocked, we already have a link.
* We do not need to set it up again.
*/
if (igb_check_reset_block(hw))
goto out;
- /*
- * If requested flow control is set to default, set flow control
+ /* If requested flow control is set to default, set flow control
* based on the EEPROM flow control settings.
*/
if (hw->fc.requested_mode == e1000_fc_default) {
@@ -628,8 +613,7 @@ s32 igb_setup_link(struct e1000_hw *hw)
goto out;
}
- /*
- * We want to save off the original Flow Control configuration just
+ /* We want to save off the original Flow Control configuration just
* in case we get disconnected and then reconnected into a different
* hub or switch with different Flow Control capabilities.
*/
@@ -642,8 +626,7 @@ s32 igb_setup_link(struct e1000_hw *hw)
if (ret_val)
goto out;
- /*
- * Initialize the flow control address, type, and PAUSE timer
+ /* Initialize the flow control address, type, and PAUSE timer
* registers to their default values. This is done even if flow
* control is disabled, because it does not hurt anything to
* initialize these registers.
@@ -696,16 +679,14 @@ static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
s32 ret_val = 0;
u32 fcrtl = 0, fcrth = 0;
- /*
- * Set the flow control receive threshold registers. Normally,
+ /* Set the flow control receive threshold registers. Normally,
* these registers will be set to a default threshold that may be
* adjusted later by the driver's runtime code. However, if the
* ability to transmit pause frames is not enabled, then these
* registers will be set to 0.
*/
if (hw->fc.current_mode & e1000_fc_tx_pause) {
- /*
- * We need to set up the Receive Threshold high and low water
+ /* We need to set up the Receive Threshold high and low water
* marks as well as (optionally) enabling the transmission of
* XON frames.
*/
@@ -733,8 +714,7 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
s32 ret_val = 0;
u16 nvm_data;
- /*
- * Read and store word 0x0F of the EEPROM. This word contains bits
+ /* Read and store word 0x0F of the EEPROM. This word contains bits
* that determine the hardware's default PAUSE (flow control) mode,
* a bit that determines whether the HW defaults to enabling or
* disabling auto-negotiation, and the direction of the
@@ -778,8 +758,7 @@ s32 igb_force_mac_fc(struct e1000_hw *hw)
ctrl = rd32(E1000_CTRL);
- /*
- * Because we didn't get link via the internal auto-negotiation
+ /* Because we didn't get link via the internal auto-negotiation
* mechanism (we either forced link or we got link via PHY
* auto-neg), we have to manually enable/disable transmit and
* receive flow control.
@@ -843,8 +822,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
u16 speed, duplex;
- /*
- * Check for the case where we have fiber media and auto-neg failed
+ /* Check for the case where we have fiber media and auto-neg failed
* so we had to force link. In this case, we need to force the
* configuration of the MAC to match the "fc" parameter.
*/
@@ -861,15 +839,13 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
goto out;
}
- /*
- * Check for the case where we have copper media and auto-neg is
+ /* Check for the case where we have copper media and auto-neg is
* enabled. In this case, we need to check and see if Auto-Neg
* has completed, and if so, how the PHY and link partner has
* flow control configured.
*/
if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
- /*
- * Read the MII Status Register and check to see if AutoNeg
+ /* Read the MII Status Register and check to see if AutoNeg
* has completed. We read this twice because this reg has
* some "sticky" (latched) bits.
*/
@@ -888,8 +864,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
goto out;
}
- /*
- * The AutoNeg process has completed, so we now need to
+ /* The AutoNeg process has completed, so we now need to
* read both the Auto Negotiation Advertisement
* Register (Address 4) and the Auto_Negotiation Base
* Page Ability Register (Address 5) to determine how
@@ -904,8 +879,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
if (ret_val)
goto out;
- /*
- * Two bits in the Auto Negotiation Advertisement Register
+ /* Two bits in the Auto Negotiation Advertisement Register
* (Address 4) and two bits in the Auto Negotiation Base
* Page Ability Register (Address 5) determine flow control
* for both the PHY and the link partner. The following
@@ -940,8 +914,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
*/
if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
- /*
- * Now we need to check if the user selected RX ONLY
+ /* Now we need to check if the user selected RX ONLY
* of pause frames. In this case, we had to advertise
* FULL flow control because we could not advertise RX
* ONLY. Hence, we must now check to see if we need to
@@ -956,8 +929,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
"RX PAUSE frames only.\r\n");
}
}
- /*
- * For receiving PAUSE frames ONLY.
+ /* For receiving PAUSE frames ONLY.
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@@ -971,8 +943,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
hw->fc.current_mode = e1000_fc_tx_pause;
hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
}
- /*
- * For transmitting PAUSE frames ONLY.
+ /* For transmitting PAUSE frames ONLY.
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@@ -986,8 +957,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
hw->fc.current_mode = e1000_fc_rx_pause;
hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
}
- /*
- * Per the IEEE spec, at this point flow control should be
+ /* Per the IEEE spec, at this point flow control should be
* disabled. However, we want to consider that we could
* be connected to a legacy switch that doesn't advertise
* desired flow control, but can be forced on the link
@@ -1007,9 +977,9 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
* be asked to delay transmission of packets than asking
* our link partner to pause transmission of frames.
*/
- else if ((hw->fc.requested_mode == e1000_fc_none ||
- hw->fc.requested_mode == e1000_fc_tx_pause) ||
- hw->fc.strict_ieee) {
+ else if ((hw->fc.requested_mode == e1000_fc_none) ||
+ (hw->fc.requested_mode == e1000_fc_tx_pause) ||
+ (hw->fc.strict_ieee)) {
hw->fc.current_mode = e1000_fc_none;
hw_dbg("Flow Control = NONE.\r\n");
} else {
@@ -1017,8 +987,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
}
- /*
- * Now we need to do one last check... If we auto-
+ /* Now we need to do one last check... If we auto-
* negotiated to HALF DUPLEX, flow control should not be
* enabled per IEEE 802.3 spec.
*/
@@ -1031,8 +1000,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
if (duplex == HALF_DUPLEX)
hw->fc.current_mode = e1000_fc_none;
- /*
- * Now we call a subroutine to actually force the MAC
+ /* Now we call a subroutine to actually force the MAC
* controller to use the correct flow control settings.
*/
ret_val = igb_force_mac_fc(hw);
@@ -1203,6 +1171,17 @@ s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
hw_dbg("Half Duplex\n");
}
+ /* Check if it is an I354 2.5Gb backplane connection. */
+ if (hw->mac.type == e1000_i354) {
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ *speed = SPEED_2500;
+ *duplex = FULL_DUPLEX;
+ hw_dbg("2500 Mbs, ");
+ hw_dbg("Full Duplex\n");
+ }
+ }
+
return 0;
}
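
Editor's note: the new i354 check above reports 2.5 Gb/s only when the 2.5G SKU strap is set and its override bit is clear. A standalone sketch of that decode (the constants mirror the E1000_STATUS_2P5_SKU defines added above):

#include <stdint.h>
#include <stdio.h>

#define STATUS_2P5_SKU		0x00001000	/* mirrors E1000_STATUS_2P5_SKU */
#define STATUS_2P5_SKU_OVER	0x00002000	/* mirrors E1000_STATUS_2P5_SKU_OVER */

/* Same test the i354 hunk adds: the 2.5G strap counts only when the
 * override bit is clear. */
static int speed_mbps(uint32_t status)
{
	if ((status & STATUS_2P5_SKU) && !(status & STATUS_2P5_SKU_OVER))
		return 2500;
	return 1000;	/* fall back to what the speed bits resolved to */
}

int main(void)
{
	printf("%d\n", speed_mbps(0x00001000));	/* 2500 */
	printf("%d\n", speed_mbps(0x00003000));	/* 1000: override set */
	return 0;
}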
@@ -1427,8 +1406,7 @@ s32 igb_blink_led(struct e1000_hw *hw)
u32 ledctl_blink = 0;
u32 i;
- /*
- * set the blink bit for each LED that's "on" (0x0E)
+ /* set the blink bit for each LED that's "on" (0x0E)
* in ledctl_mode2
*/
ledctl_blink = hw->mac.ledctl_mode2;
@@ -1467,7 +1445,7 @@ s32 igb_led_off(struct e1000_hw *hw)
* @hw: pointer to the HW structure
*
* Returns 0 (0) if successful, else returns -10
- * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not casued
+ * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
* the master requests to be disabled.
*
* Disables PCI-Express master access and verifies there are no pending
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index e6d6ce433261..5e13e83cc608 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -35,8 +35,7 @@
#include "e1000_defines.h"
#include "e1000_i210.h"
-/*
- * Functions that should not be called directly from drivers but can be used
+/* Functions that should not be called directly from drivers but can be used
* by other files in this 'shared code'
*/
s32 igb_blink_led(struct e1000_hw *hw);
@@ -49,15 +48,15 @@ s32 igb_get_auto_rd_done(struct e1000_hw *hw);
s32 igb_get_bus_info_pcie(struct e1000_hw *hw);
s32 igb_get_hw_semaphore(struct e1000_hw *hw);
s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
- u16 *duplex);
+ u16 *duplex);
s32 igb_id_led_init(struct e1000_hw *hw);
s32 igb_led_off(struct e1000_hw *hw);
void igb_update_mc_addr_list(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count);
+ u8 *mc_addr_list, u32 mc_addr_count);
s32 igb_setup_link(struct e1000_hw *hw);
s32 igb_validate_mdi_setting(struct e1000_hw *hw);
s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
- u32 offset, u8 data);
+ u32 offset, u8 data);
void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
void igb_clear_vfta(struct e1000_hw *hw);
@@ -80,12 +79,12 @@ enum e1000_mng_mode {
e1000_mng_mode_host_if_only
};
-#define E1000_FACTPS_MNGCG 0x20000000
+#define E1000_FACTPS_MNGCG 0x20000000
-#define E1000_FWSM_MODE_MASK 0xE
-#define E1000_FWSM_MODE_SHIFT 1
+#define E1000_FWSM_MODE_MASK 0xE
+#define E1000_FWSM_MODE_SHIFT 1
-#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 38e0df350904..dac1447fabf7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -196,7 +196,8 @@ out:
* returns SUCCESS if it successfully received a message notification and
* copied it into the receive buffer.
**/
-static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
s32 ret_val = -E1000_ERR_MBX;
@@ -222,7 +223,8 @@ out:
* returns SUCCESS if it successfully copied message into the buffer and
* received an ack to that message within delay * timeout period
**/
-static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
s32 ret_val = -E1000_ERR_MBX;
@@ -325,7 +327,6 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
s32 ret_val = -E1000_ERR_MBX;
u32 p2v_mailbox;
-
/* Take ownership of the buffer */
wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
@@ -347,7 +348,7 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
* returns SUCCESS if it successfully copied message into the buffer
**/
static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
+ u16 vf_number)
{
s32 ret_val;
u16 i;
@@ -388,7 +389,7 @@ out_no_write:
* a message due to a VF request so no polling for message is needed.
**/
static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
+ u16 vf_number)
{
s32 ret_val;
u16 i;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index c13b56d9edb2..de9bba41acf3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -30,42 +30,42 @@
#include "e1000_hw.h"
-#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
-#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
-#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
-#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
-#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
-#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
-#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
-#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
-#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
+#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
+#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
-#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the
* PF. The reverse is true if it is E1000_PF_*.
* Message ACK's are the value or'd with 0xF0000000
*/
-#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
- * this are the ACK */
-#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
- * this are the NACK */
-#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
- clear to send requests */
-#define E1000_VT_MSGINFO_SHIFT 16
+/* Messages below or'd with this are the ACK */
+#define E1000_VT_MSGTYPE_ACK 0x80000000
+/* Messages below or'd with this are the NACK */
+#define E1000_VT_MSGTYPE_NACK 0x40000000
+/* Indicates that VF is still clear to send requests */
+#define E1000_VT_MSGTYPE_CTS 0x20000000
+#define E1000_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
-#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
-#define E1000_VF_RESET 0x01 /* VF requests reset */
-#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
-#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
-#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
-#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */
-#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
-#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_RESET 0x01 /* VF requests reset */
+#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
+#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
+#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
+#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */
+#define E1000_VF_SET_PROMISC 0x06 /* VF requests to clear VMOLR.ROPE/MPME */
+#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
-#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
+#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16);
s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16);
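For orientation, the macros above compose and parse complete mailbox words; a minimal sketch of that composition (the helper names are illustrative, not part of the driver):

	/* illustrative helpers only, built from the E1000_VT_* macros above */
	static inline u32 vf_msg_with_info(u32 msg, u16 info)
	{
		/* bits 23:16 carry the extra info for certain messages */
		return msg | ((u32)info << E1000_VT_MSGINFO_SHIFT);
	}

	static inline bool pf_acked(u32 msg)
	{
		/* the PF answers with the original request or'd with ACK/NACK */
		return !!(msg & E1000_VT_MSGTYPE_ACK);
	}

E1000_VF_SET_PROMISC_MULTICAST above is exactly such a pre-shifted info value: the VF sends E1000_VF_SET_PROMISC | E1000_VF_SET_PROMISC_MULTICAST as one word.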
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 5b62adbe134d..7f9cd7cbd353 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -289,15 +289,14 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
udelay(1);
timeout = NVM_MAX_RETRY_SPI;
- /*
- * Read "Status Register" repeatedly until the LSB is cleared.
+ /* Read "Status Register" repeatedly until the LSB is cleared.
* The EEPROM will signal that the command has been completed
* by clearing bit 0 of the internal status register. If it's
* not cleared within 'timeout', then error out.
*/
while (timeout) {
igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
- hw->nvm.opcode_bits);
+ hw->nvm.opcode_bits);
spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8);
if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
break;
@@ -335,8 +334,7 @@ s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
u16 word_in;
u8 read_opcode = NVM_READ_OPCODE_SPI;
- /*
- * A check for invalid values: offset too large, too many words,
+ /* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -363,8 +361,7 @@ s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
- /*
- * Read the data. SPI NVMs increment the address with each byte
+ /* Read the data. SPI NVMs increment the address with each byte
* read and will roll over if reading beyond the end. This allows
* us to read the whole NVM from any offset
*/
@@ -395,8 +392,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
u32 i, eerd = 0;
s32 ret_val = 0;
- /*
- * A check for invalid values: offset too large, too many words,
+ /* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -408,7 +404,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
for (i = 0; i < words; i++) {
eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
- E1000_NVM_RW_REG_START;
+ E1000_NVM_RW_REG_START;
wr32(E1000_EERD, eerd);
ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
@@ -441,8 +437,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
s32 ret_val = -E1000_ERR_NVM;
u16 widx = 0;
- /*
- * A check for invalid values: offset too large, too many words,
+ /* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -472,8 +467,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
igb_standby_nvm(hw);
- /*
- * Some SPI eeproms use the 8th address bit embedded in the
+ /* Some SPI eeproms use the 8th address bit embedded in the
* opcode
*/
if ((nvm->address_bits == 8) && (offset >= 128))
@@ -538,8 +532,7 @@ s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size)
goto out;
}
- /*
- * if nvm_data is not ptr guard the PBA must be in legacy format which
+ /* if nvm_data is not the ptr guard value, the PBA must be in legacy format which
* means pointer is actually our second data word for the PBA number
* and we can decode it into an ascii string
*/
@@ -728,6 +721,7 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
case e1000_82575:
case e1000_82576:
case e1000_82580:
+ case e1000_i354:
case e1000_i350:
case e1000_i210:
break;
@@ -746,6 +740,7 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
switch (hw->mac.type) {
case e1000_i210:
+ case e1000_i354:
case e1000_i350:
/* find combo image version */
hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
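The EERD changes above reduce to a kick-and-poll per word; a condensed sketch of igb_read_nvm_eerd()'s inner loop, assuming the usual e1000 defines (E1000_NVM_RW_REG_DATA is the offset of the 16-bit data field within EERD):

	static s32 eerd_read_word(struct e1000_hw *hw, u16 offset, u16 *data)
	{
		u32 eerd = (offset << E1000_NVM_RW_ADDR_SHIFT) +
			   E1000_NVM_RW_REG_START;
		s32 ret_val;

		wr32(E1000_EERD, eerd);		/* start the read */
		ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
		if (ret_val)
			return ret_val;

		/* data comes back in the upper half of EERD */
		*data = (u16)(rd32(E1000_EERD) >> E1000_NVM_RW_REG_DATA);
		return 0;
	}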
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 2918c979b5bb..115b0da6e013 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -33,29 +33,29 @@
static s32 igb_phy_setup_autoneg(struct e1000_hw *hw);
static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
- u16 *phy_ctrl);
+ u16 *phy_ctrl);
static s32 igb_wait_autoneg(struct e1000_hw *hw);
static s32 igb_set_master_slave_mode(struct e1000_hw *hw);
/* Cable length tables */
-static const u16 e1000_m88_cable_length_table[] =
- { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+static const u16 e1000_m88_cable_length_table[] = {
+ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
- (sizeof(e1000_m88_cable_length_table) / \
- sizeof(e1000_m88_cable_length_table[0]))
-
-static const u16 e1000_igp_2_cable_length_table[] =
- { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
- 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
- 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
- 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
- 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
- 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
- 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
- 104, 109, 114, 118, 121, 124};
+ (sizeof(e1000_m88_cable_length_table) / \
+ sizeof(e1000_m88_cable_length_table[0]))
+
+static const u16 e1000_igp_2_cable_length_table[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
+ 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
+ 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
+ 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
+ 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
+ 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
+ 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
+ 104, 109, 114, 118, 121, 124};
#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
- (sizeof(e1000_igp_2_cable_length_table) / \
- sizeof(e1000_igp_2_cable_length_table[0]))
+ (sizeof(e1000_igp_2_cable_length_table) / \
+ sizeof(e1000_igp_2_cable_length_table[0]))
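The IGP2 table above is indexed by the AGC "coarse + fine gain" value extracted in igb_get_cable_length_igp_2() further down; roughly, assuming the IGP02E1000_AGC_* defines from e1000_phy.h:

	/* phy_data is a raw read of one IGP02E1000_PHY_AGC_* register */
	agc_value = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
		     IGP02E1000_AGC_LENGTH_MASK;	/* bits 15:9 */
	if (agc_value < IGP02E1000_CABLE_LENGTH_TABLE_SIZE)
		length = e1000_igp_2_cable_length_table[agc_value];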
/**
* igb_check_reset_block - Check if PHY reset is blocked
@@ -71,8 +71,7 @@ s32 igb_check_reset_block(struct e1000_hw *hw)
manc = rd32(E1000_MANC);
- return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
- E1000_BLK_PHY_RESET : 0;
+ return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0;
}
/**
@@ -149,8 +148,7 @@ s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
goto out;
}
- /*
- * Set up Op-code, Phy Address, and register offset in the MDI
+ /* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
@@ -160,8 +158,7 @@ s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
wr32(E1000_MDIC, mdic);
- /*
- * Poll the ready bit to see if the MDI read completed
+ /* Poll the ready bit to see if the MDI read completed
 * Increasing the timeout as testing showed failures with
 * the lower timeout
*/
@@ -207,8 +204,7 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
goto out;
}
- /*
- * Set up Op-code, Phy Address, and register offset in the MDI
+ /* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
@@ -219,8 +215,7 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
wr32(E1000_MDIC, mdic);
- /*
- * Poll the ready bit to see if the MDI read completed
+ /* Poll the ready bit to see if the MDI write completed
 * Increasing the timeout as testing showed failures with
 * the lower timeout
*/
@@ -259,15 +254,13 @@ s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
struct e1000_phy_info *phy = &hw->phy;
u32 i, i2ccmd = 0;
-
- /*
- * Set up Op-code, Phy Address, and register address in the I2CCMD
+ /* Set up Op-code, Phy Address, and register address in the I2CCMD
* register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
- (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
- (E1000_I2CCMD_OPCODE_READ));
+ (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+ (E1000_I2CCMD_OPCODE_READ));
wr32(E1000_I2CCMD, i2ccmd);
@@ -317,15 +310,14 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
/* Swap the data bytes for the I2C interface */
phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
- /*
- * Set up Op-code, Phy Address, and register address in the I2CCMD
+ /* Set up Op-code, Phy Address, and register address in the I2CCMD
* register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
- (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
- E1000_I2CCMD_OPCODE_WRITE |
- phy_data_swapped);
+ (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+ E1000_I2CCMD_OPCODE_WRITE |
+ phy_data_swapped);
wr32(E1000_I2CCMD, i2ccmd);
@@ -371,8 +363,8 @@ s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
if (offset > MAX_PHY_MULTI_PAGE_REG) {
ret_val = igb_write_phy_reg_mdic(hw,
- IGP01E1000_PHY_PAGE_SELECT,
- (u16)offset);
+ IGP01E1000_PHY_PAGE_SELECT,
+ (u16)offset);
if (ret_val) {
hw->phy.ops.release(hw);
goto out;
@@ -410,8 +402,8 @@ s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
if (offset > MAX_PHY_MULTI_PAGE_REG) {
ret_val = igb_write_phy_reg_mdic(hw,
- IGP01E1000_PHY_PAGE_SELECT,
- (u16)offset);
+ IGP01E1000_PHY_PAGE_SELECT,
+ (u16)offset);
if (ret_val) {
hw->phy.ops.release(hw);
goto out;
@@ -419,7 +411,7 @@ s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
}
ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
- data);
+ data);
hw->phy.ops.release(hw);
@@ -439,7 +431,6 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
-
if (phy->reset_disable) {
ret_val = 0;
goto out;
@@ -472,8 +463,7 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
if (ret_val)
goto out;
phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
- /*
- * Options:
+ /* Options:
* 0 - Auto (default)
* 1 - MDI mode
* 2 - MDI-X mode
@@ -520,8 +510,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
- /*
- * Options:
+ /* Options:
* MDI/MDI-X = 0 (default)
* 0 - Auto for all speeds
* 1 - MDI mode
@@ -546,8 +535,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
break;
}
- /*
- * Options:
+ /* Options:
* disable_polarity_correction = 0 (default)
* Automatic Correction for Reversed Cable Polarity
* 0 - Disabled
@@ -562,12 +550,11 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
goto out;
if (phy->revision < E1000_REVISION_4) {
- /*
- * Force TX_CLK in the Extended PHY Specific Control Register
+ /* Force TX_CLK in the Extended PHY Specific Control Register
* to 25MHz clock.
*/
ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
- &phy_data);
+ &phy_data);
if (ret_val)
goto out;
@@ -630,8 +617,7 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
if (ret_val)
goto out;
- /*
- * Options:
+ /* Options:
* MDI/MDI-X = 0 (default)
* 0 - Auto for all speeds
* 1 - MDI mode
@@ -659,8 +645,7 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
break;
}
- /*
- * Options:
+ /* Options:
* disable_polarity_correction = 0 (default)
* Automatic Correction for Reversed Cable Polarity
* 0 - Disabled
@@ -714,14 +699,12 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
goto out;
}
- /*
- * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
+ /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid
* timeout issues when LFS is enabled.
*/
msleep(100);
- /*
- * The NVM settings will configure LPLU in D3 for
+ /* The NVM settings will configure LPLU in D3 for
* non-IGP1 PHYs.
*/
if (phy->type == e1000_phy_igp) {
@@ -765,8 +748,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
/* set auto-master slave resolution settings */
if (hw->mac.autoneg) {
- /*
- * when autonegotiation advertisement is only 1000Mbps then we
+ /* when autonegotiation advertisement is only 1000Mbps then we
* should disable SmartSpeed and enable Auto MasterSlave
* resolution as hardware default.
*/
@@ -844,14 +826,12 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
s32 ret_val;
u16 phy_ctrl;
- /*
- * Perform some bounds checking on the autoneg advertisement
+ /* Perform some bounds checking on the autoneg advertisement
* parameter.
*/
phy->autoneg_advertised &= phy->autoneg_mask;
- /*
- * If autoneg_advertised is zero, we assume it was not defaulted
+ /* If autoneg_advertised is zero, we assume it was not defaulted
* by the calling code so we set to advertise full capability.
*/
if (phy->autoneg_advertised == 0)
@@ -865,8 +845,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
}
hw_dbg("Restarting Auto-Neg\n");
- /*
- * Restart auto-negotiation by setting the Auto Neg Enable bit and
+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and
* the Auto Neg Restart bit in the PHY control register.
*/
ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
@@ -878,8 +857,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
if (ret_val)
goto out;
- /*
- * Does the user want to wait for Auto-Neg to complete here, or
+ /* Does the user want to wait for Auto-Neg to complete here, or
* check at a later time (for example, callback routine).
*/
if (phy->autoneg_wait_to_complete) {
@@ -928,16 +906,14 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
goto out;
}
- /*
- * Need to parse both autoneg_advertised and fc and set up
+ /* Need to parse both autoneg_advertised and fc and set up
* the appropriate PHY registers. First we will parse for
* autoneg_advertised software override. Since we can advertise
* a plethora of combinations, we need to check each bit
* individually.
*/
- /*
- * First we clear all the 10/100 mb speed bits in the Auto-Neg
+ /* First we clear all the 10/100 mb speed bits in the Auto-Neg
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T Control Register (Address 9).
*/
@@ -983,8 +959,7 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
}
- /*
- * Check for a software override of the flow control settings, and
+ /* Check for a software override of the flow control settings, and
* setup the PHY advertisement registers accordingly. If
* auto-negotiation is enabled, then software will have to set the
* "PAUSE" bits to the correct value in the Auto-Negotiation
@@ -1003,15 +978,13 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
*/
switch (hw->fc.current_mode) {
case e1000_fc_none:
- /*
- * Flow control (RX & TX) is completely disabled by a
+ /* Flow control (RX & TX) is completely disabled by a
* software over-ride.
*/
mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
case e1000_fc_rx_pause:
- /*
- * RX Flow control is enabled, and TX Flow control is
+ /* RX Flow control is enabled, and TX Flow control is
* disabled, by a software over-ride.
*
* Since there really isn't a way to advertise that we are
@@ -1023,16 +996,14 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
case e1000_fc_tx_pause:
- /*
- * TX Flow control is enabled, and RX Flow control is
+ /* TX Flow control is enabled, and RX Flow control is
* disabled, by a software over-ride.
*/
mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
break;
case e1000_fc_full:
- /*
- * Flow control (both RX and TX) is enabled by a software
+ /* Flow control (both RX and TX) is enabled by a software
* over-ride.
*/
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
@@ -1075,18 +1046,15 @@ s32 igb_setup_copper_link(struct e1000_hw *hw)
s32 ret_val;
bool link;
-
if (hw->mac.autoneg) {
- /*
- * Setup autoneg and flow control advertisement and perform
+ /* Setup autoneg and flow control advertisement and perform
* autonegotiation.
*/
ret_val = igb_copper_link_autoneg(hw);
if (ret_val)
goto out;
} else {
- /*
- * PHY will be set to 10H, 10F, 100H or 100F
+ /* PHY will be set to 10H, 10F, 100H or 100F
* depending on user settings.
*/
hw_dbg("Forcing Speed and Duplex\n");
@@ -1097,14 +1065,10 @@ s32 igb_setup_copper_link(struct e1000_hw *hw)
}
}
- /*
- * Check link status. Wait up to 100 microseconds for link to become
+ /* Check link status. Wait up to 100 microseconds for link to become
* valid.
*/
- ret_val = igb_phy_has_link(hw,
- COPPER_LINK_UP_LIMIT,
- 10,
- &link);
+ ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
if (ret_val)
goto out;
@@ -1145,8 +1109,7 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
if (ret_val)
goto out;
- /*
- * Clear Auto-Crossover to force MDI manually. IGP requires MDI
+ /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
* forced whenever speed and duplex are forced.
*/
ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
@@ -1167,10 +1130,7 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
if (phy->autoneg_wait_to_complete) {
hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n");
- ret_val = igb_phy_has_link(hw,
- PHY_FORCE_LIMIT,
- 100000,
- &link);
+ ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link);
if (ret_val)
goto out;
@@ -1178,10 +1138,7 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
hw_dbg("Link taking longer than expected.\n");
/* Try once more */
- ret_val = igb_phy_has_link(hw,
- PHY_FORCE_LIMIT,
- 100000,
- &link);
+ ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link);
if (ret_val)
goto out;
}
@@ -1209,8 +1166,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
/* I210 and I211 devices support Auto-Crossover in forced operation. */
if (phy->type != e1000_phy_i210) {
- /*
- * Clear Auto-Crossover to force MDI manually. M88E1000
+ /* Clear Auto-Crossover to force MDI manually. M88E1000
* requires MDI forced whenever speed and duplex are forced.
*/
ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
@@ -1266,13 +1222,12 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
if (!reset_dsp)
hw_dbg("Link taking longer than expected.\n");
else {
- /*
- * We didn't get link.
+ /* We didn't get link.
* Reset the DSP and cross our fingers.
*/
ret_val = phy->ops.write_reg(hw,
- M88E1000_PHY_PAGE_SELECT,
- 0x001d);
+ M88E1000_PHY_PAGE_SELECT,
+ 0x001d);
if (ret_val)
goto out;
ret_val = igb_phy_reset_dsp(hw);
@@ -1298,8 +1253,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
if (ret_val)
goto out;
- /*
- * Resetting the phy means we need to re-force TX_CLK in the
+ /* Resetting the phy means we need to re-force TX_CLK in the
* Extended PHY Specific Control Register to 25MHz clock from
* the reset value of 2.5MHz.
*/
@@ -1308,8 +1262,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
if (ret_val)
goto out;
- /*
- * In addition, we must re-enable CRS on Tx for both half and full
+ /* In addition, we must re-enable CRS on Tx for both half and full
* duplex.
*/
ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -1336,7 +1289,7 @@ out:
* take effect.
**/
static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
- u16 *phy_ctrl)
+ u16 *phy_ctrl)
{
struct e1000_mac_info *mac = &hw->mac;
u32 ctrl;
@@ -1417,8 +1370,7 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
data);
if (ret_val)
goto out;
- /*
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained.
@@ -1461,13 +1413,13 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
/* When LPLU is enabled, we should disable SmartSpeed */
ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
- &data);
+ &data);
if (ret_val)
goto out;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
- data);
+ data);
}
out:
@@ -1556,8 +1508,7 @@ static s32 igb_check_polarity_igp(struct e1000_hw *hw)
s32 ret_val;
u16 data, offset, mask;
- /*
- * Polarity is determined based on the speed of
+ /* Polarity is determined based on the speed of
* our connection.
*/
ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
@@ -1569,8 +1520,7 @@ static s32 igb_check_polarity_igp(struct e1000_hw *hw)
offset = IGP01E1000_PHY_PCS_INIT_REG;
mask = IGP01E1000_PHY_POLARITY_MASK;
} else {
- /*
- * This really only applies to 10Mbps since
+ /* This really only applies to 10Mbps since
* there is no polarity for 100Mbps (always 0).
*/
offset = IGP01E1000_PHY_PORT_STATUS;
@@ -1589,7 +1539,7 @@ out:
}
/**
- * igb_wait_autoneg - Wait for auto-neg compeletion
+ * igb_wait_autoneg - Wait for auto-neg completion
* @hw: pointer to the HW structure
*
* Waits for auto-negotiation to complete or for the auto-negotiation time
@@ -1613,8 +1563,7 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw)
msleep(100);
}
- /*
- * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+ /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
* has completed.
*/
return ret_val;
@@ -1630,21 +1579,19 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw)
* Polls the PHY status register for link, 'iterations' number of times.
**/
s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
- u32 usec_interval, bool *success)
+ u32 usec_interval, bool *success)
{
s32 ret_val = 0;
u16 i, phy_status;
for (i = 0; i < iterations; i++) {
- /*
- * Some PHYs require the PHY_STATUS register to be read
+ /* Some PHYs require the PHY_STATUS register to be read
* twice due to the link bit being sticky. No harm doing
* it across the board.
*/
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
- if (ret_val) {
- /*
- * If the first read fails, another entity may have
+ if (ret_val && usec_interval > 0) {
+ /* If the first read fails, another entity may have
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
@@ -1735,6 +1682,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
phy->cable_length = phy_data / (is_cm ? 100 : 1);
break;
+ case M88E1545_E_PHY_ID:
case I347AT4_E_PHY_ID:
/* Remember the original page select and set it to 7 */
ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
@@ -1834,10 +1782,10 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
u16 cur_agc_index, max_agc_index = 0;
u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
- IGP02E1000_PHY_AGC_A,
- IGP02E1000_PHY_AGC_B,
- IGP02E1000_PHY_AGC_C,
- IGP02E1000_PHY_AGC_D
+ IGP02E1000_PHY_AGC_A,
+ IGP02E1000_PHY_AGC_B,
+ IGP02E1000_PHY_AGC_C,
+ IGP02E1000_PHY_AGC_D
};
/* Read the AGC registers for all channels */
@@ -1846,8 +1794,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
if (ret_val)
goto out;
- /*
- * Getting bits 15:9, which represent the combination of
+ /* Getting bits 15:9, which represent the combination of
* coarse and fine gain values. The result is a number
* that can be put into the lookup table to obtain the
* approximate cable length.
@@ -2167,15 +2114,13 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
/* Change cg_icount + enable integbp for channels BCD */
hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
- /*
- * Change cg_icount + enable integbp + change prop_factor_master
+ /* Change cg_icount + enable integbp + change prop_factor_master
* to 8 for channel A
*/
hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
/* Disable AHT in Slave mode on channel A */
hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
- /*
- * Enable LPLU and disable AN to 1000 in non-D0a states,
+ /* Enable LPLU and disable AN to 1000 in non-D0a states,
* Enable SPD+B2B
*/
hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
@@ -2257,8 +2202,8 @@ static s32 igb_check_polarity_82580(struct e1000_hw *hw)
if (!ret_val)
phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal;
return ret_val;
}
@@ -2278,7 +2223,6 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
u16 phy_data;
bool link;
-
ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
if (ret_val)
goto out;
@@ -2289,8 +2233,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
if (ret_val)
goto out;
- /*
- * Clear Auto-Crossover to force MDI manually. 82580 requires MDI
+ /* Clear Auto-Crossover to force MDI manually. 82580 requires MDI
* forced whenever speed and duplex are forced.
*/
ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
@@ -2310,10 +2253,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
if (phy->autoneg_wait_to_complete) {
hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n");
- ret_val = igb_phy_has_link(hw,
- PHY_FORCE_LIMIT,
- 100000,
- &link);
+ ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
if (ret_val)
goto out;
@@ -2321,10 +2261,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
hw_dbg("Link taking longer than expected.\n");
/* Try once more */
- ret_val = igb_phy_has_link(hw,
- PHY_FORCE_LIMIT,
- 100000,
- &link);
+ ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
if (ret_val)
goto out;
}
@@ -2349,7 +2286,6 @@ s32 igb_get_phy_info_82580(struct e1000_hw *hw)
u16 data;
bool link;
-
ret_val = igb_phy_has_link(hw, 1, 0, &link);
if (ret_val)
goto out;
@@ -2383,12 +2319,12 @@ s32 igb_get_phy_info_82580(struct e1000_hw *hw)
goto out;
phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ ? e1000_1000t_rx_status_ok
+ : e1000_1000t_rx_status_not_ok;
} else {
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
phy->local_rx = e1000_1000t_rx_status_undefined;
@@ -2412,13 +2348,12 @@ s32 igb_get_cable_length_82580(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data, length;
-
ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data);
if (ret_val)
goto out;
length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >>
- I82580_DSTATUS_CABLE_LENGTH_SHIFT;
+ I82580_DSTATUS_CABLE_LENGTH_SHIFT;
if (length == E1000_CABLE_LENGTH_UNDEFINED)
ret_val = -E1000_ERR_PHY;
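One pattern worth calling out from this file: because the PHY latches link-down events, igb_phy_has_link() reads PHY_STATUS twice per iteration. In isolation (error handling omitted; MII_SR_LINK_STATUS comes from e1000_defines.h):

	static bool phy_link_up(struct e1000_hw *hw)
	{
		u16 phy_status = 0;

		/* first read clears the sticky link bit,
		 * second read reports the live state
		 */
		hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);

		return !!(phy_status & MII_SR_LINK_STATUS);
	}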
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 15343286082e..82632c6c53af 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -65,6 +65,7 @@
#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
+#define E1000_LEDMUX 0x08130 /* LED MUX Control */
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
@@ -83,6 +84,9 @@
#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */
#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */
#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */
+#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */
+#define E1000_MPHY_DATA 0x0E10 /* GbE MPHY Data */
+#define E1000_MPHY_STAT 0x0E0C /* GbE MPHY Statistics */
/* IEEE 1588 TIMESYNCH */
#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
@@ -117,21 +121,21 @@
#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
/* DMA Coalescing registers */
-#define E1000_DMACR 0x02508 /* Control Register */
-#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */
-#define E1000_DMCTLX 0x02514 /* Time to Lx Request */
-#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
-#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
-#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
-#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
+#define E1000_DMACR 0x02508 /* Control Register */
+#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */
+#define E1000_DMCTLX 0x02514 /* Time to Lx Request */
+#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
+#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
+#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
+#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
/* TX Rate Limit Registers */
-#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
-#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */
-#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
+#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
+#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */
+#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
/* Split and Replication RX Control - RW */
-#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
/* Thermal sensor configuration and status registers */
#define E1000_THMJT 0x08100 /* Junction Temperature */
@@ -140,8 +144,7 @@
#define E1000_THHIGHTC 0x0810C /* High Threshold Control */
#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
-/*
- * Convenience macros
+/* Convenience macros
*
* Note: "_n" is the queue number of the register to be written to.
*
@@ -287,7 +290,7 @@
#define E1000_RFCTL 0x05008 /* Receive Filter Control */
#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
#define E1000_RA 0x05400 /* Receive Address - RW Array */
-#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */
+#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
(0x054E0 + ((_i - 16) * 8)))
@@ -360,21 +363,25 @@
(readl(hw->hw_addr + reg + ((offset) << 2)))
/* DMA Coalescing registers */
-#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
+#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
/* Energy Efficient Ethernet "EEE" register */
-#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
-#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */
-#define E1000_EEE_SU 0X0E34 /* EEE Setup */
+#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
+#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */
+#define E1000_EEE_SU 0x0E34 /* EEE Setup */
+#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */
+#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */
+#define E1000_MMDAC 13 /* MMD Access Control */
+#define E1000_MMDAAD 14 /* MMD Access Address/Data */
/* Thermal Sensor Register */
-#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
+#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
/* OS2BMC Registers */
-#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
-#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */
-#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
-#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
+#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
+#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */
+#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
+#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */
#define E1000_I210_FLMNGCTL 0x12038
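Since the receive-address array is split across E1000_RA and E1000_RA2, the E1000_RAL(_i) macro above switches base at index 16; two worked values:

	/* E1000_RAL(10) = 0x05400 + (10 * 8)        = 0x05450  (E1000_RA)
	 * E1000_RAL(20) = 0x054E0 + ((20 - 16) * 8) = 0x05500  (E1000_RA2)
	 * the matching high half (RAH) sits 4 bytes above each RAL entry
	 */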
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 25151401c2ab..9d6c075e232d 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -44,54 +44,54 @@
struct igb_adapter;
-#define E1000_PCS_CFG_IGN_SD 1
+#define E1000_PCS_CFG_IGN_SD 1
/* Interrupt defines */
-#define IGB_START_ITR 648 /* ~6000 ints/sec */
-#define IGB_4K_ITR 980
-#define IGB_20K_ITR 196
-#define IGB_70K_ITR 56
+#define IGB_START_ITR 648 /* ~6000 ints/sec */
+#define IGB_4K_ITR 980
+#define IGB_20K_ITR 196
+#define IGB_70K_ITR 56
/* TX/RX descriptor defines */
-#define IGB_DEFAULT_TXD 256
-#define IGB_DEFAULT_TX_WORK 128
-#define IGB_MIN_TXD 80
-#define IGB_MAX_TXD 4096
+#define IGB_DEFAULT_TXD 256
+#define IGB_DEFAULT_TX_WORK 128
+#define IGB_MIN_TXD 80
+#define IGB_MAX_TXD 4096
-#define IGB_DEFAULT_RXD 256
-#define IGB_MIN_RXD 80
-#define IGB_MAX_RXD 4096
+#define IGB_DEFAULT_RXD 256
+#define IGB_MIN_RXD 80
+#define IGB_MAX_RXD 4096
-#define IGB_DEFAULT_ITR 3 /* dynamic */
-#define IGB_MAX_ITR_USECS 10000
-#define IGB_MIN_ITR_USECS 10
-#define NON_Q_VECTORS 1
-#define MAX_Q_VECTORS 8
+#define IGB_DEFAULT_ITR 3 /* dynamic */
+#define IGB_MAX_ITR_USECS 10000
+#define IGB_MIN_ITR_USECS 10
+#define NON_Q_VECTORS 1
+#define MAX_Q_VECTORS 8
/* Transmit and receive queues */
-#define IGB_MAX_RX_QUEUES 8
-#define IGB_MAX_RX_QUEUES_82575 4
-#define IGB_MAX_RX_QUEUES_I211 2
-#define IGB_MAX_TX_QUEUES 8
-#define IGB_MAX_VF_MC_ENTRIES 30
-#define IGB_MAX_VF_FUNCTIONS 8
-#define IGB_MAX_VFTA_ENTRIES 128
-#define IGB_82576_VF_DEV_ID 0x10CA
-#define IGB_I350_VF_DEV_ID 0x1520
+#define IGB_MAX_RX_QUEUES 8
+#define IGB_MAX_RX_QUEUES_82575 4
+#define IGB_MAX_RX_QUEUES_I211 2
+#define IGB_MAX_TX_QUEUES 8
+#define IGB_MAX_VF_MC_ENTRIES 30
+#define IGB_MAX_VF_FUNCTIONS 8
+#define IGB_MAX_VFTA_ENTRIES 128
+#define IGB_82576_VF_DEV_ID 0x10CA
+#define IGB_I350_VF_DEV_ID 0x1520
/* NVM version defines */
-#define IGB_MAJOR_MASK 0xF000
-#define IGB_MINOR_MASK 0x0FF0
-#define IGB_BUILD_MASK 0x000F
-#define IGB_COMB_VER_MASK 0x00FF
-#define IGB_MAJOR_SHIFT 12
-#define IGB_MINOR_SHIFT 4
-#define IGB_COMB_VER_SHFT 8
-#define IGB_NVM_VER_INVALID 0xFFFF
-#define IGB_ETRACK_SHIFT 16
-#define NVM_ETRACK_WORD 0x0042
-#define NVM_COMB_VER_OFF 0x0083
-#define NVM_COMB_VER_PTR 0x003d
+#define IGB_MAJOR_MASK 0xF000
+#define IGB_MINOR_MASK 0x0FF0
+#define IGB_BUILD_MASK 0x000F
+#define IGB_COMB_VER_MASK 0x00FF
+#define IGB_MAJOR_SHIFT 12
+#define IGB_MINOR_SHIFT 4
+#define IGB_COMB_VER_SHFT 8
+#define IGB_NVM_VER_INVALID 0xFFFF
+#define IGB_ETRACK_SHIFT 16
+#define NVM_ETRACK_WORD 0x0042
+#define NVM_COMB_VER_OFF 0x0083
+#define NVM_COMB_VER_PTR 0x003d
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
@@ -103,6 +103,7 @@ struct vf_data_storage {
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
u16 tx_rate;
+ bool spoofchk_enabled;
};
#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
@@ -121,14 +122,14 @@ struct vf_data_storage {
* descriptors until either it has this many to write back, or the
* ITR timer expires.
*/
-#define IGB_RX_PTHRESH 8
-#define IGB_RX_HTHRESH 8
-#define IGB_TX_PTHRESH 8
-#define IGB_TX_HTHRESH 1
-#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 1 : 4)
-#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 1 : 16)
+#define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8)
+#define IGB_RX_HTHRESH 8
+#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
+#define IGB_TX_HTHRESH 1
+#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
+ adapter->msix_entries) ? 1 : 4)
+#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
+ adapter->msix_entries) ? 1 : 16)
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
@@ -140,17 +141,17 @@ struct vf_data_storage {
#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-#define AUTO_ALL_MODES 0
-#define IGB_EEPROM_APME 0x0400
+#define AUTO_ALL_MODES 0
+#define IGB_EEPROM_APME 0x0400
#ifndef IGB_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define IGB_MASTER_SLAVE e1000_ms_hw_default
#endif
-#define IGB_MNG_VLAN_NONE -1
+#define IGB_MNG_VLAN_NONE -1
enum igb_tx_flags {
/* cmd_type flags */
@@ -164,11 +165,10 @@ enum igb_tx_flags {
};
/* VLAN info */
-#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16
-/*
- * The largest size we can write to the descriptor is 65535. In order to
+/* The largest size we can write to the descriptor is 65535. In order to
* maintain a power of two alignment we have to limit ourselves to 32K.
*/
#define IGB_MAX_TXD_PWR 15
@@ -178,8 +178,17 @@ enum igb_tx_flags {
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+/* EEPROM byte offsets */
+#define IGB_SFF_8472_SWAP 0x5C
+#define IGB_SFF_8472_COMP 0x5E
+
+/* Bitmasks */
+#define IGB_SFF_ADDRESSING_MODE 0x4
+#define IGB_SFF_8472_UNSUP 0x00
+
/* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer */
+ * so a DMA handle can be stored along with the buffer
+ */
struct igb_tx_buffer {
union e1000_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
@@ -284,25 +293,17 @@ struct igb_q_vector {
enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
- IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG
};
-#define ring_uses_build_skb(ring) \
- test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-#define set_ring_build_skb_enabled(ring) \
- set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-#define clear_ring_build_skb_enabled(ring) \
- clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-
#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
-#define IGB_RX_DESC(R, i) \
+#define IGB_RX_DESC(R, i) \
(&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
-#define IGB_TX_DESC(R, i) \
+#define IGB_TX_DESC(R, i) \
(&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
-#define IGB_TX_CTXTDESC(R, i) \
+#define IGB_TX_CTXTDESC(R, i) \
(&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
@@ -461,12 +462,12 @@ struct igb_adapter {
#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
/* DMA Coalescing defines */
-#define IGB_MIN_TXPBSIZE 20408
-#define IGB_TX_BUF_4096 4096
-#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
+#define IGB_MIN_TXPBSIZE 20408
+#define IGB_TX_BUF_4096 4096
+#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
-#define IGB_82576_TSYNC_SHIFT 19
-#define IGB_TS_HDR_LEN 16
+#define IGB_82576_TSYNC_SHIFT 19
+#define IGB_TS_HDR_LEN 16
enum e1000_state_t {
__IGB_TESTING,
__IGB_RESETTING,
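The descriptor sizing above works out as follows (in this header IGB_MAX_DATA_PER_TXD is 1 << IGB_MAX_TXD_PWR):

	/* IGB_MAX_DATA_PER_TXD = 1 << 15 = 32768 bytes per descriptor, so
	 * a maximal 65535-byte span costs TXD_USE_COUNT(65535) =
	 * DIV_ROUND_UP(65535, 32768) = 2 descriptors; DESC_NEEDED then
	 * budgets MAX_SKB_FRAGS + 4 slots for a worst-case skb plus
	 * header and context overhead.
	 */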
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index a3830a8ba4c1..7876240fa74e 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -38,6 +38,7 @@
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/highmem.h>
+#include <linux/mdio.h>
#include "igb.h"
@@ -178,44 +179,67 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->port = PORT_TP;
ecmd->phy_address = hw->phy.addr;
+ ecmd->transceiver = XCVR_INTERNAL;
} else {
- ecmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg);
+ ecmd->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause);
+ if (hw->mac.type == e1000_i354)
+ ecmd->supported |= SUPPORTED_2500baseX_Full;
+
+ ecmd->advertising = ADVERTISED_FIBRE;
+
+ switch (adapter->link_speed) {
+ case SPEED_2500:
+ ecmd->advertising |= ADVERTISED_2500baseX_Full;
+ break;
+ case SPEED_1000:
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ break;
+ case SPEED_100:
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ break;
+ default:
+ break;
+ }
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg |
- ADVERTISED_Pause);
+ if (hw->mac.autoneg == 1)
+ ecmd->advertising |= ADVERTISED_Autoneg;
ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
}
- ecmd->transceiver = XCVR_INTERNAL;
-
status = rd32(E1000_STATUS);
if (status & E1000_STATUS_LU) {
-
- if ((status & E1000_STATUS_SPEED_1000) ||
- hw->phy.media_type != e1000_media_type_copper)
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ if ((hw->mac.type == e1000_i354) &&
+ (status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER))
+ ecmd->speed = SPEED_2500;
+ else if (status & E1000_STATUS_SPEED_1000)
+ ecmd->speed = SPEED_1000;
else if (status & E1000_STATUS_SPEED_100)
- ethtool_cmd_speed_set(ecmd, SPEED_100);
+ ecmd->speed = SPEED_100;
else
- ethtool_cmd_speed_set(ecmd, SPEED_10);
-
+ ecmd->speed = SPEED_10;
if ((status & E1000_STATUS_FD) ||
hw->phy.media_type != e1000_media_type_copper)
ecmd->duplex = DUPLEX_FULL;
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
+ ecmd->speed = -1;
ecmd->duplex = -1;
}
- ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ if ((hw->phy.media_type == e1000_media_type_fiber) ||
+ hw->mac.autoneg)
+ ecmd->autoneg = AUTONEG_ENABLE;
+ else
+ ecmd->autoneg = AUTONEG_DISABLE;
/* MDI-X => 2; MDI =>1; Invalid =>0 */
if (hw->phy.media_type == e1000_media_type_copper)
@@ -238,15 +262,15 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct e1000_hw *hw = &adapter->hw;
/* When SoL/IDER sessions are active, autoneg/speed/duplex
- * cannot be changed */
+ * cannot be changed
+ */
if (igb_check_reset_block(hw)) {
dev_err(&adapter->pdev->dev,
"Cannot change link characteristics when SoL/IDER is active.\n");
return -EINVAL;
}
- /*
- * MDI setting is only allowed when autoneg enabled because
+ /* MDI setting is only allowed when autoneg enabled because
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
@@ -266,9 +290,31 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
if (ecmd->autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
- hw->phy.autoneg_advertised = ecmd->advertising |
- ADVERTISED_TP |
- ADVERTISED_Autoneg;
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ hw->phy.autoneg_advertised = ecmd->advertising |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg;
+ switch (adapter->link_speed) {
+ case SPEED_2500:
+ hw->phy.autoneg_advertised =
+ ADVERTISED_2500baseX_Full;
+ break;
+ case SPEED_1000:
+ hw->phy.autoneg_advertised =
+ ADVERTISED_1000baseT_Full;
+ break;
+ case SPEED_100:
+ hw->phy.autoneg_advertised =
+ ADVERTISED_100baseT_Full;
+ break;
+ default:
+ break;
+ }
+ } else {
+ hw->phy.autoneg_advertised = ecmd->advertising |
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
+ }
ecmd->advertising = hw->phy.autoneg_advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = e1000_fc_default;
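The E1000_STATUS decode added to igb_get_settings() above collapses to a small priority ladder; sketched as a standalone helper (the function name is illustrative):

	static u32 status_to_speed(u32 status, bool is_i354)
	{
		/* 2.5Gb applies only to i354 SKUs without the override bit */
		if (is_i354 && (status & E1000_STATUS_2P5_SKU) &&
		    !(status & E1000_STATUS_2P5_SKU_OVER))
			return SPEED_2500;
		if (status & E1000_STATUS_SPEED_1000)
			return SPEED_1000;
		if (status & E1000_STATUS_SPEED_100)
			return SPEED_100;
		return SPEED_10;
	}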
@@ -283,8 +329,7 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
/* MDI-X => 2; MDI => 1; Auto => 3 */
if (ecmd->eth_tp_mdix_ctrl) {
- /*
- * fix up the value for auto (3 => 0) as zero is mapped
+ /* fix up the value for auto (3 => 0) as zero is mapped
* internally to auto
*/
if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
@@ -309,8 +354,7 @@ static u32 igb_get_link(struct net_device *netdev)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_mac_info *mac = &adapter->hw.mac;
- /*
- * If the link is not reported up to netdev, interrupts are disabled,
+ /* If the link is not reported up to netdev, interrupts are disabled,
* and so the physical link state may have changed since we last
* looked. Set get_link_status to make sure that the true link
* state is interrogated, rather than pulling a cached and possibly
@@ -430,7 +474,8 @@ static void igb_get_regs(struct net_device *netdev,
/* Interrupt */
/* Reading EICS for EICR because they read the
- * same but EICS does not clear on read */
+ * same but EICS does not clear on read
+ */
regs_buff[13] = rd32(E1000_EICS);
regs_buff[14] = rd32(E1000_EICS);
regs_buff[15] = rd32(E1000_EIMS);
@@ -438,7 +483,8 @@ static void igb_get_regs(struct net_device *netdev,
regs_buff[17] = rd32(E1000_EIAC);
regs_buff[18] = rd32(E1000_EIAM);
/* Reading ICS for ICR because they read the
- * same but ICS does not clear on read */
+ * same but ICS does not clear on read
+ */
regs_buff[19] = rd32(E1000_ICS);
regs_buff[20] = rd32(E1000_ICS);
regs_buff[21] = rd32(E1000_IMS);
@@ -688,12 +734,12 @@ static int igb_get_eeprom(struct net_device *netdev,
if (hw->nvm.type == e1000_nvm_eeprom_spi)
ret_val = hw->nvm.ops.read(hw, first_word,
- last_word - first_word + 1,
- eeprom_buff);
+ last_word - first_word + 1,
+ eeprom_buff);
else {
for (i = 0; i < last_word - first_word + 1; i++) {
ret_val = hw->nvm.ops.read(hw, first_word + i, 1,
- &eeprom_buff[i]);
+ &eeprom_buff[i]);
if (ret_val)
break;
}
@@ -740,15 +786,17 @@ static int igb_set_eeprom(struct net_device *netdev,
ptr = (void *)eeprom_buff;
if (eeprom->offset & 1) {
- /* need read/modify/write of first changed EEPROM word */
- /* only the second byte of the word is being modified */
+ /* need read/modify/write of first changed EEPROM word
+ * only the second byte of the word is being modified
+ */
ret_val = hw->nvm.ops.read(hw, first_word, 1,
&eeprom_buff[0]);
ptr++;
}
if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
- /* need read/modify/write of last changed EEPROM word */
- /* only the first byte of the word is being modified */
+ /* need read/modify/write of last changed EEPROM word
+ * only the first byte of the word is being modified
+ */
ret_val = hw->nvm.ops.read(hw, last_word, 1,
&eeprom_buff[last_word - first_word]);
}
@@ -763,10 +811,11 @@ static int igb_set_eeprom(struct net_device *netdev,
eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
ret_val = hw->nvm.ops.write(hw, first_word,
- last_word - first_word + 1, eeprom_buff);
+ last_word - first_word + 1, eeprom_buff);
/* Update the checksum over the first part of the EEPROM if needed
- * and flush shadow RAM for 82573 controllers */
+ * and flush shadow RAM for 82573 controllers
+ */
if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
hw->nvm.ops.update(hw);
@@ -783,8 +832,7 @@ static void igb_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
- /*
- * EEPROM image version # is reported as firmware version # for
+ /* EEPROM image version # is reported as firmware version # for
* 82575 controllers
*/
strlcpy(drvinfo->fw_version, adapter->fw_version,
@@ -847,9 +895,11 @@ static int igb_set_ringparam(struct net_device *netdev,
}
if (adapter->num_tx_queues > adapter->num_rx_queues)
- temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
+ temp_ring = vmalloc(adapter->num_tx_queues *
+ sizeof(struct igb_ring));
else
- temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
+ temp_ring = vmalloc(adapter->num_rx_queues *
+ sizeof(struct igb_ring));
if (!temp_ring) {
err = -ENOMEM;
@@ -858,10 +908,9 @@ static int igb_set_ringparam(struct net_device *netdev,
igb_down(adapter);
- /*
- * We can't just free everything and then setup again,
+ /* We can't just free everything and then setup again,
* because the ISRs in MSI-X mode get passed pointers
- * to the tx and rx ring structs.
+ * to the Tx and Rx ring structs.
*/
if (new_tx_count != adapter->tx_ring_count) {
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -1199,6 +1248,7 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
switch (adapter->hw.mac.type) {
case e1000_i350:
+ case e1000_i354:
test = reg_test_i350;
toggle = 0x7FEFF3FF;
break;
@@ -1361,6 +1411,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
ics_mask = 0x77DCFED5;
break;
case e1000_i350:
+ case e1000_i354:
case e1000_i210:
case e1000_i211:
ics_mask = 0x77DCFED5;
@@ -1627,17 +1678,12 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
wr32(E1000_CONNSW, reg);
/* Unset sigdetect for SERDES loopback on
- * 82580 and i350 devices.
+ * 82580 and newer devices.
*/
- switch (hw->mac.type) {
- case e1000_82580:
- case e1000_i350:
+ if (hw->mac.type >= e1000_82580) {
reg = rd32(E1000_PCS_CFG0);
reg |= E1000_PCS_CFG_IGN_SD;
wr32(E1000_PCS_CFG0, reg);
- break;
- default:
- break;
}
/* Set PCS register for forced speed */
@@ -1723,8 +1769,8 @@ static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
}
static int igb_clean_test_rings(struct igb_ring *rx_ring,
- struct igb_ring *tx_ring,
- unsigned int size)
+ struct igb_ring *tx_ring,
+ unsigned int size)
{
union e1000_adv_rx_desc *rx_desc;
struct igb_rx_buffer *rx_buffer_info;
@@ -1737,7 +1783,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
- /* check rx buffer */
+ /* check Rx buffer */
rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
/* sync Rx buffer for CPU read */
@@ -1756,11 +1802,11 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
IGB_RX_BUFSZ,
DMA_FROM_DEVICE);
- /* unmap buffer on tx side */
+ /* unmap buffer on Tx side */
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
- /* increment rx/tx next to clean counters */
+ /* increment Rx/Tx next to clean counters */
rx_ntc++;
if (rx_ntc == rx_ring->count)
rx_ntc = 0;
@@ -1801,8 +1847,7 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
igb_create_lbtest_frame(skb, size);
skb_put(skb, size);
- /*
- * Calculate the loop count based on the largest descriptor ring
+ /* Calculate the loop count based on the largest descriptor ring
* The idea is to wrap the largest ring a number of times using 64
* send/receive pairs during each loop
*/
@@ -1829,7 +1874,7 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
break;
}
- /* allow 200 milliseconds for packets to go from tx to rx */
+ /* allow 200 milliseconds for packets to go from Tx to Rx */
msleep(200);
good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
@@ -1848,13 +1893,21 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
{
/* PHY loopback cannot be performed if SoL/IDER
- * sessions are active */
+ * sessions are active
+ */
if (igb_check_reset_block(&adapter->hw)) {
dev_err(&adapter->pdev->dev,
"Cannot do PHY loopback test when SoL/IDER is active.\n");
*data = 0;
goto out;
}
+
+ if (adapter->hw.mac.type == e1000_i354) {
+ dev_info(&adapter->pdev->dev,
+ "Loopback test not supported on i354.\n");
+ *data = 0;
+ goto out;
+ }
*data = igb_setup_desc_rings(adapter);
if (*data)
goto out;
@@ -1879,7 +1932,8 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
hw->mac.serdes_has_link = false;
/* On some blade server designs, link establishment
- * could take as long as 2-3 minutes */
+ * could take as long as 2-3 minutes
+ */
do {
hw->mac.ops.check_for_link(&adapter->hw);
if (hw->mac.serdes_has_link)
@@ -1922,7 +1976,8 @@ static void igb_diag_test(struct net_device *netdev,
igb_power_up_link(adapter);
/* Link test performed before hardware reset so autoneg doesn't
- * interfere with test result */
+ * interfere with test result
+ */
if (igb_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1987,8 +2042,8 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
struct igb_adapter *adapter = netdev_priv(netdev);
wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC |
- WAKE_PHY;
+ WAKE_BCAST | WAKE_MAGIC |
+ WAKE_PHY;
wol->wolopts = 0;
if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
@@ -2263,7 +2318,7 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
sprintf(p, "rx_queue_%u_alloc_failed", i);
p += ETH_GSTRING_LEN;
}
-/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
+ /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
break;
}
}
@@ -2283,6 +2338,7 @@ static int igb_get_ts_info(struct net_device *dev,
case e1000_82576:
case e1000_82580:
case e1000_i350:
+ case e1000_i354:
case e1000_i210:
case e1000_i211:
info->so_timestamping =
@@ -2362,7 +2418,7 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
}
static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+ u32 *rule_locs)
{
struct igb_adapter *adapter = netdev_priv(dev);
int ret = -EOPNOTSUPP;
@@ -2506,7 +2562,8 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 ipcnfg, eeer;
+ u32 ipcnfg, eeer, ret_val;
+ u16 phy_data;
if ((hw->mac.type < e1000_i350) ||
(hw->phy.media_type != e1000_media_type_copper))
@@ -2525,6 +2582,32 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
if (ipcnfg & E1000_IPCNFG_EEE_100M_AN)
edata->advertised |= ADVERTISED_100baseT_Full;
+ /* EEE Link Partner Advertised */
+ switch (hw->mac.type) {
+ case e1000_i350:
+ ret_val = igb_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350,
+ &phy_data);
+ if (ret_val)
+ return -ENODATA;
+
+ edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
+ E1000_EEE_LP_ADV_DEV_I210,
+ &phy_data);
+ if (ret_val)
+ return -ENODATA;
+
+ edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+
+ break;
+ default:
+ break;
+ }
+
if (eeer & E1000_EEER_EEE_NEG)
edata->eee_active = true;
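mmd_eee_adv_to_ethtool_adv_t() from the newly included linux/mdio.h does the register-to-ethtool translation here; roughly (bit values as defined in linux/mdio.h):

	/* MDIO_EEE_100TX (0x0002) -> ADVERTISED_100baseT_Full
	 * MDIO_EEE_1000T (0x0004) -> ADVERTISED_1000baseT_Full
	 * e.g. an LP advertisement word of 0x0006 reports both
	 * modes as EEE-capable
	 */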
@@ -2600,6 +2683,85 @@ static int igb_set_eee(struct net_device *netdev,
return 0;
}
+static int igb_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 status = E1000_SUCCESS;
+ u16 sff8472_rev, addr_mode;
+ bool page_swap = false;
+
+ if ((hw->phy.media_type == e1000_media_type_copper) ||
+ (hw->phy.media_type == e1000_media_type_unknown))
+ return -EOPNOTSUPP;
+
+ /* Check whether we support SFF-8472 or not */
+ status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
+ if (status != E1000_SUCCESS)
+ return -EIO;
+
+ /* Read the byte that reports the module's addressing mode */
+ status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
+ if (status != E1000_SUCCESS)
+ return -EIO;
+
+ /* addressing mode is not supported */
+ if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) {
+ hw_dbg("Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
+ page_swap = true;
+ }
+
+ if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) {
+ /* We have an SFP, but it does not support SFF-8472 */
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ /* We have an SFP which supports a revision of SFF-8472 */
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+
+ return 0;
+}
+
+static int igb_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 status = E1000_SUCCESS;
+ u16 *dataword;
+ u16 first_word, last_word;
+ int i = 0;
+
+ if (ee->len == 0)
+ return -EINVAL;
+
+ first_word = ee->offset >> 1;
+ last_word = (ee->offset + ee->len - 1) >> 1;
+
+ dataword = kmalloc(sizeof(u16) * (last_word - first_word + 1),
+ GFP_KERNEL);
+ if (!dataword)
+ return -ENOMEM;
+
+ /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
+ if (status != E1000_SUCCESS) {
+ /* Error occurred while reading module */
+ kfree(dataword);
+ return -EIO;
+ }
+
+ be16_to_cpus(&dataword[i]);
+ }
+
+ memcpy(data, (u8 *)dataword + (ee->offset & 1), ee->len);
+ kfree(dataword);
+
+ return 0;
+}
+
static int igb_ethtool_begin(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2614,36 +2776,38 @@ static void igb_ethtool_complete(struct net_device *netdev)
}
static const struct ethtool_ops igb_ethtool_ops = {
- .get_settings = igb_get_settings,
- .set_settings = igb_set_settings,
- .get_drvinfo = igb_get_drvinfo,
- .get_regs_len = igb_get_regs_len,
- .get_regs = igb_get_regs,
- .get_wol = igb_get_wol,
- .set_wol = igb_set_wol,
- .get_msglevel = igb_get_msglevel,
- .set_msglevel = igb_set_msglevel,
- .nway_reset = igb_nway_reset,
- .get_link = igb_get_link,
- .get_eeprom_len = igb_get_eeprom_len,
- .get_eeprom = igb_get_eeprom,
- .set_eeprom = igb_set_eeprom,
- .get_ringparam = igb_get_ringparam,
- .set_ringparam = igb_set_ringparam,
- .get_pauseparam = igb_get_pauseparam,
- .set_pauseparam = igb_set_pauseparam,
- .self_test = igb_diag_test,
- .get_strings = igb_get_strings,
- .set_phys_id = igb_set_phys_id,
- .get_sset_count = igb_get_sset_count,
- .get_ethtool_stats = igb_get_ethtool_stats,
- .get_coalesce = igb_get_coalesce,
- .set_coalesce = igb_set_coalesce,
- .get_ts_info = igb_get_ts_info,
+ .get_settings = igb_get_settings,
+ .set_settings = igb_set_settings,
+ .get_drvinfo = igb_get_drvinfo,
+ .get_regs_len = igb_get_regs_len,
+ .get_regs = igb_get_regs,
+ .get_wol = igb_get_wol,
+ .set_wol = igb_set_wol,
+ .get_msglevel = igb_get_msglevel,
+ .set_msglevel = igb_set_msglevel,
+ .nway_reset = igb_nway_reset,
+ .get_link = igb_get_link,
+ .get_eeprom_len = igb_get_eeprom_len,
+ .get_eeprom = igb_get_eeprom,
+ .set_eeprom = igb_set_eeprom,
+ .get_ringparam = igb_get_ringparam,
+ .set_ringparam = igb_set_ringparam,
+ .get_pauseparam = igb_get_pauseparam,
+ .set_pauseparam = igb_set_pauseparam,
+ .self_test = igb_diag_test,
+ .get_strings = igb_get_strings,
+ .set_phys_id = igb_set_phys_id,
+ .get_sset_count = igb_get_sset_count,
+ .get_ethtool_stats = igb_get_ethtool_stats,
+ .get_coalesce = igb_get_coalesce,
+ .set_coalesce = igb_set_coalesce,
+ .get_ts_info = igb_get_ts_info,
.get_rxnfc = igb_get_rxnfc,
.set_rxnfc = igb_set_rxnfc,
.get_eee = igb_get_eee,
.set_eee = igb_set_eee,
+ .get_module_info = igb_get_module_info,
+ .get_module_eeprom = igb_get_module_eeprom,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
};
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 0478a1abe541..58f1ce967aeb 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -45,21 +45,21 @@ static struct i2c_board_info i350_sensor_info = {
/* hwmon callback functions */
static ssize_t igb_hwmon_show_location(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
- dev_attr);
+ dev_attr);
return sprintf(buf, "loc%u\n",
igb_attr->sensor->location);
}
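Each of these sysfs callbacks recovers its per-sensor context from the generic device_attribute via container_of(), the usual hwmon idiom. A minimal sketch of the relationship (field layout assumed from how the callbacks use it):

	struct hwmon_attr_sketch {
		struct device_attribute dev_attr;	/* embedded; handed to sysfs */
		struct e1000_thermal_diode_data *sensor; /* per-attribute payload */
	};

	/* Inside a show() callback, 'attr' points at the embedded member,
	 * so container_of() walks back to the enclosing structure:
	 */
	struct hwmon_attr_sketch *ia =
		container_of(attr, struct hwmon_attr_sketch, dev_attr);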
static ssize_t igb_hwmon_show_temp(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
- dev_attr);
+ dev_attr);
unsigned int value;
/* reset the temp field */
@@ -74,11 +74,11 @@ static ssize_t igb_hwmon_show_temp(struct device *dev,
}
static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
- dev_attr);
+ dev_attr);
unsigned int value = igb_attr->sensor->caution_thresh;
/* display millidegree */
@@ -88,11 +88,11 @@ static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,
}
static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
- dev_attr);
+ dev_attr);
unsigned int value = igb_attr->sensor->max_op_thresh;
/* display millidegree */
@@ -111,7 +111,8 @@ static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,
* the data structures we need to get the data to display.
*/
static int igb_add_hwmon_attr(struct igb_adapter *adapter,
- unsigned int offset, int type) {
+ unsigned int offset, int type)
+{
int rc;
unsigned int n_attr;
struct hwmon_attr *igb_attr;
@@ -217,7 +218,7 @@ int igb_sysfs_init(struct igb_adapter *adapter)
*/
n_attrs = E1000_MAX_SENSORS * 4;
igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!igb_hwmon->hwmon_list) {
rc = -ENOMEM;
goto err;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8496adfc6a68..64cbe0dfe043 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -60,9 +60,9 @@
#include <linux/i2c.h>
#include "igb.h"
-#define MAJ 4
-#define MIN 1
-#define BUILD 2
+#define MAJ 5
+#define MIN 0
+#define BUILD 3
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
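The two-level __stringify() from linux/stringify.h forces MAJ/MIN/BUILD to expand before the '#' operator pastes them, so the bump above yields:

	/* #define __stringify_1(x...) #x
	 * #define __stringify(x...)  __stringify_1(x)
	 * DRV_VERSION -> "5" "." "0" "." "3" "-k" -> "5.0.3-k"
	 */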
char igb_driver_name[] = "igb";
@@ -77,6 +77,9 @@ static const struct e1000_info *igb_info_tbl[] = {
};
static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
@@ -156,8 +159,8 @@ static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
-static int igb_vlan_rx_add_vid(struct net_device *, u16);
-static int igb_vlan_rx_kill_vid(struct net_device *, u16);
+static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
+static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
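The new __be16 parameter is the VLAN protocol the 8021q core now hands to drivers; for igb, which filters CTAG (802.1Q) VLANs only, it is effectively always htons(ETH_P_8021Q). A sketch of the updated ndo shape (body hypothetical):

	static int sketch_vlan_rx_add_vid(struct net_device *netdev,
					  __be16 proto, u16 vid)
	{
		/* proto distinguishes 802.1Q from 802.1ad; this hardware
		 * only supports the former, so only vid matters here.
		 */
		return 0;
	}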
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
static void igb_ping_all_vfs(struct igb_adapter *);
@@ -169,13 +172,14 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
+ bool setting);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);
#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
-static bool igb_vfs_are_assigned(struct igb_adapter *adapter);
#endif
#ifdef CONFIG_PM
@@ -292,9 +296,7 @@ static const struct igb_reg_info igb_reg_info_tbl[] = {
{}
};
-/*
- * igb_regdump - register printout routine
- */
+/* igb_regdump - register printout routine */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
int n = 0;
@@ -360,9 +362,7 @@ static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
regs[2], regs[3]);
}
-/*
- * igb_dump - Print registers, tx-rings and rx-rings
- */
+/* igb_dump - Print registers, Tx-rings and Rx-rings */
static void igb_dump(struct igb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -569,12 +569,13 @@ exit:
return;
}
-/* igb_get_i2c_data - Reads the I2C SDA data bit
+/**
+ * igb_get_i2c_data - Reads the I2C SDA data bit
* @hw: pointer to hardware structure
* @i2cctl: Current value of I2CCTL register
*
* Returns the I2C data bit value
- */
+ **/
static int igb_get_i2c_data(void *data)
{
struct igb_adapter *adapter = (struct igb_adapter *)data;
@@ -584,12 +585,13 @@ static int igb_get_i2c_data(void *data)
return ((i2cctl & E1000_I2C_DATA_IN) != 0);
}
-/* igb_set_i2c_data - Sets the I2C data bit
+/**
+ * igb_set_i2c_data - Sets the I2C data bit
* @data: pointer to hardware structure
* @state: I2C data value (0 or 1) to set
*
* Sets the I2C data bit
- */
+ **/
static void igb_set_i2c_data(void *data, int state)
{
struct igb_adapter *adapter = (struct igb_adapter *)data;
@@ -608,12 +610,13 @@ static void igb_set_i2c_data(void *data, int state)
}
-/* igb_set_i2c_clk - Sets the I2C SCL clock
+/**
+ * igb_set_i2c_clk - Sets the I2C SCL clock
* @data: pointer to hardware structure
* @state: state to set clock
*
* Sets the I2C clock line to state
- */
+ **/
static void igb_set_i2c_clk(void *data, int state)
{
struct igb_adapter *adapter = (struct igb_adapter *)data;
@@ -631,11 +634,12 @@ static void igb_set_i2c_clk(void *data, int state)
wrfl();
}
-/* igb_get_i2c_clk - Gets the I2C SCL clock state
+/**
+ * igb_get_i2c_clk - Gets the I2C SCL clock state
* @data: pointer to hardware structure
*
* Gets the I2C clock state
- */
+ **/
static int igb_get_i2c_clk(void *data)
{
struct igb_adapter *adapter = (struct igb_adapter *)data;
@@ -655,8 +659,10 @@ static const struct i2c_algo_bit_data igb_i2c_algo = {
};
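These four accessors plug into the kernel's generic bit-banging engine; the adapter only needs an i2c_algo_bit_data and a call to i2c_bit_add_bus(). A minimal sketch with illustrative (not necessarily the driver's) timing values:

	static const struct i2c_algo_bit_data sketch_i2c_algo = {
		.setsda		= igb_set_i2c_data,
		.setscl		= igb_set_i2c_clk,
		.getsda		= igb_get_i2c_data,
		.getscl		= igb_get_i2c_clk,
		.udelay		= 5,	/* half-clock delay, usecs */
		.timeout	= 20,	/* jiffies to wait for SCL */
	};

	/* adapter->i2c_adap.algo_data = (void *)&sketch_i2c_algo;
	 * then register with i2c_bit_add_bus(&adapter->i2c_adap);
	 */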
/**
- * igb_get_hw_dev - return device
- * used by hardware layer to print debugging information
+ * igb_get_hw_dev - return device
+ * @hw: pointer to hardware structure
+ *
+ * used by hardware layer to print debugging information
**/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
@@ -665,10 +671,10 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
}
/**
- * igb_init_module - Driver Registration Routine
+ * igb_init_module - Driver Registration Routine
*
- * igb_init_module is the first routine called when the driver is
- * loaded. All it does is register with the PCI subsystem.
+ * igb_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
**/
static int __init igb_init_module(void)
{
@@ -688,10 +694,10 @@ static int __init igb_init_module(void)
module_init(igb_init_module);
/**
- * igb_exit_module - Driver Exit Cleanup Routine
+ * igb_exit_module - Driver Exit Cleanup Routine
*
- * igb_exit_module is called just before the driver is removed
- * from memory.
+ * igb_exit_module is called just before the driver is removed
+ * from memory.
**/
static void __exit igb_exit_module(void)
{
@@ -705,11 +711,11 @@ module_exit(igb_exit_module);
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
- * igb_cache_ring_register - Descriptor ring to register mapping
- * @adapter: board private structure to initialize
+ * igb_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
*
- * Once we know the feature-set enabled for the device, we'll cache
- * the register offset the descriptor ring is assigned to.
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
**/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
@@ -726,11 +732,12 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
if (adapter->vfs_allocated_count) {
for (; i < adapter->rss_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset +
- Q_IDX_82576(i);
+ Q_IDX_82576(i);
}
case e1000_82575:
case e1000_82580:
case e1000_i350:
+ case e1000_i354:
case e1000_i210:
case e1000_i211:
default:
@@ -785,9 +792,10 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
switch (hw->mac.type) {
case e1000_82575:
/* The 82575 assigns vectors using a bitmask, which matches the
- bitmask for the EICR/EIMS/EIMC registers. To assign one
- or more queues to a vector, we write the appropriate bits
- into the MSIXBM register for that vector. */
+ * bitmask for the EICR/EIMS/EIMC registers. To assign one
+ * or more queues to a vector, we write the appropriate bits
+ * into the MSIXBM register for that vector.
+ */
if (rx_queue > IGB_N0_QUEUE)
msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
if (tx_queue > IGB_N0_QUEUE)
@@ -798,8 +806,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
q_vector->eims_value = msixbm;
break;
case e1000_82576:
- /*
- * 82576 uses a table that essentially consists of 2 columns
+ /* 82576 uses a table that essentially consists of 2 columns
* with 8 rows. The ordering is column-major so we use the
* lower 3 bits as the row index, and the 4th bit as the
* column offset.
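The Q_IDX_82576() macro defined above the interrupt helpers encodes exactly this layout: bit 0 of the queue index selects the column (an offset of 8) and the remaining bits select the row. Worked values:

	/* Q_IDX_82576(i) = ((i & 0x1) << 3) + (i >> 1)
	 *   i = 0 -> 0,  i = 1 -> 8,  i = 2 -> 1,  i = 3 -> 9, ...
	 *   i = 14 -> 7, i = 15 -> 15
	 * Even indices fill rows 0..7 of column 0, odd ones column 1.
	 */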
@@ -816,10 +823,10 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
break;
case e1000_82580:
case e1000_i350:
+ case e1000_i354:
case e1000_i210:
case e1000_i211:
- /*
- * On 82580 and newer adapters the scheme is similar to 82576
+ /* On 82580 and newer adapters the scheme is similar to 82576
* however instead of ordering column-major we have things
* ordered row-major. So we traverse the table by using
* bit 0 as the column offset, and the remaining bits as the
@@ -848,10 +855,11 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
}
/**
- * igb_configure_msix - Configure MSI-X hardware
+ * igb_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure to initialize
*
- * igb_configure_msix sets up the hardware to properly
- * generate MSI-X interrupts.
+ * igb_configure_msix sets up the hardware to properly
+ * generate MSI-X interrupts.
**/
static void igb_configure_msix(struct igb_adapter *adapter)
{
@@ -875,8 +883,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
wr32(E1000_CTRL_EXT, tmp);
/* enable msix_other interrupt */
- array_wr32(E1000_MSIXBM(0), vector++,
- E1000_EIMS_OTHER);
+ array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
adapter->eims_other = E1000_EIMS_OTHER;
break;
@@ -884,13 +891,15 @@ static void igb_configure_msix(struct igb_adapter *adapter)
case e1000_82576:
case e1000_82580:
case e1000_i350:
+ case e1000_i354:
case e1000_i210:
case e1000_i211:
/* Turn on MSI-X capability first, or our settings
- * won't stick. And it will take days to debug. */
+ * won't stick. And it will take days to debug.
+ */
wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
- E1000_GPIE_PBA | E1000_GPIE_EIAME |
- E1000_GPIE_NSICR);
+ E1000_GPIE_PBA | E1000_GPIE_EIAME |
+ E1000_GPIE_NSICR);
/* enable msix_other interrupt */
adapter->eims_other = 1 << vector;
@@ -912,10 +921,11 @@ static void igb_configure_msix(struct igb_adapter *adapter)
}
/**
- * igb_request_msix - Initialize MSI-X interrupts
+ * igb_request_msix - Initialize MSI-X interrupts
+ * @adapter: board private structure to initialize
*
- * igb_request_msix allocates MSI-X vectors and requests interrupts from the
- * kernel.
+ * igb_request_msix allocates MSI-X vectors and requests interrupts from the
+ * kernel.
**/
static int igb_request_msix(struct igb_adapter *adapter)
{
@@ -924,7 +934,7 @@ static int igb_request_msix(struct igb_adapter *adapter)
int i, err = 0, vector = 0, free_vector = 0;
err = request_irq(adapter->msix_entries[vector].vector,
- igb_msix_other, 0, netdev->name, adapter);
+ igb_msix_other, 0, netdev->name, adapter);
if (err)
goto err_out;
@@ -948,8 +958,8 @@ static int igb_request_msix(struct igb_adapter *adapter)
sprintf(q_vector->name, "%s-unused", netdev->name);
err = request_irq(adapter->msix_entries[vector].vector,
- igb_msix_ring, 0, q_vector->name,
- q_vector);
+ igb_msix_ring, 0, q_vector->name,
+ q_vector);
if (err)
goto err_free;
}
@@ -982,13 +992,13 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
}
/**
- * igb_free_q_vector - Free memory allocated for specific interrupt vector
- * @adapter: board private structure to initialize
- * @v_idx: Index of vector to be freed
+ * igb_free_q_vector - Free memory allocated for specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
*
- * This function frees the memory allocated to the q_vector. In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
**/
static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
@@ -1003,20 +1013,19 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
adapter->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
- /*
- * ixgbe_get_stats64() might access the rings on this vector,
+ /* ixgbe_get_stats64() might access the rings on this vector,
* we must wait a grace period before freeing it.
*/
kfree_rcu(q_vector, rcu);
}
/**
- * igb_free_q_vectors - Free memory allocated for interrupt vectors
- * @adapter: board private structure to initialize
+ * igb_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
*
- * This function frees the memory allocated to the q_vectors. In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
**/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
@@ -1031,10 +1040,11 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
}
/**
- * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
+ * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
+ * @adapter: board private structure to initialize
*
- * This function resets the device so that it has 0 rx queues, tx queues, and
- * MSI-X interrupts allocated.
+ * This function resets the device so that it has 0 Rx queues, Tx queues, and
+ * MSI-X interrupts allocated.
*/
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
@@ -1043,10 +1053,12 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
}
/**
- * igb_set_interrupt_capability - set MSI or MSI-X if supported
+ * igb_set_interrupt_capability - set MSI or MSI-X if supported
+ * @adapter: board private structure to initialize
+ * @msix: boolean value of MSIX capability
*
- * Attempt to configure interrupts using the best available
- * capabilities of the hardware and kernel.
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
**/
static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
{
@@ -1063,10 +1075,10 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
else
adapter->num_tx_queues = adapter->rss_queues;
- /* start with one vector for every rx queue */
+ /* start with one vector for every Rx queue */
numvecs = adapter->num_rx_queues;
- /* if tx handler is separate add 1 for every tx queue */
+ /* if Tx handler is separate add 1 for every Tx queue */
if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
numvecs += adapter->num_tx_queues;
@@ -1128,16 +1140,16 @@ static void igb_add_ring(struct igb_ring *ring,
}
/**
- * igb_alloc_q_vector - Allocate memory for a single interrupt vector
- * @adapter: board private structure to initialize
- * @v_count: q_vectors allocated on adapter, used for ring interleaving
- * @v_idx: index of vector in adapter struct
- * @txr_count: total number of Tx rings to allocate
- * @txr_idx: index of first Tx ring to allocate
- * @rxr_count: total number of Rx rings to allocate
- * @rxr_idx: index of first Rx ring to allocate
+ * igb_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_count: q_vectors allocated on adapter, used for ring interleaving
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
*
- * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
static int igb_alloc_q_vector(struct igb_adapter *adapter,
int v_count, int v_idx,
@@ -1179,6 +1191,17 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
/* initialize pointer to rings */
ring = q_vector->ring;
+ /* initialize ITR */
+ if (rxr_count) {
+ /* Rx or Rx/Tx vector */
+ if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
+ q_vector->itr_val = adapter->rx_itr_setting;
+ } else {
+ /* Tx only vector */
+ if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
+ q_vector->itr_val = adapter->tx_itr_setting;
+ }
+
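The override above only fires for the non-adaptive cases. The convention assumed here (inferred from igb's coalesce handling, not stated in the patch) is:

	/* itr_setting == 0    -> moderation disabled, program 0
	 * itr_setting in 1..3 -> adaptive ITR, keep the IGB_START_ITR default
	 * itr_setting  > 3    -> fixed interval, program it verbatim
	 */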
if (txr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
@@ -1221,9 +1244,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
/*
- * On i350, i210, and i211, loopback VLAN packets
+ * On i350, i354, i210, and i211, loopback VLAN packets
* have the tag byte-swapped.
- * */
+ */
if (adapter->hw.mac.type >= e1000_i350)
set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
@@ -1240,11 +1263,11 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
/**
- * igb_alloc_q_vectors - Allocate memory for interrupt vectors
- * @adapter: board private structure to initialize
+ * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
*
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
**/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
@@ -1298,9 +1321,11 @@ err_out:
}
/**
- * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
+ * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
+ * @adapter: board private structure to initialize
+ * @msix: boolean value of MSIX capability
*
- * This function initializes the interrupts and allocates all of the queues.
+ * This function initializes the interrupts and allocates all of the queues.
**/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
{
@@ -1325,10 +1350,11 @@ err_alloc_q_vectors:
}
/**
- * igb_request_irq - initialize interrupts
+ * igb_request_irq - initialize interrupts
+ * @adapter: board private structure to initialize
*
- * Attempts to configure interrupts using the best available
- * capabilities of the hardware and kernel.
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
**/
static int igb_request_irq(struct igb_adapter *adapter)
{
@@ -1394,15 +1420,14 @@ static void igb_free_irq(struct igb_adapter *adapter)
}
/**
- * igb_irq_disable - Mask off interrupt generation on the NIC
- * @adapter: board private structure
+ * igb_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
**/
static void igb_irq_disable(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- /*
- * we need to be careful when disabling interrupts. The VFs are also
+ /* we need to be careful when disabling interrupts. The VFs are also
* mapped into these registers and so clearing the bits can cause
* issues on the VF drivers so we only need to clear what we set
*/
@@ -1427,8 +1452,8 @@ static void igb_irq_disable(struct igb_adapter *adapter)
}
/**
- * igb_irq_enable - Enable default interrupt generation settings
- * @adapter: board private structure
+ * igb_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
**/
static void igb_irq_enable(struct igb_adapter *adapter)
{
@@ -1477,13 +1502,12 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter)
}
/**
- * igb_release_hw_control - release control of the h/w to f/w
- * @adapter: address of board private structure
- *
- * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that the
- * driver is no longer loaded.
+ * igb_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
*
+ * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded.
**/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
@@ -1497,13 +1521,12 @@ static void igb_release_hw_control(struct igb_adapter *adapter)
}
/**
- * igb_get_hw_control - get control of the h/w from f/w
- * @adapter: address of board private structure
- *
- * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that
- * the driver is loaded.
+ * igb_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
*
+ * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded.
**/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
@@ -1517,8 +1540,8 @@ static void igb_get_hw_control(struct igb_adapter *adapter)
}
/**
- * igb_configure - configure the hardware for RX and TX
- * @adapter: private board structure
+ * igb_configure - configure the hardware for RX and TX
+ * @adapter: private board structure
**/
static void igb_configure(struct igb_adapter *adapter)
{
@@ -1541,7 +1564,8 @@ static void igb_configure(struct igb_adapter *adapter)
/* call igb_desc_unused which always leaves
* at least 1 descriptor unused to make sure
- * next_to_use != next_to_clean */
+ * next_to_use != next_to_clean
+ */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = adapter->rx_ring[i];
igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
@@ -1549,8 +1573,8 @@ static void igb_configure(struct igb_adapter *adapter)
}
/**
- * igb_power_up_link - Power up the phy/serdes link
- * @adapter: address of board private structure
+ * igb_power_up_link - Power up the phy/serdes link
+ * @adapter: address of board private structure
**/
void igb_power_up_link(struct igb_adapter *adapter)
{
@@ -1563,8 +1587,8 @@ void igb_power_up_link(struct igb_adapter *adapter)
}
/**
- * igb_power_down_link - Power down the phy/serdes link
- * @adapter: address of board private structure
+ * igb_power_down_link - Power down the phy/serdes link
+ * @adapter: address of board private structure
*/
static void igb_power_down_link(struct igb_adapter *adapter)
{
@@ -1575,8 +1599,8 @@ static void igb_power_down_link(struct igb_adapter *adapter)
}
/**
- * igb_up - Open the interface and prepare it to handle traffic
- * @adapter: board private structure
+ * igb_up - Open the interface and prepare it to handle traffic
+ * @adapter: board private structure
**/
int igb_up(struct igb_adapter *adapter)
{
@@ -1624,7 +1648,8 @@ void igb_down(struct igb_adapter *adapter)
int i;
/* signal that we're down so the interrupt handler does not
- * reschedule our watchdog timer */
+ * reschedule our watchdog timer
+ */
set_bit(__IGB_DOWN, &adapter->state);
/* disable receives in the hardware */
@@ -1694,6 +1719,7 @@ void igb_reset(struct igb_adapter *adapter)
*/
switch (mac->type) {
case e1000_i350:
+ case e1000_i354:
case e1000_82580:
pba = rd32(E1000_RXPBS);
pba = igb_rxpbs_adjust_82580(pba);
@@ -1720,14 +1746,16 @@ void igb_reset(struct igb_adapter *adapter)
* rounded up to the next 1KB and expressed in KB. Likewise,
* the Rx FIFO should be large enough to accommodate at least
* one full receive packet and is similarly rounded up and
- * expressed in KB. */
+ * expressed in KB.
+ */
pba = rd32(E1000_PBA);
/* upper 16 bits has Tx packet buffer allocation size in KB */
tx_space = pba >> 16;
/* lower 16 bits has Rx packet buffer allocation size in KB */
pba &= 0xffff;
- /* the tx fifo also stores 16 bytes of information about the tx
- * but don't include ethernet FCS because hardware appends it */
+ /* the Tx fifo also stores 16 bytes of information about the Tx
+ * but don't include ethernet FCS because hardware appends it
+ */
min_tx_space = (adapter->max_frame_size +
sizeof(union e1000_adv_tx_desc) -
ETH_FCS_LEN) * 2;
@@ -1740,13 +1768,15 @@ void igb_reset(struct igb_adapter *adapter)
/* If current Tx allocation is less than the min Tx FIFO size,
* and the min Tx FIFO size is less than the current Rx FIFO
- * allocation, take space away from current Rx allocation */
+ * allocation, take space away from current Rx allocation
+ */
if (tx_space < min_tx_space &&
((min_tx_space - tx_space) < pba)) {
pba = pba - (min_tx_space - tx_space);
- /* if short on rx space, rx wins and must trump tx
- * adjustment */
+ /* if short on Rx space, Rx wins and must trump Tx
+ * adjustment
+ */
if (pba < min_rx_space)
pba = min_rx_space;
}
@@ -1758,7 +1788,8 @@ void igb_reset(struct igb_adapter *adapter)
* (or the size used for early receive) above it in the Rx FIFO.
* Set it to the lower of:
* - 90% of the Rx FIFO size, or
- * - the full Rx FIFO size minus one full frame */
+ * - the full Rx FIFO size minus one full frame
+ */
hwm = min(((pba << 10) * 9 / 10),
((pba << 10) - 2 * adapter->max_frame_size));
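Plugging in numbers makes the high-water-mark rule concrete; with a hypothetical 34 KB Rx PBA and a 1522-byte max frame:

	/* pba = 34 -> pba << 10 = 34816 bytes of Rx FIFO
	 *   90% rule:       34816 * 9 / 10   = 31334
	 *   one-frame rule: 34816 - 2 * 1522 = 31772
	 *   hwm = min(31334, 31772)          = 31334 bytes
	 */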
@@ -1789,8 +1820,7 @@ void igb_reset(struct igb_adapter *adapter)
if (hw->mac.ops.init_hw(hw))
dev_err(&pdev->dev, "Hardware Error\n");
- /*
- * Flow control settings reset on hardware reset, so guarantee flow
+ /* Flow control settings reset on hardware reset, so guarantee flow
* control is off when forcing speed.
*/
if (!hw->mac.autoneg)
@@ -1826,14 +1856,13 @@ void igb_reset(struct igb_adapter *adapter)
static netdev_features_t igb_fix_features(struct net_device *netdev,
netdev_features_t features)
{
- /*
- * Since there is no support for separate rx/tx vlan accel
- * enable/disable make sure tx flag is always in same state as rx.
+ /* Since there is no support for separate Rx/Tx vlan accel
+ * enable/disable make sure Tx flag is always in same state as Rx.
*/
- if (features & NETIF_F_HW_VLAN_RX)
- features |= NETIF_F_HW_VLAN_TX;
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
else
- features &= ~NETIF_F_HW_VLAN_TX;
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features;
}
@@ -1844,7 +1873,7 @@ static int igb_set_features(struct net_device *netdev,
netdev_features_t changed = netdev->features ^ features;
struct igb_adapter *adapter = netdev_priv(netdev);
- if (changed & NETIF_F_HW_VLAN_RX)
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
igb_vlan_mode(netdev, features);
if (!(changed & NETIF_F_RXALL))
@@ -1876,6 +1905,7 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_set_vf_mac = igb_ndo_set_vf_mac,
.ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
.ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
+ .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
.ndo_get_vf_config = igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = igb_netpoll,
@@ -1887,7 +1917,6 @@ static const struct net_device_ops igb_netdev_ops = {
/**
* igb_set_fw_version - Configure version string for ethtool
* @adapter: adapter struct
- *
**/
void igb_set_fw_version(struct igb_adapter *adapter)
{
@@ -1923,10 +1952,10 @@ void igb_set_fw_version(struct igb_adapter *adapter)
return;
}
-/* igb_init_i2c - Init I2C interface
+/**
+ * igb_init_i2c - Init I2C interface
* @adapter: pointer to adapter structure
- *
- */
+ **/
static s32 igb_init_i2c(struct igb_adapter *adapter)
{
s32 status = E1000_SUCCESS;
@@ -1951,15 +1980,15 @@ static s32 igb_init_i2c(struct igb_adapter *adapter)
}
/**
- * igb_probe - Device Initialization Routine
- * @pdev: PCI device information struct
- * @ent: entry in igb_pci_tbl
+ * igb_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in igb_pci_tbl
*
- * Returns 0 on success, negative on failure
+ * Returns 0 on success, negative on failure
*
- * igb_probe initializes an adapter identified by a pci_dev structure.
- * The OS initialization, configuring of the adapter private structure,
- * and a hardware reset occur.
+ * igb_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
**/
static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -1996,18 +2025,19 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else {
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "No usable DMA "
- "configuration, aborting\n");
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
goto err_dma;
}
}
}
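The probe falls back from a 64-bit to a 32-bit DMA mask, setting the streaming and coherent masks separately. Later kernels offer dma_set_mask_and_coherent(), which would collapse each pair into one call; a sketch of the equivalent logic under that assumption:

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;	/* label as in igb_probe() */
		}
	}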
err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM),
- igb_driver_name);
+ IORESOURCE_MEM),
+ igb_driver_name);
if (err)
goto err_pci_reg;
@@ -2085,8 +2115,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev,
"PHY reset is blocked due to SOL/IDER session.\n");
- /*
- * features is initialized to 0 in allocation, it might have bits
+ /* features is initialized to 0 in allocation, it might have bits
* set by igb_sw_init so we should use an or instead of an
* assignment.
*/
@@ -2097,15 +2126,15 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_TSO6 |
NETIF_F_RXHASH |
NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_TX;
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
/* copy netdev features into list of user selectable features */
netdev->hw_features |= netdev->features;
netdev->hw_features |= NETIF_F_RXALL;
/* set this bit last since it cannot be part of hw_features */
- netdev->features |= NETIF_F_HW_VLAN_FILTER;
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->vlan_features |= NETIF_F_TSO |
NETIF_F_TSO6 |
@@ -2130,11 +2159,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
/* before reading the NVM, reset the controller to put the device in a
- * known good starting state */
+ * known good starting state
+ */
hw->mac.ops.reset_hw(hw);
- /*
- * make sure the NVM is good , i211 parts have special NVM that
+ /* make sure the NVM is good, i211 parts have special NVM that
* doesn't contain a checksum
*/
if (hw->mac.type != e1000_i211) {
@@ -2161,9 +2190,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
igb_set_fw_version(adapter);
setup_timer(&adapter->watchdog_timer, igb_watchdog,
- (unsigned long) adapter);
+ (unsigned long) adapter);
setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
- (unsigned long) adapter);
+ (unsigned long) adapter);
INIT_WORK(&adapter->reset_task, igb_reset_task);
INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
@@ -2185,8 +2214,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Check the NVM for wake support on non-port A ports */
if (hw->mac.type >= e1000_82580)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
- NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
- &eeprom_data);
+ NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+ &eeprom_data);
else if (hw->bus.func == 1)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
@@ -2195,7 +2224,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* now that we have the eeprom settings, apply the special cases where
* the eeprom may be wrong or the board simply won't support wake on
- * lan on a particular port */
+ * lan on a particular port
+ */
switch (pdev->device) {
case E1000_DEV_ID_82575GB_QUAD_COPPER:
adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
@@ -2204,7 +2234,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case E1000_DEV_ID_82576_FIBER:
case E1000_DEV_ID_82576_SERDES:
/* Wake events only supported on port A for dual fiber
- * regardless of eeprom setting */
+ * regardless of eeprom setting
+ */
if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
break;
@@ -2274,8 +2305,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
u16 ets_word;
- /*
- * Read the NVM to determine if this i350 device supports an
+ /* Read the NVM to determine if this i350 device supports an
* external thermal sensor.
*/
hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
@@ -2294,17 +2324,20 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
igb_ptp_init(adapter);
dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
- /* print bus type/speed/width info */
- dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
- netdev->name,
- ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
- (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
- "unknown"),
- ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
- (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
- (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
- "unknown"),
- netdev->dev_addr);
+ /* print bus type/speed/width info, not applicable to i354 */
+ if (hw->mac.type != e1000_i354) {
+ dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
+ netdev->name,
+ ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+ (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
+ "unknown"),
+ ((hw->bus.width == e1000_bus_width_pcie_x4) ?
+ "Width x4" :
+ (hw->bus.width == e1000_bus_width_pcie_x2) ?
+ "Width x2" :
+ (hw->bus.width == e1000_bus_width_pcie_x1) ?
+ "Width x1" : "unknown"), netdev->dev_addr);
+ }
ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
if (ret_val)
@@ -2321,6 +2354,13 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case e1000_i211:
igb_set_eee_i350(hw);
break;
+ case e1000_i354:
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ if ((rd32(E1000_CTRL_EXT) &
+ E1000_CTRL_EXT_LINK_MODE_SGMII))
+ igb_set_eee_i354(hw);
+ }
+ break;
default:
break;
}
@@ -2344,7 +2384,7 @@ err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -2361,7 +2401,7 @@ static int igb_disable_sriov(struct pci_dev *pdev)
/* reclaim resources allocated to VFs */
if (adapter->vf_data) {
/* disable iov and allow time for transactions to clear */
- if (igb_vfs_are_assigned(adapter)) {
+ if (pci_vfs_assigned(pdev)) {
dev_warn(&pdev->dev,
"Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
return -EPERM;
@@ -2444,26 +2484,24 @@ out:
}
#endif
-/*
+/**
* igb_remove_i2c - Cleanup I2C interface
* @adapter: pointer to adapter structure
- *
- */
+ **/
static void igb_remove_i2c(struct igb_adapter *adapter)
{
-
/* free the adapter bus structure */
i2c_del_adapter(&adapter->i2c_adap);
}
/**
- * igb_remove - Device Removal Routine
- * @pdev: PCI device information struct
+ * igb_remove - Device Removal Routine
+ * @pdev: PCI device information struct
*
- * igb_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device. The could be caused by a
- * Hot-Plug event, or because the driver is going to be removed from
- * memory.
+ * igb_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
**/
static void igb_remove(struct pci_dev *pdev)
{
@@ -2477,8 +2515,7 @@ static void igb_remove(struct pci_dev *pdev)
#endif
igb_remove_i2c(adapter);
igb_ptp_stop(adapter);
- /*
- * The watchdog timer may be rescheduled, so explicitly
+ /* The watchdog timer may be rescheduled, so explicitly
* disable watchdog from being rescheduled.
*/
set_bit(__IGB_DOWN, &adapter->state);
@@ -2498,7 +2535,8 @@ static void igb_remove(struct pci_dev *pdev)
#endif
/* Release control of h/w to f/w. If f/w is AMT enabled, this
- * would have already happened in close and is redundant. */
+ * would have already happened in close and is redundant.
+ */
igb_release_hw_control(adapter);
unregister_netdev(netdev);
@@ -2513,7 +2551,7 @@ static void igb_remove(struct pci_dev *pdev)
if (hw->flash_address)
iounmap(hw->flash_address);
pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_select_bars(pdev, IORESOURCE_MEM));
kfree(adapter->shadow_vfta);
free_netdev(netdev);
@@ -2524,13 +2562,13 @@ static void igb_remove(struct pci_dev *pdev)
}
/**
- * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
- * @adapter: board private structure to initialize
+ * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
+ * @adapter: board private structure to initialize
*
- * This function initializes the vf specific data storage and then attempts to
- * allocate the VFs. The reason for ordering it this way is because it is much
- * mor expensive time wise to disable SR-IOV than it is to allocate and free
- * the memory for the VFs.
+ * This function initializes the vf specific data storage and then attempts to
+ * allocate the VFs. The reason for ordering it this way is because it is much
+ * more expensive time-wise to disable SR-IOV than it is to allocate and free
+ * the memory for the VFs.
**/
static void igb_probe_vfs(struct igb_adapter *adapter)
{
@@ -2576,6 +2614,7 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
}
/* fall through */
case e1000_82580:
+ case e1000_i354:
default:
max_rss_queues = IGB_MAX_RX_QUEUES;
break;
@@ -2590,8 +2629,7 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
/* Device supports enough interrupts without queue pairing. */
break;
case e1000_82576:
- /*
- * If VFs are going to be allocated with RSS queues then we
+ /* If VFs are going to be allocated with RSS queues then we
* should pair the queues in order to conserve interrupts due
* to limited supply.
*/
@@ -2601,10 +2639,10 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
/* fall through */
case e1000_82580:
case e1000_i350:
+ case e1000_i354:
case e1000_i210:
default:
- /*
- * If rss_queues > half of max_rss_queues, pair the queues in
+ /* If rss_queues > half of max_rss_queues, pair the queues in
* order to conserve interrupts due to limited supply.
*/
if (adapter->rss_queues > (max_rss_queues / 2))
@@ -2614,12 +2652,12 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
}
/**
- * igb_sw_init - Initialize general software structures (struct igb_adapter)
- * @adapter: board private structure to initialize
+ * igb_sw_init - Initialize general software structures (struct igb_adapter)
+ * @adapter: board private structure to initialize
*
- * igb_sw_init initializes the Adapter private data structure.
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
+ * igb_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
**/
static int igb_sw_init(struct igb_adapter *adapter)
{
@@ -2689,16 +2727,16 @@ static int igb_sw_init(struct igb_adapter *adapter)
}
/**
- * igb_open - Called when a network interface is made active
- * @netdev: network interface device structure
+ * igb_open - Called when a network interface is made active
+ * @netdev: network interface device structure
*
- * Returns 0 on success, negative value on failure
+ * Returns 0 on success, negative value on failure
*
- * The open entry point is called when a network interface is made
- * active by the system (IFF_UP). At this point all resources needed
- * for transmit and receive operations are allocated, the interrupt
- * handler is registered with the OS, the watchdog timer is started,
- * and the stack is notified that the interface is ready.
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
**/
static int __igb_open(struct net_device *netdev, bool resuming)
{
@@ -2734,7 +2772,8 @@ static int __igb_open(struct net_device *netdev, bool resuming)
/* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
* as soon as we call pci_request_irq, so we have to setup our
- * clean_rx handler before we do so. */
+ * clean_rx handler before we do so.
+ */
igb_configure(adapter);
err = igb_request_irq(adapter);
@@ -2803,15 +2842,15 @@ static int igb_open(struct net_device *netdev)
}
/**
- * igb_close - Disables a network interface
- * @netdev: network interface device structure
+ * igb_close - Disables a network interface
+ * @netdev: network interface device structure
*
- * Returns 0, this is not allowed to fail
+ * Returns 0, this is not allowed to fail
*
- * The close entry point is called when an interface is de-activated
- * by the OS. The hardware is still under the driver's control, but
- * needs to be disabled. A global MAC reset is issued to stop the
- * hardware, and all transmit and receive resources are freed.
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
**/
static int __igb_close(struct net_device *netdev, bool suspending)
{
@@ -2840,10 +2879,10 @@ static int igb_close(struct net_device *netdev)
}
/**
- * igb_setup_tx_resources - allocate Tx resources (Descriptors)
- * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ * igb_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
*
- * Return 0 on success, negative on failure
+ * Return 0 on success, negative on failure
**/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
@@ -2878,11 +2917,11 @@ err:
}
/**
- * igb_setup_all_tx_resources - wrapper to allocate Tx resources
- * (Descriptors) for all queues
- * @adapter: board private structure
+ * igb_setup_all_tx_resources - wrapper to allocate Tx resources
+ * (Descriptors) for all queues
+ * @adapter: board private structure
*
- * Return 0 on success, negative on failure
+ * Return 0 on success, negative on failure
**/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
@@ -2904,8 +2943,8 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
}
/**
- * igb_setup_tctl - configure the transmit control registers
- * @adapter: Board private structure
+ * igb_setup_tctl - configure the transmit control registers
+ * @adapter: Board private structure
**/
void igb_setup_tctl(struct igb_adapter *adapter)
{
@@ -2930,11 +2969,11 @@ void igb_setup_tctl(struct igb_adapter *adapter)
}
/**
- * igb_configure_tx_ring - Configure transmit ring after Reset
- * @adapter: board private structure
- * @ring: tx ring to configure
+ * igb_configure_tx_ring - Configure transmit ring after Reset
+ * @adapter: board private structure
+ * @ring: tx ring to configure
*
- * Configure a transmit ring after a reset.
+ * Configure a transmit ring after a reset.
**/
void igb_configure_tx_ring(struct igb_adapter *adapter,
struct igb_ring *ring)
@@ -2950,9 +2989,9 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
mdelay(10);
wr32(E1000_TDLEN(reg_idx),
- ring->count * sizeof(union e1000_adv_tx_desc));
+ ring->count * sizeof(union e1000_adv_tx_desc));
wr32(E1000_TDBAL(reg_idx),
- tdba & 0x00000000ffffffffULL);
+ tdba & 0x00000000ffffffffULL);
wr32(E1000_TDBAH(reg_idx), tdba >> 32);
ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
@@ -2968,10 +3007,10 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
}
/**
- * igb_configure_tx - Configure transmit Unit after Reset
- * @adapter: board private structure
+ * igb_configure_tx - Configure transmit Unit after Reset
+ * @adapter: board private structure
*
- * Configure the Tx unit of the MAC after a reset.
+ * Configure the Tx unit of the MAC after a reset.
**/
static void igb_configure_tx(struct igb_adapter *adapter)
{
@@ -2982,10 +3021,10 @@ static void igb_configure_tx(struct igb_adapter *adapter)
}
/**
- * igb_setup_rx_resources - allocate Rx resources (Descriptors)
- * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ * igb_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
*
- * Returns 0 on success, negative on failure
+ * Returns 0 on success, negative on failure
**/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
@@ -3021,11 +3060,11 @@ err:
}
/**
- * igb_setup_all_rx_resources - wrapper to allocate Rx resources
- * (Descriptors) for all queues
- * @adapter: board private structure
+ * igb_setup_all_rx_resources - wrapper to allocate Rx resources
+ * (Descriptors) for all queues
+ * @adapter: board private structure
*
- * Return 0 on success, negative on failure
+ * Return 0 on success, negative on failure
**/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
@@ -3047,8 +3086,8 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
}
/**
- * igb_setup_mrqc - configure the multiple receive queue control registers
- * @adapter: Board private structure
+ * igb_setup_mrqc - configure the multiple receive queue control registers
+ * @adapter: Board private structure
**/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
@@ -3081,8 +3120,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
break;
}
- /*
- * Populate the indirection table 4 entries at a time. To do this
+ /* Populate the indirection table 4 entries at a time. To do this
* we are generating the results for n and n+2 and then interleaving
* those with the results with n+1 and n+3.
*/
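Functionally the interleaving trick fills the 128-entry redirection table round-robin across the RSS queues, four 8-bit entries per 32-bit RETA register. A naive byte-at-a-time equivalent (illustrative only; it ignores the per-MAC shift applied to the queue index on older parts):

	for (j = 0; j < 32; j++) {
		u32 reta = 0;
		int k;

		for (k = 0; k < 4; k++)
			reta |= ((4 * j + k) % num_rx_queues) << (8 * k);

		wr32(E1000_RETA(j), reta);
	}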
@@ -3098,8 +3136,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
wr32(E1000_RETA(j), reta);
}
- /*
- * Disable raw packet checksumming so that RSS hash is placed in
+ /* Disable raw packet checksumming so that RSS hash is placed in
* descriptor on writeback. No need to enable TCP/UDP/IP checksum
* offloads as they are enabled by default
*/
@@ -3129,7 +3166,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
/* If VMDq is enabled then we set the appropriate mode for that, else
* we default to RSS so that an RSS hash is calculated per packet even
- * if we are only using one queue */
+ * if we are only using one queue
+ */
if (adapter->vfs_allocated_count) {
if (hw->mac.type > e1000_82575) {
/* Set the default pool for the PF's first queue */
@@ -3154,8 +3192,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
}
/**
- * igb_setup_rctl - configure the receive control registers
- * @adapter: Board private structure
+ * igb_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
**/
void igb_setup_rctl(struct igb_adapter *adapter)
{
@@ -3170,8 +3208,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
- /*
- * enable stripping of CRC. It's unlikely this will break BMC
+ /* enable stripping of CRC. It's unlikely this will break BMC
* redirection as it did with e1000. Newer features require
* that the HW strips the CRC.
*/
@@ -3198,7 +3235,8 @@ void igb_setup_rctl(struct igb_adapter *adapter)
/* This is useful for sniffing bad packets. */
if (adapter->netdev->features & NETIF_F_RXALL) {
/* UPE and MPE will be handled by normal PROMISC logic
- * in e1000e_set_rx_mode */
+ * in e1000e_set_rx_mode
+ */
rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
E1000_RCTL_BAM | /* RX All Bcast Pkts */
E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
@@ -3221,7 +3259,8 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
u32 vmolr;
/* if it isn't the PF check to see if VFs are enabled and
- * increase the size to support vlan tags */
+ * increase the size to support vlan tags
+ */
if (vfn < adapter->vfs_allocated_count &&
adapter->vf_data[vfn].vlans_enabled)
size += VLAN_TAG_SIZE;
@@ -3235,10 +3274,10 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
}
/**
- * igb_rlpml_set - set maximum receive packet size
- * @adapter: board private structure
+ * igb_rlpml_set - set maximum receive packet size
+ * @adapter: board private structure
*
- * Configure maximum receivable packet size.
+ * Configure maximum receivable packet size.
**/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
@@ -3248,8 +3287,7 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
if (pf_id) {
igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
- /*
- * If we're in VMDQ or SR-IOV mode, then set global RLPML
+ /* If we're in VMDQ or SR-IOV mode, then set global RLPML
* to our max jumbo frame size, in case we need to enable
* jumbo frames on one of the rings later.
* This will not pass over-length frames into the default
@@ -3267,17 +3305,16 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
struct e1000_hw *hw = &adapter->hw;
u32 vmolr;
- /*
- * This register exists only on 82576 and newer so if we are older then
+ /* This register exists only on 82576 and newer so if we are older then
* we should exit and do nothing
*/
if (hw->mac.type < e1000_82576)
return;
vmolr = rd32(E1000_VMOLR(vfn));
- vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+ vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
if (aupe)
- vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
+ vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
else
vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
@@ -3286,25 +3323,24 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
- /*
- * for VMDq only allow the VFs and pool 0 to accept broadcast and
+ /* for VMDq only allow the VFs and pool 0 to accept broadcast and
* multicast packets
*/
if (vfn <= adapter->vfs_allocated_count)
- vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
+ vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
wr32(E1000_VMOLR(vfn), vmolr);
}
/**
- * igb_configure_rx_ring - Configure a receive ring after Reset
- * @adapter: board private structure
- * @ring: receive ring to be configured
+ * igb_configure_rx_ring - Configure a receive ring after Reset
+ * @adapter: board private structure
+ * @ring: receive ring to be configured
*
- * Configure the Rx unit of the MAC after a reset.
+ * Configure the Rx unit of the MAC after a reset.
**/
void igb_configure_rx_ring(struct igb_adapter *adapter,
- struct igb_ring *ring)
+ struct igb_ring *ring)
{
struct e1000_hw *hw = &adapter->hw;
u64 rdba = ring->dma;
@@ -3319,7 +3355,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
rdba & 0x00000000ffffffffULL);
wr32(E1000_RDBAH(reg_idx), rdba >> 32);
wr32(E1000_RDLEN(reg_idx),
- ring->count * sizeof(union e1000_adv_rx_desc));
+ ring->count * sizeof(union e1000_adv_rx_desc));
/* initialize head and tail */
ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
@@ -3350,25 +3386,11 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
-static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
- struct igb_ring *rx_ring)
-{
-#define IGB_MAX_BUILD_SKB_SIZE \
- (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
- (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
-
- /* set build_skb flag */
- if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
- set_ring_build_skb_enabled(rx_ring);
- else
- clear_ring_build_skb_enabled(rx_ring);
-}
-
/**
- * igb_configure_rx - Configure receive Unit after Reset
- * @adapter: board private structure
+ * igb_configure_rx - Configure receive Unit after Reset
+ * @adapter: board private structure
*
- * Configure the Rx unit of the MAC after a reset.
+ * Configure the Rx unit of the MAC after a reset.
**/
static void igb_configure_rx(struct igb_adapter *adapter)
{
@@ -3379,22 +3401,20 @@ static void igb_configure_rx(struct igb_adapter *adapter)
/* set the correct pool for the PF default MAC address in entry 0 */
igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
- adapter->vfs_allocated_count);
+ adapter->vfs_allocated_count);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
- * the Base and Length of the Rx Descriptor Ring */
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct igb_ring *rx_ring = adapter->rx_ring[i];
- igb_set_rx_buffer_len(adapter, rx_ring);
- igb_configure_rx_ring(adapter, rx_ring);
- }
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
}
/**
- * igb_free_tx_resources - Free Tx Resources per Queue
- * @tx_ring: Tx descriptor ring for a specific queue
+ * igb_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
*
- * Free all transmit software resources
+ * Free all transmit software resources
**/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
@@ -3414,10 +3434,10 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)
}
/**
- * igb_free_all_tx_resources - Free Tx Resources for All Queues
- * @adapter: board private structure
+ * igb_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
*
- * Free all transmit software resources
+ * Free all transmit software resources
**/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
@@ -3450,8 +3470,8 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
}
/**
- * igb_clean_tx_ring - Free Tx Buffers
- * @tx_ring: ring to be cleaned
+ * igb_clean_tx_ring - Free Tx Buffers
+ * @tx_ring: ring to be cleaned
**/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
@@ -3481,8 +3501,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
}
/**
- * igb_clean_all_tx_rings - Free Tx Buffers for all queues
- * @adapter: board private structure
+ * igb_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
**/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
@@ -3493,10 +3513,10 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
}
/**
- * igb_free_rx_resources - Free Rx Resources
- * @rx_ring: ring to clean the resources from
+ * igb_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
*
- * Free all receive software resources
+ * Free all receive software resources
**/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
@@ -3516,10 +3536,10 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
}
/**
- * igb_free_all_rx_resources - Free Rx Resources for All Queues
- * @adapter: board private structure
+ * igb_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
*
- * Free all receive software resources
+ * Free all receive software resources
**/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
@@ -3530,8 +3550,8 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
}
/**
- * igb_clean_rx_ring - Free Rx Buffers per Queue
- * @rx_ring: ring to free buffers from
+ * igb_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
**/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
@@ -3573,8 +3593,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
}
/**
- * igb_clean_all_rx_rings - Free Rx Buffers for all queues
- * @adapter: board private structure
+ * igb_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
**/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
@@ -3585,11 +3605,11 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
}
/**
- * igb_set_mac - Change the Ethernet Address of the NIC
- * @netdev: network interface device structure
- * @p: pointer to an address structure
+ * igb_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
*
- * Returns 0 on success, negative on failure
+ * Returns 0 on success, negative on failure
**/
static int igb_set_mac(struct net_device *netdev, void *p)
{
@@ -3605,19 +3625,19 @@ static int igb_set_mac(struct net_device *netdev, void *p)
/* set the correct pool for the new PF MAC address in entry 0 */
igb_rar_set_qsel(adapter, hw->mac.addr, 0,
- adapter->vfs_allocated_count);
+ adapter->vfs_allocated_count);
return 0;
}
/**
- * igb_write_mc_addr_list - write multicast addresses to MTA
- * @netdev: network interface device structure
+ * igb_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
*
- * Writes multicast address list to the MTA hash table.
- * Returns: -ENOMEM on failure
- * 0 on no addresses written
- * X on writing X addresses to MTA
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ * 0 on no addresses written
+ * X on writing X addresses to MTA
**/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
@@ -3650,13 +3670,13 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
}
/**
- * igb_write_uc_addr_list - write unicast addresses to RAR table
- * @netdev: network interface device structure
+ * igb_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
*
- * Writes unicast address list to the RAR table.
- * Returns: -ENOMEM on failure/insufficient address space
- * 0 on no addresses written
- * X on writing X addresses to the RAR table
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ * 0 on no addresses written
+ * X on writing X addresses to the RAR table
**/
static int igb_write_uc_addr_list(struct net_device *netdev)
{
@@ -3677,8 +3697,8 @@ static int igb_write_uc_addr_list(struct net_device *netdev)
if (!rar_entries)
break;
igb_rar_set_qsel(adapter, ha->addr,
- rar_entries--,
- vfn);
+ rar_entries--,
+ vfn);
count++;
}
}
@@ -3693,13 +3713,13 @@ static int igb_write_uc_addr_list(struct net_device *netdev)
}
/**
- * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
- * @netdev: network interface device structure
+ * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
*
- * The set_rx_mode entry point is called whenever the unicast or multicast
- * address lists or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper unicast, multicast,
- * promiscuous mode, and all-multi behavior.
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
**/
static void igb_set_rx_mode(struct net_device *netdev)
{
@@ -3716,6 +3736,10 @@ static void igb_set_rx_mode(struct net_device *netdev)
rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
if (netdev->flags & IFF_PROMISC) {
+ u32 mrqc = rd32(E1000_MRQC);
+ /* retain VLAN HW filtering if in VT mode */
+ if (mrqc & E1000_MRQC_ENABLE_VMDQ)
+ rctl |= E1000_RCTL_VFE;
rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
} else {
@@ -3723,8 +3747,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
rctl |= E1000_RCTL_MPE;
vmolr |= E1000_VMOLR_MPME;
} else {
- /*
- * Write addresses to the MTA, if the attempt fails
+ /* Write addresses to the MTA, if the attempt fails
* then we should just turn on promiscuous mode so
* that we can at least receive multicast traffic
*/
@@ -3736,8 +3759,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
vmolr |= E1000_VMOLR_ROMPE;
}
}
- /*
- * Write addresses to available RAR registers, if there is not
+ /* Write addresses to available RAR registers, if there is not
* sufficient space to store all the addresses then enable
* unicast promiscuous mode
*/
@@ -3750,8 +3772,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
}
wr32(E1000_RCTL, rctl);
- /*
- * In order to support SR-IOV and eventually VMDq it is necessary to set
+ /* In order to support SR-IOV and eventually VMDq it is necessary to set
* the VMOLR to enable the appropriate modes. Without this workaround
* we will have issues with VLAN tag stripping not being done for frames
* that are only arriving because we are the default pool
@@ -3760,7 +3781,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
return;
vmolr |= rd32(E1000_VMOLR(vfn)) &
- ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
+ ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
wr32(E1000_VMOLR(vfn), vmolr);
igb_restore_vf_multicasts(adapter);
}
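/* [Editor's aside -- a condensed, hypothetical sketch of the rx-mode
 * fallback chain implemented above; only the logic visible in this
 * hunk is reproduced.]
 */
static u32 igb_sketch_rx_mode_rctl(struct net_device *netdev, u32 rctl, u32 mrqc)
{
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
		/* new in this patch: keep HW VLAN filtering while VMDq
		 * pools are active so the other pools stay isolated
		 */
		if (mrqc & E1000_MRQC_ENABLE_VMDQ)
			rctl |= E1000_RCTL_VFE;
	} else if (netdev->flags & IFF_ALLMULTI) {
		rctl |= E1000_RCTL_MPE;
	} else if (igb_write_mc_addr_list(netdev) < 0) {
		/* MTA overflow: fall back to multicast promiscuous */
		rctl |= E1000_RCTL_MPE;
	}

	return rctl;
}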
@@ -3805,7 +3826,8 @@ static void igb_spoof_check(struct igb_adapter *adapter)
}
/* Need to wait a few seconds after link up to get diagnostic information from
- * the phy */
+ * the phy
+ */
static void igb_update_phy_info(unsigned long data)
{
struct igb_adapter *adapter = (struct igb_adapter *) data;
@@ -3813,8 +3835,8 @@ static void igb_update_phy_info(unsigned long data)
}
/**
- * igb_has_link - check shared code for link and determine up/down
- * @adapter: pointer to driver private info
+ * igb_has_link - check shared code for link and determine up/down
+ * @adapter: pointer to driver private info
**/
bool igb_has_link(struct igb_adapter *adapter)
{
@@ -3859,17 +3881,16 @@ static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
ctrl_ext = rd32(E1000_CTRL_EXT);
if ((hw->phy.media_type == e1000_media_type_copper) &&
- !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+ !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
ret = !!(thstat & event);
- }
}
return ret;
}
/**
- * igb_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * igb_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
**/
static void igb_watchdog(unsigned long data)
{
@@ -3881,9 +3902,10 @@ static void igb_watchdog(unsigned long data)
static void igb_watchdog_task(struct work_struct *work)
{
struct igb_adapter *adapter = container_of(work,
- struct igb_adapter,
- watchdog_task);
+ struct igb_adapter,
+ watchdog_task);
struct e1000_hw *hw = &adapter->hw;
+ struct e1000_phy_info *phy = &hw->phy;
struct net_device *netdev = adapter->netdev;
u32 link;
int i;
@@ -3896,8 +3918,8 @@ static void igb_watchdog_task(struct work_struct *work)
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
hw->mac.ops.get_speed_and_duplex(hw,
- &adapter->link_speed,
- &adapter->link_duplex);
+ &adapter->link_speed,
+ &adapter->link_duplex);
ctrl = rd32(E1000_CTRL);
/* Link status message must follow this format */
@@ -3912,6 +3934,11 @@ static void igb_watchdog_task(struct work_struct *work)
(ctrl & E1000_CTRL_RFCE) ? "RX" :
(ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
+ /* check if SmartSpeed worked */
+ igb_check_downshift(hw);
+ if (phy->speed_downgraded)
+ netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
+
/* check for thermal sensor event */
if (igb_thermal_sensor_event(hw,
E1000_THSTAT_LINK_THROTTLE)) {
@@ -3980,7 +4007,8 @@ static void igb_watchdog_task(struct work_struct *work)
/* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
* to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context). */
+ * (Do the reset outside of interrupt context).
+ */
if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
@@ -3993,7 +4021,7 @@ static void igb_watchdog_task(struct work_struct *work)
set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
}
- /* Cause software interrupt to ensure rx ring is cleaned */
+ /* Cause software interrupt to ensure Rx ring is cleaned */
if (adapter->msix_entries) {
u32 eics = 0;
for (i = 0; i < adapter->num_q_vectors; i++)
@@ -4020,20 +4048,20 @@ enum latency_range {
};
/**
- * igb_update_ring_itr - update the dynamic ITR value based on packet size
+ * igb_update_ring_itr - update the dynamic ITR value based on packet size
+ * @q_vector: pointer to q_vector
*
- * Stores a new ITR value based on strictly on packet size. This
- * algorithm is less sophisticated than that used in igb_update_itr,
- * due to the difficulty of synchronizing statistics across multiple
- * receive rings. The divisors and thresholds used by this function
- * were determined based on theoretical maximum wire speed and testing
- * data, in order to minimize response time while increasing bulk
- * throughput.
- * This functionality is controlled by the InterruptThrottleRate module
- * parameter (see igb_param.c)
- * NOTE: This function is called only when operating in a multiqueue
- * receive environment.
- * @q_vector: pointer to q_vector
+ * Stores a new ITR value based strictly on packet size. This
+ * algorithm is less sophisticated than that used in igb_update_itr,
+ * due to the difficulty of synchronizing statistics across multiple
+ * receive rings. The divisors and thresholds used by this function
+ * were determined based on theoretical maximum wire speed and testing
+ * data, in order to minimize response time while increasing bulk
+ * throughput.
+ * This functionality is controlled by the InterruptThrottleRate module
+ * parameter (see igb_param.c)
+ * NOTE: This function is called only when operating in a multiqueue
+ * receive environment.
**/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
@@ -4094,20 +4122,21 @@ clear_counts:
}
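/* [Editor's aside -- the shape of the heuristic documented above, as a
 * sketch.  The thresholds here are illustrative only; the real divisors
 * live in igb_update_ring_itr() and are not visible in this hunk.]
 */
static enum latency_range igb_sketch_classify(unsigned int packets,
					      unsigned int bytes)
{
	unsigned int avg_wire_size;

	if (!packets)
		return lowest_latency;

	avg_wire_size = bytes / packets;	/* mean packet size */

	/* small packets favor latency, large packets favor throughput */
	if (avg_wire_size < 256)
		return lowest_latency;
	return avg_wire_size < 1024 ? low_latency : bulk_latency;
}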
/**
- * igb_update_itr - update the dynamic ITR value based on statistics
- * Stores a new ITR value based on packets and byte
- * counts during the last interrupt. The advantage of per interrupt
- * computation is faster updates and more accurate ITR for the current
- * traffic pattern. Constants in this function were computed
- * based on theoretical maximum wire speed and thresholds were set based
- * on testing data as well as attempting to minimize response time
- * while increasing bulk throughput.
- * this functionality is controlled by the InterruptThrottleRate module
- * parameter (see igb_param.c)
- * NOTE: These calculations are only valid when operating in a single-
- * queue environment.
- * @q_vector: pointer to q_vector
- * @ring_container: ring info to update the itr for
+ * igb_update_itr - update the dynamic ITR value based on statistics
+ * @q_vector: pointer to q_vector
+ * @ring_container: ring info to update the itr for
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ * This functionality is controlled by the InterruptThrottleRate module
+ * parameter (see igb_param.c)
+ * NOTE: These calculations are only valid when operating in a single-
+ * queue environment.
**/
static void igb_update_itr(struct igb_q_vector *q_vector,
struct igb_ring_container *ring_container)
@@ -4205,12 +4234,12 @@ set_itr_now:
if (new_itr != q_vector->itr_val) {
/* this attempts to bias the interrupt rate towards Bulk
* by adding intermediate steps when interrupt rate is
- * increasing */
+ * increasing
+ */
new_itr = new_itr > q_vector->itr_val ?
- max((new_itr * q_vector->itr_val) /
- (new_itr + (q_vector->itr_val >> 2)),
- new_itr) :
- new_itr;
+ max((new_itr * q_vector->itr_val) /
+ (new_itr + (q_vector->itr_val >> 2)),
+ new_itr) : new_itr;
/* Don't write the value here; it resets the adapter's
* internal timer, and causes us to delay far longer than
* we should between interrupts. Instead, we write the ITR
@@ -4337,8 +4366,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
default:
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
- "partial checksum but proto=%x!\n",
- first->protocol);
+ "partial checksum but proto=%x!\n",
+ first->protocol);
}
break;
}
@@ -4361,8 +4390,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
default:
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
- "partial checksum but l4 proto=%x!\n",
- l4_hdr);
+ "partial checksum but l4 proto=%x!\n",
+ l4_hdr);
}
break;
}
@@ -4514,8 +4543,7 @@ static void igb_tx_map(struct igb_ring *tx_ring,
/* set the timestamp */
first->time_stamp = jiffies;
- /*
- * Force memory writes to complete before letting h/w know there
+ /* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered
* memory model archs, such as IA-64).
*
@@ -4536,7 +4564,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
- * at a time, it syncronizes IO on IA64/Altix systems */
+ * at a time, it synchronizes IO on IA64/Altix systems
+ */
mmiowb();
return;
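/* [Editor's aside -- the descriptor-publish ordering used above, in
 * isolation (sketch; the wmb() sits in the elided context just before
 * the tail write).]
 */
static void igb_sketch_tx_publish(struct igb_ring *tx_ring, u16 next_to_use)
{
	/* 1. descriptor/buffer_info writes happen first (in the caller) */
	wmb();				    /* 2. order them before the doorbell */
	writel(next_to_use, tx_ring->tail); /* 3. HW may fetch from here on */
	mmiowb();			    /* 4. order MMIO vs. other CPUs (IA64/Altix) */
}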
@@ -4566,11 +4595,13 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
- * but since that doesn't exist yet, just open code it. */
+ * but since that doesn't exist yet, just open code it.
+ */
smp_mb();
/* We need to check again in a case another CPU has just
- * made room available. */
+ * made room available.
+ */
if (igb_desc_unused(tx_ring) < size)
return -EBUSY;
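/* [Editor's aside -- the stop/re-check handshake above reduced to its
 * skeleton (sketch): without the barrier and the second check, a wake
 * issued by the cleaning CPU between our first check and the stop
 * could be lost, stalling the queue forever.]
 */
static int igb_sketch_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	smp_mb();	/* order the stop against the re-read below */

	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;		/* really full: stay stopped */

	/* room appeared while we were stopping: undo and carry on */
	netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
	return 0;
}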
@@ -4594,7 +4625,6 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_ring *tx_ring)
{
- struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
struct igb_tx_buffer *first;
int tso;
u32 tx_flags = 0;
@@ -4629,15 +4659,18 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
skb_tx_timestamp(skb);
- if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- !(adapter->ptp_tx_skb))) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- tx_flags |= IGB_TX_FLAGS_TSTAMP;
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
- adapter->ptp_tx_skb = skb_get(skb);
- adapter->ptp_tx_start = jiffies;
- if (adapter->hw.mac.type == e1000_82576)
- schedule_work(&adapter->ptp_tx_work);
+ if (!(adapter->ptp_tx_skb)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= IGB_TX_FLAGS_TSTAMP;
+
+ adapter->ptp_tx_skb = skb_get(skb);
+ adapter->ptp_tx_start = jiffies;
+ if (adapter->hw.mac.type == e1000_82576)
+ schedule_work(&adapter->ptp_tx_work);
+ }
}
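/* [Editor's aside -- the invariant the rework above enforces (sketch):
 * the MAC latches exactly one outgoing timestamp, so at most one skb
 * may be in flight for timestamping.  adapter->ptp_tx_skb acts as the
 * token:
 *
 *	if (!adapter->ptp_tx_skb) {              // slot free
 *		adapter->ptp_tx_skb = skb_get(skb);  // hold a reference
 *		...request HW timestamp...
 *	}                                        // else: request dropped
 *
 * until the PTP work item delivers the latched value and clears the
 * pointer.]
 */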
if (vlan_tx_tag_present(skb)) {
@@ -4694,8 +4727,7 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- /*
- * The minimum packet size with TCTL.PSP set is 17 so pad the skb
+ /* The minimum packet size with TCTL.PSP set is 17 so pad the skb
* in order to meet this minimum size requirement.
*/
if (unlikely(skb->len < 17)) {
@@ -4709,8 +4741,8 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
}
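/* [Editor's aside -- one way the elided pad body can satisfy the
 * 17-byte minimum (sketch; skb_pad() frees the skb on failure, which
 * is why NETDEV_TX_OK is still returned):
 *
 *	if (skb_pad(skb, 17 - skb->len))
 *		return NETDEV_TX_OK;
 *	skb->len = 17;
 *	skb_set_tail_pointer(skb, 17);
 * ]
 */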
/**
- * igb_tx_timeout - Respond to a Tx Hang
- * @netdev: network interface device structure
+ * igb_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
**/
static void igb_tx_timeout(struct net_device *netdev)
{
@@ -4739,13 +4771,12 @@ static void igb_reset_task(struct work_struct *work)
}
/**
- * igb_get_stats64 - Get System Network Statistics
- * @netdev: network interface device structure
- * @stats: rtnl_link_stats64 pointer
- *
+ * igb_get_stats64 - Get System Network Statistics
+ * @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
**/
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+ struct rtnl_link_stats64 *stats)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -4758,11 +4789,11 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
}
/**
- * igb_change_mtu - Change the Maximum Transfer Unit
- * @netdev: network interface device structure
- * @new_mtu: new value for maximum frame size
+ * igb_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
*
- * Returns 0 on success, negative on failure
+ * Returns 0 on success, negative on failure
**/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
@@ -4805,10 +4836,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
- * igb_update_stats - Update the board statistics counters
- * @adapter: board private structure
+ * igb_update_stats - Update the board statistics counters
+ * @adapter: board private structure
**/
-
void igb_update_stats(struct igb_adapter *adapter,
struct rtnl_link_stats64 *net_stats)
{
@@ -4823,8 +4853,7 @@ void igb_update_stats(struct igb_adapter *adapter,
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
- /*
- * Prevent stats update while adapter is being reset, or if the pci
+ /* Prevent stats update while adapter is being reset, or if the pci
* connection is down.
*/
if (adapter->link_speed == 0)
@@ -4958,7 +4987,8 @@ void igb_update_stats(struct igb_adapter *adapter,
/* Rx Errors */
/* RLEC on some newer hardware can be incorrect so build
- * our own version based on RUC and ROC */
+ * our own version based on RUC and ROC
+ */
net_stats->rx_errors = adapter->stats.rxerrc +
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.ruc + adapter->stats.roc +
@@ -5017,7 +5047,8 @@ static irqreturn_t igb_msix_other(int irq, void *data)
adapter->stats.doosync++;
/* The DMA Out of Sync is also indication of a spoof event
* in IOV mode. Check the Wrong VM Behavior register to
- * see if it is really a spoof event. */
+ * see if it is really a spoof event.
+ */
igb_check_wvbr(adapter);
}
@@ -5091,8 +5122,7 @@ static void igb_update_tx_dca(struct igb_adapter *adapter,
if (hw->mac.type != e1000_82575)
txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
- /*
- * We can enable relaxed ordering for reads, but not writes when
+ /* We can enable relaxed ordering for reads, but not writes when
* DCA is enabled. This is due to a known issue in some chipsets
* which will cause the DCA tag to be cleared.
*/
@@ -5113,8 +5143,7 @@ static void igb_update_rx_dca(struct igb_adapter *adapter,
if (hw->mac.type != e1000_82575)
rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
- /*
- * We can enable relaxed ordering for reads, but not writes when
+ /* We can enable relaxed ordering for reads, but not writes when
* DCA is enabled. This is due to a known issue in some chipsets
* which will cause the DCA tag to be cleared.
*/
@@ -5183,7 +5212,8 @@ static int __igb_notify_dca(struct device *dev, void *data)
case DCA_PROVIDER_REMOVE:
if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
/* without this a class_device is left
- * hanging around in the sysfs model */
+ * hanging around in the sysfs model
+ */
dca_remove_requester(dev);
dev_info(&pdev->dev, "DCA disabled\n");
adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
@@ -5196,12 +5226,12 @@ static int __igb_notify_dca(struct device *dev, void *data)
}
static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
- void *p)
+ void *p)
{
int ret_val;
ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
- __igb_notify_dca);
+ __igb_notify_dca);
return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
@@ -5215,40 +5245,10 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf)
eth_zero_addr(mac_addr);
igb_set_vf_mac(adapter, vf, mac_addr);
- return 0;
-}
-
-static bool igb_vfs_are_assigned(struct igb_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- struct pci_dev *vfdev;
- int dev_id;
-
- switch (adapter->hw.mac.type) {
- case e1000_82576:
- dev_id = IGB_82576_VF_DEV_ID;
- break;
- case e1000_i350:
- dev_id = IGB_I350_VF_DEV_ID;
- break;
- default:
- return false;
- }
-
- /* loop through all the VFs to see if we own any that are assigned */
- vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
- while (vfdev) {
- /* if we don't own it we don't care */
- if (vfdev->is_virtfn && vfdev->physfn == pdev) {
- /* if it is assigned we cannot release it */
- if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
- return true;
- }
-
- vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
- }
+ /* By default spoof check is enabled for all VFs */
+ adapter->vf_data[vf].spoofchk_enabled = true;
- return false;
+ return 0;
}
#endif
@@ -5273,7 +5273,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
struct vf_data_storage *vf_data = &adapter->vf_data[vf];
vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
- IGB_VF_FLAG_MULTI_PROMISC);
+ IGB_VF_FLAG_MULTI_PROMISC);
vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
@@ -5281,8 +5281,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
} else {
- /*
- * if we have hashes and we are clearing a multicast promisc
+ /* if we have hashes and we are clearing a multicast promisc
* flag we need to write the hashes to the MTA as this step
* was previously skipped
*/
@@ -5303,7 +5302,6 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
return -EINVAL;
return 0;
-
}
static int igb_set_vf_multicasts(struct igb_adapter *adapter,
@@ -5510,30 +5508,91 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev,
"Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
if (test_bit(__IGB_DOWN, &adapter->state)) {
dev_warn(&adapter->pdev->dev,
- "The VF VLAN has been set,"
- " but the PF device is not up.\n");
+ "The VF VLAN has been set, but the PF device is not up.\n");
dev_warn(&adapter->pdev->dev,
- "Bring the PF device up before"
- " attempting to use the VF device.\n");
+ "Bring the PF device up before attempting to use the VF device.\n");
}
} else {
igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
- false, vf);
+ false, vf);
igb_set_vmvir(adapter, vlan, vf);
igb_set_vmolr(adapter, vf, true);
adapter->vf_data[vf].pf_vlan = 0;
adapter->vf_data[vf].pf_qos = 0;
- }
+ }
out:
- return err;
+ return err;
+}
+
+static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+ u32 reg;
+
+ /* Find the vlan filter for this id */
+ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+ reg = rd32(E1000_VLVF(i));
+ if ((reg & E1000_VLVF_VLANID_ENABLE) &&
+ vid == (reg & E1000_VLVF_VLANID_MASK))
+ break;
+ }
+
+ if (i >= E1000_VLVF_ARRAY_SIZE)
+ i = -1;
+
+ return i;
}
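/* [Editor's aside -- VLVF register layout the new helper walks: bit 31
 * VLANID_ENABLE, bits 19:12 POOLSEL (one bit per pool, the PF pool
 * being bit vfs_allocated_count), bits 11:0 the VLAN ID.  A
 * hypothetical caller checking for pools other than the PF's might
 * look like this (sketch; mask names assumed from e1000_defines.h):]
 */
static bool igb_sketch_vlvf_shared(struct igb_adapter *adapter, int vid)
{
	struct e1000_hw *hw = &adapter->hw;
	int idx = igb_find_vlvf_entry(adapter, vid);
	u32 vlvf, pf_bit;

	if (idx < 0)
		return false;	/* no filter entry for this VID */

	vlvf = rd32(E1000_VLVF(idx));
	pf_bit = 1 << (E1000_VLVF_POOLSEL_SHIFT +
		       adapter->vfs_allocated_count);

	/* any pool-select bit other than the PF's still set? */
	return !!(vlvf & E1000_VLVF_POOLSEL_MASK & ~pf_bit);
}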
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
+ struct e1000_hw *hw = &adapter->hw;
int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
+ int err = 0;
+
+ /* If in promiscuous mode we need to make sure the PF also has
+ * the VLAN filter set.
+ */
+ if (add && (adapter->netdev->flags & IFF_PROMISC))
+ err = igb_vlvf_set(adapter, vid, add,
+ adapter->vfs_allocated_count);
+ if (err)
+ goto out;
- return igb_vlvf_set(adapter, vid, add, vf);
+ err = igb_vlvf_set(adapter, vid, add, vf);
+
+ if (err)
+ goto out;
+
+ /* Go through all the checks to see if the VLAN filter should
+ * be wiped completely.
+ */
+ if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
+ u32 vlvf, bits;
+
+ int regndx = igb_find_vlvf_entry(adapter, vid);
+ if (regndx < 0)
+ goto out;
+ /* See if any other pools are set for this VLAN filter
+ * entry other than the PF.
+ */
+ vlvf = bits = rd32(E1000_VLVF(regndx));
+ bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT +
+ adapter->vfs_allocated_count);
+ /* If the filter was removed then ensure PF pool bit
+ * is cleared if the PF only added itself to the pool
+ * because the PF is in promiscuous mode.
+ */
+ if ((vlvf & VLAN_VID_MASK) == vid &&
+ !test_bit(vid, adapter->active_vlans) &&
+ !bits)
+ igb_vlvf_set(adapter, vid, add,
+ adapter->vfs_allocated_count);
+ }
+
+out:
+ return err;
}
static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
@@ -5603,8 +5662,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
- /*
- * The VF MAC Address is stored in a packed array of bytes
+ /* The VF MAC Address is stored in a packed array of bytes
* starting at the second 32 bit word of the msg array
*/
unsigned char *addr = (char *)&msg[1];
@@ -5653,11 +5711,9 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
return;
- /*
- * until the vf completes a reset it should not be
+ /* until the vf completes a reset it should not be
* allowed to start any configuration.
*/
-
if (msgbuf[0] == E1000_VF_RESET) {
igb_vf_reset_msg(adapter, vf);
return;
@@ -5677,9 +5733,8 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
else
dev_warn(&pdev->dev,
- "VF %d attempted to override administratively "
- "set MAC address\nReload the VF driver to "
- "resume operations\n", vf);
+ "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
+ vf);
break;
case E1000_VF_SET_PROMISC:
retval = igb_set_vf_promisc(adapter, msgbuf, vf);
@@ -5694,9 +5749,8 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
retval = -1;
if (vf_data->pf_vlan)
dev_warn(&pdev->dev,
- "VF %d attempted to override administratively "
- "set VLAN tag\nReload the VF driver to "
- "resume operations\n", vf);
+ "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
+ vf);
else
retval = igb_set_vf_vlan(adapter, msgbuf, vf);
break;
@@ -5765,9 +5819,9 @@ static void igb_set_uta(struct igb_adapter *adapter)
}
/**
- * igb_intr_msi - Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
+ * igb_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
**/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
@@ -5810,9 +5864,9 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
}
/**
- * igb_intr - Legacy Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
+ * igb_intr - Legacy Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
**/
static irqreturn_t igb_intr(int irq, void *data)
{
@@ -5820,11 +5874,13 @@ static irqreturn_t igb_intr(int irq, void *data)
struct igb_q_vector *q_vector = adapter->q_vector[0];
struct e1000_hw *hw = &adapter->hw;
/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
- * need for the IMC write */
+ * need for the IMC write
+ */
u32 icr = rd32(E1000_ICR);
/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
- * not set, then the adapter didn't send an interrupt */
+ * not set, then the adapter didn't send an interrupt
+ */
if (!(icr & E1000_ICR_INT_ASSERTED))
return IRQ_NONE;
@@ -5883,15 +5939,15 @@ static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
}
/**
- * igb_poll - NAPI Rx polling callback
- * @napi: napi polling structure
- * @budget: count of how many packets we should handle
+ * igb_poll - NAPI Rx polling callback
+ * @napi: napi polling structure
+ * @budget: count of how many packets we should handle
**/
static int igb_poll(struct napi_struct *napi, int budget)
{
struct igb_q_vector *q_vector = container_of(napi,
- struct igb_q_vector,
- napi);
+ struct igb_q_vector,
+ napi);
bool clean_complete = true;
#ifdef CONFIG_IGB_DCA
@@ -5916,10 +5972,10 @@ static int igb_poll(struct napi_struct *napi, int budget)
}
/**
- * igb_clean_tx_irq - Reclaim resources after transmit completes
- * @q_vector: pointer to q_vector containing needed info
+ * igb_clean_tx_irq - Reclaim resources after transmit completes
+ * @q_vector: pointer to q_vector containing needed info
*
- * returns true if ring is completely cleaned
+ * returns true if ring is completely cleaned
**/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
@@ -6025,7 +6081,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
struct e1000_hw *hw = &adapter->hw;
/* Detect a transmit hang in hardware, this serializes the
- * check with the clearing of time_stamp and movement of i */
+ * check with the clearing of time_stamp and movement of i
+ */
clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
if (tx_buffer->next_to_watch &&
time_after(jiffies, tx_buffer->time_stamp +
@@ -6064,8 +6121,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(total_packets &&
- netif_carrier_ok(tx_ring->netdev) &&
- igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
+ netif_carrier_ok(tx_ring->netdev) &&
+ igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
@@ -6086,11 +6143,11 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
}
/**
- * igb_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
- * @old_buff: donor buffer to have page reused
+ * igb_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
*
- * Synchronizes page for reuse by the adapter
+ * Synchronizes page for reuse by the adapter
**/
static void igb_reuse_rx_page(struct igb_ring *rx_ring,
struct igb_rx_buffer *old_buff)
@@ -6150,19 +6207,19 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
}
/**
- * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
- * @skb: sk_buff to place the data into
+ * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
*
- * This function will add the data contained in rx_buffer->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
*
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
**/
static bool igb_add_rx_frag(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
@@ -6203,78 +6260,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
return igb_can_reuse_rx_page(rx_buffer, page, truesize);
}
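/* [Editor's aside -- the half-page recycling this path relies on, for
 * PAGE_SIZE < 8192 (sketch): each page is split into two IGB_RX_BUFSZ
 * halves; when one half is handed to the stack, igb_can_reuse_rx_page()
 * flips the offset so hardware is given the other half next:
 *
 *	rx_buffer->page_offset ^= IGB_RX_BUFSZ;
 *
 * and the page keeps bouncing between driver and stack with no
 * alloc/free or DMA map/unmap in the fast path.]
 */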
-static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc)
-{
- struct igb_rx_buffer *rx_buffer;
- struct sk_buff *skb;
- struct page *page;
- void *page_addr;
- unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = IGB_RX_BUFSZ;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(NET_SKB_PAD +
- NET_IP_ALIGN +
- size);
-#endif
-
- /* If we spanned a buffer we have a huge mess so test for it */
- BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
-
- rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
- page = rx_buffer->page;
- prefetchw(page);
-
- page_addr = page_address(page) + rx_buffer->page_offset;
-
- /* prefetch first cache line of first page */
- prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
-#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
-#endif
-
- /* build an skb to around the page buffer */
- skb = build_skb(page_addr, truesize);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_failed++;
- return NULL;
- }
-
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- IGB_RX_BUFSZ,
- DMA_FROM_DEVICE);
-
- /* update pointers within the skb to store the data */
- skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
- __skb_put(skb, size);
-
- /* pull timestamp out of packet data */
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
- __skb_pull(skb, IGB_TS_HDR_LEN);
- }
-
- if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
- /* hand second half of page back to the ring */
- igb_reuse_rx_page(rx_ring, rx_buffer);
- } else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
- }
-
- /* clear contents of buffer_info */
- rx_buffer->dma = 0;
- rx_buffer->page = NULL;
-
- return skb;
-}
-
static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -6305,8 +6290,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
return NULL;
}
- /*
- * we will be copying header into skb->data in
+ /* we will be copying header into skb->data in
* pskb_may_pull so it is in our interest to prefetch
* it now to avoid a possible cache miss
*/
@@ -6354,8 +6338,7 @@ static inline void igb_rx_checksum(struct igb_ring *ring,
if (igb_test_staterr(rx_desc,
E1000_RXDEXT_STATERR_TCPE |
E1000_RXDEXT_STATERR_IPE)) {
- /*
- * work around errata with sctp packets where the TCPE aka
+ /* work around errata with sctp packets where the TCPE aka
* L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
* packets, (aka let the stack check the crc32c)
*/
@@ -6386,15 +6369,15 @@ static inline void igb_rx_hash(struct igb_ring *ring,
}
/**
- * igb_is_non_eop - process handling of non-EOP buffers
- * @rx_ring: Rx ring being processed
- * @rx_desc: Rx descriptor for current buffer
- * @skb: current socket buffer containing buffer in progress
+ * igb_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: current socket buffer containing buffer in progress
*
- * This function updates next to clean. If the buffer is an EOP buffer
- * this function exits returning false, otherwise it will place the
- * sk_buff in the next buffer to be chained and return true indicating
- * that this is in fact a non-EOP buffer.
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
**/
static bool igb_is_non_eop(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc)
@@ -6414,15 +6397,15 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring,
}
/**
- * igb_get_headlen - determine size of header for LRO/GRO
- * @data: pointer to the start of the headers
- * @max_len: total length of section to find headers in
+ * igb_get_headlen - determine size of header for LRO/GRO
+ * @data: pointer to the start of the headers
+ * @max_len: total length of section to find headers in
*
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, and GRO offloads. The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO and GRO offloads. The main
+ * motivation for doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
**/
static unsigned int igb_get_headlen(unsigned char *data,
unsigned int max_len)
@@ -6473,7 +6456,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
return hdr.network - data;
/* record next protocol if header is present */
- if (!hdr.ipv4->frag_off)
+ if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
nexthdr = hdr.ipv4->protocol;
} else if (protocol == __constant_htons(ETH_P_IPV6)) {
if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
@@ -6509,8 +6492,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
hdr.network += sizeof(struct udphdr);
}
- /*
- * If everything has gone correctly hdr.network should be the
+ /* If everything has gone correctly hdr.network should be the
* data section of the packet and will be the end of the header.
* If not then it probably represents the end of the last recognized
* header.
@@ -6522,17 +6504,17 @@ static unsigned int igb_get_headlen(unsigned char *data,
}
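/* [Editor's aside -- on the frag_off change above: ip->frag_off packs
 * the DF/MF flags together with the 13-bit fragment offset, so the old
 * test (!hdr.ipv4->frag_off) wrongly skipped L4 parsing for any packet
 * with DF set.  Masking with IP_OFFSET (0x1FFF, include/net/ip.h)
 * isolates the offset:
 *
 *	if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
 *		nexthdr = hdr.ipv4->protocol;	// first/only fragment
 *
 * Only non-first fragments carry a nonzero offset, and only those lack
 * the L4 header.]
 */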
/**
- * igb_pull_tail - igb specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being adjusted
+ * igb_pull_tail - igb specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being adjusted
*
- * This function is an igb specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
+ * This function is an igb specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
*/
static void igb_pull_tail(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
@@ -6542,8 +6524,7 @@ static void igb_pull_tail(struct igb_ring *rx_ring,
unsigned char *va;
unsigned int pull_len;
- /*
- * it is valid to use page_address instead of kmap since we are
+ /* it is valid to use page_address instead of kmap since we are
* working with pages allocated out of the lomem pool per
* alloc_page(GFP_ATOMIC)
*/
@@ -6563,8 +6544,7 @@ static void igb_pull_tail(struct igb_ring *rx_ring,
va += IGB_TS_HDR_LEN;
}
- /*
- * we need the header to contain the greater of either ETH_HLEN or
+ /* we need the header to contain the greater of either ETH_HLEN or
* 60 bytes if the skb->len is less than 60 for skb_pad.
*/
pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
@@ -6580,24 +6560,23 @@ static void igb_pull_tail(struct igb_ring *rx_ring,
}
/**
- * igb_cleanup_headers - Correct corrupted or empty headers
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being fixed
+ * igb_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
*
- * Address the case where we are pulling data in on pages only
- * and as such no data is present in the skb header.
+ * Address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
*
- * In addition if skb is not at least 60 bytes we need to pad it so that
- * it is large enough to qualify as a valid Ethernet frame.
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
*
- * Returns true if an error was encountered and skb was freed.
+ * Returns true if an error was encountered and skb was freed.
**/
static bool igb_cleanup_headers(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
-
if (unlikely((igb_test_staterr(rx_desc,
E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
struct net_device *netdev = rx_ring->netdev;
@@ -6624,14 +6603,14 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
}
/**
- * igb_process_skb_fields - Populate skb header fields from Rx descriptor
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being populated
+ * igb_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
*
- * This function checks the ring, descriptor, and packet information in
- * order to populate the hash, checksum, VLAN, timestamp, protocol, and
- * other fields within the skb.
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, timestamp, protocol, and
+ * other fields within the skb.
**/
static void igb_process_skb_fields(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
@@ -6645,7 +6624,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
- if ((dev->features & NETIF_F_HW_VLAN_RX) &&
+ if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
u16 vid;
if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
@@ -6654,7 +6633,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
else
vid = le16_to_cpu(rx_desc->wb.upper.vlan);
- __vlan_hwaccel_put_tag(skb, vid);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -6690,10 +6669,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
rmb();
/* retrieve a buffer from the ring */
- if (ring_uses_build_skb(rx_ring))
- skb = igb_build_rx_buffer(rx_ring, rx_desc);
- else
- skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+ skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
/* exit if we failed to retrieve a buffer */
if (!skb)
@@ -6762,8 +6738,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
/* map page for use */
dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- /*
- * if mapping failed free memory back to system since
+ /* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
@@ -6780,17 +6755,9 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
}
-static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
-{
- if (ring_uses_build_skb(rx_ring))
- return NET_SKB_PAD + NET_IP_ALIGN;
- else
- return 0;
-}
-
/**
- * igb_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * igb_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @adapter: address of board private structure
**/
void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
@@ -6810,13 +6777,10 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
if (!igb_alloc_mapped_page(rx_ring, bi))
break;
- /*
- * Refresh the desc even if buffer_addrs didn't change
+ /* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
- bi->page_offset +
- igb_rx_offset(rx_ring));
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
rx_desc++;
bi++;
@@ -6842,8 +6806,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
/* update next to alloc since we have filled the ring */
rx_ring->next_to_alloc = i;
- /*
- * Force memory writes to complete before letting h/w
+ /* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
@@ -6928,7 +6891,7 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, rctl;
- bool enable = !!(features & NETIF_F_HW_VLAN_RX);
+ bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
if (enable) {
/* enable VLAN tag insert/strip */
@@ -6950,7 +6913,8 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
igb_rlpml_set(adapter);
}
-static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int igb_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6967,7 +6931,8 @@ static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
return 0;
}
-static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int igb_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6993,7 +6958,7 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
igb_vlan_mode(adapter->netdev, adapter->netdev->features);
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
- igb_vlan_rx_add_vid(adapter->netdev, vid);
+ igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
@@ -7004,15 +6969,24 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
mac->autoneg = 0;
/* Make sure dplx is at most 1 bit and lsb of speed is not set
- * for the switch() below to work */
+ * for the switch() below to work
+ */
if ((spd & 1) || (dplx & ~1))
goto err_inval;
- /* Fiber NIC's only allow 1000 Gbps Full duplex */
- if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
- spd != SPEED_1000 &&
- dplx != DUPLEX_FULL)
- goto err_inval;
+ /* Fiber NICs only allow 1000 Mbps Full duplex
+ * and 100 Mbps Full duplex for 100BASE-FX SFP modules
+ */
+ if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
+ switch (spd + dplx) {
+ case SPEED_10 + DUPLEX_HALF:
+ case SPEED_10 + DUPLEX_FULL:
+ case SPEED_100 + DUPLEX_HALF:
+ goto err_inval;
+ default:
+ break;
+ }
+ }
switch (spd + dplx) {
case SPEED_10 + DUPLEX_HALF:
@@ -7111,7 +7085,8 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
igb_power_up_link(adapter);
/* Release control of h/w to f/w. If f/w is AMT enabled, this
- * would have already happened in close and is redundant. */
+ * would have already happened in close and is redundant.
+ */
igb_release_hw_control(adapter);
pci_disable_device(pdev);
@@ -7173,7 +7148,8 @@ static int igb_resume(struct device *dev)
igb_reset(adapter);
/* let the f/w know that the h/w is now under the control of the
- * driver. */
+ * driver.
+ */
igb_get_hw_control(adapter);
wr32(E1000_WUS, ~0);
@@ -7309,8 +7285,7 @@ static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
+/* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
@@ -7333,13 +7308,13 @@ static void igb_netpoll(struct net_device *netdev)
#endif /* CONFIG_NET_POLL_CONTROLLER */
/**
- * igb_io_error_detected - called when PCI error is detected
- * @pdev: Pointer to PCI device
- * @state: The current pci connection state
+ * igb_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
*
- * This function is called after a PCI bus error affecting
- * this device has been detected.
- */
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ **/
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
@@ -7360,12 +7335,12 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
}
/**
- * igb_io_slot_reset - called after the pci bus has been reset.
- * @pdev: Pointer to PCI device
+ * igb_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
*
- * Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the igb_resume routine.
- */
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the igb_resume routine.
+ **/
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7393,8 +7368,9 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
err = pci_cleanup_aer_uncorrect_error_status(pdev);
if (err) {
- dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
- "failed 0x%0x\n", err);
+ dev_err(&pdev->dev,
+ "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
+ err);
/* non-fatal, continue */
}
@@ -7402,12 +7378,12 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
}
/**
- * igb_io_resume - called when traffic can start flowing again.
- * @pdev: Pointer to PCI device
+ * igb_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
*
- * This callback is called when the error recovery driver tells us that
- * its OK to resume normal operation. Implementation resembles the
- * second-half of the igb_resume routine.
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the igb_resume routine.
*/
static void igb_io_resume(struct pci_dev *pdev)
{
@@ -7424,12 +7400,13 @@ static void igb_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
/* let the f/w know that the h/w is now under the control of the
- * driver. */
+ * driver.
+ */
igb_get_hw_control(adapter);
}
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
- u8 qsel)
+ u8 qsel)
{
u32 rar_low, rar_high;
struct e1000_hw *hw = &adapter->hw;
@@ -7438,7 +7415,7 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
* from network order (big endian) to little endian
*/
rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
- ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
/* Indicate to hardware the Address is Valid. */
@@ -7456,11 +7433,12 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
}
static int igb_set_vf_mac(struct igb_adapter *adapter,
- int vf, unsigned char *mac_addr)
+ int vf, unsigned char *mac_addr)
{
struct e1000_hw *hw = &adapter->hw;
/* VF MAC addresses start at the end of the receive addresses and move
- * torwards the first, as a result a collision should not be possible */
+ * towards the first, as a result a collision should not be possible
+ */
int rar_entry = hw->mac.rar_entry_count - (vf + 1);
memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
@@ -7477,13 +7455,13 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
return -EINVAL;
adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
- dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
- " change effective.");
+ dev_info(&adapter->pdev->dev,
+ "Reload the VF driver to make this change effective.");
if (test_bit(__IGB_DOWN, &adapter->state)) {
- dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
- " but the PF device is not up.\n");
- dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
- " attempting to use the VF device.\n");
+ dev_warn(&adapter->pdev->dev,
+ "The VF MAC address has been set, but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before attempting to use the VF device.\n");
}
return igb_set_vf_mac(adapter, vf, mac);
}
@@ -7510,19 +7488,19 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
/* Calculate the rate factor values to set */
rf_int = link_speed / tx_rate;
rf_dec = (link_speed - (rf_int * tx_rate));
- rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
+ rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) /
+ tx_rate;
bcnrc_val = E1000_RTTBCNRC_RS_ENA;
- bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
- E1000_RTTBCNRC_RF_INT_MASK);
+ bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
+ E1000_RTTBCNRC_RF_INT_MASK);
bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
} else {
bcnrc_val = 0;
}
wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
- /*
- * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+ /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
* register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
*/
wr32(E1000_RTTBCNRM, 0x14);
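The rate factor programmed above is the fixed-point ratio link_speed/tx_rate: RF_INT carries the integer part and RF_DEC the fraction scaled by 2^RF_INT_SHIFT. A worked example, assuming RF_INT_SHIFT is 14 as in the igb register definitions:

	/* link_speed = 1000 Mbps, requested tx_rate = 300 Mbps */
	u32 rf_int = 1000 / 300;			/* = 3 */
	u32 rf_dec = 1000 - rf_int * 300;		/* = 100 */
	rf_dec = (rf_dec * (1 << 14)) / 300;		/* = 5461, i.e. ~0.333 * 2^14 */
	/* RTTBCNRC = RS_ENA | (3 << 14) | 5461 caps the VF at roughly 300 Mbps */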
@@ -7544,8 +7522,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
reset_rate = true;
adapter->vf_rate_link_speed = 0;
dev_info(&adapter->pdev->dev,
- "Link speed has been changed. VF Transmit "
- "rate is disabled\n");
+ "Link speed has been changed. VF Transmit rate is disabled\n");
}
for (i = 0; i < adapter->vfs_allocated_count; i++) {
@@ -7553,8 +7530,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
adapter->vf_data[i].tx_rate = 0;
igb_set_vf_rate_limit(&adapter->hw, i,
- adapter->vf_data[i].tx_rate,
- actual_link_speed);
+ adapter->vf_data[i].tx_rate,
+ actual_link_speed);
}
}
@@ -7580,6 +7557,33 @@ static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
return 0;
}
+static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
+ bool setting)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg_val, reg_offset;
+
+ if (!adapter->vfs_allocated_count)
+ return -EOPNOTSUPP;
+
+ if (vf >= adapter->vfs_allocated_count)
+ return -EINVAL;
+
+ reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
+ reg_val = rd32(reg_offset);
+ if (setting)
+ reg_val |= ((1 << vf) |
+ (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+ else
+ reg_val &= ~((1 << vf) |
+ (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+ wr32(reg_offset, reg_val);
+
+ adapter->vf_data[vf].spoofchk_enabled = setting;
+ return E1000_SUCCESS;
+}
+
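The spoof-check update above pairs two bits per VF: bit vf for MAC anti-spoofing and bit vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT for VLAN anti-spoofing. A standalone sketch of the mask (the shift of 8 is an assumption taken from the 82576 DTXSWC layout):

	#define VLAN_SPOOF_SHIFT 8	/* assumed value of E1000_DTXSWC_VLAN_SPOOF_SHIFT */

	static u32 spoofchk_mask(int vf)
	{
		/* one MAC anti-spoof bit plus one VLAN anti-spoof bit per VF */
		return (1u << vf) | (1u << (vf + VLAN_SPOOF_SHIFT));
	}

ORing the mask into the register enables checking for that VF; clearing it disables checking, exactly as the if/else above does.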
static int igb_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi)
{
@@ -7591,6 +7595,7 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
ivi->tx_rate = adapter->vf_data[vf].tx_rate;
ivi->vlan = adapter->vf_data[vf].pf_vlan;
ivi->qos = adapter->vf_data[vf].pf_qos;
+ ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
return 0;
}
@@ -7603,6 +7608,7 @@ static void igb_vmm_control(struct igb_adapter *adapter)
case e1000_82575:
case e1000_i210:
case e1000_i211:
+ case e1000_i354:
default:
/* replication is not supported for 82575 */
return;
@@ -7625,7 +7631,7 @@ static void igb_vmm_control(struct igb_adapter *adapter)
igb_vmdq_set_loopback_pf(hw, true);
igb_vmdq_set_replication_pf(hw, true);
igb_vmdq_set_anti_spoofing_pf(hw, true,
- adapter->vfs_allocated_count);
+ adapter->vfs_allocated_count);
} else {
igb_vmdq_set_loopback_pf(hw, false);
igb_vmdq_set_replication_pf(hw, false);
@@ -7645,8 +7651,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
/* force threshold to 0. */
wr32(E1000_DMCTXTH, 0);
- /*
- * DMA Coalescing high water mark needs to be greater
+ /* DMA Coalescing high water mark needs to be greater
* than the Rx threshold. Set hwm to PBA - max frame
* size in 16B units, capping it at PBA - 6KB.
*/
@@ -7659,8 +7664,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
& E1000_FCRTC_RTH_COAL_MASK);
wr32(E1000_FCRTC, reg);
- /*
- * Set the DMA Coalescing Rx threshold to PBA - 2 * max
+ /* Set the DMA Coalescing Rx threshold to PBA - 2 * max
* frame size, capping it at PBA - 10KB.
*/
dmac_thr = pba - adapter->max_frame_size / 512;
@@ -7678,11 +7682,12 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
reg |= (1000 >> 5);
/* Disable BMC-to-OS Watchdog Enable */
- reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
+ if (hw->mac.type != e1000_i354)
+ reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
+
wr32(E1000_DMACR, reg);
- /*
- * no lower threshold to disable
+ /* no lower threshold to disable
* coalescing (smart fifo) - UTRESH=0
*/
wr32(E1000_DMCRTRH, 0);
@@ -7691,15 +7696,13 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
wr32(E1000_DMCTLX, reg);
- /*
- * free space in tx packet buffer to wake from
+ /* free space in tx packet buffer to wake from
* DMA coal
*/
wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
(IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
- /*
- * make low power state decision controlled
+ /* make low power state decision controlled
* by DMA coal
*/
reg = rd32(E1000_PCIEMISC);
@@ -7713,7 +7716,8 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
}
}
-/* igb_read_i2c_byte - Reads 8 bit word over I2C
+/**
+ * igb_read_i2c_byte - Reads an 8-bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
* @dev_addr: device address
@@ -7721,9 +7725,9 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
*
* Performs byte read operation over I2C interface at
* a specified device address.
- */
+ **/
s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data)
+ u8 dev_addr, u8 *data)
{
struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
struct i2c_client *this_client = adapter->i2c_client;
@@ -7750,7 +7754,8 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
}
}
-/* igb_write_i2c_byte - Writes 8 bit word over I2C
+/**
+ * igb_write_i2c_byte - Writes an 8-bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
* @dev_addr: device address
@@ -7758,9 +7763,9 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
*
* Performs byte write operation over I2C interface at
* a specified device address.
- */
+ **/
s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data)
+ u8 dev_addr, u8 data)
{
struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
struct i2c_client *this_client = adapter->i2c_client;
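The elided read/write bodies route through the adapter's i2c_client. A plausible minimal sketch of the read side, assuming the SFP module answers plain SMBus byte transactions (not the driver's verbatim implementation):

	#include <linux/i2c.h>

	static s32 example_i2c_read(struct i2c_client *client, u8 offset, u8 *data)
	{
		s32 status = i2c_smbus_read_byte_data(client, offset);

		if (status < 0)
			return E1000_ERR_I2C;	/* assumed error convention */
		*data = status & 0xFF;
		return 0;
	}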
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 0a237507ee85..7e8c477b0ab9 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -1,5 +1,4 @@
-/*
- * PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580
+/* PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580
*
* Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
*
@@ -27,8 +26,7 @@
#define INCVALUE_MASK 0x7fffffff
#define ISGN 0x80000000
-/*
- * The 82580 timesync updates the system timer every 8ns by 8ns,
+/* The 82580 timesync updates the system timer every 8ns by 8ns,
* and this update value cannot be reprogrammed.
*
* Neither the 82576 nor the 82580 offer registers wide enough to hold
@@ -77,10 +75,7 @@
#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
#define IGB_NBITS_82580 40
-/*
- * SYSTIM read access for the 82576
- */
-
+/* SYSTIM read access for the 82576 */
static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
{
struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
@@ -97,10 +92,7 @@ static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
return val;
}
-/*
- * SYSTIM read access for the 82580
- */
-
+/* SYSTIM read access for the 82580 */
static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
{
struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
@@ -108,8 +100,7 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
u64 val;
u32 lo, hi, jk;
- /*
- * The timestamp latches on lowest register read. For the 82580
+ /* The timestamp latches on lowest register read. For the 82580
* the lowest register is SYSTIMR instead of SYSTIML. However we only
* need to provide nanosecond resolution, so we just ignore it.
*/
@@ -123,17 +114,13 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
return val;
}
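Because the timestamp latches on the lowest register read, the access order above is load-bearing. The 82580 pattern in isolation (register names from the e1000 headers; locking omitted):

	jk = rd32(E1000_SYSTIMR);	/* latch; sub-nanosecond value is discarded */
	lo = rd32(E1000_SYSTIML);
	hi = rd32(E1000_SYSTIMH);
	val = ((u64)hi << 32) | lo;	/* 40-bit nanosecond counter on the 82580 */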
-/*
- * SYSTIM read access for I210/I211
- */
-
+/* SYSTIM read access for I210/I211 */
static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
{
struct e1000_hw *hw = &adapter->hw;
u32 sec, nsec, jk;
- /*
- * The timestamp latches on lowest register read. For I210/I211, the
+ /* The timestamp latches on lowest register read. For I210/I211, the
* lowest register is SYSTIMR. Since we only need to provide nanosecond
* resolution, we can ignore it.
*/
@@ -150,8 +137,7 @@ static void igb_ptp_write_i210(struct igb_adapter *adapter,
{
struct e1000_hw *hw = &adapter->hw;
- /*
- * Writing the SYSTIMR register is not necessary as it only provides
+ /* Writing the SYSTIMR register is not necessary as it only provides
* sub-nanosecond resolution.
*/
wr32(E1000_SYSTIML, ts->tv_nsec);
@@ -185,6 +171,7 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
switch (adapter->hw.mac.type) {
case e1000_82576:
case e1000_82580:
+ case e1000_i354:
case e1000_i350:
spin_lock_irqsave(&adapter->tmreg_lock, flags);
@@ -207,10 +194,7 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
}
}
-/*
- * PTP clock operations
- */
-
+/* PTP clock operations */
static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
@@ -387,7 +371,7 @@ static int igb_ptp_enable(struct ptp_clock_info *ptp,
*
* This work function polls the TSYNCTXCTL valid bit to determine when a
* timestamp has been taken for the current stored skb.
- */
+ **/
void igb_ptp_tx_work(struct work_struct *work)
{
struct igb_adapter *adapter = container_of(work, struct igb_adapter,
@@ -437,7 +421,7 @@ static void igb_ptp_overflow_check(struct work_struct *work)
* dropped an Rx packet that was timestamped when the ring is full. The
* particular error is rare but leaves the device in a state unable to timestamp
* any future packets.
- */
+ **/
void igb_ptp_rx_hang(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -481,7 +465,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
* If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we
 * allow only one such packet into the queue.
- */
+ **/
void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -506,15 +490,14 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
* This function is meant to retrieve a timestamp from the first buffer of an
* incoming frame. The value is stored in little endian format starting on
* byte 8.
- */
+ **/
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
unsigned char *va,
struct sk_buff *skb)
{
__le64 *regval = (__le64 *)va;
- /*
- * The timestamp is recorded in little endian format.
+ /* The timestamp is recorded in little endian format.
* DWORD: 0 1 2 3
* Field: Reserved Reserved SYSTIML SYSTIMH
*/
@@ -529,7 +512,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
*
* This function is meant to retrieve a timestamp from the internal registers
* of the adapter and store it in the skb.
- */
+ **/
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
struct sk_buff *skb)
{
@@ -537,8 +520,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
struct e1000_hw *hw = &adapter->hw;
u64 regval;
- /*
- * If this bit is set, then the RX registers contain the time stamp. No
+ /* If this bit is set, then the RX registers contain the time stamp. No
* other packet will be time stamped until we read these registers, so
* read the registers to make them available again. Because only one
* packet can be time stamped at a time, we know that the register
@@ -574,7 +556,6 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
* type has to be specified. Matching the kind of event packet is
* not supported, with the exception of "all V2 events regardless of
* level 2 or 4".
- *
**/
int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
@@ -655,10 +636,9 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
return 0;
}
- /*
- * Per-packet timestamping only works if all packets are
+ /* Per-packet timestamping only works if all packets are
* timestamped, so enable timestamping in all packets as
- * long as one rx filter was configured.
+ * long as one Rx filter was configured.
*/
if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
@@ -756,6 +736,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
break;
case e1000_82580:
+ case e1000_i354:
case e1000_i350:
snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
adapter->ptp_caps.owner = THIS_MODULE;
@@ -844,6 +825,7 @@ void igb_ptp_stop(struct igb_adapter *adapter)
switch (adapter->hw.mac.type) {
case e1000_82576:
case e1000_82580:
+ case e1000_i354:
case e1000_i350:
cancel_delayed_work_sync(&adapter->ptp_overflow_work);
break;
@@ -888,6 +870,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)
wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
break;
case e1000_82580:
+ case e1000_i354:
case e1000_i350:
case e1000_i210:
case e1000_i211:
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index d60cd4393415..93eb7ee06d3e 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -116,7 +116,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
else
vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
if (test_bit(vid, adapter->active_vlans))
- __vlan_hwaccel_put_tag(skb, vid);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
napi_gro_receive(&adapter->rx_ring->napi, skb);
@@ -447,7 +447,6 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
-
if (!tx_ring->desc)
goto err;
@@ -488,7 +487,6 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
-
if (!rx_ring->desc)
goto err;
@@ -1232,7 +1230,8 @@ static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
e1000_rlpml_set_vf(hw, max_frame_size);
}
-static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -1245,7 +1244,8 @@ static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
return 0;
}
-static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -1264,7 +1264,7 @@ static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
u16 vid;
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
- igbvf_vlan_rx_add_vid(adapter->netdev, vid);
+ igbvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
/**
@@ -2724,9 +2724,9 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_RXCSUM;
netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER;
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
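Every VLAN conversion in this series follows the same shape: the 802.1Q protocol now travels alongside the VID through the ndo callbacks and __vlan_hwaccel_put_tag(). A generic sketch of the updated callback signature (example names only):

	static int example_vlan_rx_add_vid(struct net_device *netdev,
					   __be16 proto, u16 vid)
	{
		/* proto is htons(ETH_P_8021Q) for the C-TAG offloads converted
		 * here; drivers that only support C-TAGs may simply ignore it
		 */
		return 0;	/* program the VLAN filter for vid as before */
	}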
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index ea4808373435..fce3e92f9d11 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -101,8 +101,10 @@ static void ixgb_tx_timeout_task(struct work_struct *work);
static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
-static int ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static int ixgb_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto, u16 vid);
+static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -332,8 +334,8 @@ ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
* Tx VLAN insertion does not work per HW design when Rx stripping is
* disabled.
*/
- if (!(features & NETIF_F_HW_VLAN_RX))
- features &= ~NETIF_F_HW_VLAN_TX;
+ if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features;
}
@@ -344,7 +346,7 @@ ixgb_set_features(struct net_device *netdev, netdev_features_t features)
struct ixgb_adapter *adapter = netdev_priv(netdev);
netdev_features_t changed = features ^ netdev->features;
- if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_RX)))
+ if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_CTAG_RX)))
return 0;
adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
@@ -479,10 +481,10 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features = NETIF_F_SG |
NETIF_F_TSO |
NETIF_F_HW_CSUM |
- NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX;
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_FILTER;
+ NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_RXCSUM;
if (pci_using_dac) {
@@ -717,14 +719,11 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
txdr->size = ALIGN(txdr->size, 4096);
txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!txdr->desc) {
vfree(txdr->buffer_info);
- netif_err(adapter, probe, adapter->netdev,
- "Unable to allocate transmit descriptor memory\n");
return -ENOMEM;
}
- memset(txdr->desc, 0, txdr->size);
txdr->next_to_use = 0;
txdr->next_to_clean = 0;
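Passing __GFP_ZERO to dma_alloc_coherent() makes the allocator hand back pre-zeroed memory, which is why the follow-up memset(txdr->desc, 0, txdr->size) could be dropped from this path.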
@@ -807,8 +806,6 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
if (!rxdr->desc) {
vfree(rxdr->buffer_info);
- netif_err(adapter, probe, adapter->netdev,
- "Unable to allocate receive descriptors\n");
return -ENOMEM;
}
memset(rxdr->desc, 0, rxdr->size);
@@ -1145,7 +1142,7 @@ ixgb_set_multi(struct net_device *netdev)
}
alloc_failed:
- if (netdev->features & NETIF_F_HW_VLAN_RX)
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
ixgb_vlan_strip_enable(adapter);
else
ixgb_vlan_strip_disable(adapter);
@@ -2085,8 +2082,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
skb->protocol = eth_type_trans(skb, netdev);
if (status & IXGB_RX_DESC_STATUS_VP)
- __vlan_hwaccel_put_tag(skb,
- le16_to_cpu(rx_desc->special));
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(rx_desc->special));
netif_receive_skb(skb);
@@ -2159,6 +2156,10 @@ map_skb:
skb->data,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
rx_desc = IXGB_RX_DESC(*rx_ring, i);
rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
@@ -2168,7 +2169,8 @@ map_skb:
rx_desc->status = 0;
- if (++i == rx_ring->count) i = 0;
+ if (++i == rx_ring->count)
+ i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
@@ -2209,7 +2211,7 @@ ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
}
static int
-ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+ixgb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
u32 vfta, index;
@@ -2226,7 +2228,7 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
}
static int
-ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+ixgb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
u32 vfta, index;
@@ -2248,7 +2250,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
u16 vid;
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
- ixgb_vlan_rx_add_vid(adapter->netdev, vid);
+ ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index a8e10cff7a89..ca932387a80f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -740,6 +740,11 @@ extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
extern void ixgbe_dbg_init(void);
extern void ixgbe_dbg_exit(void);
+#else
+static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
+static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
+static inline void ixgbe_dbg_init(void) {}
+static inline void ixgbe_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS */
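Supplying empty inline stubs for the !CONFIG_DEBUG_FS case lets every caller drop its #ifdef CONFIG_DEBUG_FS guards, which is exactly what the later ixgbe_main.c hunks around ixgbe_dbg_adapter_init/exit and ixgbe_dbg_init/exit do.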
static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index d0113fc97b6f..4a5bfb6b3af0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1305,6 +1305,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.release_swfw_sync = &ixgbe_release_swfw_sync,
.get_thermal_sensor_data = NULL,
.init_thermal_sensor_thresh = NULL,
+ .mng_fw_enabled = NULL,
};
static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 203a00c24330..0b82d38bc97d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -59,12 +59,34 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
+{
+ u32 fwsm, manc, factps;
+
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
+ return false;
+
+ manc = IXGBE_READ_REG(hw, IXGBE_MANC);
+ if (!(manc & IXGBE_MANC_RCV_TCO_EN))
+ return false;
+
+ factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+ if (factps & IXGBE_FACTPS_MNGCG)
+ return false;
+
+ return true;
+}
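ixgbe_mng_enabled() reports pass-through manageability firmware: the FWSM mode must be PT, the MANC receive-TCO path enabled, and the manageability clock not gated. The result is cached once through the new ops hook, mirroring the later ixgbe_main.c hunk:

	/* cache the MNG FW state so later paths need not re-read the registers */
	if (hw->mac.ops.mng_fw_enabled)
		hw->mng_fw_enabled = hw->mac.ops.mng_fw_enabled(hw);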
+
static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
- /* enable the laser control functions for SFP+ fiber */
- if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
+	/* enable the laser control functions for SFP+ fiber,
+	 * but only when MNG FW is not enabled
+	 */
+ if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ !hw->mng_fw_enabled) {
mac->ops.disable_tx_laser =
&ixgbe_disable_tx_laser_multispeed_fiber;
mac->ops.enable_tx_laser =
@@ -145,9 +167,9 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
}
/* Restart DSP and set SFI mode */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
- IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL));
-
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) |
+ IXGBE_AUTOC_LMS_10G_SERIAL));
+ hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
ret_val = ixgbe_reset_pipeline_82599(hw);
if (got_lock) {
@@ -244,6 +266,8 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
/* Determine 1G link capabilities off of SFP+ type */
if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
@@ -563,7 +587,8 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
return status;
/* Flap the tx laser if it has not already been done */
- hw->mac.ops.flap_tx_laser(hw);
+ if (hw->mac.ops.flap_tx_laser)
+ hw->mac.ops.flap_tx_laser(hw);
/*
* Wait for the controller to acquire link. Per IEEE 802.3ap,
@@ -615,7 +640,8 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
return status;
/* Flap the tx laser if it has not already been done */
- hw->mac.ops.flap_tx_laser(hw);
+ if (hw->mac.ops.flap_tx_laser)
+ hw->mac.ops.flap_tx_laser(hw);
/* Wait for the link partner to also set speed */
msleep(100);
@@ -777,12 +803,9 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete)
{
s32 status = 0;
- u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc, pma_pmd_1g, link_mode, start_autoc;
u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
- u32 start_autoc = autoc;
u32 orig_autoc = 0;
- u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
- u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
u32 links_reg;
u32 i;
@@ -805,9 +828,14 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
if (hw->mac.orig_link_settings_stored)
- orig_autoc = hw->mac.orig_autoc;
+ autoc = hw->mac.orig_autoc;
else
- orig_autoc = autoc;
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ orig_autoc = autoc;
+ start_autoc = hw->mac.cached_autoc;
+ link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
+ pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
@@ -861,6 +889,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
/* Restart link */
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+ hw->mac.cached_autoc = autoc;
ixgbe_reset_pipeline_82599(hw);
if (got_lock)
@@ -932,7 +961,8 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
ixgbe_link_speed link_speed;
s32 status;
- u32 ctrl, i, autoc, autoc2;
+ u32 ctrl, i, autoc2;
+ u32 curr_lms;
bool link_up = false;
/* Call adapter stop to disable tx/rx and clear interrupts */
@@ -964,6 +994,13 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
hw->phy.ops.reset(hw);
+ /* remember AUTOC from before we reset */
+ if (hw->mac.cached_autoc)
+ curr_lms = hw->mac.cached_autoc & IXGBE_AUTOC_LMS_MASK;
+ else
+ curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) &
+ IXGBE_AUTOC_LMS_MASK;
+
mac_reset_top:
/*
* Issue global reset to the MAC. Needs to be SW reset if link is up.
@@ -1012,14 +1049,35 @@ mac_reset_top:
* stored off yet. Otherwise restore the stored original
* values since the reset operation sets back to defaults.
*/
- autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+
+ /* Enable link if disabled in NVM */
+ if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
+ autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
if (hw->mac.orig_link_settings_stored == false) {
- hw->mac.orig_autoc = autoc;
+ hw->mac.orig_autoc = hw->mac.cached_autoc;
hw->mac.orig_autoc2 = autoc2;
hw->mac.orig_link_settings_stored = true;
} else {
- if (autoc != hw->mac.orig_autoc) {
+
+		/* If MNG FW is running on a multi-speed device that
+		 * doesn't autoneg without driver support, we need to
+		 * leave LMS in the state it was in before the MAC reset.
+		 * Likewise, if we support WoL we don't want to change
+		 * the LMS state either.
+		 */
+ if ((hw->phy.multispeed_fiber && hw->mng_fw_enabled) ||
+ hw->wol_enabled)
+ hw->mac.orig_autoc =
+ (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
+ curr_lms;
+
+ if (hw->mac.cached_autoc != hw->mac.orig_autoc) {
/* Need SW/FW semaphore around AUTOC writes if LESM is
* on, likewise reset_pipeline requires us to hold
* this lock as it also writes to AUTOC.
@@ -1035,6 +1093,7 @@ mac_reset_top:
}
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+ hw->mac.cached_autoc = hw->mac.orig_autoc;
ixgbe_reset_pipeline_82599(hw);
if (got_lock)
@@ -2135,10 +2194,19 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
**/
s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
{
- s32 i, autoc_reg, ret_val;
- s32 anlp1_reg = 0;
+ s32 ret_val;
+ u32 anlp1_reg = 0;
+ u32 i, autoc_reg, autoc2_reg;
+
+ /* Enable link if disabled in NVM */
+ autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
+ autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ }
- autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc_reg = hw->mac.cached_autoc;
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
@@ -2216,7 +2284,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.release_swfw_sync = &ixgbe_release_swfw_sync,
.get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
.init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
-
+ .mng_fw_enabled = &ixgbe_mng_enabled,
};
static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 99e472ebaa75..9bcdeb89af5a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -592,6 +592,36 @@ s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
return 0;
}
+enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status)
+{
+ switch (link_status & IXGBE_PCI_LINK_WIDTH) {
+ case IXGBE_PCI_LINK_WIDTH_1:
+ return ixgbe_bus_width_pcie_x1;
+ case IXGBE_PCI_LINK_WIDTH_2:
+ return ixgbe_bus_width_pcie_x2;
+ case IXGBE_PCI_LINK_WIDTH_4:
+ return ixgbe_bus_width_pcie_x4;
+ case IXGBE_PCI_LINK_WIDTH_8:
+ return ixgbe_bus_width_pcie_x8;
+ default:
+ return ixgbe_bus_width_unknown;
+ }
+}
+
+enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
+{
+ switch (link_status & IXGBE_PCI_LINK_SPEED) {
+ case IXGBE_PCI_LINK_SPEED_2500:
+ return ixgbe_bus_speed_2500;
+ case IXGBE_PCI_LINK_SPEED_5000:
+ return ixgbe_bus_speed_5000;
+ case IXGBE_PCI_LINK_SPEED_8000:
+ return ixgbe_bus_speed_8000;
+ default:
+ return ixgbe_bus_speed_unknown;
+ }
+}
+
/**
* ixgbe_get_bus_info_generic - Generic set PCI bus info
* @hw: pointer to hardware structure
@@ -610,35 +640,8 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS,
&link_status);
- switch (link_status & IXGBE_PCI_LINK_WIDTH) {
- case IXGBE_PCI_LINK_WIDTH_1:
- hw->bus.width = ixgbe_bus_width_pcie_x1;
- break;
- case IXGBE_PCI_LINK_WIDTH_2:
- hw->bus.width = ixgbe_bus_width_pcie_x2;
- break;
- case IXGBE_PCI_LINK_WIDTH_4:
- hw->bus.width = ixgbe_bus_width_pcie_x4;
- break;
- case IXGBE_PCI_LINK_WIDTH_8:
- hw->bus.width = ixgbe_bus_width_pcie_x8;
- break;
- default:
- hw->bus.width = ixgbe_bus_width_unknown;
- break;
- }
-
- switch (link_status & IXGBE_PCI_LINK_SPEED) {
- case IXGBE_PCI_LINK_SPEED_2500:
- hw->bus.speed = ixgbe_bus_speed_2500;
- break;
- case IXGBE_PCI_LINK_SPEED_5000:
- hw->bus.speed = ixgbe_bus_speed_5000;
- break;
- default:
- hw->bus.speed = ixgbe_bus_speed_unknown;
- break;
- }
+ hw->bus.width = ixgbe_convert_bus_width(link_status);
+ hw->bus.speed = ixgbe_convert_bus_speed(link_status);
mac->ops.set_lan_id(hw);
@@ -1125,7 +1128,7 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
}
for (i = 0; i < words; i++) {
- eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
+ eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
IXGBE_EEPROM_RW_REG_START;
IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index bc3948ead6e0..22eee38868f1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -40,6 +40,8 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
u32 pba_num_size);
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
+enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status);
+enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status);
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index c3f1afd86906..d3754722adb4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -231,6 +231,10 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_sfp_type_lr:
case ixgbe_sfp_type_srlr_core0:
case ixgbe_sfp_type_srlr_core1:
+ case ixgbe_sfp_type_1g_sx_core0:
+ case ixgbe_sfp_type_1g_sx_core1:
+ case ixgbe_sfp_type_1g_lx_core0:
+ case ixgbe_sfp_type_1g_lx_core1:
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
@@ -246,12 +250,6 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
break;
- case ixgbe_sfp_type_1g_sx_core0:
- case ixgbe_sfp_type_1g_sx_core1:
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_FIBRE;
- break;
case ixgbe_sfp_type_unknown:
default:
ecmd->supported |= SUPPORTED_FIBRE;
@@ -442,7 +440,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
- regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+ regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
+ hw->device_id;
/* General Registers */
regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
@@ -1611,16 +1610,9 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
u32 reg_data;
- /* X540 needs to set the MACC.FLU bit to force link up */
- if (adapter->hw.mac.type == ixgbe_mac_X540) {
- reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
- reg_data |= IXGBE_MACC_FLU;
- IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
- }
- /* right now we only support MAC loopback in the driver */
- reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
/* Setup MAC loopback */
+ reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
reg_data |= IXGBE_HLREG0_LPBK;
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
@@ -1628,10 +1620,19 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
- reg_data = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- reg_data &= ~IXGBE_AUTOC_LMS_MASK;
- reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
+ /* X540 needs to set the MACC.FLU bit to force link up */
+ if (adapter->hw.mac.type == ixgbe_mac_X540) {
+ reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
+ reg_data |= IXGBE_MACC_FLU;
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
+ } else {
+ if (hw->mac.orig_autoc) {
+ reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
+ } else {
+ return 10;
+ }
+ }
IXGBE_WRITE_FLUSH(hw);
usleep_range(10000, 20000);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index db5611ae407e..d30fbdd81fca 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -63,7 +63,7 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "3.11.33-k"
+#define DRV_VERSION "3.13.10-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2013 Intel Corporation.";
@@ -149,6 +149,52 @@ MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
+ u32 reg, u16 *value)
+{
+ int pos = 0;
+ struct pci_dev *parent_dev;
+ struct pci_bus *parent_bus;
+
+ parent_bus = adapter->pdev->bus->parent;
+ if (!parent_bus)
+ return -1;
+
+ parent_dev = parent_bus->self;
+ if (!parent_dev)
+ return -1;
+
+ pos = pci_find_capability(parent_dev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return -1;
+
+ pci_read_config_word(parent_dev, pos + reg, value);
+ return 0;
+}
+
+static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u16 link_status = 0;
+ int err;
+
+ hw->bus.type = ixgbe_bus_type_pci_express;
+
+ /* Get the negotiated link width and speed from PCI config space of the
+ * parent, as this device is behind a switch
+ */
+ err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);
+
+ /* assume caller will handle error case */
+ if (err)
+ return err;
+
+ hw->bus.width = ixgbe_convert_bus_width(link_status);
+ hw->bus.speed = ixgbe_convert_bus_speed(link_status);
+
+ return 0;
+}
+
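The literal 18 passed above is the offset of the PCIe Link Status register inside the Express capability, i.e. PCI_EXP_LNKSTA. An equivalent sketch using the kernel's capability helper (an assumption for illustration, not the patch's code):

	static int example_parent_link_status(struct pci_dev *pdev, u16 *link_status)
	{
		struct pci_bus *bus = pdev->bus->parent;
		struct pci_dev *parent = bus ? bus->self : NULL;

		if (!parent)
			return -1;
		/* PCI_EXP_LNKSTA == 18, the offset open-coded above */
		return pcie_capability_read_word(parent, PCI_EXP_LNKSTA, link_status);
	}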
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
@@ -1337,7 +1383,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
return hdr.network - data;
/* record next protocol if header is present */
- if (!hdr.ipv4->frag_off)
+ if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
nexthdr = hdr.ipv4->protocol;
} else if (protocol == __constant_htons(ETH_P_IPV6)) {
if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
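The frag_off fix above matters because iph->frag_off packs three flag bits and a 13-bit fragment offset; only a zero offset (an unfragmented packet or the first fragment) carries the transport header. The check in isolation:

	#include <net/ip.h>

	/* true for unfragmented packets and first fragments only */
	static bool has_l4_header(const struct iphdr *iph)
	{
		/* IP_OFFSET masks the 13-bit offset; the DF/MF flags are ignored */
		return !(iph->frag_off & htons(IP_OFFSET));
	}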
@@ -1442,10 +1488,10 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
- if ((dev->features & NETIF_F_HW_VLAN_RX) &&
+ if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
- __vlan_hwaccel_put_tag(skb, vid);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -2049,6 +2095,9 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
*/
/* what was last interrupt timeslice? */
timepassed_us = q_vector->itr >> 2;
+ if (timepassed_us == 0)
+ return;
+
bytes_perint = bytes / timepassed_us; /* bytes/usec */
switch (itr_setting) {
@@ -2405,6 +2454,16 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
* with the write to EICR.
*/
eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
+
+	/* The lower 16 bits of the EICR register are for the queue interrupts,
+	 * which should be masked here in order to not accidentally clear them if
+ * the bits are high when ixgbe_msix_other is called. There is a race
+ * condition otherwise which results in possible performance loss
+ * especially if the ixgbe_msix_other interrupt is triggering
+ * consistently (as it would when PPS is turned on for the X540 device)
+ */
+ eicr &= 0xFFFF0000;
+
IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
if (eicr & IXGBE_EICR_LSC)
@@ -3421,7 +3480,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
hw->mac.ops.enable_rx_dma(hw, rxctrl);
}
-static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -3433,7 +3493,8 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
return 0;
}
-static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -3538,10 +3599,10 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
u16 vid;
- ixgbe_vlan_rx_add_vid(adapter->netdev, 0);
+ ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
- ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
+ ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
/**
@@ -3676,7 +3737,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- if (netdev->features & NETIF_F_HW_VLAN_RX)
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
ixgbe_vlan_strip_enable(adapter);
else
ixgbe_vlan_strip_disable(adapter);
@@ -5077,14 +5138,14 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
netif_device_detach(netdev);
+ rtnl_lock();
if (netif_running(netdev)) {
- rtnl_lock();
ixgbe_down(adapter);
ixgbe_free_irq(adapter);
ixgbe_free_all_tx_resources(adapter);
ixgbe_free_all_rx_resources(adapter);
- rtnl_unlock();
}
+ rtnl_unlock();
ixgbe_clear_interrupt_scheme(adapter);
@@ -6425,9 +6486,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_tx_buffer *first;
int tso;
u32 tx_flags = 0;
-#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
unsigned short f;
-#endif
u16 count = TXD_USE_COUNT(skb_headlen(skb));
__be16 protocol = skb->protocol;
u8 hdr_len = 0;
@@ -6439,12 +6498,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
* + 1 desc for context descriptor,
* otherwise try next time
*/
-#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
- count += skb_shinfo(skb)->nr_frags;
-#endif
+
if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
@@ -6983,7 +7039,7 @@ static int ixgbe_set_features(struct net_device *netdev,
break;
}
- if (features & NETIF_F_HW_VLAN_RX)
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
ixgbe_vlan_strip_enable(adapter);
else
ixgbe_vlan_strip_disable(adapter);
@@ -7007,7 +7063,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
int err;
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
- return -EOPNOTSUPP;
+ return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
/* Hardware does not support aging addresses so if a
* ndm_state is given only allow permanent addresses
@@ -7038,44 +7094,6 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
}
-static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr)
-{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
- int err = -EOPNOTSUPP;
-
- if (ndm->ndm_state & NUD_PERMANENT) {
- pr_info("%s: FDB only supports static addresses\n",
- ixgbe_driver_name);
- return -EINVAL;
- }
-
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- if (is_unicast_ether_addr(addr))
- err = dev_uc_del(dev, addr);
- else if (is_multicast_ether_addr(addr))
- err = dev_mc_del(dev, addr);
- else
- err = -EINVAL;
- }
-
- return err;
-}
-
-static int ixgbe_ndo_fdb_dump(struct sk_buff *skb,
- struct netlink_callback *cb,
- struct net_device *dev,
- int idx)
-{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
-
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
-
- return idx;
-}
-
static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh)
{
@@ -7171,8 +7189,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_features = ixgbe_set_features,
.ndo_fix_features = ixgbe_fix_features,
.ndo_fdb_add = ixgbe_ndo_fdb_add,
- .ndo_fdb_del = ixgbe_ndo_fdb_del,
- .ndo_fdb_dump = ixgbe_ndo_fdb_dump,
.ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
};
@@ -7202,9 +7218,19 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
/* only support first port */
if (hw->bus.func != 0)
break;
+ case IXGBE_SUBDEV_ID_82599_SP_560FLR:
case IXGBE_SUBDEV_ID_82599_SFP:
case IXGBE_SUBDEV_ID_82599_RNDC:
case IXGBE_SUBDEV_ID_82599_ECNA_DP:
+ case IXGBE_SUBDEV_ID_82599_LOM_SFP:
+ is_wol_supported = 1;
+ break;
+ }
+ break;
+ case IXGBE_DEV_ID_82599EN_SFP:
+ /* Only this subdevice supports WOL */
+ switch (subdevice_id) {
+ case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
is_wol_supported = 1;
break;
}
@@ -7369,6 +7395,10 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
+ /* Cache if MNG FW is up so we don't have to read the REG later */
+ if (hw->mac.ops.mng_fw_enabled)
+ hw->mng_fw_enabled = hw->mac.ops.mng_fw_enabled(hw);
+
/* Make it possible the adapter to be woken up via WOL */
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
@@ -7425,9 +7455,9 @@ skip_sriov:
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_RXHASH |
@@ -7521,7 +7551,9 @@ skip_sriov:
/* WOL not supported for all devices */
adapter->wol = 0;
hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
- if (ixgbe_wol_supported(adapter, pdev->device, pdev->subsystem_device))
+ hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
+ pdev->subsystem_device);
+ if (hw->wol_enabled)
adapter->wol = IXGBE_WUFC_MAG;
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
@@ -7532,10 +7564,13 @@ skip_sriov:
/* pick up the PCI bus settings for reporting later */
hw->mac.ops.get_bus_info(hw);
+ if (hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP)
+ ixgbe_get_parent_bus_info(adapter);
/* print bus type/speed/width info */
e_dev_info("(PCI Express:%s:%s) %pM\n",
- (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
+ (hw->bus.speed == ixgbe_bus_speed_8000 ? "8.0GT/s" :
+ hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
"Unknown"),
(hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
@@ -7615,9 +7650,13 @@ skip_sriov:
e_err(probe, "failed to allocate sysfs resources\n");
#endif /* CONFIG_IXGBE_HWMON */
-#ifdef CONFIG_DEBUG_FS
ixgbe_dbg_adapter_init(adapter);
-#endif /* CONFIG_DEBUG_FS */
+
+ /* Need link setup for MNG FW, else wait for IXGBE_UP */
+ if (hw->mng_fw_enabled && hw->mac.ops.setup_link)
+ hw->mac.ops.setup_link(hw,
+ IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
+ true);
return 0;
@@ -7653,9 +7692,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
-#ifdef CONFIG_DEBUG_FS
ixgbe_dbg_adapter_exit(adapter);
-#endif /*CONFIG_DEBUG_FS */
set_bit(__IXGBE_DOWN, &adapter->state);
cancel_work_sync(&adapter->service_task);
@@ -7918,16 +7955,19 @@ static int __init ixgbe_init_module(void)
pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
pr_info("%s\n", ixgbe_copyright);
-#ifdef CONFIG_DEBUG_FS
ixgbe_dbg_init();
-#endif /* CONFIG_DEBUG_FS */
+
+ ret = pci_register_driver(&ixgbe_driver);
+ if (ret) {
+ ixgbe_dbg_exit();
+ return ret;
+ }
#ifdef CONFIG_IXGBE_DCA
dca_register_notify(&dca_notifier);
#endif
- ret = pci_register_driver(&ixgbe_driver);
- return ret;
+ return 0;
}
module_init(ixgbe_init_module);
@@ -7945,9 +7985,7 @@ static void __exit ixgbe_exit_module(void)
#endif
pci_unregister_driver(&ixgbe_driver);
-#ifdef CONFIG_DEBUG_FS
ixgbe_dbg_exit();
-#endif /* CONFIG_DEBUG_FS */
rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 060d2ad2ac96..e5691ccbce9d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -956,6 +956,13 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type =
ixgbe_sfp_type_1g_sx_core1;
+ } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_lx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_lx_core1;
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
@@ -1043,6 +1050,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (comp_codes_10g == 0 &&
!(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
@@ -1058,10 +1067,12 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->mac.ops.get_device_caps(hw, &enforce_sfp);
if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
- !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
- (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) ||
- (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0) ||
- (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1))) {
+ !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel) {
status = 0;
@@ -1125,10 +1136,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
* SR modules
*/
if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
sfp_type == ixgbe_sfp_type_1g_sx_core0)
sfp_type = ixgbe_sfp_type_srlr_core0;
else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
sfp_type == ixgbe_sfp_type_1g_sx_core1)
sfp_type = ixgbe_sfp_type_srlr_core1;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index d44b4d21268c..1e7d587c4e57 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -35,7 +35,7 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
-#ifdef NETIF_F_HW_VLAN_TX
+#ifdef NETIF_F_HW_VLAN_CTAG_TX
#include <linux/if_vlan.h>
#endif
@@ -661,13 +661,7 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
bool enable = ((event_mask & 0x10000000U) != 0);
if (enable) {
- eth_random_addr(vf_mac_addr);
- e_info(probe, "IOV: VF %d is enabled MAC %pM\n",
- vfn, vf_mac_addr);
- /*
- * Store away the VF "permananet" MAC address, it will ask
- * for it later.
- */
+ eth_zero_addr(vf_mac_addr);
memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
}
@@ -688,7 +682,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
ixgbe_vf_reset_event(adapter, vf);
/* set vf mac address */
- ixgbe_set_vf_mac(adapter, vf, vf_mac);
+ if (!is_zero_ether_addr(vf_mac))
+ ixgbe_set_vf_mac(adapter, vf, vf_mac);
vf_shift = vf % 32;
reg_offset = vf / 32;
@@ -729,8 +724,16 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
/* reply to reset with ack and vf mac address */
- msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
- memcpy(addr, vf_mac, ETH_ALEN);
+ msgbuf[0] = IXGBE_VF_RESET;
+ if (!is_zero_ether_addr(vf_mac)) {
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
+ memcpy(addr, vf_mac, ETH_ALEN);
+ } else {
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+ dev_warn(&adapter->pdev->dev,
+ "VF %d has no MAC address assigned, you may have to assign one manually\n",
+ vf);
+ }
/*
* Piggyback the multicast filter type so VF can compute the
@@ -1049,6 +1052,12 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
return -EINVAL;
if (vlan || qos) {
+ if (adapter->vfinfo[vf].pf_vlan)
+ err = ixgbe_set_vf_vlan(adapter, false,
+ adapter->vfinfo[vf].pf_vlan,
+ vf);
+ if (err)
+ goto out;
err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
if (err)
goto out;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 6652e96c352d..70c6aa3d3f95 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -56,10 +56,13 @@
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
+#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B
#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
+#define IXGBE_SUBDEV_ID_82599_LOM_SFP 0x8976
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
#define IXGBE_DEV_ID_82599EN_SFP 0x1557
+#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1 0x0001
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
@@ -729,6 +732,13 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
#define IXGBE_LSWFW 0x15014
+/* Management Bit Fields and Masks */
+#define IXGBE_MANC_RCV_TCO_EN 0x00020000 /* Rcv TCO packet enable */
+
+/* Firmware Semaphore Register */
+#define IXGBE_FWSM_MODE_MASK 0xE
+#define IXGBE_FWSM_FW_MODE_PT 0x4
+
/* ARC Subsystem registers */
#define IXGBE_HICR 0x15F00
#define IXGBE_FWSTS 0x15F0C
@@ -1019,6 +1029,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
/* FACTPS */
+#define IXGBE_FACTPS_MNGCG 0x20000000 /* Manageability Clock Gated */
#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */
/* MHADD Bit Masks */
@@ -1582,6 +1593,7 @@ enum {
#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000
#define IXGBE_MACC_FLU 0x00000001
#define IXGBE_MACC_FSV_10G 0x00030000
@@ -1827,6 +1839,7 @@ enum {
#define IXGBE_PCI_LINK_SPEED 0xF
#define IXGBE_PCI_LINK_SPEED_2500 0x1
#define IXGBE_PCI_LINK_SPEED_5000 0x2
+#define IXGBE_PCI_LINK_SPEED_8000 0x3
#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
@@ -2600,6 +2613,8 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_1g_cu_core1 = 10,
ixgbe_sfp_type_1g_sx_core0 = 11,
ixgbe_sfp_type_1g_sx_core1 = 12,
+ ixgbe_sfp_type_1g_lx_core0 = 13,
+ ixgbe_sfp_type_1g_lx_core1 = 14,
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
@@ -2650,6 +2665,7 @@ enum ixgbe_bus_speed {
ixgbe_bus_speed_133 = 133,
ixgbe_bus_speed_2500 = 2500,
ixgbe_bus_speed_5000 = 5000,
+ ixgbe_bus_speed_8000 = 8000,
ixgbe_bus_speed_reserved
};
@@ -2859,6 +2875,7 @@ struct ixgbe_mac_operations {
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
+ bool (*mng_fw_enabled)(struct ixgbe_hw *hw);
};
struct ixgbe_phy_operations {
@@ -2912,6 +2929,7 @@ struct ixgbe_mac_info {
u32 max_tx_queues;
u32 max_rx_queues;
u32 orig_autoc;
+ u32 cached_autoc;
u32 orig_autoc2;
bool orig_link_settings_stored;
bool autotry_restart;
@@ -2986,6 +3004,8 @@ struct ixgbe_hw {
bool adapter_stopped;
bool force_full_reset;
bool allow_unsupported_sfp;
+ bool mng_fw_enabled;
+ bool wol_enabled;
};
struct ixgbe_info {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 66c5e946284e..389324f5929a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -854,6 +854,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.enable_rx_buff = &ixgbe_enable_rx_buff_generic,
.get_thermal_sensor_data = NULL,
.init_thermal_sensor_thresh = NULL,
+ .mng_fw_enabled = NULL,
};
static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index fc0af9a3bb35..fff0d9867529 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -44,8 +44,8 @@ struct ixgbevf_tx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
unsigned long time_stamp;
+ union ixgbe_adv_tx_desc *next_to_watch;
u16 length;
- u16 next_to_watch;
u16 mapped_as_page;
};
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 2b6cb5ca48ee..1f5166ad6bb5 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -76,12 +76,9 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id ixgbevf_pci_tbl[] = {
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
- board_82599_vf},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
- board_X540_vf},
-
+static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
/* required last entry */
{0, }
};
@@ -190,28 +187,37 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_adapter *adapter = q_vector->adapter;
union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
struct ixgbevf_tx_buffer *tx_buffer_info;
- unsigned int i, eop, count = 0;
+ unsigned int i, count = 0;
unsigned int total_bytes = 0, total_packets = 0;
if (test_bit(__IXGBEVF_DOWN, &adapter->state))
return true;
i = tx_ring->next_to_clean;
- eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ eop_desc = tx_buffer_info->next_to_watch;
- while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
- (count < tx_ring->count)) {
+ do {
bool cleaned = false;
- rmb(); /* read buffer_info after eop_desc */
- /* eop could change between read and DD-check */
- if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
- goto cont_loop;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* if DD is not set, pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buffer_info->next_to_watch = NULL;
+
for ( ; !cleaned; count++) {
struct sk_buff *skb;
tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- cleaned = (i == eop);
+ cleaned = (tx_desc == eop_desc);
skb = tx_buffer_info->skb;
if (cleaned && skb) {
@@ -234,12 +240,12 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
i++;
if (i == tx_ring->count)
i = 0;
+
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
}
-cont_loop:
- eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
- }
+ eop_desc = tx_buffer_info->next_to_watch;
+ } while (count < tx_ring->count);
tx_ring->next_to_clean = i;
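The rework above turns next_to_watch from a ring index into a descriptor pointer that doubles as a work-pending flag: the transmit path publishes it last, the cleanup path clears it first. A minimal consumer-side sketch of that protocol, using stand-in types (ex_desc, ex_buffer) rather than the real ixgbevf structures, and assuming one descriptor per packet for brevity:

/* Hedged sketch, not the driver code. A non-NULL next_to_watch means
 * the producer finished writing the descriptors it covers (it stores
 * the pointer only after a wmb()).
 */
struct ex_desc { u32 status; };                 /* DD bit lives in status */
struct ex_buffer { struct ex_desc *next_to_watch; };

#define EX_STAT_DD      0x00000001

static unsigned int ex_clean(struct ex_buffer *buf, unsigned int head,
                             unsigned int ring_size)
{
        unsigned int count = 0;

        do {
                struct ex_desc *eop = buf[head].next_to_watch;

                if (!eop)                       /* nothing pending */
                        break;
                read_barrier_depends();         /* order reads after the load */
                if (!(eop->status & EX_STAT_DD))
                        break;                  /* hardware not done yet */
                buf[head].next_to_watch = NULL; /* avoid false hang reports */
                head = (head + 1) % ring_size;
                count++;
        } while (count < ring_size);

        return count;
}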
@@ -285,7 +291,7 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
- __vlan_hwaccel_put_tag(skb, tag);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
napi_gro_receive(&q_vector->napi, skb);
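The __vlan_hwaccel_put_tag() change in this hunk recurs across the whole series: the helper now takes the tag protocol explicitly instead of assuming 802.1Q. Every converted receive path ends up with the same call shape; a self-contained sketch (ex_rx_vlan is illustrative, not a driver function):

/* Post-change call shape: the VLAN protocol travels with the tag. */
static void ex_rx_vlan(struct sk_buff *skb, __le16 wire_tag)
{
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                               le16_to_cpu(wire_tag));
}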
@@ -1173,7 +1179,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
}
}
-static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -1198,7 +1205,8 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
return err;
}
-static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -1221,7 +1229,8 @@ static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
u16 vid;
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
- ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
+ ixgbevf_vlan_rx_add_vid(adapter->netdev,
+ htons(ETH_P_8021Q), vid);
}
static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
@@ -2046,6 +2055,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
int err;
/* PCI config space info */
@@ -2065,18 +2075,26 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
err = hw->mac.ops.reset_hw(hw);
if (err) {
dev_info(&pdev->dev,
- "PF still in reset state, assigning new address\n");
- eth_hw_addr_random(adapter->netdev);
- memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
- adapter->netdev->addr_len);
+ "PF still in reset state. Is the PF interface up?\n");
} else {
err = hw->mac.ops.init_hw(hw);
if (err) {
pr_err("init_shared_code failed: %d\n", err);
goto out;
}
- memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
- adapter->netdev->addr_len);
+ err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+ if (err)
+ dev_info(&pdev->dev, "Error reading MAC address\n");
+ else if (is_zero_ether_addr(adapter->hw.mac.addr))
+ dev_info(&pdev->dev,
+ "MAC address not assigned by administrator.\n");
+ memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+ }
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ dev_info(&pdev->dev, "Assigning random MAC address\n");
+ eth_hw_addr_random(netdev);
+ memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
}
/* lock to protect mailbox accesses */
@@ -2425,9 +2443,6 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
&rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
- hw_dbg(&adapter->hw,
- "Unable to allocate memory for "
- "the receive descriptor ring\n");
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
goto alloc_failed;
@@ -2822,8 +2837,7 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
}
static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags,
- unsigned int first)
+ struct sk_buff *skb, u32 tx_flags)
{
struct ixgbevf_tx_buffer *tx_buffer_info;
unsigned int len;
@@ -2848,7 +2862,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
size, DMA_TO_DEVICE);
if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
goto dma_error;
- tx_buffer_info->next_to_watch = i;
len -= size;
total -= size;
@@ -2878,7 +2891,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->mapped_as_page = true;
- tx_buffer_info->next_to_watch = i;
len -= size;
total -= size;
@@ -2897,8 +2909,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
else
i = i - 1;
tx_ring->tx_buffer_info[i].skb = skb;
- tx_ring->tx_buffer_info[first].next_to_watch = i;
- tx_ring->tx_buffer_info[first].time_stamp = jiffies;
return count;
@@ -2907,7 +2917,6 @@ dma_error:
/* clear timestamp and dma mappings for failed tx_buffer_info map */
tx_buffer_info->dma = 0;
- tx_buffer_info->next_to_watch = 0;
count--;
/* clear timestamp and dma mappings for remaining portion of packet */
@@ -2924,7 +2933,8 @@ dma_error:
}
static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
- int count, u32 paylen, u8 hdr_len)
+ int count, unsigned int first, u32 paylen,
+ u8 hdr_len)
{
union ixgbe_adv_tx_desc *tx_desc = NULL;
struct ixgbevf_tx_buffer *tx_buffer_info;
@@ -2975,6 +2985,16 @@ static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+ tx_ring->tx_buffer_info[first].time_stamp = jiffies;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
tx_ring->next_to_use = i;
}
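Moving the wmb() into ixgbevf_tx_queue() makes it the producer half of the cleanup protocol sketched earlier: every descriptor write must be globally visible before next_to_watch is published. Using the same stand-in types:

/* Producer side: publish the EOP pointer only after the descriptor
 * writes have completed (this matters on weakly ordered CPUs).
 */
static void ex_publish(struct ex_buffer *buf, unsigned int first,
                       struct ex_desc *eop_desc)
{
        wmb();                          /* descriptors before the pointer */
        buf[first].next_to_watch = eop_desc;
}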
@@ -3066,15 +3086,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_flags |= IXGBE_TX_FLAGS_CSUM;
ixgbevf_tx_queue(tx_ring, tx_flags,
- ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
- skb->len, hdr_len);
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
+ ixgbevf_tx_map(tx_ring, skb, tx_flags),
+ first, skb->len, hdr_len);
writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
@@ -3400,9 +3413,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_RXCSUM;
netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER;
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_TSO6;
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 0c94557b53df..387b52635bc0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -109,7 +109,12 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
if (ret_val)
return ret_val;
- if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
+ /* New versions of the PF may NACK the reset return message
+ * to indicate that no MAC address has yet been assigned for
+ * the VF.
+ */
+ if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
+ msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
return IXGBE_ERR_INVALID_MAC_ADDR;
memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 0519afa413d2..070a6f1a0577 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1059,7 +1059,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
u16 vid = le16_to_cpu(rxdesc->descwb.vlan);
- __vlan_hwaccel_put_tag(skb, vid);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
NET_STAT(jme).rx_bytes += 4;
}
jme->jme_rx(skb);
@@ -3030,8 +3030,8 @@ jme_init_one(struct pci_dev *pdev,
NETIF_F_SG |
NETIF_F_TSO |
NETIF_F_TSO6 |
- NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX;
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
if (using_dac)
netdev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index edfba9370922..a49e81bdf8e8 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -21,8 +21,8 @@ if NET_VENDOR_MARVELL
config MV643XX_ETH
tristate "Marvell Discovery (643XX) and Orion ethernet support"
depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
- select INET_LRO
select PHYLIB
+ select MVMDIO
---help---
This driver supports the gigabit ethernet MACs in the
Marvell Discovery PPC/MIPS chipset family (MV643XX) and
@@ -33,19 +33,17 @@ config MV643XX_ETH
config MVMDIO
tristate "Marvell MDIO interface support"
+ select PHYLIB
---help---
This driver supports the MDIO interface found in the network
interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
Dove, Armada 370 and Armada XP).
- For now, this driver is only needed for the MVNETA driver
- (used on Armada 370 and XP), but it could be used in the
- future by the MV643XX_ETH driver.
+ This driver is used by the MV643XX_ETH and MVNETA drivers.
config MVNETA
tristate "Marvell Armada 370/XP network interface support"
depends on MACH_ARMADA_370_XP
- select PHYLIB
select MVMDIO
---help---
This driver supports the network interface units in the
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 7f63b4aac434..5c4a7765ff0e 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -2,8 +2,8 @@
# Makefile for the Marvell device drivers.
#
-obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
obj-$(CONFIG_MVMDIO) += mvmdio.o
+obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
obj-$(CONFIG_MVNETA) += mvneta.o
obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 6562c736a1d8..d0afeea181fb 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -20,6 +20,8 @@
* Copyright (C) 2007-2008 Marvell Semiconductor
* Lennert Buytenhek <buytenh@marvell.com>
*
+ * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
@@ -54,8 +56,8 @@
#include <linux/phy.h>
#include <linux/mv643xx_eth.h>
#include <linux/io.h>
+#include <linux/interrupt.h>
#include <linux/types.h>
-#include <linux/inet_lro.h>
#include <linux/slab.h>
#include <linux/clk.h>
@@ -67,14 +69,6 @@ static char mv643xx_eth_driver_version[] = "1.4";
* Registers shared between all ports.
*/
#define PHY_ADDR 0x0000
-#define SMI_REG 0x0004
-#define SMI_BUSY 0x10000000
-#define SMI_READ_VALID 0x08000000
-#define SMI_OPCODE_READ 0x04000000
-#define SMI_OPCODE_WRITE 0x00000000
-#define ERR_INT_CAUSE 0x0080
-#define ERR_INT_SMI_DONE 0x00000010
-#define ERR_INT_MASK 0x0084
#define WINDOW_BASE(w) (0x0200 + ((w) << 3))
#define WINDOW_SIZE(w) (0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2))
@@ -264,25 +258,6 @@ struct mv643xx_eth_shared_private {
void __iomem *base;
/*
- * Points at the right SMI instance to use.
- */
- struct mv643xx_eth_shared_private *smi;
-
- /*
- * Provides access to local SMI interface.
- */
- struct mii_bus *smi_bus;
-
- /*
- * If we have access to the error interrupt pin (which is
- * somewhat misnamed as it not only reflects internal errors
- * but also reflects SMI completion), use that to wait for
- * SMI access completion instead of polling the SMI busy bit.
- */
- int err_interrupt;
- wait_queue_head_t smi_busy_wait;
-
- /*
* Per-port MBUS window access register value.
*/
u32 win_protect;
@@ -293,7 +268,7 @@ struct mv643xx_eth_shared_private {
int extended_rx_coal_limit;
int tx_bw_control;
int tx_csum_limit;
-
+ struct clk *clk;
};
#define TX_BW_CONTROL_ABSENT 0
@@ -341,12 +316,6 @@ struct mib_counters {
u32 rx_overrun;
};
-struct lro_counters {
- u32 lro_aggregated;
- u32 lro_flushed;
- u32 lro_no_desc;
-};
-
struct rx_queue {
int index;
@@ -360,9 +329,6 @@ struct rx_queue {
dma_addr_t rx_desc_dma;
int rx_desc_area_size;
struct sk_buff **rx_skb;
-
- struct net_lro_mgr lro_mgr;
- struct net_lro_desc lro_arr[8];
};
struct tx_queue {
@@ -398,8 +364,6 @@ struct mv643xx_eth_private {
spinlock_t mib_counters_lock;
struct mib_counters mib_counters;
- struct lro_counters lro_counters;
-
struct work_struct tx_timeout_task;
struct napi_struct napi;
@@ -435,9 +399,7 @@ struct mv643xx_eth_private {
/*
* Hardware-specific parameters.
*/
-#if defined(CONFIG_HAVE_CLK)
struct clk *clk;
-#endif
unsigned int t_clk;
};
@@ -530,42 +492,12 @@ static void txq_maybe_wake(struct tx_queue *txq)
}
}
-
-/* rx napi ******************************************************************/
-static int
-mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph,
- u64 *hdr_flags, void *priv)
-{
- unsigned long cmd_sts = (unsigned long)priv;
-
- /*
- * Make sure that this packet is Ethernet II, is not VLAN
- * tagged, is IPv4, has a valid IP header, and is TCP.
- */
- if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
- RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK |
- RX_PKT_IS_VLAN_TAGGED)) !=
- (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
- RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4))
- return -1;
-
- skb_reset_network_header(skb);
- skb_set_transport_header(skb, ip_hdrlen(skb));
- *iphdr = ip_hdr(skb);
- *tcph = tcp_hdr(skb);
- *hdr_flags = LRO_IPV4 | LRO_TCP;
-
- return 0;
-}
-
static int rxq_process(struct rx_queue *rxq, int budget)
{
struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
struct net_device_stats *stats = &mp->dev->stats;
- int lro_flush_needed;
int rx;
- lro_flush_needed = 0;
rx = 0;
while (rx < budget && rxq->rx_desc_count) {
struct rx_desc *rx_desc;
@@ -626,12 +558,7 @@ static int rxq_process(struct rx_queue *rxq, int budget)
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->protocol = eth_type_trans(skb, mp->dev);
- if (skb->dev->features & NETIF_F_LRO &&
- skb->ip_summed == CHECKSUM_UNNECESSARY) {
- lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts);
- lro_flush_needed = 1;
- } else
- netif_receive_skb(skb);
+ napi_gro_receive(&mp->napi, skb);
continue;
@@ -651,9 +578,6 @@ err:
dev_kfree_skb(skb);
}
- if (lro_flush_needed)
- lro_flush_all(&rxq->lro_mgr);
-
if (rx < budget)
mp->work_rx &= ~(1 << rxq->index);
@@ -1120,97 +1044,6 @@ out_write:
wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}
-static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
-{
- struct mv643xx_eth_shared_private *msp = dev_id;
-
- if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
- writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
- wake_up(&msp->smi_busy_wait);
- return IRQ_HANDLED;
- }
-
- return IRQ_NONE;
-}
-
-static int smi_is_done(struct mv643xx_eth_shared_private *msp)
-{
- return !(readl(msp->base + SMI_REG) & SMI_BUSY);
-}
-
-static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
-{
- if (msp->err_interrupt == NO_IRQ) {
- int i;
-
- for (i = 0; !smi_is_done(msp); i++) {
- if (i == 10)
- return -ETIMEDOUT;
- msleep(10);
- }
-
- return 0;
- }
-
- if (!smi_is_done(msp)) {
- wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
- msecs_to_jiffies(100));
- if (!smi_is_done(msp))
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
-{
- struct mv643xx_eth_shared_private *msp = bus->priv;
- void __iomem *smi_reg = msp->base + SMI_REG;
- int ret;
-
- if (smi_wait_ready(msp)) {
- pr_warn("SMI bus busy timeout\n");
- return -ETIMEDOUT;
- }
-
- writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
-
- if (smi_wait_ready(msp)) {
- pr_warn("SMI bus busy timeout\n");
- return -ETIMEDOUT;
- }
-
- ret = readl(smi_reg);
- if (!(ret & SMI_READ_VALID)) {
- pr_warn("SMI bus read not valid\n");
- return -ENODEV;
- }
-
- return ret & 0xffff;
-}
-
-static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
-{
- struct mv643xx_eth_shared_private *msp = bus->priv;
- void __iomem *smi_reg = msp->base + SMI_REG;
-
- if (smi_wait_ready(msp)) {
- pr_warn("SMI bus busy timeout\n");
- return -ETIMEDOUT;
- }
-
- writel(SMI_OPCODE_WRITE | (reg << 21) |
- (addr << 16) | (val & 0xffff), smi_reg);
-
- if (smi_wait_ready(msp)) {
- pr_warn("SMI bus busy timeout\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-
/* statistics ***************************************************************/
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
@@ -1236,26 +1069,6 @@ static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
return stats;
}
-static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp)
-{
- u32 lro_aggregated = 0;
- u32 lro_flushed = 0;
- u32 lro_no_desc = 0;
- int i;
-
- for (i = 0; i < mp->rxq_count; i++) {
- struct rx_queue *rxq = mp->rxq + i;
-
- lro_aggregated += rxq->lro_mgr.stats.aggregated;
- lro_flushed += rxq->lro_mgr.stats.flushed;
- lro_no_desc += rxq->lro_mgr.stats.no_desc;
- }
-
- mp->lro_counters.lro_aggregated = lro_aggregated;
- mp->lro_counters.lro_flushed = lro_flushed;
- mp->lro_counters.lro_no_desc = lro_no_desc;
-}
-
static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
@@ -1419,10 +1232,6 @@ struct mv643xx_eth_stats {
{ #m, FIELD_SIZEOF(struct mib_counters, m), \
-1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
-#define LROSTAT(m) \
- { #m, FIELD_SIZEOF(struct lro_counters, m), \
- -1, offsetof(struct mv643xx_eth_private, lro_counters.m) }
-
static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
SSTAT(rx_packets),
SSTAT(tx_packets),
@@ -1464,9 +1273,6 @@ static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
MIBSTAT(late_collision),
MIBSTAT(rx_discard),
MIBSTAT(rx_overrun),
- LROSTAT(lro_aggregated),
- LROSTAT(lro_flushed),
- LROSTAT(lro_no_desc),
};
static int
@@ -1523,6 +1329,34 @@ mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
return 0;
}
+static void
+mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ wol->supported = 0;
+ wol->wolopts = 0;
+ if (mp->phy)
+ phy_ethtool_get_wol(mp->phy, wol);
+}
+
+static int
+mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ int err;
+
+ if (mp->phy == NULL)
+ return -EOPNOTSUPP;
+
+ err = phy_ethtool_set_wol(mp->phy, wol);
+ /* Given that mv643xx_eth works without the marvell-specific PHY driver,
+ * this debugging hint is useful to have.
+ */
+ if (err == -EOPNOTSUPP)
+ netdev_info(dev, "The PHY does not support set_wol; was CONFIG_MARVELL_PHY enabled?\n");
+ return err;
+}
+
static int
mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
@@ -1668,7 +1502,6 @@ static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
mv643xx_eth_get_stats(dev);
mib_counters_update(mp);
- mv643xx_eth_grab_lro_stats(mp);
for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
const struct mv643xx_eth_stats *stat;
@@ -1708,6 +1541,8 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
.get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
.get_sset_count = mv643xx_eth_get_sset_count,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_wol = mv643xx_eth_get_wol,
+ .set_wol = mv643xx_eth_set_wol,
};
@@ -1939,19 +1774,6 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
nexti * sizeof(struct rx_desc);
}
- rxq->lro_mgr.dev = mp->dev;
- memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats));
- rxq->lro_mgr.features = LRO_F_NAPI;
- rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
- rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
- rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr);
- rxq->lro_mgr.max_aggr = 32;
- rxq->lro_mgr.frag_align_pad = 0;
- rxq->lro_mgr.lro_arr = rxq->lro_arr;
- rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header;
-
- memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr));
-
return 0;
@@ -2635,66 +2457,26 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
struct mv643xx_eth_shared_private *msp;
const struct mbus_dram_target_info *dram;
struct resource *res;
- int ret;
if (!mv643xx_eth_version_printed++)
pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
mv643xx_eth_driver_version);
- ret = -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL)
- goto out;
+ return -EINVAL;
- ret = -ENOMEM;
- msp = kzalloc(sizeof(*msp), GFP_KERNEL);
+ msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
if (msp == NULL)
- goto out;
+ return -ENOMEM;
msp->base = ioremap(res->start, resource_size(res));
if (msp->base == NULL)
- goto out_free;
-
- /*
- * Set up and register SMI bus.
- */
- if (pd == NULL || pd->shared_smi == NULL) {
- msp->smi_bus = mdiobus_alloc();
- if (msp->smi_bus == NULL)
- goto out_unmap;
-
- msp->smi_bus->priv = msp;
- msp->smi_bus->name = "mv643xx_eth smi";
- msp->smi_bus->read = smi_bus_read;
- msp->smi_bus->write = smi_bus_write,
- snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
- pdev->name, pdev->id);
- msp->smi_bus->parent = &pdev->dev;
- msp->smi_bus->phy_mask = 0xffffffff;
- if (mdiobus_register(msp->smi_bus) < 0)
- goto out_free_mii_bus;
- msp->smi = msp;
- } else {
- msp->smi = platform_get_drvdata(pd->shared_smi);
- }
-
- msp->err_interrupt = NO_IRQ;
- init_waitqueue_head(&msp->smi_busy_wait);
+ return -ENOMEM;
- /*
- * Check whether the error interrupt is hooked up.
- */
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res != NULL) {
- int err;
-
- err = request_irq(res->start, mv643xx_eth_err_irq,
- IRQF_SHARED, "mv643xx_eth", msp);
- if (!err) {
- writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
- msp->err_interrupt = res->start;
- }
- }
+ msp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(msp->clk))
+ clk_prepare_enable(msp->clk);
/*
* (Re-)program MBUS remapping windows if we are asked to.
@@ -2710,30 +2492,15 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, msp);
return 0;
-
-out_free_mii_bus:
- mdiobus_free(msp->smi_bus);
-out_unmap:
- iounmap(msp->base);
-out_free:
- kfree(msp);
-out:
- return ret;
}
static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
- struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
- if (pd == NULL || pd->shared_smi == NULL) {
- mdiobus_unregister(msp->smi_bus);
- mdiobus_free(msp->smi_bus);
- }
- if (msp->err_interrupt != NO_IRQ)
- free_irq(msp->err_interrupt, msp);
iounmap(msp->base);
- kfree(msp);
+ if (!IS_ERR(msp->clk))
+ clk_disable_unprepare(msp->clk);
return 0;
}
@@ -2794,14 +2561,21 @@ static void set_params(struct mv643xx_eth_private *mp,
mp->txq_count = pd->tx_queue_count ? : 1;
}
+static void mv643xx_eth_adjust_link(struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ mv643xx_adjust_pscr(mp);
+}
+
static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
int phy_addr)
{
- struct mii_bus *bus = mp->shared->smi->smi_bus;
struct phy_device *phydev;
int start;
int num;
int i;
+ char phy_id[MII_BUS_ID_SIZE + 3];
if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
start = phy_addr_get(mp) & 0x1f;
@@ -2811,17 +2585,19 @@ static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
num = 1;
}
- phydev = NULL;
+ /* Attempt to connect to the PHY using orion-mdio */
+ phydev = ERR_PTR(-ENODEV);
for (i = 0; i < num; i++) {
int addr = (start + i) & 0x1f;
- if (bus->phy_map[addr] == NULL)
- mdiobus_scan(bus, addr);
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
+ "orion-mdio-mii", addr);
- if (phydev == NULL) {
- phydev = bus->phy_map[addr];
- if (phydev != NULL)
- phy_addr_set(mp, addr);
+ phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
+ PHY_INTERFACE_MODE_GMII);
+ if (!IS_ERR(phydev)) {
+ phy_addr_set(mp, addr);
+ break;
}
}
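PHY_ID_FMT is "%s:%02x", so the string built above names a PHY by MDIO bus id plus address; for address 8 on the orion-mdio bus it comes out as "orion-mdio-mii:08". A sketch of the resulting connect call (the address and the adjust_link_cb handler are illustrative):

/* Illustrative phy_connect() by well-known bus name and PHY address. */
static int ex_attach_phy(struct net_device *dev,
                         void (*adjust_link_cb)(struct net_device *))
{
        char phy_id[MII_BUS_ID_SIZE + 3];
        struct phy_device *phydev;

        snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "orion-mdio-mii", 8);
        phydev = phy_connect(dev, phy_id, adjust_link_cb,
                             PHY_INTERFACE_MODE_GMII);
        return IS_ERR(phydev) ? PTR_ERR(phydev) : 0;
}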
@@ -2834,8 +2610,6 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
phy_reset(mp);
- phy_attach(mp->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_GMII);
-
if (speed == 0) {
phy->autoneg = AUTONEG_ENABLE;
phy->speed = 0;
@@ -2932,22 +2706,27 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
* it to override the default.
*/
mp->t_clk = 133000000;
-#if defined(CONFIG_HAVE_CLK)
- mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0"));
+ mp->clk = devm_clk_get(&pdev->dev, NULL);
if (!IS_ERR(mp->clk)) {
clk_prepare_enable(mp->clk);
mp->t_clk = clk_get_rate(mp->clk);
}
-#endif
+
set_params(mp, pd);
netif_set_real_num_tx_queues(dev, mp->txq_count);
netif_set_real_num_rx_queues(dev, mp->rxq_count);
- if (pd->phy_addr != MV643XX_ETH_PHY_NONE)
+ if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
mp->phy = phy_scan(mp, pd->phy_addr);
- if (mp->phy != NULL)
+ if (IS_ERR(mp->phy)) {
+ err = PTR_ERR(mp->phy);
+ if (err == -ENODEV)
+ err = -EPROBE_DEFER;
+ goto out;
+ }
phy_init(mp, pd->speed, pd->duplex);
+ }
SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
@@ -2982,8 +2761,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
dev->watchdog_timeo = 2 * HZ;
dev->base_addr = 0;
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_RXCSUM | NETIF_F_LRO;
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
@@ -3014,12 +2792,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
return 0;
out:
-#if defined(CONFIG_HAVE_CLK)
- if (!IS_ERR(mp->clk)) {
+ if (!IS_ERR(mp->clk))
clk_disable_unprepare(mp->clk);
- clk_put(mp->clk);
- }
-#endif
free_netdev(dev);
return err;
@@ -3034,12 +2808,8 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
phy_detach(mp->phy);
cancel_work_sync(&mp->tx_timeout_task);
-#if defined(CONFIG_HAVE_CLK)
- if (!IS_ERR(mp->clk)) {
+ if (!IS_ERR(mp->clk))
clk_disable_unprepare(mp->clk);
- clk_put(mp->clk);
- }
-#endif
free_netdev(mp->dev);
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 77b7c80262f4..e2f662660313 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -24,10 +24,14 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/phy.h>
-#include <linux/of_address.h>
-#include <linux/of_mdio.h>
+#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/of_mdio.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
#define MVMDIO_SMI_DATA_SHIFT 0
#define MVMDIO_SMI_PHY_ADDR_SHIFT 16
@@ -36,33 +40,59 @@
#define MVMDIO_SMI_WRITE_OPERATION 0
#define MVMDIO_SMI_READ_VALID BIT(27)
#define MVMDIO_SMI_BUSY BIT(28)
+#define MVMDIO_ERR_INT_CAUSE 0x007C
+#define MVMDIO_ERR_INT_SMI_DONE 0x00000010
+#define MVMDIO_ERR_INT_MASK 0x0080
struct orion_mdio_dev {
struct mutex lock;
- void __iomem *smireg;
+ void __iomem *regs;
+ struct clk *clk;
+ /*
+ * If we have access to the error interrupt pin (which is
+ * somewhat misnamed as it not only reflects internal errors
+ * but also reflects SMI completion), use that to wait for
+ * SMI access completion instead of polling the SMI busy bit.
+ */
+ int err_interrupt;
+ wait_queue_head_t smi_busy_wait;
};
+static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev)
+{
+ return !(readl(dev->regs) & MVMDIO_SMI_BUSY);
+}
+
/* Wait for the SMI unit to be ready for another operation
*/
static int orion_mdio_wait_ready(struct mii_bus *bus)
{
struct orion_mdio_dev *dev = bus->priv;
int count;
- u32 val;
- count = 0;
- while (1) {
- val = readl(dev->smireg);
- if (!(val & MVMDIO_SMI_BUSY))
- break;
+ if (dev->err_interrupt <= 0) {
+ count = 0;
+ while (1) {
+ if (orion_mdio_smi_is_done(dev))
+ break;
- if (count > 100) {
- dev_err(bus->parent, "Timeout: SMI busy for too long\n");
- return -ETIMEDOUT;
- }
+ if (count > 100) {
+ dev_err(bus->parent,
+ "Timeout: SMI busy for too long\n");
+ return -ETIMEDOUT;
+ }
- udelay(10);
- count++;
+ udelay(10);
+ count++;
+ }
+ } else {
+ if (!orion_mdio_smi_is_done(dev)) {
+ wait_event_timeout(dev->smi_busy_wait,
+ orion_mdio_smi_is_done(dev),
+ msecs_to_jiffies(100));
+ if (!orion_mdio_smi_is_done(dev))
+ return -ETIMEDOUT;
+ }
}
return 0;
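The two branches above are the usual poll-versus-event split: with no usable interrupt the driver busy-polls the SMI busy bit, otherwise it sleeps on a waitqueue that the (misnamed) error interrupt wakes. The pairing reduces to the standard waitqueue pattern; a sketch with stand-in names (ex_mdio, EX_BUSY):

/* Sketch of the IRQ/waiter pairing, not the mvmdio code itself. */
struct ex_mdio {
        wait_queue_head_t wq;
        void __iomem *regs;
};

#define EX_BUSY BIT(28)

static irqreturn_t ex_done_irq(int irq, void *dev_id)
{
        struct ex_mdio *d = dev_id;

        wake_up(&d->wq);        /* the waiter re-tests the busy bit itself */
        return IRQ_HANDLED;
}

static int ex_wait_ready(struct ex_mdio *d)
{
        wait_event_timeout(d->wq, !(readl(d->regs) & EX_BUSY),
                           msecs_to_jiffies(100));
        return (readl(d->regs) & EX_BUSY) ? -ETIMEDOUT : 0;
}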
@@ -87,12 +117,12 @@ static int orion_mdio_read(struct mii_bus *bus, int mii_id,
writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
(regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
MVMDIO_SMI_READ_OPERATION),
- dev->smireg);
+ dev->regs);
/* Wait for the value to become available */
count = 0;
while (1) {
- val = readl(dev->smireg);
+ val = readl(dev->regs);
if (val & MVMDIO_SMI_READ_VALID)
break;
@@ -129,7 +159,7 @@ static int orion_mdio_write(struct mii_bus *bus, int mii_id,
(regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
MVMDIO_SMI_WRITE_OPERATION |
(value << MVMDIO_SMI_DATA_SHIFT)),
- dev->smireg);
+ dev->regs);
mutex_unlock(&dev->lock);
@@ -141,13 +171,34 @@ static int orion_mdio_reset(struct mii_bus *bus)
return 0;
}
+static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id)
+{
+ struct orion_mdio_dev *dev = dev_id;
+
+ if (readl(dev->regs + MVMDIO_ERR_INT_CAUSE) &
+ MVMDIO_ERR_INT_SMI_DONE) {
+ writel(~MVMDIO_ERR_INT_SMI_DONE,
+ dev->regs + MVMDIO_ERR_INT_CAUSE);
+ wake_up(&dev->smi_busy_wait);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
static int orion_mdio_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct resource *r;
struct mii_bus *bus;
struct orion_mdio_dev *dev;
int i, ret;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "No SMI register address given\n");
+ return -ENODEV;
+ }
+
bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev));
if (!bus) {
dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
@@ -172,36 +223,66 @@ static int orion_mdio_probe(struct platform_device *pdev)
bus->irq[i] = PHY_POLL;
dev = bus->priv;
- dev->smireg = of_iomap(pdev->dev.of_node, 0);
- if (!dev->smireg) {
- dev_err(&pdev->dev, "No SMI register address given in DT\n");
- kfree(bus->irq);
- mdiobus_free(bus);
- return -ENODEV;
+ dev->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+ if (!dev->regs) {
+ dev_err(&pdev->dev, "Unable to remap SMI register\n");
+ ret = -ENODEV;
+ goto out_mdio;
+ }
+
+ init_waitqueue_head(&dev->smi_busy_wait);
+
+ dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(dev->clk))
+ clk_prepare_enable(dev->clk);
+
+ dev->err_interrupt = platform_get_irq(pdev, 0);
+ if (dev->err_interrupt != -ENXIO) {
+ ret = devm_request_irq(&pdev->dev, dev->err_interrupt,
+ orion_mdio_err_irq,
+ IRQF_SHARED, pdev->name, dev);
+ if (ret)
+ goto out_mdio;
+
+ writel(MVMDIO_ERR_INT_SMI_DONE,
+ dev->regs + MVMDIO_ERR_INT_MASK);
}
mutex_init(&dev->lock);
- ret = of_mdiobus_register(bus, np);
+ if (pdev->dev.of_node)
+ ret = of_mdiobus_register(bus, pdev->dev.of_node);
+ else
+ ret = mdiobus_register(bus);
if (ret < 0) {
dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
- iounmap(dev->smireg);
- kfree(bus->irq);
- mdiobus_free(bus);
- return ret;
+ goto out_mdio;
}
platform_set_drvdata(pdev, bus);
return 0;
+
+out_mdio:
+ if (!IS_ERR(dev->clk))
+ clk_disable_unprepare(dev->clk);
+ kfree(bus->irq);
+ mdiobus_free(bus);
+ return ret;
}
static int orion_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
+ struct orion_mdio_dev *dev = bus->priv;
+
+ writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
mdiobus_unregister(bus);
kfree(bus->irq);
mdiobus_free(bus);
+ if (!IS_ERR(dev->clk))
+ clk_disable_unprepare(dev->clk);
+
return 0;
}
@@ -225,3 +306,4 @@ module_platform_driver(orion_mdio_driver);
MODULE_DESCRIPTION("Marvell MDIO interface driver");
MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:orion-mdio");
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index cd345b8969bc..c96678555233 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -374,7 +374,6 @@ static int rxq_number = 8;
static int txq_number = 8;
static int rxq_def;
-static int txq_def;
#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"
@@ -1475,7 +1474,8 @@ error:
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
struct mvneta_port *pp = netdev_priv(dev);
- struct mvneta_tx_queue *txq = &pp->txqs[txq_def];
+ u16 txq_id = skb_get_queue_mapping(skb);
+ struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
struct mvneta_tx_desc *tx_desc;
struct netdev_queue *nq;
int frags = 0;
@@ -1485,7 +1485,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
goto out;
frags = skb_shinfo(skb)->nr_frags + 1;
- nq = netdev_get_tx_queue(dev, txq_def);
+ nq = netdev_get_tx_queue(dev, txq_id);
/* Get a descriptor for the first part of the packet */
tx_desc = mvneta_txq_next_desc_get(txq);
@@ -1969,13 +1969,8 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
rxq->size * MVNETA_DESC_ALIGNED_SIZE,
&rxq->descs_phys, GFP_KERNEL);
- if (rxq->descs == NULL) {
- netdev_err(pp->dev,
- "rxq=%d: Can't allocate %d bytes for %d RX descr\n",
- rxq->id, rxq->size * MVNETA_DESC_ALIGNED_SIZE,
- rxq->size);
+ if (rxq->descs == NULL)
return -ENOMEM;
- }
BUG_ON(rxq->descs !=
PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
@@ -2029,13 +2024,8 @@ static int mvneta_txq_init(struct mvneta_port *pp,
txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
&txq->descs_phys, GFP_KERNEL);
- if (txq->descs == NULL) {
- netdev_err(pp->dev,
- "txQ=%d: Can't allocate %d bytes for %d TX descr\n",
- txq->id, txq->size * MVNETA_DESC_ALIGNED_SIZE,
- txq->size);
+ if (txq->descs == NULL)
return -ENOMEM;
- }
/* Make sure descriptor address is cache line size aligned */
BUG_ON(txq->descs !=
@@ -2689,7 +2679,7 @@ static int mvneta_probe(struct platform_device *pdev)
return -EINVAL;
}
- dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8);
+ dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
if (!dev)
return -ENOMEM;
@@ -2771,16 +2761,17 @@ static int mvneta_probe(struct platform_device *pdev)
netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register\n");
goto err_deinit;
}
- dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
- dev->priv_flags |= IFF_UNICAST_FLT;
-
netdev_info(dev, "mac: %pM\n", dev->dev_addr);
platform_set_drvdata(pdev, pp->dev);
@@ -2843,4 +2834,3 @@ module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);
module_param(rxq_def, int, S_IRUGO);
-module_param(txq_def, int, S_IRUGO);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 037ed866c22f..339bb323cb0c 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -584,12 +584,14 @@ static int init_hash_table(struct pxa168_eth_private *pep)
*/
if (pep->htpr == NULL) {
pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
- HASH_ADDR_TABLE_SIZE,
- &pep->htpr_dma, GFP_KERNEL);
+ HASH_ADDR_TABLE_SIZE,
+ &pep->htpr_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (pep->htpr == NULL)
return -ENOMEM;
+ } else {
+ memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
}
- memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
wrl(pep, HTPR, pep->htpr_dma);
return 0;
}
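The init_hash_table() change above trades an unconditional memset() for __GFP_ZERO at first allocation, clearing by hand only when the table is reused. As a general pattern (ex_get_table and its parameters are illustrative):

/* Sketch: zero via the allocator on first use, memset() only on reuse. */
static void *ex_get_table(struct device *dev, void *buf, size_t size,
                          dma_addr_t *dma)
{
        if (!buf)
                buf = dma_alloc_coherent(dev, size, dma,
                                         GFP_KERNEL | __GFP_ZERO);
        else
                memset(buf, 0, size);

        return buf;     /* NULL if the first allocation failed */
}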
@@ -1023,13 +1025,11 @@ static int rxq_init(struct net_device *dev)
size = pep->rx_ring_size * sizeof(struct rx_desc);
pep->rx_desc_area_size = size;
pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
- &pep->rx_desc_dma, GFP_KERNEL);
- if (!pep->p_rx_desc_area) {
- printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
- dev->name, size);
+ &pep->rx_desc_dma,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!pep->p_rx_desc_area)
goto out;
- }
- memset((void *)pep->p_rx_desc_area, 0, size);
+
/* initialize the next_desc_ptr links in the Rx descriptors ring */
p_rx_desc = pep->p_rx_desc_area;
for (i = 0; i < rx_desc_num; i++) {
@@ -1086,13 +1086,10 @@ static int txq_init(struct net_device *dev)
size = pep->tx_ring_size * sizeof(struct tx_desc);
pep->tx_desc_area_size = size;
pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
- &pep->tx_desc_dma, GFP_KERNEL);
- if (!pep->p_tx_desc_area) {
- printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
- dev->name, size);
+ &pep->tx_desc_dma,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!pep->p_tx_desc_area)
goto out;
- }
- memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
/* Initialize the next_desc_ptr links in the Tx descriptors ring */
p_tx_desc = pep->p_tx_desc_area;
for (i = 0; i < tx_desc_num; i++) {
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index fc07ca35721b..256ae789c143 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1067,7 +1067,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
- tp = space - 2048/8;
+ tp = space - 8192/8;
sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
} else {
@@ -1421,14 +1421,14 @@ static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features)
struct sky2_hw *hw = sky2->hw;
u16 port = sky2->port;
- if (features & NETIF_F_HW_VLAN_RX)
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
RX_VLAN_STRIP_ON);
else
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
RX_VLAN_STRIP_OFF);
- if (features & NETIF_F_HW_VLAN_TX) {
+ if (features & NETIF_F_HW_VLAN_CTAG_TX) {
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
TX_VLAN_TAG_ON);
@@ -2713,7 +2713,7 @@ static void sky2_rx_tag(struct sky2_port *sky2, u16 length)
struct sk_buff *skb;
skb = sky2->rx_ring[sky2->rx_next].skb;
- __vlan_hwaccel_put_tag(skb, be16_to_cpu(length));
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(length));
}
static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
@@ -4406,7 +4406,7 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
if (changed & NETIF_F_RXHASH)
rx_set_rss(dev, features);
- if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
+ if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
sky2_vlan_mode(dev, features);
return 0;
@@ -4793,7 +4793,8 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
dev->hw_features |= NETIF_F_RXHASH;
if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) {
- dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features |= SKY2_VLAN_OFFLOADS;
}
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index 615ac63ea860..ec6dcd80152b 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -2074,7 +2074,7 @@ enum {
GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
-#define GMAC_DEF_MSK GM_IS_TX_FF_UR
+#define GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR)
};
/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index 293127d28b33..3e9c70f15b42 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -6,5 +6,5 @@ mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
obj-$(CONFIG_MLX4_EN) += mlx4_en.o
mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
- en_resources.o en_netdev.o en_selftest.o
+ en_resources.o en_netdev.o en_selftest.o en_clock.o
mlx4_en-$(CONFIG_MLX4_EN_DCB) += en_dcb_nl.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index fdc5f23d8e9f..1df56cc50ee9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1490,6 +1490,69 @@ out:
return ret;
}
+static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
+{
+ int port, err;
+ struct mlx4_vport_state *vp_admin;
+ struct mlx4_vport_oper_state *vp_oper;
+
+ for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+ vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+ vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
+ vp_oper->state = *vp_admin;
+ if (MLX4_VGT != vp_admin->default_vlan) {
+ err = __mlx4_register_vlan(&priv->dev, port,
+ vp_admin->default_vlan, &(vp_oper->vlan_idx));
+ if (err) {
+ vp_oper->vlan_idx = NO_INDX;
+ mlx4_warn((&priv->dev),
+ "No vlan resorces slave %d, port %d\n",
+ slave, port);
+ return err;
+ }
+ mlx4_dbg((&(priv->dev)), "alloc vlan %d idx %d slave %d port %d\n",
+ (int)(vp_oper->state.default_vlan),
+ vp_oper->vlan_idx, slave, port);
+ }
+ if (vp_admin->spoofchk) {
+ vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
+ port,
+ vp_admin->mac);
+ if (0 > vp_oper->mac_idx) {
+ err = vp_oper->mac_idx;
+ vp_oper->mac_idx = NO_INDX;
+ mlx4_warn((&priv->dev),
+ "No mac resorces slave %d, port %d\n",
+ slave, port);
+ return err;
+ }
+ mlx4_dbg((&(priv->dev)), "alloc mac %llx idx %d slave %d port %d\n",
+ vp_oper->state.mac, vp_oper->mac_idx, slave, port);
+ }
+ }
+ return 0;
+}
+
+static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
+{
+ int port;
+ struct mlx4_vport_oper_state *vp_oper;
+
+ for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+ vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+ if (NO_INDX != vp_oper->vlan_idx) {
+ __mlx4_unregister_vlan(&priv->dev,
+ port, vp_oper->vlan_idx);
+ vp_oper->vlan_idx = NO_INDX;
+ }
+ if (NO_INDX != vp_oper->mac_idx) {
+ __mlx4_unregister_mac(&priv->dev, port, vp_oper->mac_idx);
+ vp_oper->mac_idx = NO_INDX;
+ }
+ }
+ return;
+}
+
static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
u16 param, u8 toggle)
{
@@ -1510,6 +1573,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
if (cmd == MLX4_COMM_CMD_RESET) {
mlx4_warn(dev, "Received reset from slave:%d\n", slave);
slave_state[slave].active = false;
+ mlx4_master_deactivate_admin_state(priv, slave);
for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
slave_state[slave].event_eq[i].eqn = -1;
slave_state[slave].event_eq[i].token = 0;
@@ -1556,6 +1620,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
goto reset_slave;
slave_state[slave].vhcr_dma |= param;
+ if (mlx4_master_activate_admin_state(priv, slave))
+ goto reset_slave;
slave_state[slave].active = true;
mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
break;
@@ -1732,6 +1798,18 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
if (!priv->mfunc.master.slave_state)
goto err_comm;
+ priv->mfunc.master.vf_admin =
+ kzalloc(dev->num_slaves *
+ sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
+ if (!priv->mfunc.master.vf_admin)
+ goto err_comm_admin;
+
+ priv->mfunc.master.vf_oper =
+ kzalloc(dev->num_slaves *
+ sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
+ if (!priv->mfunc.master.vf_oper)
+ goto err_comm_oper;
+
for (i = 0; i < dev->num_slaves; ++i) {
s_state = &priv->mfunc.master.slave_state[i];
s_state->last_cmd = MLX4_COMM_CMD_RESET;
@@ -1752,6 +1830,10 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
goto err_slaves;
}
INIT_LIST_HEAD(&s_state->mcast_filters[port]);
+ priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT;
+ priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT;
+ priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX;
+ priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX;
}
spin_lock_init(&s_state->lock);
}
@@ -1800,6 +1882,10 @@ err_slaves:
for (port = 1; port <= MLX4_MAX_PORTS; port++)
kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
}
+ kfree(priv->mfunc.master.vf_oper);
+err_comm_oper:
+ kfree(priv->mfunc.master.vf_admin);
+err_comm_admin:
kfree(priv->mfunc.master.slave_state);
err_comm:
iounmap(priv->mfunc.comm);
@@ -1837,10 +1923,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
&priv->mfunc.vhcr_dma,
GFP_KERNEL);
- if (!priv->mfunc.vhcr) {
- mlx4_err(dev, "Couldn't allocate VHCR.\n");
+ if (!priv->mfunc.vhcr)
goto err_hcr;
- }
}
priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
@@ -1876,6 +1960,8 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
}
kfree(priv->mfunc.master.slave_state);
+ kfree(priv->mfunc.master.vf_admin);
+ kfree(priv->mfunc.master.vf_oper);
}
iounmap(priv->mfunc.comm);
@@ -1986,3 +2072,115 @@ u32 mlx4_comm_get_version(void)
{
return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
}
+
+static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
+{
+ if ((vf < 0) || (vf >= dev->num_vfs)) {
+ mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", vf, dev->num_vfs);
+ return -EINVAL;
+ }
+
+ return vf+1;
+}
+
+int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_vport_state *s_info;
+ int slave;
+
+ if (!mlx4_is_master(dev))
+ return -EPROTONOSUPPORT;
+
+ slave = mlx4_get_slave_indx(dev, vf);
+ if (slave < 0)
+ return -EINVAL;
+
+ s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
+ s_info->mac = mac;
+ mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
+ vf, port, s_info->mac);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
+
+int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_vport_state *s_info;
+ int slave;
+
+ if ((!mlx4_is_master(dev)) ||
+ !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
+ return -EPROTONOSUPPORT;
+
+ if ((vlan > 4095) || (qos > 7))
+ return -EINVAL;
+
+ slave = mlx4_get_slave_indx(dev, vf);
+ if (slave < 0)
+ return -EINVAL;
+
+ s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
+ if ((0 == vlan) && (0 == qos))
+ s_info->default_vlan = MLX4_VGT;
+ else
+ s_info->default_vlan = vlan;
+ s_info->default_qos = qos;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
+
+int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_vport_state *s_info;
+ int slave;
+
+ if ((!mlx4_is_master(dev)) ||
+ !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
+ return -EPROTONOSUPPORT;
+
+ slave = mlx4_get_slave_indx(dev, vf);
+ if (slave < 0)
+ return -EINVAL;
+
+ s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
+ s_info->spoofchk = setting;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
+
+int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_vport_state *s_info;
+ int slave;
+
+ if (!mlx4_is_master(dev))
+ return -EPROTONOSUPPORT;
+
+ slave = mlx4_get_slave_indx(dev, vf);
+ if (slave < 0)
+ return -EINVAL;
+
+ s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
+ ivf->vf = vf;
+
+ /* unpack the u64-stored MAC, most significant byte first */
+ ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
+ ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
+ ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
+ ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
+ ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
+ ivf->mac[5] = ((s_info->mac) & 0xff);
+
+ ivf->vlan = s_info->default_vlan;
+ ivf->qos = s_info->default_qos;
+ ivf->tx_rate = s_info->tx_rate;
+ ivf->spoofchk = s_info->spoofchk;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
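The byte-by-byte shifts in mlx4_get_vf_config() unpack a MAC kept in the low 48 bits of a u64 into the ifla_vf_info array, most significant byte first. The equivalent loop, as a sketch (ex_u64_to_mac is illustrative):

/* Sketch: unpack a u64-stored MAC address into a 6-byte array. */
static void ex_u64_to_mac(u8 mac[ETH_ALEN], u64 addr)
{
        int i;

        for (i = ETH_ALEN - 1; i >= 0; i--) {
                mac[i] = addr & 0xff;
                addr >>= 8;
        }
}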
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 0706623cfb96..004e4231af67 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -240,9 +240,10 @@ static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
__mlx4_cq_free_icm(dev, cqn);
}
-int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
- struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
- unsigned vector, int collapsed)
+int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
+ struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
+ struct mlx4_cq *cq, unsigned vector, int collapsed,
+ int timestamp_en)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cq_table *cq_table = &priv->cq_table;
@@ -276,6 +277,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
memset(cq_context, 0, sizeof *cq_context);
cq_context->flags = cpu_to_be32(!!collapsed << 18);
+ if (timestamp_en)
+ cq_context->flags |= cpu_to_be32(1 << 19);
+
cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
cq_context->comp_eqn = priv->eq_table.eq[vector].eqn;
cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
new file mode 100644
index 000000000000..fd6441071319
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx4/device.h>
+
+#include "mlx4_en.h"
+
+int mlx4_en_timestamp_config(struct net_device *dev, int tx_type, int rx_filter)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int port_up = 0;
+ int err = 0;
+
+ mutex_lock(&mdev->state_lock);
+ if (priv->port_up) {
+ port_up = 1;
+ mlx4_en_stop_port(dev, 1);
+ }
+
+ mlx4_en_free_resources(priv);
+
+ en_warn(priv, "Changing Time Stamp configuration\n");
+
+ priv->hwtstamp_config.tx_type = tx_type;
+ priv->hwtstamp_config.rx_filter = rx_filter;
+
+ if (rx_filter != HWTSTAMP_FILTER_NONE)
+ dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ else
+ dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+ err = mlx4_en_alloc_resources(priv);
+ if (err) {
+ en_err(priv, "Failed reallocating port resources\n");
+ goto out;
+ }
+ if (port_up) {
+ err = mlx4_en_start_port(dev);
+ if (err)
+ en_err(priv, "Failed starting port\n");
+ }
+
+out:
+ mutex_unlock(&mdev->state_lock);
+ netdev_features_change(dev);
+ return err;
+}
+
+/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter)
+ */
+static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc)
+{
+ struct mlx4_en_dev *mdev =
+ container_of(tc, struct mlx4_en_dev, cycles);
+ struct mlx4_dev *dev = mdev->dev;
+
+ return mlx4_read_clock(dev) & tc->mask;
+}
+
+u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe)
+{
+ u64 hi, lo;
+ struct mlx4_ts_cqe *ts_cqe = (struct mlx4_ts_cqe *)cqe;
+
+ lo = (u64)be16_to_cpu(ts_cqe->timestamp_lo);
+ hi = ((u64)be32_to_cpu(ts_cqe->timestamp_hi) + !lo) << 16;
+
+ return hi | lo;
+}
+
+void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
+ struct skb_shared_hwtstamps *hwts,
+ u64 timestamp)
+{
+ u64 nsec;
+
+ nsec = timecounter_cyc2time(&mdev->clock, timestamp);
+
+ memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
+ hwts->hwtstamp = ns_to_ktime(nsec);
+}
+
+void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
+{
+ struct mlx4_dev *dev = mdev->dev;
+ u64 ns;
+
+ memset(&mdev->cycles, 0, sizeof(mdev->cycles));
+ mdev->cycles.read = mlx4_en_read_clock;
+ mdev->cycles.mask = CLOCKSOURCE_MASK(48);
+ /* Using a shift makes the calculation more accurate. With the current
+ * 427 MHz HW clock and a 48-bit cycle register, the largest shift that
+ * keeps the u64 math from overflowing is 14
+ * (max_cycles * multiplier < 2^64).
+ */
+ mdev->cycles.shift = 14;
+ mdev->cycles.mult =
+ clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
+
+ timecounter_init(&mdev->clock, &mdev->cycles,
+ ktime_to_ns(ktime_get_real()));
+
+ /* Calculate how often to run the overflow watchdog so that the
+ * counter is read at least once per wraparound.
+ */
+ ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask);
+ do_div(ns, NSEC_PER_SEC / 2 / HZ);
+ mdev->overflow_period = ns;
+}
+
+void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
+{
+ bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
+ mdev->overflow_period);
+
+ if (timeout) {
+ timecounter_read(&mdev->clock);
+ mdev->last_overflow_check = jiffies;
+ }
+}
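The cyclecounter parameters chosen in mlx4_en_init_timestamp() can be sanity-checked by hand. Assuming the 427 MHz clock the comment cites, clocksource_khz2mult(427000, 14) is roughly (10^6 << 14) / 427000, about 38370; since 38370 < 2^16, the worst-case product (2^48 - 1) * mult stays below 2^64 and the u64 cycles-to-ns conversion cannot overflow before the shift. Illustrative only:

/* Illustrative check of the shift/mult choice, not driver code. */
static inline u64 ex_cycles_to_ns(u64 cycles, u32 mult, u32 shift)
{
        /* Same formula the timecounter applies internally. */
        return (cycles * mult) >> shift;
}

/* With mult ~= 38370 and shift == 14:
 * (2^48 - 1) * 38370 < 2^48 * 2^16 = 2^64, so the multiply fits in a u64.
 */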
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index b8d0854a7ad1..1e6c594d6d04 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -77,6 +77,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
char name[25];
+ int timestamp_en = 0;
struct cpu_rmap *rmap =
#ifdef CONFIG_RFS_ACCEL
priv->dev->rx_cpu_rmap;
@@ -123,8 +124,13 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
if (!cq->is_tx)
cq->size = priv->rx_ring[cq->ring].actual_size;
- err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
- cq->wqres.db.dma, &cq->mcq, cq->vector, 0);
+ if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
+ (!cq->is_tx && priv->hwtstamp_config.rx_filter))
+ timestamp_en = 1;
+
+ err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
+ &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
+ cq->vector, 0, timestamp_en);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index b799ab12a291..0f91222ea3d7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -186,7 +186,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
{
- return DCB_CAP_DCBX_VER_IEEE;
+ return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
}
static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
@@ -253,3 +253,11 @@ const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
.getdcbx = mlx4_en_dcbnl_getdcbx,
.setdcbx = mlx4_en_dcbnl_setdcbx,
};
+
+const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
+ .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
+ .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
+
+ .getdcbx = mlx4_en_dcbnl_getdcbx,
+ .setdcbx = mlx4_en_dcbnl_setdcbx,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 00f25b5f297f..bcf4d118e98c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1147,6 +1147,35 @@ out:
return err;
}
+static int mlx4_en_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int ret;
+
+ ret = ethtool_op_get_ts_info(dev, info);
+ if (ret)
+ return ret;
+
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
+ info->so_timestamping |=
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ }
+
+ return ret;
+}
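
The ethtool core exposes this through ETHTOOL_GET_TS_INFO (what "ethtool -T <iface>" queries); a minimal userspace sketch of the same query:

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct ethtool_ts_info tsi = { .cmd = ETHTOOL_GET_TS_INFO };
	struct ifreq ifr;
	int fd;

	if (argc < 2)
		return 1;
	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&tsi;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		close(fd);
		return 1;
	}
	printf("so_timestamping=0x%x tx_types=0x%x rx_filters=0x%x\n",
	       tsi.so_timestamping, tsi.tx_types, tsi.rx_filters);
	close(fd);
	return 0;
}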
+
const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_drvinfo = mlx4_en_get_drvinfo,
.get_settings = mlx4_en_get_settings,
@@ -1173,6 +1202,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.set_rxfh_indir = mlx4_en_set_rxfh_indir,
.get_channels = mlx4_en_get_channels,
.set_channels = mlx4_en_set_channels,
+ .get_ts_info = mlx4_en_get_ts_info,
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index fc27800e9c38..a5c9df07a7d0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -300,6 +300,11 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
mdev->pndev[i] = NULL;
}
+
+ /* Initialize time stamp mechanism */
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+ mlx4_en_init_timestamp(mdev);
+
return mdev;
err_mr:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f278b10ef714..a69a908614e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -356,7 +356,8 @@ static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
}
#endif
-static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
+ __be16 proto, u16 vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -381,7 +382,8 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
return 0;
}
-static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
+ __be16 proto, u16 vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -411,8 +413,8 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
- unsigned int i;
- for (i = ETH_ALEN - 1; i; --i) {
+ int i;
+ for (i = ETH_ALEN - 1; i >= 0; --i) {
dst_mac[i] = src_mac & 0xff;
src_mac >>= 8;
}
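
The old loop condition ("i;" rather than "i >= 0;") stopped before index 0, so the most significant MAC byte was never written. A standalone demo of the fixed conversion, using a hypothetical address value:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static void u64_to_mac(unsigned char dst[ETH_ALEN], uint64_t src)
{
	int i;

	for (i = ETH_ALEN - 1; i >= 0; --i) {	/* fixed: now reaches i == 0 */
		dst[i] = src & 0xff;
		src >>= 8;
	}
}

int main(void)
{
	unsigned char mac[ETH_ALEN];

	u64_to_mac(mac, 0x001122334455ULL);	/* hypothetical value */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;	/* prints 00:11:22:33:44:55 */
}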
@@ -1359,6 +1361,27 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
mutex_unlock(&mdev->state_lock);
}
+/* mlx4_en_service_task - run tasks that need to be done periodically
+ */
+static void mlx4_en_service_task(struct work_struct *work)
+{
+ struct delayed_work *delay = to_delayed_work(work);
+ struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
+ service_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ mutex_lock(&mdev->state_lock);
+ if (mdev->device_up) {
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+ mlx4_en_ptp_overflow_check(mdev);
+
+ queue_delayed_work(mdev->workqueue, &priv->service_task,
+ SERVICE_TASK_DELAY);
+ }
+ mutex_unlock(&mdev->state_lock);
+}
+
static void mlx4_en_linkstate(struct work_struct *work)
{
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
@@ -1863,6 +1886,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
cancel_delayed_work(&priv->stats_task);
+ cancel_delayed_work(&priv->service_task);
/* flush any pending task for this netdev */
flush_workqueue(mdev->workqueue);
@@ -1914,6 +1938,75 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ /* device doesn't support time stamping */
+ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
+ return -EINVAL;
+
+ /* TX HW timestamp */
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* RX HW timestamp */
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) {
+ config.tx_type = HWTSTAMP_TX_OFF;
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+
+static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return mlx4_en_hwtstamp_ioctl(dev, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
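
For context, userspace reaches mlx4_en_hwtstamp_ioctl() via the standard SIOCSHWTSTAMP request. A minimal sketch with a hypothetical enable_hwtstamp() helper; note the driver may rewrite the config (e.g. collapsing the PTP filters to HWTSTAMP_FILTER_ALL above), so the structure should be re-checked after the call:

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int enable_hwtstamp(const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		close(fd);
		return -1;
	}
	close(fd);
	/* cfg now holds what the driver actually programmed */
	return 0;
}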
+
static int mlx4_en_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -1931,77 +2024,40 @@ static int mlx4_en_set_features(struct net_device *netdev,
}
-static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 flags)
+static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_dev *mdev = priv->mdev->dev;
- int err;
+ struct mlx4_en_priv *en_priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = en_priv->mdev;
+ u64 mac_u64 = mlx4_en_mac_to_u64(mac);
- if (!mlx4_is_mfunc(mdev))
- return -EOPNOTSUPP;
-
- /* Hardware does not support aging addresses, allow only
- * permanent addresses if ndm_state is given
- */
- if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
- en_info(priv, "Add FDB only supports static addresses\n");
+ if (!is_valid_ether_addr(mac))
return -EINVAL;
- }
-
- if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
- err = dev_uc_add_excl(dev, addr);
- else if (is_multicast_ether_addr(addr))
- err = dev_mc_add_excl(dev, addr);
- else
- err = -EINVAL;
-
- /* Only return duplicate errors if NLM_F_EXCL is set */
- if (err == -EEXIST && !(flags & NLM_F_EXCL))
- err = 0;
- return err;
+ return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
}
-static int mlx4_en_fdb_del(struct ndmsg *ndm,
- struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr)
+static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_dev *mdev = priv->mdev->dev;
- int err;
-
- if (!mlx4_is_mfunc(mdev))
- return -EOPNOTSUPP;
+ struct mlx4_en_priv *en_priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = en_priv->mdev;
- if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
- en_info(priv, "Del FDB only supports static addresses\n");
- return -EINVAL;
- }
+ return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
+}
- if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
- err = dev_uc_del(dev, addr);
- else if (is_multicast_ether_addr(addr))
- err = dev_mc_del(dev, addr);
- else
- err = -EINVAL;
+static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+{
+ struct mlx4_en_priv *en_priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = en_priv->mdev;
- return err;
+ return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
}
-static int mlx4_en_fdb_dump(struct sk_buff *skb,
- struct netlink_callback *cb,
- struct net_device *dev, int idx)
+static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_dev *mdev = priv->mdev->dev;
+ struct mlx4_en_priv *en_priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = en_priv->mdev;
- if (mlx4_is_mfunc(mdev))
- idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
-
- return idx;
+ return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
}
static const struct net_device_ops mlx4_netdev_ops = {
@@ -2014,9 +2070,37 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = mlx4_en_change_mtu,
+ .ndo_do_ioctl = mlx4_en_ioctl,
+ .ndo_tx_timeout = mlx4_en_tx_timeout,
+ .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = mlx4_en_netpoll,
+#endif
+ .ndo_set_features = mlx4_en_set_features,
+ .ndo_setup_tc = mlx4_en_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = mlx4_en_filter_rfs,
+#endif
+};
+
+static const struct net_device_ops mlx4_netdev_ops_master = {
+ .ndo_open = mlx4_en_open,
+ .ndo_stop = mlx4_en_close,
+ .ndo_start_xmit = mlx4_en_xmit,
+ .ndo_select_queue = mlx4_en_select_queue,
+ .ndo_get_stats = mlx4_en_get_stats,
+ .ndo_set_rx_mode = mlx4_en_set_rx_mode,
+ .ndo_set_mac_address = mlx4_en_set_mac,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = mlx4_en_change_mtu,
.ndo_tx_timeout = mlx4_en_tx_timeout,
.ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
+ .ndo_set_vf_mac = mlx4_en_set_vf_mac,
+ .ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
+ .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
+ .ndo_get_vf_config = mlx4_en_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx4_en_netpoll,
#endif
@@ -2025,9 +2109,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
- .ndo_fdb_add = mlx4_en_fdb_add,
- .ndo_fdb_del = mlx4_en_fdb_del,
- .ndo_fdb_dump = mlx4_en_fdb_dump,
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -2088,9 +2169,16 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
+ INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
#ifdef CONFIG_MLX4_EN_DCB
- if (!mlx4_is_slave(priv->mdev->dev))
- dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
+ if (!mlx4_is_slave(priv->mdev->dev)) {
+ if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
+ dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
+ } else {
+ en_info(priv, "enabling only PFC DCB ops\n");
+ dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
+ }
+ }
#endif
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
@@ -2122,6 +2210,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
spin_lock_init(&priv->filters_lock);
#endif
+ /* Initialize time stamping config */
+ priv->hwtstamp_config.flags = 0;
+ priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+
/* Allocate page for receive rings */
err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
@@ -2134,7 +2227,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
/*
* Initialize netdev entry points
*/
- dev->netdev_ops = &mlx4_netdev_ops;
+ if (mlx4_is_master(priv->mdev->dev))
+ dev->netdev_ops = &mlx4_netdev_ops_master;
+ else
+ dev->netdev_ops = &mlx4_netdev_ops;
dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
@@ -2152,8 +2248,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
dev->features = dev->hw_features | NETIF_F_HIGHDMA |
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER;
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
dev->hw_features |= NETIF_F_LOOPBACK;
if (mdev->dev->caps.steering_mode ==
@@ -2199,6 +2295,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
mlx4_en_set_default_moderation(priv);
queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
+
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+ queue_delayed_work(mdev->workqueue, &priv->service_task,
+ SERVICE_TASK_DELAY);
+
return 0;
out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 10c24c784b70..91f2b2c43c12 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -42,6 +42,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int user_prio, struct mlx4_qp_context *context)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->dev;
memset(context, 0, sizeof *context);
context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
@@ -65,6 +66,8 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
context->cqn_send = cpu_to_be32(cqn);
context->cqn_recv = cpu_to_be32(cqn);
context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
+ if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
+ context->param3 |= cpu_to_be32(1 << 30);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index c7f856308e1a..02aee1ebd203 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -320,6 +320,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
}
ring->buf = ring->wqres.buf.direct.buf;
+ ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
+
return 0;
err_hwq:
@@ -554,6 +556,7 @@ static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_cqe *cqe;
struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
struct mlx4_en_rx_alloc *frags;
@@ -565,6 +568,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
int polled = 0;
int ip_summed;
int factor = priv->cqe_factor;
+ u64 timestamp;
if (!priv->port_up)
return 0;
@@ -669,19 +673,27 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
gro_skb->data_len = length;
gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (cqe->vlan_my_qpn &
- cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) {
+ if ((cqe->vlan_my_qpn &
+ cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
+ (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u16 vid = be16_to_cpu(cqe->sl_vid);
- __vlan_hwaccel_put_tag(gro_skb, vid);
+ __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
}
if (dev->features & NETIF_F_RXHASH)
gro_skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
skb_record_rx_queue(gro_skb, cq->ring);
- napi_gro_frags(&cq->napi);
+ if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
+ timestamp = mlx4_en_get_cqe_ts(cqe);
+ mlx4_en_fill_hwtstamps(mdev,
+ skb_hwtstamps(gro_skb),
+ timestamp);
+ }
+
+ napi_gro_frags(&cq->napi);
goto next;
}
@@ -714,9 +726,16 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (dev->features & NETIF_F_RXHASH)
skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
- if (be32_to_cpu(cqe->vlan_my_qpn) &
- MLX4_CQE_VLAN_PRESENT_MASK)
- __vlan_hwaccel_put_tag(skb, be16_to_cpu(cqe->sl_vid));
+ if ((be32_to_cpu(cqe->vlan_my_qpn) &
+ MLX4_CQE_VLAN_PRESENT_MASK) &&
+ (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
+
+ if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
+ timestamp = mlx4_en_get_cqe_ts(cqe);
+ mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
+ timestamp);
+ }
/* Push it up the stack */
netif_receive_skb(skb);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 3488c6d9e6b5..2448f0d669e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -58,10 +58,9 @@ static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
/* build the pkt before xmit */
skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
- if (!skb) {
- en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
+ if (!skb)
return -ENOMEM;
- }
+
skb_reserve(skb, NET_IP_ALIGN);
ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 49308cc65ee7..4e6877a032a8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -118,6 +118,8 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
} else
ring->bf_enabled = true;
+ ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
+
return 0;
err_map:
@@ -192,8 +194,9 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
- int index, u8 owner)
+ int index, u8 owner, u64 timestamp)
{
+ struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
@@ -204,6 +207,12 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
int i;
__be32 *ptr = (__be32 *)tx_desc;
__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
+ struct skb_shared_hwtstamps hwts;
+
+ if (timestamp) {
+ mlx4_en_fill_hwtstamps(mdev, &hwts, timestamp);
+ skb_tstamp_tx(skb, &hwts);
+ }
/* Optimize the common case when there are no wraparounds */
if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
@@ -289,7 +298,7 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
while (ring->cons != ring->prod) {
ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
ring->cons & ring->size_mask,
- !!(ring->cons & ring->size));
+ !!(ring->cons & ring->size), 0);
ring->cons += ring->last_nr_txbb;
cnt++;
}
@@ -318,6 +327,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
u32 packets = 0;
u32 bytes = 0;
int factor = priv->cqe_factor;
+ u64 timestamp = 0;
if (!priv->port_up)
return;
@@ -341,11 +351,14 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
do {
txbbs_skipped += ring->last_nr_txbb;
ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
+ if (ring->tx_info[ring_index].ts_requested)
+ timestamp = mlx4_en_get_cqe_ts(cqe);
+
/* free next descriptor */
ring->last_nr_txbb = mlx4_en_free_tx_desc(
priv, ring, ring_index,
!!((ring->cons + txbbs_skipped) &
- ring->size));
+ ring->size), timestamp);
packets++;
bytes += ring->tx_info[ring_index].nr_bytes;
} while (ring_index != new_index);
@@ -629,6 +642,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->skb = skb;
tx_info->nr_txbb = nr_txbb;
+ /*
+ * If TX timestamping was requested for this skb, mark it as in
+ * progress in skb_shinfo and note it in tx_info so the completion
+ * path reports the timestamp.
+ */
+ if (ring->hwtstamp_tx_type == HWTSTAMP_TX_ON &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_info->ts_requested = 1;
+ }
+
/* Prepare the ctrl segment, apart from opcode+ownership, which depends on
* whether LSO is used */
tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
@@ -729,6 +752,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (bounce)
tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
+ skb_tx_timestamp(skb);
+
if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) {
*(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
op_own |= htonl((bf_index & 0xffff) << 8);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index f6245579962d..b147bdd40768 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -91,7 +91,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
[ 8] = "P_Key violation counter",
[ 9] = "Q_Key violation counter",
[10] = "VMM",
- [12] = "DPDP",
+ [12] = "Dual Port Different Protocol (DPDP) support",
[15] = "Big LSO headers",
[16] = "MW support",
[17] = "APM support",
@@ -109,6 +109,8 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
[41] = "Unicast VEP steering support",
[42] = "Multicast VEP steering support",
[48] = "Counters support",
+ [53] = "Port ETS Scheduler support",
+ [55] = "Port link type sensing support",
[59] = "Port management change event support",
[61] = "64 byte EQE support",
[62] = "64 byte CQE support",
@@ -128,7 +130,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[1] = "RSS Toeplitz Hash Function support",
[2] = "RSS XOR Hash Function support",
[3] = "Device manage flow steering support",
- [4] = "Automatic mac reassignment support"
+ [4] = "Automatic MAC reassignment support",
+ [5] = "Time stamping support"
};
int i;
@@ -442,6 +445,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
+#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
@@ -464,6 +468,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
+#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
@@ -558,6 +563,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->fs_max_num_qp_per_entry = field;
MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
dev_cap->stat_rate_support = stat_rate;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
+ if (field & 0x80)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
dev_cap->flags = flags | (u64)ext_flags << 32;
@@ -648,6 +656,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(dev_cap->max_counters, outbox,
QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);
+ MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
+ if (field32 & (1 << 26))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
+ if (field32 & (1 << 20))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
+
if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
for (i = 1; i <= dev_cap->num_ports; ++i) {
MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
@@ -777,6 +791,11 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
+ /* For guests, disable timestamp */
+ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
+ field &= 0x7f;
+ MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
+
/* For guests, report Blueflame disabled */
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
field &= 0x7f;
@@ -804,6 +823,7 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
+ struct mlx4_priv *priv = mlx4_priv(dev);
u64 def_mac;
u8 port_type;
u16 short_field;
@@ -821,6 +841,9 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
/* set slave default_mac address */
MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
def_mac += slave << 8;
+ /* if a MAC was configured for this VF in the admin DB, use it */
+ if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac)
+ def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
/* get port type - currently only eth is enabled */
@@ -1006,6 +1029,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
#define QUERY_FW_COMM_BASE_OFFSET 0x40
#define QUERY_FW_COMM_BAR_OFFSET 0x48
+#define QUERY_FW_CLOCK_OFFSET 0x50
+#define QUERY_FW_CLOCK_BAR 0x58
+
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
@@ -1080,6 +1106,12 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
fw->comm_bar, fw->comm_base);
mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
+ MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
+ MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR);
+ fw->clock_bar = (fw->clock_bar >> 6) * 2;
+ mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
+ fw->clock_bar, fw->clock_offset);
+
/*
* Round up number of system pages needed in case
* MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
@@ -1367,6 +1399,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
u8 byte_field;
#define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
+#define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -1381,6 +1414,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
goto out;
MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
+ MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
/* QPC/EEC/CQC/EQC/RDMARC attributes */
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 151c2bb380a6..fdf41665a059 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -162,6 +162,7 @@ struct mlx4_init_hca_param {
u64 global_caps;
u16 log_mc_entry_sz;
u16 log_mc_hash_sz;
+ u16 hca_core_clock; /* Internal Clock Frequency (in MHz) */
u8 log_num_qps;
u8 log_num_srqs;
u8 log_num_cqs;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 16abde20e1fc..0d32a82458bf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -513,6 +513,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
+ dev->caps.hca_core_clock = hca_param.hca_core_clock;
+
memset(&dev_cap, 0, sizeof(dev_cap));
dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
err = mlx4_dev_cap(dev, &dev_cap);
@@ -1226,8 +1228,53 @@ static void unmap_bf_area(struct mlx4_dev *dev)
io_mapping_free(mlx4_priv(dev)->bf_mapping);
}
+cycle_t mlx4_read_clock(struct mlx4_dev *dev)
+{
+ u32 clockhi, clocklo, clockhi1;
+ cycle_t cycles;
+ int i;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
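+ /* The counter is read as two 32-bit words; if the high word
+ * changed while the low word was being read, the counter wrapped
+ * between the reads - retry (bounded to 10 attempts).
+ */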
+ for (i = 0; i < 10; i++) {
+ clockhi = swab32(readl(priv->clock_mapping));
+ clocklo = swab32(readl(priv->clock_mapping + 4));
+ clockhi1 = swab32(readl(priv->clock_mapping));
+ if (clockhi == clockhi1)
+ break;
+ }
+
+ cycles = (u64) clockhi << 32 | (u64) clocklo;
+
+ return cycles;
+}
+EXPORT_SYMBOL_GPL(mlx4_read_clock);
+
+
+static int map_internal_clock(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ priv->clock_mapping =
+ ioremap(pci_resource_start(dev->pdev, priv->fw.clock_bar) +
+ priv->fw.clock_offset, MLX4_CLOCK_SIZE);
+
+ if (!priv->clock_mapping)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void unmap_internal_clock(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ if (priv->clock_mapping)
+ iounmap(priv->clock_mapping);
+}
+
static void mlx4_close_hca(struct mlx4_dev *dev)
{
+ unmap_internal_clock(dev);
unmap_bf_area(dev);
if (mlx4_is_slave(dev))
mlx4_slave_exit(dev);
@@ -1445,6 +1492,37 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
goto err_free_icm;
}
+ /*
+ * If the FW supports timestamping, read the HCA core-clock
+ * frequency via the QUERY_HCA command
+ */
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
+ memset(&init_hca, 0, sizeof(init_hca));
+ err = mlx4_QUERY_HCA(dev, &init_hca);
+ if (err) {
+ mlx4_err(dev, "QUERY_HCA command failed, disabling timestamping.\n");
+ dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
+ } else {
+ dev->caps.hca_core_clock =
+ init_hca.hca_core_clock;
+ }
+
+ /* In case we got HCA frequency 0 - disable timestamping
+ * to avoid dividing by zero
+ */
+ if (!dev->caps.hca_core_clock) {
+ dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
+ mlx4_err(dev,
+ "HCA frequency is 0. Timestamping is not supported.\n");
+ } else if (map_internal_clock(dev)) {
+ /*
+ * Map internal clock,
+ * in case of failure disable timestamping
+ */
+ dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
+ mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported.\n");
+ }
+ }
} else {
err = mlx4_init_slave(dev);
if (err) {
@@ -1478,6 +1556,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
return 0;
unmap_bf:
+ unmap_internal_clock(dev);
unmap_bf_area(dev);
err_close:
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 52685524708d..ffc78d2cb0cf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1125,28 +1125,11 @@ static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
return err;
}
-int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- u8 port, int block_mcast_loopback,
- enum mlx4_protocol prot, u64 *reg_id)
+int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ u8 gid[16], u8 port,
+ int block_mcast_loopback,
+ enum mlx4_protocol prot, u64 *reg_id)
{
-
- switch (dev->caps.steering_mode) {
- case MLX4_STEERING_MODE_A0:
- if (prot == MLX4_PROT_ETH)
- return 0;
-
- case MLX4_STEERING_MODE_B0:
- if (prot == MLX4_PROT_ETH)
- gid[7] |= (MLX4_MC_STEER << 1);
-
- if (mlx4_is_mfunc(dev))
- return mlx4_QP_ATTACH(dev, qp, gid, 1,
- block_mcast_loopback, prot);
- return mlx4_qp_attach_common(dev, qp, gid,
- block_mcast_loopback, prot,
- MLX4_MC_STEER);
-
- case MLX4_STEERING_MODE_DEVICE_MANAGED: {
struct mlx4_spec_list spec = { {NULL} };
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
@@ -1180,8 +1163,32 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
list_add_tail(&spec.list, &rule.list);
return mlx4_flow_attach(dev, &rule, reg_id);
- }
+}
+int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ u8 port, int block_mcast_loopback,
+ enum mlx4_protocol prot, u64 *reg_id)
+{
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_A0:
+ if (prot == MLX4_PROT_ETH)
+ return 0;
+
+ case MLX4_STEERING_MODE_B0:
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (MLX4_MC_STEER << 1);
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 1,
+ block_mcast_loopback, prot);
+ return mlx4_qp_attach_common(dev, qp, gid,
+ block_mcast_loopback, prot,
+ MLX4_MC_STEER);
+
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
+ block_mcast_loopback,
+ prot, reg_id);
default:
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index d738454116a0..eac3dae10efe 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -87,7 +87,8 @@ enum {
MLX4_HCR_SIZE = 0x0001c,
MLX4_CLR_INT_SIZE = 0x00008,
MLX4_SLAVE_COMM_BASE = 0x0,
- MLX4_COMM_PAGESIZE = 0x1000
+ MLX4_COMM_PAGESIZE = 0x1000,
+ MLX4_CLOCK_SIZE = 0x00008
};
enum {
@@ -403,6 +404,7 @@ struct mlx4_fw {
u64 clr_int_base;
u64 catas_offset;
u64 comm_base;
+ u64 clock_offset;
struct mlx4_icm *fw_icm;
struct mlx4_icm *aux_icm;
u32 catas_size;
@@ -410,6 +412,7 @@ struct mlx4_fw {
u8 clr_int_bar;
u8 catas_bar;
u8 comm_bar;
+ u8 clock_bar;
};
struct mlx4_comm {
@@ -470,6 +473,30 @@ struct mlx4_slave_state {
enum slave_port_state port_state[MLX4_MAX_PORTS + 1];
};
+#define MLX4_VGT 4095
+#define NO_INDX (-1)
+
+struct mlx4_vport_state {
+ u64 mac;
+ u16 default_vlan;
+ u8 default_qos;
+ u32 tx_rate;
+ bool spoofchk;
+};
+
+struct mlx4_vf_admin_state {
+ struct mlx4_vport_state vport[MLX4_MAX_PORTS + 1];
+};
+
+struct mlx4_vport_oper_state {
+ struct mlx4_vport_state state;
+ int mac_idx;
+ int vlan_idx;
+};
+struct mlx4_vf_oper_state {
+ struct mlx4_vport_oper_state vport[MLX4_MAX_PORTS + 1];
+};
+
struct slave_list {
struct mutex mutex;
struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
@@ -500,6 +527,8 @@ struct mlx4_master_qp0_state {
struct mlx4_mfunc_master_ctx {
struct mlx4_slave_state *slave_state;
+ struct mlx4_vf_admin_state *vf_admin;
+ struct mlx4_vf_oper_state *vf_oper;
struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
int init_port_ref[MLX4_MAX_PORTS + 1];
u16 max_mtu[MLX4_MAX_PORTS + 1];
@@ -826,6 +855,7 @@ struct mlx4_priv {
struct list_head bf_list;
struct mutex bf_mutex;
struct io_mapping *bf_mapping;
+ void __iomem *clock_mapping;
int reserved_mtts;
int fs_hash_mode;
u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
@@ -1127,6 +1157,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
+void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
+int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
/* resource tracker functions*/
@@ -1190,6 +1222,10 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type steer);
+int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ u8 gid[16], u8 port,
+ int block_mcast_loopback,
+ enum mlx4_protocol prot, u64 *reg_id);
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f710b7ce0dcb..b1d7657b2bf5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -40,6 +40,7 @@
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
+#include <linux/net_tstamp.h>
#ifdef CONFIG_MLX4_EN_DCB
#include <linux/dcbnl.h>
#endif
@@ -77,6 +78,7 @@
#define STAMP_SHIFT 31
#define STAMP_VAL 0x7fffffff
#define STATS_DELAY (HZ / 4)
+#define SERVICE_TASK_DELAY (HZ / 4)
#define MAX_NUM_OF_FS_RULES 256
#define MLX4_EN_FILTER_HASH_SHIFT 4
@@ -207,6 +209,7 @@ struct mlx4_en_tx_info {
u8 linear;
u8 data_offset;
u8 inl;
+ u8 ts_requested;
};
@@ -262,6 +265,7 @@ struct mlx4_en_tx_ring {
struct mlx4_bf bf;
bool bf_enabled;
struct netdev_queue *tx_queue;
+ int hwtstamp_tx_type;
};
struct mlx4_en_rx_desc {
@@ -288,6 +292,7 @@ struct mlx4_en_rx_ring {
unsigned long packets;
unsigned long csum_ok;
unsigned long csum_none;
+ int hwtstamp_rx_filter;
};
struct mlx4_en_cq {
@@ -348,6 +353,10 @@ struct mlx4_en_dev {
u32 priv_pdn;
spinlock_t uar_lock;
u8 mac_removed[MLX4_MAX_PORTS + 1];
+ struct cyclecounter cycles;
+ struct timecounter clock;
+ unsigned long last_overflow_check;
+ unsigned long overflow_period;
};
@@ -512,6 +521,7 @@ struct mlx4_en_priv {
struct work_struct watchdog_task;
struct work_struct linkstate_task;
struct delayed_work stats_task;
+ struct delayed_work service_task;
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_port_stats port_stats;
@@ -525,6 +535,7 @@ struct mlx4_en_priv {
struct device *ddev;
int base_tx_qpn;
struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
+ struct hwtstamp_config hwtstamp_config;
#ifdef CONFIG_MLX4_EN_DCB
struct ieee_ets ets;
@@ -624,6 +635,7 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
#ifdef CONFIG_MLX4_EN_DCB
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
+extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
#endif
int mlx4_en_setup_tc(struct net_device *dev, u8 up);
@@ -636,9 +648,21 @@ void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
#define MLX4_EN_NUM_SELF_TEST 5
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
u64 mlx4_en_mac_to_u64(u8 *addr);
+void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
/*
- * Globals
+ * Functions for time stamping
+ */
+u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);
+void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
+ struct skb_shared_hwtstamps *hwts,
+ u64 timestamp);
+void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
+int mlx4_en_timestamp_config(struct net_device *dev,
+ int tx_type,
+ int rx_filter);
+
+/* Globals
*/
extern const struct ethtool_ops mlx4_en_ethtool_ops;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 10c57c86388b..946e0af5faef 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -32,6 +32,7 @@
#include <linux/errno.h>
#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
#include <linux/export.h>
#include <linux/mlx4/cmd.h>
@@ -140,8 +141,9 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
}
if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
- /* MAC already registered, Must not have duplicates */
- err = -EEXIST;
+ /* MAC already registered, increment ref count */
+ err = i;
+ ++table->refs[i];
goto out;
}
}
@@ -164,7 +166,7 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
table->entries[free] = 0;
goto out;
}
-
+ table->refs[free] = 1;
err = free;
++table->total;
out:
@@ -205,12 +207,16 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
struct mlx4_mac_table *table = &info->mac_table;
int index;
- index = find_index(dev, table, mac);
-
mutex_lock(&table->mutex);
+ index = find_index(dev, table, mac);
if (validate_index(dev, table, index))
goto out;
+ if (--table->refs[index]) {
+ mlx4_dbg(dev, "Have more references for index %d, "
+ "no need to modify mac table\n", index);
+ goto out;
+ }
table->entries[index] = 0;
mlx4_set_port_mac_table(dev, port, table->entries);
@@ -304,7 +310,7 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
-static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
+int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
int *index)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
@@ -378,7 +384,7 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);
-static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
@@ -517,7 +523,8 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
/* MTU is configured as the max MTU among all
* the functions on the port. */
mtu = be16_to_cpu(gen_context->mtu);
- mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port]);
+ mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
prev_mtu = slave_st->mtu[port];
slave_st->mtu[port] = mtu;
if (mtu > master->max_mtu[port])
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1391b52f443a..e12e0d2e0ee0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -75,6 +75,7 @@ struct res_gid {
u8 gid[16];
enum mlx4_protocol prot;
enum mlx4_steer_type steer;
+ u64 reg_id;
};
enum res_qp_states {
@@ -352,6 +353,47 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
}
}
+static int update_vport_qp_param(struct mlx4_dev *dev,
+ struct mlx4_cmd_mailbox *inbox,
+ u8 slave)
+{
+ struct mlx4_qp_context *qpc = inbox->buf + 8;
+ struct mlx4_vport_oper_state *vp_oper;
+ struct mlx4_priv *priv;
+ u32 qp_type;
+ int port;
+
+ port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
+ priv = mlx4_priv(dev);
+ vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+
+ if (MLX4_VGT != vp_oper->state.default_vlan) {
+ qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
+ if (MLX4_QP_ST_RC == qp_type)
+ return -EINVAL;
+
+ qpc->pri_path.vlan_index = vp_oper->vlan_idx;
+ qpc->pri_path.fl = (1 << 6) | (1 << 2); /* set cv bit and hide_cqe_vlan bit */
+ qpc->pri_path.feup |= 1 << 3; /* set fvl bit */
+ qpc->pri_path.sched_queue &= 0xC7;
+ qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
+ mlx4_dbg(dev, "qp %d port %d Q 0x%x set vlan to %d vidx %d feup %x fl %x\n",
+ be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
+ (int)(qpc->pri_path.sched_queue), vp_oper->state.default_vlan,
+ vp_oper->vlan_idx, (int)(qpc->pri_path.feup),
+ (int)(qpc->pri_path.fl));
+ }
+ if (vp_oper->state.spoofchk) {
+ qpc->pri_path.feup |= 1 << 5; /* set fsm bit */
+ qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
+ mlx4_dbg(dev, "spoof qp %d port %d feup 0x%x, myLmc 0x%x mindx %d\n",
+ be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
+ (int)qpc->pri_path.feup, (int)qpc->pri_path.grh_mylmc,
+ vp_oper->mac_idx);
+ }
+ return 0;
+}
+
static int mpt_mask(struct mlx4_dev *dev)
{
return dev->caps.num_mpts - 1;
@@ -2797,6 +2839,9 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
update_pkey_index(dev, slave, inbox);
update_gid(dev, inbox, (u8)slave);
adjust_proxy_tun_qkey(dev, vhcr, qpc);
+ err = update_vport_qp_param(dev, inbox, slave);
+ if (err)
+ return err;
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
@@ -2934,7 +2979,7 @@ static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
u8 *gid, enum mlx4_protocol prot,
- enum mlx4_steer_type steer)
+ enum mlx4_steer_type steer, u64 reg_id)
{
struct res_gid *res;
int err;
@@ -2951,6 +2996,7 @@ static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
memcpy(res->gid, gid, 16);
res->prot = prot;
res->steer = steer;
+ res->reg_id = reg_id;
list_add_tail(&res->list, &rqp->mcg_list);
err = 0;
}
@@ -2961,7 +3007,7 @@ static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
u8 *gid, enum mlx4_protocol prot,
- enum mlx4_steer_type steer)
+ enum mlx4_steer_type steer, u64 *reg_id)
{
struct res_gid *res;
int err;
@@ -2971,6 +3017,7 @@ static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
if (!res || res->prot != prot || res->steer != steer)
err = -EINVAL;
else {
+ *reg_id = res->reg_id;
list_del(&res->list);
kfree(res);
err = 0;
@@ -2980,6 +3027,37 @@ static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
return err;
}
+static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ int block_loopback, enum mlx4_protocol prot,
+ enum mlx4_steer_type type, u64 *reg_id)
+{
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
+ block_loopback, prot,
+ reg_id);
+ case MLX4_STEERING_MODE_B0:
+ return mlx4_qp_attach_common(dev, qp, gid,
+ block_loopback, prot, type);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ enum mlx4_protocol prot, enum mlx4_steer_type type,
+ u64 reg_id)
+{
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ return mlx4_flow_detach(dev, reg_id);
+ case MLX4_STEERING_MODE_B0:
+ return mlx4_qp_detach_common(dev, qp, gid, prot, type);
+ default:
+ return -EINVAL;
+ }
+}
+
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -2992,14 +3070,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
int err;
int qpn;
struct res_qp *rqp;
+ u64 reg_id = 0;
int attach = vhcr->op_modifier;
int block_loopback = vhcr->in_modifier >> 31;
u8 steer_type_mask = 2;
enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
- if (dev->caps.steering_mode != MLX4_STEERING_MODE_B0)
- return -EINVAL;
-
qpn = vhcr->in_modifier & 0xffffff;
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err)
@@ -3007,30 +3083,32 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
qp.qpn = qpn;
if (attach) {
- err = add_mcg_res(dev, slave, rqp, gid, prot, type);
- if (err)
+ err = qp_attach(dev, &qp, gid, block_loopback, prot,
+ type, &reg_id);
+ if (err) {
+ pr_err("Fail to attach rule to qp 0x%x\n", qpn);
goto ex_put;
-
- err = mlx4_qp_attach_common(dev, &qp, gid,
- block_loopback, prot, type);
+ }
+ err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
if (err)
- goto ex_rem;
+ goto ex_detach;
} else {
- err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
+ err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
if (err)
goto ex_put;
- err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
- }
+ err = qp_detach(dev, &qp, gid, prot, type, reg_id);
+ if (err)
+ pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
+ qpn, reg_id);
+ }
put_res(dev, slave, qpn, RES_QP);
- return 0;
+ return err;
-ex_rem:
- /* ignore error return below, already in error */
- (void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
+ex_detach:
+ qp_detach(dev, &qp, gid, prot, type, reg_id);
ex_put:
put_res(dev, slave, qpn, RES_QP);
-
return err;
}
@@ -3266,9 +3344,16 @@ static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
struct mlx4_qp qp; /* dummy for calling attach/detach */
list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
- qp.qpn = rqp->local_qpn;
- (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
- rgid->steer);
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ mlx4_flow_detach(dev, rgid->reg_id);
+ break;
+ case MLX4_STEERING_MODE_B0:
+ qp.qpn = rqp->local_qpn;
+ (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
+ rgid->prot, rgid->steer);
+ break;
+ }
list_del(&rgid->list);
kfree(rgid);
}
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index 07a6ebc47c92..b6c60fdef4ff 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1622,25 +1622,7 @@ static struct platform_driver ks8695_driver = {
.resume = ks8695_drv_resume,
};
-/* Module interface */
-
-static int __init
-ks8695_init(void)
-{
- printk(KERN_INFO "%s Ethernet driver, V%s\n",
- MODULENAME, MODULEVERSION);
-
- return platform_driver_register(&ks8695_driver);
-}
-
-static void __exit
-ks8695_cleanup(void)
-{
- platform_driver_unregister(&ks8695_driver);
-}
-
-module_init(ks8695_init);
-module_exit(ks8695_cleanup);
+module_platform_driver(ks8695_driver);
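
module_platform_driver() generates the init/exit boilerplate that was just deleted; roughly (a sketch of the macro's effect, not its literal expansion - the version-banner printk is simply dropped):

static int __init ks8695_driver_init(void)
{
	return platform_driver_register(&ks8695_driver);
}
module_init(ks8695_driver_init);

static void __exit ks8695_driver_exit(void)
{
	platform_driver_unregister(&ks8695_driver);
}
module_exit(ks8695_driver_exit);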
MODULE_AUTHOR("Simtec Electronics");
MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 33bcb63d56a2..727b546a9eb8 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -528,7 +528,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
for (; rxfc != 0; rxfc--) {
rxh = ks8851_rdreg32(ks, KS_RXFHSR);
rxstat = rxh & 0xffff;
- rxlen = rxh >> 16;
+ rxlen = (rxh >> 16) & 0xfff;
netif_dbg(ks, rx_status, ks->netdev,
"rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
@@ -1364,37 +1364,37 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
/* driver bus management functions */
-#ifdef CONFIG_PM
-static int ks8851_suspend(struct spi_device *spi, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+
+static int ks8851_suspend(struct device *dev)
{
- struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
- struct net_device *dev = ks->netdev;
+ struct ks8851_net *ks = dev_get_drvdata(dev);
+ struct net_device *netdev = ks->netdev;
- if (netif_running(dev)) {
- netif_device_detach(dev);
- ks8851_net_stop(dev);
+ if (netif_running(netdev)) {
+ netif_device_detach(netdev);
+ ks8851_net_stop(netdev);
}
return 0;
}
-static int ks8851_resume(struct spi_device *spi)
+static int ks8851_resume(struct device *dev)
{
- struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
- struct net_device *dev = ks->netdev;
+ struct ks8851_net *ks = dev_get_drvdata(dev);
+ struct net_device *netdev = ks->netdev;
- if (netif_running(dev)) {
- ks8851_net_open(dev);
- netif_device_attach(dev);
+ if (netif_running(netdev)) {
+ ks8851_net_open(netdev);
+ netif_device_attach(netdev);
}
return 0;
}
-#else
-#define ks8851_suspend NULL
-#define ks8851_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(ks8851_pm_ops, ks8851_suspend, ks8851_resume);
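
SIMPLE_DEV_PM_OPS binds the suspend/resume pair to every system-sleep transition; roughly equivalent to the following (a sketch assuming CONFIG_PM_SLEEP is set - otherwise the struct is empty and the #ifdef above compiles the callbacks out):

static const struct dev_pm_ops ks8851_pm_ops = {
	.suspend  = ks8851_suspend,
	.resume   = ks8851_resume,
	.freeze   = ks8851_suspend,
	.thaw     = ks8851_resume,
	.poweroff = ks8851_suspend,
	.restore  = ks8851_resume,
};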
+
static int ks8851_probe(struct spi_device *spi)
{
struct net_device *ndev;
@@ -1456,7 +1456,7 @@ static int ks8851_probe(struct spi_device *spi)
SET_ETHTOOL_OPS(ndev, &ks8851_ethtool_ops);
SET_NETDEV_DEV(ndev, &spi->dev);
- dev_set_drvdata(&spi->dev, ks);
+ spi_set_drvdata(spi, ks);
ndev->if_port = IF_PORT_100BASET;
ndev->netdev_ops = &ks8851_netdev_ops;
@@ -1516,7 +1516,7 @@ err_irq:
static int ks8851_remove(struct spi_device *spi)
{
- struct ks8851_net *priv = dev_get_drvdata(&spi->dev);
+ struct ks8851_net *priv = spi_get_drvdata(spi);
if (netif_msg_drv(priv))
dev_info(&spi->dev, "remove\n");
@@ -1532,25 +1532,12 @@ static struct spi_driver ks8851_driver = {
.driver = {
.name = "ks8851",
.owner = THIS_MODULE,
+ .pm = &ks8851_pm_ops,
},
.probe = ks8851_probe,
.remove = ks8851_remove,
- .suspend = ks8851_suspend,
- .resume = ks8851_resume,
};
-
-static int __init ks8851_init(void)
-{
- return spi_register_driver(&ks8851_driver);
-}
-
-static void __exit ks8851_exit(void)
-{
- spi_unregister_driver(&ks8851_driver);
-}
-
-module_init(ks8851_init);
-module_exit(ks8851_exit);
+module_spi_driver(ks8851_driver);
MODULE_DESCRIPTION("KS8851 Network driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index a343066f7b43..ddaf138ce0d4 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -792,20 +792,35 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
frame_hdr = ks->frame_head_info;
while (ks->frame_cnt--) {
+ if (unlikely(!(frame_hdr->sts & RXFSHR_RXFV) ||
+ frame_hdr->len >= RX_BUF_SIZE ||
+ frame_hdr->len <= 0)) {
+
+ /* discard an invalid packet */
+ ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
+ netdev->stats.rx_dropped++;
+ if (!(frame_hdr->sts & RXFSHR_RXFV))
+ netdev->stats.rx_frame_errors++;
+ else
+ netdev->stats.rx_length_errors++;
+ frame_hdr++;
+ continue;
+ }
+
skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
- if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
- (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
+ if (likely(skb)) {
skb_reserve(skb, 2);
/* read data block including the 4-byte CRC */
ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
- skb_put(skb, frame_hdr->len);
+ skb_put(skb, frame_hdr->len - 4);
skb->protocol = eth_type_trans(skb, netdev);
netif_rx(skb);
+ /* exclude CRC size */
+ netdev->stats.rx_bytes += frame_hdr->len - 4;
+ netdev->stats.rx_packets++;
} else {
- pr_err("%s: err:skb alloc\n", __func__);
ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
- if (skb)
- dev_kfree_skb_irq(skb);
+ netdev->stats.rx_dropped++;
}
frame_hdr++;
}
@@ -877,6 +892,8 @@ static irqreturn_t ks_irq(int irq, void *pw)
ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
}
+ if (unlikely(status & IRQ_RXOI))
+ ks->netdev->stats.rx_over_errors++;
/* this should be the last in IRQ handler*/
ks_restore_cmd_reg(ks);
return IRQ_HANDLED;
@@ -1015,6 +1032,9 @@ static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
ks_write_qmu(ks, skb->data, skb->len);
+ /* add tx statistics */
+ netdev->stats.tx_bytes += skb->len;
+ netdev->stats.tx_packets++;
dev_kfree_skb(skb);
} else
retv = NETDEV_TX_BUSY;
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 5d98a9f7bfc7..c7b40aa21f22 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1566,7 +1566,7 @@ static int enc28j60_probe(struct spi_device *spi)
INIT_WORK(&priv->setrx_work, enc28j60_setrx_work_handler);
INIT_WORK(&priv->irq_work, enc28j60_irq_work_handler);
INIT_WORK(&priv->restart_work, enc28j60_restart_work_handler);
- dev_set_drvdata(&spi->dev, priv); /* spi to priv reference */
+ spi_set_drvdata(spi, priv); /* spi to priv reference */
SET_NETDEV_DEV(dev, &spi->dev);
if (!enc28j60_chipset_init(dev)) {
@@ -1618,7 +1618,7 @@ error_alloc:
static int enc28j60_remove(struct spi_device *spi)
{
- struct enc28j60_net *priv = dev_get_drvdata(&spi->dev);
+ struct enc28j60_net *priv = spi_get_drvdata(spi);
if (netif_msg_drv(priv))
printk(KERN_DEBUG DRV_NAME ": remove\n");
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 4f9937e026e5..7be9788ed0f6 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1281,7 +1281,8 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
va = addr;
va += MXGEFW_PAD;
veh = (struct vlan_ethhdr *)va;
- if ((dev->features & NETIF_F_HW_VLAN_RX) == NETIF_F_HW_VLAN_RX &&
+ if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
+ NETIF_F_HW_VLAN_CTAG_RX &&
veh->h_vlan_proto == htons(ETH_P_8021Q)) {
/* fixup csum if needed */
if (skb->ip_summed == CHECKSUM_COMPLETE) {
@@ -1289,7 +1290,7 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
skb->csum = csum_sub(skb->csum, vsum);
}
/* pop tag */
- __vlan_hwaccel_put_tag(skb, ntohs(veh->h_vlan_TCI));
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(veh->h_vlan_TCI));
memmove(va + VLAN_HLEN, va, 2 * ETH_ALEN);
skb->len -= VLAN_HLEN;
skb->data_len -= VLAN_HLEN;
@@ -3592,10 +3593,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
&ss->rx_done.bus,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (ss->rx_done.entry == NULL)
goto abort;
- memset(ss->rx_done.entry, 0, bytes);
bytes = sizeof(*ss->fw_stats);
ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
&ss->fw_stats_bus,
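Several hunks in this series (myri10ge here, pch_gbe and pasemi_mac further down) fold an open-coded memset() into the allocation itself by passing __GFP_ZERO, so dma_alloc_coherent() hands back pre-zeroed memory. A sketch of the conversion, with dev, size and the dma handle assumed:

	buf = dma_alloc_coherent(dev, size, &dma_handle,
				 GFP_KERNEL | __GFP_ZERO);
	if (!buf)
		return -ENOMEM;
	/* no follow-up memset(buf, 0, size) required */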
@@ -3888,8 +3888,8 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->mtu = myri10ge_initial_mtu;
netdev->hw_features = mgp->features | NETIF_F_RXCSUM;
- /* fake NETIF_F_HW_VLAN_RX for good GRO performance */
- netdev->hw_features |= NETIF_F_HW_VLAN_RX;
+ /* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->features = netdev->hw_features;
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index b0b361546365..c20766c2f65b 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -175,13 +175,13 @@ static int sonic_probe1(struct net_device *dev)
/* Allocate the entire chunk of memory for the descriptors.
Note that this cannot cross a 64K boundary. */
- if ((lp->descriptors = dma_alloc_coherent(lp->device,
- SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
- &lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
- printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n",
- dev_name(lp->device));
+ lp->descriptors = dma_alloc_coherent(lp->device,
+ SIZEOF_SONIC_DESC *
+ SONIC_BUS_SCALE(lp->dma_bitmode),
+ &lp->descriptors_laddr,
+ GFP_KERNEL);
+ if (lp->descriptors == NULL)
goto out;
- }
/* Now set up the pointers to point to the appropriate places */
lp->cda = lp->descriptors;
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 0ffde69c8d01..346a4e025c34 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -202,13 +202,13 @@ static int macsonic_init(struct net_device *dev)
/* Allocate the entire chunk of memory for the descriptors.
Note that this cannot cross a 64K boundary. */
- if ((lp->descriptors = dma_alloc_coherent(lp->device,
- SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
- &lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
- printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n",
- dev_name(lp->device));
+ lp->descriptors = dma_alloc_coherent(lp->device,
+ SIZEOF_SONIC_DESC *
+ SONIC_BUS_SCALE(lp->dma_bitmode),
+ &lp->descriptors_laddr,
+ GFP_KERNEL);
+ if (lp->descriptors == NULL)
return -ENOMEM;
- }
/* Now set up the pointers to point to the appropriate places */
lp->cda = lp->descriptors;
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 77c070de621e..d3b47003a575 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -911,7 +911,7 @@ static void rx_irq(struct net_device *ndev)
unsigned short tag;
tag = ntohs(extsts & EXTSTS_VTG_MASK);
- __vlan_hwaccel_put_tag(skb, tag);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
}
#endif
rx_rc = netif_rx(skb);
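A recurring theme from here on: the VLAN feature flags gain a CTAG_ infix (NETIF_F_HW_VLAN_RX becomes NETIF_F_HW_VLAN_CTAG_RX, and so on) and __vlan_hwaccel_put_tag() takes the tag protocol as an explicit __be16. A sketch of the converted receive-side call, assuming vid is the tag the hardware stripped:

	#include <linux/if_vlan.h>

	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

One oddity worth flagging: the ns83820 hunk above passes htons(ETH_P_IPV6) as the protocol, which reads like a slip for htons(ETH_P_8021Q); it is reproduced here as committed.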
@@ -2193,7 +2193,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
#ifdef NS83820_VLAN_ACCEL_SUPPORT
/* We also support hardware vlan acceleration */
- ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
#endif
if (using_dac) {
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 46795e403467..1bd419dbda6d 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -424,7 +424,6 @@ static void sonic_rx(struct net_device *dev)
/* Malloc up new buffer. */
new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
if (new_skb == NULL) {
- printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name);
lp->stats.rx_dropped++;
break;
}
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 5e4748e855f6..c2e0256fe3df 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -197,14 +197,12 @@ static int __init sonic_probe1(struct net_device *dev)
* We also allocate extra space for a pointer to allow freeing
* this structure later on (in xtsonic_cleanup_module()).
*/
- lp->descriptors =
- dma_alloc_coherent(lp->device,
- SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
- &lp->descriptors_laddr, GFP_KERNEL);
-
+ lp->descriptors = dma_alloc_coherent(lp->device,
+ SIZEOF_SONIC_DESC *
+ SONIC_BUS_SCALE(lp->dma_bitmode),
+ &lp->descriptors_laddr,
+ GFP_KERNEL);
if (lp->descriptors == NULL) {
- printk(KERN_ERR "%s: couldn't alloc DMA memory for "
- " descriptors.\n", dev_name(lp->device));
err = -ENOMEM;
goto out;
}
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index bfd887382e19..51b00941302c 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -80,6 +80,7 @@
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/tcp.h>
+#include <net/checksum.h>
#include <asm/div64.h>
#include <asm/irq.h>
@@ -7919,7 +7920,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_RXCSUM | NETIF_F_LRO;
dev->features |= dev->hw_features |
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (sp->device_type & XFRAME_II_DEVICE) {
dev->hw_features |= NETIF_F_UFO;
if (ufo)
@@ -8337,16 +8338,13 @@ static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
struct iphdr *ip = lro->iph;
struct tcphdr *tcp = lro->tcph;
- __sum16 nchk;
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
/* Update L3 header */
+ csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
ip->tot_len = htons(lro->total_len);
- ip->check = 0;
- nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
- ip->check = nchk;
/* Update L4 header */
tcp->ack_seq = lro->tcp_ack;
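This hunk, like the netxen_process_lro change further down, replaces the zero-and-recompute sequence (ip->check = 0 followed by ip_fast_csum() over the header) with csum_replace2() from <net/checksum.h>, which folds the old and new 16-bit values into the existing checksum per RFC 1624 instead of re-summing the whole header. A sketch of the idiom for a tot_len update, with iph and new_len (network byte order) assumed:

	#include <net/checksum.h>

	csum_replace2(&iph->check, iph->tot_len, new_len);
	iph->tot_len = new_len;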
@@ -8557,7 +8555,7 @@ static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
skb->protocol = eth_type_trans(skb, dev);
if (vlan_tag && sp->vlan_strip_flag)
- __vlan_hwaccel_put_tag(skb, vlan_tag);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
if (sp->config.napi)
netif_receive_skb(skb);
else
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 794444e09492..cbfaed5f2f8d 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -312,7 +312,7 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
if (ext_info->vlan &&
ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
- __vlan_hwaccel_put_tag(skb, ext_info->vlan);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ext_info->vlan);
napi_gro_receive(ring->napi_p, skb);
vxge_debug_entryexit(VXGE_TRACE,
@@ -3300,12 +3300,13 @@ static void vxge_tx_watchdog(struct net_device *dev)
/**
* vxge_vlan_rx_add_vid
* @dev: net device pointer.
+ * @proto: vlan protocol
* @vid: vid
*
* Add the vlan id to the devices vlan id table
*/
static int
-vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+vxge_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct vxgedev *vdev = netdev_priv(dev);
struct vxge_vpath *vpath;
@@ -3323,14 +3324,15 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
}
/**
- * vxge_vlan_rx_add_vid
+ * vxge_vlan_rx_kill_vid
* @dev: net device pointer.
+ * @proto: vlan protocol
* @vid: vid
*
* Remove the vlan id from the device's vlan id table
*/
static int
-vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+vxge_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct vxgedev *vdev = netdev_priv(dev);
struct vxge_vpath *vpath;
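The ndo_vlan_rx_add_vid()/ndo_vlan_rx_kill_vid() methods now also receive the VLAN protocol, and the vid becomes a plain u16. The prototypes every converted driver implements, sketched with a placeholder name:

	static int foo_vlan_rx_add_vid(struct net_device *dev,
				       __be16 proto, u16 vid);
	static int foo_vlan_rx_kill_vid(struct net_device *dev,
					__be16 proto, u16 vid);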
@@ -3415,12 +3417,12 @@ static int vxge_device_register(struct __vxge_hw_device *hldev,
ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_HW_VLAN_TX;
+ NETIF_F_HW_VLAN_CTAG_TX;
if (vdev->config.rth_steering != NO_STEERING)
ndev->hw_features |= NETIF_F_RXHASH;
ndev->features |= ndev->hw_features |
- NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->netdev_ops = &vxge_netdev_ops;
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index 63e7af44366f..cb9e63831500 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -152,8 +152,6 @@ static void netx_eth_receive(struct net_device *ndev)
skb = netdev_alloc_skb(ndev, len);
if (unlikely(skb == NULL)) {
- printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
- ndev->name);
ndev->stats.rx_dropped++;
return;
}
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 162da8975b05..3df8287b7452 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -287,23 +287,16 @@ static int w90p910_init_desc(struct net_device *dev)
ether = netdev_priv(dev);
pdev = ether->pdev;
- ether->tdesc = (struct tran_pdesc *)
- dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc),
- &ether->tdesc_phys, GFP_KERNEL);
-
- if (!ether->tdesc) {
- dev_err(&pdev->dev, "Failed to allocate memory for tx desc\n");
+ ether->tdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc),
+ &ether->tdesc_phys, GFP_KERNEL);
+ if (!ether->tdesc)
return -ENOMEM;
- }
-
- ether->rdesc = (struct recv_pdesc *)
- dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc),
- &ether->rdesc_phys, GFP_KERNEL);
+ ether->rdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc),
+ &ether->rdesc_phys, GFP_KERNEL);
if (!ether->rdesc) {
- dev_err(&pdev->dev, "Failed to allocate memory for rx desc\n");
dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
- ether->tdesc, ether->tdesc_phys);
+ ether->tdesc, ether->tdesc_phys);
return -ENOMEM;
}
@@ -737,7 +730,6 @@ static void netdev_rx(struct net_device *dev)
data = ether->rdesc->recv_buf[ether->cur_rx];
skb = netdev_alloc_skb(dev, length + 2);
if (!skb) {
- dev_err(&pdev->dev, "get skb buffer error\n");
ether->stats.rx_dropped++;
return;
}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 0b8de12bcbca..b003fe53c8e2 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2200,6 +2200,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct ring_desc *start_tx;
struct ring_desc *prev_tx;
struct nv_skb_map *prev_tx_ctx;
+ struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
unsigned long flags;
/* add fragments to entries count */
@@ -2261,12 +2262,31 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
do {
prev_tx = put_tx;
prev_tx_ctx = np->put_tx_ctx;
+ if (!start_tx_ctx)
+ start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
+
bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
np->put_tx_ctx->dma = skb_frag_dma_map(
&np->pci_dev->dev,
frag, offset,
bcnt,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+
+ /* Unwind the mapped fragments */
+ do {
+ nv_unmap_txskb(np, start_tx_ctx);
+ if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
+ tmp_tx_ctx = np->first_tx_ctx;
+ } while (tmp_tx_ctx != np->put_tx_ctx);
+ kfree_skb(skb);
+ np->put_tx_ctx = start_tx_ctx;
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_dropped++;
+ u64_stats_update_end(&np->swstats_tx_syncp);
+ return NETDEV_TX_OK;
+ }
+
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 0;
put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
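Both nv_start_xmit() hunks add the same recovery path: if skb_frag_dma_map() fails partway through a frame, unmap the fragments already mapped, drop the skb, rewind put_tx_ctx and count the drop. Note that, as committed, the loop hands start_tx_ctx to nv_unmap_txskb() on every pass while tmp_tx_ctx does the walking, so it appears to unmap the same entry repeatedly; the sketch below shows the walk as presumably intended, unmapping the cursor itself (driver field names assumed):

	struct nv_skb_map *tmp = start_tx_ctx;

	do {
		nv_unmap_txskb(np, tmp);
		if (unlikely(tmp++ == np->last_tx_ctx))
			tmp = np->first_tx_ctx;	/* wrap at ring end */
	} while (tmp != np->put_tx_ctx);
	kfree_skb(skb);
	np->put_tx_ctx = start_tx_ctx;
	return NETDEV_TX_OK;	/* frame dropped, not requeued */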
@@ -2327,7 +2347,8 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
struct ring_desc_ex *start_tx;
struct ring_desc_ex *prev_tx;
struct nv_skb_map *prev_tx_ctx;
- struct nv_skb_map *start_tx_ctx;
+ struct nv_skb_map *start_tx_ctx = NULL;
+ struct nv_skb_map *tmp_tx_ctx = NULL;
unsigned long flags;
/* add fragments to entries count */
@@ -2392,11 +2413,29 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
prev_tx = put_tx;
prev_tx_ctx = np->put_tx_ctx;
bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
+ if (!start_tx_ctx)
+ start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
np->put_tx_ctx->dma = skb_frag_dma_map(
&np->pci_dev->dev,
frag, offset,
bcnt,
DMA_TO_DEVICE);
+
+ if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+
+ /* Unwind the mapped fragments */
+ do {
+ nv_unmap_txskb(np, start_tx_ctx);
+ if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
+ tmp_tx_ctx = np->first_tx_ctx;
+ } while (tmp_tx_ctx != np->put_tx_ctx);
+ kfree_skb(skb);
+ np->put_tx_ctx = start_tx_ctx;
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_dropped++;
+ u64_stats_update_end(&np->swstats_tx_syncp);
+ return NETDEV_TX_OK;
+ }
np->put_tx_ctx->dma_len = bcnt;
np->put_tx_ctx->dma_single = 0;
put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
@@ -2922,15 +2961,15 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
/*
- * There's need to check for NETIF_F_HW_VLAN_RX here.
- * Even if vlan rx accel is disabled,
+ * There's a need to check for NETIF_F_HW_VLAN_CTAG_RX
+ * here. Even if vlan rx accel is disabled,
* NV_RX3_VLAN_TAG_PRESENT is pseudo randomly set.
*/
- if (dev->features & NETIF_F_HW_VLAN_RX &&
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
- __vlan_hwaccel_put_tag(skb, vid);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
napi_gro_receive(&np->napi, skb);
u64_stats_update_begin(&np->swstats_rx_syncp);
@@ -4777,7 +4816,7 @@ static netdev_features_t nv_fix_features(struct net_device *dev,
netdev_features_t features)
{
/* vlan is dependent on rx checksum offload */
- if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
+ if (features & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
features |= NETIF_F_RXCSUM;
return features;
@@ -4789,12 +4828,12 @@ static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
spin_lock_irq(&np->lock);
- if (features & NETIF_F_HW_VLAN_RX)
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
else
np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
- if (features & NETIF_F_HW_VLAN_TX)
+ if (features & NETIF_F_HW_VLAN_CTAG_TX)
np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
else
np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
@@ -4831,7 +4870,7 @@ static int nv_set_features(struct net_device *dev, netdev_features_t features)
spin_unlock_irq(&np->lock);
}
- if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX))
+ if (changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX))
nv_vlan_mode(dev, features);
return 0;
@@ -5025,7 +5064,6 @@ static int nv_loopback_test(struct net_device *dev)
pkt_len = ETH_DATA_LEN;
tx_skb = netdev_alloc_skb(dev, pkt_len);
if (!tx_skb) {
- netdev_err(dev, "netdev_alloc_skb() failed during loopback test\n");
ret = 0;
goto out;
}
@@ -5667,7 +5705,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
np->vlanctl_bits = 0;
if (id->driver_data & DEV_HAS_VLAN) {
np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
- dev->hw_features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
}
dev->features |= dev->hw_features;
@@ -5958,7 +5997,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
"csum " : "",
- dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
+ dev->features & (NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX) ?
"vlan " : "",
dev->features & (NETIF_F_LOOPBACK) ?
"loopback " : "",
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index efa29b712d5f..55a5548d6add 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1409,9 +1409,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
dma_alloc_coherent(&pldat->pdev->dev,
pldat->dma_buff_size, &dma_handle,
GFP_KERNEL);
-
if (pldat->dma_buff_base_v == NULL) {
- dev_err(&pdev->dev, "error getting DMA region.\n");
ret = -ENOMEM;
goto err_out_free_irq;
}
@@ -1434,13 +1432,11 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
/* Get MAC address from current HW setting (POR state is all zeros) */
__lpc_get_mac(pldat, ndev->dev_addr);
-#ifdef CONFIG_OF_NET
if (!is_valid_ether_addr(ndev->dev_addr)) {
const char *macaddr = of_get_mac_address(pdev->dev.of_node);
if (macaddr)
memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
}
-#endif
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 73ce7dd6b954..0c1c65a9ce5e 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1469,13 +1469,11 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
- &rx_ring->rx_buff_pool_logic,
- GFP_KERNEL);
- if (!rx_ring->rx_buff_pool) {
- pr_err("Unable to allocate memory for the receive pool buffer\n");
+ &rx_ring->rx_buff_pool_logic,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!rx_ring->rx_buff_pool)
return -ENOMEM;
- }
- memset(rx_ring->rx_buff_pool, 0, size);
+
rx_ring->rx_buff_pool_size = size;
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
@@ -1774,13 +1772,12 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
- &tx_ring->dma, GFP_KERNEL);
+ &tx_ring->dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!tx_ring->desc) {
vfree(tx_ring->buffer_info);
- pr_err("Unable to allocate memory for the transmit descriptor ring\n");
return -ENOMEM;
}
- memset(tx_ring->desc, 0, tx_ring->size);
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -1820,14 +1817,12 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
- &rx_ring->dma, GFP_KERNEL);
-
+ &rx_ring->dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!rx_ring->desc) {
- pr_err("Unable to allocate memory for the receive descriptor ring\n");
vfree(rx_ring->buffer_info);
return -ENOMEM;
}
- memset(rx_ring->desc, 0, rx_ring->size);
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
for (desNo = 0; desNo < rx_ring->count; desNo++) {
@@ -2268,7 +2263,7 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
if (err) {
adapter->rx_buffer_len = old_rx_buffer_len;
pch_gbe_up(adapter);
- return -ENOMEM;
+ return err;
} else {
netdev->mtu = new_mtu;
adapter->hw.mac.max_frame_size = max_frame;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index b1cfbb75ff1e..a5f0b5da6149 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -441,12 +441,11 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
RX_RING_SIZE * sizeof(u64),
- &ring->buf_dma, GFP_KERNEL);
+ &ring->buf_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!ring->buffers)
goto out_ring_desc;
- memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));
-
write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno),
PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma));
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index a8669adecc97..0e1797295a48 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -35,6 +35,16 @@ config QLCNIC
This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet
devices.
+config QLCNIC_SRIOV
+ bool "QLOGIC QLCNIC 83XX family SR-IOV Support"
+ depends on QLCNIC && PCI_IOV
+ default y
+ ---help---
+ This configuration parameter enables Single Root Input/Output
+ Virtualization support for QLE83XX Converged Ethernet devices.
+ This allows for virtual function acceleration in virtualized
+ environments.
+
config QLGE
tristate "QLogic QLGE 10Gb Ethernet Driver Support"
depends on PCI
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index eb3dfdbb642b..322a36b76727 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -955,9 +955,10 @@ typedef struct nx_mac_list_s {
uint8_t mac_addr[ETH_ALEN+2];
} nx_mac_list_t;
-struct nx_vlan_ip_list {
+struct nx_ip_list {
struct list_head list;
__be32 ip_addr;
+ bool master;
};
/*
@@ -1605,7 +1606,7 @@ struct netxen_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
struct list_head mac_list;
- struct list_head vlan_ip_list;
+ struct list_head ip_list;
spinlock_t tx_clean_lock;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 4782dcfde736..7692dfd4f262 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -27,6 +27,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
+#include <net/checksum.h>
#include "netxen_nic.h"
#include "netxen_nic_hw.h"
@@ -1641,9 +1642,8 @@ netxen_process_lro(struct netxen_adapter *adapter,
th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2));
length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ csum_replace2(&iph->check, iph->tot_len, htons(length));
iph->tot_len = htons(length);
- iph->check = 0;
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
th->psh = push;
th->seq = htonl(seq_number);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 501f49207da5..af951f343ff6 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -90,7 +90,7 @@ static irqreturn_t netxen_intr(int irq, void *data);
static irqreturn_t netxen_msi_intr(int irq, void *data);
static irqreturn_t netxen_msix_intr(int irq, void *data);
-static void netxen_free_vlan_ip_list(struct netxen_adapter *);
+static void netxen_free_ip_list(struct netxen_adapter *, bool);
static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *stats);
@@ -1345,7 +1345,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
}
if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX)
- netdev->hw_features |= NETIF_F_HW_VLAN_TX;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)
netdev->hw_features |= NETIF_F_LRO;
@@ -1450,7 +1450,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
- INIT_LIST_HEAD(&adapter->vlan_ip_list);
+ INIT_LIST_HEAD(&adapter->ip_list);
err = netxen_setup_pci_map(adapter);
if (err)
@@ -1585,7 +1585,7 @@ static void netxen_nic_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->tx_timeout_task);
- netxen_free_vlan_ip_list(adapter);
+ netxen_free_ip_list(adapter, false);
netxen_nic_detach(adapter);
nx_decr_dev_ref_cnt(adapter);
@@ -3137,62 +3137,77 @@ netxen_destip_supported(struct netxen_adapter *adapter)
}
static void
-netxen_free_vlan_ip_list(struct netxen_adapter *adapter)
+netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
{
- struct nx_vlan_ip_list *cur;
- struct list_head *head = &adapter->vlan_ip_list;
+ struct nx_ip_list *cur, *tmp_cur;
- while (!list_empty(head)) {
- cur = list_entry(head->next, struct nx_vlan_ip_list, list);
- netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN);
- list_del(&cur->list);
- kfree(cur);
+ list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) {
+ if (master) {
+ if (cur->master) {
+ netxen_config_ipaddr(adapter, cur->ip_addr,
+ NX_IP_DOWN);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+ } else {
+ netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN);
+ list_del(&cur->list);
+ kfree(cur);
+ }
}
-
}
-static void
-netxen_list_config_vlan_ip(struct netxen_adapter *adapter,
+
+static bool
+netxen_list_config_ip(struct netxen_adapter *adapter,
struct in_ifaddr *ifa, unsigned long event)
{
struct net_device *dev;
- struct nx_vlan_ip_list *cur, *tmp_cur;
+ struct nx_ip_list *cur, *tmp_cur;
struct list_head *head;
+ bool ret = false;
dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
if (dev == NULL)
- return;
-
- if (!is_vlan_dev(dev))
- return;
+ goto out;
switch (event) {
case NX_IP_UP:
- list_for_each(head, &adapter->vlan_ip_list) {
- cur = list_entry(head, struct nx_vlan_ip_list, list);
+ list_for_each(head, &adapter->ip_list) {
+ cur = list_entry(head, struct nx_ip_list, list);
if (cur->ip_addr == ifa->ifa_address)
- return;
+ goto out;
}
- cur = kzalloc(sizeof(struct nx_vlan_ip_list), GFP_ATOMIC);
+ cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC);
if (cur == NULL)
- return;
-
+ goto out;
+ if (dev->priv_flags & IFF_802_1Q_VLAN)
+ dev = vlan_dev_real_dev(dev);
+ cur->master = !!netif_is_bond_master(dev);
cur->ip_addr = ifa->ifa_address;
- list_add_tail(&cur->list, &adapter->vlan_ip_list);
+ list_add_tail(&cur->list, &adapter->ip_list);
+ netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
+ ret = true;
break;
case NX_IP_DOWN:
list_for_each_entry_safe(cur, tmp_cur,
- &adapter->vlan_ip_list, list) {
+ &adapter->ip_list, list) {
if (cur->ip_addr == ifa->ifa_address) {
list_del(&cur->list);
kfree(cur);
+ netxen_config_ipaddr(adapter, ifa->ifa_address,
+ NX_IP_DOWN);
+ ret = true;
break;
}
}
}
+out:
+ return ret;
}
+
static void
netxen_config_indev_addr(struct netxen_adapter *adapter,
struct net_device *dev, unsigned long event)
@@ -3209,14 +3224,10 @@ netxen_config_indev_addr(struct netxen_adapter *adapter,
for_ifa(indev) {
switch (event) {
case NETDEV_UP:
- netxen_config_ipaddr(adapter,
- ifa->ifa_address, NX_IP_UP);
- netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP);
+ netxen_list_config_ip(adapter, ifa, NX_IP_UP);
break;
case NETDEV_DOWN:
- netxen_config_ipaddr(adapter,
- ifa->ifa_address, NX_IP_DOWN);
- netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN);
+ netxen_list_config_ip(adapter, ifa, NX_IP_DOWN);
break;
default:
break;
@@ -3231,23 +3242,78 @@ netxen_restore_indev_addr(struct net_device *netdev, unsigned long event)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
- struct nx_vlan_ip_list *pos, *tmp_pos;
+ struct nx_ip_list *pos, *tmp_pos;
unsigned long ip_event;
ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
netxen_config_indev_addr(adapter, netdev, event);
- list_for_each_entry_safe(pos, tmp_pos, &adapter->vlan_ip_list, list) {
+ list_for_each_entry_safe(pos, tmp_pos, &adapter->ip_list, list) {
netxen_config_ipaddr(adapter, pos->ip_addr, ip_event);
}
}
+static inline bool
+netxen_config_checkdev(struct net_device *dev)
+{
+ struct netxen_adapter *adapter;
+
+ if (!is_netxen_netdev(dev))
+ return false;
+ adapter = netdev_priv(dev);
+ if (!adapter)
+ return false;
+ if (!netxen_destip_supported(adapter))
+ return false;
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return false;
+
+ return true;
+}
+
+/**
+ * netxen_config_master - configure addresses based on master
+ * @dev: netxen device
+ * @event: netdev event
+ */
+static void netxen_config_master(struct net_device *dev, unsigned long event)
+{
+ struct net_device *master, *slave;
+ struct netxen_adapter *adapter = netdev_priv(dev);
+
+ rcu_read_lock();
+ master = netdev_master_upper_dev_get_rcu(dev);
+ /*
+ * This is the case where the netxen nic is being
+ * enslaved and is dev_open()ed in bond_enslave().
+ * Now we should program the bond's (and its vlans')
+ * addresses in the netxen NIC.
+ */
+ if (master && netif_is_bond_master(master) &&
+ !netif_is_bond_slave(dev)) {
+ netxen_config_indev_addr(adapter, master, event);
+ for_each_netdev_rcu(&init_net, slave)
+ if (slave->priv_flags & IFF_802_1Q_VLAN &&
+ vlan_dev_real_dev(slave) == master)
+ netxen_config_indev_addr(adapter, slave, event);
+ }
+ rcu_read_unlock();
+ /*
+ * This is the case where the netxen nic is being
+ * released and is dev_close()ed in bond_release()
+ * just before IFF_BONDING is stripped.
+ */
+ if (!master && dev->priv_flags & IFF_BONDING)
+ netxen_free_ip_list(adapter, true);
+}
+
static int netxen_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct netxen_adapter *adapter;
struct net_device *dev = (struct net_device *)ptr;
struct net_device *orig_dev = dev;
+ struct net_device *slave;
recheck:
if (dev == NULL)
@@ -3257,19 +3323,28 @@ recheck:
dev = vlan_dev_real_dev(dev);
goto recheck;
}
-
- if (!is_netxen_netdev(dev))
- goto done;
-
- adapter = netdev_priv(dev);
-
- if (!adapter)
- goto done;
-
- if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
- goto done;
-
- netxen_config_indev_addr(adapter, orig_dev, event);
+ if (event == NETDEV_UP || event == NETDEV_DOWN) {
+ /* If this is a bonding device, look for netxen-based slaves */
+ if (netif_is_bond_master(dev)) {
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(dev, slave) {
+ if (!netxen_config_checkdev(slave))
+ continue;
+ adapter = netdev_priv(slave);
+ netxen_config_indev_addr(adapter,
+ orig_dev, event);
+ }
+ rcu_read_unlock();
+ } else {
+ if (!netxen_config_checkdev(dev))
+ goto done;
+ adapter = netdev_priv(dev);
+ /* Act only if the actual netxen is the target */
+ if (orig_dev == dev)
+ netxen_config_master(dev, event);
+ netxen_config_indev_addr(adapter, orig_dev, event);
+ }
+ }
done:
return NOTIFY_DONE;
}
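The reworked notifier fans a NETDEV_UP/NETDEV_DOWN event on a bond master out to every netxen-backed slave under RCU. The for_each_netdev_in_bond_rcu() iterator used for that is, roughly, a filtered walk of init_net's device list; a sketch of its expansion, assuming the contemporaneous <linux/netdevice.h> definition:

	for_each_netdev_rcu(&init_net, slave)
		if (netdev_master_upper_dev_get_rcu(slave) == bond_dev) {
			/* slave is enslaved to bond_dev */
		}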
@@ -3279,12 +3354,12 @@ netxen_inetaddr_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct netxen_adapter *adapter;
- struct net_device *dev;
-
+ struct net_device *dev, *slave;
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+ unsigned long ip_event;
dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
-
+ ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
recheck:
if (dev == NULL)
goto done;
@@ -3293,31 +3368,24 @@ recheck:
dev = vlan_dev_real_dev(dev);
goto recheck;
}
-
- if (!is_netxen_netdev(dev))
- goto done;
-
- adapter = netdev_priv(dev);
-
- if (!adapter || !netxen_destip_supported(adapter))
- goto done;
-
- if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
- goto done;
-
- switch (event) {
- case NETDEV_UP:
- netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
- netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP);
- break;
- case NETDEV_DOWN:
- netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN);
- netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN);
- break;
- default:
- break;
+ if (event == NETDEV_UP || event == NETDEV_DOWN) {
+ /* If this is a bonding device, look for netxen-based slaves */
+ if (netif_is_bond_master(dev)) {
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(dev, slave) {
+ if (!netxen_config_checkdev(slave))
+ continue;
+ adapter = netdev_priv(slave);
+ netxen_list_config_ip(adapter, ifa, ip_event);
+ }
+ rcu_read_unlock();
+ } else {
+ if (!netxen_config_checkdev(dev))
+ goto done;
+ adapter = netdev_priv(dev);
+ netxen_list_config_ip(adapter, ifa, ip_event);
+ }
}
-
done:
return NOTIFY_DONE;
}
@@ -3334,7 +3402,7 @@ static void
netxen_restore_indev_addr(struct net_device *dev, unsigned long event)
{ }
static void
-netxen_free_vlan_ip_list(struct netxen_adapter *adapter)
+netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
{ }
#endif
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 8fd38cb6d26a..91a8fcd6c246 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -312,7 +312,6 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
qdev->lrg_buffer_len);
if (unlikely(!lrg_buf_cb->skb)) {
- netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
qdev->lrg_buf_skb_check++;
} else {
/*
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
index 7722a203e388..4b1fb3faa3b7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/Makefile
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -8,4 +8,6 @@ qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \
qlcnic_sysfs.o qlcnic_minidump.o qlcnic_83xx_hw.o \
qlcnic_83xx_init.o qlcnic_83xx_vnic.o \
- qlcnic_minidump.o
+ qlcnic_minidump.o qlcnic_sriov_common.o
+
+qlcnic-$(CONFIG_QLCNIC_SRIOV) += qlcnic_sriov_pf.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index ba3c72fce1f2..90c253b145ef 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,9 +37,9 @@
#include "qlcnic_83xx_hw.h"
#define _QLCNIC_LINUX_MAJOR 5
-#define _QLCNIC_LINUX_MINOR 1
-#define _QLCNIC_LINUX_SUBVERSION 35
-#define QLCNIC_LINUX_VERSIONID "5.1.35"
+#define _QLCNIC_LINUX_MINOR 2
+#define _QLCNIC_LINUX_SUBVERSION 42
+#define QLCNIC_LINUX_VERSIONID "5.2.42"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -347,8 +347,14 @@ struct qlcnic_rx_buffer {
* Interrupt coalescing defaults. The defaults are for 1500 MTU. It is
* adjusted based on configured MTU.
*/
-#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
-#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
+#define QLCNIC_INTR_COAL_TYPE_RX 1
+#define QLCNIC_INTR_COAL_TYPE_TX 2
+
+#define QLCNIC_DEF_INTR_COALESCE_RX_TIME_US 3
+#define QLCNIC_DEF_INTR_COALESCE_RX_PACKETS 256
+
+#define QLCNIC_DEF_INTR_COALESCE_TX_TIME_US 64
+#define QLCNIC_DEF_INTR_COALESCE_TX_PACKETS 64
#define QLCNIC_INTR_DEFAULT 0x04
#define QLCNIC_CONFIG_INTR_COALESCE 3
@@ -359,6 +365,8 @@ struct qlcnic_nic_intr_coalesce {
u8 sts_ring_mask;
u16 rx_packets;
u16 rx_time_us;
+ u16 tx_packets;
+ u16 tx_time_us;
u16 flag;
u32 timer_out;
};
@@ -449,6 +457,7 @@ struct qlcnic_hardware_context {
struct qlc_83xx_idc idc;
struct qlc_83xx_fw_info fw_info;
struct qlcnic_intrpt_config *intr_tbl;
+ struct qlcnic_sriov *sriov;
u32 *reg_tbl;
u32 *ext_reg_tbl;
u32 mbox_aen[QLC_83XX_MBX_AEN_CNT];
@@ -510,13 +519,13 @@ struct qlcnic_host_sds_ring {
int irq;
dma_addr_t phys_addr;
- char name[IFNAMSIZ+4];
+ char name[IFNAMSIZ + 12];
} ____cacheline_internodealigned_in_smp;
struct qlcnic_host_tx_ring {
int irq;
void __iomem *crb_intr_mask;
- char name[IFNAMSIZ+4];
+ char name[IFNAMSIZ + 12];
u16 ctx_id;
u32 producer;
u32 sw_consumer;
@@ -896,6 +905,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_FW_RESET_OWNER 0x2000
#define QLCNIC_FW_HANG 0x4000
#define QLCNIC_FW_LRO_MSS_CAP 0x8000
+#define QLCNIC_TX_INTR_SHARED 0x10000
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
@@ -914,7 +924,10 @@ struct qlcnic_ipaddr {
#define __QLCNIC_AER 5
#define __QLCNIC_DIAG_RES_ALLOC 6
#define __QLCNIC_LED_ENABLE 7
-#define __QLCNIC_ELB_INPROGRESS 8
+#define __QLCNIC_ELB_INPROGRESS 8
+#define __QLCNIC_SRIOV_ENABLE 10
+#define __QLCNIC_SRIOV_CAPABLE 11
+#define __QLCNIC_MBX_POLL_ENABLE 12
#define QLCNIC_INTERRUPT_TEST 1
#define QLCNIC_LOOPBACK_TEST 2
@@ -935,7 +948,7 @@ struct qlcnic_ipaddr {
struct qlcnic_filter {
struct hlist_node fnode;
u8 faddr[ETH_ALEN];
- __le16 vlan_id;
+ u16 vlan_id;
unsigned long ftime;
};
@@ -972,9 +985,11 @@ struct qlcnic_adapter {
u8 fw_fail_cnt;
u8 tx_timeo_cnt;
u8 need_fw_reset;
+ u8 reset_ctx_cnt;
u16 is_up;
- u16 pvid;
+ u16 rx_pvid;
+ u16 tx_pvid;
u32 irq;
u32 heartbeat;
@@ -1006,9 +1021,11 @@ struct qlcnic_adapter {
struct workqueue_struct *qlcnic_wq;
struct delayed_work fw_work;
struct delayed_work idc_aen_work;
+ struct delayed_work mbx_poll_work;
struct qlcnic_filter_hash fhash;
struct qlcnic_filter_hash rx_fhash;
+ struct list_head vf_mc_list;
spinlock_t tx_clean_lock;
spinlock_t mac_learn_lock;
@@ -1051,7 +1068,11 @@ struct qlcnic_info_le {
u8 total_pf;
u8 total_rss_engines;
__le16 max_vports;
- u8 reserved2[64];
+ __le16 linkstate_reg_offset;
+ __le16 bit_offsets;
+ __le16 max_local_ipv6_addrs;
+ __le16 max_remote_ipv6_addrs;
+ u8 reserved2[56];
} __packed;
struct qlcnic_info {
@@ -1083,6 +1104,10 @@ struct qlcnic_info {
u8 total_pf;
u8 total_rss_engines;
u16 max_vports;
+ u16 linkstate_reg_offset;
+ u16 bit_offsets;
+ u16 max_local_ipv6_addrs;
+ u16 max_remote_ipv6_addrs;
};
struct qlcnic_pci_info_le {
@@ -1348,6 +1373,7 @@ struct _cdrp_cmd {
struct qlcnic_cmd_args {
struct _cdrp_cmd req;
struct _cdrp_cmd rsp;
+ int op_type;
};
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
@@ -1430,9 +1456,10 @@ void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
struct qlcnic_host_rds_ring *rds_ring, u8 ring_id);
int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
void qlcnic_set_multi(struct net_device *netdev);
-int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *);
+void __qlcnic_set_multi(struct net_device *, u16);
+int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16);
int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
-void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
+void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *);
@@ -1455,7 +1482,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t);
-int qlcnic_validate_max_rss(u8, u8);
+int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32);
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
@@ -1509,8 +1536,13 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *);
int qlcnic_set_default_offload_settings(struct qlcnic_adapter *);
int qlcnic_reset_npar_config(struct qlcnic_adapter *);
int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
-void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int,
- __le16);
+void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, u16);
+int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
+int qlcnic_read_mac_addr(struct qlcnic_adapter *);
+int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
+void qlcnic_sriov_vf_schedule_multi(struct net_device *);
+void qlcnic_vf_add_mc_list(struct net_device *, u16);
+
/*
* QLOGIC Board information
*/
@@ -1567,11 +1599,14 @@ struct qlcnic_hardware_ops {
int (*create_rx_ctx) (struct qlcnic_adapter *);
int (*create_tx_ctx) (struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *, int);
+ void (*del_rx_ctx) (struct qlcnic_adapter *);
+ void (*del_tx_ctx) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
int (*setup_link_event) (struct qlcnic_adapter *, int);
int (*get_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *, u8);
int (*get_pci_info) (struct qlcnic_adapter *, struct qlcnic_pci_info *);
int (*set_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *);
- int (*change_macvlan) (struct qlcnic_adapter *, u8*, __le16, u8);
+ int (*change_macvlan) (struct qlcnic_adapter *, u8*, u16, u8);
void (*napi_enable) (struct qlcnic_adapter *);
void (*napi_disable) (struct qlcnic_adapter *);
void (*config_intr_coal) (struct qlcnic_adapter *);
@@ -1580,8 +1615,9 @@ struct qlcnic_hardware_ops {
int (*config_loopback) (struct qlcnic_adapter *, u8);
int (*clear_loopback) (struct qlcnic_adapter *, u8);
int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
- void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, __le16);
+ void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
int (*get_board_info) (struct qlcnic_adapter *);
+ void (*free_mac_list) (struct qlcnic_adapter *);
};
extern struct qlcnic_nic_template qlcnic_vf_ops;
@@ -1635,7 +1671,10 @@ static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
static inline int qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
- return adapter->ahw->hw_ops->mbx_cmd(adapter, cmd);
+ if (adapter->ahw->hw_ops->mbx_cmd)
+ return adapter->ahw->hw_ops->mbx_cmd(adapter, cmd);
+
+ return -EIO;
}
static inline void qlcnic_get_func_no(struct qlcnic_adapter *adapter)
@@ -1655,12 +1694,14 @@ static inline void qlcnic_api_unlock(struct qlcnic_adapter *adapter)
static inline void qlcnic_add_sysfs(struct qlcnic_adapter *adapter)
{
- adapter->ahw->hw_ops->add_sysfs(adapter);
+ if (adapter->ahw->hw_ops->add_sysfs)
+ adapter->ahw->hw_ops->add_sysfs(adapter);
}
static inline void qlcnic_remove_sysfs(struct qlcnic_adapter *adapter)
{
- adapter->ahw->hw_ops->remove_sysfs(adapter);
+ if (adapter->ahw->hw_ops->remove_sysfs)
+ adapter->ahw->hw_ops->remove_sysfs(adapter);
}
static inline void
@@ -1681,6 +1722,17 @@ static inline int qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
return adapter->ahw->hw_ops->create_tx_ctx(adapter, ptr, ring);
}
+static inline void qlcnic_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->del_rx_ctx(adapter);
+}
+
+static inline void qlcnic_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *ptr)
+{
+ return adapter->ahw->hw_ops->del_tx_ctx(adapter, ptr);
+}
+
static inline int qlcnic_linkevent_request(struct qlcnic_adapter *adapter,
int enable)
{
@@ -1706,7 +1758,7 @@ static inline int qlcnic_set_nic_info(struct qlcnic_adapter *adapter,
}
static inline int qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter,
- u8 *addr, __le16 id, u8 cmd)
+ u8 *addr, u16 id, u8 cmd)
{
return adapter->ahw->hw_ops->change_macvlan(adapter, addr, id, cmd);
}
@@ -1765,7 +1817,7 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
}
static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
- u64 *addr, __le16 id)
+ u64 *addr, u16 id)
{
adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
}
@@ -1775,15 +1827,22 @@ static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
return adapter->ahw->hw_ops->get_board_info(adapter);
}
+static inline void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->free_mac_list(adapter);
+}
+
static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
u32 key)
{
- adapter->nic_ops->request_reset(adapter, key);
+ if (adapter->nic_ops->request_reset)
+ adapter->nic_ops->request_reset(adapter, key);
}
static inline void qlcnic_cancel_idc_work(struct qlcnic_adapter *adapter)
{
- adapter->nic_ops->cancel_idc_work(adapter);
+ if (adapter->nic_ops->cancel_idc_work)
+ adapter->nic_ops->cancel_idc_work(adapter);
}
static inline irqreturn_t
@@ -1819,6 +1878,7 @@ static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
writel(0xfbff, adapter->tgt_mask_reg);
}
+extern const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops;
extern const struct ethtool_ops qlcnic_ethtool_ops;
extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
@@ -1830,7 +1890,9 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
} while (0)
#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430
#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+
static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
{
unsigned short device = adapter->pdev->device;
@@ -1840,8 +1902,23 @@ static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
{
unsigned short device = adapter->pdev->device;
- return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false;
+ bool status;
+
+ status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
+
+ return status;
}
+static inline bool qlcnic_sriov_pf_check(struct qlcnic_adapter *adapter)
+{
+ return (adapter->ahw->op_mode == QLCNIC_SRIOV_PF_FUNC) ? true : false;
+}
+static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+ return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+}
#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index cd5ae8813cb3..ea790a93ee7c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -6,6 +6,7 @@
*/
#include "qlcnic.h"
+#include "qlcnic_sriov.h"
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <linux/ethtool.h>
@@ -13,100 +14,7 @@
#define QLCNIC_MAX_TX_QUEUES 1
#define RSS_HASHTYPE_IP_TCP 0x3
-
-/* status descriptor mailbox data
- * @phy_addr: physical address of buffer
- * @sds_ring_size: buffer size
- * @intrpt_id: interrupt id
- * @intrpt_val: source of interrupt
- */
-struct qlcnic_sds_mbx {
- u64 phy_addr;
- u8 rsvd1[16];
- u16 sds_ring_size;
- u16 rsvd2[3];
- u16 intrpt_id;
- u8 intrpt_val;
- u8 rsvd3[5];
-} __packed;
-
-/* receive descriptor buffer data
- * phy_addr_reg: physical address of regular buffer
- * phy_addr_jmb: physical address of jumbo buffer
- * reg_ring_sz: size of regular buffer
- * reg_ring_len: no. of entries in regular buffer
- * jmb_ring_len: no. of entries in jumbo buffer
- * jmb_ring_sz: size of jumbo buffer
- */
-struct qlcnic_rds_mbx {
- u64 phy_addr_reg;
- u64 phy_addr_jmb;
- u16 reg_ring_sz;
- u16 reg_ring_len;
- u16 jmb_ring_sz;
- u16 jmb_ring_len;
-} __packed;
-
-/* host producers for regular and jumbo rings */
-struct __host_producer_mbx {
- u32 reg_buf;
- u32 jmb_buf;
-} __packed;
-
-/* Receive context mailbox data outbox registers
- * @state: state of the context
- * @vport_id: virtual port id
- * @context_id: receive context id
- * @num_pci_func: number of pci functions of the port
- * @phy_port: physical port id
- */
-struct qlcnic_rcv_mbx_out {
- u8 rcv_num;
- u8 sts_num;
- u16 ctx_id;
- u8 state;
- u8 num_pci_func;
- u8 phy_port;
- u8 vport_id;
- u32 host_csmr[QLCNIC_MAX_RING_SETS];
- struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
-} __packed;
-
-struct qlcnic_add_rings_mbx_out {
- u8 rcv_num;
- u8 sts_num;
- u16 ctx_id;
- u32 host_csmr[QLCNIC_MAX_RING_SETS];
- struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
-} __packed;
-
-/* Transmit context mailbox inbox registers
- * @phys_addr: DMA address of the transmit buffer
- * @cnsmr_index: host consumer index
- * @size: legth of transmit buffer ring
- * @intr_id: interrput id
- * @src: src of interrupt
- */
-struct qlcnic_tx_mbx {
- u64 phys_addr;
- u64 cnsmr_index;
- u16 size;
- u16 intr_id;
- u8 src;
- u8 rsvd[3];
-} __packed;
-
-/* Transmit context mailbox outbox registers
- * @host_prod: host producer index
- * @ctx_id: transmit context id
- * @state: state of the transmit context
- */
-struct qlcnic_tx_mbx_out {
- u32 host_prod;
- u16 ctx_id;
- u8 state;
- u8 rsvd;
-} __packed;
+#define QLC_83XX_FW_MBX_CMD 0
static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@ -156,9 +64,11 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_SET_LED_CONFIG, 5, 1},
{QLCNIC_CMD_GET_LED_CONFIG, 1, 5},
{QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
+ {QLCNIC_CMD_CONFIG_VPORT, 4, 4},
+ {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
};
-static const u32 qlcnic_83xx_ext_reg_tbl[] = {
+const u32 qlcnic_83xx_ext_reg_tbl[] = {
0x38CC, /* Global Reset */
0x38F0, /* Wildcard */
0x38FC, /* Informant */
@@ -204,7 +114,7 @@ static const u32 qlcnic_83xx_ext_reg_tbl[] = {
0x34A4, /* QLC_83XX_ASIC_TEMP */
};
-static const u32 qlcnic_83xx_reg_tbl[] = {
+const u32 qlcnic_83xx_reg_tbl[] = {
0x34A8, /* PEG_HALT_STAT1 */
0x34AC, /* PEG_HALT_STAT2 */
0x34B0, /* FW_HEARTBEAT */
@@ -247,6 +157,8 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
.process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag,
.create_rx_ctx = qlcnic_83xx_create_rx_ctx,
.create_tx_ctx = qlcnic_83xx_create_tx_ctx,
+ .del_rx_ctx = qlcnic_83xx_del_rx_ctx,
+ .del_tx_ctx = qlcnic_83xx_del_tx_ctx,
.setup_link_event = qlcnic_83xx_setup_link_event,
.get_nic_info = qlcnic_83xx_get_nic_info,
.get_pci_info = qlcnic_83xx_get_pci_info,
@@ -260,6 +172,7 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
.config_promisc_mode = qlcnic_83xx_nic_set_promisc,
.change_l2_filter = qlcnic_83xx_change_l2_filter,
.get_board_info = qlcnic_83xx_get_port_info,
+ .free_mac_list = qlcnic_82xx_free_mac_list,
};
static struct qlcnic_nic_template qlcnic_83xx_ops = {
@@ -355,14 +268,20 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
num_intr));
/* account for AEN interrupt MSI-X based interrupts */
num_msix += 1;
- num_msix += adapter->max_drv_tx_rings;
+
+ if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
+ num_msix += adapter->max_drv_tx_rings;
+
err = qlcnic_enable_msix(adapter, num_msix);
if (err == -ENOMEM)
return err;
if (adapter->flags & QLCNIC_MSIX_ENABLED)
num_msix = adapter->ahw->num_msix;
- else
+ else {
+ if (qlcnic_sriov_vf_check(adapter))
+ return -EINVAL;
num_msix = 1;
+ }
/* setup interrupt mapping table for fw */
ahw->intr_tbl = vzalloc(num_msix *
sizeof(struct qlcnic_intrpt_config));
@@ -421,12 +340,13 @@ inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
writel(0, adapter->ahw->pci_base0 + mask);
}
-inline void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *adapter)
+void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *adapter)
{
u32 mask;
mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
writel(1, adapter->ahw->pci_base0 + mask);
+ QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, 0);
}
static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter,
@@ -482,7 +402,8 @@ static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
if (event & QLCNIC_MBX_ASYNC_EVENT)
- qlcnic_83xx_process_aen(adapter);
+ __qlcnic_83xx_process_aen(adapter);
+
out:
qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
@@ -535,17 +456,15 @@ done:
void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter)
{
- u32 val = 0, num_msix = adapter->ahw->num_msix - 1;
+ u32 num_msix;
+
+ qlcnic_83xx_disable_mbx_intr(adapter);
if (adapter->flags & QLCNIC_MSIX_ENABLED)
num_msix = adapter->ahw->num_msix - 1;
else
num_msix = 0;
- QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, val);
-
- qlcnic_83xx_disable_mbx_intr(adapter);
-
msleep(20);
synchronize_irq(adapter->msix_entries[num_msix].vector);
free_irq(adapter->msix_entries[num_msix].vector, adapter);
@@ -595,7 +514,7 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
void qlcnic_83xx_get_func_no(struct qlcnic_adapter *adapter)
{
u32 val = QLCRDX(adapter->ahw, QLCNIC_INFORMANT);
- adapter->ahw->pci_func = val & 0xf;
+ adapter->ahw->pci_func = (val >> 24) & 0xff;
}
int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter)
@@ -707,6 +626,11 @@ void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter,
ahw->fw_hal_version = 2;
qlcnic_get_func_no(adapter);
+ if (qlcnic_sriov_vf_check(adapter)) {
+ qlcnic_sriov_vf_set_ops(adapter);
+ return;
+ }
+
/* Determine function privilege level */
op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
if (op_mode == QLC_83XX_DEFAULT_OPMODE)
@@ -722,6 +646,9 @@ void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter,
ahw->fw_hal_version);
adapter->nic_ops = &qlcnic_vf_ops;
} else {
+ if (pci_find_ext_capability(adapter->pdev,
+ PCI_EXT_CAP_ID_SRIOV))
+ set_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state);
adapter->nic_ops = &qlcnic_83xx_ops;
}
}
@@ -755,7 +682,7 @@ static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
}
/* Mailbox response for mac rcode */
-static u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
+u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
{
u32 fw_data;
u8 mac_cmd_rcode;
@@ -769,7 +696,7 @@ static u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
return 1;
}
-static u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter)
+u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter)
{
u32 data;
unsigned long wait_time = 0;
@@ -832,7 +759,7 @@ poll:
/* Get the FW response data */
fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
- qlcnic_83xx_process_aen(adapter);
+ __qlcnic_83xx_process_aen(adapter);
mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
if (mbx_val)
goto poll;
@@ -884,6 +811,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
for (i = 0; i < size; i++) {
if (type == mbx_tbl[i].cmd) {
+ mbx->op_type = QLC_83XX_FW_MBX_CMD;
mbx->req.num = mbx_tbl[i].in_args;
mbx->rsp.num = mbx_tbl[i].out_args;
mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
@@ -901,10 +829,10 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
temp = adapter->ahw->fw_hal_version << 29;
mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
- break;
+ return 0;
}
}
- return 0;
+ return -EINVAL;
}
void qlcnic_83xx_idc_aen_work(struct work_struct *work)
@@ -935,7 +863,7 @@ static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
return;
}
-void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
{
u32 event[QLC_83XX_MBX_AEN_CNT];
int i;
@@ -960,6 +888,9 @@ void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
break;
case QLCNIC_MBX_TIME_EXTEND_EVENT:
break;
+ case QLCNIC_MBX_BC_EVENT:
+ qlcnic_sriov_handle_bc_event(adapter, event[1]);
+ break;
case QLCNIC_MBX_SFP_INSERT_EVENT:
dev_info(&adapter->pdev->dev, "SFP+ Insert AEN:0x%x.\n",
QLCNIC_MBX_RSP(event[0]));
@@ -977,6 +908,53 @@ void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
}
+static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 resp, event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ahw->mbx_lock, flags);
+
+ resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
+ if (resp & QLCNIC_SET_OWNER) {
+ event = readl(QLCNIC_MBX_FW(ahw, 0));
+ if (event & QLCNIC_MBX_ASYNC_EVENT)
+ __qlcnic_83xx_process_aen(adapter);
+ }
+
+ spin_unlock_irqrestore(&ahw->mbx_lock, flags);
+}
+
+static void qlcnic_83xx_mbx_poll_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+
+ adapter = container_of(work, struct qlcnic_adapter, mbx_poll_work.work);
+
+ if (!test_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
+ return;
+
+ qlcnic_83xx_process_aen(adapter);
+ queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work,
+ (HZ / 10));
+}
+
+void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *adapter)
+{
+ if (test_and_set_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
+ return;
+
+ INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work);
+}
+
+void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter)
+{
+ if (!test_and_clear_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
+ return;
+ cancel_delayed_work_sync(&adapter->mbx_poll_work);
+}
+
static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
{
int index, i, err, sds_mbx_size;
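qlcnic_83xx_enable_mbx_poll()/qlcnic_83xx_disable_mbx_poll() above follow the usual self-rearming delayed-work recipe: one state bit gates both arming and the work body, test_and_set_bit()/test_and_clear_bit() make enable and disable idempotent, and cancel_delayed_work_sync() after clearing the bit guarantees the work cannot re-queue itself. The skeleton of the pattern, with ctx and POLL_ENABLED as assumed placeholders:

	static void poll_work_fn(struct work_struct *work)
	{
		struct ctx *c = container_of(work, struct ctx,
					     poll_work.work);

		if (!test_bit(POLL_ENABLED, &c->state))
			return;		/* raced with disable */
		/* ... service the mailbox ... */
		queue_delayed_work(c->wq, &c->poll_work, HZ / 10);
	}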
@@ -1004,7 +982,8 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
sds = &recv_ctx->sds_rings[i];
sds->consumer = 0;
memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
- sds_mbx.phy_addr = sds->phys_addr;
+ sds_mbx.phy_addr_low = LSD(sds->phys_addr);
+ sds_mbx.phy_addr_high = MSD(sds->phys_addr);
sds_mbx.sds_ring_size = sds->num_desc;
if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -1050,6 +1029,32 @@ out:
return err;
}
+void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ int err;
+ u32 temp = 0;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX))
+ return;
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_del_rx_ctx(adapter, &temp);
+
+ cmd.req.arg[1] = recv_ctx->context_id | temp;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy rx ctx in firmware\n");
+
+ recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
+ qlcnic_free_mbx_args(&cmd);
+}
+
int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
{
int i, err, index, sds_mbx_size, rds_mbx_size;
@@ -1080,9 +1085,17 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
/* set mailbox hdr and capabilities */
qlcnic_alloc_mbx_args(&cmd, adapter,
QLCNIC_CMD_CREATE_RX_CTX);
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
cmd.req.arg[1] = cap;
cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) |
(QLC_83XX_HOST_RDS_MODE_UNIQUE << 16);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_create_rx_ctx(adapter,
+ &cmd.req.arg[6]);
/* set up status rings, mbx 8-57/87 */
index = QLC_83XX_HOST_SDS_MBX_IDX;
for (i = 0; i < num_sds; i++) {
@@ -1090,7 +1103,8 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
sds = &recv_ctx->sds_rings[i];
sds->consumer = 0;
memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
- sds_mbx.phy_addr = sds->phys_addr;
+ sds_mbx.phy_addr_low = LSD(sds->phys_addr);
+ sds_mbx.phy_addr_high = MSD(sds->phys_addr);
sds_mbx.sds_ring_size = sds->num_desc;
if (adapter->flags & QLCNIC_MSIX_ENABLED)
intrpt_id = ahw->intr_tbl[i].id;
@@ -1110,13 +1124,15 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
rds = &recv_ctx->rds_rings[0];
rds->producer = 0;
memset(&rds_mbx, 0, rds_mbx_size);
- rds_mbx.phy_addr_reg = rds->phys_addr;
+ rds_mbx.phy_addr_reg_low = LSD(rds->phys_addr);
+ rds_mbx.phy_addr_reg_high = MSD(rds->phys_addr);
rds_mbx.reg_ring_sz = rds->dma_size;
rds_mbx.reg_ring_len = rds->num_desc;
/* Jumbo ring */
rds = &recv_ctx->rds_rings[1];
rds->producer = 0;
- rds_mbx.phy_addr_jmb = rds->phys_addr;
+ rds_mbx.phy_addr_jmb_low = LSD(rds->phys_addr);
+ rds_mbx.phy_addr_jmb_high = MSD(rds->phys_addr);
rds_mbx.jmb_ring_sz = rds->dma_size;
rds_mbx.jmb_ring_len = rds->num_desc;
buf = &cmd.req.arg[index];
@@ -1163,16 +1179,39 @@ out:
return err;
}
+void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 temp = 0;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX))
+ return;
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_del_tx_ctx(adapter, &temp);
+
+ cmd.req.arg[1] = tx_ring->ctx_id | temp;
+ if (qlcnic_issue_cmd(adapter, &cmd))
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy tx ctx in firmware\n");
+ qlcnic_free_mbx_args(&cmd);
+}
+
int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx, int ring)
{
int err;
u16 msix_id;
- u32 *buf, intr_mask;
+ u32 *buf, intr_mask, temp = 0;
struct qlcnic_cmd_args cmd;
struct qlcnic_tx_mbx mbx;
struct qlcnic_tx_mbx_out *mbx_out;
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 msix_vector;
/* Reset host resources */
tx->producer = 0;
@@ -1182,13 +1221,21 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx));
/* setup mailbox inbox registers */
- mbx.phys_addr = tx->phys_addr;
- mbx.cnsmr_index = tx->hw_cons_phys_addr;
+ mbx.phys_addr_low = LSD(tx->phys_addr);
+ mbx.phys_addr_high = MSD(tx->phys_addr);
+ mbx.cnsmr_index_low = LSD(tx->hw_cons_phys_addr);
+ mbx.cnsmr_index_high = MSD(tx->hw_cons_phys_addr);
mbx.size = tx->num_desc;
- if (adapter->flags & QLCNIC_MSIX_ENABLED)
- msix_id = ahw->intr_tbl[adapter->max_sds_rings + ring].id;
- else
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
+ msix_vector = adapter->max_sds_rings + ring;
+ else
+ msix_vector = adapter->max_sds_rings - 1;
+ msix_id = ahw->intr_tbl[msix_vector].id;
+ } else {
msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+ }
+
if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
mbx.intr_id = msix_id;
else
@@ -1196,8 +1243,15 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
mbx.src = 0;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_create_tx_ctx(adapter, &temp);
+
cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT;
- cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES;
+ cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES | temp;
buf = &cmd.req.arg[6];
memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx));
/* send the mailbox command*/
@@ -1210,7 +1264,8 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2];
tx->crb_cmd_producer = ahw->pci_base0 + mbx_out->host_prod;
tx->ctx_id = mbx_out->ctx_id;
- if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
intr_mask = ahw->intr_tbl[adapter->max_sds_rings + ring].src;
tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
}
@@ -1267,7 +1322,8 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test)
if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
/* disable and free mailbox interrupt */
- qlcnic_83xx_free_mbx_intr(adapter);
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ qlcnic_83xx_free_mbx_intr(adapter);
adapter->ahw->loopback_state = 0;
adapter->ahw->hw_ops->setup_link_event(adapter, 1);
}
@@ -1295,12 +1351,14 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
qlcnic_detach(adapter);
if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
- err = qlcnic_83xx_setup_mbx_intr(adapter);
- if (err) {
- dev_err(&adapter->pdev->dev,
- "%s: failed to setup mbx interrupt\n",
- __func__);
- goto out;
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to setup mbx interrupt\n",
+ __func__);
+ goto out;
+ }
}
}
adapter->ahw->diag_test = 0;
@@ -1373,12 +1431,60 @@ mbx_err:
}
}
+int qlcnic_83xx_set_led(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = -EIO, active = 1;
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ netdev_warn(netdev,
+ "LED test is not supported in non-privileged mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
+ return -EBUSY;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ break;
+
+ err = qlcnic_83xx_config_led(adapter, active, 0);
+ if (err)
+ netdev_err(netdev, "Failed to set LED blink state\n");
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ active = 0;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ break;
+
+ err = qlcnic_83xx_config_led(adapter, active, 0);
+ if (err)
+ netdev_err(netdev, "Failed to reset LED blink state\n");
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!active || err)
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+ return err;
+}
+
void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
int enable)
{
struct qlcnic_cmd_args cmd;
int status;
+ if (qlcnic_sriov_vf_check(adapter))
+ return;
+
if (enable) {
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC);
cmd.req.arg[1] = BIT_0 | BIT_31;
@@ -1441,24 +1547,35 @@ int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *adapter, int enable)
return err;
}
+static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter,
+ u32 *interface_id)
+{
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_pf_set_interface_id_promisc(adapter, interface_id);
+ } else {
+ if (!qlcnic_sriov_vf_check(adapter))
+ *interface_id = adapter->recv_ctx->context_id << 16;
+ }
+}
+
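This helper is the template repeated below for the ipaddr and macaddr commands: a PF delegates to the SR-IOV code, a VF leaves the field zero for firmware to fill in, and a regular function places its own Rx context id in the upper 16 bits. A compilable sketch of the three-way decision; the enum, stub value and names are illustrative, only the <<16 placement matches the driver.

#include <stdint.h>
#include <stdio.h>

enum func_role { ROLE_DEFAULT, ROLE_SRIOV_PF, ROLE_SRIOV_VF };

/* Placeholder for qlcnic_pf_set_interface_id_promisc(): the PF path
 * derives the id from its vport bookkeeping, stubbed out here. */
static uint32_t pf_interface_id(void)
{
        return 0x5u << 16;      /* made-up vport-derived value */
}

static uint32_t set_interface_id(enum func_role role, uint16_t rx_ctx_id)
{
        switch (role) {
        case ROLE_SRIOV_PF:
                return pf_interface_id();
        case ROLE_SRIOV_VF:
                return 0;       /* VF: firmware supplies the id */
        default:
                return (uint32_t)rx_ctx_id << 16;
        }
}

int main(void)
{
        printf("default: 0x%08x\n",
               (unsigned)set_interface_id(ROLE_DEFAULT, 3));
        printf("vf:      0x%08x\n",
               (unsigned)set_interface_id(ROLE_SRIOV_VF, 3));
        return 0;
}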
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
{
int err;
- u32 temp;
+ u32 temp = 0;
struct qlcnic_cmd_args cmd;
if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
return -EIO;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
- temp = adapter->recv_ctx->context_id << 16;
+ qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
cmd.req.arg[1] = (mode ? 1 : 0) | temp;
err = qlcnic_issue_cmd(adapter, &cmd);
if (err)
dev_info(&adapter->pdev->dev,
"Promiscous mode config failed\n");
- qlcnic_free_mbx_args(&cmd);
+ qlcnic_free_mbx_args(&cmd);
return err;
}
@@ -1490,7 +1607,9 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
/* Poll for link up event before running traffic */
do {
msleep(500);
- qlcnic_83xx_process_aen(adapter);
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ qlcnic_83xx_process_aen(adapter);
+
if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
dev_info(&adapter->pdev->dev,
"Firmware didn't sent link up event to loopback request\n");
@@ -1500,6 +1619,12 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
}
} while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
+ /* Make sure carrier is off and queue is stopped during loopback */
+ if (netif_running(netdev)) {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+
ret = qlcnic_do_lb_test(adapter, mode);
qlcnic_83xx_clear_lb_mode(adapter, mode);
@@ -1544,7 +1669,9 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
/* Wait for Link and IDC Completion AEN */
do {
msleep(300);
- qlcnic_83xx_process_aen(adapter);
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ qlcnic_83xx_process_aen(adapter);
+
if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
dev_err(&adapter->pdev->dev,
"FW did not generate IDC completion AEN\n");
@@ -1584,7 +1711,9 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
/* Wait for Link and IDC Completion AEN */
do {
msleep(300);
- qlcnic_83xx_process_aen(adapter);
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ qlcnic_83xx_process_aen(adapter);
+
if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
dev_err(&adapter->pdev->dev,
"Firmware didn't sent IDC completion AEN\n");
@@ -1598,21 +1727,31 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
return status;
}
+static void qlcnic_83xx_set_interface_id_ipaddr(struct qlcnic_adapter *adapter,
+ u32 *interface_id)
+{
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_pf_set_interface_id_ipaddr(adapter, interface_id);
+ } else {
+ if (!qlcnic_sriov_vf_check(adapter))
+ *interface_id = adapter->recv_ctx->context_id << 16;
+ }
+}
+
void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip,
int mode)
{
int err;
- u32 temp, temp_ip;
+ u32 temp = 0, temp_ip;
struct qlcnic_cmd_args cmd;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_IP_ADDR);
- if (mode == QLCNIC_IP_UP) {
- temp = adapter->recv_ctx->context_id << 16;
+ qlcnic_83xx_set_interface_id_ipaddr(adapter, &temp);
+
+ if (mode == QLCNIC_IP_UP)
cmd.req.arg[1] = 1 | temp;
- } else {
- temp = adapter->recv_ctx->context_id << 16;
+ else
cmd.req.arg[1] = 2 | temp;
- }
/*
* Adapter needs IP address in network byte order.
@@ -1629,6 +1768,7 @@ void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip,
dev_err(&adapter->netdev->dev,
"could not notify %s IP 0x%x request\n",
(mode == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
+
qlcnic_free_mbx_args(&cmd);
}
@@ -1695,11 +1835,22 @@ int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable)
}
+static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
+ u32 *interface_id)
+{
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_pf_set_interface_id_macaddr(adapter, interface_id);
+ } else {
+ if (!qlcnic_sriov_vf_check(adapter))
+ *interface_id = adapter->recv_ctx->context_id << 16;
+ }
+}
+
int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
- __le16 vlan_id, u8 op)
+ u16 vlan_id, u8 op)
{
int err;
- u32 *buf;
+ u32 *buf, temp = 0;
struct qlcnic_cmd_args cmd;
struct qlcnic_macvlan_mbx mv;
@@ -1709,11 +1860,21 @@ int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
if (err)
return err;
- cmd.req.arg[1] = op | (1 << 8) |
- (adapter->recv_ctx->context_id << 16);
- mv.vlan = le16_to_cpu(vlan_id);
- memcpy(&mv.mac, addr, ETH_ALEN);
+ if (vlan_id)
+ op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
+ QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
+
+ cmd.req.arg[1] = op | (1 << 8);
+ qlcnic_83xx_set_interface_id_macaddr(adapter, &temp);
+ cmd.req.arg[1] |= temp;
+ mv.vlan = vlan_id;
+ mv.mac_addr0 = addr[0];
+ mv.mac_addr1 = addr[1];
+ mv.mac_addr2 = addr[2];
+ mv.mac_addr3 = addr[3];
+ mv.mac_addr4 = addr[4];
+ mv.mac_addr5 = addr[5];
buf = &cmd.req.arg[2];
memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
err = qlcnic_issue_cmd(adapter, &cmd);
@@ -1726,7 +1887,7 @@ int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
}
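Two things changed in this command: a non-zero VLAN id silently promotes a plain MAC add/delete to its VLAN-qualified opcode, and the interface id is OR-ed into the same request word as the opcode. A minimal sketch of the promotion and packing, with made-up opcode values standing in for the real QLCNIC_MAC_* constants:

#include <stdint.h>
#include <stdio.h>

/* Illustrative opcodes; the real QLCNIC_MAC_* values live in the
 * driver headers and may differ. */
enum { MAC_ADD = 1, MAC_DEL = 2, MAC_VLAN_ADD = 3, MAC_VLAN_DEL = 4 };

static int promote_op(int op, uint16_t vlan_id)
{
        if (!vlan_id)
                return op;      /* untagged: keep the plain opcode */
        return (op == MAC_ADD || op == MAC_VLAN_ADD) ? MAC_VLAN_ADD
                                                     : MAC_VLAN_DEL;
}

int main(void)
{
        /* arg[1] = op | (1 << 8) | interface_id, as in the hunk above */
        uint32_t arg1 = (uint32_t)promote_op(MAC_ADD, 100) | (1u << 8);

        printf("arg1=0x%08x\n", (unsigned)arg1);
        return 0;
}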
void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
- __le16 vlan_id)
+ u16 vlan_id)
{
u8 mac[ETH_ALEN];
memcpy(&mac, addr, ETH_ALEN);
@@ -1776,7 +1937,7 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter)
{
int err;
- u32 temp;
+ u16 temp;
struct qlcnic_cmd_args cmd;
struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
@@ -1784,10 +1945,18 @@ void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter)
return;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
- cmd.req.arg[1] = 1 | (adapter->recv_ctx->context_id << 16);
+ if (coal->type == QLCNIC_INTR_COAL_TYPE_RX) {
+ temp = adapter->recv_ctx->context_id;
+ cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16;
+ temp = coal->rx_time_us;
+ cmd.req.arg[2] = coal->rx_packets | temp << 16;
+ } else if (coal->type == QLCNIC_INTR_COAL_TYPE_TX) {
+ temp = adapter->tx_ring->ctx_id;
+ cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_TX | temp << 16;
+ temp = coal->tx_time_us;
+ cmd.req.arg[2] = coal->tx_packets | temp << 16;
+ }
cmd.req.arg[3] = coal->flag;
- temp = coal->rx_time_us << 16;
- cmd.req.arg[2] = coal->rx_packets | temp;
err = qlcnic_issue_cmd(adapter, &cmd);
if (err != QLCNIC_RCODE_SUCCESS)
dev_info(&adapter->pdev->dev,
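The coalescing command now carries a type selector: arg[1] packs the type with the owning context id in its upper half-word, and arg[2] packs the frame threshold with the microsecond timer the same way, for whichever of the Rx or Tx ring is being programmed. A standalone sketch of that packing, with illustrative constant values:

#include <stdint.h>
#include <stdio.h>

#define COAL_TYPE_RX 1  /* illustrative stand-ins for the */
#define COAL_TYPE_TX 2  /* QLCNIC_INTR_COAL_TYPE_* constants */

static void pack_coal(uint32_t arg[2], int type, uint16_t ctx_id,
                      uint16_t packets, uint16_t time_us)
{
        arg[0] = (uint32_t)type | ((uint32_t)ctx_id << 16);
        arg[1] = (uint32_t)packets | ((uint32_t)time_us << 16);
}

int main(void)
{
        uint32_t arg[2];

        pack_coal(arg, COAL_TYPE_RX, 1, 256, 3);
        printf("arg1=0x%08x arg2=0x%08x\n",
               (unsigned)arg[0], (unsigned)arg[1]);
        return 0;
}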
@@ -1826,7 +1995,7 @@ irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
if (event & QLCNIC_MBX_ASYNC_EVENT)
- qlcnic_83xx_process_aen(adapter);
+ __qlcnic_83xx_process_aen(adapter);
out:
mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
writel(0, adapter->ahw->pci_base0 + mask);
@@ -2002,14 +2171,17 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type)
{
int i, index, err;
- bool type;
u8 max_ints;
- u32 val, temp;
+ u32 val, temp, type;
struct qlcnic_cmd_args cmd;
max_ints = adapter->ahw->num_msix - 1;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT);
cmd.req.arg[1] = max_ints;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[1] |= (adapter->ahw->pci_func << 8) | BIT_16;
+
for (i = 0, index = 2; i < max_ints; i++) {
type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
val = type | (adapter->ahw->intr_tbl[i].type << 4);
@@ -2163,7 +2335,7 @@ static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter)
return 0;
}
-static int qlcnic_83xx_enable_flash_write_op(struct qlcnic_adapter *adapter)
+int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *adapter)
{
int ret;
u32 cmd;
@@ -2181,7 +2353,7 @@ static int qlcnic_83xx_enable_flash_write_op(struct qlcnic_adapter *adapter)
return 0;
}
-static int qlcnic_83xx_disable_flash_write_op(struct qlcnic_adapter *adapter)
+int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter)
{
int ret;
@@ -2255,7 +2427,7 @@ int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
return -EIO;
if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
- ret = qlcnic_83xx_enable_flash_write_op(adapter);
+ ret = qlcnic_83xx_enable_flash_write(adapter);
if (ret) {
qlcnic_83xx_unlock_flash(adapter);
dev_err(&adapter->pdev->dev,
@@ -2297,7 +2469,7 @@ int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
}
if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
- ret = qlcnic_83xx_disable_flash_write_op(adapter);
+ ret = qlcnic_83xx_disable_flash_write(adapter);
if (ret) {
qlcnic_83xx_unlock_flash(adapter);
dev_err(&adapter->pdev->dev,
@@ -2337,8 +2509,8 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
u32 temp;
int ret = -EIO;
- if ((count < QLC_83XX_FLASH_BULK_WRITE_MIN) ||
- (count > QLC_83XX_FLASH_BULK_WRITE_MAX)) {
+ if ((count < QLC_83XX_FLASH_WRITE_MIN) ||
+ (count > QLC_83XX_FLASH_WRITE_MAX)) {
dev_err(&adapter->pdev->dev,
"%s: Invalid word count\n", __func__);
return -EIO;
@@ -2616,13 +2788,19 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
{
+ u8 pci_func;
int err;
u32 config = 0, state;
struct qlcnic_cmd_args cmd;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(ahw->pci_func));
- if (!QLC_83xx_FUNC_VAL(state, ahw->pci_func)) {
+ if (qlcnic_sriov_vf_check(adapter))
+ pci_func = adapter->portnum;
+ else
+ pci_func = ahw->pci_func;
+
+ state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(pci_func));
+ if (!QLC_83xx_FUNC_VAL(state, pci_func)) {
dev_info(&adapter->pdev->dev, "link state down\n");
return config;
}
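The link-state register is shared across functions, so each function extracts only its own field from the word it reads, and a VF indexes by its port number because its PCI function number is virtualized. A sketch of the per-function extraction under an assumed four-bits-per-function layout (the real QLC_83xx_FUNC_VAL() may differ):

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: one status nibble per PCI function packed into a
 * 32-bit register; purely illustrative. */
static unsigned func_link_state(uint32_t state, unsigned pci_func)
{
        return (state >> (pci_func * 4)) & 0xf;
}

int main(void)
{
        uint32_t state = 0x00000100;    /* function 2 reports link up */

        printf("func2 %s\n", func_link_state(state, 2) ? "up" : "down");
        printf("func0 %s\n", func_link_state(state, 0) ? "up" : "down");
        return 0;
}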
@@ -2752,6 +2930,9 @@ static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter,
/* fill in MAC rx frame stats */
for (k += 6; k < 80; k += 2)
data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* fill in eSwitch stats */
+ for (; k < total_regs; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
break;
case QLC_83XX_STAT_RX:
for (k = 2; k < 8; k += 2)
@@ -2780,6 +2961,7 @@ static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter,
void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
{
struct qlcnic_cmd_args cmd;
+ struct net_device *netdev = adapter->netdev;
int ret = 0;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
@@ -2789,7 +2971,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
QLC_83XX_STAT_TX, &ret);
if (ret) {
- dev_info(&adapter->pdev->dev, "Error getting MAC stats\n");
+ netdev_err(netdev, "Error getting Tx stats\n");
goto out;
}
/* Get MAC stats */
@@ -2799,8 +2981,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
QLC_83XX_STAT_MAC, &ret);
if (ret) {
- dev_info(&adapter->pdev->dev,
- "Error getting Rx stats\n");
+ netdev_err(netdev, "Error getting MAC stats\n");
goto out;
}
/* Get Rx stats */
@@ -2810,8 +2991,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
QLC_83XX_STAT_RX, &ret);
if (ret)
- dev_info(&adapter->pdev->dev,
- "Error getting Tx stats\n");
+ netdev_err(netdev, "Error getting Rx stats\n");
out:
qlcnic_free_mbx_args(&cmd);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 61f81f6c84a9..1f1d85e6f2af 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -12,6 +12,8 @@
#include <linux/etherdevice.h>
#include "qlcnic_hw.h"
+#define QLCNIC_83XX_BAR0_LENGTH 0x4000
+
/* Directly mapped registers */
#define QLC_83XX_CRB_WIN_BASE 0x3800
#define QLC_83XX_CRB_WIN_FUNC(f) (QLC_83XX_CRB_WIN_BASE+((f)*4))
@@ -86,6 +88,153 @@
#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16
+/* status descriptor mailbox data
+ * @phy_addr_{low|high}: physical address of buffer
+ * @sds_ring_size: buffer size
+ * @intrpt_id: interrupt id
+ * @intrpt_val: source of interrupt
+ */
+struct qlcnic_sds_mbx {
+ u32 phy_addr_low;
+ u32 phy_addr_high;
+ u32 rsvd1[4];
+#if defined(__LITTLE_ENDIAN)
+ u16 sds_ring_size;
+ u16 rsvd2;
+ u16 rsvd3[2];
+ u16 intrpt_id;
+ u8 intrpt_val;
+ u8 rsvd4;
+#elif defined(__BIG_ENDIAN)
+ u16 rsvd2;
+ u16 sds_ring_size;
+ u16 rsvd3[2];
+ u8 rsvd4;
+ u8 intrpt_val;
+ u16 intrpt_id;
+#endif
+ u32 rsvd5;
+} __packed;
+
+/* receive descriptor buffer data
+ * phy_addr_reg_{low|high}: physical address of regular buffer
+ * phy_addr_jmb_{low|high}: physical address of jumbo buffer
+ * reg_ring_sz: size of regular buffer
+ * reg_ring_len: no. of entries in regular buffer
+ * jmb_ring_len: no. of entries in jumbo buffer
+ * jmb_ring_sz: size of jumbo buffer
+ */
+struct qlcnic_rds_mbx {
+ u32 phy_addr_reg_low;
+ u32 phy_addr_reg_high;
+ u32 phy_addr_jmb_low;
+ u32 phy_addr_jmb_high;
+#if defined(__LITTLE_ENDIAN)
+ u16 reg_ring_sz;
+ u16 reg_ring_len;
+ u16 jmb_ring_sz;
+ u16 jmb_ring_len;
+#elif defined(__BIG_ENDIAN)
+ u16 reg_ring_len;
+ u16 reg_ring_sz;
+ u16 jmb_ring_len;
+ u16 jmb_ring_sz;
+#endif
+} __packed;
+
+/* host producers for regular and jumbo rings */
+struct __host_producer_mbx {
+ u32 reg_buf;
+ u32 jmb_buf;
+} __packed;
+
+/* Receive context mailbox data outbox registers
+ * @state: state of the context
+ * @vport_id: virtual port id
+ * @context_id: receive context id
+ * @num_pci_func: number of pci functions of the port
+ * @phy_port: physical port id
+ */
+struct qlcnic_rcv_mbx_out {
+#if defined(__LITTLE_ENDIAN)
+ u8 rcv_num;
+ u8 sts_num;
+ u16 ctx_id;
+ u8 state;
+ u8 num_pci_func;
+ u8 phy_port;
+ u8 vport_id;
+#elif defined(__BIG_ENDIAN)
+ u16 ctx_id;
+ u8 sts_num;
+ u8 rcv_num;
+ u8 vport_id;
+ u8 phy_port;
+ u8 num_pci_func;
+ u8 state;
+#endif
+ u32 host_csmr[QLCNIC_MAX_RING_SETS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+} __packed;
+
+struct qlcnic_add_rings_mbx_out {
+#if defined(__LITTLE_ENDIAN)
+ u8 rcv_num;
+ u8 sts_num;
+ u16 ctx_id;
+#elif defined(__BIG_ENDIAN)
+ u16 ctx_id;
+ u8 sts_num;
+ u8 rcv_num;
+#endif
+ u32 host_csmr[QLCNIC_MAX_RING_SETS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+} __packed;
+
+/* Transmit context mailbox inbox registers
+ * @phys_addr_{low|high}: DMA address of the transmit buffer
+ * @cnsmr_index_{low|high}: host consumer index
+ * @size: length of transmit buffer ring
+ * @intr_id: interrupt id
+ * @src: source of interrupt
+ */
+struct qlcnic_tx_mbx {
+ u32 phys_addr_low;
+ u32 phys_addr_high;
+ u32 cnsmr_index_low;
+ u32 cnsmr_index_high;
+#if defined(__LITTLE_ENDIAN)
+ u16 size;
+ u16 intr_id;
+ u8 src;
+ u8 rsvd[3];
+#elif defined(__BIG_ENDIAN)
+ u16 intr_id;
+ u16 size;
+ u8 rsvd[3];
+ u8 src;
+#endif
+} __packed;
+
+/* Transmit context mailbox outbox registers
+ * @host_prod: host producer index
+ * @ctx_id: transmit context id
+ * @state: state of the transmit context
+ */
+
+struct qlcnic_tx_mbx_out {
+ u32 host_prod;
+#if defined(__LITTLE_ENDIAN)
+ u16 ctx_id;
+ u8 state;
+ u8 rsvd;
+#elif defined(__BIG_ENDIAN)
+ u8 rsvd;
+ u8 state;
+ u16 ctx_id;
+#endif
+} __packed;
+
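All of these mailbox structs are consumed by firmware as a stream of 32-bit words, which is why every sub-word member is declared twice: reversing the declaration order under __BIG_ENDIAN keeps each 32-bit word's value identical on either host byte order. A runnable demonstration of where a u16 pair lands inside a 32-bit word:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Two u16s declared in "little-endian order", as in the structs above. */
struct pair {
        uint16_t lo;    /* e.g. sds_ring_size */
        uint16_t hi;    /* e.g. rsvd2 */
};

int main(void)
{
        struct pair p = { .lo = 0x1111, .hi = 0x2222 };
        uint32_t word;

        memcpy(&word, &p, sizeof(word));
        /* A little-endian host prints 0x22221111: .lo occupies the low
         * half of the word. A big-endian host would get 0x11112222 from
         * the same declaration, which is why the driver swaps the field
         * order under __BIG_ENDIAN to keep the word firmware sees the
         * same. */
        printf("word=0x%08x\n", (unsigned)word);
        return 0;
}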
struct qlcnic_intrpt_config {
u8 type;
u8 enabled;
@@ -94,8 +243,23 @@ struct qlcnic_intrpt_config {
};
struct qlcnic_macvlan_mbx {
- u8 mac[ETH_ALEN];
+#if defined(__LITTLE_ENDIAN)
+ u8 mac_addr0;
+ u8 mac_addr1;
+ u8 mac_addr2;
+ u8 mac_addr3;
+ u8 mac_addr4;
+ u8 mac_addr5;
u16 vlan;
+#elif defined(__BIG_ENDIAN)
+ u8 mac_addr3;
+ u8 mac_addr2;
+ u8 mac_addr1;
+ u8 mac_addr0;
+ u16 vlan;
+ u8 mac_addr5;
+ u8 mac_addr4;
+#endif
};
struct qlc_83xx_fw_info {
@@ -153,6 +317,18 @@ struct qlc_83xx_idc {
char **name;
};
+/* Device States */
+enum qlcnic_83xx_states {
+ QLC_83XX_IDC_DEV_UNKNOWN,
+ QLC_83XX_IDC_DEV_COLD,
+ QLC_83XX_IDC_DEV_INIT,
+ QLC_83XX_IDC_DEV_READY,
+ QLC_83XX_IDC_DEV_NEED_RESET,
+ QLC_83XX_IDC_DEV_NEED_QUISCENT,
+ QLC_83XX_IDC_DEV_FAILED,
+ QLC_83XX_IDC_DEV_QUISCENT
+};
+
#define QLCNIC_MBX_RSP(reg) LSW(reg)
#define QLCNIC_MBX_NUM_REGS(reg) (MSW(reg) & 0x1FF)
#define QLCNIC_MBX_STATUS(reg) (((reg) >> 25) & 0x7F)
@@ -205,7 +381,7 @@ struct qlc_83xx_idc {
#define QLC_83XX_STAT_MAC 1
#define QLC_83XX_TX_STAT_REGS 14
#define QLC_83XX_RX_STAT_REGS 40
-#define QLC_83XX_MAC_STAT_REGS 80
+#define QLC_83XX_MAC_STAT_REGS 94
#define QLC_83XX_GET_FUNC_PRIVILEGE(VAL, FN) (0x3 & ((VAL) >> (FN * 2)))
#define QLC_83XX_SET_FUNC_OPMODE(VAL, FN) ((VAL) << (FN * 2))
@@ -226,6 +402,7 @@ struct qlc_83xx_idc {
#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF
#define QLC_83XX_DEFAULT_MODE 0x0
+#define QLC_83XX_SRIOV_MODE 0x1
#define QLCNIC_BRDTYPE_83XX_10G 0x0083
#define QLC_83XX_FLASH_SPI_STATUS 0x2808E010
@@ -242,8 +419,8 @@ struct qlc_83xx_idc {
#define QLC_83XX_FLASH_BULK_WRITE_CMD 0xcadcadca
#define QLC_83XX_FLASH_READ_RETRY_COUNT 5000
#define QLC_83XX_FLASH_STATUS_READY 0x6
-#define QLC_83XX_FLASH_BULK_WRITE_MIN 2
-#define QLC_83XX_FLASH_BULK_WRITE_MAX 64
+#define QLC_83XX_FLASH_WRITE_MIN 2
+#define QLC_83XX_FLASH_WRITE_MAX 64
#define QLC_83XX_FLASH_STATUS_REG_POLL_DELAY 1
#define QLC_83XX_ERASE_MODE 1
#define QLC_83XX_WRITE_MODE 2
@@ -336,7 +513,7 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_intr_coalesce(struct qlcnic_adapter *);
-void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, __le16);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *, int);
@@ -351,11 +528,14 @@ int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *, int);
+void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *);
+void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int);
void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
-int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, __le16, u8);
+int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *);
void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
struct qlcnic_cmd_args *);
@@ -368,6 +548,7 @@ void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *);
irqreturn_t qlcnic_83xx_handle_aen(int, void *);
int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *);
+void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *);
irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
irqreturn_t qlcnic_83xx_intr(int, void *);
irqreturn_t qlcnic_83xx_tmp_intr(int, void *);
@@ -377,7 +558,7 @@ void qlcnic_83xx_disable_intr(struct qlcnic_adapter *,
struct qlcnic_host_sds_ring *);
void qlcnic_83xx_check_vf(struct qlcnic_adapter *,
const struct pci_device_id *);
-void qlcnic_83xx_process_aen(struct qlcnic_adapter *);
+void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
int qlcnic_83xx_get_port_config(struct qlcnic_adapter *);
int qlcnic_83xx_set_port_config(struct qlcnic_adapter *);
int qlcnic_enable_eswitch(struct qlcnic_adapter *, u8, u8);
@@ -401,7 +582,7 @@ int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *);
int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int);
int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *,
u32, u8 *, int);
-int qlcnic_83xx_init(struct qlcnic_adapter *);
+int qlcnic_83xx_init(struct qlcnic_adapter *, int);
int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *);
int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev);
void qlcnic_83xx_idc_poll_dev_state(struct work_struct *);
@@ -434,5 +615,12 @@ int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *);
int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *);
int qlcnic_83xx_loopback_test(struct net_device *, u8);
int qlcnic_83xx_interrupt_test(struct net_device *);
+int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state);
int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
+int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
+int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
+u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *);
+u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *);
+void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
+void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 5c033f268ca5..ab1d8d99cbd5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -5,6 +5,7 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
+#include "qlcnic_sriov.h"
#include "qlcnic.h"
#include "qlcnic_hw.h"
@@ -24,13 +25,24 @@
#define QLC_83XX_OPCODE_TMPL_END 0x0080
#define QLC_83XX_OPCODE_POLL_READ_LIST 0x0100
+/* EPORT control registers */
+#define QLC_83XX_RESET_CONTROL 0x28084E50
+#define QLC_83XX_RESET_REG 0x28084E60
+#define QLC_83XX_RESET_PORT0 0x28084E70
+#define QLC_83XX_RESET_PORT1 0x28084E80
+#define QLC_83XX_RESET_PORT2 0x28084E90
+#define QLC_83XX_RESET_PORT3 0x28084EA0
+#define QLC_83XX_RESET_SRESHIM 0x28084EB0
+#define QLC_83XX_RESET_EPGSHIM 0x28084EC0
+#define QLC_83XX_RESET_ETHERPCS 0x28084ED0
+
static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter);
-static int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev);
static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter);
/* Template header */
struct qlc_83xx_reset_hdr {
+#if defined(__LITTLE_ENDIAN)
u16 version;
u16 signature;
u16 size;
@@ -39,14 +51,31 @@ struct qlc_83xx_reset_hdr {
u16 checksum;
u16 init_offset;
u16 start_offset;
+#elif defined(__BIG_ENDIAN)
+ u16 signature;
+ u16 version;
+ u16 entries;
+ u16 size;
+ u16 checksum;
+ u16 hdr_size;
+ u16 start_offset;
+ u16 init_offset;
+#endif
} __packed;
/* Command entry header. */
struct qlc_83xx_entry_hdr {
- u16 cmd;
- u16 size;
- u16 count;
- u16 delay;
+#if defined(__LITTLE_ENDIAN)
+ u16 cmd;
+ u16 size;
+ u16 count;
+ u16 delay;
+#elif defined(__BIG_ENDIAN)
+ u16 size;
+ u16 cmd;
+ u16 delay;
+ u16 count;
+#endif
} __packed;
/* Generic poll command */
@@ -60,10 +89,17 @@ struct qlc_83xx_rmw {
u32 mask;
u32 xor_value;
u32 or_value;
+#if defined(__LITTLE_ENDIAN)
u8 shl;
u8 shr;
u8 index_a;
u8 rsvd;
+#elif defined(__BIG_ENDIAN)
+ u8 rsvd;
+ u8 index_a;
+ u8 shr;
+ u8 shl;
+#endif
} __packed;
/* Generic command with 2 DWORD */
@@ -90,18 +126,6 @@ static const char *const qlc_83xx_idc_states[] = {
"Quiesce"
};
-/* Device States */
-enum qlcnic_83xx_states {
- QLC_83XX_IDC_DEV_UNKNOWN,
- QLC_83XX_IDC_DEV_COLD,
- QLC_83XX_IDC_DEV_INIT,
- QLC_83XX_IDC_DEV_READY,
- QLC_83XX_IDC_DEV_NEED_RESET,
- QLC_83XX_IDC_DEV_NEED_QUISCENT,
- QLC_83XX_IDC_DEV_FAILED,
- QLC_83XX_IDC_DEV_QUISCENT
-};
-
static int
qlcnic_83xx_idc_check_driver_presence_reg(struct qlcnic_adapter *adapter)
{
@@ -137,7 +161,8 @@ static int qlcnic_83xx_idc_update_audit_reg(struct qlcnic_adapter *adapter,
return -EBUSY;
}
- val = adapter->portnum & 0xf;
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT);
+ val |= (adapter->portnum & 0xf);
val |= mode << 7;
if (mode)
seconds = jiffies / HZ - adapter->ahw->idc.sec_counter;
@@ -376,14 +401,18 @@ static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter)
struct net_device *netdev = adapter->netdev;
netif_device_detach(netdev);
+
/* Disable mailbox interrupt */
- QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, 0);
+ qlcnic_83xx_disable_mbx_intr(adapter);
qlcnic_down(adapter, netdev);
for (i = 0; i < adapter->ahw->num_msix; i++) {
adapter->ahw->intr_tbl[i].id = i;
adapter->ahw->intr_tbl[i].enabled = 0;
adapter->ahw->intr_tbl[i].src = 0;
}
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_sriov_pf_reset(adapter);
}
/**
@@ -585,9 +614,15 @@ static int qlcnic_83xx_idc_check_fan_failure(struct qlcnic_adapter *adapter)
static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
{
+ int err;
+
/* register for NIC IDC AEN Events */
qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ err = qlcnic_sriov_pf_reinit(adapter);
+ if (err)
+ return err;
+
qlcnic_83xx_enable_mbx_intrpt(adapter);
if (qlcnic_83xx_configure_opmode(adapter)) {
@@ -1350,6 +1385,19 @@ static void qlcnic_83xx_disable_pause_frames(struct qlcnic_adapter *adapter)
qlcnic_83xx_unlock_driver(adapter);
}
+static void qlcnic_83xx_take_eport_out_of_reset(struct qlcnic_adapter *adapter)
+{
+ QLCWR32(adapter, QLC_83XX_RESET_REG, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_PORT0, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_PORT1, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_PORT2, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_PORT3, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_SRESHIM, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_EPGSHIM, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_ETHERPCS, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_CONTROL, 1);
+}
+
static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
{
u32 heartbeat, peg_status;
@@ -1371,6 +1419,7 @@ static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
if (ret) {
dev_err(&p_dev->pdev->dev, "firmware hang detected\n");
+ qlcnic_83xx_take_eport_out_of_reset(p_dev);
qlcnic_83xx_disable_pause_frames(p_dev);
peg_status = QLC_SHARED_REG_RD32(p_dev,
QLCNIC_PEG_HALT_STATUS1);
@@ -1893,6 +1942,9 @@ int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
qlcnic_get_func_no(adapter);
op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
+ if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state))
+ op_mode = QLC_83XX_DEFAULT_OPMODE;
+
if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
@@ -1922,6 +1974,16 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
ahw->max_mac_filters = nic_info.max_mac_filters;
ahw->max_mtu = nic_info.max_mtu;
+ /* VNIC mode is detected by BIT_23 in capabilities. This bit is also
+ * set when the device is SR-IOV capable. VNIC and SR-IOV are mutually
+ * exclusive, so an SR-IOV capable device loads the driver in default
+ * mode.
+ */
+ if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state)) {
+ ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
+ return ahw->nic_mode;
+ }
+
if (ahw->capabilities & BIT_23)
ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
else
@@ -1930,7 +1992,7 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
return ahw->nic_mode;
}
-static int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
+int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
{
int ret;
@@ -2008,10 +2070,13 @@ static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter)
}
}
-int qlcnic_83xx_init(struct qlcnic_adapter *adapter)
+int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ if (qlcnic_sriov_vf_check(adapter))
+ return qlcnic_sriov_vf_init(adapter, pci_using_dac);
+
if (qlcnic_83xx_check_hw_status(adapter))
return -EIO;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index a69097c6b84d..43562c256379 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -382,8 +382,7 @@ out_free_rq:
return err;
}
-static void
-qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
{
int err;
struct qlcnic_cmd_args cmd;
@@ -422,22 +421,20 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
- &rq_phys_addr, GFP_KERNEL);
+ &rq_phys_addr, GFP_KERNEL | __GFP_ZERO);
if (!rq_addr)
return -ENOMEM;
rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
- &rsp_phys_addr, GFP_KERNEL);
+ &rsp_phys_addr, GFP_KERNEL | __GFP_ZERO);
if (!rsp_addr) {
err = -ENOMEM;
goto out_free_rq;
}
- memset(rq_addr, 0, rq_size);
prq = rq_addr;
- memset(rsp_addr, 0, rsp_size);
prsp = rsp_addr;
prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
@@ -486,13 +483,13 @@ out_free_rq:
return err;
}
-static void
-qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring)
+void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
{
struct qlcnic_cmd_args cmd;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
+
cmd.req.arg[1] = tx_ring->ctx_id;
if (qlcnic_issue_cmd(adapter, &cmd))
dev_err(&adapter->pdev->dev,
@@ -532,20 +529,15 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
&tx_ring->hw_cons_phys_addr,
GFP_KERNEL);
-
- if (ptr == NULL) {
- dev_err(&pdev->dev, "failed to allocate tx consumer\n");
+ if (ptr == NULL)
return -ENOMEM;
- }
+
tx_ring->hw_consumer = ptr;
/* cmd desc ring */
addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
&tx_ring->phys_addr,
GFP_KERNEL);
-
if (addr == NULL) {
- dev_err(&pdev->dev,
- "failed to allocate tx desc ring\n");
err = -ENOMEM;
goto err_out_free;
}
@@ -556,11 +548,9 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
addr = dma_alloc_coherent(&adapter->pdev->dev,
- RCV_DESC_RINGSIZE(rds_ring),
- &rds_ring->phys_addr, GFP_KERNEL);
+ RCV_DESC_RINGSIZE(rds_ring),
+ &rds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
- dev_err(&pdev->dev,
- "failed to allocate rds ring [%d]\n", ring);
err = -ENOMEM;
goto err_out_free;
}
@@ -572,11 +562,9 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
sds_ring = &recv_ctx->sds_rings[ring];
addr = dma_alloc_coherent(&adapter->pdev->dev,
- STATUS_DESC_RINGSIZE(sds_ring),
- &sds_ring->phys_addr, GFP_KERNEL);
+ STATUS_DESC_RINGSIZE(sds_ring),
+ &sds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
- dev_err(&pdev->dev,
- "failed to allocate sds ring [%d]\n", ring);
err = -ENOMEM;
goto err_out_free;
}
@@ -616,13 +604,12 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
&dev->tx_ring[ring],
ring);
if (err) {
- qlcnic_fw_cmd_destroy_rx_ctx(dev);
+ qlcnic_fw_cmd_del_rx_ctx(dev);
if (ring == 0)
goto err_out;
for (i = 0; i < ring; i++)
- qlcnic_fw_cmd_destroy_tx_ctx(dev,
- &dev->tx_ring[i]);
+ qlcnic_fw_cmd_del_tx_ctx(dev, &dev->tx_ring[i]);
goto err_out;
}
@@ -644,10 +631,10 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
int ring;
if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
- qlcnic_fw_cmd_destroy_rx_ctx(adapter);
+ qlcnic_fw_cmd_del_rx_ctx(adapter);
for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
- qlcnic_fw_cmd_destroy_tx_ctx(adapter,
- &adapter->tx_ring[ring]);
+ qlcnic_fw_cmd_del_tx_ctx(adapter,
+ &adapter->tx_ring[ring]);
if (qlcnic_83xx_check(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED)) {
@@ -655,7 +642,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
qlcnic_83xx_config_intrpt(adapter, 0);
}
/* Allow dma queues to drain after context reset */
- mdelay(20);
+ msleep(20);
}
}
@@ -753,10 +740,9 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
size_t nic_size = sizeof(struct qlcnic_info_le);
nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL);
+ &nic_dma_t, GFP_KERNEL | __GFP_ZERO);
if (!nic_info_addr)
return -ENOMEM;
- memset(nic_info_addr, 0, nic_size);
nic_info = nic_info_addr;
@@ -804,11 +790,10 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
return err;
nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL);
+ &nic_dma_t, GFP_KERNEL | __GFP_ZERO);
if (!nic_info_addr)
return -ENOMEM;
- memset(nic_info_addr, 0, nic_size);
nic_info = nic_info_addr;
nic_info->pci_func = cpu_to_le16(nic->pci_func);
@@ -854,10 +839,10 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
- &pci_info_dma_t, GFP_KERNEL);
+ &pci_info_dma_t,
+ GFP_KERNEL | __GFP_ZERO);
if (!pci_info_addr)
return -ENOMEM;
- memset(pci_info_addr, 0, pci_size);
npar = pci_info_addr;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
@@ -949,12 +934,9 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
}
stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL);
- if (!stats_addr) {
- dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
+ &stats_dma_t, GFP_KERNEL | __GFP_ZERO);
+ if (!stats_addr)
return -ENOMEM;
- }
- memset(stats_addr, 0, stats_size);
arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
arg1 |= rx_tx << 15 | stats_size << 16;
@@ -1003,13 +985,10 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
return -ENOMEM;
stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL);
- if (!stats_addr) {
- dev_err(&adapter->pdev->dev,
- "%s: Unable to allocate memory.\n", __func__);
+ &stats_dma_t, GFP_KERNEL | __GFP_ZERO);
+ if (!stats_addr)
return -ENOMEM;
- }
- memset(stats_addr, 0, stats_size);
+
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
cmd.req.arg[1] = stats_size << 16;
cmd.req.arg[2] = MSD(stats_dma_t);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 5641f8ec49ab..08efb4635007 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -115,6 +115,13 @@ static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = {
"mac_rx_dropped",
"mac_crc_error",
"mac_align_error",
+ "eswitch_frames",
+ "eswitch_bytes",
+ "eswitch_multicast_frames",
+ "eswitch_broadcast_frames",
+ "eswitch_unicast_frames",
+ "eswitch_error_free_frames",
+ "eswitch_error_free_bytes",
};
#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
@@ -149,7 +156,8 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
static inline int qlcnic_82xx_statistics(void)
{
- return QLCNIC_STATS_LEN + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+ return ARRAY_SIZE(qlcnic_device_gstrings_stats) +
+ ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
}
static inline int qlcnic_83xx_statistics(void)
@@ -634,7 +642,7 @@ static int qlcnic_set_channels(struct net_device *dev,
channel->tx_count != channel->max_tx)
return -EINVAL;
- err = qlcnic_validate_max_rss(channel->max_rx, channel->rx_count);
+ err = qlcnic_validate_max_rss(adapter, channel->rx_count);
if (err)
return err;
@@ -858,9 +866,11 @@ clear_diag_irq:
return ret;
}
-#define QLCNIC_ILB_PKT_SIZE 64
-#define QLCNIC_NUM_ILB_PKT 16
-#define QLCNIC_ILB_MAX_RCV_LOOP 10
+#define QLCNIC_ILB_PKT_SIZE 64
+#define QLCNIC_NUM_ILB_PKT 16
+#define QLCNIC_ILB_MAX_RCV_LOOP 10
+#define QLCNIC_LB_PKT_POLL_DELAY_MSEC 1
+#define QLCNIC_LB_PKT_POLL_COUNT 20
static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[])
{
@@ -897,9 +907,9 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
loop = 0;
do {
- msleep(1);
+ msleep(QLCNIC_LB_PKT_POLL_DELAY_MSEC);
qlcnic_process_rcv_ring_diag(sds_ring);
- if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP)
+ if (loop++ > QLCNIC_LB_PKT_POLL_COUNT)
break;
} while (!adapter->ahw->diag_cnt);
@@ -1070,8 +1080,7 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
}
-static void
-qlcnic_fill_stats(u64 *data, void *stats, int type)
+static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
{
if (type == QLCNIC_MAC_STATS) {
struct qlcnic_mac_statistics *mac_stats =
@@ -1120,6 +1129,7 @@ qlcnic_fill_stats(u64 *data, void *stats, int type)
*data++ = QLCNIC_FILL_STATS(esw_stats->local_frames);
*data++ = QLCNIC_FILL_STATS(esw_stats->numbytes);
}
+ return data;
}
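Returning the advanced destination pointer lets the caller chain the MAC, eSwitch Rx and Tx blocks into one flat ethtool buffer without maintaining an index by hand. A minimal standalone version of the pattern:

#include <stdint.h>
#include <stdio.h>

/* Analogue of the reworked qlcnic_fill_stats(): copy a block and hand
 * back the next free slot so successive fills can be chained. */
static uint64_t *fill_block(uint64_t *data, const uint64_t *src, int n)
{
        int i;

        for (i = 0; i < n; i++)
                *data++ = src[i];
        return data;
}

int main(void)
{
        uint64_t mac[2] = { 1, 2 }, rx[3] = { 3, 4, 5 };
        uint64_t out[5];
        uint64_t *p = out;

        p = fill_block(p, mac, 2);      /* MAC stats first */
        p = fill_block(p, rx, 3);       /* Rx stats appended after */
        printf("filled %ld entries\n", (long)(p - out));
        return 0;
}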
static void qlcnic_get_ethtool_stats(struct net_device *dev,
@@ -1147,7 +1157,7 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
/* Retrieve MAC statistics from firmware */
memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
qlcnic_get_mac_stats(adapter, &mac_stats);
- qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS);
+ data = qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS);
}
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
@@ -1159,7 +1169,7 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
if (ret)
return;
- qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS);
+ data = qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS);
ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
if (ret)
@@ -1176,7 +1186,8 @@ static int qlcnic_set_led(struct net_device *dev,
int err = -EIO, active = 1;
if (qlcnic_83xx_check(adapter))
- return -EOPNOTSUPP;
+ return qlcnic_83xx_set_led(dev, state);
+
if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
netdev_warn(dev, "LED test not supported for non "
"privileged function");
@@ -1292,6 +1303,9 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ethcoal)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_nic_intr_coalesce *coal;
+ u32 rx_coalesce_usecs, rx_max_frames;
+ u32 tx_coalesce_usecs, tx_max_frames;
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
return -EINVAL;
@@ -1302,8 +1316,8 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
*/
if (ethcoal->rx_coalesce_usecs > 0xffff ||
ethcoal->rx_max_coalesced_frames > 0xffff ||
- ethcoal->tx_coalesce_usecs ||
- ethcoal->tx_max_coalesced_frames ||
+ ethcoal->tx_coalesce_usecs > 0xffff ||
+ ethcoal->tx_max_coalesced_frames > 0xffff ||
ethcoal->rx_coalesce_usecs_irq ||
ethcoal->rx_max_coalesced_frames_irq ||
ethcoal->tx_coalesce_usecs_irq ||
@@ -1323,18 +1337,55 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
ethcoal->tx_max_coalesced_frames_high)
return -EINVAL;
- if (!ethcoal->rx_coalesce_usecs ||
- !ethcoal->rx_max_coalesced_frames) {
- adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
- adapter->ahw->coal.rx_time_us =
- QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
- adapter->ahw->coal.rx_packets =
- QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ coal = &adapter->ahw->coal;
+
+ if (qlcnic_83xx_check(adapter)) {
+ if (!ethcoal->tx_coalesce_usecs ||
+ !ethcoal->tx_max_coalesced_frames ||
+ !ethcoal->rx_coalesce_usecs ||
+ !ethcoal->rx_max_coalesced_frames) {
+ coal->flag = QLCNIC_INTR_DEFAULT;
+ coal->type = QLCNIC_INTR_COAL_TYPE_RX;
+ coal->rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ coal->rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+ coal->tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+ coal->tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+ } else {
+ tx_coalesce_usecs = ethcoal->tx_coalesce_usecs;
+ tx_max_frames = ethcoal->tx_max_coalesced_frames;
+ rx_coalesce_usecs = ethcoal->rx_coalesce_usecs;
+ rx_max_frames = ethcoal->rx_max_coalesced_frames;
+ coal->flag = 0;
+
+ if ((coal->rx_time_us == rx_coalesce_usecs) &&
+ (coal->rx_packets == rx_max_frames)) {
+ coal->type = QLCNIC_INTR_COAL_TYPE_TX;
+ coal->tx_time_us = tx_coalesce_usecs;
+ coal->tx_packets = tx_max_frames;
+ } else if ((coal->tx_time_us == tx_coalesce_usecs) &&
+ (coal->tx_packets == tx_max_frames)) {
+ coal->type = QLCNIC_INTR_COAL_TYPE_RX;
+ coal->rx_time_us = rx_coalesce_usecs;
+ coal->rx_packets = rx_max_frames;
+ } else {
+ coal->type = QLCNIC_INTR_COAL_TYPE_RX;
+ coal->rx_time_us = rx_coalesce_usecs;
+ coal->rx_packets = rx_max_frames;
+ coal->tx_time_us = tx_coalesce_usecs;
+ coal->tx_packets = tx_max_frames;
+ }
+ }
} else {
- adapter->ahw->coal.flag = 0;
- adapter->ahw->coal.rx_time_us = ethcoal->rx_coalesce_usecs;
- adapter->ahw->coal.rx_packets =
- ethcoal->rx_max_coalesced_frames;
+ if (!ethcoal->rx_coalesce_usecs ||
+ !ethcoal->rx_max_coalesced_frames) {
+ coal->flag = QLCNIC_INTR_DEFAULT;
+ coal->rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ coal->rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+ } else {
+ coal->flag = 0;
+ coal->rx_time_us = ethcoal->rx_coalesce_usecs;
+ coal->rx_packets = ethcoal->rx_max_coalesced_frames;
+ }
}
qlcnic_config_intr_coalesce(adapter);
@@ -1352,6 +1403,8 @@ static int qlcnic_get_intr_coalesce(struct net_device *netdev,
ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us;
ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets;
+ ethcoal->tx_coalesce_usecs = adapter->ahw->coal.tx_time_us;
+ ethcoal->tx_max_coalesced_frames = adapter->ahw->coal.tx_packets;
return 0;
}
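Because one ethtool call carries both Rx and Tx parameters, the 83xx branch above has to infer which side the user actually changed: if the Rx pair matches the stored values it programs Tx, if the Tx pair matches it programs Rx, and otherwise it programs both. A compilable sketch of that comparison chain (struct and names illustrative):

#include <stdbool.h>
#include <stdio.h>

struct coal { unsigned rx_us, rx_frames, tx_us, tx_frames; };

/* Mirrors the comparison chain in qlcnic_set_intr_coalesce(). */
static const char *classify(const struct coal *cur, const struct coal *req)
{
        bool rx_same = cur->rx_us == req->rx_us &&
                       cur->rx_frames == req->rx_frames;
        bool tx_same = cur->tx_us == req->tx_us &&
                       cur->tx_frames == req->tx_frames;

        if (rx_same && !tx_same)
                return "tx";
        if (tx_same && !rx_same)
                return "rx";
        return "rx+tx"; /* both changed (or neither): program both */
}

int main(void)
{
        struct coal cur = { 3, 256, 64, 64 };
        struct coal req = { 3, 256, 32, 64 };   /* only tx_us touched */

        printf("program: %s\n", classify(&cur, &req));
        return 0;
}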
@@ -1537,3 +1590,25 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
.get_dump_data = qlcnic_get_dump_data,
.set_dump = qlcnic_set_dump,
};
+
+const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
+ .get_settings = qlcnic_get_settings,
+ .get_drvinfo = qlcnic_get_drvinfo,
+ .get_regs_len = qlcnic_get_regs_len,
+ .get_regs = qlcnic_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = qlcnic_get_eeprom_len,
+ .get_eeprom = qlcnic_get_eeprom,
+ .get_ringparam = qlcnic_get_ringparam,
+ .set_ringparam = qlcnic_set_ringparam,
+ .get_channels = qlcnic_get_channels,
+ .get_pauseparam = qlcnic_get_pauseparam,
+ .get_wol = qlcnic_get_wol,
+ .get_strings = qlcnic_get_strings,
+ .get_ethtool_stats = qlcnic_get_ethtool_stats,
+ .get_sset_count = qlcnic_get_sset_count,
+ .get_coalesce = qlcnic_get_intr_coalesce,
+ .set_coalesce = qlcnic_set_intr_coalesce,
+ .set_msglevel = qlcnic_set_msglevel,
+ .get_msglevel = qlcnic_get_msglevel,
+};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index 44197ca1456c..c0f0c0d0a790 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -669,7 +669,7 @@ enum {
#define QLCNIC_CMDPEG_CHECK_RETRY_COUNT 60
#define QLCNIC_CMDPEG_CHECK_DELAY 500
#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200
-#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 45
+#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 10
#define QLCNIC_MAX_MC_COUNT 38
#define QLCNIC_WATCHDOG_TIMEOUTVALUE 5
@@ -714,7 +714,9 @@ enum {
QLCNIC_MGMT_FUNC = 0,
QLCNIC_PRIV_FUNC = 1,
QLCNIC_NON_PRIV_FUNC = 2,
- QLCNIC_UNKNOWN_FUNC_MODE = 3
+ QLCNIC_SRIOV_PF_FUNC = 3,
+ QLCNIC_SRIOV_VF_FUNC = 4,
+ QLCNIC_UNKNOWN_FUNC_MODE = 5
};
enum {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index f89cc7a3fe6c..6a6512ba9f38 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -423,7 +423,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
}
int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
- __le16 vlan_id, u8 op)
+ u16 vlan_id, u8 op)
{
struct qlcnic_nic_req req;
struct qlcnic_mac_req *mac_req;
@@ -441,7 +441,7 @@ int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
memcpy(mac_req->mac_addr, addr, 6);
vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
- vlan_req->vlan_id = vlan_id;
+ vlan_req->vlan_id = cpu_to_le16(vlan_id);
return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}
@@ -468,7 +468,7 @@ int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
return err;
}
-int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
+int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan)
{
struct list_head *head;
struct qlcnic_mac_list_s *cur;
@@ -487,7 +487,7 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
memcpy(cur->mac_addr, addr, ETH_ALEN);
if (qlcnic_sre_macaddr_change(adapter,
- cur->mac_addr, 0, QLCNIC_MAC_ADD)) {
+ cur->mac_addr, vlan, QLCNIC_MAC_ADD)) {
kfree(cur);
return -EIO;
}
@@ -496,7 +496,7 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
return 0;
}
-void qlcnic_set_multi(struct net_device *netdev)
+void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct netdev_hw_addr *ha;
@@ -508,8 +508,9 @@ void qlcnic_set_multi(struct net_device *netdev)
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
- qlcnic_nic_add_mac(adapter, adapter->mac_addr);
- qlcnic_nic_add_mac(adapter, bcast_addr);
+ if (!qlcnic_sriov_vf_check(adapter))
+ qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan);
+ qlcnic_nic_add_mac(adapter, bcast_addr, vlan);
if (netdev->flags & IFF_PROMISC) {
if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
@@ -523,23 +524,55 @@ void qlcnic_set_multi(struct net_device *netdev)
goto send_fw_cmd;
}
- if (!netdev_mc_empty(netdev)) {
+ if (!netdev_mc_empty(netdev) && !qlcnic_sriov_vf_check(adapter)) {
netdev_for_each_mc_addr(ha, netdev) {
- qlcnic_nic_add_mac(adapter, ha->addr);
+ qlcnic_nic_add_mac(adapter, ha->addr, vlan);
}
}
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_vf_add_mc_list(netdev, vlan);
+
send_fw_cmd:
- if (mode == VPORT_MISS_MODE_ACCEPT_ALL && !adapter->fdb_mac_learn) {
- qlcnic_alloc_lb_filters_mem(adapter);
- adapter->drv_mac_learn = true;
- } else {
- adapter->drv_mac_learn = false;
+ if (!qlcnic_sriov_vf_check(adapter)) {
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+ !adapter->fdb_mac_learn) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ adapter->drv_mac_learn = true;
+ } else {
+ adapter->drv_mac_learn = false;
+ }
}
qlcnic_nic_set_promisc(adapter, mode);
}
+void qlcnic_set_multi(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct netdev_hw_addr *ha;
+ struct qlcnic_mac_list_s *cur;
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return;
+ if (qlcnic_sriov_vf_check(adapter)) {
+ if (!netdev_mc_empty(netdev)) {
+ netdev_for_each_mc_addr(ha, netdev) {
+ cur = kzalloc(sizeof(struct qlcnic_mac_list_s),
+ GFP_ATOMIC);
+ if (cur == NULL)
+ break;
+ memcpy(cur->mac_addr,
+ ha->addr, ETH_ALEN);
+ list_add_tail(&cur->list, &adapter->vf_mc_list);
+ }
+ }
+ qlcnic_sriov_vf_schedule_multi(adapter->netdev);
+ return;
+ }
+ __qlcnic_set_multi(netdev, 0);
+}
+
int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
{
struct qlcnic_nic_req req;
@@ -559,7 +592,7 @@ int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
(struct cmd_desc_type0 *)&req, 1);
}
-void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter)
{
struct qlcnic_mac_list_s *cur;
struct list_head *head = &adapter->mac_list;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 5b8749eda11f..95b1b5732838 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -83,6 +83,8 @@ enum qlcnic_regs {
#define QLCNIC_CMD_CONFIG_PORT 0x2e
#define QLCNIC_CMD_TEMP_SIZE 0x2f
#define QLCNIC_CMD_GET_TEMP_HDR 0x30
+#define QLCNIC_CMD_BC_EVENT_SETUP 0x31
+#define QLCNIC_CMD_CONFIG_VPORT 0x32
#define QLCNIC_CMD_GET_MAC_STATS 0x37
#define QLCNIC_CMD_SET_DRV_VER 0x38
#define QLCNIC_CMD_CONFIGURE_RSS 0x41
@@ -114,6 +116,7 @@ enum qlcnic_regs {
#define QLCNIC_SET_FAC_DEF_MAC 5
#define QLCNIC_MBX_LINK_EVENT 0x8001
+#define QLCNIC_MBX_BC_EVENT 0x8002
#define QLCNIC_MBX_COMP_EVENT 0x8100
#define QLCNIC_MBX_REQUEST_EVENT 0x8101
#define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102
@@ -156,7 +159,7 @@ int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
struct net_device *netdev);
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
- u64 *uaddr, __le16 vlan_id);
+ u64 *uaddr, u16 vlan_id);
void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter);
int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int);
void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
@@ -175,7 +178,10 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *);
int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *tx_ring, int);
-int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, __le16, u8);
+void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *);
+void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*);
int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 0e630061bff3..d3f8797efcc3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -9,6 +9,7 @@
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
+#include <net/checksum.h>
#include "qlcnic.h"
@@ -146,7 +147,10 @@ static inline u8 qlcnic_mac_hash(u64 mac)
static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
u16 handle, u8 ring_id)
{
- if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X)
+ unsigned short device = adapter->pdev->device;
+
+ if ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X))
return handle | (ring_id << 15);
else
return handle;
@@ -158,7 +162,7 @@ static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
}
void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
- int loopback_pkt, __le16 vlan_id)
+ int loopback_pkt, u16 vlan_id)
{
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
struct qlcnic_filter *fil, *tmp_fil;
@@ -236,7 +240,7 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
}
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
- __le16 vlan_id)
+ u16 vlan_id)
{
struct cmd_desc_type0 *hwdesc;
struct qlcnic_nic_req *req;
@@ -261,7 +265,7 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
- vlan_req->vlan_id = vlan_id;
+ vlan_req->vlan_id = cpu_to_le16(vlan_id);
tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
smp_mb();
@@ -277,7 +281,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
struct net_device *netdev = adapter->netdev;
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
u64 src_addr = 0;
- __le16 vlan_id = 0;
+ u16 vlan_id = 0;
u8 hindex;
if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
@@ -340,14 +344,14 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
flags = FLAGS_VLAN_OOB;
vlan_tci = vlan_tx_tag_get(skb);
}
- if (unlikely(adapter->pvid)) {
+ if (unlikely(adapter->tx_pvid)) {
if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
return -EIO;
if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
goto set_flags;
flags = FLAGS_VLAN_OOB;
- vlan_tci = adapter->pvid;
+ vlan_tci = adapter->tx_pvid;
}
set_flags:
qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
@@ -358,8 +362,7 @@ set_flags:
memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
}
opcode = TX_ETHER_PKT;
- if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
- skb_shinfo(skb)->gso_size > 0) {
+ if (skb_is_gso(skb)) {
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
first_desc->total_hdr_length = hdr_len;
@@ -976,10 +979,10 @@ static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
skb_pull(skb, VLAN_HLEN);
}
- if (!adapter->pvid)
+ if (!adapter->rx_pvid)
return 0;
- if (*vlan_tag == adapter->pvid) {
+ if (*vlan_tag == adapter->rx_pvid) {
/* Outer vlan tag. Packet should follow non-vlan path */
*vlan_tag = 0xffff;
return 0;
@@ -1025,8 +1028,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
t_vid = 0;
is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
- qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
- cpu_to_le16(t_vid));
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
}
if (length > rds_ring->skb_size)
@@ -1046,7 +1048,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
skb->protocol = eth_type_trans(skb, netdev);
if (vid != 0xffff)
- __vlan_hwaccel_put_tag(skb, vid);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
napi_gro_receive(&sds_ring->napi, skb);
@@ -1103,8 +1105,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
t_vid = 0;
is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
- qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
- cpu_to_le16(t_vid));
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
}
if (timestamp)
@@ -1132,9 +1133,8 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
iph = (struct iphdr *)skb->data;
th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
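+ /* Patch the IP header checksum incrementally for the new
+ * tot_len rather than zeroing and recomputing it.
+ */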
+ csum_replace2(&iph->check, iph->tot_len, htons(length));
iph->tot_len = htons(length);
- iph->check = 0;
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
th->psh = push;
@@ -1150,7 +1150,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
}
if (vid != 0xffff)
- __vlan_hwaccel_put_tag(skb, vid);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
netif_receive_skb(skb);
adapter->stats.lro_pkts++;
@@ -1497,8 +1497,7 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
t_vid = 0;
is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
- qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
- cpu_to_le16(t_vid));
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
}
if (length > rds_ring->skb_size)
@@ -1515,7 +1514,7 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
skb->protocol = eth_type_trans(skb, netdev);
if (vid != 0xffff)
- __vlan_hwaccel_put_tag(skb, vid);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
napi_gro_receive(&sds_ring->napi, skb);
@@ -1567,8 +1566,7 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
t_vid = 0;
is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
- qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
- cpu_to_le16(t_vid));
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
}
if (qlcnic_83xx_is_tstamp(sts_data[1]))
data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
@@ -1595,9 +1593,8 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
iph = (struct iphdr *)skb->data;
th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ csum_replace2(&iph->check, iph->tot_len, htons(length));
iph->tot_len = htons(length);
- iph->check = 0;
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
th->psh = push;
@@ -1613,7 +1610,7 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
}
if (vid != 0xffff)
- __vlan_hwaccel_put_tag(skb, vid);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
netif_receive_skb(skb);
@@ -1692,6 +1689,29 @@ skip:
return count;
}
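+/* NAPI poll used when the Tx ring shares its MSI-X vector with Rx
+ * (SR-IOV VF): service the single Tx ring and the Rx ring together,
+ * and re-enable the interrupt only when both complete within budget.
+ */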
+static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
+{
+ int tx_complete;
+ int work_done;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+ /* tx ring count = 1 */
+ tx_ring = adapter->tx_ring;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+ if ((work_done < budget) && tx_complete) {
+ napi_complete(&sds_ring->napi);
+ qlcnic_83xx_enable_intr(adapter, sds_ring);
+ }
+
+ return work_done;
+}
+
static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
{
int tx_complete;
@@ -1769,7 +1789,8 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
qlcnic_83xx_enable_intr(adapter, sds_ring);
}
- if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
napi_enable(&tx_ring->napi);
@@ -1796,7 +1817,8 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
napi_disable(&sds_ring->napi);
}
- if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
@@ -1809,7 +1831,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
struct net_device *netdev)
{
- int ring, max_sds_rings;
+ int ring, max_sds_rings, temp;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
@@ -1820,14 +1842,23 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
max_sds_rings = adapter->max_sds_rings;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- if (adapter->flags & QLCNIC_MSIX_ENABLED)
- netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_83xx_rx_poll,
- QLCNIC_NETDEV_WEIGHT * 2);
- else
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_83xx_rx_poll,
+ QLCNIC_NETDEV_WEIGHT * 2);
+ } else {
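+ /* Shared Tx/Rx vector: use the combined VF
+ * poll with a proportionally reduced weight.
+ */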
+ temp = QLCNIC_NETDEV_WEIGHT / max_sds_rings;
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_83xx_msix_sriov_vf_poll,
+ temp);
+ }
+
+ } else {
netif_napi_add(netdev, &sds_ring->napi,
qlcnic_83xx_poll,
QLCNIC_NETDEV_WEIGHT / max_sds_rings);
+ }
}
if (qlcnic_alloc_tx_rings(adapter, netdev)) {
@@ -1835,7 +1866,8 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
return -ENOMEM;
}
- if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_add(netdev, &tx_ring->napi,
@@ -1861,7 +1893,8 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
qlcnic_free_sds_rings(adapter->recv_ctx);
- if ((adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_del(&tx_ring->napi);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 28a6d4838364..264d5a4f8153 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include "qlcnic.h"
+#include "qlcnic_sriov.h"
#include "qlcnic_hw.h"
#include <linux/swab.h>
@@ -85,8 +86,8 @@ static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
-static int qlcnic_vlan_rx_add(struct net_device *, u16);
-static int qlcnic_vlan_rx_del(struct net_device *, u16);
+static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16);
+static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16);
#define QLCNIC_IS_TSO_CAPABLE(adapter) \
((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
@@ -109,6 +110,7 @@ static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
{0,}
};
@@ -154,25 +156,112 @@ static const u32 qlcnic_reg_tbl[] = {
};
static const struct qlcnic_board_info qlcnic_boards[] = {
- {0x1077, 0x8020, 0x1077, 0x203,
- "8200 Series Single Port 10GbE Converged Network Adapter"
- "(TCP/IP Networking)"},
- {0x1077, 0x8020, 0x1077, 0x207,
- "8200 Series Dual Port 10GbE Converged Network Adapter"
- "(TCP/IP Networking)"},
- {0x1077, 0x8020, 0x1077, 0x20b,
- "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"},
- {0x1077, 0x8020, 0x1077, 0x20c,
- "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
- {0x1077, 0x8020, 0x1077, 0x20f,
- "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
- {0x1077, 0x8020, 0x103c, 0x3733,
- "NC523SFP 10Gb 2-port Server Adapter"},
- {0x1077, 0x8020, 0x103c, 0x3346,
- "CN1000Q Dual Port Converged Network Adapter"},
- {0x1077, 0x8020, 0x1077, 0x210,
- "QME8242-k 10GbE Dual Port Mezzanine Card"},
- {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x24e,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x243,
+ "8300 Series Single Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x24a,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x246,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x252,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x26e,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x260,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x266,
+ "8300 Series Single Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x269,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x271,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ 0x0, 0x0, "8300 Series 1/10GbE Controller" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x203,
+ "8200 Series Single Port 10GbE Converged Network Adapter"
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x207,
+ "8200 Series Dual Port 10GbE Converged Network Adapter"
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x20b,
+ "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x20c,
+ "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x20f,
+ "3200 Series Single Port 10Gb Intelligent Ethernet Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ 0x103c, 0x3733,
+ "NC523SFP 10Gb 2-port Server Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ 0x103c, 0x3346,
+ "CN1000Q Dual Port Converged Network Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x210,
+ "QME8242-k 10GbE Dual Port Mezzanine Card" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ 0x0, 0x0, "cLOM8214 1/10GbE Controller" },
};
#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
@@ -198,8 +287,7 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
recv_ctx->sds_rings = NULL;
}
-static int
-qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
+int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
{
u8 mac_addr[ETH_ALEN];
struct net_device *netdev = adapter->netdev;
@@ -225,6 +313,9 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
+ if (qlcnic_sriov_vf_check(adapter))
+ return -EINVAL;
+
if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
return -EOPNOTSUPP;
@@ -253,11 +344,8 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err = -EOPNOTSUPP;
- if (!adapter->fdb_mac_learn) {
- pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
- __func__);
- return err;
- }
+ if (!adapter->fdb_mac_learn)
+ return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
if (is_unicast_ether_addr(addr))
@@ -277,11 +365,8 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err = 0;
- if (!adapter->fdb_mac_learn) {
- pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
- __func__);
- return -EOPNOTSUPP;
- }
+ if (!adapter->fdb_mac_learn)
+ return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags);
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
pr_info("%s: FDB e-switch is not enabled\n", __func__);
@@ -292,7 +377,7 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
if (is_unicast_ether_addr(addr))
- err = qlcnic_nic_add_mac(adapter, addr);
+ err = qlcnic_nic_add_mac(adapter, addr, 0);
else if (is_multicast_ether_addr(addr))
err = dev_mc_add_excl(netdev, addr);
else
@@ -306,11 +391,8 @@ static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- if (!adapter->fdb_mac_learn) {
- pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
- __func__);
- return -EOPNOTSUPP;
- }
+ if (!adapter->fdb_mac_learn)
+ return ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
@@ -346,6 +428,12 @@ static const struct net_device_ops qlcnic_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
#endif
+#ifdef CONFIG_QLCNIC_SRIOV
+ .ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
+ .ndo_set_vf_tx_rate = qlcnic_sriov_set_vf_tx_rate,
+ .ndo_get_vf_config = qlcnic_sriov_get_vf_config,
+ .ndo_set_vf_vlan = qlcnic_sriov_set_vf_vlan,
+#endif
};
static const struct net_device_ops qlcnic_netdev_failed_ops = {
@@ -387,6 +475,8 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
.process_lb_rcv_ring_diag = qlcnic_82xx_process_rcv_ring_diag,
.create_rx_ctx = qlcnic_82xx_fw_cmd_create_rx_ctx,
.create_tx_ctx = qlcnic_82xx_fw_cmd_create_tx_ctx,
+ .del_rx_ctx = qlcnic_82xx_fw_cmd_del_rx_ctx,
+ .del_tx_ctx = qlcnic_82xx_fw_cmd_del_tx_ctx,
.setup_link_event = qlcnic_82xx_linkevent_request,
.get_nic_info = qlcnic_82xx_get_nic_info,
.get_pci_info = qlcnic_82xx_get_pci_info,
@@ -402,13 +492,22 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
.config_promisc_mode = qlcnic_82xx_nic_set_promisc,
.change_l2_filter = qlcnic_82xx_change_filter,
.get_board_info = qlcnic_82xx_get_board_info,
+ .free_mac_list = qlcnic_82xx_free_mac_list,
};
int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
struct pci_dev *pdev = adapter->pdev;
int err = -1, i;
- int max_tx_rings;
+ int max_tx_rings, tx_vector;
+
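+ /* With a shared Tx/Rx vector (SR-IOV VF) no MSI-X entries are
+ * reserved for dedicated Tx rings.
+ */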
+ if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
+ max_tx_rings = 0;
+ tx_vector = 0;
+ } else {
+ max_tx_rings = adapter->max_drv_tx_rings;
+ tx_vector = 1;
+ }
if (!adapter->msix_entries) {
adapter->msix_entries = kcalloc(num_msix,
@@ -431,7 +530,6 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
if (qlcnic_83xx_check(adapter)) {
adapter->ahw->num_msix = num_msix;
/* subtract mailbox and Tx ring vectors */
- max_tx_rings = adapter->max_drv_tx_rings;
adapter->max_sds_rings = num_msix -
max_tx_rings - 1;
} else {
@@ -444,11 +542,11 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
"Unable to allocate %d MSI-X interrupt vectors\n",
num_msix);
if (qlcnic_83xx_check(adapter)) {
- if (err < QLC_83XX_MINIMUM_VECTOR)
+ if (err < (QLC_83XX_MINIMUM_VECTOR - tx_vector))
return err;
- err -= (adapter->max_drv_tx_rings + 1);
+ err -= (max_tx_rings + 1);
num_msix = rounddown_pow_of_two(err);
- num_msix += (adapter->max_drv_tx_rings + 1);
+ num_msix += (max_tx_rings + 1);
} else {
num_msix = rounddown_pow_of_two(err);
}
@@ -542,11 +640,10 @@ void qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
}
}
-static void
-qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
+static void qlcnic_cleanup_pci_map(struct qlcnic_hardware_context *ahw)
{
- if (adapter->ahw->pci_base0 != NULL)
- iounmap(adapter->ahw->pci_base0);
+ if (ahw->pci_base0 != NULL)
+ iounmap(ahw->pci_base0);
}
static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
@@ -721,6 +818,7 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
*bar = QLCNIC_82XX_BAR0_LENGTH;
break;
case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
*bar = QLCNIC_83XX_BAR0_LENGTH;
break;
default:
@@ -751,7 +849,7 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
return -EIO;
}
- dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
+ dev_info(&pdev->dev, "%dKB memory map\n", (int)(mem_len >> 10));
ahw->pci_base0 = mem_ptr0;
ahw->pci_len0 = pci_len0;
@@ -891,24 +989,50 @@ void qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
else
adapter->flags |= QLCNIC_TAGGING_ENABLED;
- if (esw_cfg->vlan_id)
- adapter->pvid = esw_cfg->vlan_id;
- else
- adapter->pvid = 0;
+ if (esw_cfg->vlan_id) {
+ adapter->rx_pvid = esw_cfg->vlan_id;
+ adapter->tx_pvid = esw_cfg->vlan_id;
+ } else {
+ adapter->rx_pvid = 0;
+ adapter->tx_pvid = 0;
+ }
}
static int
-qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
+qlcnic_vlan_rx_add(struct net_device *netdev, __be16 proto, u16 vid)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (qlcnic_sriov_vf_check(adapter)) {
+ err = qlcnic_sriov_cfg_vf_guest_vlan(adapter, vid, 1);
+ if (err) {
+ netdev_err(netdev,
+ "Cannot add VLAN filter for VLAN id %d, err=%d",
+ vid, err);
+ return err;
+ }
+ }
+
set_bit(vid, adapter->vlans);
return 0;
}
static int
-qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
+qlcnic_vlan_rx_del(struct net_device *netdev, __be16 proto, u16 vid)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (qlcnic_sriov_vf_check(adapter)) {
+ err = qlcnic_sriov_cfg_vf_guest_vlan(adapter, vid, 0);
+ if (err) {
+ netdev_err(netdev,
+ "Cannot delete VLAN filter for VLAN id %d, err=%d",
+ vid, err);
+ return err;
+ }
+ }
qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
clear_bit(vid, adapter->vlans);
@@ -1250,7 +1374,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
irq_handler_t handler;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_host_tx_ring *tx_ring;
- int err, ring;
+ int err, ring, num_sds_rings;
unsigned long flags = 0;
struct net_device *netdev = adapter->netdev;
@@ -1281,10 +1405,20 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
if (qlcnic_82xx_check(adapter) ||
(qlcnic_83xx_check(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED))) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ num_sds_rings = adapter->max_sds_rings;
+ for (ring = 0; ring < num_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- snprintf(sds_ring->name, sizeof(int) + IFNAMSIZ,
- "%s[%d]", netdev->name, ring);
+ if (qlcnic_82xx_check(adapter) &&
+ (ring == (num_sds_rings - 1)))
+ snprintf(sds_ring->name,
+ sizeof(sds_ring->name),
+ "qlcnic-%s[Tx0+Rx%d]",
+ netdev->name, ring);
+ else
+ snprintf(sds_ring->name,
+ sizeof(sds_ring->name),
+ "qlcnic-%s[Rx%d]",
+ netdev->name, ring);
err = request_irq(sds_ring->irq, handler, flags,
sds_ring->name, sds_ring);
if (err)
@@ -1292,14 +1426,14 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
}
}
if (qlcnic_83xx_check(adapter) &&
- (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
handler = qlcnic_msix_tx_intr;
for (ring = 0; ring < adapter->max_drv_tx_rings;
ring++) {
tx_ring = &adapter->tx_ring[ring];
- snprintf(tx_ring->name, sizeof(int) + IFNAMSIZ,
- "%s[%d]", netdev->name,
- adapter->max_sds_rings + ring);
+ snprintf(tx_ring->name, sizeof(tx_ring->name),
+ "qlcnic-%s[Tx%d]", netdev->name, ring);
err = request_irq(tx_ring->irq, handler, flags,
tx_ring->name, tx_ring);
if (err)
@@ -1328,7 +1462,8 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
free_irq(sds_ring->irq, sds_ring);
}
}
- if (qlcnic_83xx_check(adapter)) {
+ if (qlcnic_83xx_check(adapter) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
for (ring = 0; ring < adapter->max_drv_tx_rings;
ring++) {
tx_ring = &adapter->tx_ring[ring];
@@ -1418,9 +1553,12 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
return;
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
smp_mb();
spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev);
+ adapter->ahw->linkup = 0;
netif_tx_disable(netdev);
qlcnic_free_mac_list(adapter);
@@ -1545,7 +1683,9 @@ out:
static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
int err = 0;
+
adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
GFP_KERNEL);
if (!adapter->recv_ctx) {
@@ -1553,9 +1693,14 @@ static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
goto err_out;
}
/* Initialize interrupt coalesce parameters */
- adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
- adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
- adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ ahw->coal.flag = QLCNIC_INTR_DEFAULT;
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+ if (qlcnic_83xx_check(adapter)) {
+ ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+ ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+ }
/* clear stats */
memset(&adapter->stats, 0, sizeof(adapter->stats));
err_out:
@@ -1685,7 +1830,7 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
return err;
}
-static int
+int
qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
int pci_using_dac)
{
@@ -1701,11 +1846,14 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
qlcnic_change_mtu(netdev, netdev->mtu);
- SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
+ if (qlcnic_sriov_vf_check(adapter))
+ SET_ETHTOOL_OPS(netdev, &qlcnic_sriov_vf_ethtool_ops);
+ else
+ SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_IPV6_CSUM | NETIF_F_GRO |
- NETIF_F_HW_VLAN_RX);
+ NETIF_F_HW_VLAN_CTAG_RX);
netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM);
@@ -1720,7 +1868,10 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
}
if (qlcnic_vlan_tx_check(adapter))
- netdev->features |= (NETIF_F_HW_VLAN_TX);
+ netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX);
+
+ if (qlcnic_sriov_vf_check(adapter))
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
netdev->features |= NETIF_F_LRO;
@@ -1820,6 +1971,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u32 capab2;
char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
+ if (pdev->is_virtfn)
+ return -ENODEV;
+
err = pci_enable_device(pdev);
if (err)
return err;
@@ -1844,12 +1998,18 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!ahw)
goto err_out_free_res;
- if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE824X) {
+ switch (ent->device) {
+ case PCI_DEVICE_ID_QLOGIC_QLE824X:
ahw->hw_ops = &qlcnic_hw_ops;
- ahw->reg_tbl = (u32 *)qlcnic_reg_tbl;
- } else if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE834X) {
+ ahw->reg_tbl = (u32 *) qlcnic_reg_tbl;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_QLE834X:
qlcnic_83xx_register_map(ahw);
- } else {
+ break;
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ qlcnic_sriov_vf_register_map(ahw);
+ break;
+ default:
goto err_out_free_hw_res;
}
@@ -1911,11 +2071,13 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
- err = qlcnic_83xx_init(adapter);
+ err = qlcnic_83xx_init(adapter, pci_using_dac);
if (err) {
dev_err(&pdev->dev, "%s: failed\n", __func__);
goto err_out_free_hw;
}
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
} else {
dev_err(&pdev->dev,
"%s: failed. Please Reboot\n", __func__);
@@ -1932,6 +2094,12 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
module_name(THIS_MODULE),
board_name, adapter->ahw->revision_id);
}
+
+ if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x &&
+ !!qlcnic_use_msi)
+ dev_warn(&pdev->dev,
+ "83xx adapter do not support MSI interrupts\n");
+
err = qlcnic_setup_intr(adapter, 0);
if (err) {
dev_err(&pdev->dev, "Failed to setup interrupt\n");
@@ -1999,7 +2167,7 @@ err_out_free_netdev:
free_netdev(netdev);
err_out_iounmap:
- qlcnic_cleanup_pci_map(adapter);
+ qlcnic_cleanup_pci_map(ahw);
err_out_free_hw_res:
kfree(ahw);
@@ -2024,11 +2192,13 @@ static void qlcnic_remove(struct pci_dev *pdev)
return;
netdev = adapter->netdev;
+ qlcnic_sriov_pf_disable(adapter);
qlcnic_cancel_idc_work(adapter);
ahw = adapter->ahw;
unregister_netdev(netdev);
+ qlcnic_sriov_cleanup(adapter);
if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_free_mbx_intr(adapter);
@@ -2054,7 +2224,7 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_remove_sysfs(adapter);
- qlcnic_cleanup_pci_map(adapter);
+ qlcnic_cleanup_pci_map(adapter->ahw);
qlcnic_release_firmware(adapter);
@@ -2084,6 +2254,7 @@ static int __qlcnic_shutdown(struct pci_dev *pdev)
if (netif_running(netdev))
qlcnic_down(adapter, netdev);
+ qlcnic_sriov_cleanup(adapter);
if (qlcnic_82xx_check(adapter))
qlcnic_clr_all_drv_state(adapter, 0);
@@ -3205,20 +3376,40 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_validate_max_rss(u8 max_hw, u8 val)
+int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
+ __u32 val)
{
+ struct net_device *netdev = adapter->netdev;
+ u8 max_hw = adapter->ahw->max_rx_ques;
u32 max_allowed;
- if (max_hw > QLC_MAX_SDS_RINGS) {
- max_hw = QLC_MAX_SDS_RINGS;
- pr_info("max rss reset to %d\n", QLC_MAX_SDS_RINGS);
+ if (val > QLC_MAX_SDS_RINGS) {
+ netdev_err(netdev, "RSS value should not be higher than %u\n",
+ QLC_MAX_SDS_RINGS);
+ return -EINVAL;
}
max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
num_online_cpus()));
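+ /* Valid ring counts are powers of two in [2, max_allowed], where
+ * max_allowed is capped by both the adapter limit and the number of
+ * online CPUs.
+ */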
if ((val > max_allowed) || (val < 2) || !is_power_of_2(val)) {
- pr_info("rss_ring valid range [2 - %x] in powers of 2\n",
- max_allowed);
+ if (!is_power_of_2(val))
+ netdev_err(netdev, "RSS value should be a power of 2\n");
+
+ if (val < 2)
+ netdev_err(netdev, "RSS value should not be lower than 2\n");
+
+ if (val > max_hw)
+ netdev_err(netdev,
+ "RSS value should not be higher than[%u], the max RSS rings supported by the adapter\n",
+ max_hw);
+
+ if (val > num_online_cpus())
+ netdev_err(netdev,
+ "RSS value should not be higher than[%u], number of online CPUs in the system\n",
+ num_online_cpus());
+
+ netdev_err(netdev, "Unable to configure %u RSS rings\n", val);
+
return -EINVAL;
}
return 0;
@@ -3238,8 +3429,10 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
qlcnic_detach(adapter);
- if (qlcnic_83xx_check(adapter))
+ if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_free_mbx_intr(adapter);
+ qlcnic_83xx_enable_mbx_poll(adapter);
+ }
qlcnic_teardown_intr(adapter);
err = qlcnic_setup_intr(adapter, data);
@@ -3253,6 +3446,7 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
/* register for NIC IDC AEN Events */
qlcnic_83xx_register_nic_idc_func(adapter, 1);
err = qlcnic_83xx_setup_mbx_intr(adapter);
+ qlcnic_83xx_disable_mbx_poll(adapter);
if (err) {
dev_err(&adapter->pdev->dev,
"failed to setup mbx interrupt\n");
@@ -3318,7 +3512,7 @@ void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
rcu_read_lock();
for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
- dev = __vlan_find_dev_deep(netdev, vid);
+ dev = __vlan_find_dev_deep(netdev, htons(ETH_P_8021Q), vid);
if (!dev)
continue;
qlcnic_config_indev_addr(adapter, dev, event);
@@ -3432,7 +3626,10 @@ static struct pci_driver qlcnic_driver = {
.resume = qlcnic_resume,
#endif
.shutdown = qlcnic_shutdown,
- .err_handler = &qlcnic_err_handler
+ .err_handler = &qlcnic_err_handler,
+#ifdef CONFIG_QLCNIC_SRIOV
+ .sriov_configure = qlcnic_pci_sriov_configure,
+#endif
};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index abbd22c814a6..4b9bab18ebd9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -810,11 +810,8 @@ static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
&tmp_addr_t, GFP_KERNEL);
- if (!tmp_addr) {
- dev_err(&adapter->pdev->dev,
- "Can't get memory for FW dump template\n");
+ if (!tmp_addr)
return -ENOMEM;
- }
if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
new file mode 100644
index 000000000000..d85fbb57c25b
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -0,0 +1,263 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef _QLCNIC_83XX_SRIOV_H_
+#define _QLCNIC_83XX_SRIOV_H_
+
+#include "qlcnic.h"
+#include <linux/types.h>
+#include <linux/pci.h>
+
+extern const u32 qlcnic_83xx_reg_tbl[];
+extern const u32 qlcnic_83xx_ext_reg_tbl[];
+
+struct qlcnic_bc_payload {
+ u64 payload[126];
+};
+
+struct qlcnic_bc_hdr {
+#if defined(__LITTLE_ENDIAN)
+ u8 version;
+ u8 msg_type:4;
+ u8 rsvd1:3;
+ u8 op_type:1;
+ u8 num_cmds;
+ u8 num_frags;
+ u8 frag_num;
+ u8 cmd_op;
+ u16 seq_id;
+ u64 rsvd3;
+#elif defined(__BIG_ENDIAN)
+ u8 num_frags;
+ u8 num_cmds;
+ u8 op_type:1;
+ u8 rsvd1:3;
+ u8 msg_type:4;
+ u8 version;
+ u16 seq_id;
+ u8 cmd_op;
+ u8 frag_num;
+ u64 rsvd3;
+#endif
+};
+
+enum qlcnic_bc_commands {
+ QLCNIC_BC_CMD_CHANNEL_INIT = 0x0,
+ QLCNIC_BC_CMD_CHANNEL_TERM = 0x1,
+ QLCNIC_BC_CMD_GET_ACL = 0x2,
+ QLCNIC_BC_CMD_CFG_GUEST_VLAN = 0x3,
+};
+
+#define QLC_BC_CMD 1
+
+struct qlcnic_trans_list {
+ /* Lock for manipulating list */
+ spinlock_t lock;
+ struct list_head wait_list;
+ int count;
+};
+
+enum qlcnic_trans_state {
+ QLC_INIT = 0,
+ QLC_WAIT_FOR_CHANNEL_FREE,
+ QLC_WAIT_FOR_RESP,
+ QLC_ABORT,
+ QLC_END,
+};
+
+struct qlcnic_bc_trans {
+ u8 func_id;
+ u8 active;
+ u8 curr_rsp_frag;
+ u8 curr_req_frag;
+ u16 cmd_id;
+ u16 req_pay_size;
+ u16 rsp_pay_size;
+ u32 trans_id;
+ enum qlcnic_trans_state trans_state;
+ struct list_head list;
+ struct qlcnic_bc_hdr *req_hdr;
+ struct qlcnic_bc_hdr *rsp_hdr;
+ struct qlcnic_bc_payload *req_pay;
+ struct qlcnic_bc_payload *rsp_pay;
+ struct completion resp_cmpl;
+ struct qlcnic_vf_info *vf;
+};
+
+enum qlcnic_vf_state {
+ QLC_BC_VF_SEND = 0,
+ QLC_BC_VF_RECV,
+ QLC_BC_VF_CHANNEL,
+ QLC_BC_VF_STATE,
+ QLC_BC_VF_FLR,
+ QLC_BC_VF_SOFT_FLR,
+};
+
+enum qlcnic_vlan_mode {
+ QLC_NO_VLAN_MODE = 0,
+ QLC_PVID_MODE,
+ QLC_GUEST_VLAN_MODE,
+};
+
+struct qlcnic_resources {
+ u16 num_tx_mac_filters;
+ u16 num_rx_ucast_mac_filters;
+ u16 num_rx_mcast_mac_filters;
+
+ u16 num_txvlan_keys;
+
+ u16 num_rx_queues;
+ u16 num_tx_queues;
+
+ u16 num_rx_buf_rings;
+ u16 num_rx_status_rings;
+
+ u16 num_destip;
+ u32 num_lro_flows_supported;
+ u16 max_local_ipv6_addrs;
+ u16 max_remote_ipv6_addrs;
+};
+
+struct qlcnic_vport {
+ u16 handle;
+ u16 max_tx_bw;
+ u16 min_tx_bw;
+ u8 vlan_mode;
+ u16 vlan;
+ u8 qos;
+ u8 mac[6];
+};
+
+struct qlcnic_vf_info {
+ u8 pci_func;
+ u16 rx_ctx_id;
+ u16 tx_ctx_id;
+ unsigned long state;
+ struct completion ch_free_cmpl;
+ struct work_struct trans_work;
+ struct work_struct flr_work;
+ /* Serializes commands sent from the VF */
+ struct mutex send_cmd_lock;
+ struct qlcnic_bc_trans *send_cmd;
+ struct qlcnic_bc_trans *flr_trans;
+ struct qlcnic_trans_list rcv_act;
+ struct qlcnic_trans_list rcv_pend;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_vport *vp;
+};
+
+struct qlcnic_async_work_list {
+ struct list_head list;
+ struct work_struct work;
+ void *ptr;
+};
+
+struct qlcnic_back_channel {
+ u16 trans_counter;
+ struct workqueue_struct *bc_trans_wq;
+ struct workqueue_struct *bc_async_wq;
+ struct workqueue_struct *bc_flr_wq;
+ struct list_head async_list;
+};
+
+struct qlcnic_sriov {
+ u16 vp_handle;
+ u8 num_vfs;
+ u8 any_vlan;
+ u8 vlan_mode;
+ u16 num_allowed_vlans;
+ u16 *allowed_vlans;
+ u16 vlan;
+ struct qlcnic_resources ff_max;
+ struct qlcnic_back_channel bc;
+ struct qlcnic_vf_info *vf_info;
+};
+
+int qlcnic_sriov_init(struct qlcnic_adapter *, int);
+void qlcnic_sriov_cleanup(struct qlcnic_adapter *);
+void __qlcnic_sriov_cleanup(struct qlcnic_adapter *);
+void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *);
+int qlcnic_sriov_vf_init(struct qlcnic_adapter *, int);
+void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *);
+int qlcnic_sriov_func_to_index(struct qlcnic_adapter *, u8);
+int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
+void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *, u32);
+int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *, u8);
+void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *);
+void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *);
+int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *, struct qlcnic_vf_info *,
+ struct qlcnic_bc_trans *);
+int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
+ struct qlcnic_info *, u16);
+int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
+
+static inline bool qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter)
+{
+ return test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state) ? true : false;
+}
+
+#ifdef CONFIG_QLCNIC_SRIOV
+void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *,
+ struct qlcnic_bc_trans *,
+ struct qlcnic_cmd_args *);
+void qlcnic_sriov_pf_disable(struct qlcnic_adapter *);
+void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *);
+int qlcnic_pci_sriov_configure(struct pci_dev *, int);
+void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *, u32 *);
+void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *, struct qlcnic_vf_info *);
+bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *,
+ struct qlcnic_bc_trans *,
+ struct qlcnic_vf_info *);
+void qlcnic_sriov_pf_reset(struct qlcnic_adapter *);
+int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *);
+int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *);
+int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int);
+int qlcnic_sriov_get_vf_config(struct net_device *, int,
+ struct ifla_vf_info *);
+int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8);
+#else
+static inline void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) {}
+static inline void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter) {}
+static inline void
+qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter, u32 *int_id)
+{}
+static inline void
+qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter, u32 *int_id)
+{}
+static inline void
+qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter, u32 *int_id)
+{}
+static inline void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf) {}
+static inline bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_vf_info *vf)
+{ return false; }
+static inline void qlcnic_sriov_pf_reset(struct qlcnic_adapter *adapter) {}
+static inline int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *adapter)
+{ return 0; }
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
new file mode 100644
index 000000000000..44d547d78b84
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -0,0 +1,1954 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic_sriov.h"
+#include "qlcnic.h"
+#include "qlcnic_83xx_hw.h"
+#include <linux/types.h>
+
+#define QLC_BC_COMMAND 0
+#define QLC_BC_RESPONSE 1
+
+#define QLC_MBOX_RESP_TIMEOUT (10 * HZ)
+#define QLC_MBOX_CH_FREE_TIMEOUT (10 * HZ)
+
+#define QLC_BC_MSG 0
+#define QLC_BC_CFREE 1
+#define QLC_BC_FLR 2
+#define QLC_BC_HDR_SZ 16
+#define QLC_BC_PAYLOAD_SZ (1024 - QLC_BC_HDR_SZ)
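+/* Each back-channel fragment fills a 1KB message frame: a 16-byte
+ * qlcnic_bc_hdr followed by 1008 bytes of payload (126 u64 words,
+ * matching struct qlcnic_bc_payload).
+ */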
+
+#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF 2048
+#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF 512
+
+#define QLC_83XX_VF_RESET_FAIL_THRESH 8
+#define QLC_BC_CMD_MAX_RETRY_CNT 5
+
+static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
+static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
+static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
+static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
+static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
+static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *,
+ struct qlcnic_cmd_args *);
+
+static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
+ .read_crb = qlcnic_83xx_read_crb,
+ .write_crb = qlcnic_83xx_write_crb,
+ .read_reg = qlcnic_83xx_rd_reg_indirect,
+ .write_reg = qlcnic_83xx_wrt_reg_indirect,
+ .get_mac_address = qlcnic_83xx_get_mac_address,
+ .setup_intr = qlcnic_83xx_setup_intr,
+ .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
+ .mbx_cmd = qlcnic_sriov_vf_mbx_op,
+ .get_func_no = qlcnic_83xx_get_func_no,
+ .api_lock = qlcnic_83xx_cam_lock,
+ .api_unlock = qlcnic_83xx_cam_unlock,
+ .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag,
+ .create_rx_ctx = qlcnic_83xx_create_rx_ctx,
+ .create_tx_ctx = qlcnic_83xx_create_tx_ctx,
+ .del_rx_ctx = qlcnic_83xx_del_rx_ctx,
+ .del_tx_ctx = qlcnic_83xx_del_tx_ctx,
+ .setup_link_event = qlcnic_83xx_setup_link_event,
+ .get_nic_info = qlcnic_83xx_get_nic_info,
+ .get_pci_info = qlcnic_83xx_get_pci_info,
+ .set_nic_info = qlcnic_83xx_set_nic_info,
+ .change_macvlan = qlcnic_83xx_sre_macaddr_change,
+ .napi_enable = qlcnic_83xx_napi_enable,
+ .napi_disable = qlcnic_83xx_napi_disable,
+ .config_intr_coal = qlcnic_83xx_config_intr_coal,
+ .config_rss = qlcnic_83xx_config_rss,
+ .config_hw_lro = qlcnic_83xx_config_hw_lro,
+ .config_promisc_mode = qlcnic_83xx_nic_set_promisc,
+ .change_l2_filter = qlcnic_83xx_change_l2_filter,
+ .get_board_info = qlcnic_83xx_get_port_info,
+ .free_mac_list = qlcnic_sriov_vf_free_mac_list,
+};
+
+static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_config_led,
+ .cancel_idc_work = qlcnic_sriov_vf_cancel_fw_work,
+ .napi_add = qlcnic_83xx_napi_add,
+ .napi_del = qlcnic_83xx_napi_del,
+ .config_ipaddr = qlcnic_83xx_config_ipaddr,
+ .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr,
+};
+
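+/* Each entry: {BC command, request word count, response word count}. */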
+static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
+ {QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
+ {QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
+ {QLCNIC_BC_CMD_GET_ACL, 3, 14},
+ {QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
+};
+
+static inline bool qlcnic_sriov_bc_msg_check(u32 val)
+{
+ return (val & (1 << QLC_BC_MSG)) ? true : false;
+}
+
+static inline bool qlcnic_sriov_channel_free_check(u32 val)
+{
+ return (val & (1 << QLC_BC_CFREE)) ? true : false;
+}
+
+static inline bool qlcnic_sriov_flr_check(u32 val)
+{
+ return (val & (1 << QLC_BC_FLR)) ? true : false;
+}
+
+static inline u8 qlcnic_sriov_target_func_id(u32 val)
+{
+ return (val >> 4) & 0xff;
+}
+
+static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
+{
+ struct pci_dev *dev = adapter->pdev;
+ int pos;
+ u16 stride, offset;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
+
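+ /* Per PCIe SR-IOV, VF N's routing ID is the PF's devfn plus the VF
+ * offset plus N * stride; keep only the low 8 bits (devfn).
+ */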
+ return (dev->devfn + offset + stride * vf_id) & 0xff;
+}
+
+int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
+{
+ struct qlcnic_sriov *sriov;
+ struct qlcnic_back_channel *bc;
+ struct workqueue_struct *wq;
+ struct qlcnic_vport *vp;
+ struct qlcnic_vf_info *vf;
+ int err, i;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return -EIO;
+
+ sriov = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
+ if (!sriov)
+ return -ENOMEM;
+
+ adapter->ahw->sriov = sriov;
+ sriov->num_vfs = num_vfs;
+ bc = &sriov->bc;
+ sriov->vf_info = kcalloc(num_vfs, sizeof(struct qlcnic_vf_info),
+ GFP_KERNEL);
+ if (!sriov->vf_info) {
+ err = -ENOMEM;
+ goto qlcnic_free_sriov;
+ }
+
+ wq = create_singlethread_workqueue("bc-trans");
+ if (wq == NULL) {
+ err = -ENOMEM;
+ dev_err(&adapter->pdev->dev,
+ "Cannot create bc-trans workqueue\n");
+ goto qlcnic_free_vf_info;
+ }
+
+ bc->bc_trans_wq = wq;
+
+ wq = create_singlethread_workqueue("async");
+ if (wq == NULL) {
+ err = -ENOMEM;
+ dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
+ goto qlcnic_destroy_trans_wq;
+ }
+
+ bc->bc_async_wq = wq;
+ INIT_LIST_HEAD(&bc->async_list);
+
+ for (i = 0; i < num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ vf->adapter = adapter;
+ vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
+ mutex_init(&vf->send_cmd_lock);
+ INIT_LIST_HEAD(&vf->rcv_act.wait_list);
+ INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
+ spin_lock_init(&vf->rcv_act.lock);
+ spin_lock_init(&vf->rcv_pend.lock);
+ init_completion(&vf->ch_free_cmpl);
+
+ if (qlcnic_sriov_pf_check(adapter)) {
+ vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
+ if (!vp) {
+ err = -ENOMEM;
+ goto qlcnic_destroy_async_wq;
+ }
+ sriov->vf_info[i].vp = vp;
+ vp->max_tx_bw = MAX_BW;
+ random_ether_addr(vp->mac);
+ dev_info(&adapter->pdev->dev,
+ "MAC Address %pM is configured for VF %d\n",
+ vp->mac, i);
+ }
+ }
+
+ return 0;
+
+qlcnic_destroy_async_wq:
+ destroy_workqueue(bc->bc_async_wq);
+
+qlcnic_destroy_trans_wq:
+ destroy_workqueue(bc->bc_trans_wq);
+
+qlcnic_free_vf_info:
+ kfree(sriov->vf_info);
+
+qlcnic_free_sriov:
+ kfree(adapter->ahw->sriov);
+ return err;
+}
+
+void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
+{
+ struct qlcnic_bc_trans *trans;
+ struct qlcnic_cmd_args cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&t_list->lock, flags);
+
+ while (!list_empty(&t_list->wait_list)) {
+ trans = list_first_entry(&t_list->wait_list,
+ struct qlcnic_bc_trans, list);
+ list_del(&trans->list);
+ t_list->count--;
+ cmd.req.arg = (u32 *)trans->req_pay;
+ cmd.rsp.arg = (u32 *)trans->rsp_pay;
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_sriov_cleanup_transaction(trans);
+ }
+
+ spin_unlock_irqrestore(&t_list->lock, flags);
+}
+
+void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_back_channel *bc = &sriov->bc;
+ struct qlcnic_vf_info *vf;
+ int i;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return;
+
+ qlcnic_sriov_cleanup_async_list(bc);
+ destroy_workqueue(bc->bc_async_wq);
+
+ for (i = 0; i < sriov->num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ qlcnic_sriov_cleanup_list(&vf->rcv_pend);
+ cancel_work_sync(&vf->trans_work);
+ qlcnic_sriov_cleanup_list(&vf->rcv_act);
+ }
+
+ destroy_workqueue(bc->bc_trans_wq);
+
+ for (i = 0; i < sriov->num_vfs; i++)
+ kfree(sriov->vf_info[i].vp);
+
+ kfree(sriov->vf_info);
+ kfree(adapter->ahw->sriov);
+}
+
+static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
+{
+ qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+ __qlcnic_sriov_cleanup(adapter);
+}
+
+void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_sriov_pf_cleanup(adapter);
+
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_vf_cleanup(adapter);
+}
+
+static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
+ u32 *pay, u8 pci_func, u8 size)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ unsigned long flags;
+ u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val;
+ u16 opcode;
+ u8 mbx_err_code;
+ int i, j;
+
+ opcode = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
+
+ if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
+ dev_info(&adapter->pdev->dev,
+ "Mailbox cmd attempted, 0x%x\n", opcode);
+ dev_info(&adapter->pdev->dev, "Mailbox detached\n");
+ return 0;
+ }
+
+ spin_lock_irqsave(&ahw->mbx_lock, flags);
+
+ mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
+ if (mbx_val) {
+ QLCDB(adapter, DRV, "Mailbox cmd attempted, 0x%x\n", opcode);
+ spin_unlock_irqrestore(&ahw->mbx_lock, flags);
+ return QLCNIC_RCODE_TIMEOUT;
+ }
+ /* Fill in mailbox registers */
+ val = size + (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
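+ /* Word 0: opcode 0x31 (QLCNIC_CMD_BC_EVENT_SETUP), total word count
+ * (header + payload) in bits 16:28, FW HAL version in bits 29:31.
+ */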
+ mbx_cmd = 0x31 | (val << 16) | (adapter->ahw->fw_hal_version << 29);
+
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+ mbx_cmd = 0x1 | (1 << 4);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ mbx_cmd |= (pci_func << 5);
+
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
+ for (i = 2, j = 0; j < (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
+ i++, j++) {
+ writel(*(hdr++), QLCNIC_MBX_HOST(ahw, i));
+ }
+ for (j = 0; j < size; j++, i++)
+ writel(*(pay++), QLCNIC_MBX_HOST(ahw, i));
+
+ /* Signal FW about the impending command */
+ QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
+
+ /* Wait for the mailbox command to complete; AENs may arrive while
+ * we wait. If more than 5 seconds elapse, assume something went
+ * wrong.
+ */
+poll:
+ rsp = qlcnic_83xx_mbx_poll(adapter);
+ if (rsp != QLCNIC_RCODE_TIMEOUT) {
+ /* Get the FW response data */
+ fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
+ if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
+ __qlcnic_83xx_process_aen(adapter);
+ mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
+ if (mbx_val)
+ goto poll;
+ }
+ mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
+ rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
+ opcode = QLCNIC_MBX_RSP(fw_data);
+
+ switch (mbx_err_code) {
+ case QLCNIC_MBX_RSP_OK:
+ case QLCNIC_MBX_PORT_RSP_OK:
+ rsp = QLCNIC_RCODE_SUCCESS;
+ break;
+ default:
+ if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
+ rsp = qlcnic_83xx_mac_rcode(adapter);
+ if (!rsp)
+ goto out;
+ }
+ dev_err(&adapter->pdev->dev,
+ "MBX command 0x%x failed with err:0x%x\n",
+ opcode, mbx_err_code);
+ rsp = mbx_err_code;
+ break;
+ }
+ goto out;
+ }
+
+ dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
+ QLCNIC_MBX_RSP(mbx_cmd));
+ rsp = QLCNIC_RCODE_TIMEOUT;
+out:
+ /* clear fw mbx control register */
+ QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+ spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+ return rsp;
+}
+
+static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
+{
+ adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+ adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info, u16 vport_id)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_cmd_args cmd;
+ int err;
+ u32 status;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = vport_id << 16 | 0x1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get vport info, err=%d\n", err);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+ }
+
+ status = cmd.rsp.arg[2] & 0xffff;
+ if (status & BIT_0)
+ npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
+ if (status & BIT_1)
+ npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
+ if (status & BIT_2)
+ npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
+ if (status & BIT_3)
+ npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
+ if (status & BIT_4)
+ npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
+ if (status & BIT_5)
+ npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
+ if (status & BIT_6)
+ npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
+ if (status & BIT_7)
+ npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
+ if (status & BIT_8)
+ npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
+ if (status & BIT_9)
+ npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);
+
+ npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
+ npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
+ npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
+ npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);
+
+ dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
+ "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
+ "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
+ "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
+ "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
+ "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
+ npar_info->min_tx_bw, npar_info->max_tx_bw,
+ npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
+ npar_info->max_rx_mcast_mac_filters,
+ npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
+ npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
+ npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
+ npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
+ npar_info->max_remote_ipv6_addrs);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ adapter->rx_pvid = (cmd->rsp.arg[1] >> 16) & 0xffff;
+ adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
+ return 0;
+}
+
+static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ int i, num_vlans;
+ u16 *vlans;
+
+ if (sriov->allowed_vlans)
+ return 0;
+
+ sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
+ if (!sriov->any_vlan)
+ return 0;
+
+ sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
+ num_vlans = sriov->num_allowed_vlans;
+ sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
+ if (!sriov->allowed_vlans)
+ return -ENOMEM;
+
+ vlans = (u16 *)&cmd->rsp.arg[3];
+ for (i = 0; i < num_vlans; i++)
+ sriov->allowed_vlans[i] = vlans[i];
+
+ return 0;
+}
+
+static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_cmd_args cmd;
+ int ret;
+
+ ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret) {
+ dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
+ ret);
+ } else {
+ sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
+ switch (sriov->vlan_mode) {
+ case QLC_GUEST_VLAN_MODE:
+ ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
+ break;
+ case QLC_PVID_MODE:
+ ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
+ break;
+ }
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+ return ret;
+}
+
+static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_info nic_info;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err;
+
+ err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
+ if (err)
+ return err;
+
+ err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
+ if (err)
+ return -EIO;
+
+ err = qlcnic_sriov_get_vf_acl(adapter);
+ if (err)
+ return err;
+
+ if (qlcnic_83xx_get_port_info(adapter))
+ return -EIO;
+
+ qlcnic_sriov_vf_cfg_buff_desc(adapter);
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+ dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
+ adapter->ahw->fw_hal_version);
+
+ ahw->physical_port = (u8) nic_info.phys_port;
+ ahw->switch_mode = nic_info.switch_mode;
+ ahw->max_mtu = nic_info.max_mtu;
+ ahw->op_mode = nic_info.op_mode;
+ ahw->capabilities = nic_info.capabilities;
+ return 0;
+}
+
+static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
+ int pci_using_dac)
+{
+ int err;
+
+ INIT_LIST_HEAD(&adapter->vf_mc_list);
+ if (!qlcnic_use_msi_x && qlcnic_use_msi)
+ dev_warn(&adapter->pdev->dev,
+ "83xx adapters do not support MSI interrupts\n");
+
+ err = qlcnic_setup_intr(adapter, 1);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
+ goto err_out_disable_msi;
+ }
+
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err)
+ goto err_out_disable_msi;
+
+ err = qlcnic_sriov_init(adapter, 1);
+ if (err)
+ goto err_out_disable_mbx_intr;
+
+ err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+ if (err)
+ goto err_out_cleanup_sriov;
+
+ err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
+ if (err)
+ goto err_out_disable_bc_intr;
+
+ err = qlcnic_sriov_vf_init_driver(adapter);
+ if (err)
+ goto err_out_send_channel_term;
+
+ err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
+ if (err)
+ goto err_out_send_channel_term;
+
+ pci_set_drvdata(adapter->pdev, adapter);
+ dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+ adapter->netdev->name);
+ qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
+ adapter->ahw->idc.delay);
+ return 0;
+
+err_out_send_channel_term:
+ qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+
+err_out_disable_bc_intr:
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+
+err_out_cleanup_sriov:
+ __qlcnic_sriov_cleanup(adapter);
+
+err_out_disable_mbx_intr:
+ qlcnic_83xx_free_mbx_intr(adapter);
+
+err_out_disable_msi:
+ qlcnic_teardown_intr(adapter);
+ return err;
+}
+
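+/* Poll the IDC device state register until the firmware reports
+ * QLC_83XX_IDC_DEV_READY, giving up after QLC_BC_CMD_MAX_RETRY_CNT
+ * polls of 20 ms each.
+ */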
+static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+
+ do {
+ msleep(20);
+ if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
+ return -EIO;
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+ } while (state != QLC_83XX_IDC_DEV_READY);
+
+ return 0;
+}
+
+int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err;
+
+ spin_lock_init(&ahw->mbx_lock);
+ set_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+ set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
+ ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+ ahw->reset_context = 0;
+ adapter->fw_fail_cnt = 0;
+ ahw->msix_supported = 1;
+ adapter->need_fw_reset = 0;
+ adapter->flags |= QLCNIC_TX_INTR_SHARED;
+
+ err = qlcnic_sriov_check_dev_ready(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_sriov_setup_vf(adapter, pci_using_dac);
+ if (err)
+ return err;
+
+ if (qlcnic_read_mac_addr(adapter))
+ dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return 0;
+}
+
+void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d Non Privileged SRIOV function\n",
+ ahw->fw_hal_version);
+ adapter->nic_ops = &qlcnic_sriov_vf_ops;
+ set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+}
+
+void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
+{
+ ahw->hw_ops = &qlcnic_sriov_vf_hw_ops;
+ ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl;
+ ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl;
+}
+
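+/* Return the payload size of back channel fragment @curr_frag: every
+ * completely filled fragment carries QLC_BC_PAYLOAD_SZ bytes, and the
+ * final partial fragment carries the remainder.
+ */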
+static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
+{
+ u32 pay_size;
+
+ pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);
+
+ if (pay_size)
+ pay_size = QLC_BC_PAYLOAD_SZ;
+ else
+ pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;
+
+ return pay_size;
+}
+
+int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
+{
+ struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
+ u8 i;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+
+ for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
+ if (vf_info[i].pci_func == pci_func)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
+{
+ *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
+ if (!*trans)
+ return -ENOMEM;
+
+ init_completion(&(*trans)->resp_cmpl);
+ return 0;
+}
+
+static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
+ u32 size)
+{
+ *hdr = kcalloc(size, sizeof(struct qlcnic_bc_hdr), GFP_ATOMIC);
+ if (!*hdr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
+{
+ const struct qlcnic_mailbox_metadata *mbx_tbl;
+ int i, size;
+
+ mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
+ size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);
+
+ for (i = 0; i < size; i++) {
+ if (type == mbx_tbl[i].cmd) {
+ mbx->op_type = QLC_BC_CMD;
+ mbx->req.num = mbx_tbl[i].in_args;
+ mbx->rsp.num = mbx_tbl[i].out_args;
+ mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->req.arg)
+ return -ENOMEM;
+ mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->rsp.arg) {
+ kfree(mbx->req.arg);
+ mbx->req.arg = NULL;
+ return -ENOMEM;
+ }
+ mbx->req.arg[0] = (type | (mbx->req.num << 16) |
+ (3 << 29));
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
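+/* Build the back channel headers for a transaction. For a command
+ * (QLC_BC_COMMAND) the request and response payloads are split into
+ * QLC_BC_PAYLOAD_SZ sized fragments and one header is allocated per
+ * fragment; for a response the buffers already attached to the
+ * transaction are reused.
+ */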
+static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd,
+ u16 seq, u8 msg_type)
+{
+ struct qlcnic_bc_hdr *hdr;
+ int i;
+ u32 num_regs, bc_pay_sz;
+ u16 remainder;
+ u8 cmd_op, num_frags, t_num_frags;
+
+ bc_pay_sz = QLC_BC_PAYLOAD_SZ;
+ if (msg_type == QLC_BC_COMMAND) {
+ trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
+ trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
+ num_regs = cmd->req.num;
+ trans->req_pay_size = (num_regs * 4);
+ num_regs = cmd->rsp.num;
+ trans->rsp_pay_size = (num_regs * 4);
+ cmd_op = cmd->req.arg[0] & 0xff;
+ remainder = (trans->req_pay_size) % (bc_pay_sz);
+ num_frags = (trans->req_pay_size) / (bc_pay_sz);
+ if (remainder)
+ num_frags++;
+ t_num_frags = num_frags;
+ if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
+ return -ENOMEM;
+ remainder = (trans->rsp_pay_size) % (bc_pay_sz);
+ num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
+ if (remainder)
+ num_frags++;
+ if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
+ return -ENOMEM;
+ num_frags = t_num_frags;
+ hdr = trans->req_hdr;
+ } else {
+ cmd->req.arg = (u32 *)trans->req_pay;
+ cmd->rsp.arg = (u32 *)trans->rsp_pay;
+ cmd_op = cmd->req.arg[0] & 0xff;
+ remainder = (trans->rsp_pay_size) % (bc_pay_sz);
+ num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
+ if (remainder)
+ num_frags++;
+ cmd->req.num = trans->req_pay_size / 4;
+ cmd->rsp.num = trans->rsp_pay_size / 4;
+ hdr = trans->rsp_hdr;
+ }
+
+ trans->trans_id = seq;
+ trans->cmd_id = cmd_op;
+ for (i = 0; i < num_frags; i++) {
+ hdr[i].version = 2;
+ hdr[i].msg_type = msg_type;
+ hdr[i].op_type = cmd->op_type;
+ hdr[i].num_cmds = 1;
+ hdr[i].num_frags = num_frags;
+ hdr[i].frag_num = i + 1;
+ hdr[i].cmd_op = cmd_op;
+ hdr[i].seq_id = seq;
+ }
+ return 0;
+}
+
+static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
+{
+ if (!trans)
+ return;
+ kfree(trans->req_hdr);
+ kfree(trans->rsp_hdr);
+ kfree(trans);
+}
+
+static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_trans *trans, u8 type)
+{
+ struct qlcnic_trans_list *t_list;
+ unsigned long flags;
+ int ret = 0;
+
+ if (type == QLC_BC_RESPONSE) {
+ t_list = &vf->rcv_act;
+ spin_lock_irqsave(&t_list->lock, flags);
+ t_list->count--;
+ list_del(&trans->list);
+ if (t_list->count > 0)
+ ret = 1;
+ spin_unlock_irqrestore(&t_list->lock, flags);
+ }
+ if (type == QLC_BC_COMMAND) {
+ while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
+ msleep(100);
+ vf->send_cmd = NULL;
+ clear_bit(QLC_BC_VF_SEND, &vf->state);
+ }
+ return ret;
+}
+
+static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ work_func_t func)
+{
+ if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
+ vf->adapter->need_fw_reset)
+ return;
+
+ INIT_WORK(&vf->trans_work, func);
+ queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
+}
+
+static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
+{
+ struct completion *cmpl = &trans->resp_cmpl;
+
+ if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
+ trans->trans_state = QLC_END;
+ else
+ trans->trans_state = QLC_ABORT;
+}
+
+static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
+ u8 type)
+{
+ if (type == QLC_BC_RESPONSE) {
+ trans->curr_rsp_frag++;
+ if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
+ trans->trans_state = QLC_INIT;
+ else
+ trans->trans_state = QLC_END;
+ } else {
+ trans->curr_req_frag++;
+ if (trans->curr_req_frag < trans->req_hdr->num_frags)
+ trans->trans_state = QLC_INIT;
+ else
+ trans->trans_state = QLC_WAIT_FOR_RESP;
+ }
+}
+
+static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
+ u8 type)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct completion *cmpl = &vf->ch_free_cmpl;
+
+ if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
+ trans->trans_state = QLC_ABORT;
+ return;
+ }
+
+ clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
+ qlcnic_sriov_handle_multi_frags(trans, type);
+}
+
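+/* Copy a back channel message out of the firmware mailbox registers;
+ * the header dwords are read first, followed by the payload. The
+ * first mailbox dword is read but not interpreted here.
+ */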
+static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
+ u32 *hdr, u32 *pay, u32 size)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 fw_mbx;
+ u8 i, max, hdr_size, j;
+
+ hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
+ max = (size / sizeof(u32)) + hdr_size;
+
+ fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0));
+ for (i = 2, j = 0; j < hdr_size; i++, j++)
+ *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
+ for (; j < max; i++, j++)
+ *(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
+}
+
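+/* Busy-wait, in 1 ms steps for up to ~10 seconds, for exclusive
+ * ownership of the back channel (the QLC_BC_VF_CHANNEL bit).
+ */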
+static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
+{
+ int ret = -EBUSY;
+ u32 timeout = 10000;
+
+ do {
+ if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
+ ret = 0;
+ break;
+ }
+ mdelay(1);
+ } while (--timeout);
+
+ return ret;
+}
+
+static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ u32 pay_size, hdr_size;
+ u32 *hdr, *pay;
+ int ret;
+ u8 pci_func = trans->func_id;
+
+ if (__qlcnic_sriov_issue_bc_post(vf))
+ return -EBUSY;
+
+ if (type == QLC_BC_COMMAND) {
+ hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
+ pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
+ hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
+ trans->curr_req_frag);
+ pay_size = (pay_size / sizeof(u32));
+ } else {
+ hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
+ pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
+ hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
+ trans->curr_rsp_frag);
+ pay_size = (pay_size / sizeof(u32));
+ }
+
+ ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
+ pci_func, pay_size);
+ return ret;
+}
+
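+/* Drive a back channel transaction through its state machine:
+ * QLC_INIT posts the next fragment, QLC_WAIT_FOR_CHANNEL_FREE and
+ * QLC_WAIT_FOR_RESP block on their respective completions, and
+ * QLC_END/QLC_ABORT terminate the loop with success or -EIO.
+ */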
+static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
+ struct qlcnic_vf_info *vf, u8 type)
+{
+ bool flag = true;
+ int err = -EIO;
+
+ while (flag) {
+ if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
+ vf->adapter->need_fw_reset)
+ trans->trans_state = QLC_ABORT;
+
+ switch (trans->trans_state) {
+ case QLC_INIT:
+ trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
+ if (qlcnic_sriov_issue_bc_post(trans, type))
+ trans->trans_state = QLC_ABORT;
+ break;
+ case QLC_WAIT_FOR_CHANNEL_FREE:
+ qlcnic_sriov_wait_for_channel_free(trans, type);
+ break;
+ case QLC_WAIT_FOR_RESP:
+ qlcnic_sriov_wait_for_resp(trans);
+ break;
+ case QLC_END:
+ err = 0;
+ flag = false;
+ break;
+ case QLC_ABORT:
+ err = -EIO;
+ flag = false;
+ clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
+ break;
+ default:
+ err = -EIO;
+ flag = false;
+ }
+ }
+ return err;
+}
+
+static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans, int pci_func)
+{
+ struct qlcnic_vf_info *vf;
+ int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);
+
+ if (index < 0)
+ return -EIO;
+
+ vf = &adapter->ahw->sriov->vf_info[index];
+ trans->vf = vf;
+ trans->func_id = pci_func;
+
+ if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
+ if (qlcnic_sriov_pf_check(adapter))
+ return -EIO;
+ if (qlcnic_sriov_vf_check(adapter) &&
+ trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
+ return -EIO;
+ }
+
+ mutex_lock(&vf->send_cmd_lock);
+ vf->send_cmd = trans;
+ err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
+ qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
+ mutex_unlock(&vf->send_cmd_lock);
+ return err;
+}
+
+static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+#ifdef CONFIG_QLCNIC_SRIOV
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
+ return;
+ }
+#endif
+ cmd->rsp.arg[0] |= (0x9 << 25);
+}
+
+static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
+{
+ struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
+ trans_work);
+ struct qlcnic_bc_trans *trans = NULL;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_cmd_args cmd;
+ u8 req;
+
+ if (adapter->need_fw_reset)
+ return;
+
+ if (test_bit(QLC_BC_VF_FLR, &vf->state))
+ return;
+
+ trans = list_first_entry(&vf->rcv_act.wait_list,
+ struct qlcnic_bc_trans, list);
+ adapter = vf->adapter;
+
+ if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
+ QLC_BC_RESPONSE))
+ goto cleanup_trans;
+
+ __qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
+ trans->trans_state = QLC_INIT;
+ __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);
+
+cleanup_trans:
+ qlcnic_free_mbx_args(&cmd);
+ req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
+ qlcnic_sriov_cleanup_transaction(trans);
+ if (req)
+ qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
+ qlcnic_sriov_process_bc_cmd);
+}
+
+static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_bc_trans *trans;
+ u32 pay_size;
+
+ if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
+ return;
+
+ trans = vf->send_cmd;
+
+ if (trans == NULL)
+ goto clear_send;
+
+ if (trans->trans_id != hdr->seq_id)
+ goto clear_send;
+
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
+ trans->curr_rsp_frag);
+ qlcnic_sriov_pull_bc_msg(vf->adapter,
+ (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
+ (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
+ pay_size);
+ if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
+ goto clear_send;
+
+ complete(&trans->resp_cmpl);
+
+clear_send:
+ clear_bit(QLC_BC_VF_SEND, &vf->state);
+}
+
+int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_trans *trans)
+{
+ struct qlcnic_trans_list *t_list = &vf->rcv_act;
+
+ t_list->count++;
+ list_add_tail(&trans->list, &t_list->wait_list);
+ if (t_list->count == 1)
+ qlcnic_sriov_schedule_bc_cmd(sriov, vf,
+ qlcnic_sriov_process_bc_cmd);
+ return 0;
+}
+
+static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_trans *trans)
+{
+ struct qlcnic_trans_list *t_list = &vf->rcv_act;
+
+ spin_lock(&t_list->lock);
+
+ __qlcnic_sriov_add_act_list(sriov, vf, trans);
+
+ spin_unlock(&t_list->lock);
+ return 0;
+}
+
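+/* A non-first fragment of a multi-fragment command has arrived: find
+ * the pending transaction with a matching sequence id, pull the new
+ * fragment, and once all fragments are in, move the transaction to
+ * the active list for processing.
+ */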
+static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_hdr *hdr)
+{
+ struct qlcnic_bc_trans *trans = NULL;
+ struct list_head *node;
+ u32 pay_size, curr_frag;
+ u8 found = 0, active = 0;
+
+ spin_lock(&vf->rcv_pend.lock);
+ if (vf->rcv_pend.count > 0) {
+ list_for_each(node, &vf->rcv_pend.wait_list) {
+ trans = list_entry(node, struct qlcnic_bc_trans, list);
+ if (trans->trans_id == hdr->seq_id) {
+ found = 1;
+ break;
+ }
+ }
+ }
+
+ if (found) {
+ curr_frag = trans->curr_req_frag;
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
+ curr_frag);
+ qlcnic_sriov_pull_bc_msg(vf->adapter,
+ (u32 *)(trans->req_hdr + curr_frag),
+ (u32 *)(trans->req_pay + curr_frag),
+ pay_size);
+ trans->curr_req_frag++;
+ if (trans->curr_req_frag >= hdr->num_frags) {
+ vf->rcv_pend.count--;
+ list_del(&trans->list);
+ active = 1;
+ }
+ }
+ spin_unlock(&vf->rcv_pend.lock);
+
+ if (active)
+ if (qlcnic_sriov_add_act_list(sriov, vf, trans))
+ qlcnic_sriov_cleanup_transaction(trans);
+}
+
+static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
+ struct qlcnic_bc_hdr *hdr,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_bc_trans *trans;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_cmd_args cmd;
+ u32 pay_size;
+ int err;
+ u8 cmd_op;
+
+ if (adapter->need_fw_reset)
+ return;
+
+ if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
+ hdr->op_type != QLC_BC_CMD &&
+ hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
+ return;
+
+ if (hdr->frag_num > 1) {
+ qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
+ return;
+ }
+
+ cmd_op = hdr->cmd_op;
+ if (qlcnic_sriov_alloc_bc_trans(&trans))
+ return;
+
+ if (hdr->op_type == QLC_BC_CMD)
+ err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
+ else
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);
+
+ if (err) {
+ qlcnic_sriov_cleanup_transaction(trans);
+ return;
+ }
+
+ cmd.op_type = hdr->op_type;
+ if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
+ QLC_BC_COMMAND)) {
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_sriov_cleanup_transaction(trans);
+ return;
+ }
+
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
+ trans->curr_req_frag);
+ qlcnic_sriov_pull_bc_msg(vf->adapter,
+ (u32 *)(trans->req_hdr + trans->curr_req_frag),
+ (u32 *)(trans->req_pay + trans->curr_req_frag),
+ pay_size);
+ trans->func_id = vf->pci_func;
+ trans->vf = vf;
+ trans->trans_id = hdr->seq_id;
+ trans->curr_req_frag++;
+
+ if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
+ return;
+
+ if (trans->curr_req_frag == trans->req_hdr->num_frags) {
+ if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_sriov_cleanup_transaction(trans);
+ }
+ } else {
+ spin_lock(&vf->rcv_pend.lock);
+ list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
+ vf->rcv_pend.count++;
+ spin_unlock(&vf->rcv_pend.lock);
+ }
+}
+
+static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_bc_hdr hdr;
+ u32 *ptr = (u32 *)&hdr;
+ u8 msg_type, i;
+
+ for (i = 2; i < 6; i++)
+ ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
+ msg_type = hdr.msg_type;
+
+ switch (msg_type) {
+ case QLC_BC_COMMAND:
+ qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
+ break;
+ case QLC_BC_RESPONSE:
+ qlcnic_sriov_handle_bc_resp(&hdr, vf);
+ break;
+ }
+}
+
+static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_adapter *adapter = vf->adapter;
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_sriov_pf_handle_flr(sriov, vf);
+ else
+ dev_err(&adapter->pdev->dev,
+ "Invalid event: a VF should not receive FLR events\n");
+}
+
+void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
+{
+ struct qlcnic_vf_info *vf;
+ struct qlcnic_sriov *sriov;
+ int index;
+ u8 pci_func;
+
+ sriov = adapter->ahw->sriov;
+ pci_func = qlcnic_sriov_target_func_id(event);
+ index = qlcnic_sriov_func_to_index(adapter, pci_func);
+
+ if (index < 0)
+ return;
+
+ vf = &sriov->vf_info[index];
+ vf->pci_func = pci_func;
+
+ if (qlcnic_sriov_channel_free_check(event))
+ complete(&vf->ch_free_cmpl);
+
+ if (qlcnic_sriov_flr_check(event)) {
+ qlcnic_sriov_handle_flr_event(sriov, vf);
+ return;
+ }
+
+ if (qlcnic_sriov_bc_msg_check(event))
+ qlcnic_sriov_handle_msg_event(sriov, vf);
+}
+
+int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
+ return 0;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
+ return -ENOMEM;
+
+ if (enable)
+ cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
+
+ err = qlcnic_83xx_mbx_op(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to %s bc events, err=%d\n",
+ (enable ? "enable" : "disable"), err);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
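+/* Decide whether a timed-out command may be retried: the device must
+ * still be in QLC_83XX_IDC_DEV_READY state and the failure count must
+ * not exceed QLC_BC_CMD_MAX_RETRY_CNT.
+ */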
+static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans)
+{
+ u8 max = QLC_BC_CMD_MAX_RETRY_CNT;
+ u32 state;
+
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+ if (state == QLC_83XX_IDC_DEV_READY) {
+ msleep(20);
+ clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state);
+ trans->trans_state = QLC_INIT;
+ if (++adapter->fw_fail_cnt > max)
+ return -EIO;
+ else
+ return 0;
+ }
+
+ return -EIO;
+}
+
+static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_bc_trans *trans;
+ int err;
+ u32 rsp_data, opcode, mbx_err_code, rsp;
+ u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
+ u8 func = ahw->pci_func;
+
+ rsp = qlcnic_sriov_alloc_bc_trans(&trans);
+ if (rsp)
+ return rsp;
+
+ rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
+ if (rsp)
+ goto cleanup_transaction;
+
+retry:
+ if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
+ rsp = -EIO;
+ QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
+ QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
+ goto err_out;
+ }
+
+ err = qlcnic_sriov_send_bc_cmd(adapter, trans, func);
+ if (err) {
+ dev_err(dev, "MBX command 0x%x timed out for VF %d\n",
+ (cmd->req.arg[0] & 0xffff), func);
+ rsp = QLCNIC_RCODE_TIMEOUT;
+
+ /* After an adapter reset, the PF driver may take some time to
+ * respond to a VF's request. Retry the request until the
+ * maximum retry count is reached.
+ */
+ if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
+ !qlcnic_sriov_retry_bc_cmd(adapter, trans))
+ goto retry;
+
+ goto err_out;
+ }
+
+ rsp_data = cmd->rsp.arg[0];
+ mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
+ opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);
+
+ if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
+ (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
+ rsp = QLCNIC_RCODE_SUCCESS;
+ } else {
+ rsp = mbx_err_code;
+ if (!rsp)
+ rsp = 1;
+ dev_err(dev,
+ "MBX command 0x%x failed with err:0x%x for VF %d\n",
+ opcode, mbx_err_code, func);
+ }
+
+err_out:
+ if (rsp == QLCNIC_RCODE_TIMEOUT) {
+ ahw->reset_context = 1;
+ adapter->need_fw_reset = 1;
+ clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+ }
+
+cleanup_transaction:
+ qlcnic_sriov_cleanup_transaction(trans);
+ return rsp;
+}
+
+int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
+{
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
+ int ret;
+
+ if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
+ return -ENOMEM;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to %s bc channel, err=%d\n",
+ cmd_op ? "term" : "init", ret);
+ goto out;
+ }
+
+ cmd_op = (cmd.rsp.arg[0] & 0xff);
+ if (cmd.rsp.arg[0] >> 25 == 2) {
+ ret = 2;
+ goto out;
+ }
+ if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
+ set_bit(QLC_BC_VF_STATE, &vf->state);
+ else
+ clear_bit(QLC_BC_VF_STATE, &vf->state);
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return ret;
+}
+
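+/* Move the queued multicast entries off adapter->vf_mc_list while
+ * holding the address lock, then program each address into the
+ * firmware after the lock is dropped.
+ */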
+void qlcnic_vf_add_mc_list(struct net_device *netdev, u16 vlan)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_mac_list_s *cur;
+ struct list_head *head, tmp_list;
+
+ INIT_LIST_HEAD(&tmp_list);
+ head = &adapter->vf_mc_list;
+ netif_addr_lock_bh(netdev);
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+ list_move(&cur->list, &tmp_list);
+ }
+
+ netif_addr_unlock_bh(netdev);
+
+ while (!list_empty(&tmp_list)) {
+ cur = list_entry((&tmp_list)->next,
+ struct qlcnic_mac_list_s, list);
+ qlcnic_nic_add_mac(adapter, cur->mac_addr, vlan);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
+{
+ struct list_head *head = &bc->async_list;
+ struct qlcnic_async_work_list *entry;
+
+ while (!list_empty(head)) {
+ entry = list_entry(head->next, struct qlcnic_async_work_list,
+ list);
+ cancel_work_sync(&entry->work);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
+
+static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ u16 vlan;
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return;
+
+ vlan = adapter->ahw->sriov->vlan;
+ __qlcnic_set_multi(netdev, vlan);
+}
+
+static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
+{
+ struct qlcnic_async_work_list *entry;
+ struct net_device *netdev;
+
+ entry = container_of(work, struct qlcnic_async_work_list, work);
+ netdev = (struct net_device *)entry->ptr;
+
+ qlcnic_sriov_vf_set_multi(netdev);
+}
+
+static struct qlcnic_async_work_list *
+qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
+{
+ struct list_head *node;
+ struct qlcnic_async_work_list *entry = NULL;
+ u8 empty = 0;
+
+ list_for_each(node, &bc->async_list) {
+ entry = list_entry(node, struct qlcnic_async_work_list, list);
+ if (!work_pending(&entry->work)) {
+ empty = 1;
+ break;
+ }
+ }
+
+ if (!empty) {
+ entry = kzalloc(sizeof(struct qlcnic_async_work_list),
+ GFP_ATOMIC);
+ if (entry == NULL)
+ return NULL;
+ list_add_tail(&entry->list, &bc->async_list);
+ }
+
+ return entry;
+}
+
+static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
+ work_func_t func, void *data)
+{
+ struct qlcnic_async_work_list *entry = NULL;
+
+ entry = qlcnic_sriov_get_free_node_async_work(bc);
+ if (!entry)
+ return;
+
+ entry->ptr = data;
+ INIT_WORK(&entry->work, func);
+ queue_work(bc->bc_async_wq, &entry->work);
+}
+
+void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
+
+ if (adapter->need_fw_reset)
+ return;
+
+ qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi,
+ netdev);
+}
+
+static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+ qlcnic_83xx_enable_mbx_intrpt(adapter);
+
+ err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+ if (err)
+ return err;
+
+ err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
+ if (err)
+ goto err_out_cleanup_bc_intr;
+
+ err = qlcnic_sriov_vf_init_driver(adapter);
+ if (err)
+ goto err_out_term_channel;
+
+ return 0;
+
+err_out_term_channel:
+ qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+
+err_out_cleanup_bc_intr:
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+ return err;
+}
+
+static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (netif_running(netdev)) {
+ if (!qlcnic_up(adapter, netdev))
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+}
+
+static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl;
+ struct net_device *netdev = adapter->netdev;
+ u8 i, max_ints = ahw->num_msix - 1;
+
+ qlcnic_83xx_disable_mbx_intr(adapter);
+ netif_device_detach(netdev);
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ for (i = 0; i < max_ints; i++) {
+ intr_tbl[i].id = i;
+ intr_tbl[i].enabled = 0;
+ intr_tbl[i].src = 0;
+ }
+ ahw->reset_context = 0;
+}
+
+static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ u8 func = ahw->pci_func;
+ u32 state;
+
+ if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
+ (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
+ if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
+ qlcnic_sriov_vf_attach(adapter);
+ adapter->fw_fail_cnt = 0;
+ dev_info(dev,
+ "%s: Reinitialization of VF 0x%x done after FW reset\n",
+ __func__, func);
+ } else {
+ dev_err(dev,
+ "%s: Reinitialization of VF 0x%x failed after FW reset\n",
+ __func__, func);
+ state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
+ dev_info(dev, "Current state 0x%x after FW reset\n",
+ state);
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ u8 func = ahw->pci_func;
+ u32 state;
+
+ adapter->reset_ctx_cnt++;
+
+ /* Skip the context reset and check if FW is hung */
+ if (adapter->reset_ctx_cnt < 3) {
+ adapter->need_fw_reset = 1;
+ clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ dev_info(dev,
+ "Skipping context reset, waiting to check if FW is in failed state\n");
+ return 0;
+ }
+
+ /* Check if the number of resets exceeds the threshold.
+ * If it does, just fail the VF.
+ */
+ if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
+ clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
+ adapter->tx_timeo_cnt = 0;
+ adapter->fw_fail_cnt = 0;
+ adapter->reset_ctx_cnt = 0;
+ qlcnic_sriov_vf_detach(adapter);
+ dev_err(dev,
+ "Device context resets have exceeded the threshold, device interface will be shutdown\n");
+ return -EIO;
+ }
+
+ dev_info(dev, "Resetting context of VF 0x%x\n", func);
+ dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
+ __func__, adapter->reset_ctx_cnt, func);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ adapter->need_fw_reset = 1;
+ clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ qlcnic_sriov_vf_detach(adapter);
+ adapter->need_fw_reset = 0;
+
+ if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
+ qlcnic_sriov_vf_attach(adapter);
+ adapter->netdev->trans_start = jiffies;
+ adapter->tx_timeo_cnt = 0;
+ adapter->reset_ctx_cnt = 0;
+ adapter->fw_fail_cnt = 0;
+ dev_info(dev, "Done resetting context for VF 0x%x\n", func);
+ } else {
+ dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
+ __func__, func);
+ state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
+ dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int ret = 0;
+
+ if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
+ ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
+ else if (ahw->reset_context)
+ ret = qlcnic_sriov_vf_handle_context_reset(adapter);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
+{
+ struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+
+ dev_err(&adapter->pdev->dev, "Device is in failed state\n");
+ if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
+ qlcnic_sriov_vf_detach(adapter);
+
+ clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return -EIO;
+}
+
+static int
+qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
+{
+ struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+
+ dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
+ if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ adapter->tx_timeo_cnt = 0;
+ adapter->reset_ctx_cnt = 0;
+ clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ qlcnic_sriov_vf_detach(adapter);
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
+{
+ struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+ u8 func = adapter->ahw->pci_func;
+
+ if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
+ dev_err(&adapter->pdev->dev,
+ "Firmware hang detected by VF 0x%x\n", func);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ adapter->tx_timeo_cnt = 0;
+ adapter->reset_ctx_cnt = 0;
+ clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ qlcnic_sriov_vf_detach(adapter);
+ }
+ return 0;
+}
+
+static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
+{
+ dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__);
+ return 0;
+}
+
+static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+ struct qlc_83xx_idc *idc;
+ int ret = 0;
+
+ adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
+ idc = &adapter->ahw->idc;
+ idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+
+ switch (idc->curr_state) {
+ case QLC_83XX_IDC_DEV_READY:
+ ret = qlcnic_sriov_vf_idc_ready_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_RESET:
+ case QLC_83XX_IDC_DEV_INIT:
+ ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_QUISCENT:
+ ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_FAILED:
+ ret = qlcnic_sriov_vf_idc_failed_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_QUISCENT:
+ break;
+ default:
+ ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
+ }
+
+ idc->prev_state = idc->curr_state;
+ if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
+ qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
+ idc->delay);
+}
+
+static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
+{
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ msleep(20);
+
+ clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
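+/* Guest VLAN changes are only legal in QLC_GUEST_VLAN_MODE: a VLAN
+ * may be added only when none is configured (and, if the PF restricts
+ * VLANs, only when it is on the allowed list), and may be deleted
+ * only when it matches the currently configured VLAN.
+ */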
+static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_sriov *sriov,
+ u16 vid, u8 enable)
+{
+ u16 vlan = sriov->vlan;
+ u8 allowed = 0;
+ int i;
+
+ if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
+ return -EINVAL;
+
+ if (enable) {
+ if (vlan)
+ return -EINVAL;
+
+ if (sriov->any_vlan) {
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (sriov->allowed_vlans[i] == vid)
+ allowed = 1;
+ }
+
+ if (!allowed)
+ return -EINVAL;
+ }
+ } else {
+ if (!vlan || vlan != vid)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
+ u16 vid, u8 enable)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_cmd_args cmd;
+ int ret;
+
+ if (vid == 0)
+ return 0;
+
+ ret = qlcnic_sriov_validate_vlan_cfg(sriov, vid, enable);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
+ QLCNIC_BC_CMD_CFG_GUEST_VLAN);
+ if (ret)
+ return ret;
+
+ cmd.req.arg[1] = (enable & 1) | vid << 16;
+
+ qlcnic_sriov_cleanup_async_list(&sriov->bc);
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure guest VLAN, err=%d\n", ret);
+ } else {
+ qlcnic_free_mac_list(adapter);
+
+ if (enable)
+ sriov->vlan = vid;
+ else
+ sriov->vlan = 0;
+
+ qlcnic_sriov_vf_set_multi(adapter->netdev);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+ return ret;
+}
+
+static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
+{
+ struct list_head *head = &adapter->mac_list;
+ struct qlcnic_mac_list_s *cur;
+ u16 vlan;
+
+ vlan = adapter->ahw->sriov->vlan;
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+ qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+ vlan, QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
new file mode 100644
index 000000000000..c81be2da119b
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -0,0 +1,1780 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic_sriov.h"
+#include "qlcnic.h"
+#include <linux/types.h>
+
+#define QLCNIC_SRIOV_VF_MAX_MAC 1
+#define QLC_VF_MIN_TX_RATE 100
+#define QLC_VF_MAX_TX_RATE 9999
+
+static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
+
+struct qlcnic_sriov_cmd_handler {
+ int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *);
+};
+
+struct qlcnic_sriov_fw_cmd_handler {
+ u32 cmd;
+ int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *);
+};
+
+static int qlcnic_sriov_pf_set_vport_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info,
+ u16 vport_id)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO))
+ return -ENOMEM;
+
+ cmd.req.arg[1] = (vport_id << 16) | 0x1;
+ cmd.req.arg[2] = npar_info->bit_offsets;
+ cmd.req.arg[2] |= npar_info->min_tx_bw << 16;
+ cmd.req.arg[3] = npar_info->max_tx_bw | (npar_info->max_tx_ques << 16);
+ cmd.req.arg[4] = npar_info->max_tx_mac_filters;
+ cmd.req.arg[4] |= npar_info->max_rx_mcast_mac_filters << 16;
+ cmd.req.arg[5] = npar_info->max_rx_ucast_mac_filters |
+ (npar_info->max_rx_ip_addr << 16);
+ cmd.req.arg[6] = npar_info->max_rx_lro_flow |
+ (npar_info->max_rx_status_rings << 16);
+ cmd.req.arg[7] = npar_info->max_rx_buf_rings |
+ (npar_info->max_rx_ques << 16);
+ cmd.req.arg[8] = npar_info->max_tx_vlan_keys;
+ cmd.req.arg[8] |= npar_info->max_local_ipv6_addrs << 16;
+ cmd.req.arg[9] = npar_info->max_remote_ipv6_addrs;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to set vport info, err=%d\n", err);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
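+/* Partition the function-level maximums (ff_max) across the PF and
+ * all VFs. Most resources are divided evenly among num_vfs + 1
+ * functions; MAC filters are special-cased so that each VF gets
+ * QLCNIC_SRIOV_VF_MAX_MAC filters while the PF keeps the remainder
+ * and the full TX bandwidth range.
+ */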
+static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info, u16 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_resources *res = &sriov->ff_max;
+ u32 temp, num_vf_macs, num_vfs, max;
+ int ret = -EIO, vpid, id;
+ struct qlcnic_vport *vp;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+ if (vpid < 0)
+ return -EINVAL;
+
+ num_vfs = sriov->num_vfs;
+ max = num_vfs + 1;
+ info->bit_offsets = 0xffff;
+ info->max_tx_ques = res->num_tx_queues / max;
+ info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
+ num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
+
+ if (adapter->ahw->pci_func == func) {
+ temp = res->num_rx_mcast_mac_filters - (num_vfs * num_vf_macs);
+ info->max_rx_ucast_mac_filters = temp;
+ temp = res->num_tx_mac_filters - (num_vfs * num_vf_macs);
+ info->max_tx_mac_filters = temp;
+ info->min_tx_bw = 0;
+ info->max_tx_bw = MAX_BW;
+ } else {
+ id = qlcnic_sriov_func_to_index(adapter, func);
+ if (id < 0)
+ return id;
+ vp = sriov->vf_info[id].vp;
+ info->min_tx_bw = vp->min_tx_bw;
+ info->max_tx_bw = vp->max_tx_bw;
+ info->max_rx_ucast_mac_filters = num_vf_macs;
+ info->max_tx_mac_filters = num_vf_macs;
+ }
+
+ info->max_rx_ip_addr = res->num_destip / max;
+ info->max_rx_status_rings = res->num_rx_status_rings / max;
+ info->max_rx_buf_rings = res->num_rx_buf_rings / max;
+ info->max_rx_ques = res->num_rx_queues / max;
+ info->max_rx_lro_flow = res->num_lro_flows_supported / max;
+ info->max_tx_vlan_keys = res->num_txvlan_keys;
+ info->max_local_ipv6_addrs = res->max_local_ipv6_addrs;
+ info->max_remote_ipv6_addrs = res->max_remote_ipv6_addrs;
+
+ ret = qlcnic_sriov_pf_set_vport_info(adapter, info, vpid);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void qlcnic_sriov_pf_set_ff_max_res(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info)
+{
+ struct qlcnic_resources *ff_max = &adapter->ahw->sriov->ff_max;
+
+ ff_max->num_tx_mac_filters = info->max_tx_mac_filters;
+ ff_max->num_rx_ucast_mac_filters = info->max_rx_ucast_mac_filters;
+ ff_max->num_rx_mcast_mac_filters = info->max_rx_mcast_mac_filters;
+ ff_max->num_txvlan_keys = info->max_tx_vlan_keys;
+ ff_max->num_rx_queues = info->max_rx_ques;
+ ff_max->num_tx_queues = info->max_tx_ques;
+ ff_max->num_lro_flows_supported = info->max_rx_lro_flow;
+ ff_max->num_destip = info->max_rx_ip_addr;
+ ff_max->num_rx_buf_rings = info->max_rx_buf_rings;
+ ff_max->num_rx_status_rings = info->max_rx_status_rings;
+ ff_max->max_remote_ipv6_addrs = info->max_remote_ipv6_addrs;
+ ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs;
+}
+
+static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info)
+{
+ int err;
+ struct qlcnic_cmd_args cmd;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO))
+ return -ENOMEM;
+
+ cmd.req.arg[1] = 0x2;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get PF info, err=%d\n", err);
+ goto out;
+ }
+
+ npar_info->total_pf = cmd.rsp.arg[2] & 0xff;
+ npar_info->total_rss_engines = (cmd.rsp.arg[2] >> 8) & 0xff;
+ npar_info->max_vports = MSW(cmd.rsp.arg[2]);
+ npar_info->max_tx_ques = LSW(cmd.rsp.arg[3]);
+ npar_info->max_tx_mac_filters = MSW(cmd.rsp.arg[3]);
+ npar_info->max_rx_mcast_mac_filters = LSW(cmd.rsp.arg[4]);
+ npar_info->max_rx_ucast_mac_filters = MSW(cmd.rsp.arg[4]);
+ npar_info->max_rx_ip_addr = LSW(cmd.rsp.arg[5]);
+ npar_info->max_rx_lro_flow = MSW(cmd.rsp.arg[5]);
+ npar_info->max_rx_status_rings = LSW(cmd.rsp.arg[6]);
+ npar_info->max_rx_buf_rings = MSW(cmd.rsp.arg[6]);
+ npar_info->max_rx_ques = LSW(cmd.rsp.arg[7]);
+ npar_info->max_tx_vlan_keys = MSW(cmd.rsp.arg[7]);
+ npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]);
+ npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]);
+
+ qlcnic_sriov_pf_set_ff_max_res(adapter, npar_info);
+ dev_info(&adapter->pdev->dev,
+ "\n\ttotal_pf: %d,\n"
+ "\n\ttotal_rss_engines: %d max_vports: %d max_tx_ques %d,\n"
+ "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
+ "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
+ "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
+ "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
+ "\tmax_local_ipv6_addrs: %d, max_remote_ipv6_addrs: %d\n",
+ npar_info->total_pf, npar_info->total_rss_engines,
+ npar_info->max_vports, npar_info->max_tx_ques,
+ npar_info->max_tx_mac_filters,
+ npar_info->max_rx_mcast_mac_filters,
+ npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
+ npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
+ npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
+ npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
+ npar_info->max_remote_ipv6_addrs);
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static void qlcnic_sriov_pf_reset_vport_handle(struct qlcnic_adapter *adapter,
+ u8 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vport *vp;
+ int index;
+
+ if (adapter->ahw->pci_func == func) {
+ sriov->vp_handle = 0;
+ } else {
+ index = qlcnic_sriov_func_to_index(adapter, func);
+ if (index < 0)
+ return;
+ vp = sriov->vf_info[index].vp;
+ vp->handle = 0;
+ }
+}
+
+static void qlcnic_sriov_pf_set_vport_handle(struct qlcnic_adapter *adapter,
+ u16 vport_handle, u8 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vport *vp;
+ int index;
+
+ if (adapter->ahw->pci_func == func) {
+ sriov->vp_handle = vport_handle;
+ } else {
+ index = qlcnic_sriov_func_to_index(adapter, func);
+ if (index < 0)
+ return;
+ vp = sriov->vf_info[index].vp;
+ vp->handle = vport_handle;
+ }
+}
+
+static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *adapter,
+ u8 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf_info;
+ int index;
+
+ if (adapter->ahw->pci_func == func) {
+ return sriov->vp_handle;
+ } else {
+ index = qlcnic_sriov_func_to_index(adapter, func);
+ if (index >= 0) {
+ vf_info = &sriov->vf_info[index];
+ return vf_info->vp->handle;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int qlcnic_sriov_pf_config_vport(struct qlcnic_adapter *adapter,
+ u8 flag, u16 func)
+{
+ struct qlcnic_cmd_args cmd;
+ int ret;
+ int vpid;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_VPORT))
+ return -ENOMEM;
+
+ if (flag) {
+ cmd.req.arg[3] = func << 8;
+ } else {
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+ if (vpid < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ cmd.req.arg[3] = ((vpid & 0xffff) << 8) | 1;
+ }
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed %s vport, err %d for func 0x%x\n",
+ (flag ? "enable" : "disable"), ret, func);
+ goto out;
+ }
+
+ if (flag) {
+ vpid = cmd.rsp.arg[2] & 0xffff;
+ qlcnic_sriov_pf_set_vport_handle(adapter, vpid, func);
+ } else {
+ qlcnic_sriov_pf_reset_vport_handle(adapter, func);
+ }
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return ret;
+}
+
+static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
+ u8 enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = 0x4;
+ if (enable)
+ cmd.req.arg[1] |= BIT_16;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure VLAN filtering, err=%d\n", err);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_eswitch(struct qlcnic_adapter *adapter,
+ u8 func, u8 enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int err = -EIO;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH))
+ return -ENOMEM;
+
+ cmd.req.arg[0] |= (3 << 29);
+ cmd.req.arg[1] = ((func & 0xf) << 2) | BIT_6 | BIT_1;
+ if (enable)
+ cmd.req.arg[1] |= BIT_0;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to %s sriov eswitch, err=%d\n",
+ (enable ? "enable" : "disable"), err);
+ err = -EIO;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static void qlcnic_sriov_pf_del_flr_queue(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_back_channel *bc = &sriov->bc;
+ int i;
+
+ for (i = 0; i < sriov->num_vfs; i++)
+ cancel_work_sync(&sriov->vf_info[i].flr_work);
+
+ destroy_workqueue(bc->bc_flr_wq);
+}
+
+static int qlcnic_sriov_pf_create_flr_queue(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
+ struct workqueue_struct *wq;
+
+ wq = create_singlethread_workqueue("qlcnic-flr");
+ if (wq == NULL) {
+ dev_err(&adapter->pdev->dev, "Cannot create FLR workqueue\n");
+ return -ENOMEM;
+ }
+
+ bc->bc_flr_wq = wq;
+ return 0;
+}
+
+void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter)
+{
+ u8 func = adapter->ahw->pci_func;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return;
+
+ qlcnic_sriov_pf_del_flr_queue(adapter);
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+ qlcnic_sriov_pf_config_vport(adapter, 0, func);
+ qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0);
+ qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0);
+ __qlcnic_sriov_cleanup(adapter);
+ adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
+ clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+}
+
+void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter)
+{
+ if (!qlcnic_sriov_pf_check(adapter))
+ return;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return;
+
+ pci_disable_sriov(adapter->pdev);
+ netdev_info(adapter->netdev,
+ "SR-IOV has been successfully disabled on port %d\n",
+ adapter->portnum);
+}
+
+static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_sriov_pf_disable(adapter);
+
+ qlcnic_sriov_pf_cleanup(adapter);
+
+ /* After disabling SR-IOV, re-initialize the driver in default
+ * mode and configure the opmode based on the op_mode of the
+ * function.
+ */
+ if (qlcnic_83xx_configure_opmode(adapter))
+ return -EIO;
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_init(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_info nic_info, pf_info, vp_info;
+ int err;
+ u8 func = ahw->pci_func;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return 0;
+
+ err = qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 1);
+ if (err)
+ return err;
+
+ err = qlcnic_sriov_pf_cfg_eswitch(adapter, func, 1);
+ if (err)
+ goto disable_vlan_filtering;
+
+ err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
+ if (err)
+ goto disable_eswitch;
+
+ err = qlcnic_sriov_get_pf_info(adapter, &pf_info);
+ if (err)
+ goto delete_vport;
+
+ err = qlcnic_get_nic_info(adapter, &nic_info, func);
+ if (err)
+ goto delete_vport;
+
+ err = qlcnic_sriov_pf_cal_res_limit(adapter, &vp_info, func);
+ if (err)
+ goto delete_vport;
+
+ err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+ if (err)
+ goto delete_vport;
+
+ ahw->physical_port = (u8) nic_info.phys_port;
+ ahw->switch_mode = nic_info.switch_mode;
+ ahw->max_mtu = nic_info.max_mtu;
+ ahw->capabilities = nic_info.capabilities;
+ ahw->nic_mode = QLC_83XX_SRIOV_MODE;
+ return err;
+
+delete_vport:
+ qlcnic_sriov_pf_config_vport(adapter, 0, func);
+
+disable_eswitch:
+ qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0);
+
+disable_vlan_filtering:
+ qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0);
+
+ return err;
+}
+
+static int qlcnic_sriov_pf_enable(struct qlcnic_adapter *adapter, int num_vfs)
+{
+ int err;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return 0;
+
+ err = pci_enable_sriov(adapter->pdev, num_vfs);
+ if (err)
+ qlcnic_sriov_pf_cleanup(adapter);
+
+ return err;
+}
+
+static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
+ int num_vfs)
+{
+ int err = 0;
+
+ set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+ adapter->ahw->op_mode = QLCNIC_SRIOV_PF_FUNC;
+
+ err = qlcnic_sriov_init(adapter, num_vfs);
+ if (err)
+ goto clear_op_mode;
+
+ err = qlcnic_sriov_pf_create_flr_queue(adapter);
+ if (err)
+ goto sriov_cleanup;
+
+ err = qlcnic_sriov_pf_init(adapter);
+ if (err)
+ goto del_flr_queue;
+
+ err = qlcnic_sriov_pf_enable(adapter, num_vfs);
+ return err;
+
+del_flr_queue:
+ qlcnic_sriov_pf_del_flr_queue(adapter);
+
+sriov_cleanup:
+ __qlcnic_sriov_cleanup(adapter);
+
+clear_op_mode:
+ clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+ adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
+ return err;
+}
+
+static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ netdev_err(netdev,
+ "SR-IOV cannot be enabled when legacy interrupts are enabled\n");
+ return -EIO;
+ }
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ err = __qlcnic_pci_sriov_enable(adapter, num_vfs);
+ if (err) {
+ netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
+ adapter->portnum);
+
+ err = -EIO;
+ if (qlcnic_83xx_configure_opmode(adapter))
+ goto error;
+ } else {
+ netdev_info(netdev,
+ "SR-IOV has been successfully enabled on port %d\n",
+ adapter->portnum);
+ /* Return number of vfs enabled */
+ err = num_vfs;
+ }
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+error:
+ return err;
+}
+
+int qlcnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(dev);
+ int err;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ if (num_vfs == 0)
+ err = qlcnic_pci_sriov_disable(adapter);
+ else
+ err = qlcnic_pci_sriov_enable(adapter, num_vfs);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return err;
+}
+
+static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func)
+{
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_vport *vp;
+ int err, id;
+
+ id = qlcnic_sriov_func_to_index(adapter, func);
+ if (id < 0)
+ return id;
+
+ vp = adapter->ahw->sriov->vf_info[id].vp;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = 0x3 | func << 16;
+ if (vp->vlan_mode == QLC_PVID_MODE) {
+ cmd.req.arg[2] |= BIT_6;
+ cmd.req.arg[3] |= vp->vlan << 8;
+ }
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev, "Failed to set ACL, err=%d\n",
+ err);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_sriov_set_vf_vport_info(struct qlcnic_adapter *adapter,
+ u16 func)
+{
+ struct qlcnic_info defvp_info;
+ int err;
+
+ err = qlcnic_sriov_pf_cal_res_limit(adapter, &defvp_info, func);
+ if (err)
+ return -EIO;
+
+ err = qlcnic_sriov_set_vf_acl(adapter, func);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+ u16 func = vf->pci_func;
+
+ cmd->rsp.arg[0] = trans->req_hdr->cmd_op;
+ cmd->rsp.arg[0] |= (1 << 16);
+
+ if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
+ err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
+ if (!err) {
+ err = qlcnic_sriov_set_vf_vport_info(adapter, func);
+ if (err)
+ qlcnic_sriov_pf_config_vport(adapter, 0, func);
+ }
+ } else {
+ err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
+ }
+
+ if (err)
+ goto err_out;
+
+ cmd->rsp.arg[0] |= (1 << 25);
+
+ if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
+ set_bit(QLC_BC_VF_STATE, &vf->state);
+ else
+ clear_bit(QLC_BC_VF_STATE, &vf->state);
+
+ return err;
+
+err_out:
+ cmd->rsp.arg[0] |= (2 << 25);
+ return err;
+}
+
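+/* Add or delete the VF's default MAC in the CAM via the PF's
+ * CONFIG_MAC_VLAN mailbox command, scoped to the VF's vport handle;
+ * when a VLAN is configured the operation is converted to the
+ * corresponding MAC+VLAN variant.
+ */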
+static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
+ struct qlcnic_vport *vp,
+ u16 func, u16 vlan, u8 op)
+{
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_macvlan_mbx mv;
+ u8 *addr;
+ int err;
+ u32 *buf;
+ int vpid;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN))
+ return -ENOMEM;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+ if (vpid < 0) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (vlan)
+ op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
+ QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL);
+
+ cmd.req.arg[1] = op | (1 << 8) | (3 << 6);
+ cmd.req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
+
+ addr = vp->mac;
+ mv.vlan = vlan;
+ mv.mac_addr0 = addr[0];
+ mv.mac_addr1 = addr[1];
+ mv.mac_addr2 = addr[2];
+ mv.mac_addr3 = addr[3];
+ mv.mac_addr4 = addr[4];
+ mv.mac_addr5 = addr[5];
+ buf = &cmd.req.arg[2];
+ memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "MAC-VLAN %s to CAM failed, err=%d.\n",
+ ((op == 1) ? "add" : "delete"), err);
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_create_rx_ctx(struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = tran->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_rcv_mbx_out *mbx_out;
+ int err;
+ u16 vlan;
+
+ err = qlcnic_sriov_validate_create_rx_ctx(cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ cmd->req.arg[6] = vf->vp->handle;
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ vlan = vf->vp->vlan;
+ if (!err) {
+ mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1];
+ vf->rx_ctx_id = mbx_out->ctx_id;
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
+ vlan, QLCNIC_MAC_ADD);
+ } else {
+ vf->rx_ctx_id = 0;
+ }
+
+ return err;
+}
+
+static int qlcnic_sriov_pf_mac_address_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ u8 type, *mac;
+
+ type = cmd->req.arg[1];
+ switch (type) {
+ case QLCNIC_SET_STATION_MAC:
+ case QLCNIC_SET_FAC_DEF_MAC:
+ cmd->rsp.arg[0] = (2 << 25);
+ break;
+ case QLCNIC_GET_CURRENT_MAC:
+ cmd->rsp.arg[0] = (1 << 25);
+ mac = vf->vp->mac;
+ cmd->rsp.arg[2] = mac[1] | ((mac[0] << 8) & 0xff00);
+ cmd->rsp.arg[1] = mac[5] | ((mac[4] << 8) & 0xff00) |
+ ((mac[3] << 16) & 0xff0000) |
+ ((mac[2] << 24) & 0xff000000);
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_validate_create_tx_ctx(struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_create_tx_ctx_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_tx_mbx_out *mbx_out;
+ int err;
+
+ err = qlcnic_sriov_validate_create_tx_ctx(cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ cmd->req.arg[5] |= vf->vp->handle << 16;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (!err) {
+ mbx_out = (struct qlcnic_tx_mbx_out *)&cmd->rsp.arg[2];
+ vf->tx_ctx_id = mbx_out->ctx_id;
+ } else {
+ vf->tx_ctx_id = 0;
+ }
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_del_rx_ctx(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ if ((cmd->req.arg[1] & 0xffff) != vf->rx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+ u16 vlan;
+
+ err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ vlan = vf->vp->vlan;
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
+ vlan, QLCNIC_MAC_DEL);
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ if (!err)
+ vf->rx_ctx_id = 0;
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_del_tx_ctx(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ if ((cmd->req.arg[1] & 0xffff) != vf->tx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_del_tx_ctx_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_del_tx_ctx(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ if (!err)
+ vf->tx_ctx_id = 0;
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_lro(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_lro_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_lro(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_ip_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ cmd->req.arg[1] |= BIT_31;
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_intrpt(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (((cmd->req.arg[1] >> 8) & 0xff) != vf->pci_func)
+ return -EINVAL;
+
+ if (!(cmd->req.arg[1] & BIT_16))
+ return -EINVAL;
+
+ if ((cmd->req.arg[1] & 0xff) != 0x1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_intrpt_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_intrpt(vf, cmd);
+ if (err)
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ else
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_mtu(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->req.arg[1] != vf->rx_ctx_id)
+ return -EINVAL;
+
+ if (cmd->req.arg[2] > adapter->ahw->max_mtu)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_set_mtu_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_mtu(adapter, vf, cmd);
+ if (err)
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ else
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_get_nic_info(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->req.arg[1] & BIT_31) {
+ if (((cmd->req.arg[1] >> 16) & 0x7fff) != vf->pci_func)
+ return -EINVAL;
+ } else {
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_get_nic_info_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_get_nic_info(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_rss(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->req.arg[1] != vf->rx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_rss_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_rss(vf, cmd);
+ if (err)
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ else
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ u16 ctx_id, pkts, time;
+
+ ctx_id = cmd->req.arg[1] >> 16;
+ pkts = cmd->req.arg[2] & 0xffff;
+ time = cmd->req.arg[2] >> 16;
+
+ if (ctx_id != vf->rx_ctx_id)
+ return -EINVAL;
+ if (pkts > coal->rx_packets)
+ return -EINVAL;
+ if (time < coal->rx_time_us)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_intrcoal(adapter, vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_macvlan_mbx *macvlan;
+ struct qlcnic_vport *vp = vf->vp;
+ u8 op, new_op;
+
+ if (!(cmd->req.arg[1] & BIT_8))
+ return -EINVAL;
+
+ cmd->req.arg[1] |= (vf->vp->handle << 16);
+ cmd->req.arg[1] |= BIT_31;
+
+ macvlan = (struct qlcnic_macvlan_mbx *)&cmd->req.arg[2];
+ if (!(macvlan->mac_addr0 & BIT_0)) {
+ dev_err(&adapter->pdev->dev,
+ "MAC address change is not allowed from VF %d",
+ vf->pci_func);
+ return -EINVAL;
+ }
+
+ if (vp->vlan_mode == QLC_PVID_MODE) {
+ op = cmd->req.arg[1] & 0x7;
+ cmd->req.arg[1] &= ~0x7;
+ new_op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
+ QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
+ cmd->req.arg[3] |= vp->vlan << 16;
+ cmd->req.arg[1] |= new_op;
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_macvlan_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_macvlan(adapter, vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
+ return -EINVAL;
+
+ if (!(cmd->req.arg[1] & BIT_8))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_linkevent_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_linkevent(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_promisc_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ cmd->req.arg[1] |= BIT_31;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_vport *vp = vf->vp;
+ u8 cmd_op, mode = vp->vlan_mode;
+
+ cmd_op = trans->req_hdr->cmd_op;
+ cmd->rsp.arg[0] = (cmd_op & 0xffff) | 14 << 16 | 1 << 25;
+
+ switch (mode) {
+ case QLC_GUEST_VLAN_MODE:
+ cmd->rsp.arg[1] = mode | 1 << 8;
+ cmd->rsp.arg[2] = 1 << 16;
+ break;
+ case QLC_PVID_MODE:
+ cmd->rsp.arg[1] = mode | 1 << 8 | vp->vlan << 16;
+ break;
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_del_guest_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_vport *vp = vf->vp;
+
+ if (!vp->vlan)
+ return -EINVAL;
+
+ if (!vf->rx_ctx_id) {
+ vp->vlan = 0;
+ return 0;
+ }
+
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
+ vp->vlan, QLCNIC_MAC_DEL);
+ vp->vlan = 0;
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
+ 0, QLCNIC_MAC_ADD);
+ return 0;
+}
+
+static int qlcnic_sriov_pf_add_guest_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vport *vp = vf->vp;
+ int err = -EIO;
+
+ if (vp->vlan)
+ return err;
+
+ if (!vf->rx_ctx_id) {
+ vp->vlan = cmd->req.arg[1] >> 16;
+ return 0;
+ }
+
+ err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
+ 0, QLCNIC_MAC_DEL);
+ if (err)
+ return err;
+
+ vp->vlan = cmd->req.arg[1] >> 16;
+ err = qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
+ vp->vlan, QLCNIC_MAC_ADD);
+
+ if (err) {
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vp, vf->pci_func,
+ 0, QLCNIC_MAC_ADD);
+ vp->vlan = 0;
+ }
+
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_guest_vlan_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_vport *vp = vf->vp;
+ int err = -EIO;
+ u8 op;
+
+ if (vp->vlan_mode != QLC_GUEST_VLAN_MODE) {
+ cmd->rsp.arg[0] |= 2 << 25;
+ return err;
+ }
+
+ op = cmd->req.arg[1] & 0xf;
+
+ if (op)
+ err = qlcnic_sriov_pf_add_guest_vlan(adapter, vf, cmd);
+ else
+ err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf);
+
+ cmd->rsp.arg[0] |= err ? 2 << 25 : 1 << 25;
+ return err;
+}
+
+static const int qlcnic_pf_passthru_supp_cmds[] = {
+ QLCNIC_CMD_GET_STATISTICS,
+ QLCNIC_CMD_GET_PORT_CONFIG,
+ QLCNIC_CMD_GET_LINK_STATUS,
+};
+
+static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = {
+ [QLCNIC_BC_CMD_CHANNEL_INIT] = {&qlcnic_sriov_pf_channel_cfg_cmd},
+ [QLCNIC_BC_CMD_CHANNEL_TERM] = {&qlcnic_sriov_pf_channel_cfg_cmd},
+ [QLCNIC_BC_CMD_GET_ACL] = {&qlcnic_sriov_pf_get_acl_cmd},
+ [QLCNIC_BC_CMD_CFG_GUEST_VLAN] = {&qlcnic_sriov_pf_cfg_guest_vlan_cmd},
+};
+
+static const struct qlcnic_sriov_fw_cmd_handler qlcnic_pf_fw_cmd_hdlr[] = {
+ {QLCNIC_CMD_CREATE_RX_CTX, qlcnic_sriov_pf_create_rx_ctx_cmd},
+ {QLCNIC_CMD_CREATE_TX_CTX, qlcnic_sriov_pf_create_tx_ctx_cmd},
+ {QLCNIC_CMD_MAC_ADDRESS, qlcnic_sriov_pf_mac_address_cmd},
+ {QLCNIC_CMD_DESTROY_RX_CTX, qlcnic_sriov_pf_del_rx_ctx_cmd},
+ {QLCNIC_CMD_DESTROY_TX_CTX, qlcnic_sriov_pf_del_tx_ctx_cmd},
+ {QLCNIC_CMD_CONFIGURE_HW_LRO, qlcnic_sriov_pf_cfg_lro_cmd},
+ {QLCNIC_CMD_CONFIGURE_IP_ADDR, qlcnic_sriov_pf_cfg_ip_cmd},
+ {QLCNIC_CMD_CONFIG_INTRPT, qlcnic_sriov_pf_cfg_intrpt_cmd},
+ {QLCNIC_CMD_SET_MTU, qlcnic_sriov_pf_set_mtu_cmd},
+ {QLCNIC_CMD_GET_NIC_INFO, qlcnic_sriov_pf_get_nic_info_cmd},
+ {QLCNIC_CMD_CONFIGURE_RSS, qlcnic_sriov_pf_cfg_rss_cmd},
+ {QLCNIC_CMD_CONFIG_INTR_COAL, qlcnic_sriov_pf_cfg_intrcoal_cmd},
+ {QLCNIC_CMD_CONFIG_MAC_VLAN, qlcnic_sriov_pf_cfg_macvlan_cmd},
+ {QLCNIC_CMD_GET_LINK_EVENT, qlcnic_sriov_pf_linkevent_cmd},
+ {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, qlcnic_sriov_pf_cfg_promisc_cmd},
+};
+
+void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ u8 size, cmd_op;
+
+ cmd_op = trans->req_hdr->cmd_op;
+
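+ /* Three-tier dispatch: channel (BC) commands index a handler table by
+  * opcode, firmware commands are matched against a translation list,
+  * and a short whitelist of get-only commands is passed straight
+  * through. Anything else is rejected with status 0x9 (unsupported).
+  */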
+ if (trans->req_hdr->op_type == QLC_BC_CMD) {
+ size = ARRAY_SIZE(qlcnic_pf_bc_cmd_hdlr);
+ if (cmd_op < size) {
+ qlcnic_pf_bc_cmd_hdlr[cmd_op].fn(trans, cmd);
+ return;
+ }
+ } else {
+ int i;
+ size = ARRAY_SIZE(qlcnic_pf_fw_cmd_hdlr);
+ for (i = 0; i < size; i++) {
+ if (cmd_op == qlcnic_pf_fw_cmd_hdlr[i].cmd) {
+ qlcnic_pf_fw_cmd_hdlr[i].fn(trans, cmd);
+ return;
+ }
+ }
+
+ size = ARRAY_SIZE(qlcnic_pf_passthru_supp_cmds);
+ for (i = 0; i < size; i++) {
+ if (cmd_op == qlcnic_pf_passthru_supp_cmds[i]) {
+ qlcnic_issue_cmd(adapter, cmd);
+ return;
+ }
+ }
+ }
+
+ cmd->rsp.arg[0] |= (0x9 << 25);
+}
+
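+/* The qlcnic_pf_set_interface_id_* helpers below stamp the PF's own vport
+ * handle into the interface-id word of mailbox commands the PF issues for
+ * itself, mirroring what it does on behalf of VFs above.
+ */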
+void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid;
+}
+
+void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid << 16;
+}
+
+void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid << 16;
+}
+
+void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid << 16;
+}
+
+void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= (vpid << 16) | BIT_31;
+}
+
+void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= (vpid << 16) | BIT_31;
+}
+
+void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= (vpid << 16) | BIT_31;
+}
+
+static void qlcnic_sriov_del_rx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_cmd_args cmd;
+ int vpid;
+
+ if (!vf->rx_ctx_id)
+ return;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX))
+ return;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
+ if (vpid >= 0) {
+ cmd.req.arg[1] = vf->rx_ctx_id | (vpid & 0xffff) << 16;
+ if (qlcnic_issue_cmd(adapter, &cmd))
+ dev_err(&adapter->pdev->dev,
+ "Failed to delete Tx ctx in firmware for func 0x%x\n",
+ vf->pci_func);
+ else
+ vf->rx_ctx_id = 0;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static void qlcnic_sriov_del_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_cmd_args cmd;
+ int vpid;
+
+ if (!vf->tx_ctx_id)
+ return;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX))
+ return;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
+ if (vpid >= 0) {
+ cmd.req.arg[1] |= vf->tx_ctx_id | (vpid & 0xffff) << 16;
+ if (qlcnic_issue_cmd(adapter, &cmd))
+ dev_err(&adapter->pdev->dev,
+ "Failed to delete Tx ctx in firmware for func 0x%x\n",
+ vf->pci_func);
+ else
+ vf->tx_ctx_id = 0;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static int qlcnic_sriov_add_act_list_irqsave(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_trans *trans)
+{
+ struct qlcnic_trans_list *t_list = &vf->rcv_act;
+ unsigned long flag;
+
+ spin_lock_irqsave(&t_list->lock, flag);
+
+ __qlcnic_sriov_add_act_list(sriov, vf, trans);
+
+ spin_unlock_irqrestore(&t_list->lock, flag);
+ return 0;
+}
+
+static void __qlcnic_sriov_process_flr(struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_adapter *adapter = vf->adapter;
+
+ qlcnic_sriov_cleanup_list(&vf->rcv_pend);
+ cancel_work_sync(&vf->trans_work);
+ qlcnic_sriov_cleanup_list(&vf->rcv_act);
+
+ if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) {
+ qlcnic_sriov_del_tx_ctx(adapter, vf);
+ qlcnic_sriov_del_rx_ctx(adapter, vf);
+ }
+
+ qlcnic_sriov_pf_config_vport(adapter, 0, vf->pci_func);
+
+ clear_bit(QLC_BC_VF_FLR, &vf->state);
+ if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) {
+ qlcnic_sriov_add_act_list_irqsave(adapter->ahw->sriov, vf,
+ vf->flr_trans);
+ clear_bit(QLC_BC_VF_SOFT_FLR, &vf->state);
+ vf->flr_trans = NULL;
+ }
+}
+
+static void qlcnic_sriov_pf_process_flr(struct work_struct *work)
+{
+ struct qlcnic_vf_info *vf;
+
+ vf = container_of(work, struct qlcnic_vf_info, flr_work);
+ __qlcnic_sriov_process_flr(vf);
+}
+
+static void qlcnic_sriov_schedule_flr(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ work_func_t func)
+{
+ if (test_bit(__QLCNIC_RESETTING, &vf->adapter->state))
+ return;
+
+ INIT_WORK(&vf->flr_work, func);
+ queue_work(sriov->bc.bc_flr_wq, &vf->flr_work);
+}
+
+static void qlcnic_sriov_handle_soft_flr(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+
+ set_bit(QLC_BC_VF_FLR, &vf->state);
+ clear_bit(QLC_BC_VF_STATE, &vf->state);
+ set_bit(QLC_BC_VF_SOFT_FLR, &vf->state);
+ vf->flr_trans = trans;
+ qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
+ netdev_info(adapter->netdev, "Software FLR for PCI func %d\n",
+ vf->pci_func);
+}
+
+bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_bc_hdr *hdr = trans->req_hdr;
+
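+ /* A CHANNEL_INIT from a VF that is already marked active means the VF
+  * driver was reloaded without a clean teardown; emulate an FLR so the
+  * stale contexts are torn down before the channel is re-initialized.
+  */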
+ if ((hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
+ (hdr->op_type == QLC_BC_CMD) &&
+ test_bit(QLC_BC_VF_STATE, &vf->state)) {
+ qlcnic_sriov_handle_soft_flr(adapter, trans, vf);
+ return true;
+ }
+
+ return false;
+}
+
+void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ struct net_device *dev = vf->adapter->netdev;
+
+ if (!test_and_clear_bit(QLC_BC_VF_STATE, &vf->state)) {
+ clear_bit(QLC_BC_VF_FLR, &vf->state);
+ return;
+ }
+
+ if (test_and_set_bit(QLC_BC_VF_FLR, &vf->state)) {
+ netdev_info(dev, "FLR for PCI func %d in progress\n",
+ vf->pci_func);
+ return;
+ }
+
+ qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
+ netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func);
+}
+
+void qlcnic_sriov_pf_reset(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_sriov *sriov = ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ u16 num_vfs = sriov->num_vfs;
+ int i;
+
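+ /* Adapter reset: run the FLR cleanup path synchronously for every VF
+  * so no stale contexts or pending transactions survive the reset.
+  */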
+ for (i = 0; i < num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ vf->rx_ctx_id = 0;
+ vf->tx_ctx_id = 0;
+ cancel_work_sync(&vf->flr_work);
+ __qlcnic_sriov_process_flr(vf);
+ clear_bit(QLC_BC_VF_STATE, &vf->state);
+ }
+
+ qlcnic_sriov_pf_reset_vport_handle(adapter, ahw->pci_func);
+ QLCWRX(ahw, QLCNIC_MBX_INTR_ENBL, (ahw->num_msix - 1) << 8);
+}
+
+int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return 0;
+
+ ahw->op_mode = QLCNIC_SRIOV_PF_FUNC;
+
+ err = qlcnic_sriov_pf_init(adapter);
+ if (err)
+ return err;
+
+ dev_info(&adapter->pdev->dev, "%s: op_mode %d\n",
+ __func__, ahw->op_mode);
+ return err;
+}
+
+int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ int i, num_vfs = sriov->num_vfs;
+ struct qlcnic_vf_info *vf_info;
+ u8 *curr_mac;
+
+ if (!qlcnic_sriov_pf_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (!is_valid_ether_addr(mac) || vf >= num_vfs)
+ return -EINVAL;
+
+ if (!compare_ether_addr(adapter->mac_addr, mac)) {
+ netdev_err(netdev, "MAC address is already in use by the PF\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_vfs; i++) {
+ vf_info = &sriov->vf_info[i];
+ if (!compare_ether_addr(vf_info->vp->mac, mac)) {
+ netdev_err(netdev,
+ "MAC address is already in use by VF %d\n",
+ i);
+ return -EINVAL;
+ }
+ }
+
+ vf_info = &sriov->vf_info[vf];
+ curr_mac = vf_info->vp->mac;
+
+ if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+ netdev_err(netdev,
+ "MAC address change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
+ vf);
+ return -EOPNOTSUPP;
+ }
+
+ memcpy(curr_mac, mac, netdev->addr_len);
+ netdev_info(netdev, "MAC Address %pM is configured for VF %d\n",
+ mac, vf);
+ return 0;
+}
+
+int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf_info;
+ struct qlcnic_info nic_info;
+ struct qlcnic_vport *vp;
+ u16 vpid;
+
+ if (!qlcnic_sriov_pf_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (vf >= sriov->num_vfs)
+ return -EINVAL;
+
+ if (tx_rate != 0 &&
+ (tx_rate > QLC_VF_MAX_TX_RATE || tx_rate < QLC_VF_MIN_TX_RATE)) {
+ netdev_err(netdev,
+ "Invalid Tx rate, allowed range is [%d - %d]\n",
+ QLC_VF_MIN_TX_RATE, QLC_VF_MAX_TX_RATE);
+ return -EINVAL;
+ }
+
+ if (tx_rate == 0)
+ tx_rate = 10000;
+
+ vf_info = &sriov->vf_info[vf];
+ vp = vf_info->vp;
+ vpid = vp->handle;
+
+ if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+ if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid))
+ return -EIO;
+
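+ /* Firmware takes the bandwidth cap in 1% steps of the 10 Gbps link,
+  * i.e. units of 100 Mbps.
+  */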
+ nic_info.max_tx_bw = tx_rate / 100;
+ nic_info.bit_offsets = BIT_0;
+
+ if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid))
+ return -EIO;
+ }
+
+ vp->max_tx_bw = tx_rate / 100;
+ netdev_info(netdev,
+ "Setting Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
+ tx_rate, vp->max_tx_bw, vf);
+ return 0;
+}
+
+int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
+ u16 vlan, u8 qos)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf_info;
+ struct qlcnic_vport *vp;
+
+ if (!qlcnic_sriov_pf_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (vf >= sriov->num_vfs || qos > 7)
+ return -EINVAL;
+
+ if (vlan > MAX_VLAN_ID) {
+ netdev_err(netdev,
+ "Invalid VLAN ID, allowed range is [0 - %d]\n",
+ MAX_VLAN_ID);
+ return -EINVAL;
+ }
+
+ vf_info = &sriov->vf_info[vf];
+ vp = vf_info->vp;
+ if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+ netdev_err(netdev,
+ "VLAN change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
+ vf);
+ return -EOPNOTSUPP;
+ }
+
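+ /* VLAN 4095 hands VLAN control to the VF (guest VLAN mode, configured
+  * via the CFG_GUEST_VLAN channel command); 0 clears any VLAN; any
+  * other value becomes a PF-enforced port VLAN (PVID).
+  */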
+ switch (vlan) {
+ case 4095:
+ vp->vlan_mode = QLC_GUEST_VLAN_MODE;
+ break;
+ case 0:
+ vp->vlan_mode = QLC_NO_VLAN_MODE;
+ vp->vlan = 0;
+ vp->qos = 0;
+ break;
+ default:
+ vp->vlan_mode = QLC_PVID_MODE;
+ vp->vlan = vlan;
+ vp->qos = qos;
+ }
+
+ netdev_info(netdev, "Setting VLAN %d, QoS %d, for VF %d\n",
+ vlan, qos, vf);
+ return 0;
+}
+
+int qlcnic_sriov_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vport *vp;
+
+ if (!qlcnic_sriov_pf_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (vf >= sriov->num_vfs)
+ return -EINVAL;
+
+ vp = sriov->vf_info[vf].vp;
+ memcpy(&ivi->mac, vp->mac, ETH_ALEN);
+ ivi->vlan = vp->vlan;
+ ivi->qos = vp->qos;
+ if (vp->max_tx_bw == MAX_BW)
+ ivi->tx_rate = 0;
+ else
+ ivi->tx_rate = vp->max_tx_bw * 100;
+
+ ivi->vf = vf;
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 987fb6f8adc3..4e22e794a186 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -21,8 +21,6 @@
#include <linux/aer.h>
#include <linux/log2.h>
-#include <linux/sysfs.h>
-
#define QLC_STATUS_UNSUPPORTED_CMD -2
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
@@ -200,10 +198,10 @@ beacon_err:
}
err = qlcnic_config_led(adapter, b_state, b_rate);
- if (!err)
+ if (!err) {
err = len;
- else
ahw->beacon_state = b_state;
+ }
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
@@ -886,6 +884,244 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
return size;
}
+static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ unsigned char *p_read_buf;
+ int ret, count;
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ if (!size)
+ return QL_STATUS_INVALID_PARAM;
+ if (!buf)
+ return QL_STATUS_INVALID_PARAM;
+
+ count = size / sizeof(u32);
+
+ if (size % sizeof(u32))
+ count++;
+
+ /* Reads are done in whole dwords; size the buffer by the rounded-up
+  * dword count so the final partial word cannot overrun it.
+  */
+ p_read_buf = kcalloc(count, sizeof(u32), GFP_KERNEL);
+ if (!p_read_buf)
+ return -ENOMEM;
+ if (qlcnic_83xx_lock_flash(adapter) != 0) {
+ kfree(p_read_buf);
+ return -EIO;
+ }
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter, offset, p_read_buf,
+ count);
+
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ kfree(p_read_buf);
+ return ret;
+ }
+
+ qlcnic_83xx_unlock_flash(adapter);
+ memcpy(buf, p_read_buf, size);
+ kfree(p_read_buf);
+
+ return size;
+}
+
+static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ int i, ret, count;
+ unsigned char *p_cache, *p_src;
+
+ p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
+ if (!p_cache)
+ return -ENOMEM;
+
+ memcpy(p_cache, buf, size);
+ p_src = p_cache;
+ count = size / sizeof(u32);
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0) {
+ kfree(p_cache);
+ return -EIO;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_enable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
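+ /* Only whole QLC_83XX_FLASH_WRITE_MAX-dword chunks are written; the
+  * caller is expected to pass a payload that is an exact multiple, as
+  * any remainder is silently dropped.
+  */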
+ for (i = 0; i < count / QLC_83XX_FLASH_WRITE_MAX; i++) {
+ ret = qlcnic_83xx_flash_bulk_write(adapter, offset,
+ (u32 *)p_src,
+ QLC_83XX_FLASH_WRITE_MAX);
+
+ if (ret) {
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ p_src = p_src + sizeof(u32) * QLC_83XX_FLASH_WRITE_MAX;
+ offset = offset + sizeof(u32) * QLC_83XX_FLASH_WRITE_MAX;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
+ char *buf, loff_t offset, size_t size)
+{
+ int i, ret, count;
+ unsigned char *p_cache, *p_src;
+
+ p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
+ if (!p_cache)
+ return -ENOMEM;
+
+ memcpy(p_cache, buf, size);
+ p_src = p_cache;
+ count = size / sizeof(u32);
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0) {
+ kfree(p_cache);
+ return -EIO;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_enable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ ret = qlcnic_83xx_flash_write32(adapter, offset, (u32 *)p_src);
+ if (ret) {
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ p_src = p_src + sizeof(u32);
+ offset = offset + sizeof(u32);
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ int ret;
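+ /* flash_mode is function-static: it persists across write() calls and
+  * is shared by all adapters, recording the command armed by the
+  * previous write.
+  */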
+ static int flash_mode;
+ unsigned long data;
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ if (!buf)
+ return QL_STATUS_INVALID_PARAM;
+
+ /* Payload writes carry raw flash data, not a hex command word; on a
+  * parse failure fall through to the data path for the armed mode.
+  */
+ ret = kstrtoul(buf, 16, &data);
+ if (ret)
+ data = 0;
+
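+ /* Protocol: a command word selects the mode. Sector erase executes
+  * immediately; bulk-write/write only arm flash_mode, and the payload
+  * of subsequent writes is consumed by the armed mode below.
+  */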
+ switch (data) {
+ case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
+ flash_mode = QLC_83XX_ERASE_MODE;
+ ret = qlcnic_83xx_erase_flash_sector(adapter, offset);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+ break;
+
+ case QLC_83XX_FLASH_BULK_WRITE_CMD:
+ flash_mode = QLC_83XX_BULK_WRITE_MODE;
+ break;
+
+ case QLC_83XX_FLASH_WRITE_CMD:
+ flash_mode = QLC_83XX_WRITE_MODE;
+ break;
+ default:
+ if (flash_mode == QLC_83XX_BULK_WRITE_MODE) {
+ ret = qlcnic_83xx_sysfs_flash_bulk_write(adapter, buf,
+ offset, size);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n",
+ __func__, __LINE__);
+ return -EIO;
+ }
+ }
+
+ if (flash_mode == QLC_83XX_WRITE_MODE) {
+ ret = qlcnic_83xx_sysfs_flash_write(adapter, buf,
+ offset, size);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n", __func__,
+ __LINE__);
+ return -EIO;
+ }
+ }
+ }
+
+ return size;
+}
+
static struct device_attribute dev_attr_bridged_mode = {
.attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
.show = qlcnic_show_bridged_mode,
@@ -960,6 +1196,13 @@ static struct bin_attribute bin_attr_pm_config = {
.write = qlcnic_sysfs_write_pm_config,
};
+static struct bin_attribute bin_attr_flash = {
+ .attr = {.name = "flash", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_83xx_sysfs_flash_read_handler,
+ .write = qlcnic_83xx_sysfs_flash_write_handler,
+};
+
void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
@@ -1048,10 +1291,18 @@ void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter)
void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter)
{
+ struct device *dev = &adapter->pdev->dev;
+
qlcnic_create_diag_entries(adapter);
+
+ if (sysfs_create_bin_file(&dev->kobj, &bin_attr_flash))
+ dev_info(dev, "failed to create flash sysfs entry\n");
}
void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter)
{
+ struct device *dev = &adapter->pdev->dev;
+
qlcnic_remove_diag_entries(adapter);
+ sysfs_remove_bin_file(&dev->kobj, &bin_attr_flash);
}
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index a131d7b5d2fe..7e8d68263963 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.31"
+#define DRV_VERSION "v1.00.00.32"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 6f316ab23257..0780e039b271 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -379,13 +379,13 @@ static int ql_get_settings(struct net_device *ndev,
ecmd->supported = SUPPORTED_10000baseT_Full;
ecmd->advertising = ADVERTISED_10000baseT_Full;
- ecmd->autoneg = AUTONEG_ENABLE;
ecmd->transceiver = XCVR_EXTERNAL;
if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
STS_LINK_TYPE_10GBASET) {
ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
ecmd->port = PORT_TP;
+ ecmd->autoneg = AUTONEG_ENABLE;
} else {
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index b13ab544a7eb..87463bc701a6 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -409,7 +409,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
(qdev->
func << CAM_OUT_FUNC_SHIFT) |
(0 << CAM_OUT_CQ_ID_SHIFT));
- if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
+ if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
cam_output |= CAM_OUT_RV;
/* route to NIC core */
ql_write32(qdev, MAC_ADDR_DATA, cam_output);
@@ -1211,8 +1211,6 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
netdev_alloc_skb(qdev->ndev,
SMALL_BUFFER_SIZE);
if (sbq_desc->p.skb == NULL) {
- netif_err(qdev, probe, qdev->ndev,
- "Couldn't get an skb.\n");
rx_ring->sbq_clean_idx = clean_idx;
return;
}
@@ -1434,11 +1432,13 @@ map_error:
}
/* Categorizing receive firmware frame errors */
-static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
+static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
+ struct rx_ring *rx_ring)
{
struct nic_stats *stats = &qdev->nic_stats;
stats->rx_err_count++;
+ rx_ring->rx_errors++;
switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
@@ -1474,6 +1474,12 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ put_page(lbq_desc->p.pg_chunk.page);
+ return;
+ }
napi->dev = qdev->ndev;
skb = napi_get_frags(napi);
@@ -1500,7 +1506,7 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rx_ring->cq_id);
if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, vlan_id);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
napi_gro_frags(napi);
}
@@ -1519,8 +1525,6 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
skb = netdev_alloc_skb(ndev, length);
if (!skb) {
- netif_err(qdev, drv, qdev->ndev,
- "Couldn't get an skb, need to unwind!.\n");
rx_ring->rx_dropped++;
put_page(lbq_desc->p.pg_chunk.page);
return;
@@ -1529,6 +1533,12 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
addr = lbq_desc->p.pg_chunk.va;
prefetch(addr);
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ goto err_out;
+ }
+
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
@@ -1578,7 +1588,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
skb_record_rx_queue(skb, rx_ring->cq_id);
if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, vlan_id);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(napi, skb);
else
@@ -1605,8 +1615,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
/* Allocate new_skb and copy */
new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
if (new_skb == NULL) {
- netif_err(qdev, probe, qdev->ndev,
- "No skb available, drop the packet.\n");
rx_ring->rx_dropped++;
return;
}
@@ -1614,6 +1622,13 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
memcpy(skb_put(new_skb, length), skb->data, length);
skb = new_skb;
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
/* loopback self test for ethtool */
if (test_bit(QL_SELFTEST, &qdev->flags)) {
ql_check_lb_frame(qdev, skb);
@@ -1676,7 +1691,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
skb_record_rx_queue(skb, rx_ring->cq_id);
if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, vlan_id);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(&rx_ring->napi, skb);
else
@@ -1919,6 +1934,13 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
return;
}
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
@@ -1981,7 +2003,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
rx_ring->rx_bytes += skb->len;
skb_record_rx_queue(skb, rx_ring->cq_id);
if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
- __vlan_hwaccel_put_tag(skb, vlan_id);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(&rx_ring->napi, skb);
else
@@ -2000,12 +2022,6 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
- return (unsigned long)length;
- }
-
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
/* The data and headers are split into
* separate buffers.
@@ -2285,7 +2301,7 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- if (features & NETIF_F_HW_VLAN_RX) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX) {
ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
NIC_RCV_CFG_VLAN_MATCH_AND_NON);
} else {
@@ -2300,10 +2316,10 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
* Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx.
*/
- if (features & NETIF_F_HW_VLAN_RX)
- features |= NETIF_F_HW_VLAN_TX;
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
else
- features &= ~NETIF_F_HW_VLAN_TX;
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features;
}
@@ -2313,7 +2329,7 @@ static int qlge_set_features(struct net_device *ndev,
{
netdev_features_t changed = ndev->features ^ features;
- if (changed & NETIF_F_HW_VLAN_RX)
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
qlge_vlan_mode(ndev, features);
return 0;
@@ -2332,7 +2348,7 @@ static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
return err;
}
-static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct ql_adapter *qdev = netdev_priv(ndev);
int status;
@@ -2363,7 +2379,7 @@ static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
return err;
}
-static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct ql_adapter *qdev = netdev_priv(ndev);
int status;
@@ -4671,9 +4687,9 @@ static int qlge_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN |
- NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
ndev->features = ndev->hw_features |
- NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->vlan_features = ndev->hw_features;
if (test_bit(QL_DMA64, &qdev->flags))
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 5b4103db70f5..e9dc84943cfc 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -224,11 +224,14 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
break;
}
+ if (limit < 0)
+ return -ETIMEDOUT;
+
return ioread16(ioaddr + MMRD);
}
/* Write a word data from PHY Chip */
-static void r6040_phy_write(void __iomem *ioaddr,
+static int r6040_phy_write(void __iomem *ioaddr,
int phy_addr, int reg, u16 val)
{
int limit = MAC_DEF_TIMEOUT;
@@ -243,6 +246,8 @@ static void r6040_phy_write(void __iomem *ioaddr,
if (!(cmd & MDIO_WRITE))
break;
}
+
+ return (limit < 0) ? -ETIMEDOUT : 0;
}
static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg)
@@ -261,9 +266,7 @@ static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- r6040_phy_write(ioaddr, phy_addr, reg, value);
-
- return 0;
+ return r6040_phy_write(ioaddr, phy_addr, reg, value);
}
static int r6040_mdiobus_reset(struct mii_bus *bus)
@@ -347,7 +350,6 @@ static int r6040_alloc_rxbufs(struct net_device *dev)
do {
skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
if (!skb) {
- netdev_err(dev, "failed to alloc skb for rx\n");
rc = -ENOMEM;
goto err_exit;
}
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index b62a32484f6a..7d1fb9ad1296 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -431,7 +431,7 @@ static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
cp->dev->stats.rx_bytes += skb->len;
if (opts2 & RxVlanTagged)
- __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
napi_gro_receive(&cp->napi, skb);
}
@@ -1438,7 +1438,7 @@ static int cp_set_features(struct net_device *dev, netdev_features_t features)
else
cp->cpcmd &= ~RxChkSum;
- if (features & NETIF_F_HW_VLAN_RX)
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
cp->cpcmd |= RxVlanOn;
else
cp->cpcmd &= ~RxVlanOn;
@@ -1955,14 +1955,14 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &cp_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
/* disabled by default until verified */
dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 1276ac71353a..3ccedeb8aba0 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2041,8 +2041,6 @@ keep_pkt:
netif_receive_skb (skb);
} else {
- if (net_ratelimit())
- netdev_warn(dev, "Memory squeeze, dropping packet\n");
dev->stats.rx_dropped++;
}
received++;
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index 9f2d416de750..d77d60ea8202 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -782,8 +782,6 @@ static void net_rx(struct net_device *dev)
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n",
- dev->name);
dev->stats.rx_dropped++;
goto done;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 28fb50a1e9c3..79c520b64fdd 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -47,7 +47,9 @@
#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
-#define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
+#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw"
+#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw"
+#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw"
#ifdef RTL8169_DEBUG
#define assert(expr) \
@@ -140,6 +142,8 @@ enum mac_version {
RTL_GIGA_MAC_VER_39,
RTL_GIGA_MAC_VER_40,
RTL_GIGA_MAC_VER_41,
+ RTL_GIGA_MAC_VER_42,
+ RTL_GIGA_MAC_VER_43,
RTL_GIGA_MAC_NONE = 0xff,
};
@@ -262,10 +266,16 @@ static const struct {
_R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
JUMBO_1K, true),
[RTL_GIGA_MAC_VER_40] =
- _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
+ _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_2,
JUMBO_9K, false),
[RTL_GIGA_MAC_VER_41] =
_R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_42] =
+ _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_3,
+ JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_43] =
+ _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2,
+ JUMBO_1K, true),
};
#undef _R
@@ -329,6 +339,7 @@ enum rtl_registers {
#define RXCFG_FIFO_SHIFT 13
/* No threshold before first PCI xfer */
#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
+#define RX_EARLY_OFF (1 << 11)
#define RXCFG_DMA_SHIFT 8
/* Unlimited maximum PCI burst. */
#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
@@ -513,6 +524,7 @@ enum rtl_register_content {
PMEnable = (1 << 0), /* Power Management Enable */
/* Config2 register p. 25 */
+ ClkReqEn = (1 << 7), /* Clock Request Enable */
MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
PCI_Clock_66MHz = 0x01,
PCI_Clock_33MHz = 0x00,
@@ -533,6 +545,7 @@ enum rtl_register_content {
Spi_en = (1 << 3),
LanWake = (1 << 1), /* LanWake enable/disable */
PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
+ ASPM_en = (1 << 0), /* ASPM enable */
/* TBICSR p.28 */
TBIReset = 0x80000000,
@@ -814,7 +827,9 @@ MODULE_FIRMWARE(FIRMWARE_8168F_2);
MODULE_FIRMWARE(FIRMWARE_8402_1);
MODULE_FIRMWARE(FIRMWARE_8411_1);
MODULE_FIRMWARE(FIRMWARE_8106E_1);
-MODULE_FIRMWARE(FIRMWARE_8168G_1);
+MODULE_FIRMWARE(FIRMWARE_8106E_2);
+MODULE_FIRMWARE(FIRMWARE_8168G_2);
+MODULE_FIRMWARE(FIRMWARE_8168G_3);
static void rtl_lock_work(struct rtl8169_private *tp)
{
@@ -1024,14 +1039,6 @@ static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
(RTL_R32(GPHY_OCP) & 0xffff) : ~0;
}
-static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
-{
- int val;
-
- val = r8168_phy_ocp_read(tp, reg);
- r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
-}
-
static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -1077,6 +1084,21 @@ static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
}
+static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value)
+{
+ if (reg == 0x1f) {
+ tp->ocp_base = value << 4;
+ return;
+ }
+
+ r8168_mac_ocp_write(tp, tp->ocp_base + reg, value);
+}
+
+static int mac_mcu_read(struct rtl8169_private *tp, int reg)
+{
+ return r8168_mac_ocp_read(tp, tp->ocp_base + reg);
+}
+
DECLARE_RTL_COND(rtl_phyar_cond)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -1771,16 +1793,17 @@ static void __rtl8169_set_features(struct net_device *dev,
netdev_features_t changed = features ^ dev->features;
void __iomem *ioaddr = tp->mmio_addr;
- if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
+ if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_RX)))
return;
- if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
+ if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) {
if (features & NETIF_F_RXCSUM)
tp->cp_cmd |= RxChkSum;
else
tp->cp_cmd &= ~RxChkSum;
- if (dev->features & NETIF_F_HW_VLAN_RX)
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
tp->cp_cmd |= RxVlan;
else
tp->cp_cmd &= ~RxVlan;
@@ -1820,7 +1843,7 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
u32 opts2 = le32_to_cpu(desc->opts2);
if (opts2 & RxVlanTag)
- __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
}
static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2028,6 +2051,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
int mac_version;
} mac_info[] = {
/* 8168G family. */
+ { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 },
{ 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
{ 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
@@ -2116,6 +2140,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
netif_notice(tp, probe, dev,
"unknown MAC, using family default\n");
tp->mac_version = default_version;
+ } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
+ tp->mac_version = tp->mii.supports_gmii ?
+ RTL_GIGA_MAC_VER_42 :
+ RTL_GIGA_MAC_VER_43;
}
}
@@ -2142,9 +2170,7 @@ static void rtl_writephy_batch(struct rtl8169_private *tp,
#define PHY_DATA_OR 0x10000000
#define PHY_DATA_AND 0x20000000
#define PHY_BJMPN 0x30000000
-#define PHY_READ_EFUSE 0x40000000
-#define PHY_READ_MAC_BYTE 0x50000000
-#define PHY_WRITE_MAC_BYTE 0x60000000
+#define PHY_MDIO_CHG 0x40000000
#define PHY_CLEAR_READCOUNT 0x70000000
#define PHY_WRITE 0x80000000
#define PHY_READCOUNT_EQ_SKIP 0x90000000
@@ -2153,7 +2179,6 @@ static void rtl_writephy_batch(struct rtl8169_private *tp,
#define PHY_WRITE_PREVIOUS 0xc0000000
#define PHY_SKIPN 0xd0000000
#define PHY_DELAY_MS 0xe0000000
-#define PHY_WRITE_ERI_WORD 0xf0000000
struct fw_info {
u32 magic;
@@ -2230,7 +2255,7 @@ static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
case PHY_READ:
case PHY_DATA_OR:
case PHY_DATA_AND:
- case PHY_READ_EFUSE:
+ case PHY_MDIO_CHG:
case PHY_CLEAR_READCOUNT:
case PHY_WRITE:
case PHY_WRITE_PREVIOUS:
@@ -2261,9 +2286,6 @@ static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
}
break;
- case PHY_READ_MAC_BYTE:
- case PHY_WRITE_MAC_BYTE:
- case PHY_WRITE_ERI_WORD:
default:
netif_err(tp, ifup, tp->dev,
"Invalid action 0x%08x\n", action);
@@ -2294,10 +2316,13 @@ out:
static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
+ struct mdio_ops org, *ops = &tp->mdio_ops;
u32 predata, count;
size_t index;
predata = count = 0;
+ org.write = ops->write;
+ org.read = ops->read;
for (index = 0; index < pa->size; ) {
u32 action = le32_to_cpu(pa->code[index]);
@@ -2324,8 +2349,15 @@ static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
case PHY_BJMPN:
index -= regno;
break;
- case PHY_READ_EFUSE:
- predata = rtl8168d_efuse_read(tp, regno);
+ case PHY_MDIO_CHG:
+ if (data == 0) {
+ ops->write = org.write;
+ ops->read = org.read;
+ } else if (data == 1) {
+ ops->write = mac_mcu_write;
+ ops->read = mac_mcu_read;
+ }
+
index++;
break;
case PHY_CLEAR_READCOUNT:
@@ -2361,13 +2393,13 @@ static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
index++;
break;
- case PHY_READ_MAC_BYTE:
- case PHY_WRITE_MAC_BYTE:
- case PHY_WRITE_ERI_WORD:
default:
BUG();
}
}
+
+ ops->write = org.write;
+ ops->read = org.read;
}
static void rtl_release_firmware(struct rtl8169_private *tp)
@@ -3368,51 +3400,68 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
{
- static const u16 mac_ocp_patch[] = {
- 0xe008, 0xe01b, 0xe01d, 0xe01f,
- 0xe021, 0xe023, 0xe025, 0xe027,
- 0x49d2, 0xf10d, 0x766c, 0x49e2,
- 0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
-
- 0x77c0, 0x4870, 0x9fc0, 0x1ea0,
- 0xc707, 0x8ee1, 0x9d6c, 0xc603,
- 0xbe00, 0xb416, 0x0076, 0xe86c,
- 0xc602, 0xbe00, 0x0000, 0xc602,
-
- 0xbe00, 0x0000, 0xc602, 0xbe00,
- 0x0000, 0xc602, 0xbe00, 0x0000,
- 0xc602, 0xbe00, 0x0000, 0xc602,
- 0xbe00, 0x0000, 0xc602, 0xbe00,
-
- 0x0000, 0x0000, 0x0000, 0x0000
- };
- u32 i;
+ rtl_apply_firmware(tp);
- /* Patch code for GPHY reset */
- for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
- r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
- r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
- r8168_mac_ocp_write(tp, 0xfc28, 0x0075);
+ rtl_writephy(tp, 0x1f, 0x0a46);
+ if (rtl_readphy(tp, 0x10) & 0x0100) {
+ rtl_writephy(tp, 0x1f, 0x0bcc);
+ rtl_w1w0_phy(tp, 0x12, 0x0000, 0x8000);
+ } else {
+ rtl_writephy(tp, 0x1f, 0x0bcc);
+ rtl_w1w0_phy(tp, 0x12, 0x8000, 0x0000);
+ }
- rtl_apply_firmware(tp);
+ rtl_writephy(tp, 0x1f, 0x0a46);
+ if (rtl_readphy(tp, 0x13) & 0x0100) {
+ rtl_writephy(tp, 0x1f, 0x0c41);
+ rtl_w1w0_phy(tp, 0x15, 0x0002, 0x0000);
+ } else {
+ rtl_writephy(tp, 0x1f, 0x0c41);
+ rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0002);
+ }
- if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
- rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
- else
- rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);
+ /* Enable PHY auto speed down */
+ rtl_writephy(tp, 0x1f, 0x0a44);
+ rtl_w1w0_phy(tp, 0x11, 0x000c, 0x0000);
+
+ rtl_writephy(tp, 0x1f, 0x0bcc);
+ rtl_w1w0_phy(tp, 0x14, 0x0100, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0a44);
+ rtl_w1w0_phy(tp, 0x11, 0x00c0, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0a43);
+ rtl_writephy(tp, 0x13, 0x8084);
+ rtl_w1w0_phy(tp, 0x14, 0x0000, 0x6000);
+ rtl_w1w0_phy(tp, 0x10, 0x1003, 0x0000);
+
+ /* EEE auto-fallback function */
+ rtl_writephy(tp, 0x1f, 0x0a4b);
+ rtl_w1w0_phy(tp, 0x11, 0x0004, 0x0000);
+
+ /* Enable UC LPF tune function */
+ rtl_writephy(tp, 0x1f, 0x0a43);
+ rtl_writephy(tp, 0x13, 0x8012);
+ rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
- if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
- rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
- else
- rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);
+ rtl_writephy(tp, 0x1f, 0x0c42);
+ rtl_w1w0_phy(tp, 0x11, 0x4000, 0x2000);
- rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
- rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);
+ /* Improve SWR Efficiency */
+ rtl_writephy(tp, 0x1f, 0x0bcd);
+ rtl_writephy(tp, 0x14, 0x5065);
+ rtl_writephy(tp, 0x14, 0xd065);
+ rtl_writephy(tp, 0x1f, 0x0bc8);
+ rtl_writephy(tp, 0x11, 0x5655);
+ rtl_writephy(tp, 0x1f, 0x0bcd);
+ rtl_writephy(tp, 0x14, 0x1065);
+ rtl_writephy(tp, 0x14, 0x9065);
+ rtl_writephy(tp, 0x14, 0x1065);
- r8168_phy_ocp_write(tp, 0xa436, 0x8012);
- rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
- rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
+static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
+{
+ rtl_apply_firmware(tp);
}
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
@@ -3600,6 +3649,10 @@ static void rtl_hw_phy_config(struct net_device *dev)
case RTL_GIGA_MAC_VER_40:
rtl8168g_1_hw_phy_config(tp);
break;
+ case RTL_GIGA_MAC_VER_42:
+ case RTL_GIGA_MAC_VER_43:
+ rtl8168g_2_hw_phy_config(tp);
+ break;
case RTL_GIGA_MAC_VER_41:
default:
@@ -3808,6 +3861,8 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)
break;
case RTL_GIGA_MAC_VER_40:
case RTL_GIGA_MAC_VER_41:
+ case RTL_GIGA_MAC_VER_42:
+ case RTL_GIGA_MAC_VER_43:
ops->write = r8168g_mdio_write;
ops->read = r8168g_mdio_read;
break;
@@ -3818,6 +3873,30 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)
}
}
+static void rtl_speed_down(struct rtl8169_private *tp)
+{
+ u32 adv;
+ int lpa;
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+ lpa = rtl_readphy(tp, MII_LPA);
+
+ if (lpa & (LPA_10HALF | LPA_10FULL))
+ adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
+ else if (lpa & (LPA_100HALF | LPA_100FULL))
+ adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
+ else
+ adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+ (tp->mii.supports_gmii ?
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full : 0);
+
+ rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
+ adv);
+}
+
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -3835,6 +3914,8 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_39:
case RTL_GIGA_MAC_VER_40:
case RTL_GIGA_MAC_VER_41:
+ case RTL_GIGA_MAC_VER_42:
+ case RTL_GIGA_MAC_VER_43:
RTL_W32(RxConfig, RTL_R32(RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
break;
@@ -3848,9 +3929,7 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
return false;
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, MII_BMCR, 0x0000);
-
+ rtl_speed_down(tp);
rtl_wol_suspend_quirk(tp);
return true;
@@ -3944,6 +4023,8 @@ static void r8168_phy_power_down(struct rtl8169_private *tp)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_32:
case RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
break;
@@ -4005,6 +4086,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_33:
RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
break;
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000,
+ 0xfc000000, ERIAR_EXGMAC);
+ break;
}
}
@@ -4022,6 +4108,11 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_33:
RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
break;
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000,
+ 0x00000000, ERIAR_EXGMAC);
+ break;
}
r8168_phy_power_up(tp);
@@ -4058,6 +4149,7 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_30:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_39:
+ case RTL_GIGA_MAC_VER_43:
ops->down = r810x_pll_power_down;
ops->up = r810x_pll_power_up;
break;
@@ -4085,6 +4177,7 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_38:
case RTL_GIGA_MAC_VER_40:
case RTL_GIGA_MAC_VER_41:
+ case RTL_GIGA_MAC_VER_42:
ops->down = r8168_pll_power_down;
ops->up = r8168_pll_power_up;
break;
@@ -4127,6 +4220,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_34:
RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ case RTL_GIGA_MAC_VER_42:
+ case RTL_GIGA_MAC_VER_43:
+ RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ break;
default:
RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
break;
@@ -4283,6 +4382,8 @@ static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
*/
case RTL_GIGA_MAC_VER_40:
case RTL_GIGA_MAC_VER_41:
+ case RTL_GIGA_MAC_VER_42:
+ case RTL_GIGA_MAC_VER_43:
default:
ops->disable = NULL;
ops->enable = NULL;
@@ -4390,6 +4491,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
tp->mac_version == RTL_GIGA_MAC_VER_37 ||
tp->mac_version == RTL_GIGA_MAC_VER_40 ||
tp->mac_version == RTL_GIGA_MAC_VER_41 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_42 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_43 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
@@ -5105,6 +5208,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
+ RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
+
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5116,6 +5221,7 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC);
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
@@ -5127,7 +5233,26 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
/* Adjust EEE LED frequency */
RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
- rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
+}
+
+static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ static const struct ephy_info e_info_8168g_2[] = {
+ { 0x00, 0x0000, 0x0008 },
+ { 0x0c, 0x3df0, 0x0200 },
+ { 0x19, 0xffff, 0xfc00 },
+ { 0x1e, 0xffff, 0x20eb }
+ };
+
+ rtl_hw_start_8168g_1(tp);
+
+ /* disable ASPM and clock request before accessing ephy */
+ RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
+ RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+ rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2));
}
static void rtl_hw_start_8168(struct net_device *dev)
@@ -5155,10 +5280,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
rtl_set_rx_tx_desc_registers(tp, ioaddr);
- rtl_set_rx_mode(dev);
-
- RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
- (InterFrameGap << TxInterFrameGapShift));
+ rtl_set_rx_tx_config_registers(tp);
RTL_R8(IntrMask);
@@ -5235,6 +5357,9 @@ static void rtl_hw_start_8168(struct net_device *dev)
case RTL_GIGA_MAC_VER_41:
rtl_hw_start_8168g_1(tp);
break;
+ case RTL_GIGA_MAC_VER_42:
+ rtl_hw_start_8168g_2(tp);
+ break;
default:
printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
@@ -5242,9 +5367,11 @@ static void rtl_hw_start_8168(struct net_device *dev)
break;
}
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
- RTL_W8(Cfg9346, Cfg9346_Lock);
+ rtl_set_rx_mode(dev);
RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
@@ -5402,6 +5529,17 @@ static void rtl_hw_start_8101(struct net_device *dev)
RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ rtl_set_rx_max_size(ioaddr, rx_buf_sz);
+
+ tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+
+ rtl_set_rx_tx_desc_registers(tp, ioaddr);
+
+ rtl_set_rx_tx_config_registers(tp);
+
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_07:
rtl_hw_start_8102e_1(tp);
@@ -5429,28 +5567,21 @@ static void rtl_hw_start_8101(struct net_device *dev)
case RTL_GIGA_MAC_VER_39:
rtl_hw_start_8106(tp);
break;
+ case RTL_GIGA_MAC_VER_43:
+ rtl_hw_start_8168g_2(tp);
+ break;
}
RTL_W8(Cfg9346, Cfg9346_Lock);
- RTL_W8(MaxTxPacketSize, TxPacketMax);
-
- rtl_set_rx_max_size(ioaddr, rx_buf_sz);
-
- tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
- RTL_W16(CPlusCmd, tp->cp_cmd);
-
RTL_W16(IntrMitigate, 0x0000);
- rtl_set_rx_tx_desc_registers(tp, ioaddr);
-
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
- rtl_set_rx_tx_config_registers(tp);
-
- RTL_R8(IntrMask);
rtl_set_rx_mode(dev);
+ RTL_R8(IntrMask);
+
RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
@@ -5765,6 +5896,14 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
goto err_stop_0;
}
+ /* 8168evl does not automatically pad to minimum length. */
+ if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
+ skb->len < ETH_ZLEN)) {
+ if (skb_padto(skb, ETH_ZLEN))
+ goto err_update_stats;
+ skb_put(skb, ETH_ZLEN - skb->len);
+ }
+
if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
goto err_stop_0;
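
The hunk above works around the 8168evl's lack of hardware padding: frames shorter than ETH_ZLEN (60 bytes, excluding the FCS) must be zero-padded in software before transmission. A userspace sketch of the same rule:

#include <stdio.h>
#include <string.h>

#define ETH_ZLEN 60

static size_t pad_frame(unsigned char *buf, size_t len)
{
	if (len < ETH_ZLEN) {
		memset(buf + len, 0, ETH_ZLEN - len);	/* zero the tail */
		len = ETH_ZLEN;
	}
	return len;
}

int main(void)
{
	unsigned char frame[ETH_ZLEN] = { 0xff };

	printf("padded length: %zu\n", pad_frame(frame, 42));	/* 60 */
	return 0;
}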
@@ -5836,6 +5975,7 @@ err_dma_1:
rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
dev_kfree_skb(skb);
+err_update_stats:
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
@@ -6722,6 +6862,8 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_40:
case RTL_GIGA_MAC_VER_41:
+ case RTL_GIGA_MAC_VER_42:
+ case RTL_GIGA_MAC_VER_43:
rtl_hw_init_8168g(tp);
break;
@@ -6904,16 +7046,17 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* don't enable SG, IP_CSUM and TSO by default - it might not work
* properly for all devices */
dev->features |= NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HIGHDMA;
if (tp->mac_version == RTL_GIGA_MAC_VER_05)
/* 8110SCd requires hardware Rx VLAN - disallow toggling */
- dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
+ dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 24c2305d7948..bed9841d728c 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -8,7 +8,8 @@ config SH_ETH
(CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \
- CPU_SUBTYPE_SH7757 || ARCH_R8A7740 || ARCH_R8A7779)
+ CPU_SUBTYPE_SH7757 || ARCH_R8A7740 || \
+ ARCH_R8A7778 || ARCH_R8A7779)
select CRC32
select NET_CORE
select MII
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index bf5e3cf97c4d..33dc6f2418f2 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2,7 +2,8 @@
* SuperH Ethernet device driver
*
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
- * Copyright (C) 2008-2012 Renesas Solutions Corp.
+ * Copyright (C) 2008-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -49,6 +50,269 @@
NETIF_MSG_RX_ERR| \
NETIF_MSG_TX_ERR)
+static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [EDSR] = 0x0000,
+ [EDMR] = 0x0400,
+ [EDTRR] = 0x0408,
+ [EDRRR] = 0x0410,
+ [EESR] = 0x0428,
+ [EESIPR] = 0x0430,
+ [TDLAR] = 0x0010,
+ [TDFAR] = 0x0014,
+ [TDFXR] = 0x0018,
+ [TDFFR] = 0x001c,
+ [RDLAR] = 0x0030,
+ [RDFAR] = 0x0034,
+ [RDFXR] = 0x0038,
+ [RDFFR] = 0x003c,
+ [TRSCER] = 0x0438,
+ [RMFCR] = 0x0440,
+ [TFTR] = 0x0448,
+ [FDR] = 0x0450,
+ [RMCR] = 0x0458,
+ [RPADIR] = 0x0460,
+ [FCFTR] = 0x0468,
+ [CSMR] = 0x04E4,
+
+ [ECMR] = 0x0500,
+ [ECSR] = 0x0510,
+ [ECSIPR] = 0x0518,
+ [PIR] = 0x0520,
+ [PSR] = 0x0528,
+ [PIPR] = 0x052c,
+ [RFLR] = 0x0508,
+ [APR] = 0x0554,
+ [MPR] = 0x0558,
+ [PFTCR] = 0x055c,
+ [PFRCR] = 0x0560,
+ [TPAUSER] = 0x0564,
+ [GECMR] = 0x05b0,
+ [BCULR] = 0x05b4,
+ [MAHR] = 0x05c0,
+ [MALR] = 0x05c8,
+ [TROCR] = 0x0700,
+ [CDCR] = 0x0708,
+ [LCCR] = 0x0710,
+ [CEFCR] = 0x0740,
+ [FRECR] = 0x0748,
+ [TSFRCR] = 0x0750,
+ [TLFRCR] = 0x0758,
+ [RFCR] = 0x0760,
+ [CERCR] = 0x0768,
+ [CEECR] = 0x0770,
+ [MAFCR] = 0x0778,
+ [RMII_MII] = 0x0790,
+
+ [ARSTR] = 0x0000,
+ [TSU_CTRST] = 0x0004,
+ [TSU_FWEN0] = 0x0010,
+ [TSU_FWEN1] = 0x0014,
+ [TSU_FCM] = 0x0018,
+ [TSU_BSYSL0] = 0x0020,
+ [TSU_BSYSL1] = 0x0024,
+ [TSU_PRISL0] = 0x0028,
+ [TSU_PRISL1] = 0x002c,
+ [TSU_FWSL0] = 0x0030,
+ [TSU_FWSL1] = 0x0034,
+ [TSU_FWSLC] = 0x0038,
+ [TSU_QTAG0] = 0x0040,
+ [TSU_QTAG1] = 0x0044,
+ [TSU_FWSR] = 0x0050,
+ [TSU_FWINMK] = 0x0054,
+ [TSU_ADQT0] = 0x0048,
+ [TSU_ADQT1] = 0x004c,
+ [TSU_VTAG0] = 0x0058,
+ [TSU_VTAG1] = 0x005c,
+ [TSU_ADSBSY] = 0x0060,
+ [TSU_TEN] = 0x0064,
+ [TSU_POST1] = 0x0070,
+ [TSU_POST2] = 0x0074,
+ [TSU_POST3] = 0x0078,
+ [TSU_POST4] = 0x007c,
+ [TSU_ADRH0] = 0x0100,
+ [TSU_ADRL0] = 0x0104,
+ [TSU_ADRH31] = 0x01f8,
+ [TSU_ADRL31] = 0x01fc,
+
+ [TXNLCR0] = 0x0080,
+ [TXALCR0] = 0x0084,
+ [RXNLCR0] = 0x0088,
+ [RXALCR0] = 0x008c,
+ [FWNLCR0] = 0x0090,
+ [FWALCR0] = 0x0094,
+ [TXNLCR1] = 0x00a0,
+ [TXALCR1] = 0x00a0,
+ [RXNLCR1] = 0x00a8,
+ [RXALCR1] = 0x00ac,
+ [FWNLCR1] = 0x00b0,
+ [FWALCR1] = 0x00b4,
+};
+
+static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [ECMR] = 0x0300,
+ [RFLR] = 0x0308,
+ [ECSR] = 0x0310,
+ [ECSIPR] = 0x0318,
+ [PIR] = 0x0320,
+ [PSR] = 0x0328,
+ [RDMLR] = 0x0340,
+ [IPGR] = 0x0350,
+ [APR] = 0x0354,
+ [MPR] = 0x0358,
+ [RFCF] = 0x0360,
+ [TPAUSER] = 0x0364,
+ [TPAUSECR] = 0x0368,
+ [MAHR] = 0x03c0,
+ [MALR] = 0x03c8,
+ [TROCR] = 0x03d0,
+ [CDCR] = 0x03d4,
+ [LCCR] = 0x03d8,
+ [CNDCR] = 0x03dc,
+ [CEFCR] = 0x03e4,
+ [FRECR] = 0x03e8,
+ [TSFRCR] = 0x03ec,
+ [TLFRCR] = 0x03f0,
+ [RFCR] = 0x03f4,
+ [MAFCR] = 0x03f8,
+
+ [EDMR] = 0x0200,
+ [EDTRR] = 0x0208,
+ [EDRRR] = 0x0210,
+ [TDLAR] = 0x0218,
+ [RDLAR] = 0x0220,
+ [EESR] = 0x0228,
+ [EESIPR] = 0x0230,
+ [TRSCER] = 0x0238,
+ [RMFCR] = 0x0240,
+ [TFTR] = 0x0248,
+ [FDR] = 0x0250,
+ [RMCR] = 0x0258,
+ [TFUCR] = 0x0264,
+ [RFOCR] = 0x0268,
+ [FCFTR] = 0x0270,
+ [TRIMD] = 0x027c,
+};
+
+static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [ECMR] = 0x0100,
+ [RFLR] = 0x0108,
+ [ECSR] = 0x0110,
+ [ECSIPR] = 0x0118,
+ [PIR] = 0x0120,
+ [PSR] = 0x0128,
+ [RDMLR] = 0x0140,
+ [IPGR] = 0x0150,
+ [APR] = 0x0154,
+ [MPR] = 0x0158,
+ [TPAUSER] = 0x0164,
+ [RFCF] = 0x0160,
+ [TPAUSECR] = 0x0168,
+ [BCFRR] = 0x016c,
+ [MAHR] = 0x01c0,
+ [MALR] = 0x01c8,
+ [TROCR] = 0x01d0,
+ [CDCR] = 0x01d4,
+ [LCCR] = 0x01d8,
+ [CNDCR] = 0x01dc,
+ [CEFCR] = 0x01e4,
+ [FRECR] = 0x01e8,
+ [TSFRCR] = 0x01ec,
+ [TLFRCR] = 0x01f0,
+ [RFCR] = 0x01f4,
+ [MAFCR] = 0x01f8,
+ [RTRATE] = 0x01fc,
+
+ [EDMR] = 0x0000,
+ [EDTRR] = 0x0008,
+ [EDRRR] = 0x0010,
+ [TDLAR] = 0x0018,
+ [RDLAR] = 0x0020,
+ [EESR] = 0x0028,
+ [EESIPR] = 0x0030,
+ [TRSCER] = 0x0038,
+ [RMFCR] = 0x0040,
+ [TFTR] = 0x0048,
+ [FDR] = 0x0050,
+ [RMCR] = 0x0058,
+ [TFUCR] = 0x0064,
+ [RFOCR] = 0x0068,
+ [FCFTR] = 0x0070,
+ [RPADIR] = 0x0078,
+ [TRIMD] = 0x007c,
+ [RBWAR] = 0x00c8,
+ [RDFAR] = 0x00cc,
+ [TBRAR] = 0x00d4,
+ [TDFAR] = 0x00d8,
+};
+
+static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [ECMR] = 0x0160,
+ [ECSR] = 0x0164,
+ [ECSIPR] = 0x0168,
+ [PIR] = 0x016c,
+ [MAHR] = 0x0170,
+ [MALR] = 0x0174,
+ [RFLR] = 0x0178,
+ [PSR] = 0x017c,
+ [TROCR] = 0x0180,
+ [CDCR] = 0x0184,
+ [LCCR] = 0x0188,
+ [CNDCR] = 0x018c,
+ [CEFCR] = 0x0194,
+ [FRECR] = 0x0198,
+ [TSFRCR] = 0x019c,
+ [TLFRCR] = 0x01a0,
+ [RFCR] = 0x01a4,
+ [MAFCR] = 0x01a8,
+ [IPGR] = 0x01b4,
+ [APR] = 0x01b8,
+ [MPR] = 0x01bc,
+ [TPAUSER] = 0x01c4,
+ [BCFR] = 0x01cc,
+
+ [ARSTR] = 0x0000,
+ [TSU_CTRST] = 0x0004,
+ [TSU_FWEN0] = 0x0010,
+ [TSU_FWEN1] = 0x0014,
+ [TSU_FCM] = 0x0018,
+ [TSU_BSYSL0] = 0x0020,
+ [TSU_BSYSL1] = 0x0024,
+ [TSU_PRISL0] = 0x0028,
+ [TSU_PRISL1] = 0x002c,
+ [TSU_FWSL0] = 0x0030,
+ [TSU_FWSL1] = 0x0034,
+ [TSU_FWSLC] = 0x0038,
+ [TSU_QTAGM0] = 0x0040,
+ [TSU_QTAGM1] = 0x0044,
+ [TSU_ADQT0] = 0x0048,
+ [TSU_ADQT1] = 0x004c,
+ [TSU_FWSR] = 0x0050,
+ [TSU_FWINMK] = 0x0054,
+ [TSU_ADSBSY] = 0x0060,
+ [TSU_TEN] = 0x0064,
+ [TSU_POST1] = 0x0070,
+ [TSU_POST2] = 0x0074,
+ [TSU_POST3] = 0x0078,
+ [TSU_POST4] = 0x007c,
+
+ [TXNLCR0] = 0x0080,
+ [TXALCR0] = 0x0084,
+ [RXNLCR0] = 0x0088,
+ [RXALCR0] = 0x008c,
+ [FWNLCR0] = 0x0090,
+ [FWALCR0] = 0x0094,
+ [TXNLCR1] = 0x00a0,
+ [TXALCR1] = 0x00a0,
+ [RXNLCR1] = 0x00a8,
+ [RXALCR1] = 0x00ac,
+ [FWNLCR1] = 0x00b0,
+ [FWALCR1] = 0x00b4,
+
+ [TSU_ADRH0] = 0x0100,
+ [TSU_ADRL0] = 0x0104,
+ [TSU_ADRL31] = 0x01fc,
+};
+
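These per-SoC tables let one driver body address the same logical register at different offsets; sh_eth_read()/sh_eth_write() index the table chosen at probe time. A minimal standalone model of that dispatch (register names and offsets here are illustrative only):

#include <stdint.h>
#include <stdio.h>

enum { DEMO_ECMR, DEMO_ECSR, DEMO_MAX_REG };

static const uint16_t demo_offset_gigabit[DEMO_MAX_REG] = {
	[DEMO_ECMR]	= 0x0500,
	[DEMO_ECSR]	= 0x0510,
};

/* Read a register through the offset table, as sh_eth_read() does. */
static uint32_t demo_read(uint8_t *base, const uint16_t *offs, int reg)
{
	return *(volatile uint32_t *)(base + offs[reg]);
}

int main(void)
{
	static uint32_t fake_mmio[0x800 / 4];	/* stands in for ioremapped space */

	fake_mmio[0x0500 / 4] = 0x1234;
	printf("ECMR = 0x%x\n",
	       demo_read((uint8_t *)fake_mmio, demo_offset_gigabit, DEMO_ECMR));
	return 0;
}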
#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
defined(CONFIG_CPU_SUBTYPE_SH7763) || \
defined(CONFIG_ARCH_R8A7740)
@@ -78,7 +342,7 @@ static void sh_eth_select_mii(struct net_device *ndev)
#endif
/* There is CPU dependent code */
-#if defined(CONFIG_CPU_SUBTYPE_SH7724) || defined(CONFIG_ARCH_R8A7779)
+#if defined(CONFIG_ARCH_R8A7778) || defined(CONFIG_ARCH_R8A7779)
#define SH_ETH_RESET_DEFAULT 1
static void sh_eth_set_duplex(struct net_device *ndev)
{
@@ -93,18 +357,60 @@ static void sh_eth_set_duplex(struct net_device *ndev)
static void sh_eth_set_rate(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- unsigned int bits = ECMR_RTM;
-#if defined(CONFIG_ARCH_R8A7779)
- bits |= ECMR_ELB;
-#endif
+ switch (mdp->speed) {
+ case 10: /* 10BASE */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
+ break;
+ case 100: /* 100BASE */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
+ break;
+ default:
+ break;
+ }
+}
+
+/* R8A7778/9 */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .set_duplex = sh_eth_set_duplex,
+ .set_rate = sh_eth_set_rate,
+
+ .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
+ .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
+ .eesipr_value = 0x01ff009f,
+
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+ .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
+ EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+ .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .hw_swap = 1,
+};
+#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
+#define SH_ETH_RESET_DEFAULT 1
+static void sh_eth_set_duplex(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ if (mdp->duplex) /* Full */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
+ else /* Half */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
+}
+
+static void sh_eth_set_rate(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
switch (mdp->speed) {
case 10: /* 10BASE */
- sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~bits, ECMR);
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
break;
 case 100: /* 100BASE */
- sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | bits, ECMR);
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
break;
default:
break;
@@ -592,7 +898,7 @@ static int sh_eth_check_reset(struct net_device *ndev)
cnt--;
}
if (cnt < 0) {
- printk(KERN_ERR "Device reset fail\n");
+ pr_err("Device reset fail\n");
ret = -ETIMEDOUT;
}
return ret;
@@ -908,11 +1214,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
/* Allocate all Rx descriptors. */
rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
- GFP_KERNEL);
-
+ GFP_KERNEL);
if (!mdp->rx_ring) {
- dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
- rx_ringsize);
ret = -ENOMEM;
goto desc_ring_free;
}
@@ -922,10 +1225,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
/* Allocate all Tx descriptors. */
tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
- GFP_KERNEL);
+ GFP_KERNEL);
if (!mdp->tx_ring) {
- dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
- tx_ringsize);
ret = -ENOMEM;
goto desc_ring_free;
}
@@ -1216,10 +1517,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
if (felic_stat & ECSR_LCHNG) {
/* Link Changed */
if (mdp->cd->no_psr || mdp->no_ether_link) {
- if (mdp->link == PHY_DOWN)
- link_stat = 0;
- else
- link_stat = PHY_ST_LINK;
+ goto ignore_link;
} else {
link_stat = (sh_eth_read(ndev, PSR));
if (mdp->ether_link_active_low)
@@ -1242,6 +1540,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
}
}
+ignore_link:
if (intr_status & EESR_TWB) {
 /* Write back end. Unused write back interrupt */
if (intr_status & EESR_TABT) /* Transmit Abort int */
@@ -1326,12 +1625,18 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_cpu_data *cd = mdp->cd;
irqreturn_t ret = IRQ_NONE;
- u32 intr_status = 0;
+ unsigned long intr_status;
spin_lock(&mdp->lock);
- /* Get interrpt stat */
+ /* Get interrupt status */
intr_status = sh_eth_read(ndev, EESR);
+ /* Mask it with the interrupt mask, forcing ECI interrupt to be always
+ * enabled since it's the one that comes through regardless of the mask,
+ * and we need to fully handle it in sh_eth_error() in order to quench
+ * it as it doesn't get cleared by just writing 1 to the ECI bit...
+ */
+ intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI;
/* Clear interrupt */
if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
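
A standalone model of the masking idiom introduced above: the raw status word is ANDed with the enabled-interrupt mask, with the ECI bit forced on because the hardware reports it regardless of EESIPR. The bit values below are illustrative, not the real EESR layout:

#include <stdio.h>

#define DEMO_ECI	0x00400000UL	/* always delivered by the hardware */
#define DEMO_RX		0x00040000UL

int main(void)
{
	unsigned long raw     = DEMO_ECI | DEMO_RX;	/* hardware status   */
	unsigned long enabled = DEMO_RX;		/* EESIPR contents   */
	unsigned long status  = raw & (enabled | DEMO_ECI);

	printf("handled status: 0x%lx\n", status);	/* keeps both bits   */
	return 0;
}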
@@ -1373,7 +1678,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
struct phy_device *phydev = mdp->phydev;
int new_state = 0;
- if (phydev->link != PHY_DOWN) {
+ if (phydev->link) {
if (phydev->duplex != mdp->duplex) {
new_state = 1;
mdp->duplex = phydev->duplex;
@@ -1387,17 +1692,21 @@ static void sh_eth_adjust_link(struct net_device *ndev)
if (mdp->cd->set_rate)
mdp->cd->set_rate(ndev);
}
- if (mdp->link == PHY_DOWN) {
+ if (!mdp->link) {
sh_eth_write(ndev,
(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
new_state = 1;
mdp->link = phydev->link;
+ if (mdp->cd->no_psr || mdp->no_ether_link)
+ sh_eth_rcv_snd_enable(ndev);
}
} else if (mdp->link) {
new_state = 1;
- mdp->link = PHY_DOWN;
+ mdp->link = 0;
mdp->speed = 0;
mdp->duplex = -1;
+ if (mdp->cd->no_psr || mdp->no_ether_link)
+ sh_eth_rcv_snd_disable(ndev);
}
if (new_state && netif_msg_link(mdp))
@@ -1414,7 +1723,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
mdp->mii_bus->id , mdp->phy_id);
- mdp->link = PHY_DOWN;
+ mdp->link = 0;
mdp->speed = 0;
mdp->duplex = -1;
@@ -2139,7 +2448,8 @@ static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
return TSU_VTAG1;
}
-static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
+static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int vtag_reg_index = sh_eth_get_vtag_index(mdp);
@@ -2169,7 +2479,8 @@ static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
return 0;
}
-static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int vtag_reg_index = sh_eth_get_vtag_index(mdp);
@@ -2220,7 +2531,6 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
{
- struct sh_eth_private *mdp = netdev_priv(ndev);
struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
/* unregister mdio bus */
@@ -2229,15 +2539,9 @@ static int sh_mdio_release(struct net_device *ndev)
/* remove mdio bus info from net_device */
dev_set_drvdata(&ndev->dev, NULL);
- /* free interrupts memory */
- kfree(bus->irq);
-
/* free bitbang info */
free_mdio_bitbang(bus);
- /* free bitbang memory */
- kfree(mdp->bitbang);
-
return 0;
}
@@ -2250,7 +2554,8 @@ static int sh_mdio_init(struct net_device *ndev, int id,
struct sh_eth_private *mdp = netdev_priv(ndev);
/* create bit control struct for PHY */
- bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
+ bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info),
+ GFP_KERNEL);
if (!bitbang) {
ret = -ENOMEM;
goto out;
@@ -2259,18 +2564,17 @@ static int sh_mdio_init(struct net_device *ndev, int id,
/* bitbang init */
bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
bitbang->set_gate = pd->set_mdio_gate;
- bitbang->mdi_msk = 0x08;
- bitbang->mdo_msk = 0x04;
- bitbang->mmd_msk = 0x02;/* MMD */
- bitbang->mdc_msk = 0x01;
+ bitbang->mdi_msk = PIR_MDI;
+ bitbang->mdo_msk = PIR_MDO;
+ bitbang->mmd_msk = PIR_MMD;
+ bitbang->mdc_msk = PIR_MDC;
bitbang->ctrl.ops = &bb_ops;
/* MII controller setting */
- mdp->bitbang = bitbang;
mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
if (!mdp->mii_bus) {
ret = -ENOMEM;
- goto out_free_bitbang;
+ goto out;
}
/* Hook up MII support for ethtool */
@@ -2280,7 +2584,9 @@ static int sh_mdio_init(struct net_device *ndev, int id,
mdp->pdev->name, id);
/* PHY IRQ */
- mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
+ sizeof(int) * PHY_MAX_ADDR,
+ GFP_KERNEL);
if (!mdp->mii_bus->irq) {
ret = -ENOMEM;
goto out_free_bus;
@@ -2292,21 +2598,15 @@ static int sh_mdio_init(struct net_device *ndev, int id,
/* register mdio bus */
ret = mdiobus_register(mdp->mii_bus);
if (ret)
- goto out_free_irq;
+ goto out_free_bus;
dev_set_drvdata(&ndev->dev, mdp->mii_bus);
return 0;
-out_free_irq:
- kfree(mdp->mii_bus->irq);
-
out_free_bus:
free_mdio_bitbang(mdp->mii_bus);
-out_free_bitbang:
- kfree(bitbang);
-
out:
return ret;
}
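
The conversion above is the standard devm_* pattern: the allocation's lifetime is bound to the device, so the explicit kfree() calls and the out_free_irq/out_free_bitbang unwind labels disappear. A kernel-style sketch of the pattern (illustrative, not from this patch):

#include <linux/device.h>
#include <linux/slab.h>

static int demo_probe(struct device *dev)
{
	struct demo_priv { int irq[4]; } *priv;

	/* Freed automatically when the device is unbound. */
	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;		/* nothing to unwind */

	/* ... no matching kfree() needed in the remove path ... */
	return 0;
}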
@@ -2319,6 +2619,9 @@ static const u16 *sh_eth_get_register_offset(int register_type)
case SH_ETH_REG_GIGABIT:
reg_offset = sh_eth_offset_gigabit;
break;
+ case SH_ETH_REG_FAST_RCAR:
+ reg_offset = sh_eth_offset_fast_rcar;
+ break;
case SH_ETH_REG_FAST_SH4:
reg_offset = sh_eth_offset_fast_sh4;
break;
@@ -2326,7 +2629,7 @@ static const u16 *sh_eth_get_register_offset(int register_type)
reg_offset = sh_eth_offset_fast_sh3_sh2;
break;
default:
- printk(KERN_ERR "Unknown register type (%d)\n", register_type);
+ pr_err("Unknown register type (%d)\n", register_type);
break;
}
@@ -2356,7 +2659,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
struct resource *res;
struct net_device *ndev = NULL;
struct sh_eth_private *mdp = NULL;
- struct sh_eth_plat_data *pd;
+ struct sh_eth_plat_data *pd = pdev->dev.platform_data;
/* get base addr */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2394,10 +2697,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
mdp = netdev_priv(ndev);
mdp->num_tx_ring = TX_RING_SIZE;
mdp->num_rx_ring = RX_RING_SIZE;
- mdp->addr = ioremap(res->start, resource_size(res));
- if (mdp->addr == NULL) {
- ret = -ENOMEM;
- dev_err(&pdev->dev, "ioremap failed.\n");
+ mdp->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mdp->addr)) {
+ ret = PTR_ERR(mdp->addr);
goto out_release;
}
@@ -2406,7 +2708,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
- pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
/* get PHY ID */
mdp->phy_id = pd->phy;
mdp->phy_interface = pd->phy_interface;
@@ -2434,6 +2735,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* read and set MAC address */
read_mac_address(ndev, pd->mac_addr);
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ dev_warn(&pdev->dev,
+ "no valid MAC address supplied, using a random one.\n");
+ eth_hw_addr_random(ndev);
+ }
/* ioremap the TSU registers */
if (mdp->cd->tsu) {
@@ -2444,15 +2750,13 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ret = -ENODEV;
goto out_release;
}
- mdp->tsu_addr = ioremap(rtsu->start,
- resource_size(rtsu));
- if (mdp->tsu_addr == NULL) {
- ret = -ENOMEM;
- dev_err(&pdev->dev, "TSU ioremap failed.\n");
+ mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
+ if (IS_ERR(mdp->tsu_addr)) {
+ ret = PTR_ERR(mdp->tsu_addr);
goto out_release;
}
mdp->port = devno % 2;
- ndev->features = NETIF_F_HW_VLAN_FILTER;
+ ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
}
/* initialize first or needed device */
@@ -2489,10 +2793,6 @@ out_unregister:
out_release:
/* net_dev free */
- if (mdp && mdp->addr)
- iounmap(mdp->addr);
- if (mdp && mdp->tsu_addr)
- iounmap(mdp->tsu_addr);
if (ndev)
free_netdev(ndev);
@@ -2503,14 +2803,10 @@ out:
static int sh_eth_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
- struct sh_eth_private *mdp = netdev_priv(ndev);
- if (mdp->cd->tsu)
- iounmap(mdp->tsu_addr);
sh_mdio_release(ndev);
unregister_netdev(ndev);
pm_runtime_disable(&pdev->dev);
- iounmap(mdp->addr);
free_netdev(ndev);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index e6655678458e..1ddc9f235bcb 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -156,225 +156,6 @@ enum {
SH_ETH_MAX_REGISTER_OFFSET,
};
-static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
- [EDSR] = 0x0000,
- [EDMR] = 0x0400,
- [EDTRR] = 0x0408,
- [EDRRR] = 0x0410,
- [EESR] = 0x0428,
- [EESIPR] = 0x0430,
- [TDLAR] = 0x0010,
- [TDFAR] = 0x0014,
- [TDFXR] = 0x0018,
- [TDFFR] = 0x001c,
- [RDLAR] = 0x0030,
- [RDFAR] = 0x0034,
- [RDFXR] = 0x0038,
- [RDFFR] = 0x003c,
- [TRSCER] = 0x0438,
- [RMFCR] = 0x0440,
- [TFTR] = 0x0448,
- [FDR] = 0x0450,
- [RMCR] = 0x0458,
- [RPADIR] = 0x0460,
- [FCFTR] = 0x0468,
- [CSMR] = 0x04E4,
-
- [ECMR] = 0x0500,
- [ECSR] = 0x0510,
- [ECSIPR] = 0x0518,
- [PIR] = 0x0520,
- [PSR] = 0x0528,
- [PIPR] = 0x052c,
- [RFLR] = 0x0508,
- [APR] = 0x0554,
- [MPR] = 0x0558,
- [PFTCR] = 0x055c,
- [PFRCR] = 0x0560,
- [TPAUSER] = 0x0564,
- [GECMR] = 0x05b0,
- [BCULR] = 0x05b4,
- [MAHR] = 0x05c0,
- [MALR] = 0x05c8,
- [TROCR] = 0x0700,
- [CDCR] = 0x0708,
- [LCCR] = 0x0710,
- [CEFCR] = 0x0740,
- [FRECR] = 0x0748,
- [TSFRCR] = 0x0750,
- [TLFRCR] = 0x0758,
- [RFCR] = 0x0760,
- [CERCR] = 0x0768,
- [CEECR] = 0x0770,
- [MAFCR] = 0x0778,
- [RMII_MII] = 0x0790,
-
- [ARSTR] = 0x0000,
- [TSU_CTRST] = 0x0004,
- [TSU_FWEN0] = 0x0010,
- [TSU_FWEN1] = 0x0014,
- [TSU_FCM] = 0x0018,
- [TSU_BSYSL0] = 0x0020,
- [TSU_BSYSL1] = 0x0024,
- [TSU_PRISL0] = 0x0028,
- [TSU_PRISL1] = 0x002c,
- [TSU_FWSL0] = 0x0030,
- [TSU_FWSL1] = 0x0034,
- [TSU_FWSLC] = 0x0038,
- [TSU_QTAG0] = 0x0040,
- [TSU_QTAG1] = 0x0044,
- [TSU_FWSR] = 0x0050,
- [TSU_FWINMK] = 0x0054,
- [TSU_ADQT0] = 0x0048,
- [TSU_ADQT1] = 0x004c,
- [TSU_VTAG0] = 0x0058,
- [TSU_VTAG1] = 0x005c,
- [TSU_ADSBSY] = 0x0060,
- [TSU_TEN] = 0x0064,
- [TSU_POST1] = 0x0070,
- [TSU_POST2] = 0x0074,
- [TSU_POST3] = 0x0078,
- [TSU_POST4] = 0x007c,
- [TSU_ADRH0] = 0x0100,
- [TSU_ADRL0] = 0x0104,
- [TSU_ADRH31] = 0x01f8,
- [TSU_ADRL31] = 0x01fc,
-
- [TXNLCR0] = 0x0080,
- [TXALCR0] = 0x0084,
- [RXNLCR0] = 0x0088,
- [RXALCR0] = 0x008c,
- [FWNLCR0] = 0x0090,
- [FWALCR0] = 0x0094,
- [TXNLCR1] = 0x00a0,
- [TXALCR1] = 0x00a0,
- [RXNLCR1] = 0x00a8,
- [RXALCR1] = 0x00ac,
- [FWNLCR1] = 0x00b0,
- [FWALCR1] = 0x00b4,
-};
-
-static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
- [ECMR] = 0x0100,
- [RFLR] = 0x0108,
- [ECSR] = 0x0110,
- [ECSIPR] = 0x0118,
- [PIR] = 0x0120,
- [PSR] = 0x0128,
- [RDMLR] = 0x0140,
- [IPGR] = 0x0150,
- [APR] = 0x0154,
- [MPR] = 0x0158,
- [TPAUSER] = 0x0164,
- [RFCF] = 0x0160,
- [TPAUSECR] = 0x0168,
- [BCFRR] = 0x016c,
- [MAHR] = 0x01c0,
- [MALR] = 0x01c8,
- [TROCR] = 0x01d0,
- [CDCR] = 0x01d4,
- [LCCR] = 0x01d8,
- [CNDCR] = 0x01dc,
- [CEFCR] = 0x01e4,
- [FRECR] = 0x01e8,
- [TSFRCR] = 0x01ec,
- [TLFRCR] = 0x01f0,
- [RFCR] = 0x01f4,
- [MAFCR] = 0x01f8,
- [RTRATE] = 0x01fc,
-
- [EDMR] = 0x0000,
- [EDTRR] = 0x0008,
- [EDRRR] = 0x0010,
- [TDLAR] = 0x0018,
- [RDLAR] = 0x0020,
- [EESR] = 0x0028,
- [EESIPR] = 0x0030,
- [TRSCER] = 0x0038,
- [RMFCR] = 0x0040,
- [TFTR] = 0x0048,
- [FDR] = 0x0050,
- [RMCR] = 0x0058,
- [TFUCR] = 0x0064,
- [RFOCR] = 0x0068,
- [FCFTR] = 0x0070,
- [RPADIR] = 0x0078,
- [TRIMD] = 0x007c,
- [RBWAR] = 0x00c8,
- [RDFAR] = 0x00cc,
- [TBRAR] = 0x00d4,
- [TDFAR] = 0x00d8,
-};
-
-static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
- [ECMR] = 0x0160,
- [ECSR] = 0x0164,
- [ECSIPR] = 0x0168,
- [PIR] = 0x016c,
- [MAHR] = 0x0170,
- [MALR] = 0x0174,
- [RFLR] = 0x0178,
- [PSR] = 0x017c,
- [TROCR] = 0x0180,
- [CDCR] = 0x0184,
- [LCCR] = 0x0188,
- [CNDCR] = 0x018c,
- [CEFCR] = 0x0194,
- [FRECR] = 0x0198,
- [TSFRCR] = 0x019c,
- [TLFRCR] = 0x01a0,
- [RFCR] = 0x01a4,
- [MAFCR] = 0x01a8,
- [IPGR] = 0x01b4,
- [APR] = 0x01b8,
- [MPR] = 0x01bc,
- [TPAUSER] = 0x01c4,
- [BCFR] = 0x01cc,
-
- [ARSTR] = 0x0000,
- [TSU_CTRST] = 0x0004,
- [TSU_FWEN0] = 0x0010,
- [TSU_FWEN1] = 0x0014,
- [TSU_FCM] = 0x0018,
- [TSU_BSYSL0] = 0x0020,
- [TSU_BSYSL1] = 0x0024,
- [TSU_PRISL0] = 0x0028,
- [TSU_PRISL1] = 0x002c,
- [TSU_FWSL0] = 0x0030,
- [TSU_FWSL1] = 0x0034,
- [TSU_FWSLC] = 0x0038,
- [TSU_QTAGM0] = 0x0040,
- [TSU_QTAGM1] = 0x0044,
- [TSU_ADQT0] = 0x0048,
- [TSU_ADQT1] = 0x004c,
- [TSU_FWSR] = 0x0050,
- [TSU_FWINMK] = 0x0054,
- [TSU_ADSBSY] = 0x0060,
- [TSU_TEN] = 0x0064,
- [TSU_POST1] = 0x0070,
- [TSU_POST2] = 0x0074,
- [TSU_POST3] = 0x0078,
- [TSU_POST4] = 0x007c,
-
- [TXNLCR0] = 0x0080,
- [TXALCR0] = 0x0084,
- [RXNLCR0] = 0x0088,
- [RXALCR0] = 0x008c,
- [FWNLCR0] = 0x0090,
- [FWALCR0] = 0x0094,
- [TXNLCR1] = 0x00a0,
- [TXALCR1] = 0x00a0,
- [RXNLCR1] = 0x00a8,
- [RXALCR1] = 0x00ac,
- [FWNLCR1] = 0x00b0,
- [FWALCR1] = 0x00b4,
-
- [TSU_ADRH0] = 0x0100,
- [TSU_ADRL0] = 0x0104,
- [TSU_ADRL31] = 0x01fc,
-
-};
-
/* Driver's parameters */
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
#define SH4_SKB_RX_ALIGN 32
@@ -705,7 +486,6 @@ struct sh_eth_private {
const u16 *reg_offset;
void __iomem *addr;
void __iomem *tsu_addr;
- struct bb_info *bitbang;
u32 num_rx_ring;
u32 num_tx_ring;
dma_addr_t rx_desc_dma;
@@ -723,7 +503,7 @@ struct sh_eth_private {
u32 phy_id; /* PHY ID */
struct mii_bus *mii_bus; /* MDIO bus control */
struct phy_device *phydev; /* PHY device control */
- enum phy_state link;
+ int link;
phy_interface_t phy_interface;
int msg_enable;
int speed;
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index 21683e2b1ff4..b6739afeaca1 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -998,6 +998,7 @@ static int s6gmac_probe(struct platform_device *pdev)
mb = mdiobus_alloc();
if (!mb) {
printk(KERN_ERR DRV_PRMT "error allocating mii bus\n");
+ res = -ENOMEM;
goto errmii;
}
mb->name = "s6gmac_mii";
@@ -1053,20 +1054,7 @@ static struct platform_driver s6gmac_driver = {
},
};
-static int __init s6gmac_init(void)
-{
- printk(KERN_INFO DRV_PRMT "S6 GMAC ethernet driver\n");
- return platform_driver_register(&s6gmac_driver);
-}
-
-
-static void __exit s6gmac_exit(void)
-{
- platform_driver_unregister(&s6gmac_driver);
-}
-
-module_init(s6gmac_init);
-module_exit(s6gmac_exit);
+module_platform_driver(s6gmac_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("S6105 on chip Ethernet driver");
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 3aca57853ed4..bdac936a68bc 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -651,8 +651,11 @@ if (next_ptr < RX_START || next_ptr >= RX_END) {
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
received ++;
- } else
- goto dropping;
+ } else {
+ ether3_outw(next_ptr >> 8, REG_RECVEND);
+ dev->stats.rx_dropped++;
+ goto done;
+ }
} else {
struct net_device_stats *stats = &dev->stats;
ether3_outw(next_ptr >> 8, REG_RECVEND);
@@ -679,21 +682,6 @@ done:
}
return maxcnt;
-
-dropping:{
- static unsigned long last_warned;
-
- ether3_outw(next_ptr >> 8, REG_RECVEND);
- /*
- * Don't print this message too many times...
- */
- if (time_after(jiffies, last_warned + 10 * HZ)) {
- last_warned = jiffies;
- printk("%s: memory squeeze, dropping packet.\n", dev->name);
- }
- dev->stats.rx_dropped++;
- goto done;
- }
}
/*
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 0fde9ca28269..0ad5694b41f8 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -381,8 +381,6 @@ memory_squeeze:
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
} else {
- printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
- dev->name);
dev->stats.rx_dropped++;
}
} else {
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 435b4f1e3488..4136ccc4a954 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -24,7 +24,7 @@ config SFC_MCDI_MON
bool "Solarflare SFC9000-family hwmon support"
depends on SFC && HWMON && !(SFC=y && HWMON=m)
default y
- ----help---
+ ---help---
This exposes the on-board firmware-managed sensors as a
hardware monitor device.
config SFC_SRIOV
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 0bc00991d310..01b99206139a 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -22,6 +22,7 @@
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/cpu_rmap.h>
+#include <linux/aer.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
@@ -71,21 +72,21 @@ const char *const efx_loopback_mode_names[] = {
const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *const efx_reset_type_names[] = {
- [RESET_TYPE_INVISIBLE] = "INVISIBLE",
- [RESET_TYPE_ALL] = "ALL",
- [RESET_TYPE_WORLD] = "WORLD",
- [RESET_TYPE_DISABLE] = "DISABLE",
- [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
- [RESET_TYPE_INT_ERROR] = "INT_ERROR",
- [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
- [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
- [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
- [RESET_TYPE_TX_SKIP] = "TX_SKIP",
- [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
+ [RESET_TYPE_INVISIBLE] = "INVISIBLE",
+ [RESET_TYPE_ALL] = "ALL",
+ [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
+ [RESET_TYPE_WORLD] = "WORLD",
+ [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+ [RESET_TYPE_DISABLE] = "DISABLE",
+ [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
+ [RESET_TYPE_INT_ERROR] = "INT_ERROR",
+ [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
+ [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
+ [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
+ [RESET_TYPE_TX_SKIP] = "TX_SKIP",
+ [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
};
-#define EFX_MAX_MTU (9 * 1024)
-
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
* queued onto this work queue. This is not a per-nic work queue, because
* efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
@@ -117,9 +118,12 @@ MODULE_PARM_DESC(separate_tx_channels,
static int napi_weight = 64;
/* This is the time (in jiffies) between invocations of the hardware
- * monitor. On Falcon-based NICs, this will:
+ * monitor.
+ * On Falcon-based NICs, this will:
* - Check the on-board hardware monitor;
* - Poll the link state and reconfigure the hardware as necessary.
+ * On Siena-based NICs for power systems with EEH support, this will give EEH a
+ * chance to start.
*/
static unsigned int efx_monitor_interval = 1 * HZ;
@@ -203,13 +207,14 @@ static void efx_stop_all(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \
if ((efx->state == STATE_READY) || \
+ (efx->state == STATE_RECOVERY) || \
(efx->state == STATE_DISABLED)) \
ASSERT_RTNL(); \
} while (0)
static int efx_check_disabled(struct efx_nic *efx)
{
- if (efx->state == STATE_DISABLED) {
+ if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
netif_err(efx, drv, efx->net_dev,
"device is disabled due to earlier errors\n");
return -EIO;
@@ -242,15 +247,9 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
struct efx_rx_queue *rx_queue =
efx_channel_get_rx_queue(channel);
- /* Deliver last RX packet. */
- if (channel->rx_pkt) {
- __efx_rx_packet(channel, channel->rx_pkt);
- channel->rx_pkt = NULL;
- }
- if (rx_queue->enabled) {
- efx_rx_strategy(channel);
+ efx_rx_flush_packet(channel);
+ if (rx_queue->enabled)
efx_fast_push_rx_descriptors(rx_queue);
- }
}
return spent;
@@ -625,20 +624,51 @@ fail:
*/
static void efx_start_datapath(struct efx_nic *efx)
{
+ bool old_rx_scatter = efx->rx_scatter;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
struct efx_channel *channel;
+ size_t rx_buf_len;
/* Calculate the rx buffer allocation parameters required to
* support the current MTU, including padding for header
* alignment and overruns.
*/
- efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
- EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
- efx->type->rx_buffer_hash_size +
- efx->type->rx_buffer_padding);
- efx->rx_buffer_order = get_order(efx->rx_buffer_len +
- sizeof(struct efx_rx_page_state));
+ efx->rx_dma_len = (efx->type->rx_buffer_hash_size +
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+ efx->type->rx_buffer_padding);
+ rx_buf_len = (sizeof(struct efx_rx_page_state) +
+ EFX_PAGE_IP_ALIGN + efx->rx_dma_len);
+ if (rx_buf_len <= PAGE_SIZE) {
+ efx->rx_scatter = false;
+ efx->rx_buffer_order = 0;
+ } else if (efx->type->can_rx_scatter) {
+ BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
+ EFX_PAGE_IP_ALIGN + EFX_RX_USR_BUF_SIZE >
+ PAGE_SIZE / 2);
+ efx->rx_scatter = true;
+ efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
+ efx->rx_buffer_order = 0;
+ } else {
+ efx->rx_scatter = false;
+ efx->rx_buffer_order = get_order(rx_buf_len);
+ }
+
+ efx_rx_config_page_split(efx);
+ if (efx->rx_buffer_order)
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u; page order=%u batch=%u\n",
+ efx->rx_dma_len, efx->rx_buffer_order,
+ efx->rx_pages_per_batch);
+ else
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
+ efx->rx_dma_len, efx->rx_page_buf_step,
+ efx->rx_bufs_per_page, efx->rx_pages_per_batch);
+
+ /* RX filters also have scatter-enabled flags */
+ if (efx->rx_scatter != old_rx_scatter)
+ efx_filter_update_rx_scatter(efx);
/* We must keep at least one descriptor in a TX ring empty.
* We could avoid this when the queue size does not exactly
@@ -655,16 +685,12 @@ static void efx_start_datapath(struct efx_nic *efx)
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_init_tx_queue(tx_queue);
- /* The rx buffer allocation strategy is MTU dependent */
- efx_rx_strategy(channel);
-
efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue);
efx_nic_generate_fill_event(rx_queue);
}
- WARN_ON(channel->rx_pkt != NULL);
- efx_rx_strategy(channel);
+ WARN_ON(channel->rx_pkt_n_frags);
}
if (netif_device_present(efx->net_dev))
@@ -683,7 +709,7 @@ static void efx_stop_datapath(struct efx_nic *efx)
BUG_ON(efx->port_enabled);
/* Only perform flush if dma is enabled */
- if (dev->is_busmaster) {
+ if (dev->is_busmaster && efx->state != STATE_RECOVERY) {
rc = efx_nic_flush_queues(efx);
if (rc && EFX_WORKAROUND_7803(efx)) {
@@ -1596,13 +1622,15 @@ static void efx_start_all(struct efx_nic *efx)
efx_start_port(efx);
efx_start_datapath(efx);
- /* Start the hardware monitor if there is one. Otherwise (we're link
- * event driven), we have to poll the PHY because after an event queue
- * flush, we could have a missed a link state change */
- if (efx->type->monitor != NULL) {
+ /* Start the hardware monitor if there is one */
+ if (efx->type->monitor != NULL)
queue_delayed_work(efx->workqueue, &efx->monitor_work,
efx_monitor_interval);
- } else {
+
+ /* If link state detection is normally event-driven, we have
+ * to poll now because we could have missed a change
+ */
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
mutex_lock(&efx->mac_lock);
if (efx->phy_op->poll(efx))
efx_link_status_changed(efx);
@@ -2309,7 +2337,9 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
out:
/* Leave device stopped if necessary */
- disabled = rc || method == RESET_TYPE_DISABLE;
+ disabled = rc ||
+ method == RESET_TYPE_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_DISABLE;
rc2 = efx_reset_up(efx, method, !disabled);
if (rc2) {
disabled = true;
@@ -2328,13 +2358,48 @@ out:
return rc;
}
+/* Try recovery mechanisms.
+ * For now only EEH is supported.
+ * Returns 0 if the recovery mechanisms are unsuccessful.
+ * Returns a non-zero value otherwise.
+ */
+static int efx_try_recovery(struct efx_nic *efx)
+{
+#ifdef CONFIG_EEH
+ /* A PCI error can occur and not be seen by EEH because nothing
+ * happens on the PCI bus. In this case the driver may fail and
+ * schedule a 'recover or reset', leading to this recovery handler.
+ * Manually call the eeh failure check function.
+ */
+ struct eeh_dev *eehdev =
+ of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));
+
+ if (eeh_dev_check_failure(eehdev)) {
+ /* The EEH mechanisms will handle the error and reset the
+ * device if necessary.
+ */
+ return 1;
+ }
+#endif
+ return 0;
+}
+
/* The worker thread exists so that code that cannot sleep can
* schedule a reset for later.
*/
static void efx_reset_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
- unsigned long pending = ACCESS_ONCE(efx->reset_pending);
+ unsigned long pending;
+ enum reset_type method;
+
+ pending = ACCESS_ONCE(efx->reset_pending);
+ method = fls(pending) - 1;
+
+ if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_ALL) &&
+ efx_try_recovery(efx))
+ return;
if (!pending)
return;
@@ -2346,7 +2411,7 @@ static void efx_reset_work(struct work_struct *data)
* it cannot change again.
*/
if (efx->state == STATE_READY)
- (void)efx_reset(efx, fls(pending) - 1);
+ (void)efx_reset(efx, method);
rtnl_unlock();
}
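
efx_reset_work() treats reset_pending as a bitmask indexed by enum reset_type, and fls(pending) - 1 selects the pending method with the widest scope, relying on the enum being ordered by increasing scope. A standalone sketch with a portable fls() stand-in:

#include <stdio.h>

enum { DEMO_INVISIBLE, DEMO_RECOVER_OR_ALL, DEMO_ALL, DEMO_WORLD };

/* Portable stand-in for the kernel's fls(): index of the highest set bit,
 * counted from 1; returns 0 for no bits set.
 */
static int demo_fls(unsigned long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long pending = (1UL << DEMO_INVISIBLE) | (1UL << DEMO_ALL);

	printf("method = %d\n", demo_fls(pending) - 1);	/* DEMO_ALL (2) */
	return 0;
}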
@@ -2355,11 +2420,20 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
enum reset_type method;
+ if (efx->state == STATE_RECOVERY) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "recovering: skip scheduling %s reset\n",
+ RESET_TYPE(type));
+ return;
+ }
+
switch (type) {
case RESET_TYPE_INVISIBLE:
case RESET_TYPE_ALL:
+ case RESET_TYPE_RECOVER_OR_ALL:
case RESET_TYPE_WORLD:
case RESET_TYPE_DISABLE:
+ case RESET_TYPE_RECOVER_OR_DISABLE:
method = type;
netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
RESET_TYPE(method));
@@ -2569,6 +2643,8 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_fini_struct(efx);
pci_set_drvdata(pci_dev, NULL);
free_netdev(efx->net_dev);
+
+ pci_disable_pcie_error_reporting(pci_dev);
};
/* NIC VPD information
@@ -2741,6 +2817,11 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
netif_warn(efx, probe, efx->net_dev,
"failed to create MTDs (%d)\n", rc);
+ rc = pci_enable_pcie_error_reporting(pci_dev);
+ if (rc && rc != -EINVAL)
+ netif_warn(efx, probe, efx->net_dev,
+ "pci_enable_pcie_error_reporting failed (%d)\n", rc);
+
return 0;
fail4:
@@ -2865,12 +2946,112 @@ static const struct dev_pm_ops efx_pm_ops = {
.restore = efx_pm_resume,
};
+/* A PCI error affecting this device was detected.
+ * At this point MMIO and DMA may be disabled.
+ * Stop the software path and request a slot reset.
+ */
+static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
+ enum pci_channel_state state)
+{
+ pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+ struct efx_nic *efx = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ rtnl_lock();
+
+ if (efx->state != STATE_DISABLED) {
+ efx->state = STATE_RECOVERY;
+ efx->reset_pending = 0;
+
+ efx_device_detach_sync(efx);
+
+ efx_stop_all(efx);
+ efx_stop_interrupts(efx, false);
+
+ status = PCI_ERS_RESULT_NEED_RESET;
+ } else {
+ /* If the interface is disabled we don't want to do anything
+ * with it.
+ */
+ status = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ rtnl_unlock();
+
+ pci_disable_device(pdev);
+
+ return status;
+}
+
+/* Fake a successful reset, which will be performed later in efx_io_resume. */
+static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
+{
+ struct efx_nic *efx = pci_get_drvdata(pdev);
+ pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+ int rc;
+
+ if (pci_enable_device(pdev)) {
+ netif_err(efx, hw, efx->net_dev,
+ "Cannot re-enable PCI device after reset.\n");
+ status = PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ rc = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
+ /* Non-fatal error. Continue. */
+ }
+
+ return status;
+}
+
+/* Perform the actual reset and resume I/O operations. */
+static void efx_io_resume(struct pci_dev *pdev)
+{
+ struct efx_nic *efx = pci_get_drvdata(pdev);
+ int rc;
+
+ rtnl_lock();
+
+ if (efx->state == STATE_DISABLED)
+ goto out;
+
+ rc = efx_reset(efx, RESET_TYPE_ALL);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "efx_reset failed after PCI error (%d)\n", rc);
+ } else {
+ efx->state = STATE_READY;
+ netif_dbg(efx, hw, efx->net_dev,
+ "Done resetting and resuming IO after PCI error.\n");
+ }
+
+out:
+ rtnl_unlock();
+}
+
+/* For simplicity and reliability, we always require a slot reset and try to
+ * reset the hardware when a PCI error affecting the device is detected.
+ * We leave both the link_reset and mmio_enabled callbacks unimplemented:
+ * with our request for slot reset the mmio_enabled callback will never be
+ * called, and the link_reset callback is not used by AER or EEH mechanisms.
+ */
+static struct pci_error_handlers efx_err_handlers = {
+ .error_detected = efx_io_error_detected,
+ .slot_reset = efx_io_slot_reset,
+ .resume = efx_io_resume,
+};
+
static struct pci_driver efx_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = efx_pci_table,
.probe = efx_pci_probe,
.remove = efx_pci_remove,
.driver.pm = &efx_pm_ops,
+ .err_handler = &efx_err_handlers,
};
/**************************************************************************
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index d2f790df6dcb..8372da239b43 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -33,17 +33,22 @@ extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
/* RX */
+extern void efx_rx_config_page_split(struct efx_nic *efx);
extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_rx_strategy(struct efx_channel *channel);
extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
extern void efx_rx_slow_fill(unsigned long context);
-extern void __efx_rx_packet(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf);
-extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+extern void __efx_rx_packet(struct efx_channel *channel);
+extern void efx_rx_packet(struct efx_rx_queue *rx_queue,
+ unsigned int index, unsigned int n_frags,
unsigned int len, u16 flags);
+static inline void efx_rx_flush_packet(struct efx_channel *channel)
+{
+ if (channel->rx_pkt_n_frags)
+ __efx_rx_packet(channel);
+}
extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_MAX_DMAQ_SIZE 4096UL
@@ -67,6 +72,7 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
extern int efx_probe_filters(struct efx_nic *efx);
extern void efx_restore_filters(struct efx_nic *efx);
extern void efx_remove_filters(struct efx_nic *efx);
+extern void efx_filter_update_rx_scatter(struct efx_nic *efx);
extern s32 efx_filter_insert_filter(struct efx_nic *efx,
struct efx_filter_spec *spec,
bool replace);
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index 182dbe2cc6e4..ab8fb5889e55 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -137,8 +137,12 @@ enum efx_loopback_mode {
* Reset methods are numbered in order of increasing scope.
*
* @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only)
+ * @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL
+ * if unsuccessful.
* @RESET_TYPE_ALL: Reset datapath, MAC and PHY
* @RESET_TYPE_WORLD: Reset as much as possible
+ * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
+ * unsuccessful.
* @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error
@@ -150,9 +154,11 @@ enum efx_loopback_mode {
*/
enum reset_type {
RESET_TYPE_INVISIBLE = 0,
- RESET_TYPE_ALL = 1,
- RESET_TYPE_WORLD = 2,
- RESET_TYPE_DISABLE = 3,
+ RESET_TYPE_RECOVER_OR_ALL = 1,
+ RESET_TYPE_ALL = 2,
+ RESET_TYPE_WORLD = 3,
+ RESET_TYPE_RECOVER_OR_DISABLE = 4,
+ RESET_TYPE_DISABLE = 5,
RESET_TYPE_MAX_METHOD,
RESET_TYPE_TX_WATCHDOG,
RESET_TYPE_INT_ERROR,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 8e61cd06f66a..6e768175e7e0 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -154,6 +154,7 @@ static const struct efx_ethtool_stat efx_ethtool_stats[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc),
};
/* Number of ethtool statistics */
@@ -978,7 +979,8 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
rule->m_ext.data[1]))
return -EINVAL;
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0,
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
+ efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
(rule->ring_cookie == RX_CLS_FLOW_DISC) ?
0xfff : rule->ring_cookie);
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 49bcd196e10d..4486102fa9b3 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -1546,10 +1546,6 @@ static int falcon_probe_nic(struct efx_nic *efx)
static void falcon_init_rx_cfg(struct efx_nic *efx)
{
- /* Prior to Siena the RX DMA engine will split each frame at
- * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
- * be so large that that never happens. */
- const unsigned huge_buf_size = (3 * 4096) >> 5;
/* RX control FIFO thresholds (32 entries) */
const unsigned ctrl_xon_thr = 20;
const unsigned ctrl_xoff_thr = 25;
@@ -1557,10 +1553,15 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
efx_reado(efx, &reg, FR_AZ_RX_CFG);
if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
- /* Data FIFO size is 5.5K */
+ /* Data FIFO size is 5.5K. The RX DMA engine only
+ * supports scattering for user-mode queues, but will
+ * split DMA writes at intervals of RX_USR_BUF_SIZE
+ * (32-byte units) even for kernel-mode queues. We
+ * set it to be so large that that never happens.
+ */
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
- huge_buf_size);
+ (3 * 4096) >> 5);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
@@ -1569,7 +1570,7 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
/* Data FIFO size is 80K; register fields moved */
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
- huge_buf_size);
+ EFX_RX_USR_BUF_SIZE >> 5);
/* Send XON and XOFF at ~3 * max MTU away from empty/full */
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
@@ -1815,6 +1816,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
.rx_buffer_padding = 0x24,
+ .can_rx_scatter = false,
.max_interrupt_mode = EFX_INT_MODE_MSI,
.phys_addr_channels = 4,
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
@@ -1865,6 +1867,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
.rx_buffer_hash_size = 0x10,
.rx_buffer_padding = 0,
+ .can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
* interrupt handler only supports 32
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index 8af42cd1feda..2397f0e8d3eb 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -66,6 +66,10 @@ struct efx_filter_state {
#endif
};
+static void efx_filter_table_clear_entry(struct efx_nic *efx,
+ struct efx_filter_table *table,
+ unsigned int filter_idx);
+
/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
* key derived from the n-tuple. The initial LFSR state is 0xffff. */
static u16 efx_filter_hash(u32 key)
@@ -168,6 +172,25 @@ static void efx_filter_push_rx_config(struct efx_nic *efx)
filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
!!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
EFX_FILTER_FLAG_RX_RSS));
+
+ /* There is a single bit to enable RX scatter for all
+ * unmatched packets. Only set it if scatter is
+ * enabled in both filter specs.
+ */
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+ !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
+ table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
+ EFX_FILTER_FLAG_RX_SCATTER));
+ } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ /* We don't expose 'default' filters because unmatched
+ * packets always go to the queue number found in the
+ * RSS table. But we still need to set the RX scatter
+ * bit here.
+ */
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+ efx->rx_scatter);
}
efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
@@ -409,9 +432,18 @@ static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
struct efx_filter_spec *spec = &table->spec[filter_idx];
+ enum efx_filter_flags flags = 0;
+
+ /* If there's only one channel then disable RSS for non VF
+ * traffic, thereby allowing VFs to use RSS when the PF can't.
+ */
+ if (efx->n_rx_channels > 1)
+ flags |= EFX_FILTER_FLAG_RX_RSS;
- efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL,
- EFX_FILTER_FLAG_RX_RSS, 0);
+ if (efx->rx_scatter)
+ flags |= EFX_FILTER_FLAG_RX_SCATTER;
+
+ efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, flags, 0);
spec->type = EFX_FILTER_UC_DEF + filter_idx;
table->used_bitmap[0] |= 1 << filter_idx;
}
@@ -463,13 +495,6 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
break;
}
- case EFX_FILTER_TABLE_RX_DEF:
- /* One filter spec per type */
- BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
- BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
- EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
- return spec->type - EFX_FILTER_UC_DEF;
-
case EFX_FILTER_TABLE_RX_MAC: {
bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
EFX_POPULATE_OWORD_7(
@@ -520,42 +545,6 @@ static bool efx_filter_equal(const struct efx_filter_spec *left,
return true;
}
-static int efx_filter_search(struct efx_filter_table *table,
- struct efx_filter_spec *spec, u32 key,
- bool for_insert, unsigned int *depth_required)
-{
- unsigned hash, incr, filter_idx, depth, depth_max;
-
- hash = efx_filter_hash(key);
- incr = efx_filter_increment(key);
-
- filter_idx = hash & (table->size - 1);
- depth = 1;
- depth_max = (for_insert ?
- (spec->priority <= EFX_FILTER_PRI_HINT ?
- FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX) :
- table->search_depth[spec->type]);
-
- for (;;) {
- /* Return success if entry is used and matches this spec
- * or entry is unused and we are trying to insert.
- */
- if (test_bit(filter_idx, table->used_bitmap) ?
- efx_filter_equal(spec, &table->spec[filter_idx]) :
- for_insert) {
- *depth_required = depth;
- return filter_idx;
- }
-
- /* Return failure if we reached the maximum search depth */
- if (depth == depth_max)
- return for_insert ? -EBUSY : -ENOENT;
-
- filter_idx = (filter_idx + incr) & (table->size - 1);
- ++depth;
- }
-}
-
/*
* Construct/deconstruct external filter IDs. At least the RX filter
* IDs must be ordered by matching priority, for RX NFC semantics.
@@ -650,44 +639,111 @@ u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
* efx_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter
* @spec: Specification for the filter
- * @replace: Flag for whether the specified filter may replace a filter
- * with an identical match expression and equal or lower priority
+ * @replace_equal: Flag for whether the specified filter may replace an
+ * existing filter with equal priority
*
* On success, return the filter ID.
* On failure, return a negative error code.
+ *
+ * If an existing filter has equal match values to the new filter
+ * spec, then the new filter might replace it, depending on the
+ * relative priorities. If the existing filter has lower priority, or
+ * if @replace_equal is set and it has equal priority, then it is
+ * replaced. Otherwise the function fails, returning -%EPERM if
+ * the existing filter has higher priority or -%EEXIST if it has
+ * equal priority.
*/
s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
- bool replace)
+ bool replace_equal)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = efx_filter_spec_table(state, spec);
- struct efx_filter_spec *saved_spec;
efx_oword_t filter;
- unsigned int filter_idx, depth = 0;
- u32 key;
+ int rep_index, ins_index;
+ unsigned int depth = 0;
int rc;
if (!table || table->size == 0)
return -EINVAL;
- key = efx_filter_build(&filter, spec);
-
netif_vdbg(efx, hw, efx->net_dev,
"%s: type %d search_depth=%d", __func__, spec->type,
table->search_depth[spec->type]);
- spin_lock_bh(&state->lock);
+ if (table->id == EFX_FILTER_TABLE_RX_DEF) {
+ /* One filter spec per type */
+ BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
+ BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
+ EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
+ rep_index = spec->type - EFX_FILTER_UC_DEF;
+ ins_index = rep_index;
- rc = efx_filter_search(table, spec, key, true, &depth);
- if (rc < 0)
- goto out;
- filter_idx = rc;
- BUG_ON(filter_idx >= table->size);
- saved_spec = &table->spec[filter_idx];
-
- if (test_bit(filter_idx, table->used_bitmap)) {
- /* Should we replace the existing filter? */
- if (!replace) {
+ spin_lock_bh(&state->lock);
+ } else {
+ /* Search concurrently for
+ * (1) a filter to be replaced (rep_index): any filter
+ * with the same match values, up to the current
+ * search depth for this type, and
+ * (2) the insertion point (ins_index): (1) or any
+ * free slot before it or up to the maximum search
+ * depth for this priority
+ * We fail if we cannot find (2).
+ *
+ * We can stop once either
+ * (a) we find (1), in which case we have definitely
+ * found (2) as well; or
+ * (b) we have searched exhaustively for (1), and have
+ * either found (2) or searched exhaustively for it
+ */
+ u32 key = efx_filter_build(&filter, spec);
+ unsigned int hash = efx_filter_hash(key);
+ unsigned int incr = efx_filter_increment(key);
+ unsigned int max_rep_depth = table->search_depth[spec->type];
+ unsigned int max_ins_depth =
+ spec->priority <= EFX_FILTER_PRI_HINT ?
+ FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX;
+ unsigned int i = hash & (table->size - 1);
+
+ ins_index = -1;
+ depth = 1;
+
+ spin_lock_bh(&state->lock);
+
+ for (;;) {
+ if (!test_bit(i, table->used_bitmap)) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_filter_equal(spec, &table->spec[i])) {
+ /* Case (a) */
+ if (ins_index < 0)
+ ins_index = i;
+ rep_index = i;
+ break;
+ }
+
+ if (depth >= max_rep_depth &&
+ (ins_index >= 0 || depth >= max_ins_depth)) {
+ /* Case (b) */
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto out;
+ }
+ rep_index = -1;
+ break;
+ }
+
+ i = (i + incr) & (table->size - 1);
+ ++depth;
+ }
+ }
+
+ /* If we found a filter to be replaced, check whether we
+ * should do so
+ */
+ if (rep_index >= 0) {
+ struct efx_filter_spec *saved_spec = &table->spec[rep_index];
+
+ if (spec->priority == saved_spec->priority && !replace_equal) {
rc = -EEXIST;
goto out;
}
@@ -695,11 +751,14 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
rc = -EPERM;
goto out;
}
- } else {
- __set_bit(filter_idx, table->used_bitmap);
+ }
+
+ /* Insert the filter */
+ if (ins_index != rep_index) {
+ __set_bit(ins_index, table->used_bitmap);
++table->used;
}
- *saved_spec = *spec;
+ table->spec[ins_index] = *spec;
if (table->id == EFX_FILTER_TABLE_RX_DEF) {
efx_filter_push_rx_config(efx);
@@ -713,13 +772,19 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
}
efx_writeo(efx, &filter,
- table->offset + table->step * filter_idx);
+ table->offset + table->step * ins_index);
+
+ /* If we were able to replace a filter by inserting
+ * at a lower depth, clear the replaced filter
+ */
+ if (ins_index != rep_index && rep_index >= 0)
+ efx_filter_table_clear_entry(efx, table, rep_index);
}
netif_vdbg(efx, hw, efx->net_dev,
"%s: filter type %d index %d rxq %u set",
- __func__, spec->type, filter_idx, spec->dmaq_id);
- rc = efx_filter_make_id(spec, filter_idx);
+ __func__, spec->type, ins_index, spec->dmaq_id);
+ rc = efx_filter_make_id(spec, ins_index);
out:
spin_unlock_bh(&state->lock);
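[A hedged usage sketch of the revised insert semantics follows. example_insert_tcp_filter is hypothetical; efx_filter_init_rx() and efx_filter_set_ipv4_local() are the driver's existing spec-building helpers.]

/* Illustrative caller: insert a TCP/IPv4 local-address steering filter,
 * refusing to displace an existing filter of equal priority
 * (replace_equal = false).
 */
static int example_insert_tcp_filter(struct efx_nic *efx, __be32 ip,
				     __be16 port, unsigned int rxq)
{
	struct efx_filter_spec spec;
	s32 rc;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq);
	rc = efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, ip, port);
	if (rc)
		return rc;

	rc = efx_filter_insert_filter(efx, &spec, false);
	return rc < 0 ? rc : 0;	/* >= 0 is the assigned filter ID */
}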
@@ -1060,6 +1125,50 @@ void efx_remove_filters(struct efx_nic *efx)
kfree(state);
}
+/* Update scatter enable flags for filters pointing to our own RX queues */
+void efx_filter_update_rx_scatter(struct efx_nic *efx)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ enum efx_filter_table_id table_id;
+ struct efx_filter_table *table;
+ efx_oword_t filter;
+ unsigned int filter_idx;
+
+ spin_lock_bh(&state->lock);
+
+ for (table_id = EFX_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (!test_bit(filter_idx, table->used_bitmap) ||
+ table->spec[filter_idx].dmaq_id >=
+ efx->n_rx_channels)
+ continue;
+
+ if (efx->rx_scatter)
+ table->spec[filter_idx].flags |=
+ EFX_FILTER_FLAG_RX_SCATTER;
+ else
+ table->spec[filter_idx].flags &=
+ ~EFX_FILTER_FLAG_RX_SCATTER;
+
+ if (table_id == EFX_FILTER_TABLE_RX_DEF)
+ /* Pushed by efx_filter_push_rx_config() */
+ continue;
+
+ efx_filter_build(&filter, &table->spec[filter_idx]);
+ efx_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+ }
+
+ efx_filter_push_rx_config(efx);
+
+ spin_unlock_bh(&state->lock);
+}
+
#ifdef CONFIG_RFS_ACCEL
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 0095ce95150b..97dd8f18c001 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -667,7 +667,7 @@ fail:
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
u16 *fw_subtype_list, u32 *capabilities)
{
- uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN];
+ uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMAX];
size_t outlen, offset, i;
int port_num = efx_port_num(efx);
int rc;
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 9d426d0457bd..c5c9747861ba 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -553,6 +553,7 @@
#define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */
#define MC_CMD_PTP_MODE_V2 0x2 /* enum */
#define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */
+#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 /* enum */
/* MC_CMD_PTP_IN_DISABLE msgrequest */
#define MC_CMD_PTP_IN_DISABLE_LEN 8
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 0a90abd2421b..9bd433a095c5 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -69,6 +69,12 @@
#define EFX_TXQ_TYPES 4
#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
+/* Maximum possible MTU the driver supports */
+#define EFX_MAX_MTU (9 * 1024)
+
+/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page. */
+#define EFX_RX_USR_BUF_SIZE 1824
+
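[Quick arithmetic behind the comment above, as a sketch; the assert and its message are illustrative, not part of the patch.]

/* 2 * 1824 = 3648, which leaves 448 bytes of a 4K page for the
 * struct efx_rx_page_state header plus the per-buffer IP alignment and
 * cache-line rounding applied by efx_rx_config_page_split() in rx.c.
 */
_Static_assert(2 * 1824 <= 4096, "two user buffers pack into a 4K page");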
/* Forward declare Precision Time Protocol (PTP) support structure. */
struct efx_ptp_data;
@@ -206,25 +212,23 @@ struct efx_tx_queue {
/**
* struct efx_rx_buffer - An Efx RX data buffer
* @dma_addr: DMA base address of the buffer
- * @skb: The associated socket buffer. Valid iff !(@flags & %EFX_RX_BUF_PAGE).
- * Will be %NULL if the buffer slot is currently free.
- * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE.
+ * @page: The associated page buffer.
* Will be %NULL if the buffer slot is currently free.
- * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
- * @len: Buffer length, in bytes.
- * @flags: Flags for buffer and packet state.
+ * @page_offset: If pending: offset in @page of DMA base address.
+ * If completed: offset in @page of Ethernet header.
+ * @len: If pending: length for DMA descriptor.
+ * If completed: received length, excluding hash prefix.
+ * @flags: Flags for buffer and packet state. These are only set on the
+ * first buffer of a scattered packet.
*/
struct efx_rx_buffer {
dma_addr_t dma_addr;
- union {
- struct sk_buff *skb;
- struct page *page;
- } u;
+ struct page *page;
u16 page_offset;
u16 len;
u16 flags;
};
-#define EFX_RX_BUF_PAGE 0x0001
+#define EFX_RX_BUF_LAST_IN_PAGE 0x0001
#define EFX_RX_PKT_CSUMMED 0x0002
#define EFX_RX_PKT_DISCARD 0x0004
@@ -260,14 +264,23 @@ struct efx_rx_page_state {
* @added_count: Number of buffers added to the receive queue.
* @notified_count: Number of buffers given to NIC (<= @added_count).
* @removed_count: Number of buffers removed from the receive queue.
+ * @scatter_n: Number of buffers used by current packet
+ * @page_ring: The ring to store DMA mapped pages for reuse.
+ * @page_add: Counter to calculate the write pointer for the recycle ring.
+ * @page_remove: Counter to calculate the read pointer for the recycle ring.
+ * @page_recycle_count: The number of pages that have been recycled.
+ * @page_recycle_failed: The number of pages that couldn't be recycled because
+ * the kernel still held a reference to them.
+ * @page_recycle_full: The number of pages that were released because the
+ * recycle ring was full.
+ * @page_ptr_mask: The number of pages in the RX recycle ring minus 1.
* @max_fill: RX descriptor maximum fill level (<= ring size)
* @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
* (<= @max_fill)
* @min_fill: RX descriptor minimum non-zero fill level.
* This records the minimum fill level observed when a ring
* refill was triggered.
- * @alloc_page_count: RX allocation strategy counter.
- * @alloc_skb_count: RX allocation strategy counter.
+ * @recycle_count: RX buffer recycle counter.
* @slow_fill: Timer used to defer efx_nic_generate_fill_event().
*/
struct efx_rx_queue {
@@ -279,15 +292,22 @@ struct efx_rx_queue {
bool enabled;
bool flush_pending;
- int added_count;
- int notified_count;
- int removed_count;
+ unsigned int added_count;
+ unsigned int notified_count;
+ unsigned int removed_count;
+ unsigned int scatter_n;
+ struct page **page_ring;
+ unsigned int page_add;
+ unsigned int page_remove;
+ unsigned int page_recycle_count;
+ unsigned int page_recycle_failed;
+ unsigned int page_recycle_full;
+ unsigned int page_ptr_mask;
unsigned int max_fill;
unsigned int fast_fill_trigger;
unsigned int min_fill;
unsigned int min_overfill;
- unsigned int alloc_page_count;
- unsigned int alloc_skb_count;
+ unsigned int recycle_count;
struct timer_list slow_fill;
unsigned int slow_fill_count;
};
@@ -336,10 +356,6 @@ enum efx_rx_alloc_method {
* @event_test_cpu: Last CPU to handle interrupt or test event for this channel
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
- * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
- * and diagnostic counters
- * @rx_alloc_push_pages: RX allocation method currently in use for pushing
- * descriptors
* @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
* @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
* @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
@@ -347,6 +363,12 @@ enum efx_rx_alloc_method {
* @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
* @n_rx_overlength: Count of RX_OVERLENGTH errors
* @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
+ * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
+ * lack of descriptors
+ * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
+ * __efx_rx_packet(), or zero if there is none
+ * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
+ * by __efx_rx_packet(), if @rx_pkt_n_frags != 0
* @rx_queue: RX queue for this channel
* @tx_queue: TX queues for this channel
*/
@@ -371,9 +393,6 @@ struct efx_channel {
unsigned int rfs_filters_added;
#endif
- int rx_alloc_level;
- int rx_alloc_push_pages;
-
unsigned n_rx_tobe_disc;
unsigned n_rx_ip_hdr_chksum_err;
unsigned n_rx_tcp_udp_chksum_err;
@@ -381,11 +400,10 @@ struct efx_channel {
unsigned n_rx_frm_trunc;
unsigned n_rx_overlength;
unsigned n_skbuff_leaks;
+ unsigned int n_rx_nodesc_trunc;
- /* Used to pipeline received packets in order to optimise memory
- * access with prefetches.
- */
- struct efx_rx_buffer *rx_pkt;
+ unsigned int rx_pkt_n_frags;
+ unsigned int rx_pkt_index;
struct efx_rx_queue rx_queue;
struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
@@ -410,7 +428,7 @@ struct efx_channel_type {
void (*post_remove)(struct efx_channel *);
void (*get_name)(struct efx_channel *, char *buf, size_t len);
struct efx_channel *(*copy)(const struct efx_channel *);
- void (*receive_skb)(struct efx_channel *, struct sk_buff *);
+ bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
bool keep_eventq;
};
@@ -446,6 +464,7 @@ enum nic_state {
STATE_UNINIT = 0, /* device being probed/removed or is frozen */
STATE_READY = 1, /* hardware ready and netdev registered */
STATE_DISABLED = 2, /* device disabled due to hardware errors */
+ STATE_RECOVERY = 3, /* device recovering from PCI error */
};
/*
@@ -684,10 +703,13 @@ struct vfdi_status;
* @n_channels: Number of channels in use
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
* @n_tx_channels: Number of channels used for TX
- * @rx_buffer_len: RX buffer length
+ * @rx_dma_len: Current maximum RX DMA length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
+ * for use in sk_buff::truesize
* @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS
+ * @rx_scatter: Scatter mode enabled for receives
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
* @irq_status: Interrupt status buffer
@@ -800,10 +822,15 @@ struct efx_nic {
unsigned rss_spread;
unsigned tx_channel_offset;
unsigned n_tx_channels;
- unsigned int rx_buffer_len;
+ unsigned int rx_dma_len;
unsigned int rx_buffer_order;
+ unsigned int rx_buffer_truesize;
+ unsigned int rx_page_buf_step;
+ unsigned int rx_bufs_per_page;
+ unsigned int rx_pages_per_batch;
u8 rx_hash_key[40];
u32 rx_indir_table[128];
+ bool rx_scatter;
unsigned int_error_count;
unsigned long int_error_expire;
@@ -934,8 +961,9 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @evq_ptr_tbl_base: Event queue pointer table base address
* @evq_rptr_tbl_base: Event queue read-pointer table base address
* @max_dma_mask: Maximum possible DMA mask
- * @rx_buffer_hash_size: Size of hash at start of RX buffer
- * @rx_buffer_padding: Size of padding at end of RX buffer
+ * @rx_buffer_hash_size: Size of hash at start of RX packet
+ * @rx_buffer_padding: Size of padding at end of RX packet
+ * @can_rx_scatter: NIC is able to scatter packet to multiple buffers
* @max_interrupt_mode: Highest capability interrupt mode supported
* from &enum efx_init_mode.
* @phys_addr_channels: Number of channels with physically addressed
@@ -983,6 +1011,7 @@ struct efx_nic_type {
u64 max_dma_mask;
unsigned int rx_buffer_hash_size;
unsigned int rx_buffer_padding;
+ bool can_rx_scatter;
unsigned int max_interrupt_mode;
unsigned int phys_addr_channels;
unsigned int timer_period_max;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index eaa8e874a3cb..b0503cd8c2a0 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -305,11 +305,11 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len)
{
buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
- &buffer->dma_addr, GFP_ATOMIC);
+ &buffer->dma_addr,
+ GFP_ATOMIC | __GFP_ZERO);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
- memset(buffer->addr, 0, len);
return 0;
}
@@ -592,12 +592,22 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
struct efx_nic *efx = rx_queue->efx;
bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
bool iscsi_digest_en = is_b0;
+ bool jumbo_en;
+
+ /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
+ * DMA to continue after a PCIe page boundary (and scattering
+ * is not possible). In Falcon B0 and Siena, it enables
+ * scatter.
+ */
+ jumbo_en = !is_b0 || efx->rx_scatter;
netif_dbg(efx, hw, efx->net_dev,
"RX queue %d ring in special buffers %d-%d\n",
efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
rx_queue->rxd.index + rx_queue->rxd.entries - 1);
+ rx_queue->scatter_n = 0;
+
/* Pin RX descriptor ring */
efx_init_special_buffer(efx, &rx_queue->rxd);
@@ -614,8 +624,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
FRF_AZ_RX_DESCQ_SIZE,
__ffs(rx_queue->rxd.entries),
FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
- /* For >=B0 this is scatter so disable */
- FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
+ FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
FRF_AZ_RX_DESCQ_EN, 1);
efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
efx_rx_queue_index(rx_queue));
@@ -969,13 +978,24 @@ static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
EFX_RX_PKT_DISCARD : 0;
}
-/* Handle receive events that are not in-order. */
-static void
+/* Handle receive events that are not in-order. Return true if this
+ * can be handled as a partial packet discard, false if it's more
+ * serious.
+ */
+static bool
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_nic *efx = rx_queue->efx;
unsigned expected, dropped;
+ if (rx_queue->scatter_n &&
+ index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
+ rx_queue->ptr_mask)) {
+ ++channel->n_rx_nodesc_trunc;
+ return true;
+ }
+
expected = rx_queue->removed_count & rx_queue->ptr_mask;
dropped = (index - expected) & rx_queue->ptr_mask;
netif_info(efx, rx_err, efx->net_dev,
@@ -984,6 +1004,7 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+ return false;
}
/* Handle a packet received event
@@ -999,7 +1020,7 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
unsigned expected_ptr;
- bool rx_ev_pkt_ok;
+ bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
u16 flags;
struct efx_rx_queue *rx_queue;
struct efx_nic *efx = channel->efx;
@@ -1007,21 +1028,56 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
if (unlikely(ACCESS_ONCE(efx->reset_pending)))
return;
- /* Basic packet information */
- rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
- rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
- rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
- WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
- WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
+ rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
+ rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
channel->channel);
rx_queue = efx_channel_get_rx_queue(channel);
rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
- expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
- if (unlikely(rx_ev_desc_ptr != expected_ptr))
- efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
+ expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
+ rx_queue->ptr_mask);
+
+ /* Check for partial drops and other errors */
+ if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
+ unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
+ if (rx_ev_desc_ptr != expected_ptr &&
+ !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
+ return;
+
+ /* Discard all pending fragments */
+ if (rx_queue->scatter_n) {
+ efx_rx_packet(
+ rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+ }
+
+ /* Return if there is no new fragment */
+ if (rx_ev_desc_ptr != expected_ptr)
+ return;
+
+ /* Discard new fragment if not SOP */
+ if (!rx_ev_sop) {
+ efx_rx_packet(
+ rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ 1, 0, EFX_RX_PKT_DISCARD);
+ ++rx_queue->removed_count;
+ return;
+ }
+ }
+
+ ++rx_queue->scatter_n;
+ if (rx_ev_cont)
+ return;
+
+ rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
+ rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
+ rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
if (likely(rx_ev_pkt_ok)) {
/* If packet is marked as OK and packet type is TCP/IP or
@@ -1049,7 +1105,11 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
channel->irq_mod_score += 2;
/* Handle received packet */
- efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
+ efx_rx_packet(rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, rx_ev_byte_cnt, flags);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
}
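[The control flow above reduces to a small state machine over @scatter_n, summarised here as a sketch; this is a descriptive comment, not driver code.]

/* Per RX event (summary of the logic above):
 *
 *   expected_ptr = (removed_count + scatter_n) & ptr_mask
 *
 *   index != expected_ptr, or SOP seen while scatter_n != 0 (or vice
 *   versa):
 *       flush any pending fragments as a single EFX_RX_PKT_DISCARD,
 *       then either resynchronise (partial drop) or schedule a reset;
 *       a stray non-SOP fragment is discarded on its own.
 *
 *   otherwise:
 *       ++scatter_n;
 *       JUMBO_CONT set   -> wait for the next fragment event
 *       JUMBO_CONT clear -> deliver scatter_n fragments via
 *                           efx_rx_packet(), then
 *                           removed_count += scatter_n, scatter_n = 0
 */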
/* If this flush done event corresponds to a &struct efx_tx_queue, then
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 3f93624fc273..07f6baa15c0c 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -99,6 +99,9 @@
#define PTP_V2_VERSION_LENGTH 1
#define PTP_V2_VERSION_OFFSET 29
+#define PTP_V2_UUID_LENGTH 8
+#define PTP_V2_UUID_OFFSET 48
+
/* Although PTP V2 UUIDs are comprised of a ClockIdentity (8) and PortNumber (2),
* the MC only captures the last six bytes of the clock identity. These values
* reflect those, not the ones used in the standard. The standard permits
@@ -429,13 +432,10 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
unsigned number_readings = (response_length /
MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN);
unsigned i;
- unsigned min;
- unsigned min_set = 0;
unsigned total;
unsigned ngood = 0;
unsigned last_good = 0;
struct efx_ptp_data *ptp = efx->ptp_data;
- bool min_valid = false;
u32 last_sec;
u32 start_sec;
struct timespec delta;
@@ -443,35 +443,17 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
if (number_readings == 0)
return -EAGAIN;
- /* Find minimum value in this set of results, discarding clearly
- * erroneous results.
+ /* Read the set of results and increment stats for any results that
+ * appear to be erroneous.
*/
for (i = 0; i < number_readings; i++) {
efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]);
synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN;
- if (ptp->timeset[i].window > SYNCHRONISATION_GRANULARITY_NS) {
- if (min_valid) {
- if (ptp->timeset[i].window < min_set)
- min_set = ptp->timeset[i].window;
- } else {
- min_valid = true;
- min_set = ptp->timeset[i].window;
- }
- }
- }
-
- if (min_valid) {
- if (ptp->base_sync_valid && (min_set > ptp->base_sync_ns))
- min = ptp->base_sync_ns;
- else
- min = min_set;
- } else {
- min = SYNCHRONISATION_GRANULARITY_NS;
}
- /* Discard excessively long synchronise durations. The MC times
- * when it finishes reading the host time so the corrected window
- * time should be fairly constant for a given platform.
+ /* Find the last good host-MC synchronization result. The MC takes its
+ * timestamp when it finishes reading the host time, so the corrected
+ * window time should be fairly constant for a given platform.
*/
total = 0;
for (i = 0; i < number_readings; i++)
@@ -489,8 +471,8 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
if (ngood == 0) {
netif_warn(efx, drv, efx->net_dev,
- "PTP no suitable synchronisations %dns %dns\n",
- ptp->base_sync_ns, min_set);
+ "PTP no suitable synchronisations %dns\n",
+ ptp->base_sync_ns);
return -EAGAIN;
}
@@ -1006,43 +988,53 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
* the receive timestamp from the MC - this will probably occur after the
* packet arrival because of the processing in the MC.
*/
-static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
+static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
{
struct efx_nic *efx = channel->efx;
struct efx_ptp_data *ptp = efx->ptp_data;
struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
- u8 *data;
+ u8 *match_data_012, *match_data_345;
unsigned int version;
match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
/* Correct version? */
if (ptp->mode == MC_CMD_PTP_MODE_V1) {
- if (skb->len < PTP_V1_MIN_LENGTH) {
- netif_receive_skb(skb);
- return;
+ if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) {
+ return false;
}
version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
if (version != PTP_VERSION_V1) {
- netif_receive_skb(skb);
- return;
+ return false;
}
+
+ /* PTP V1 uses all six bytes of the UUID to match the packet
+ * to the timestamp
+ */
+ match_data_012 = skb->data + PTP_V1_UUID_OFFSET;
+ match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3;
} else {
- if (skb->len < PTP_V2_MIN_LENGTH) {
- netif_receive_skb(skb);
- return;
+ if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
+ return false;
}
version = skb->data[PTP_V2_VERSION_OFFSET];
-
- BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2);
- BUILD_BUG_ON(PTP_V1_UUID_OFFSET != PTP_V2_MC_UUID_OFFSET);
- BUILD_BUG_ON(PTP_V1_UUID_LENGTH != PTP_V2_MC_UUID_LENGTH);
- BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
- BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);
-
if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
- netif_receive_skb(skb);
- return;
+ return false;
+ }
+
+ /* The original V2 implementation uses bytes 2-7 of
+ * the UUID to match the packet to the timestamp. This
+ * discards two of the bytes of the MAC address used
+ * to create the UUID (SF bug 33070). The PTP V2
+ * enhanced mode fixes this issue and uses bytes 0-2
+ * and bytes 5-7 of the UUID.
+ */
+ match_data_345 = skb->data + PTP_V2_UUID_OFFSET + 5;
+ if (ptp->mode == MC_CMD_PTP_MODE_V2) {
+ match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 2;
+ } else {
+ match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 0;
+ BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
}
}
@@ -1056,14 +1048,19 @@ static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
timestamps = skb_hwtstamps(skb);
memset(timestamps, 0, sizeof(*timestamps));
+ /* We expect the sequence number to be in the same position in
+ * the packet for PTP V1 and V2
+ */
+ BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
+ BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);
+
/* Extract UUID/Sequence information */
- data = skb->data + PTP_V1_UUID_OFFSET;
- match->words[0] = (data[0] |
- (data[1] << 8) |
- (data[2] << 16) |
- (data[3] << 24));
- match->words[1] = (data[4] |
- (data[5] << 8) |
+ match->words[0] = (match_data_012[0] |
+ (match_data_012[1] << 8) |
+ (match_data_012[2] << 16) |
+ (match_data_345[0] << 24));
+ match->words[1] = (match_data_345[1] |
+ (match_data_345[2] << 8) |
(skb->data[PTP_V1_SEQUENCE_OFFSET +
PTP_V1_SEQUENCE_LENGTH - 1] <<
16));
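[A worked example of the packing above, writing the eight UUID bytes as u0..u7 and the final sequence byte as s.]

/* V2 enhanced mode (match_data_012 -> u0, match_data_345 -> u5):
 *   words[0] = u0 | u1 << 8 | u2 << 16 | u5 << 24
 *   words[1] = u6 | u7 << 8 | s << 16
 * Plain V2 mode (match_data_012 -> u2) matches bytes 2-7 instead:
 *   words[0] = u2 | u3 << 8 | u4 << 16 | u5 << 24   (SF bug 33070)
 */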
@@ -1073,6 +1070,8 @@ static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
skb_queue_tail(&ptp->rxq, skb);
queue_work(ptp->workwq, &ptp->work);
+
+ return true;
}
/* Transmit a PTP packet. This has to be transmitted by the MC
@@ -1167,7 +1166,7 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
* timestamped
*/
init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
- new_mode = MC_CMD_PTP_MODE_V2;
+ new_mode = MC_CMD_PTP_MODE_V2_ENHANCED;
enable_wanted = true;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
@@ -1186,7 +1185,14 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
if (init->tx_type != HWTSTAMP_TX_OFF)
enable_wanted = true;
+ /* Old versions of the firmware do not support the improved
+ * UUID filtering option (SF bug 33070). If the firmware does
+ * not accept the enhanced mode, fall back to the standard PTP
+ * v2 UUID filtering.
+ */
rc = efx_ptp_change_mode(efx, enable_wanted, new_mode);
+ if ((rc != 0) && (new_mode == MC_CMD_PTP_MODE_V2_ENHANCED))
+ rc = efx_ptp_change_mode(efx, enable_wanted, MC_CMD_PTP_MODE_V2);
if (rc != 0)
return rc;
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index bb579a6128c8..e73e30bac10e 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -16,6 +16,7 @@
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
+#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
@@ -24,85 +25,39 @@
#include "selftest.h"
#include "workarounds.h"
-/* Number of RX descriptors pushed at once. */
-#define EFX_RX_BATCH 8
+/* Preferred number of descriptors to fill at once */
+#define EFX_RX_PREFERRED_BATCH 8U
-/* Maximum size of a buffer sharing a page */
-#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
+/* Number of RX buffers to recycle pages for. When creating the RX page recycle
+ * ring, this number is divided by the number of buffers per page to calculate
+ * the number of pages to store in the RX page recycle ring.
+ */
+#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
+#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
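[A sketch of how these sizes become a ring length. recycle_ring_pages_sketch is illustrative; the driver's own setup code, which also allocates page_ring and sets page_ptr_mask, is part of this patch but not visible in these hunks.]

static unsigned int recycle_ring_pages_sketch(bool have_iommu,
					      unsigned int bufs_per_page)
{
	unsigned int bufs = have_iommu ? EFX_RECYCLE_RING_SIZE_IOMMU :
					 EFX_RECYCLE_RING_SIZE_NOIOMMU;

	/* Round up to a power of two so that page_ptr_mask = size - 1
	 * can be used as an index mask.
	 */
	return roundup_pow_of_two(bufs / bufs_per_page);
}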
/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 64u
-/*
- * rx_alloc_method - RX buffer allocation method
- *
- * This driver supports two methods for allocating and using RX buffers:
- * each RX buffer may be backed by an skb or by an order-n page.
- *
- * When GRO is in use then the second method has a lower overhead,
- * since we don't have to allocate then free skbs on reassembled frames.
- *
- * Values:
- * - RX_ALLOC_METHOD_AUTO = 0
- * - RX_ALLOC_METHOD_SKB = 1
- * - RX_ALLOC_METHOD_PAGE = 2
- *
- * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
- * controlled by the parameters below.
- *
- * - Since pushing and popping descriptors are separated by the rx_queue
- * size, so the watermarks should be ~rxd_size.
- * - The performance win by using page-based allocation for GRO is less
- * than the performance hit of using page-based allocation of non-GRO,
- * so the watermarks should reflect this.
- *
- * Per channel we maintain a single variable, updated by each channel:
- *
- * rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
- * RX_ALLOC_FACTOR_SKB)
- * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
- * limits the hysteresis), and update the allocation strategy:
- *
- * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
- * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
- */
-static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
-
-#define RX_ALLOC_LEVEL_GRO 0x2000
-#define RX_ALLOC_LEVEL_MAX 0x3000
-#define RX_ALLOC_FACTOR_GRO 1
-#define RX_ALLOC_FACTOR_SKB (-2)
-
/* This is the percentage fill level below which new RX descriptors
* will be added to the RX descriptor ring.
*/
static unsigned int rx_refill_threshold;
+/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
+#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
+ EFX_RX_USR_BUF_SIZE)
+
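[Worked arithmetic, hedged because EFX_MAX_FRAME_LEN() is defined outside this diff.]

/* With EFX_MAX_MTU = 9216 and EFX_RX_USR_BUF_SIZE = 1824, and assuming
 * EFX_MAX_FRAME_LEN() adds only a few tens of bytes of Ethernet
 * header/VLAN/FCS overhead, EFX_RX_MAX_FRAGS = DIV_ROUND_UP(~9240, 1824)
 * = 6, making EFX_RXD_HEAD_ROOM below 1 + 6 = 7 descriptors.
 */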
/*
* RX maximum head room required.
*
- * This must be at least 1 to prevent overflow and at least 2 to allow
- * pipelined receives.
+ * This must be at least 1 to prevent overflow, plus one packet-worth
+ * to allow pipelined receives.
*/
-#define EFX_RXD_HEAD_ROOM 2
+#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
-/* Offset of ethernet header within page */
-static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
- struct efx_rx_buffer *buf)
+static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
- return buf->page_offset + efx->type->rx_buffer_hash_size;
-}
-static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
-{
- return PAGE_SIZE << efx->rx_buffer_order;
-}
-
-static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
-{
- if (buf->flags & EFX_RX_BUF_PAGE)
- return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
- else
- return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
+ return page_address(buf->page) + buf->page_offset;
}
static inline u32 efx_rx_buf_hash(const u8 *eh)
@@ -119,66 +74,81 @@ static inline u32 efx_rx_buf_hash(const u8 *eh)
#endif
}
-/**
- * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
- *
- * @rx_queue: Efx RX queue
- *
- * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
- * struct efx_rx_buffer for each one. Return a negative error code or 0
- * on success. May fail having only inserted fewer than EFX_RX_BATCH
- * buffers.
- */
-static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
+static inline struct efx_rx_buffer *
+efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
+{
+ if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
+ return efx_rx_buffer(rx_queue, 0);
+ else
+ return rx_buf + 1;
+}
+
+static inline void efx_sync_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int len)
+{
+ dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
+ DMA_FROM_DEVICE);
+}
+
+void efx_rx_config_page_split(struct efx_nic *efx)
+{
+ efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + EFX_PAGE_IP_ALIGN,
+ L1_CACHE_BYTES);
+ efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
+ ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
+ efx->rx_page_buf_step);
+ efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
+ efx->rx_bufs_per_page;
+ efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
+ efx->rx_bufs_per_page);
+}
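[A worked example for this helper, assuming PAGE_SIZE = 4096, order-0 pages, L1_CACHE_BYTES = 64 and EFX_PAGE_IP_ALIGN = 2; only the inputs are assumptions, the arithmetic follows the code above.]

/*   rx_dma_len         = 1824 (EFX_RX_USR_BUF_SIZE, when scattering)
 *   rx_page_buf_step   = ALIGN(1824 + 2, 64)           = 1856
 *   rx_bufs_per_page   = (4096 - sizeof(state)) / 1856 = 2
 *                        (for any state header up to 384 bytes)
 *   rx_buffer_truesize = 4096 / 2                      = 2048
 *   rx_pages_per_batch = DIV_ROUND_UP(8, 2)            = 4
 */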
+
+/* Check the RX page recycle ring for a page that can be reused. */
+static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
- struct net_device *net_dev = efx->net_dev;
- struct efx_rx_buffer *rx_buf;
- struct sk_buff *skb;
- int skb_len = efx->rx_buffer_len;
- unsigned index, count;
+ struct page *page;
+ struct efx_rx_page_state *state;
+ unsigned index;
- for (count = 0; count < EFX_RX_BATCH; ++count) {
- index = rx_queue->added_count & rx_queue->ptr_mask;
- rx_buf = efx_rx_buffer(rx_queue, index);
-
- rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
- if (unlikely(!skb))
- return -ENOMEM;
-
- /* Adjust the SKB for padding */
- skb_reserve(skb, NET_IP_ALIGN);
- rx_buf->len = skb_len - NET_IP_ALIGN;
- rx_buf->flags = 0;
-
- rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
- skb->data, rx_buf->len,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
- rx_buf->dma_addr))) {
- dev_kfree_skb_any(skb);
- rx_buf->u.skb = NULL;
- return -EIO;
- }
+ index = rx_queue->page_remove & rx_queue->page_ptr_mask;
+ page = rx_queue->page_ring[index];
+ if (page == NULL)
+ return NULL;
+
+ rx_queue->page_ring[index] = NULL;
+ /* page_remove cannot exceed page_add. */
+ if (rx_queue->page_remove != rx_queue->page_add)
+ ++rx_queue->page_remove;
- ++rx_queue->added_count;
- ++rx_queue->alloc_skb_count;
+ /* If page_count is 1 then we hold the only reference to this page. */
+ if (page_count(page) == 1) {
+ ++rx_queue->page_recycle_count;
+ return page;
+ } else {
+ state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ ++rx_queue->page_recycle_failed;
}
- return 0;
+ return NULL;
}
/**
- * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
+ * efx_init_rx_buffers - create a batch of page-based RX buffers
*
* @rx_queue: Efx RX queue
*
- * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
- * and populates struct efx_rx_buffers for each one. Return a negative error
- * code or 0 on success. If a single page can be split between two buffers,
- * then the page will either be inserted fully, or not at at all.
+ * This allocates a batch of pages, maps them for DMA, and populates
+ * a struct efx_rx_buffer for each one. Return a negative error code or
+ * 0 on success. If a single page can be used for multiple buffers,
+ * then the page will either be inserted fully, or not at all.
*/
-static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
@@ -188,150 +158,140 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
dma_addr_t dma_addr;
unsigned index, count;
- /* We can split a page between two buffers */
- BUILD_BUG_ON(EFX_RX_BATCH & 1);
-
- for (count = 0; count < EFX_RX_BATCH; ++count) {
- page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
- efx->rx_buffer_order);
- if (unlikely(page == NULL))
- return -ENOMEM;
- dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
- efx_rx_buf_size(efx),
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
- __free_pages(page, efx->rx_buffer_order);
- return -EIO;
+ count = 0;
+ do {
+ page = efx_reuse_page(rx_queue);
+ if (page == NULL) {
+ page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+ efx->rx_buffer_order);
+ if (unlikely(page == NULL))
+ return -ENOMEM;
+ dma_addr =
+ dma_map_page(&efx->pci_dev->dev, page, 0,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+ dma_addr))) {
+ __free_pages(page, efx->rx_buffer_order);
+ return -EIO;
+ }
+ state = page_address(page);
+ state->dma_addr = dma_addr;
+ } else {
+ state = page_address(page);
+ dma_addr = state->dma_addr;
}
- state = page_address(page);
- state->refcnt = 0;
- state->dma_addr = dma_addr;
dma_addr += sizeof(struct efx_rx_page_state);
page_offset = sizeof(struct efx_rx_page_state);
- split:
- index = rx_queue->added_count & rx_queue->ptr_mask;
- rx_buf = efx_rx_buffer(rx_queue, index);
- rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
- rx_buf->u.page = page;
- rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
- rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
- rx_buf->flags = EFX_RX_BUF_PAGE;
- ++rx_queue->added_count;
- ++rx_queue->alloc_page_count;
- ++state->refcnt;
-
- if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
- /* Use the second half of the page */
+ do {
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
+ rx_buf->page = page;
+ rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
+ rx_buf->len = efx->rx_dma_len;
+ rx_buf->flags = 0;
+ ++rx_queue->added_count;
get_page(page);
- dma_addr += (PAGE_SIZE >> 1);
- page_offset += (PAGE_SIZE >> 1);
- ++count;
- goto split;
- }
- }
+ dma_addr += efx->rx_page_buf_step;
+ page_offset += efx->rx_page_buf_step;
+ } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
+
+ rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
+ } while (++count < efx->rx_pages_per_batch);
return 0;
}
+/* Unmap a DMA-mapped page. This function is only called for the final RX
+ * buffer in a page.
+ */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
- struct efx_rx_buffer *rx_buf,
- unsigned int used_len)
+ struct efx_rx_buffer *rx_buf)
{
- if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
- struct efx_rx_page_state *state;
-
- state = page_address(rx_buf->u.page);
- if (--state->refcnt == 0) {
- dma_unmap_page(&efx->pci_dev->dev,
- state->dma_addr,
- efx_rx_buf_size(efx),
- DMA_FROM_DEVICE);
- } else if (used_len) {
- dma_sync_single_for_cpu(&efx->pci_dev->dev,
- rx_buf->dma_addr, used_len,
- DMA_FROM_DEVICE);
- }
- } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
- dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
- rx_buf->len, DMA_FROM_DEVICE);
+ struct page *page = rx_buf->page;
+
+ if (page) {
+ struct efx_rx_page_state *state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev,
+ state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
}
}
-static void efx_free_rx_buffer(struct efx_nic *efx,
- struct efx_rx_buffer *rx_buf)
+static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
{
- if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
- __free_pages(rx_buf->u.page, efx->rx_buffer_order);
- rx_buf->u.page = NULL;
- } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
- dev_kfree_skb_any(rx_buf->u.skb);
- rx_buf->u.skb = NULL;
+ if (rx_buf->page) {
+ put_page(rx_buf->page);
+ rx_buf->page = NULL;
}
}
-static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
+/* Attempt to recycle the page if there is an RX recycle ring; the page can
+ * only be added if this is the final RX buffer, to prevent pages being used in
+ * the descriptor ring and appearing in the recycle ring simultaneously.
+ */
+static void efx_recycle_rx_page(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf)
{
- efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
- efx_free_rx_buffer(rx_queue->efx, rx_buf);
-}
+ struct page *page = rx_buf->page;
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned index;
-/* Attempt to resurrect the other receive buffer that used to share this page,
- * which had previously been passed up to the kernel and freed. */
-static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
-{
- struct efx_rx_page_state *state = page_address(rx_buf->u.page);
- struct efx_rx_buffer *new_buf;
- unsigned fill_level, index;
-
- /* +1 because efx_rx_packet() incremented removed_count. +1 because
- * we'd like to insert an additional descriptor whilst leaving
- * EFX_RXD_HEAD_ROOM for the non-recycle path */
- fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
- if (unlikely(fill_level > rx_queue->max_fill)) {
- /* We could place "state" on a list, and drain the list in
- * efx_fast_push_rx_descriptors(). For now, this will do. */
+ /* Only recycle the page after processing the final buffer. */
+ if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
return;
- }
- ++state->refcnt;
- get_page(rx_buf->u.page);
+ index = rx_queue->page_add & rx_queue->page_ptr_mask;
+ if (rx_queue->page_ring[index] == NULL) {
+ unsigned read_index = rx_queue->page_remove &
+ rx_queue->page_ptr_mask;
- index = rx_queue->added_count & rx_queue->ptr_mask;
- new_buf = efx_rx_buffer(rx_queue, index);
- new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
- new_buf->u.page = rx_buf->u.page;
- new_buf->len = rx_buf->len;
- new_buf->flags = EFX_RX_BUF_PAGE;
- ++rx_queue->added_count;
+ /* The next slot in the recycle ring is available, but
+ * increment page_remove if the read pointer currently
+ * points here.
+ */
+ if (read_index == index)
+ ++rx_queue->page_remove;
+ rx_queue->page_ring[index] = page;
+ ++rx_queue->page_add;
+ return;
+ }
+ ++rx_queue->page_recycle_full;
+ efx_unmap_rx_buffer(efx, rx_buf);
+ put_page(rx_buf->page);
}
-/* Recycle the given rx buffer directly back into the rx_queue. There is
- * always room to add this buffer, because we've just popped a buffer. */
-static void efx_recycle_rx_buffer(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf)
+static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
{
- struct efx_nic *efx = channel->efx;
- struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
- struct efx_rx_buffer *new_buf;
- unsigned index;
-
- rx_buf->flags &= EFX_RX_BUF_PAGE;
-
- if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
- efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
- page_count(rx_buf->u.page) == 1)
- efx_resurrect_rx_buffer(rx_queue, rx_buf);
+ /* Release the page reference we hold for the buffer. */
+ if (rx_buf->page)
+ put_page(rx_buf->page);
+
+ /* If this is the last buffer in a page, unmap and free it. */
+ if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
+ efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+ efx_free_rx_buffer(rx_buf);
+ }
+ rx_buf->page = NULL;
+}
- index = rx_queue->added_count & rx_queue->ptr_mask;
- new_buf = efx_rx_buffer(rx_queue, index);
+/* Recycle the pages that are used by buffers that have just been received. */
+static void efx_recycle_rx_buffers(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
- memcpy(new_buf, rx_buf, sizeof(*new_buf));
- rx_buf->u.page = NULL;
- ++rx_queue->added_count;
+ do {
+ efx_recycle_rx_page(channel, rx_buf);
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ } while (--n_frags);
}
/**
@@ -348,8 +308,8 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
*/
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
- struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
- unsigned fill_level;
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int fill_level, batch_size;
int space, rc = 0;
/* Calculate current fill level, and exit if we don't need to fill */
@@ -364,28 +324,26 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
rx_queue->min_fill = fill_level;
}
+ batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
space = rx_queue->max_fill - fill_level;
- EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);
+ EFX_BUG_ON_PARANOID(space < batch_size);
netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
"RX queue %d fast-filling descriptor ring from"
- " level %d to level %d using %s allocation\n",
+ " level %d to level %d\n",
efx_rx_queue_index(rx_queue), fill_level,
- rx_queue->max_fill,
- channel->rx_alloc_push_pages ? "page" : "skb");
+ rx_queue->max_fill);
+
do {
- if (channel->rx_alloc_push_pages)
- rc = efx_init_rx_buffers_page(rx_queue);
- else
- rc = efx_init_rx_buffers_skb(rx_queue);
+ rc = efx_init_rx_buffers(rx_queue);
if (unlikely(rc)) {
/* Ensure that we don't leave the rx queue empty */
if (rx_queue->added_count == rx_queue->removed_count)
efx_schedule_slow_fill(rx_queue);
goto out;
}
- } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
+ } while ((space -= batch_size) >= batch_size);
netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
"RX queue %d fast-filled descriptor ring "
@@ -408,7 +366,7 @@ void efx_rx_slow_fill(unsigned long context)
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf,
- int len, bool *leak_packet)
+ int len)
{
struct efx_nic *efx = rx_queue->efx;
unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
@@ -428,11 +386,6 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
"RX event (0x%x > 0x%x+0x%x). Leaking\n",
efx_rx_queue_index(rx_queue), len, max_len,
efx->type->rx_buffer_padding);
- /* If this buffer was skb-allocated, then the meta
- * data at the end of the skb will be trashed. So
- * we have no choice but to leak the fragment.
- */
- *leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
} else {
if (net_ratelimit())
@@ -448,212 +401,238 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
/* Pass a received packet up through GRO. GRO can handle pages
* regardless of checksum state and skbs with a good checksum.
*/
-static void efx_rx_packet_gro(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf,
- const u8 *eh)
+static void
+efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags, u8 *eh)
{
struct napi_struct *napi = &channel->napi_str;
gro_result_t gro_result;
+ struct efx_nic *efx = channel->efx;
+ struct sk_buff *skb;
- if (rx_buf->flags & EFX_RX_BUF_PAGE) {
- struct efx_nic *efx = channel->efx;
- struct page *page = rx_buf->u.page;
- struct sk_buff *skb;
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb)) {
+ while (n_frags--) {
+ put_page(rx_buf->page);
+ rx_buf->page = NULL;
+ rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+ }
+ return;
+ }
- rx_buf->u.page = NULL;
+ if (efx->net_dev->features & NETIF_F_RXHASH)
+ skb->rxhash = efx_rx_buf_hash(eh);
+ skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+
+ for (;;) {
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buf->page, rx_buf->page_offset,
+ rx_buf->len);
+ rx_buf->page = NULL;
+ skb->len += rx_buf->len;
+ if (skb_shinfo(skb)->nr_frags == n_frags)
+ break;
+
+ rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+ }
- skb = napi_get_frags(napi);
- if (!skb) {
- put_page(page);
- return;
- }
+ skb->data_len = skb->len;
+ skb->truesize += n_frags * efx->rx_buffer_truesize;
+
+ skb_record_rx_queue(skb, channel->rx_queue.core_index);
+
+ gro_result = napi_gro_frags(napi);
+ if (gro_result != GRO_DROP)
+ channel->irq_mod_score += 2;
+}
- if (efx->net_dev->features & NETIF_F_RXHASH)
- skb->rxhash = efx_rx_buf_hash(eh);
+/* Allocate and construct an SKB around page fragments */
+static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags,
+ u8 *eh, int hdr_len)
+{
+ struct efx_nic *efx = channel->efx;
+ struct sk_buff *skb;
- skb_fill_page_desc(skb, 0, page,
- efx_rx_buf_offset(efx, rx_buf), rx_buf->len);
+ /* Allocate an SKB to store the headers */
+ skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
+ if (unlikely(skb == NULL))
+ return NULL;
- skb->len = rx_buf->len;
- skb->data_len = rx_buf->len;
- skb->truesize += rx_buf->len;
- skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
- CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+ EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
- skb_record_rx_queue(skb, channel->rx_queue.core_index);
+ skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
+ memcpy(__skb_put(skb, hdr_len), eh, hdr_len);
- gro_result = napi_gro_frags(napi);
- } else {
- struct sk_buff *skb = rx_buf->u.skb;
+ /* Append the remaining page(s) onto the frag list */
+ if (rx_buf->len > hdr_len) {
+ rx_buf->page_offset += hdr_len;
+ rx_buf->len -= hdr_len;
- EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
- rx_buf->u.skb = NULL;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ for (;;) {
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buf->page, rx_buf->page_offset,
+ rx_buf->len);
+ rx_buf->page = NULL;
+ skb->len += rx_buf->len;
+ skb->data_len += rx_buf->len;
+ if (skb_shinfo(skb)->nr_frags == n_frags)
+ break;
- gro_result = napi_gro_receive(napi, skb);
+ rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+ }
+ } else {
+ __free_pages(rx_buf->page, efx->rx_buffer_order);
+ rx_buf->page = NULL;
+ n_frags = 0;
}
- if (gro_result == GRO_NORMAL) {
- channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
- } else if (gro_result != GRO_DROP) {
- channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
- channel->irq_mod_score += 2;
- }
+ skb->truesize += n_frags * efx->rx_buffer_truesize;
+
+ /* Move past the ethernet header */
+ skb->protocol = eth_type_trans(skb, efx->net_dev);
+
+ return skb;
}
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
- unsigned int len, u16 flags)
+ unsigned int n_frags, unsigned int len, u16 flags)
{
struct efx_nic *efx = rx_queue->efx;
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_rx_buffer *rx_buf;
- bool leak_packet = false;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->flags |= flags;
- /* This allows the refill path to post another buffer.
- * EFX_RXD_HEAD_ROOM ensures that the slot we are using
- * isn't overwritten yet.
- */
- rx_queue->removed_count++;
-
- /* Validate the length encoded in the event vs the descriptor pushed */
- efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);
+ /* Validate the number of fragments and completed length */
+ if (n_frags == 1) {
+ efx_rx_packet__check_len(rx_queue, rx_buf, len);
+ } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
+ unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) ||
+ unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) ||
+ unlikely(!efx->rx_scatter)) {
+ /* If this isn't an explicit discard request, either
+ * the hardware or the driver is broken.
+ */
+ WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
+ rx_buf->flags |= EFX_RX_PKT_DISCARD;
+ }
netif_vdbg(efx, rx_status, efx->net_dev,
- "RX queue %d received id %x at %llx+%x %s%s\n",
+ "RX queue %d received ids %x-%x len %d %s%s\n",
efx_rx_queue_index(rx_queue), index,
- (unsigned long long)rx_buf->dma_addr, len,
+ (index + n_frags - 1) & rx_queue->ptr_mask, len,
(rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
(rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
- /* Discard packet, if instructed to do so */
+ /* Discard packet, if instructed to do so. Process the
+ * previous receive first.
+ */
if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
- if (unlikely(leak_packet))
- channel->n_skbuff_leaks++;
- else
- efx_recycle_rx_buffer(channel, rx_buf);
-
- /* Don't hold off the previous receive */
- rx_buf = NULL;
- goto out;
+ efx_rx_flush_packet(channel);
+ put_page(rx_buf->page);
+ efx_recycle_rx_buffers(channel, rx_buf, n_frags);
+ return;
}
- /* Release and/or sync DMA mapping - assumes all RX buffers
- * consumed in-order per RX queue
+ if (n_frags == 1)
+ rx_buf->len = len;
+
+ /* Release and/or sync the DMA mapping - assumes all RX buffers
+ * consumed in-order per RX queue.
*/
- efx_unmap_rx_buffer(efx, rx_buf, len);
+ efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
/* Prefetch nice and early so data will (hopefully) be in cache by
* the time we look at it.
*/
- prefetch(efx_rx_buf_eh(efx, rx_buf));
+ prefetch(efx_rx_buf_va(rx_buf));
+
+ rx_buf->page_offset += efx->type->rx_buffer_hash_size;
+ rx_buf->len -= efx->type->rx_buffer_hash_size;
+
+ if (n_frags > 1) {
+ /* Release/sync DMA mapping for additional fragments.
+ * Fix length for last fragment.
+ */
+ unsigned int tail_frags = n_frags - 1;
+
+ for (;;) {
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ if (--tail_frags == 0)
+ break;
+ efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE);
+ }
+ rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE;
+ efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
+ }
+
+ /* All fragments have been DMA-synced, so recycle buffers and pages. */
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ efx_recycle_rx_buffers(channel, rx_buf, n_frags);
/* Pipeline receives so that we give time for packet headers to be
* prefetched into cache.
*/
- rx_buf->len = len - efx->type->rx_buffer_hash_size;
-out:
- if (channel->rx_pkt)
- __efx_rx_packet(channel, channel->rx_pkt);
- channel->rx_pkt = rx_buf;
+ efx_rx_flush_packet(channel);
+ channel->rx_pkt_n_frags = n_frags;
+ channel->rx_pkt_index = index;
}
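For reference, the bounds enforced by the multi-fragment validation above can be stated as a single predicate. This is a sketch only; efx_rx_len_plausible is a hypothetical helper, and EFX_RX_USR_BUF_SIZE is the per-fragment usable buffer size defined elsewhere in the driver:

/* A completed length is plausible for n_frags fragments only if the packet
 * both needs the last fragment and fits within all of them (sketch):
 */
static inline bool efx_rx_len_plausible(unsigned int n_frags, unsigned int len)
{
	return len > (n_frags - 1) * EFX_RX_USR_BUF_SIZE &&
	       len <= n_frags * EFX_RX_USR_BUF_SIZE;
}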
-static void efx_rx_deliver(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf)
+static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
{
struct sk_buff *skb;
+ u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);
- /* We now own the SKB */
- skb = rx_buf->u.skb;
- rx_buf->u.skb = NULL;
+ skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
+ if (unlikely(skb == NULL)) {
+ efx_free_rx_buffer(rx_buf);
+ return;
+ }
+ skb_record_rx_queue(skb, channel->rx_queue.core_index);
/* Set the SKB flags */
skb_checksum_none_assert(skb);
- /* Record the rx_queue */
- skb_record_rx_queue(skb, channel->rx_queue.core_index);
-
- /* Pass the packet up */
if (channel->type->receive_skb)
- channel->type->receive_skb(channel, skb);
- else
- netif_receive_skb(skb);
+ if (channel->type->receive_skb(channel, skb))
+ return;
- /* Update allocation strategy method */
- channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
+ /* Pass the packet up */
+ netif_receive_skb(skb);
}
/* Handle a received packet. Second half: Touches packet payload. */
-void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
+void __efx_rx_packet(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
- u8 *eh = efx_rx_buf_eh(efx, rx_buf);
+ struct efx_rx_buffer *rx_buf =
+ efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
+ u8 *eh = efx_rx_buf_va(rx_buf);
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/
if (unlikely(efx->loopback_selftest)) {
efx_loopback_rx_packet(efx, eh, rx_buf->len);
- efx_free_rx_buffer(efx, rx_buf);
- return;
- }
-
- if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
- struct sk_buff *skb = rx_buf->u.skb;
-
- prefetch(skb_shinfo(skb));
-
- skb_reserve(skb, efx->type->rx_buffer_hash_size);
- skb_put(skb, rx_buf->len);
-
- if (efx->net_dev->features & NETIF_F_RXHASH)
- skb->rxhash = efx_rx_buf_hash(eh);
-
- /* Move past the ethernet header. rx_buf->data still points
- * at the ethernet header */
- skb->protocol = eth_type_trans(skb, efx->net_dev);
-
- skb_record_rx_queue(skb, channel->rx_queue.core_index);
+ efx_free_rx_buffer(rx_buf);
+ goto out;
}
if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
- if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) &&
- !channel->type->receive_skb)
- efx_rx_packet_gro(channel, rx_buf, eh);
+ if (!channel->type->receive_skb)
+ efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
else
- efx_rx_deliver(channel, rx_buf);
-}
-
-void efx_rx_strategy(struct efx_channel *channel)
-{
- enum efx_rx_alloc_method method = rx_alloc_method;
-
- if (channel->type->receive_skb) {
- channel->rx_alloc_push_pages = false;
- return;
- }
-
- /* Only makes sense to use page based allocation if GRO is enabled */
- if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
- method = RX_ALLOC_METHOD_SKB;
- } else if (method == RX_ALLOC_METHOD_AUTO) {
- /* Constrain the rx_alloc_level */
- if (channel->rx_alloc_level < 0)
- channel->rx_alloc_level = 0;
- else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
- channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
-
- /* Decide on the allocation method */
- method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
- RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
- }
-
- /* Push the option */
- channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
+ efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
+out:
+ channel->rx_pkt_n_frags = 0;
}
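After this refactor, a non-zero channel->rx_pkt_n_frags marks the pipelined packet, and efx_rx_flush_packet() is presumably a thin wrapper along these lines (a sketch; the real helper lives in a driver header, not in this hunk):

static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
	/* Deliver the previously received packet, if one is pending */
	if (channel->rx_pkt_n_frags)
		__efx_rx_packet(channel);
}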
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
@@ -683,9 +662,32 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
kfree(rx_queue->buffer);
rx_queue->buffer = NULL;
}
+
return rc;
}
+static void efx_init_rx_recycle_ring(struct efx_nic *efx,
+ struct efx_rx_queue *rx_queue)
+{
+ unsigned int bufs_in_recycle_ring, page_ring_size;
+
+ /* Set the RX recycle ring size */
+#ifdef CONFIG_PPC64
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
+#else
+ if (efx->pci_dev->dev.iommu_group)
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
+ else
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
+#endif /* CONFIG_PPC64 */
+
+ page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
+ efx->rx_bufs_per_page);
+ rx_queue->page_ring = kcalloc(page_ring_size,
+ sizeof(*rx_queue->page_ring), GFP_KERNEL);
+ rx_queue->page_ptr_mask = page_ring_size - 1;
+}
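As a worked example of the sizing above (values and the page_index variable are purely illustrative): with bufs_in_recycle_ring = 16 and rx_bufs_per_page = 2, the ring holds roundup_pow_of_two(16 / 2) = 8 pages, so indices wrap with a cheap AND rather than a modulo:

unsigned int page_ring_size = roundup_pow_of_two(16 / 2);	/* = 8 */
unsigned int page_ptr_mask  = page_ring_size - 1;		/* = 7 */
unsigned int slot = page_index & page_ptr_mask;	/* wraps without '%' */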
+
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
@@ -699,10 +701,18 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->notified_count = 0;
rx_queue->removed_count = 0;
rx_queue->min_fill = -1U;
+ efx_init_rx_recycle_ring(efx, rx_queue);
+
+ rx_queue->page_remove = 0;
+ rx_queue->page_add = rx_queue->page_ptr_mask + 1;
+ rx_queue->page_recycle_count = 0;
+ rx_queue->page_recycle_failed = 0;
+ rx_queue->page_recycle_full = 0;
/* Initialise limit fields */
max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
- max_trigger = max_fill - EFX_RX_BATCH;
+ max_trigger =
+ max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
if (rx_refill_threshold != 0) {
trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
if (trigger > max_trigger)
@@ -722,6 +732,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
int i;
+ struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
@@ -733,13 +744,32 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
del_timer_sync(&rx_queue->slow_fill);
efx_nic_fini_rx(rx_queue);
- /* Release RX buffers NB start at index 0 not current HW ptr */
+ /* Release RX buffers from the current read ptr to the write ptr */
if (rx_queue->buffer) {
- for (i = 0; i <= rx_queue->ptr_mask; i++) {
- rx_buf = efx_rx_buffer(rx_queue, i);
+ for (i = rx_queue->removed_count; i < rx_queue->added_count;
+ i++) {
+ unsigned index = i & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
efx_fini_rx_buffer(rx_queue, rx_buf);
}
}
+
+ /* Unmap and release the pages in the recycle ring. Remove the ring. */
+ for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
+ struct page *page = rx_queue->page_ring[i];
+ struct efx_rx_page_state *state;
+
+ if (page == NULL)
+ continue;
+
+ state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ }
+ kfree(rx_queue->page_ring);
+ rx_queue->page_ring = NULL;
}
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
@@ -754,9 +784,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
}
-module_param(rx_alloc_method, int, 0644);
-MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
-
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
"RX descriptor ring refill threshold (%)");
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index ba40f67e4f05..51669244d154 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -202,7 +202,7 @@ out:
static enum reset_type siena_map_reset_reason(enum reset_type reason)
{
- return RESET_TYPE_ALL;
+ return RESET_TYPE_RECOVER_OR_ALL;
}
static int siena_map_reset_flags(u32 *flags)
@@ -245,6 +245,22 @@ static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
return efx_mcdi_reset_port(efx);
}
+#ifdef CONFIG_EEH
+/* When a PCI device is isolated from the bus, a subsequent MMIO read is
+ * required for the kernel EEH mechanisms to notice. As the Solarflare driver
+ * was written to minimise MMIO reads (for latency), a periodic call to check
+ * the EEH status of the device is required so that device recovery can happen
+ * in a timely fashion.
+ */
+static void siena_monitor(struct efx_nic *efx)
+{
+ struct eeh_dev *eehdev =
+ of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));
+
+ eeh_dev_check_failure(eehdev);
+}
+#endif
+
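The new hook is expected to be driven from the driver's periodic monitor work, along these lines (a hypothetical caller, shown only to illustrate how the EEH check gets scheduled):

static void example_monitor_tick(struct efx_nic *efx)
{
	/* On Siena with CONFIG_EEH this resolves to siena_monitor() */
	if (efx->type->monitor)
		efx->type->monitor(efx);
}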
static int siena_probe_nvconfig(struct efx_nic *efx)
{
u32 caps = 0;
@@ -398,6 +414,8 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_USR_BUF_SIZE,
+ EFX_RX_USR_BUF_SIZE >> 5);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
/* Set hash key for IPv4 */
@@ -665,7 +683,11 @@ const struct efx_nic_type siena_a0_nic_type = {
.init = siena_init_nic,
.dimension_resources = siena_dimension_resources,
.fini = efx_port_dummy_op_void,
+#ifdef CONFIG_EEH
+ .monitor = siena_monitor,
+#else
.monitor = NULL,
+#endif
.map_reset_reason = siena_map_reset_reason,
.map_reset_flags = siena_map_reset_flags,
.reset = siena_reset_hw,
@@ -698,6 +720,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
.rx_buffer_hash_size = 0x10,
.rx_buffer_padding = 0,
+ .can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
* interrupt handler only supports 32
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 79ad9c94a21b..4bdbaad9932d 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -213,10 +213,11 @@ static int meth_init_tx_ring(struct meth_private *priv)
{
/* Init TX ring */
priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
- &priv->tx_ring_dma, GFP_ATOMIC);
+ &priv->tx_ring_dma,
+ GFP_ATOMIC | __GFP_ZERO);
if (!priv->tx_ring)
return -ENOMEM;
- memset(priv->tx_ring, 0, TX_RING_BUFFER_SIZE);
+
priv->tx_count = priv->tx_read = priv->tx_write = 0;
mace->eth.tx_ring_base = priv->tx_ring_dma;
/* Now init skb save area */
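The transformation above is a general pattern, not specific to this driver; in isolation it looks like this (a sketch with illustrative names):

static int example_alloc_ring(struct device *dev, size_t size,
			      void **ring, dma_addr_t *ring_dma)
{
	/* __GFP_ZERO returns zeroed memory, so the separate memset() of
	 * the ring is no longer needed.
	 */
	*ring = dma_alloc_coherent(dev, size, ring_dma,
				   GFP_ATOMIC | __GFP_ZERO);
	return *ring ? 0 : -ENOMEM;
}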
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index efca14eaefa9..eb4aea3fe793 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1187,8 +1187,14 @@ sis900_init_rx_ring(struct net_device *net_dev)
}
sis_priv->rx_skbuff[i] = skb;
sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE;
- sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev,
- skb->data, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev,
+ skb->data, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
+ sis_priv->rx_ring[i].bufptr))) {
+ dev_kfree_skb(skb);
+ sis_priv->rx_skbuff[i] = NULL;
+ break;
+ }
}
sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);
@@ -1621,6 +1627,14 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
/* set the transmit buffer descriptor and enable Transmit State Machine */
sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev,
skb->data, skb->len, PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
+ sis_priv->tx_ring[entry].bufptr))) {
+ dev_kfree_skb(skb);
+ sis_priv->tx_skbuff[entry] = NULL;
+ net_dev->stats.tx_dropped++;
+ spin_unlock_irqrestore(&sis_priv->lock, flags);
+ return NETDEV_TX_OK;
+ }
sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
sw32(cr, TxENA | sr32(cr));
@@ -1824,9 +1838,15 @@ static int sis900_rx(struct net_device *net_dev)
refill_rx_ring:
sis_priv->rx_skbuff[entry] = skb;
sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
- sis_priv->rx_ring[entry].bufptr =
+ sis_priv->rx_ring[entry].bufptr =
pci_map_single(sis_priv->pci_dev, skb->data,
RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
+ sis_priv->rx_ring[entry].bufptr))) {
+ dev_kfree_skb_irq(skb);
+ sis_priv->rx_skbuff[entry] = NULL;
+ break;
+ }
}
sis_priv->cur_rx++;
entry = sis_priv->cur_rx % NUM_RX_DESC;
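The same map-then-check pattern recurs in every hunk of this file; distilled, it is (a sketch using the legacy PCI DMA API that this driver uses, with illustrative names):

static int example_map_rx_buffer(struct pci_dev *pdev, struct sk_buff *skb,
				 dma_addr_t *bufptr)
{
	*bufptr = pci_map_single(pdev, skb->data, RX_BUF_SIZE,
				 PCI_DMA_FROMDEVICE);
	/* A streaming mapping can fail (e.g. IOMMU exhaustion); the
	 * address must be validated before the descriptor is armed.
	 */
	if (unlikely(pci_dma_mapping_error(pdev, *bufptr))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}
	return 0;
}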
@@ -1841,23 +1861,26 @@ refill_rx_ring:
entry = sis_priv->dirty_rx % NUM_RX_DESC;
if (sis_priv->rx_skbuff[entry] == NULL) {
- if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
+ skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE);
+ if (skb == NULL) {
/* not enough memory for skbuff; this makes a
* "hole" in the buffer ring, and it is not clear
* how the hardware will react to this kind
* of degenerate buffer */
- if (netif_msg_rx_err(sis_priv))
- printk(KERN_INFO "%s: Memory squeeze, "
- "deferring packet.\n",
- net_dev->name);
net_dev->stats.rx_dropped++;
break;
}
sis_priv->rx_skbuff[entry] = skb;
sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
- sis_priv->rx_ring[entry].bufptr =
+ sis_priv->rx_ring[entry].bufptr =
pci_map_single(sis_priv->pci_dev, skb->data,
RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
+ sis_priv->rx_ring[entry].bufptr))) {
+ dev_kfree_skb_irq(skb);
+ sis_priv->rx_skbuff[entry] = NULL;
+ break;
+ }
}
}
/* re-enable the potentially idle receive state machine */
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 5a689af516e9..bb4c1674ff99 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_SMSC
bool "SMC (SMSC)/Western Digital devices"
default y
- depends on ARM || ISA || MAC || ARM || MIPS || M32R || SUPERH || \
+ depends on ARM || ISA || MAC || ARM64 || MIPS || M32R || SUPERH || \
BLACKFIN || MN10300 || COLDFIRE || PCI || PCMCIA
---help---
If you have a network (Ethernet) card belonging to this class, say Y
@@ -40,7 +40,7 @@ config SMC91X
select NET_CORE
select MII
depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \
- MN10300 || COLDFIRE)
+ MN10300 || COLDFIRE || ARM64)
---help---
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 50823da9dc1e..e85c2e7e8246 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -1223,9 +1223,7 @@ static void smc_rcv(struct net_device *dev)
dev->stats.multicast++;
skb = netdev_alloc_skb(dev, packet_length + 5);
-
if ( skb == NULL ) {
- printk(KERN_NOTICE CARDNAME ": Low memory, packet dropped.\n");
dev->stats.rx_dropped++;
goto done;
}
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 04393b5fef71..656d2e2ebfc9 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -2054,16 +2054,4 @@ static struct pcmcia_driver smc91c92_cs_driver = {
.suspend = smc91c92_suspend,
.resume = smc91c92_resume,
};
-
-static int __init init_smc91c92_cs(void)
-{
- return pcmcia_register_driver(&smc91c92_cs_driver);
-}
-
-static void __exit exit_smc91c92_cs(void)
-{
- pcmcia_unregister_driver(&smc91c92_cs_driver);
-}
-
-module_init(init_smc91c92_cs);
-module_exit(exit_smc91c92_cs);
+module_pcmcia_driver(smc91c92_cs_driver);
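module_pcmcia_driver() generates the boilerplate that was deleted above; its expansion is roughly:

static int __init smc91c92_cs_driver_init(void)
{
	return pcmcia_register_driver(&smc91c92_cs_driver);
}
module_init(smc91c92_cs_driver_init);

static void __exit smc91c92_cs_driver_exit(void)
{
	pcmcia_unregister_driver(&smc91c92_cs_driver);
}
module_exit(smc91c92_cs_driver_exit);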
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 591650a8de38..dfbf978315df 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -465,8 +465,6 @@ static inline void smc_rcv(struct net_device *dev)
*/
skb = netdev_alloc_skb(dev, packet_len);
if (unlikely(skb == NULL)) {
- printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
- dev->name);
SMC_WAIT_MMU_BUSY(lp);
SMC_SET_MMU_CMD(lp, MC_RELEASE);
dev->stats.rx_dropped++;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index da5cc9a3b34c..48e2b99bec51 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2115,7 +2115,7 @@ static int smsc911x_init(struct net_device *dev)
spin_lock_init(&pdata->dev_lock);
spin_lock_init(&pdata->mac_lock);
- if (pdata->ioaddr == 0) {
+ if (pdata->ioaddr == NULL) {
SMSC_WARN(pdata, probe, "pdata->ioaddr: 0x00000000");
return -ENODEV;
}
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index d457fa2d7509..ffa5c4ad1210 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -848,10 +848,8 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index)
BUG_ON(pd->rx_buffers[index].skb);
BUG_ON(pd->rx_buffers[index].mapping);
- if (unlikely(!skb)) {
- smsc_warn(RX_ERR, "Failed to allocate new skb!");
+ if (unlikely(!skb))
return -ENOMEM;
- }
mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb),
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index c0ea838c78d1..f695a50bac47 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -5,6 +5,7 @@ config STMMAC_ETH
select MII
select PHYLIB
select CRC32
+ select PTP_1588_CLOCK
---help---
This is the driver for the Ethernet IPs built around a
Synopsys IP Core and only tested on the STMicroelectronics
@@ -54,22 +55,4 @@ config STMMAC_DA
By default, the DMA arbitration scheme is based on Round-robin
(rx:tx priority is 1:1).
-choice
- prompt "Select the DMA TX/RX descriptor operating modes"
- depends on STMMAC_ETH
- ---help---
- This driver supports DMA descriptor to operate both in dual buffer
- (RING) and linked-list(CHAINED) mode. In RING mode each descriptor
- points to two data buffer pointers whereas in CHAINED mode they
- points to only one data buffer pointer.
-
-config STMMAC_RING
- bool "Enable Descriptor Ring Mode"
-
-config STMMAC_CHAINED
- bool "Enable Descriptor Chained Mode"
-
-endchoice
-
-
endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index c8e8ea60ac19..356a9dd32be7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -1,9 +1,7 @@
obj-$(CONFIG_STMMAC_ETH) += stmmac.o
-stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o
-stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o
stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
-stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
- dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
+stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
+ chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
- mmc_core.o $(stmmac-y)
+ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y)
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index 0668659803ed..d234ab540b29 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -28,9 +28,9 @@
#include "stmmac.h"
-unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
- struct stmmac_priv *priv = (struct stmmac_priv *) p;
+ struct stmmac_priv *priv = (struct stmmac_priv *)p;
unsigned int txsize = priv->dma_tx_size;
unsigned int entry = priv->cur_tx % txsize;
struct dma_desc *desc = priv->dma_tx + entry;
@@ -47,7 +47,8 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device, skb->data,
bmax, DMA_TO_DEVICE);
- priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum);
+ priv->tx_skbuff_dma[entry] = desc->des2;
+ priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
while (len != 0) {
entry = (++priv->cur_tx) % txsize;
@@ -57,8 +58,9 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device,
(skb->data + bmax * i),
bmax, DMA_TO_DEVICE);
- priv->hw->desc->prepare_tx_desc(desc, 0, bmax,
- csum);
+ priv->tx_skbuff_dma[entry] = desc->des2;
+ priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
+ STMMAC_CHAIN_MODE);
priv->hw->desc->set_tx_owner(desc);
priv->tx_skbuff[entry] = NULL;
len -= bmax;
@@ -67,8 +69,9 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device,
(skb->data + bmax * i), len,
DMA_TO_DEVICE);
- priv->hw->desc->prepare_tx_desc(desc, 0, len,
- csum);
+ priv->tx_skbuff_dma[entry] = desc->des2;
+ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
+ STMMAC_CHAIN_MODE);
priv->hw->desc->set_tx_owner(desc);
priv->tx_skbuff[entry] = NULL;
len = 0;
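For the chunking above, a frame therefore consumes one descriptor per bmax-sized chunk plus one for any remainder (illustrative arithmetic only, not driver code):

/* e.g. skb->len = 9000, bmax = BUF_SIZE_4KiB (4096):
 * DIV_ROUND_UP(9000, 4096) = 3 descriptors, mapped as 4096 + 4096 + 808
 */
unsigned int ndesc = DIV_ROUND_UP(skb->len, bmax);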
@@ -89,49 +92,70 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
return ret;
}
-static void stmmac_refill_desc3(int bfsize, struct dma_desc *p)
-{
-}
-
-static void stmmac_init_desc3(int des3_as_data_buf, struct dma_desc *p)
-{
-}
-
-static void stmmac_clean_desc3(struct dma_desc *p)
-{
-}
-
-static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr,
- unsigned int size)
+static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr,
+ unsigned int size, unsigned int extend_desc)
{
/*
* In chained mode the des3 points to the next element in the ring.
* The last element has to point to the head.
*/
int i;
- struct dma_desc *p = des;
dma_addr_t dma_phy = phy_addr;
- for (i = 0; i < (size - 1); i++) {
- dma_phy += sizeof(struct dma_desc);
- p->des3 = (unsigned int)dma_phy;
- p++;
+ if (extend_desc) {
+ struct dma_extended_desc *p = (struct dma_extended_desc *)des;
+ for (i = 0; i < (size - 1); i++) {
+ dma_phy += sizeof(struct dma_extended_desc);
+ p->basic.des3 = (unsigned int)dma_phy;
+ p++;
+ }
+ p->basic.des3 = (unsigned int)phy_addr;
+
+ } else {
+ struct dma_desc *p = (struct dma_desc *)des;
+ for (i = 0; i < (size - 1); i++) {
+ dma_phy += sizeof(struct dma_desc);
+ p->des3 = (unsigned int)dma_phy;
+ p++;
+ }
+ p->des3 = (unsigned int)phy_addr;
}
- p->des3 = (unsigned int)phy_addr;
}
-static int stmmac_set_16kib_bfsize(int mtu)
+static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+
+ if (priv->hwts_rx_en && !priv->extend_desc)
+ /* NOTE: Device will overwrite des3 with timestamp value if
+ * 1588-2002 time stamping is enabled, hence reinitialize it
+ * to keep explicit chaining in the descriptor.
+ */
+ p->des3 = (unsigned int)(priv->dma_rx_phy +
+ (((priv->dirty_rx) + 1) %
+ priv->dma_rx_size) *
+ sizeof(struct dma_desc));
+}
+
+static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
{
- /* Not supported */
- return 0;
+ struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+
+ if (priv->hw->desc->get_tx_ls(p) && !priv->extend_desc)
+ /* NOTE: Device will overwrite des3 with timestamp value if
+ * 1588-2002 time stamping is enabled, hence reinitialize it
+ * to keep explicit chaining in the descriptor.
+ */
+ p->des3 = (unsigned int)(priv->dma_tx_phy +
+ (((priv->dirty_tx + 1) %
+ priv->dma_tx_size) *
+ sizeof(struct dma_desc)));
}
-const struct stmmac_ring_mode_ops ring_mode_ops = {
+const struct stmmac_chain_mode_ops chain_mode_ops = {
+ .init = stmmac_init_dma_chain,
.is_jumbo_frm = stmmac_is_jumbo_frm,
.jumbo_frm = stmmac_jumbo_frm,
.refill_desc3 = stmmac_refill_desc3,
- .init_desc3 = stmmac_init_desc3,
- .init_dma_chain = stmmac_init_dma_chain,
.clean_desc3 = stmmac_clean_desc3,
- .set_16kib_bfsize = stmmac_set_16kib_bfsize,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 186d14806122..7788fbe44f0a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -117,6 +117,36 @@ struct stmmac_extra_stats {
unsigned long irq_rx_path_in_lpi_mode_n;
unsigned long irq_rx_path_exit_lpi_mode_n;
unsigned long phy_eee_wakeup_error_n;
+ /* Extended RDES status */
+ unsigned long ip_hdr_err;
+ unsigned long ip_payload_err;
+ unsigned long ip_csum_bypassed;
+ unsigned long ipv4_pkt_rcvd;
+ unsigned long ipv6_pkt_rcvd;
+ unsigned long rx_msg_type_ext_no_ptp;
+ unsigned long rx_msg_type_sync;
+ unsigned long rx_msg_type_follow_up;
+ unsigned long rx_msg_type_delay_req;
+ unsigned long rx_msg_type_delay_resp;
+ unsigned long rx_msg_type_pdelay_req;
+ unsigned long rx_msg_type_pdelay_resp;
+ unsigned long rx_msg_type_pdelay_follow_up;
+ unsigned long ptp_frame_type;
+ unsigned long ptp_ver;
+ unsigned long timestamp_dropped;
+ unsigned long av_pkt_rcvd;
+ unsigned long av_tagged_pkt_rcvd;
+ unsigned long vlan_tag_priority_val;
+ unsigned long l3_filter_match;
+ unsigned long l4_filter_match;
+ unsigned long l3_l4_filter_no_match;
+ /* PCS */
+ unsigned long irq_pcs_ane_n;
+ unsigned long irq_pcs_link_n;
+ unsigned long irq_rgmii_n;
+ unsigned long pcs_link;
+ unsigned long pcs_duplex;
+ unsigned long pcs_speed;
};
/* CSR Frequency Access Defines*/
@@ -138,37 +168,43 @@ struct stmmac_extra_stats {
#define FLOW_TX 2
#define FLOW_AUTO (FLOW_TX | FLOW_RX)
-#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
+/* PCS defines */
+#define STMMAC_PCS_RGMII (1 << 0)
+#define STMMAC_PCS_SGMII (1 << 1)
+#define STMMAC_PCS_TBI (1 << 2)
+#define STMMAC_PCS_RTBI (1 << 3)
+
+#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
/* DMA HW feature register fields */
-#define DMA_HW_FEAT_MIISEL 0x00000001 /* 10/100 Mbps Support */
-#define DMA_HW_FEAT_GMIISEL 0x00000002 /* 1000 Mbps Support */
-#define DMA_HW_FEAT_HDSEL 0x00000004 /* Half-Duplex Support */
-#define DMA_HW_FEAT_EXTHASHEN 0x00000008 /* Expanded DA Hash Filter */
-#define DMA_HW_FEAT_HASHSEL 0x00000010 /* HASH Filter */
-#define DMA_HW_FEAT_ADDMACADRSEL 0x00000020 /* Multiple MAC Addr Reg */
-#define DMA_HW_FEAT_PCSSEL 0x00000040 /* PCS registers */
-#define DMA_HW_FEAT_L3L4FLTREN 0x00000080 /* Layer 3 & Layer 4 Feature */
-#define DMA_HW_FEAT_SMASEL 0x00000100 /* SMA(MDIO) Interface */
-#define DMA_HW_FEAT_RWKSEL 0x00000200 /* PMT Remote Wakeup */
-#define DMA_HW_FEAT_MGKSEL 0x00000400 /* PMT Magic Packet */
-#define DMA_HW_FEAT_MMCSEL 0x00000800 /* RMON Module */
-#define DMA_HW_FEAT_TSVER1SEL 0x00001000 /* Only IEEE 1588-2002 Timestamp */
-#define DMA_HW_FEAT_TSVER2SEL 0x00002000 /* IEEE 1588-2008 Adv Timestamp */
-#define DMA_HW_FEAT_EEESEL 0x00004000 /* Energy Efficient Ethernet */
-#define DMA_HW_FEAT_AVSEL 0x00008000 /* AV Feature */
-#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* Checksum Offload in Tx */
-#define DMA_HW_FEAT_RXTYP1COE 0x00020000 /* IP csum Offload(Type 1) in Rx */
-#define DMA_HW_FEAT_RXTYP2COE 0x00040000 /* IP csum Offload(Type 2) in Rx */
-#define DMA_HW_FEAT_RXFIFOSIZE 0x00080000 /* Rx FIFO > 2048 Bytes */
-#define DMA_HW_FEAT_RXCHCNT 0x00300000 /* No. of additional Rx Channels */
-#define DMA_HW_FEAT_TXCHCNT 0x00c00000 /* No. of additional Tx Channels */
-#define DMA_HW_FEAT_ENHDESSEL 0x01000000 /* Alternate (Enhanced Descriptor) */
-#define DMA_HW_FEAT_INTTSEN 0x02000000 /* Timestamping with Internal
- System Time */
-#define DMA_HW_FEAT_FLEXIPPSEN 0x04000000 /* Flexible PPS Output */
-#define DMA_HW_FEAT_SAVLANINS 0x08000000 /* Source Addr or VLAN Insertion */
-#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY interface */
+#define DMA_HW_FEAT_MIISEL 0x00000001 /* 10/100 Mbps Support */
+#define DMA_HW_FEAT_GMIISEL 0x00000002 /* 1000 Mbps Support */
+#define DMA_HW_FEAT_HDSEL 0x00000004 /* Half-Duplex Support */
+#define DMA_HW_FEAT_EXTHASHEN 0x00000008 /* Expanded DA Hash Filter */
+#define DMA_HW_FEAT_HASHSEL 0x00000010 /* HASH Filter */
+#define DMA_HW_FEAT_ADDMAC 0x00000020 /* Multiple MAC Addr Reg */
+#define DMA_HW_FEAT_PCSSEL 0x00000040 /* PCS registers */
+#define DMA_HW_FEAT_L3L4FLTREN 0x00000080 /* Layer 3 & Layer 4 Feature */
+#define DMA_HW_FEAT_SMASEL 0x00000100 /* SMA(MDIO) Interface */
+#define DMA_HW_FEAT_RWKSEL 0x00000200 /* PMT Remote Wakeup */
+#define DMA_HW_FEAT_MGKSEL 0x00000400 /* PMT Magic Packet */
+#define DMA_HW_FEAT_MMCSEL 0x00000800 /* RMON Module */
+#define DMA_HW_FEAT_TSVER1SEL 0x00001000 /* Only IEEE 1588-2002 */
+#define DMA_HW_FEAT_TSVER2SEL 0x00002000 /* IEEE 1588-2008 PTPv2 */
+#define DMA_HW_FEAT_EEESEL 0x00004000 /* Energy Efficient Ethernet */
+#define DMA_HW_FEAT_AVSEL 0x00008000 /* AV Feature */
+#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* Checksum Offload in Tx */
+#define DMA_HW_FEAT_RXTYP1COE 0x00020000 /* IP COE (Type 1) in Rx */
+#define DMA_HW_FEAT_RXTYP2COE 0x00040000 /* IP COE (Type 2) in Rx */
+#define DMA_HW_FEAT_RXFIFOSIZE 0x00080000 /* Rx FIFO > 2048 Bytes */
+#define DMA_HW_FEAT_RXCHCNT 0x00300000 /* No. additional Rx Channels */
+#define DMA_HW_FEAT_TXCHCNT 0x00c00000 /* No. additional Tx Channels */
+#define DMA_HW_FEAT_ENHDESSEL 0x01000000 /* Alternate Descriptor */
+/* Timestamping with Internal System Time */
+#define DMA_HW_FEAT_INTTSEN 0x02000000
+#define DMA_HW_FEAT_FLEXIPPSEN 0x04000000 /* Flexible PPS Output */
+#define DMA_HW_FEAT_SAVLANINS 0x08000000 /* Source Addr or VLAN */
+#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */
#define DEFAULT_DMA_PBL 8
/* Max/Min RI Watchdog Timer count value */
@@ -180,7 +216,8 @@ struct stmmac_extra_stats {
#define STMMAC_TX_MAX_FRAMES 256
#define STMMAC_TX_FRAMES 64
-enum rx_frame_status { /* IPC status */
+/* Rx IPC status */
+enum rx_frame_status {
good_frame = 0,
discard_frame = 1,
csum_none = 2,
@@ -194,17 +231,25 @@ enum dma_irq_status {
handle_tx = 0x8,
};
-enum core_specific_irq_mask {
- core_mmc_tx_irq = 1,
- core_mmc_rx_irq = 2,
- core_mmc_rx_csum_offload_irq = 4,
- core_irq_receive_pmt_irq = 8,
- core_irq_tx_path_in_lpi_mode = 16,
- core_irq_tx_path_exit_lpi_mode = 32,
- core_irq_rx_path_in_lpi_mode = 64,
- core_irq_rx_path_exit_lpi_mode = 128,
+#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 1)
+#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 2)
+#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 3)
+#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 4)
+
+#define CORE_PCS_ANE_COMPLETE (1 << 5)
+#define CORE_PCS_LINK_STATUS (1 << 6)
+#define CORE_RGMII_IRQ (1 << 7)
+
+struct rgmii_adv {
+ unsigned int pause;
+ unsigned int duplex;
+ unsigned int lp_pause;
+ unsigned int lp_duplex;
};
+#define STMMAC_PCS_PAUSE 1
+#define STMMAC_PCS_ASYM_PAUSE 2
+
/* DMA HW capabilities */
struct dma_features {
unsigned int mbps_10_100;
@@ -217,9 +262,9 @@ struct dma_features {
unsigned int pmt_remote_wake_up;
unsigned int pmt_magic_frame;
unsigned int rmon;
- /* IEEE 1588-2002*/
+ /* IEEE 1588-2002 */
unsigned int time_stamp;
- /* IEEE 1588-2008*/
+ /* IEEE 1588-2008 */
unsigned int atime_stamp;
/* 802.3az - Energy-Efficient Ethernet (EEE) */
unsigned int eee;
@@ -232,7 +277,7 @@ struct dma_features {
/* TX and RX number of channels */
unsigned int number_rx_channel;
unsigned int number_tx_channel;
- /* Alternate (enhanced) DESC mode*/
+ /* Alternate (enhanced) DESC mode */
unsigned int enh_desc;
};
@@ -255,23 +300,26 @@ struct dma_features {
#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8
#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0
+#define STMMAC_CHAIN_MODE 0x1
+#define STMMAC_RING_MODE 0x2
+
struct stmmac_desc_ops {
/* DMA RX descriptor ring initialization */
- void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic);
+ void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode,
+ int end);
/* DMA TX descriptor ring initialization */
- void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
+ void (*init_tx_desc) (struct dma_desc *p, int mode, int end);
/* Invoked by the xmit function to prepare the tx descriptor */
void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
- int csum_flag);
+ int csum_flag, int mode);
/* Set/get the owner of the descriptor */
void (*set_tx_owner) (struct dma_desc *p);
int (*get_tx_owner) (struct dma_desc *p);
/* Invoked by the xmit function to close the tx descriptor */
void (*close_tx_desc) (struct dma_desc *p);
/* Clean the tx descriptor as soon as the tx irq is received */
- void (*release_tx_desc) (struct dma_desc *p);
+ void (*release_tx_desc) (struct dma_desc *p, int mode);
/* Clear interrupt on tx frame completion. When this bit is
* set an interrupt happens as soon as the frame is transmitted */
void (*clear_tx_ic) (struct dma_desc *p);
@@ -290,12 +338,22 @@ struct stmmac_desc_ops {
/* Return the reception status looking at the RDES1 */
int (*rx_status) (void *data, struct stmmac_extra_stats *x,
struct dma_desc *p);
+ void (*rx_extended_status) (void *data, struct stmmac_extra_stats *x,
+ struct dma_extended_desc *p);
+ /* Set tx timestamp enable bit */
+ void (*enable_tx_timestamp) (struct dma_desc *p);
+ /* get tx timestamp status */
+ int (*get_tx_timestamp_status) (struct dma_desc *p);
+ /* get timestamp value */
+ u64(*get_timestamp) (void *desc, u32 ats);
+ /* get rx timestamp status */
+ int (*get_rx_timestamp_status) (void *desc, u32 ats);
};
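With the new mode argument, callers select ring or chain handling per invocation; a sketch mirroring the chain_mode.c hunk above (example_prepare_first_segment is a hypothetical wrapper):

static void example_prepare_first_segment(struct stmmac_priv *priv,
					  struct dma_desc *desc,
					  int len, int csum)
{
	/* 1 = first segment; the mode picks the descriptor layout */
	priv->hw->desc->prepare_tx_desc(desc, 1, len, csum,
					STMMAC_CHAIN_MODE);
}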
struct stmmac_dma_ops {
/* DMA core initialization */
int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb,
- int burst_len, u32 dma_tx, u32 dma_rx);
+ int burst_len, u32 dma_tx, u32 dma_rx, int atds);
/* Dump DMA registers */
void (*dump_regs) (void __iomem *ioaddr);
/* Set tx/rx threshold in the csr6 register
@@ -321,13 +379,14 @@ struct stmmac_dma_ops {
struct stmmac_ops {
/* MAC core initialization */
- void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned;
+ void (*core_init) (void __iomem *ioaddr);
/* Enable and verify that the IPC module is supported */
int (*rx_ipc) (void __iomem *ioaddr);
/* Dump MAC registers */
void (*dump_regs) (void __iomem *ioaddr);
/* Handle extra events on specific interrupts hw dependent */
- int (*host_irq_status) (void __iomem *ioaddr);
+ int (*host_irq_status) (void __iomem *ioaddr,
+ struct stmmac_extra_stats *x);
/* Multicast filter setting */
void (*set_filter) (struct net_device *dev, int id);
/* Flow control setting */
@@ -344,6 +403,18 @@ struct stmmac_ops {
void (*reset_eee_mode) (void __iomem *ioaddr);
void (*set_eee_timer) (void __iomem *ioaddr, int ls, int tw);
void (*set_eee_pls) (void __iomem *ioaddr, int link);
+ void (*ctrl_ane) (void __iomem *ioaddr, bool restart);
+ void (*get_adv) (void __iomem *ioaddr, struct rgmii_adv *adv);
+};
+
+struct stmmac_hwtimestamp {
+ void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
+ void (*config_sub_second_increment) (void __iomem *ioaddr);
+ int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec);
+ int (*config_addend) (void __iomem *ioaddr, u32 addend);
+ int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec,
+ int add_sub);
+ u64(*get_systime) (void __iomem *ioaddr);
};
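config_addend() enables fine frequency steering of the PTP clock; the usual convention scales the running accumulator addend by the requested parts-per-billion offset (a sketch of that arithmetic, not code from this patch):

static u32 example_scale_addend(u32 addend, s32 ppb)
{
	int neg = ppb < 0;
	u64 adj = addend;

	if (neg)
		ppb = -ppb;
	adj *= ppb;
	do_div(adj, 1000000000);	/* addend * |ppb| / 1e9 */
	return neg ? addend - (u32)adj : addend + (u32)adj;
}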
struct mac_link {
@@ -360,19 +431,28 @@ struct mii_regs {
struct stmmac_ring_mode_ops {
unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
- void (*refill_desc3) (int bfsize, struct dma_desc *p);
- void (*init_desc3) (int des3_as_data_buf, struct dma_desc *p);
- void (*init_dma_chain) (struct dma_desc *des, dma_addr_t phy_addr,
- unsigned int size);
- void (*clean_desc3) (struct dma_desc *p);
+ void (*refill_desc3) (void *priv, struct dma_desc *p);
+ void (*init_desc3) (struct dma_desc *p);
+ void (*clean_desc3) (void *priv, struct dma_desc *p);
int (*set_16kib_bfsize) (int mtu);
};
+struct stmmac_chain_mode_ops {
+ void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
+ unsigned int extend_desc);
+ unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
+ unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+ void (*refill_desc3) (void *priv, struct dma_desc *p);
+ void (*clean_desc3) (void *priv, struct dma_desc *p);
+};
+
struct mac_device_info {
- const struct stmmac_ops *mac;
- const struct stmmac_desc_ops *desc;
- const struct stmmac_dma_ops *dma;
- const struct stmmac_ring_mode_ops *ring;
+ const struct stmmac_ops *mac;
+ const struct stmmac_desc_ops *desc;
+ const struct stmmac_dma_ops *dma;
+ const struct stmmac_ring_mode_ops *ring;
+ const struct stmmac_chain_mode_ops *chain;
+ const struct stmmac_hwtimestamp *ptp;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
unsigned int synopsys_uid;
@@ -390,5 +470,6 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
extern const struct stmmac_ring_mode_ops ring_mode_ops;
+extern const struct stmmac_chain_mode_ops chain_mode_ops;
#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 223adf95fd03..ad3996038018 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -24,6 +24,7 @@
#ifndef __DESCS_H__
#define __DESCS_H__
+/* Basic descriptor structure for normal and alternate descriptors */
struct dma_desc {
/* Receive descriptor */
union {
@@ -60,7 +61,7 @@ struct dma_desc {
} rx;
struct {
/* RDES0 */
- u32 payload_csum_error:1;
+ u32 rx_mac_addr:1;
u32 crc_error:1;
u32 dribbling:1;
u32 error_gmii:1;
@@ -162,13 +163,57 @@ struct dma_desc {
unsigned int des3;
};
+/* Extended descriptor structure (supported by new SYNP GMAC generations) */
+struct dma_extended_desc {
+ struct dma_desc basic;
+ union {
+ struct {
+ u32 ip_payload_type:3;
+ u32 ip_hdr_err:1;
+ u32 ip_payload_err:1;
+ u32 ip_csum_bypassed:1;
+ u32 ipv4_pkt_rcvd:1;
+ u32 ipv6_pkt_rcvd:1;
+ u32 msg_type:4;
+ u32 ptp_frame_type:1;
+ u32 ptp_ver:1;
+ u32 timestamp_dropped:1;
+ u32 reserved:1;
+ u32 av_pkt_rcvd:1;
+ u32 av_tagged_pkt_rcvd:1;
+ u32 vlan_tag_priority_val:3;
+ u32 reserved3:3;
+ u32 l3_filter_match:1;
+ u32 l4_filter_match:1;
+ u32 l3_l4_filter_no_match:2;
+ u32 reserved4:4;
+ } erx;
+ struct {
+ u32 reserved;
+ } etx;
+ } des4;
+ unsigned int des5; /* Reserved */
+ unsigned int des6; /* Tx/Rx Timestamp Low */
+ unsigned int des7; /* Tx/Rx Timestamp High */
+};
+
/* Transmit checksum insertion control */
enum tdes_csum_insertion {
cic_disabled = 0, /* Checksum Insertion Control */
cic_only_ip = 1, /* Only IP header */
- cic_no_pseudoheader = 2, /* IP header but pseudoheader
- * is not calculated */
+ /* IP header but pseudoheader is not calculated */
+ cic_no_pseudoheader = 2,
cic_full = 3, /* IP header and pseudoheader */
};
+/* Extended RDES4 definitions */
+#define RDES_EXT_NO_PTP 0
+#define RDES_EXT_SYNC 0x1
+#define RDES_EXT_FOLLOW_UP 0x2
+#define RDES_EXT_DELAY_REQ 0x3
+#define RDES_EXT_DELAY_RESP 0x4
+#define RDES_EXT_PDELAY_REQ 0x5
+#define RDES_EXT_PDELAY_RESP 0x6
+#define RDES_EXT_PDELAY_FOLLOW_UP 0x7
+
#endif /* __DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 7ee9499a6e38..6f2cc78c5cf5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -30,26 +30,28 @@
#ifndef __DESC_COM_H__
#define __DESC_COM_H__
-#if defined(CONFIG_STMMAC_RING)
-static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+/* Specific functions used for Ring mode */
+
+/* Enhanced descriptors */
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
{
p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
if (end)
p->des01.erx.end_ring = 1;
}
-static inline void ehn_desc_tx_set_on_ring_chain(struct dma_desc *p, int end)
+static inline void ehn_desc_tx_set_on_ring(struct dma_desc *p, int end)
{
if (end)
p->des01.etx.end_ring = 1;
}
-static inline void enh_desc_end_tx_desc(struct dma_desc *p, int ter)
+static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
{
p->des01.etx.end_ring = ter;
}
-static inline void enh_set_tx_desc_len(struct dma_desc *p, int len)
+static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
{
if (unlikely(len > BUF_SIZE_4KiB)) {
p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
@@ -58,25 +60,26 @@ static inline void enh_set_tx_desc_len(struct dma_desc *p, int len)
p->des01.etx.buffer1_size = len;
}
-static inline void ndesc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+/* Normal descriptors */
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
{
p->des01.rx.buffer2_size = BUF_SIZE_2KiB - 1;
if (end)
p->des01.rx.end_ring = 1;
}
-static inline void ndesc_tx_set_on_ring_chain(struct dma_desc *p, int end)
+static inline void ndesc_tx_set_on_ring(struct dma_desc *p, int end)
{
if (end)
p->des01.tx.end_ring = 1;
}
-static inline void ndesc_end_tx_desc(struct dma_desc *p, int ter)
+static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
{
p->des01.tx.end_ring = ter;
}
-static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
+static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
{
if (unlikely(len > BUF_SIZE_2KiB)) {
p->des01.etx.buffer1_size = BUF_SIZE_2KiB - 1;
@@ -85,47 +88,47 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
p->des01.tx.buffer1_size = len;
}
-#else
+/* Specific functions used for Chain mode */
-static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+/* Enhanced descriptors */
+static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p, int end)
{
p->des01.erx.second_address_chained = 1;
}
-static inline void ehn_desc_tx_set_on_ring_chain(struct dma_desc *p, int end)
+static inline void ehn_desc_tx_set_on_chain(struct dma_desc *p, int end)
{
p->des01.etx.second_address_chained = 1;
}
-static inline void enh_desc_end_tx_desc(struct dma_desc *p, int ter)
+static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
{
p->des01.etx.second_address_chained = 1;
}
-static inline void enh_set_tx_desc_len(struct dma_desc *p, int len)
+static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
{
p->des01.etx.buffer1_size = len;
}
-static inline void ndesc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+/* Normal descriptors */
+static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end)
{
p->des01.rx.second_address_chained = 1;
}
-static inline void ndesc_tx_set_on_ring_chain(struct dma_desc *p, int ring_size)
+static inline void ndesc_tx_set_on_chain(struct dma_desc *p, int ring_size)
{
p->des01.tx.second_address_chained = 1;
}
-static inline void ndesc_end_tx_desc(struct dma_desc *p, int ter)
+static inline void ndesc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
{
p->des01.tx.second_address_chained = 1;
}
-static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
+static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
{
p->des01.tx.buffer1_size = len;
}
-#endif
-
#endif /* __DESC_COM_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 7ad56afd6324..c12aabb8cf93 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -89,13 +89,46 @@ enum power_event {
(reg * 8))
#define GMAC_MAX_PERFECT_ADDRESSES 32
+/* PCS registers (AN/TBI/SGMII/RGMII) offset */
#define GMAC_AN_CTRL 0x000000c0 /* AN control */
#define GMAC_AN_STATUS 0x000000c4 /* AN status */
#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */
-#define GMAC_ANE_LINK 0x000000cc /* Auto-Neg. link partener ability */
+#define GMAC_ANE_LPA 0x000000cc /* Auto-Neg. link partner ability */
#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */
#define GMAC_TBI 0x000000d4 /* TBI extend status */
-#define GMAC_GMII_STATUS 0x000000d8 /* S/R-GMII status */
+#define GMAC_S_R_GMII 0x000000d8 /* SGMII RGMII status */
+
+/* AN Configuration defines */
+#define GMAC_AN_CTRL_RAN 0x00000200 /* Restart Auto-Negotiation */
+#define GMAC_AN_CTRL_ANE 0x00001000 /* Auto-Negotiation Enable */
+#define GMAC_AN_CTRL_ELE 0x00004000 /* External Loopback Enable */
+#define GMAC_AN_CTRL_ECD 0x00010000 /* Enable Comma Detect */
+#define GMAC_AN_CTRL_LR 0x00020000 /* Lock to Reference */
+#define GMAC_AN_CTRL_SGMRAL 0x00040000 /* SGMII RAL Control */
+
+/* AN Status defines */
+#define GMAC_AN_STATUS_LS 0x00000004 /* Link Status 0:down 1:up */
+#define GMAC_AN_STATUS_ANA 0x00000008 /* Auto-Negotiation Ability */
+#define GMAC_AN_STATUS_ANC 0x00000020 /* Auto-Negotiation Complete */
+#define GMAC_AN_STATUS_ES 0x00000100 /* Extended Status */
+
+/* Register 54 (SGMII/RGMII status register) */
+#define GMAC_S_R_GMII_LINK 0x8
+#define GMAC_S_R_GMII_SPEED 0x5
+#define GMAC_S_R_GMII_SPEED_SHIFT 0x1
+#define GMAC_S_R_GMII_MODE 0x1
+#define GMAC_S_R_GMII_SPEED_125 2
+#define GMAC_S_R_GMII_SPEED_25 1
+
+/* Common ADV and LPA defines */
+#define GMAC_ANE_FD (1 << 5)
+#define GMAC_ANE_HD (1 << 6)
+#define GMAC_ANE_PSE (3 << 7)
+#define GMAC_ANE_PSE_SHIFT 7
+
+#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */
/* GMAC Configuration defines */
#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
@@ -108,19 +141,19 @@ enum inter_frame_gap {
GMAC_CONTROL_IFG_80 = 0x00020000,
GMAC_CONTROL_IFG_40 = 0x000e0000,
};
-#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense during tx */
-#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */
-#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */
-#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */
-#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
-#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */
-#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
-#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */
-#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */
-#define GMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Stripping */
-#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */
-#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
-#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
+#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense */
+#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */
+#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */
+#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */
+#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
+#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */
+#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
+#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */
+#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */
+#define GMAC_CONTROL_ACS 0x00000080 /* Auto Pad/FCS Stripping */
+#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */
+#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
+#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
GMAC_CONTROL_JE | GMAC_CONTROL_BE)
@@ -151,15 +184,16 @@ enum inter_frame_gap {
#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */
#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
-#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
+#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
/* Programmable burst length (passed through platform) */
#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
#define DMA_BUS_MODE_PBL_SHIFT 8
+#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */
enum rx_tx_priority_ratio {
- double_ratio = 0x00004000, /*2:1 */
- triple_ratio = 0x00008000, /*3:1 */
- quadruple_ratio = 0x0000c000, /*4:1 */
+ double_ratio = 0x00004000, /* 2:1 */
+ triple_ratio = 0x00008000, /* 3:1 */
+ quadruple_ratio = 0x0000c000, /* 4:1 */
};
#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
@@ -179,9 +213,10 @@ enum rx_tx_priority_ratio {
#define DMA_BUS_FB 0x00010000 /* Fixed Burst */
/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/
-#define DMA_CONTROL_DT 0x04000000 /* Disable Drop TCP/IP csum error */
-#define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */
-#define DMA_CONTROL_DFF 0x01000000 /* Disaable flushing */
+/* Disable Drop TCP/IP csum error */
+#define DMA_CONTROL_DT 0x04000000
+#define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */
+#define DMA_CONTROL_DFF 0x01000000 /* Disable flushing */
/* Threshold for Activating the FC */
enum rfa {
act_full_minus_1 = 0x00800000,
@@ -196,7 +231,7 @@ enum rfd {
deac_full_minus_3 = 0x00401000,
deac_full_minus_4 = 0x00401800,
};
-#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */
+#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */
enum ttc_control {
DMA_CONTROL_TTC_64 = 0x00000000,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index bfe022605498..7e05e8d0f1c2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -28,6 +28,7 @@
#include <linux/crc32.h>
#include <linux/slab.h>
+#include <linux/ethtool.h>
#include <asm/io.h>
#include "dwmac1000.h"
@@ -71,22 +72,22 @@ static void dwmac1000_dump_regs(void __iomem *ioaddr)
}
static void dwmac1000_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
- unsigned int reg_n)
+ unsigned int reg_n)
{
stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
- GMAC_ADDR_LOW(reg_n));
+ GMAC_ADDR_LOW(reg_n));
}
static void dwmac1000_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
- unsigned int reg_n)
+ unsigned int reg_n)
{
stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
- GMAC_ADDR_LOW(reg_n));
+ GMAC_ADDR_LOW(reg_n));
}
static void dwmac1000_set_filter(struct net_device *dev, int id)
{
- void __iomem *ioaddr = (void __iomem *) dev->base_addr;
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
unsigned int value = 0;
unsigned int perfect_addr_number;
@@ -96,7 +97,7 @@ static void dwmac1000_set_filter(struct net_device *dev, int id)
if (dev->flags & IFF_PROMISC)
value = GMAC_FRAME_FILTER_PR;
else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
- || (dev->flags & IFF_ALLMULTI)) {
+ || (dev->flags & IFF_ALLMULTI)) {
value = GMAC_FRAME_FILTER_PM; /* pass all multi */
writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
@@ -110,12 +111,13 @@ static void dwmac1000_set_filter(struct net_device *dev, int id)
memset(mc_filter, 0, sizeof(mc_filter));
netdev_for_each_mc_addr(ha, dev) {
/* The upper 6 bits of the calculated CRC are used to
- index the contens of the hash table */
- int bit_nr =
- bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
+ * index the contents of the hash table
+ */
+ int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
/* The most significant bit determines the register to
* use (H/L) while the other 5 bits determine the bit
- * within the register. */
+ * within the register.
+ */
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
@@ -128,10 +130,11 @@ static void dwmac1000_set_filter(struct net_device *dev, int id)
else
perfect_addr_number = GMAC_MAX_PERFECT_ADDRESSES / 2;
- /* Handle multiple unicast addresses (perfect filtering)*/
+ /* Handle multiple unicast addresses (perfect filtering) */
if (netdev_uc_count(dev) > perfect_addr_number)
- /* Switch to promiscuous mode is more than 16 addrs
- are required */
+ /* Switch to promiscuous mode if more than 16 addrs
+ * are required
+ */
value |= GMAC_FRAME_FILTER_PR;
else {
int reg = 1;
@@ -149,13 +152,13 @@ static void dwmac1000_set_filter(struct net_device *dev, int id)
#endif
writel(value, ioaddr + GMAC_FRAME_FILTER);
- CHIP_DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
- "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
- readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
+ CHIP_DBG(KERN_INFO "\tFilter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
+ readl(ioaddr + GMAC_FRAME_FILTER),
+ readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
}
static void dwmac1000_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
- unsigned int fc, unsigned int pause_time)
+ unsigned int fc, unsigned int pause_time)
{
unsigned int flow = 0;
@@ -193,74 +196,106 @@ static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
writel(pmt, ioaddr + GMAC_PMT);
}
-
-static int dwmac1000_irq_status(void __iomem *ioaddr)
+static int dwmac1000_irq_status(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x)
{
u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
- int status = 0;
+ int ret = 0;
/* Not used events (e.g. MMC interrupts) are not handled. */
if ((intr_status & mmc_tx_irq)) {
CHIP_DBG(KERN_INFO "GMAC: MMC tx interrupt: 0x%08x\n",
- readl(ioaddr + GMAC_MMC_TX_INTR));
- status |= core_mmc_tx_irq;
+ readl(ioaddr + GMAC_MMC_TX_INTR));
+ x->mmc_tx_irq_n++;
}
if (unlikely(intr_status & mmc_rx_irq)) {
CHIP_DBG(KERN_INFO "GMAC: MMC rx interrupt: 0x%08x\n",
- readl(ioaddr + GMAC_MMC_RX_INTR));
- status |= core_mmc_rx_irq;
+ readl(ioaddr + GMAC_MMC_RX_INTR));
+ x->mmc_rx_irq_n++;
}
if (unlikely(intr_status & mmc_rx_csum_offload_irq)) {
CHIP_DBG(KERN_INFO "GMAC: MMC rx csum offload: 0x%08x\n",
- readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
- status |= core_mmc_rx_csum_offload_irq;
+ readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
+ x->mmc_rx_csum_offload_irq_n++;
}
if (unlikely(intr_status & pmt_irq)) {
CHIP_DBG(KERN_INFO "GMAC: received Magic frame\n");
- /* clear the PMT bits 5 and 6 by reading the PMT
- * status register. */
+ /* clear the PMT bits 5 and 6 by reading the PMT status reg */
readl(ioaddr + GMAC_PMT);
- status |= core_irq_receive_pmt_irq;
+ x->irq_receive_pmt_irq_n++;
}
/* MAC trx/rx EEE LPI entry/exit interrupts */
if (intr_status & lpiis_irq) {
/* Clean LPI interrupt by reading the Reg 12 */
- u32 lpi_status = readl(ioaddr + LPI_CTRL_STATUS);
+ ret = readl(ioaddr + LPI_CTRL_STATUS);
- if (lpi_status & LPI_CTRL_STATUS_TLPIEN) {
+ if (ret & LPI_CTRL_STATUS_TLPIEN) {
CHIP_DBG(KERN_INFO "GMAC TX entered in LPI\n");
- status |= core_irq_tx_path_in_lpi_mode;
+ x->irq_tx_path_in_lpi_mode_n++;
}
- if (lpi_status & LPI_CTRL_STATUS_TLPIEX) {
+ if (ret & LPI_CTRL_STATUS_TLPIEX) {
CHIP_DBG(KERN_INFO "GMAC TX exit from LPI\n");
- status |= core_irq_tx_path_exit_lpi_mode;
+ x->irq_tx_path_exit_lpi_mode_n++;
}
- if (lpi_status & LPI_CTRL_STATUS_RLPIEN) {
+ if (ret & LPI_CTRL_STATUS_RLPIEN) {
CHIP_DBG(KERN_INFO "GMAC RX entered in LPI\n");
- status |= core_irq_rx_path_in_lpi_mode;
+ x->irq_rx_path_in_lpi_mode_n++;
}
- if (lpi_status & LPI_CTRL_STATUS_RLPIEX) {
+ if (ret & LPI_CTRL_STATUS_RLPIEX) {
CHIP_DBG(KERN_INFO "GMAC RX exit from LPI\n");
- status |= core_irq_rx_path_exit_lpi_mode;
+ x->irq_rx_path_exit_lpi_mode_n++;
+ }
+ }
+
+ if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
+ CHIP_DBG(KERN_INFO "GMAC PCS ANE IRQ\n");
+ readl(ioaddr + GMAC_AN_STATUS);
+ x->irq_pcs_ane_n++;
+ }
+ if (intr_status & rgmii_irq) {
+ u32 status = readl(ioaddr + GMAC_S_R_GMII);
+ CHIP_DBG(KERN_INFO "GMAC RGMII/SGMII interrupt\n");
+ x->irq_rgmii_n++;
+
+ /* Save and dump the link status. */
+ if (status & GMAC_S_R_GMII_LINK) {
+ int speed_value = (status & GMAC_S_R_GMII_SPEED) >>
+ GMAC_S_R_GMII_SPEED_SHIFT;
+ x->pcs_duplex = (status & GMAC_S_R_GMII_MODE);
+
+ if (speed_value == GMAC_S_R_GMII_SPEED_125)
+ x->pcs_speed = SPEED_1000;
+ else if (speed_value == GMAC_S_R_GMII_SPEED_25)
+ x->pcs_speed = SPEED_100;
+ else
+ x->pcs_speed = SPEED_10;
+
+ x->pcs_link = 1;
+ pr_debug("Link is Up - %d/%s\n", (int)x->pcs_speed,
+ x->pcs_duplex ? "Full" : "Half");
+ } else {
+ x->pcs_link = 0;
+ pr_debug("Link is Down\n");
}
}
- return status;
+ return ret;
}
-static void dwmac1000_set_eee_mode(void __iomem *ioaddr)
+static void dwmac1000_set_eee_mode(void __iomem *ioaddr)
{
u32 value;
/* Enable the link status receive on RGMII, SGMII or SMII
* receive path and instruct the transmit to enter in LPI
- * state. */
+ * state.
+ */
value = readl(ioaddr + LPI_CTRL_STATUS);
value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
writel(value, ioaddr + LPI_CTRL_STATUS);
}
-static void dwmac1000_reset_eee_mode(void __iomem *ioaddr)
+static void dwmac1000_reset_eee_mode(void __iomem *ioaddr)
{
u32 value;
@@ -269,7 +304,7 @@ static void dwmac1000_reset_eee_mode(void __iomem *ioaddr)
writel(value, ioaddr + LPI_CTRL_STATUS);
}
-static void dwmac1000_set_eee_pls(void __iomem *ioaddr, int link)
+static void dwmac1000_set_eee_pls(void __iomem *ioaddr, int link)
{
u32 value;
@@ -283,7 +318,7 @@ static void dwmac1000_set_eee_pls(void __iomem *ioaddr, int link)
writel(value, ioaddr + LPI_CTRL_STATUS);
}
-static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
+static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
{
int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16);
@@ -297,6 +332,41 @@ static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
writel(value, ioaddr + LPI_TIMER_CTRL);
}
+static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart)
+{
+ u32 value;
+
+	/* auto negotiation enable and External Loopback enable */
+	value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
+
+ if (restart)
+ value |= GMAC_AN_CTRL_RAN;
+
+ writel(value, ioaddr + GMAC_AN_CTRL);
+}
+
+static void dwmac1000_get_adv(void __iomem *ioaddr, struct rgmii_adv *adv)
+{
+ u32 value = readl(ioaddr + GMAC_ANE_ADV);
+
+ if (value & GMAC_ANE_FD)
+ adv->duplex = DUPLEX_FULL;
+ if (value & GMAC_ANE_HD)
+ adv->duplex |= DUPLEX_HALF;
+
+ adv->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
+
+ value = readl(ioaddr + GMAC_ANE_LPA);
+
+ if (value & GMAC_ANE_FD)
+ adv->lp_duplex = DUPLEX_FULL;
+ if (value & GMAC_ANE_HD)
+ adv->lp_duplex = DUPLEX_HALF;
+
+ adv->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
+}
+
static const struct stmmac_ops dwmac1000_ops = {
.core_init = dwmac1000_core_init,
.rx_ipc = dwmac1000_rx_ipc_enable,
@@ -307,10 +377,12 @@ static const struct stmmac_ops dwmac1000_ops = {
.pmt = dwmac1000_pmt,
.set_umac_addr = dwmac1000_set_umac_addr,
.get_umac_addr = dwmac1000_get_umac_addr,
- .set_eee_mode = dwmac1000_set_eee_mode,
- .reset_eee_mode = dwmac1000_reset_eee_mode,
- .set_eee_timer = dwmac1000_set_eee_timer,
- .set_eee_pls = dwmac1000_set_eee_pls,
+ .set_eee_mode = dwmac1000_set_eee_mode,
+ .reset_eee_mode = dwmac1000_reset_eee_mode,
+ .set_eee_timer = dwmac1000_set_eee_timer,
+ .set_eee_pls = dwmac1000_set_eee_pls,
+ .ctrl_ane = dwmac1000_ctrl_ane,
+ .get_adv = dwmac1000_get_adv,
};
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index bf83c03bfd06..2c431b616058 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -30,8 +30,8 @@
#include "dwmac1000.h"
#include "dwmac_dma.h"
-static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
- int mb, int burst_len, u32 dma_tx, u32 dma_rx)
+static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+ int burst_len, u32 dma_tx, u32 dma_rx, int atds)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
int limit;
@@ -60,7 +60,7 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
* depending on pbl value.
*/
value = DMA_BUS_MODE_PBL | ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
- (pbl << DMA_BUS_MODE_RPBL_SHIFT));
+ (pbl << DMA_BUS_MODE_RPBL_SHIFT));
/* Set the Fixed burst mode */
if (fb)
@@ -73,6 +73,10 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
#ifdef CONFIG_STMMAC_DA
value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */
#endif
+
+ if (atds)
+ value |= DMA_BUS_MODE_ATDS;
+
writel(value, ioaddr + DMA_BUS_MODE);
/* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE
@@ -90,14 +94,16 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
*
* For Non Fixed Burst Mode: provide the maximum value of the
* burst length. Any burst equal or below the provided burst
- * length would be allowed to perform. */
+ * length would be allowed to perform.
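+	 * e.g. a burst_len of 16 permits AXI bursts of 16 beats or fewer.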
+ */
writel(burst_len, ioaddr + DMA_AXI_BUS_MODE);
/* Mask interrupts by writing to CSR7 */
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
- /* The base address of the RX/TX descriptor lists must be written into
- * DMA CSR3 and CSR4, respectively. */
+ /* RX/TX descriptor base address lists must be written into
+ * DMA CSR3 and CSR4, respectively
+ */
writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
@@ -105,7 +111,7 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
}
static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
- int rxmode)
+ int rxmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -114,11 +120,12 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
/* Transmit COE type 2 cannot be done in cut-through mode. */
csr6 |= DMA_CONTROL_TSF;
/* Operating on second frame increase the performance
- * especially when transmit store-and-forward is used.*/
+ * especially when transmit store-and-forward is used.
+ */
csr6 |= DMA_CONTROL_OSF;
} else {
- CHIP_DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
- " (threshold = %d)\n", txmode);
+ CHIP_DBG(KERN_DEBUG "GMAC: disabling TX SF (threshold %d)\n",
+ txmode);
csr6 &= ~DMA_CONTROL_TSF;
csr6 &= DMA_CONTROL_TC_TX_MASK;
/* Set the transmit threshold */
@@ -138,8 +145,8 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
CHIP_DBG(KERN_DEBUG "GMAC: enable RX store and forward mode\n");
csr6 |= DMA_CONTROL_RSF;
} else {
- CHIP_DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
- " (threshold = %d)\n", rxmode);
+ CHIP_DBG(KERN_DEBUG "GMAC: disable RX SF mode (threshold %d)\n",
+ rxmode);
csr6 &= ~DMA_CONTROL_RSF;
csr6 &= DMA_CONTROL_TC_RX_MASK;
if (rxmode <= 32)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index f83210e7c221..007bb2be3f10 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -47,8 +47,7 @@ static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
{
pr_info("\t----------------------------------------------\n"
"\t DWMAC 100 CSR (base addr = 0x%p)\n"
- "\t----------------------------------------------\n",
- ioaddr);
+ "\t----------------------------------------------\n", ioaddr);
pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
readl(ioaddr + MAC_CONTROL));
pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
@@ -72,7 +71,8 @@ static int dwmac100_rx_ipc_enable(void __iomem *ioaddr)
return 0;
}
-static int dwmac100_irq_status(void __iomem *ioaddr)
+static int dwmac100_irq_status(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x)
{
return 0;
}
@@ -91,7 +91,7 @@ static void dwmac100_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
static void dwmac100_set_filter(struct net_device *dev, int id)
{
- void __iomem *ioaddr = (void __iomem *) dev->base_addr;
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
u32 value = readl(ioaddr + MAC_CONTROL);
if (dev->flags & IFF_PROMISC) {
@@ -112,7 +112,8 @@ static void dwmac100_set_filter(struct net_device *dev, int id)
struct netdev_hw_addr *ha;
/* Perfect filter mode for physical address and Hash
- filter for multicast */
+ * filter for multicast
+ */
value |= MAC_CONTROL_HP;
value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
MAC_CONTROL_IF | MAC_CONTROL_HO);
@@ -120,12 +121,13 @@ static void dwmac100_set_filter(struct net_device *dev, int id)
memset(mc_filter, 0, sizeof(mc_filter));
netdev_for_each_mc_addr(ha, dev) {
/* The upper 6 bits of the calculated CRC are used to
- * index the contens of the hash table */
- int bit_nr =
- ether_crc(ETH_ALEN, ha->addr) >> 26;
+			 * index the contents of the hash table
+ */
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
/* The most significant bit determines the register to
* use (H/L) while the other 5 bits determine the bit
- * within the register. */
+ * within the register.
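+			 * e.g. bit_nr = 45: 45 >> 5 = 1 selects MAC_HASH_HIGH
+			 * and 45 & 31 = 13 sets bit 13 of that register.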
+ */
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
@@ -134,10 +136,9 @@ static void dwmac100_set_filter(struct net_device *dev, int id)
writel(value, ioaddr + MAC_CONTROL);
- CHIP_DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
- "HI 0x%08x, LO 0x%08x\n",
- __func__, readl(ioaddr + MAC_CONTROL),
- readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
+ CHIP_DBG(KERN_INFO "%s: Filter: 0x%08x Hash: HI 0x%08x, LO 0x%08x\n",
+ __func__, readl(ioaddr + MAC_CONTROL),
+ readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
}
static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
@@ -150,9 +151,7 @@ static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
writel(flow, ioaddr + MAC_FLOW_CTRL);
}
-/* No PMT module supported for this Ethernet Controller.
- * Tested on ST platforms only.
- */
+/* No PMT module supported on ST boards with this Eth chip. */
static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode)
{
return;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index c2b4d55a79b6..67551c154138 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -32,8 +32,8 @@
#include "dwmac100.h"
#include "dwmac_dma.h"
-static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb,
- int mb, int burst_len, u32 dma_tx, u32 dma_rx)
+static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+ int burst_len, u32 dma_tx, u32 dma_rx, int atds)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
int limit;
@@ -52,22 +52,25 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb,
/* Enable Application Access by writing to DMA CSR0 */
writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
- ioaddr + DMA_BUS_MODE);
+ ioaddr + DMA_BUS_MODE);
/* Mask interrupts by writing to CSR7 */
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
- /* The base address of the RX/TX descriptor lists must be written into
- * DMA CSR3 and CSR4, respectively. */
+ /* RX/TX descriptor base addr lists must be written into
+ * DMA CSR3 and CSR4, respectively
+ */
writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
return 0;
}
-/* Store and Forward capability is not used at all..
- * The transmit threshold can be programmed by
- * setting the TTC bits in the DMA control register.*/
+/* Store and Forward capability is not used at all.
+ *
+ * The transmit threshold can be programmed by setting the TTC bits in the DMA
+ * control register.
+ */
static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode,
int rxmode)
{
@@ -90,16 +93,15 @@ static void dwmac100_dump_dma_regs(void __iomem *ioaddr)
CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n");
for (i = 0; i < 9; i++)
pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
- (DMA_BUS_MODE + i * 4),
- readl(ioaddr + DMA_BUS_MODE + i * 4));
+ (DMA_BUS_MODE + i * 4),
+ readl(ioaddr + DMA_BUS_MODE + i * 4));
CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
- DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
+ DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
- DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
+ DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
}
-/* DMA controller has two counters to track the number of
- * the receive missed frames. */
+/* DMA controller has two counters to track the number of missed frames. */
static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
void __iomem *ioaddr)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index ab4896ecac1c..8e5662ce488b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -102,7 +102,7 @@
#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
-#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
+#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
extern void dwmac_enable_dma_transmission(void __iomem *ioaddr);
extern void dwmac_enable_dma_irq(void __iomem *ioaddr);
@@ -112,6 +112,6 @@ extern void dwmac_dma_stop_tx(void __iomem *ioaddr);
extern void dwmac_dma_start_rx(void __iomem *ioaddr);
extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
extern int dwmac_dma_interrupt(void __iomem *ioaddr,
- struct stmmac_extra_stats *x);
+ struct stmmac_extra_stats *x);
#endif /* __DWMAC_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 2fc8ef95f97a..0fbc8fafa706 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -150,6 +150,57 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
return ret;
}
+static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_extended_desc *p)
+{
+ if (unlikely(p->basic.des01.erx.rx_mac_addr)) {
+ if (p->des4.erx.ip_hdr_err)
+ x->ip_hdr_err++;
+ if (p->des4.erx.ip_payload_err)
+ x->ip_payload_err++;
+ if (p->des4.erx.ip_csum_bypassed)
+ x->ip_csum_bypassed++;
+ if (p->des4.erx.ipv4_pkt_rcvd)
+ x->ipv4_pkt_rcvd++;
+ if (p->des4.erx.ipv6_pkt_rcvd)
+ x->ipv6_pkt_rcvd++;
+ if (p->des4.erx.msg_type == RDES_EXT_SYNC)
+ x->rx_msg_type_sync++;
+ else if (p->des4.erx.msg_type == RDES_EXT_FOLLOW_UP)
+ x->rx_msg_type_follow_up++;
+ else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
+ x->rx_msg_type_delay_req++;
+ else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
+ x->rx_msg_type_delay_resp++;
+		else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
+ x->rx_msg_type_pdelay_req++;
+ else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
+ x->rx_msg_type_pdelay_resp++;
+ else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_FOLLOW_UP)
+ x->rx_msg_type_pdelay_follow_up++;
+ else
+ x->rx_msg_type_ext_no_ptp++;
+ if (p->des4.erx.ptp_frame_type)
+ x->ptp_frame_type++;
+ if (p->des4.erx.ptp_ver)
+ x->ptp_ver++;
+ if (p->des4.erx.timestamp_dropped)
+ x->timestamp_dropped++;
+ if (p->des4.erx.av_pkt_rcvd)
+ x->av_pkt_rcvd++;
+ if (p->des4.erx.av_tagged_pkt_rcvd)
+ x->av_tagged_pkt_rcvd++;
+ if (p->des4.erx.vlan_tag_priority_val)
+ x->vlan_tag_priority_val++;
+ if (p->des4.erx.l3_filter_match)
+ x->l3_filter_match++;
+ if (p->des4.erx.l4_filter_match)
+ x->l4_filter_match++;
+ if (p->des4.erx.l3_l4_filter_no_match)
+ x->l3_l4_filter_no_match++;
+ }
+}
+
static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p)
{
@@ -198,7 +249,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
* At any rate, we need to understand if the CSUM hw computation is ok
* and report this info to the upper layers. */
ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
- p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
+ p->des01.erx.frame_type, p->des01.erx.rx_mac_addr);
if (unlikely(p->des01.erx.dribbling)) {
CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
@@ -225,34 +276,32 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
x->rx_vlan++;
}
#endif
+
return ret;
}
-static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic)
+static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+ int mode, int end)
{
- int i;
- for (i = 0; i < ring_size; i++) {
- p->des01.erx.own = 1;
- p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
+ p->des01.erx.own = 1;
+ p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
- ehn_desc_rx_set_on_ring_chain(p, (i == ring_size - 1));
+ if (mode == STMMAC_CHAIN_MODE)
+ ehn_desc_rx_set_on_chain(p, end);
+ else
+ ehn_desc_rx_set_on_ring(p, end);
- if (disable_rx_ic)
- p->des01.erx.disable_ic = 1;
- p++;
- }
+ if (disable_rx_ic)
+ p->des01.erx.disable_ic = 1;
}
-static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
- int i;
-
- for (i = 0; i < ring_size; i++) {
- p->des01.etx.own = 0;
- ehn_desc_tx_set_on_ring_chain(p, (i == ring_size - 1));
- p++;
- }
+ p->des01.etx.own = 0;
+ if (mode == STMMAC_CHAIN_MODE)
+ ehn_desc_tx_set_on_chain(p, end);
+ else
+ ehn_desc_tx_set_on_ring(p, end);
}
static int enh_desc_get_tx_owner(struct dma_desc *p)
@@ -280,20 +329,26 @@ static int enh_desc_get_tx_ls(struct dma_desc *p)
return p->des01.etx.last_segment;
}
-static void enh_desc_release_tx_desc(struct dma_desc *p)
+static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
{
int ter = p->des01.etx.end_ring;
memset(p, 0, offsetof(struct dma_desc, des2));
- enh_desc_end_tx_desc(p, ter);
+ if (mode == STMMAC_CHAIN_MODE)
+ enh_desc_end_tx_desc_on_chain(p, ter);
+ else
+ enh_desc_end_tx_desc_on_ring(p, ter);
}
static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
- int csum_flag)
+ int csum_flag, int mode)
{
p->des01.etx.first_segment = is_fs;
- enh_set_tx_desc_len(p, len);
+ if (mode == STMMAC_CHAIN_MODE)
+ enh_set_tx_desc_len_on_chain(p, len);
+ else
+ enh_set_tx_desc_len_on_ring(p, len);
if (likely(csum_flag))
p->des01.etx.checksum_insertion = cic_full;
@@ -323,6 +378,49 @@ static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
return p->des01.erx.frame_length;
}
+static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
+{
+ p->des01.etx.time_stamp_enable = 1;
+}
+
+static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
+{
+ return p->des01.etx.time_stamp_status;
+}
+
+static u64 enh_desc_get_timestamp(void *desc, u32 ats)
+{
+ u64 ns;
+
+ if (ats) {
+ struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
+ ns = p->des6;
+		/* convert the seconds (high word) to nanoseconds */
+ ns += p->des7 * 1000000000ULL;
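+		/* e.g. des7 = 2 (sec), des6 = 500 (ns) -> ns = 2000000500 */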
+ } else {
+ struct dma_desc *p = (struct dma_desc *)desc;
+ ns = p->des2;
+ ns += p->des3 * 1000000000ULL;
+ }
+
+ return ns;
+}
+
+static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
+{
+ if (ats) {
+ struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
+ return p->basic.des01.erx.ipc_csum_error;
+ } else {
+ struct dma_desc *p = (struct dma_desc *)desc;
+ if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
+ /* timestamp is corrupted, hence don't store it */
+ return 0;
+ else
+ return 1;
+ }
+}
+
const struct stmmac_desc_ops enh_desc_ops = {
.tx_status = enh_desc_get_tx_status,
.rx_status = enh_desc_get_rx_status,
@@ -339,4 +437,9 @@ const struct stmmac_desc_ops enh_desc_ops = {
.set_tx_owner = enh_desc_set_tx_owner,
.set_rx_owner = enh_desc_set_rx_owner,
.get_rx_frame_len = enh_desc_get_rx_frame_len,
+ .rx_extended_status = enh_desc_get_ext_status,
+ .enable_tx_timestamp = enh_desc_enable_tx_timestamp,
+ .get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
+ .get_timestamp = enh_desc_get_timestamp,
+ .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 67995ef25251..48ec001566b5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -28,8 +28,7 @@
/* MMC control register */
/* When set, all counter are reset */
#define MMC_CNTRL_COUNTER_RESET 0x1
-/* When set, do not roll over zero
- * after reaching the max value*/
+/* When set, do not roll over to zero after reaching the max value */
#define MMC_CNTRL_COUNTER_STOP_ROLLOVER 0x2
#define MMC_CNTRL_RESET_ON_READ 0x4 /* Reset after reading */
#define MMC_CNTRL_COUNTER_FREEZER 0x8 /* Freeze counter values to the
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 0c74a702d461..50617c5a0bdb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -149,6 +149,7 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
{
writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK);
}
/* This reads the MAC core counters (if actually supported).
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 68962c549a2d..11775b99afc5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -79,8 +79,8 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
struct net_device_stats *stats = (struct net_device_stats *)data;
if (unlikely(p->des01.rx.last_descriptor == 0)) {
- pr_warning("ndesc Error: Oversized Ethernet "
- "frame spanned multiple buffers\n");
+ pr_warn("%s: Oversized frame spanned multiple buffers\n",
+ __func__);
stats->rx_length_errors++;
return discard_frame;
}
@@ -122,30 +122,28 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic)
+static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
+ int end)
{
- int i;
- for (i = 0; i < ring_size; i++) {
- p->des01.rx.own = 1;
- p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+ p->des01.rx.own = 1;
+ p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
- ndesc_rx_set_on_ring_chain(p, (i == ring_size - 1));
+ if (mode == STMMAC_CHAIN_MODE)
+ ndesc_rx_set_on_chain(p, end);
+ else
+ ndesc_rx_set_on_ring(p, end);
- if (disable_rx_ic)
- p->des01.rx.disable_ic = 1;
- p++;
- }
+ if (disable_rx_ic)
+ p->des01.rx.disable_ic = 1;
}
-static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
- int i;
- for (i = 0; i < ring_size; i++) {
- p->des01.tx.own = 0;
- ndesc_tx_set_on_ring_chain(p, (i == (ring_size - 1)));
- p++;
- }
+ p->des01.tx.own = 0;
+ if (mode == STMMAC_CHAIN_MODE)
+ ndesc_tx_set_on_chain(p, end);
+ else
+ ndesc_tx_set_on_ring(p, end);
}
static int ndesc_get_tx_owner(struct dma_desc *p)
@@ -173,19 +171,25 @@ static int ndesc_get_tx_ls(struct dma_desc *p)
return p->des01.tx.last_segment;
}
-static void ndesc_release_tx_desc(struct dma_desc *p)
+static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
{
int ter = p->des01.tx.end_ring;
memset(p, 0, offsetof(struct dma_desc, des2));
- ndesc_end_tx_desc(p, ter);
+ if (mode == STMMAC_CHAIN_MODE)
+ ndesc_end_tx_desc_on_chain(p, ter);
+ else
+ ndesc_end_tx_desc_on_ring(p, ter);
}
static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
- int csum_flag)
+ int csum_flag, int mode)
{
p->des01.tx.first_segment = is_fs;
- norm_set_tx_desc_len(p, len);
+ if (mode == STMMAC_CHAIN_MODE)
+ norm_set_tx_desc_len_on_chain(p, len);
+ else
+ norm_set_tx_desc_len_on_ring(p, len);
if (likely(csum_flag))
p->des01.tx.checksum_insertion = cic_full;
@@ -215,6 +219,39 @@ static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
return p->des01.rx.frame_length;
}
+static void ndesc_enable_tx_timestamp(struct dma_desc *p)
+{
+ p->des01.tx.time_stamp_enable = 1;
+}
+
+static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
+{
+ return p->des01.tx.time_stamp_status;
+}
+
+static u64 ndesc_get_timestamp(void *desc, u32 ats)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ u64 ns;
+
+ ns = p->des2;
+	/* convert the seconds (high word) to nanoseconds */
+ ns += p->des3 * 1000000000ULL;
+
+ return ns;
+}
+
+static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+
+ if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
+ /* timestamp is corrupted, hence don't store it */
+ return 0;
+ else
+ return 1;
+}
+
const struct stmmac_desc_ops ndesc_ops = {
.tx_status = ndesc_get_tx_status,
.rx_status = ndesc_get_rx_status,
@@ -231,4 +268,8 @@ const struct stmmac_desc_ops ndesc_ops = {
.set_tx_owner = ndesc_set_tx_owner,
.set_rx_owner = ndesc_set_rx_owner,
.get_rx_frame_len = ndesc_get_rx_frame_len,
+ .enable_tx_timestamp = ndesc_enable_tx_timestamp,
+ .get_tx_timestamp_status = ndesc_get_tx_timestamp_status,
+ .get_timestamp = ndesc_get_timestamp,
+ .get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 4b785e10f2ed..c9d942a5c335 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -30,7 +30,7 @@
static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
- struct stmmac_priv *priv = (struct stmmac_priv *) p;
+ struct stmmac_priv *priv = (struct stmmac_priv *)p;
unsigned int txsize = priv->dma_tx_size;
unsigned int entry = priv->cur_tx % txsize;
struct dma_desc *desc = priv->dma_tx + entry;
@@ -48,25 +48,30 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device, skb->data,
bmax, DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[entry] = desc->des2;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->hw->desc->prepare_tx_desc(desc, 1, bmax,
- csum);
+ priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
+ STMMAC_RING_MODE);
wmb();
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
desc->des2 = dma_map_single(priv->device, skb->data + bmax,
len, DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[entry] = desc->des2;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
+ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
+ STMMAC_RING_MODE);
wmb();
priv->hw->desc->set_tx_owner(desc);
priv->tx_skbuff[entry] = NULL;
} else {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[entry] = desc->des2;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum);
+ priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
+ STMMAC_RING_MODE);
}
return entry;
@@ -82,27 +87,23 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
return ret;
}
-static void stmmac_refill_desc3(int bfsize, struct dma_desc *p)
+static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
{
- /* Fill DES3 in case of RING mode */
- if (bfsize >= BUF_SIZE_8KiB)
- p->des3 = p->des2 + BUF_SIZE_8KiB;
-}
+ struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
-/* In ring mode we need to fill the desc3 because it is used
- * as buffer */
-static void stmmac_init_desc3(int des3_as_data_buf, struct dma_desc *p)
-{
- if (unlikely(des3_as_data_buf))
- p->des3 = p->des2 + BUF_SIZE_8KiB;
+ if (unlikely(priv->plat->has_gmac))
+ /* Fill DES3 in case of RING mode */
+ if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
+ p->des3 = p->des2 + BUF_SIZE_8KiB;
}
-static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr,
- unsigned int size)
+/* In ring mode we need to fill the desc3 because it is used as buffer */
+static void stmmac_init_desc3(struct dma_desc *p)
{
+ p->des3 = p->des2 + BUF_SIZE_8KiB;
}
-static void stmmac_clean_desc3(struct dma_desc *p)
+static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
{
if (unlikely(p->des3))
p->des3 = 0;
@@ -121,7 +122,6 @@ const struct stmmac_ring_mode_ops ring_mode_ops = {
.jumbo_frm = stmmac_jumbo_frm,
.refill_desc3 = stmmac_refill_desc3,
.init_desc3 = stmmac_init_desc3,
- .init_dma_chain = stmmac_init_dma_chain,
.clean_desc3 = stmmac_clean_desc3,
.set_16kib_bfsize = stmmac_set_16kib_bfsize,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b05df8983be5..c922fde929a1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -24,43 +24,56 @@
#define __STMMAC_H__
#define STMMAC_RESOURCE_NAME "stmmaceth"
-#define DRV_MODULE_VERSION "Nov_2012"
+#define DRV_MODULE_VERSION "March_2013"
#include <linux/clk.h>
#include <linux/stmmac.h>
#include <linux/phy.h>
#include <linux/pci.h>
#include "common.h"
+#include <linux/ptp_clock_kernel.h>
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
- struct dma_desc *dma_tx ____cacheline_aligned;
- dma_addr_t dma_tx_phy;
+ struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
+ struct dma_desc *dma_tx;
struct sk_buff **tx_skbuff;
unsigned int cur_tx;
unsigned int dirty_tx;
unsigned int dma_tx_size;
+ u32 tx_count_frames;
+ u32 tx_coal_frames;
+ u32 tx_coal_timer;
+ dma_addr_t *tx_skbuff_dma;
+ dma_addr_t dma_tx_phy;
int tx_coalesce;
+ int hwts_tx_en;
+ spinlock_t tx_lock;
+ bool tx_path_in_lpi_mode;
+ struct timer_list txtimer;
- struct dma_desc *dma_rx ;
+ struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
+ struct dma_extended_desc *dma_erx;
+ struct sk_buff **rx_skbuff;
unsigned int cur_rx;
unsigned int dirty_rx;
- struct sk_buff **rx_skbuff;
+ unsigned int dma_rx_size;
+ unsigned int dma_buf_sz;
+ u32 rx_riwt;
+ int hwts_rx_en;
dma_addr_t *rx_skbuff_dma;
+ dma_addr_t dma_rx_phy;
+ struct napi_struct napi ____cacheline_aligned_in_smp;
+
+ void __iomem *ioaddr;
struct net_device *dev;
- dma_addr_t dma_rx_phy;
- unsigned int dma_rx_size;
- unsigned int dma_buf_sz;
struct device *device;
struct mac_device_info *hw;
- void __iomem *ioaddr;
-
- struct stmmac_extra_stats xstats;
- struct napi_struct napi;
int no_csum_insertion;
+ spinlock_t lock;
- struct phy_device *phydev;
+ struct phy_device *phydev ____cacheline_aligned_in_smp;
int oldlink;
int speed;
int oldduplex;
@@ -69,30 +82,31 @@ struct stmmac_priv {
struct mii_bus *mii;
int mii_irq[PHY_MAX_ADDR];
- u32 msg_enable;
- spinlock_t lock;
- spinlock_t tx_lock;
- int wolopts;
- int wol_irq;
+ struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp;
struct plat_stmmacenet_data *plat;
- struct stmmac_counters mmc;
struct dma_features dma_cap;
+ struct stmmac_counters mmc;
int hw_cap_support;
+ int synopsys_id;
+ u32 msg_enable;
+ int wolopts;
+ int wol_irq;
struct clk *stmmac_clk;
int clk_csr;
- int synopsys_id;
struct timer_list eee_ctrl_timer;
- bool tx_path_in_lpi_mode;
int lpi_irq;
int eee_enabled;
int eee_active;
int tx_lpi_timer;
- struct timer_list txtimer;
- u32 tx_count_frames;
- u32 tx_coal_frames;
- u32 tx_coal_timer;
+ int pcs;
+ unsigned int mode;
+ int extend_desc;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_ops;
+ unsigned int default_addend;
+ u32 adv_ts;
int use_riwt;
- u32 rx_riwt;
+ spinlock_t ptp_lock;
};
extern int phyaddr;
@@ -102,6 +116,9 @@ extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
+extern const struct stmmac_hwtimestamp stmmac_ptp;
+extern int stmmac_ptp_register(struct stmmac_priv *priv);
+extern void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_freeze(struct net_device *ndev);
int stmmac_restore(struct net_device *ndev);
int stmmac_resume(struct net_device *ndev);
@@ -125,6 +142,7 @@ static inline int stmmac_register_platform(void)
return err;
}
+
static inline void stmmac_unregister_platform(void)
{
platform_driver_unregister(&stmmac_pltfr_driver);
@@ -136,6 +154,7 @@ static inline int stmmac_register_platform(void)
return 0;
}
+
static inline void stmmac_unregister_platform(void)
{
}
@@ -153,6 +172,7 @@ static inline int stmmac_register_pci(void)
return err;
}
+
static inline void stmmac_unregister_pci(void)
{
pci_unregister_driver(&stmmac_pci_driver);
@@ -164,6 +184,7 @@ static inline int stmmac_register_pci(void)
return 0;
}
+
static inline void stmmac_unregister_pci(void)
{
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index d1ac39c1b05d..c5f9cb85c8ef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/mii.h>
#include <linux/phy.h>
+#include <linux/net_tstamp.h>
#include <asm/io.h>
#include "stmmac.h"
@@ -108,6 +109,33 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
STMMAC_STAT(phy_eee_wakeup_error_n),
+ /* Extended RDES status */
+ STMMAC_STAT(ip_hdr_err),
+ STMMAC_STAT(ip_payload_err),
+ STMMAC_STAT(ip_csum_bypassed),
+ STMMAC_STAT(ipv4_pkt_rcvd),
+ STMMAC_STAT(ipv6_pkt_rcvd),
+ STMMAC_STAT(rx_msg_type_ext_no_ptp),
+ STMMAC_STAT(rx_msg_type_sync),
+ STMMAC_STAT(rx_msg_type_follow_up),
+ STMMAC_STAT(rx_msg_type_delay_req),
+ STMMAC_STAT(rx_msg_type_delay_resp),
+ STMMAC_STAT(rx_msg_type_pdelay_req),
+ STMMAC_STAT(rx_msg_type_pdelay_resp),
+ STMMAC_STAT(rx_msg_type_pdelay_follow_up),
+ STMMAC_STAT(ptp_frame_type),
+ STMMAC_STAT(ptp_ver),
+ STMMAC_STAT(timestamp_dropped),
+ STMMAC_STAT(av_pkt_rcvd),
+ STMMAC_STAT(av_tagged_pkt_rcvd),
+ STMMAC_STAT(vlan_tag_priority_val),
+ STMMAC_STAT(l3_filter_match),
+ STMMAC_STAT(l4_filter_match),
+ STMMAC_STAT(l3_l4_filter_no_match),
+ /* PCS */
+ STMMAC_STAT(irq_pcs_ane_n),
+ STMMAC_STAT(irq_pcs_link_n),
+ STMMAC_STAT(irq_rgmii_n),
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
@@ -219,6 +247,70 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phy = priv->phydev;
int rc;
+
+ if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
+ struct rgmii_adv adv;
+
+ if (!priv->xstats.pcs_link) {
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
+ return 0;
+ }
+ cmd->duplex = priv->xstats.pcs_duplex;
+
+ ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed);
+
+ /* Get and convert ADV/LP_ADV from the HW AN registers */
+ if (priv->hw->mac->get_adv)
+ priv->hw->mac->get_adv(priv->ioaddr, &adv);
+ else
+			return -EOPNOTSUPP; /* should never happen */
+
+ /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
+
+ if (adv.pause & STMMAC_PCS_PAUSE)
+ cmd->advertising |= ADVERTISED_Pause;
+ if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
+ cmd->advertising |= ADVERTISED_Asym_Pause;
+ if (adv.lp_pause & STMMAC_PCS_PAUSE)
+ cmd->lp_advertising |= ADVERTISED_Pause;
+ if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
+ cmd->lp_advertising |= ADVERTISED_Asym_Pause;
+
+ /* Reg49[3] always set because ANE is always supported */
+		cmd->autoneg = AUTONEG_ENABLE;
+ cmd->supported |= SUPPORTED_Autoneg;
+ cmd->advertising |= ADVERTISED_Autoneg;
+ cmd->lp_advertising |= ADVERTISED_Autoneg;
+
+ if (adv.duplex) {
+ cmd->supported |= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_10baseT_Full);
+ cmd->advertising |= (ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Full);
+ } else {
+ cmd->supported |= (SUPPORTED_1000baseT_Half |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_10baseT_Half);
+ cmd->advertising |= (ADVERTISED_1000baseT_Half |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_10baseT_Half);
+ }
+ if (adv.lp_duplex)
+ cmd->lp_advertising |= (ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Full);
+ else
+ cmd->lp_advertising |= (ADVERTISED_1000baseT_Half |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_10baseT_Half);
+ cmd->port = PORT_OTHER;
+
+ return 0;
+ }
+
if (phy == NULL) {
pr_err("%s: %s: PHY is not registered\n",
__func__, dev->name);
@@ -243,6 +335,30 @@ static int stmmac_ethtool_setsettings(struct net_device *dev,
struct phy_device *phy = priv->phydev;
int rc;
+	if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
+		/* Only support ANE; the advertised mask is fixed by the HW */
+		if (cmd->autoneg != AUTONEG_ENABLE)
+			return -EINVAL;
+
+		spin_lock(&priv->lock);
+		if (priv->hw->mac->ctrl_ane)
+			priv->hw->mac->ctrl_ane(priv->ioaddr, 1);
+		spin_unlock(&priv->lock);
+
+		return 0;
+	}
+
spin_lock(&priv->lock);
rc = phy_ethtool_sset(phy, cmd);
spin_unlock(&priv->lock);
@@ -312,6 +428,9 @@ stmmac_get_pauseparam(struct net_device *netdev,
{
struct stmmac_priv *priv = netdev_priv(netdev);
+ if (priv->pcs) /* FIXME */
+ return;
+
spin_lock(&priv->lock);
pause->rx_pause = 0;
@@ -335,6 +454,9 @@ stmmac_set_pauseparam(struct net_device *netdev,
int new_pause = FLOW_OFF;
int ret = 0;
+ if (priv->pcs) /* FIXME */
+ return -EOPNOTSUPP;
+
spin_lock(&priv->lock);
if (pause->rx_pause)
@@ -604,6 +726,38 @@ static int stmmac_set_coalesce(struct net_device *dev,
return 0;
}
+static int stmmac_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if ((priv->hwts_tx_en) && (priv->hwts_rx_en)) {
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (priv->ptp_clock)
+ info->phc_index = ptp_clock_index(priv->ptp_clock);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_ALL));
+ return 0;
+ } else
+ return ethtool_op_get_ts_info(dev, info);
+}
+
static const struct ethtool_ops stmmac_ethtool_ops = {
.begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
@@ -623,7 +777,7 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_eee = stmmac_ethtool_op_get_eee,
.set_eee = stmmac_ethtool_op_set_eee,
.get_sset_count = stmmac_get_sset_count,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = stmmac_get_ts_info,
.get_coalesce = stmmac_get_coalesce,
.set_coalesce = stmmac_set_coalesce,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
new file mode 100644
index 000000000000..def7e75e1d57
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -0,0 +1,148 @@
+/*******************************************************************************
+ Copyright (C) 2013 Vayavya Labs Pvt Ltd
+
+ This implements the API for managing the HW timestamping and PTP.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include "common.h"
+#include "stmmac_ptp.h"
+
+static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data)
+{
+ writel(data, ioaddr + PTP_TCR);
+}
+
+static void stmmac_config_sub_second_increment(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + PTP_TCR);
+ unsigned long data;
+
+	/* Convert the ptp_clock period to nanoseconds:
+	 * data = (1 / ptp_clock) * 1000000000,
+	 * where ptp_clock = 50MHz.
+ */
+ data = (1000000000ULL / 50000000);
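+	/* i.e. data = 20: one sub-second tick every 20ns at 50MHz */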
+
+ /* 0.465ns accuracy */
+ if (value & PTP_TCR_TSCTRLSSR)
+ data = (data * 100) / 465;
+
+ writel(data, ioaddr + PTP_SSIR);
+}
+
+static int stmmac_init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
+{
+ int limit;
+ u32 value;
+
+ writel(sec, ioaddr + PTP_STSUR);
+ writel(nsec, ioaddr + PTP_STNSUR);
+ /* issue command to initialize the system time value */
+ value = readl(ioaddr + PTP_TCR);
+ value |= PTP_TCR_TSINIT;
+ writel(value, ioaddr + PTP_TCR);
+
+ /* wait for present system time initialize to complete */
+ limit = 10;
+ while (limit--) {
+ if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSINIT))
+ break;
+ mdelay(10);
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int stmmac_config_addend(void __iomem *ioaddr, u32 addend)
+{
+ u32 value;
+ int limit;
+
+ writel(addend, ioaddr + PTP_TAR);
+ /* issue command to update the addend value */
+ value = readl(ioaddr + PTP_TCR);
+ value |= PTP_TCR_TSADDREG;
+ writel(value, ioaddr + PTP_TCR);
+
+ /* wait for present addend update to complete */
+ limit = 10;
+ while (limit--) {
+ if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSADDREG))
+ break;
+ mdelay(10);
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
+ int add_sub)
+{
+ u32 value;
+ int limit;
+
+ writel(sec, ioaddr + PTP_STSUR);
+ writel(((add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec),
+ ioaddr + PTP_STNSUR);
+ /* issue command to initialize the system time value */
+ value = readl(ioaddr + PTP_TCR);
+ value |= PTP_TCR_TSUPDT;
+ writel(value, ioaddr + PTP_TCR);
+
+ /* wait for present system time adjust/update to complete */
+ limit = 10;
+ while (limit--) {
+ if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSUPDT))
+ break;
+ mdelay(10);
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static u64 stmmac_get_systime(void __iomem *ioaddr)
+{
+ u64 ns;
+
+ ns = readl(ioaddr + PTP_STNSR);
+ /* convert sec time value to nanosecond */
+ ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
+
+ return ns;
+}
+
+const struct stmmac_hwtimestamp stmmac_ptp = {
+ .config_hw_tstamping = stmmac_config_hw_tstamping,
+ .init_systime = stmmac_init_systime,
+ .config_sub_second_increment = stmmac_config_sub_second_increment,
+ .config_addend = stmmac_config_addend,
+ .adjust_systime = stmmac_adjust_systime,
+ .get_systime = stmmac_get_systime,
+};
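[Editor's note: the ptp_clock_info callbacks that consume these hooks live in
stmmac_ptp.c, which is not shown in this hunk. The sketch below illustrates a
frequency-adjust callback using the usual addend-scaling approach; the
function name, locking and fixed-point math are assumptions, not code quoted
from this series.]

#include <linux/math64.h>	/* div_u64(); usual driver includes assumed */

/* Sketch only: rescale default_addend by ppb and program it through the
 * config_addend() hook registered above.
 */
static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct stmmac_priv *priv =
	    container_of(ptp, struct stmmac_priv, ptp_clock_ops);
	unsigned long flags;
	u32 addend, diff;
	int neg_adj = 0;

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}

	/* diff = addend * ppb / 1e9, computed in 64 bits to avoid overflow */
	addend = priv->default_addend;
	diff = div_u64((u64)addend * (u64)ppb, 1000000000ULL);
	addend = neg_adj ? (addend - diff) : (addend + diff);

	spin_lock_irqsave(&priv->ptp_lock, flags);
	priv->hw->ptp->config_addend(priv->ioaddr, addend);
	spin_unlock_irqrestore(&priv->ptp_lock, flags);

	return 0;
}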
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 39c6c5524633..618446ae1ec1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -46,7 +46,9 @@
#ifdef CONFIG_STMMAC_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#endif
+#endif /* CONFIG_STMMAC_DEBUG_FS */
+#include <linux/net_tstamp.h>
+#include "stmmac_ptp.h"
#include "stmmac.h"
#undef STMMAC_DEBUG
@@ -79,14 +81,14 @@
#define JUMBO_LEN 9000
/* Module parameters */
-#define TX_TIMEO 5000 /* default 5 seconds */
+#define TX_TIMEO 5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds");
+MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
-static int debug = -1; /* -1: default, 0: no output, 16: all */
+static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)");
+MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
@@ -130,6 +132,13 @@ module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
+/* By default the driver uses ring mode to manage tx and rx descriptors,
+ * but the user can pass this parameter to force chain mode instead.
+ */
+static unsigned int chain_mode;
+module_param(chain_mode, int, S_IRUGO);
+MODULE_PARM_DESC(chain_mode, "Use chain mode instead of ring mode");
+
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
#ifdef CONFIG_STMMAC_DEBUG_FS
@@ -164,6 +173,18 @@ static void stmmac_verify_args(void)
eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}
+/**
+ * stmmac_clk_csr_set - dynamically set the MDC clock
+ * @priv: driver private structure
+ * Description: this is to dynamically set the MDC clock according to the csr
+ * clock input.
+ * Note:
+ * If a specific clk_csr value is passed from the platform
+ * this means that the CSR Clock Range selection cannot be
+ * changed at run-time and it is fixed (as reported in the driver
+ * documentation). Otherwise, the driver will try to set the MDC
+ * clock dynamically according to the actual clock input.
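+ * For example, a 75MHz clk_csr_i input would fall in the 60-100MHz range
+ * and select STMMAC_CSR_60_100M.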
+ */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
u32 clk_rate;
@@ -171,7 +192,12 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
clk_rate = clk_get_rate(priv->stmmac_clk);
/* Platform provided default clk_csr would be assumed valid
- * for all other cases except for the below mentioned ones. */
+ * for all other cases except for the below mentioned ones.
+ * For values higher than the IEEE 802.3 specified frequency
+ * we can not estimate the proper divider as it is not known
+ * the frequency of clk_csr_i. So we do not change the default
+ * divider.
+ */
if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
if (clk_rate < CSR_F_35M)
priv->clk_csr = STMMAC_CSR_20_35M;
@@ -185,10 +211,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
priv->clk_csr = STMMAC_CSR_150_250M;
else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
priv->clk_csr = STMMAC_CSR_250_300M;
- } /* For values higher than the IEEE 802.3 specified frequency
- * we can not estimate the proper divider as it is not known
- * the frequency of clk_csr_i. So we do not change the default
- * divider. */
+ }
}
#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
@@ -213,18 +236,25 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
}
-/* On some ST platforms, some HW system configuraton registers have to be
- * set according to the link speed negotiated.
+/**
+ * stmmac_hw_fix_mac_speed: callback for speed selection
+ * @priv: driver private structure
+ * Description: on some platforms (e.g. ST), some HW system configuration
+ * registers have to be set according to the link speed negotiated.
*/
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
struct phy_device *phydev = priv->phydev;
if (likely(priv->plat->fix_mac_speed))
- priv->plat->fix_mac_speed(priv->plat->bsp_priv,
- phydev->speed);
+ priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}
+/**
+ * stmmac_enable_eee_mode: check and enter LPI mode
+ * @priv: driver private structure
+ * Description: this function verifies the conditions and enters the LPI
+ * mode for EEE.
+ */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
/* Check and enter in LPI mode */
@@ -233,19 +263,24 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
priv->hw->mac->set_eee_mode(priv->ioaddr);
}
+/**
+ * stmmac_disable_eee_mode: disable/exit from EEE
+ * @priv: driver private structure
+ * Description: this function disables EEE and exits the LPI state, if it
+ * is active. It is called from the xmit path.
+ */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
- /* Exit and disable EEE in case of we are are in LPI state. */
priv->hw->mac->reset_eee_mode(priv->ioaddr);
del_timer_sync(&priv->eee_ctrl_timer);
priv->tx_path_in_lpi_mode = false;
}
/**
- * stmmac_eee_ctrl_timer
+ * stmmac_eee_ctrl_timer: EEE TX SW timer.
* @arg : data hook
* Description:
- * If there is no data transfer and if we are not in LPI state,
+ * if there is no data transfer and if we are not in LPI state,
* then MAC Transmitter can be moved to LPI state.
*/
static void stmmac_eee_ctrl_timer(unsigned long arg)
@@ -257,8 +292,8 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
}
/**
- * stmmac_eee_init
- * @priv: private device pointer
+ * stmmac_eee_init: init EEE
+ * @priv: driver private structure
* Description:
* If the EEE support has been enabled while configuring the driver,
* if the GMAC actually supports the EEE (from the HW cap reg) and the
@@ -294,16 +329,359 @@ out:
return ret;
}
+/**
+ * stmmac_eee_adjust: adjust HW EEE according to the speed
+ * @priv: driver private structure
+ * Description:
+ * Once the EEE has been initialised, the PLS bit in the LPI ctrl & status
+ * reg has to be modified according to the PHY link status.
+ */
static void stmmac_eee_adjust(struct stmmac_priv *priv)
{
- /* When the EEE has been already initialised we have to
- * modify the PLS bit in the LPI ctrl & status reg according
- * to the PHY link status. For this reason.
- */
if (priv->eee_enabled)
priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
}
+/* stmmac_get_tx_hwtstamp: get HW TX timestamps
+ * @priv: driver private structure
+ * @entry : descriptor index to be used.
+ * @skb : the socket buffer
+ * Description :
+ * This function reads the timestamp from the descriptor, performs some
+ * sanity checks and passes it to the stack.
+ */
+static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
+ unsigned int entry, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps shhwtstamp;
+ u64 ns;
+ void *desc = NULL;
+
+ if (!priv->hwts_tx_en)
+ return;
+
+ /* exit if skb doesn't support hw tstamp */
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
+ return;
+
+ if (priv->adv_ts)
+ desc = (priv->dma_etx + entry);
+ else
+ desc = (priv->dma_tx + entry);
+
+ /* check tx tstamp status */
+ if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
+ return;
+
+ /* get the valid tstamp */
+ ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
+
+ memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp.hwtstamp = ns_to_ktime(ns);
+ /* pass tstamp to stack */
+ skb_tstamp_tx(skb, &shhwtstamp);
+
+ return;
+}
+
+/* stmmac_get_rx_hwtstamp: get HW RX timestamps
+ * @priv: driver private structure
+ * @entry : descriptor index to be used.
+ * @skb : the socket buffer
+ * Description :
+ * This function will read received packet's timestamp from the descriptor
+ * and pass it to the stack. It also performs some sanity checks.
+ */
+static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
+ unsigned int entry, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shhwtstamp = NULL;
+ u64 ns;
+ void *desc = NULL;
+
+ if (!priv->hwts_rx_en)
+ return;
+
+ if (priv->adv_ts)
+ desc = (priv->dma_erx + entry);
+ else
+ desc = (priv->dma_rx + entry);
+
+ /* exit if rx tstamp is not valid */
+ if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
+ return;
+
+ /* get valid tstamp */
+ ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
+ shhwtstamp = skb_hwtstamps(skb);
+ memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp->hwtstamp = ns_to_ktime(ns);
+}
+
+/**
+ * stmmac_hwtstamp_ioctl - control hardware timestamping.
+ * @dev: device pointer.
+ * @ifr: an IOCTL-specific structure that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * Description:
+ * This function configures the MAC to enable/disable both outgoing(TX)
+ * and incoming(RX) packets time stamping based on user input.
+ * Return Value:
+ * 0 on success and an appropriate -ve integer on failure.
+ */
+static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config config;
+ struct timespec now;
+ u64 temp = 0;
+ u32 ptp_v2 = 0;
+ u32 tstamp_all = 0;
+ u32 ptp_over_ipv4_udp = 0;
+ u32 ptp_over_ipv6_udp = 0;
+ u32 ptp_over_ethernet = 0;
+ u32 snap_type_sel = 0;
+ u32 ts_master_en = 0;
+ u32 ts_event_en = 0;
+ u32 value = 0;
+
+ if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
+ netdev_alert(priv->dev, "No support for HW time stamping\n");
+ priv->hwts_tx_en = 0;
+ priv->hwts_rx_en = 0;
+
+ return -EOPNOTSUPP;
+ }
+
+ if (copy_from_user(&config, ifr->ifr_data,
+ sizeof(struct hwtstamp_config)))
+ return -EFAULT;
+
+ pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
+ __func__, config.flags, config.tx_type, config.rx_filter);
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->hwts_tx_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->hwts_tx_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (priv->adv_ts) {
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+			/* do not time stamp any incoming packet */
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ /* PTP v1, UDP, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ /* take time stamp for all event messages */
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ /* PTP v1, UDP, Sync packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ /* take time stamp for SYNC messages only */
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ /* PTP v1, UDP, Delay_req packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ /* take time stamp for Delay_Req messages only */
+ ts_master_en = PTP_TCR_TSMSTRENA;
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ /* PTP v2, UDP, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for all event messages */
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ /* PTP v2, UDP, Sync packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for SYNC messages only */
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ /* PTP v2, UDP, Delay_req packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for Delay_Req messages only */
+ ts_master_en = PTP_TCR_TSMSTRENA;
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+			/* PTP v2/802.1AS, any layer, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for all event messages */
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ ptp_over_ethernet = PTP_TCR_TSIPENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+			/* PTP v2/802.1AS, any layer, Sync packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for SYNC messages only */
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ ptp_over_ethernet = PTP_TCR_TSIPENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+			/* PTP v2/802.1AS, any layer, Delay_req packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for Delay_Req messages only */
+ ts_master_en = PTP_TCR_TSMSTRENA;
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ ptp_over_ethernet = PTP_TCR_TSIPENA;
+ break;
+
+ case HWTSTAMP_FILTER_ALL:
+ /* time stamp any incoming packet */
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_all = PTP_TCR_TSENALL;
+ break;
+
+ default:
+ return -ERANGE;
+ }
+ } else {
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ default:
+ /* PTP v1, UDP, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ }
+ }
+ priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
+
+ if (!priv->hwts_tx_en && !priv->hwts_rx_en)
+ priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
+ else {
+ value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
+ tstamp_all | ptp_v2 | ptp_over_ethernet |
+ ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
+ ts_master_en | snap_type_sel);
+
+ priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);
+
+ /* program Sub Second Increment reg */
+ priv->hw->ptp->config_sub_second_increment(priv->ioaddr);
+
+ /* calculate default added value:
+ * formula is :
+ * addend = (2^32)/freq_div_ratio;
+ * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz
+ * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK;
+ * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to
+		 *  achieve 20ns accuracy.
+ *
+ * 2^x * y == (y << x), hence
+ * 2^32 * 50000000 ==> (50000000 << 32)
+ */
+ temp = (u64) (50000000ULL << 32);
+ priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK);
+ priv->hw->ptp->config_addend(priv->ioaddr,
+ priv->default_addend);
+
+ /* initialize system time */
+ getnstimeofday(&now);
+ priv->hw->ptp->init_systime(priv->ioaddr, now.tv_sec,
+ now.tv_nsec);
+ }
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
+}
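As a worked example of the addend formula above, a standalone sketch (assuming STMMAC_SYSCLOCK is 62.5 MHz; the real macro value is platform-specific):

/* Sketch of the default addend computation; the clock value is an
 * assumption for illustration only. */
#include <stdint.h>
#include <stdio.h>

#define STMMAC_SYSCLOCK 62500000ULL	/* assumed 62.5 MHz system clock */

int main(void)
{
	/* addend = (2^32 * 50 MHz) / STMMAC_SYSCLOCK */
	uint64_t temp = 50000000ULL << 32;
	uint32_t addend = (uint32_t)(temp / STMMAC_SYSCLOCK);

	/* Prints 0xcccccccc here: the 32-bit accumulator then wraps
	 * 50e6 times per second, matching the 20ns resolution noted
	 * in the comment above. */
	printf("default_addend = 0x%08x\n", addend);
	return 0;
}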
+
+/**
+ * stmmac_init_ptp: init PTP
+ * @priv: driver private structure
+ * Description: this verifies if the HW supports PTP v1 or v2.
+ * This is done by looking at the HW cap. register.
+ * It also registers the ptp driver.
+ */
+static int stmmac_init_ptp(struct stmmac_priv *priv)
+{
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
+ return -EOPNOTSUPP;
+
+	priv->adv_ts = 0;
+	if (priv->dma_cap.atime_stamp && priv->extend_desc)
+		priv->adv_ts = 1;
+
+	if (netif_msg_hw(priv)) {
+		if (priv->dma_cap.time_stamp)
+			pr_debug("IEEE 1588-2002 Time Stamp supported\n");
+		if (priv->adv_ts)
+			pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
+	}
+
+ priv->hw->ptp = &stmmac_ptp;
+ priv->hwts_tx_en = 0;
+ priv->hwts_rx_en = 0;
+
+ return stmmac_ptp_register(priv);
+}
+
+static void stmmac_release_ptp(struct stmmac_priv *priv)
+{
+ stmmac_ptp_unregister(priv);
+}
+
/**
* stmmac_adjust_link
* @dev: net device structure
@@ -349,7 +727,7 @@ static void stmmac_adjust_link(struct net_device *dev)
case 1000:
if (likely(priv->plat->has_gmac))
ctrl &= ~priv->hw->link.port;
- stmmac_hw_fix_mac_speed(priv);
+ stmmac_hw_fix_mac_speed(priv);
break;
case 100:
case 10:
@@ -367,8 +745,8 @@ static void stmmac_adjust_link(struct net_device *dev)
break;
default:
if (netif_msg_link(priv))
- pr_warning("%s: Speed (%d) is not 10"
- " or 100!\n", dev->name, phydev->speed);
+ pr_warn("%s: Speed (%d) not 10/100\n",
+ dev->name, phydev->speed);
break;
}
@@ -399,6 +777,31 @@ static void stmmac_adjust_link(struct net_device *dev)
}
/**
+ * stmmac_check_pcs_mode: verify if RGMII/SGMII is supported
+ * @priv: driver private structure
+ * Description: this verifies if the HW supports the Physical Coding
+ * Sublayer (PCS), an interface that can be used when the MAC is
+ * configured for the TBI, RTBI, or SGMII PHY interface.
+ */
+static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
+{
+ int interface = priv->plat->interface;
+
+ if (priv->dma_cap.pcs) {
+		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
+		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
+		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
+		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+			pr_debug("STMMAC: PCS RGMII support enabled\n");
+			priv->pcs = STMMAC_PCS_RGMII;
+		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
+			pr_debug("STMMAC: PCS SGMII support enabled\n");
+			priv->pcs = STMMAC_PCS_SGMII;
+ }
+ }
+}
+
+/**
* stmmac_init_phy - PHY initialization
* @dev: net device structure
* Description: it initializes the driver's PHY state, and attaches the PHY
@@ -419,10 +822,10 @@ static int stmmac_init_phy(struct net_device *dev)
if (priv->plat->phy_bus_name)
snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
- priv->plat->phy_bus_name, priv->plat->bus_id);
+ priv->plat->phy_bus_name, priv->plat->bus_id);
else
snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
- priv->plat->bus_id);
+ priv->plat->bus_id);
snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
priv->plat->phy_addr);
@@ -461,29 +864,57 @@ static int stmmac_init_phy(struct net_device *dev)
}
/**
- * display_ring
- * @p: pointer to the ring.
+ * stmmac_display_ring: display ring
+ * @head: pointer to the head of the ring passed.
* @size: size of the ring.
- * Description: display all the descriptors within the ring.
+ * @extend_desc: to verify if extended descriptors are used.
+ * Description: display the control/status and buffer descriptors.
*/
-static void display_ring(struct dma_desc *p, int size)
+static void stmmac_display_ring(void *head, int size, int extend_desc)
{
- struct tmp_s {
- u64 a;
- unsigned int b;
- unsigned int c;
- };
int i;
+ struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
+ struct dma_desc *p = (struct dma_desc *)head;
+
for (i = 0; i < size; i++) {
- struct tmp_s *x = (struct tmp_s *)(p + i);
- pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
- i, (unsigned int)virt_to_phys(&p[i]),
- (unsigned int)(x->a), (unsigned int)((x->a) >> 32),
- x->b, x->c);
+ u64 x;
+ if (extend_desc) {
+ x = *(u64 *) ep;
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, (unsigned int)virt_to_phys(ep),
+ (unsigned int)x, (unsigned int)(x >> 32),
+ ep->basic.des2, ep->basic.des3);
+ ep++;
+ } else {
+ x = *(u64 *) p;
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
+ i, (unsigned int)virt_to_phys(p),
+ (unsigned int)x, (unsigned int)(x >> 32),
+ p->des2, p->des3);
+ p++;
+ }
pr_info("\n");
}
}
+static void stmmac_display_rings(struct stmmac_priv *priv)
+{
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int rxsize = priv->dma_rx_size;
+
+ if (priv->extend_desc) {
+ pr_info("Extended RX descriptor ring:\n");
+ stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
+ pr_info("Extended TX descriptor ring:\n");
+ stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
+ } else {
+ pr_info("RX descriptor ring:\n");
+ stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
+ pr_info("TX descriptor ring:\n");
+ stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
+ }
+}
+
static int stmmac_set_bfsize(int mtu, int bufsize)
{
int ret = bufsize;
@@ -501,6 +932,65 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
}
/**
+ * stmmac_clear_descriptors: clear descriptors
+ * @priv: driver private structure
+ * Description: this function is called to clear the TX and RX descriptors,
+ * whether basic or extended descriptors are in use.
+ */
+static void stmmac_clear_descriptors(struct stmmac_priv *priv)
+{
+ int i;
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int rxsize = priv->dma_rx_size;
+
+ /* Clear the Rx/Tx descriptors */
+ for (i = 0; i < rxsize; i++)
+ if (priv->extend_desc)
+ priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
+ priv->use_riwt, priv->mode,
+ (i == rxsize - 1));
+ else
+ priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
+ priv->use_riwt, priv->mode,
+ (i == rxsize - 1));
+ for (i = 0; i < txsize; i++)
+ if (priv->extend_desc)
+ priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
+ priv->mode,
+ (i == txsize - 1));
+ else
+ priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
+ priv->mode,
+ (i == txsize - 1));
+}
+
+static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
+ int i)
+{
+ struct sk_buff *skb;
+
+ skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
+ GFP_KERNEL);
+ if (unlikely(skb == NULL)) {
+ pr_err("%s: Rx init fails; skb is NULL\n", __func__);
+ return 1;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ priv->rx_skbuff[i] = skb;
+ priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+ priv->dma_buf_sz,
+ DMA_FROM_DEVICE);
+
+ p->des2 = priv->rx_skbuff_dma[i];
+
+ if ((priv->mode == STMMAC_RING_MODE) &&
+ (priv->dma_buf_sz == BUF_SIZE_16KiB))
+ priv->hw->ring->init_desc3(p);
+
+ return 0;
+}
+
+/**
* init_dma_desc_rings - init the RX/TX descriptor rings
* @dev: net device structure
* Description: this function initializes the DMA RX/TX descriptors
@@ -511,110 +1001,114 @@ static void init_dma_desc_rings(struct net_device *dev)
{
int i;
struct stmmac_priv *priv = netdev_priv(dev);
- struct sk_buff *skb;
unsigned int txsize = priv->dma_tx_size;
unsigned int rxsize = priv->dma_rx_size;
- unsigned int bfsize;
- int dis_ic = 0;
- int des3_as_data_buf = 0;
+ unsigned int bfsize = 0;
/* Set the max buffer size according to the DESC mode
- * and the MTU. Note that RING mode allows 16KiB bsize. */
- bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
+ * and the MTU. Note that RING mode allows 16KiB bsize.
+ */
+ if (priv->mode == STMMAC_RING_MODE)
+ bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
- if (bfsize == BUF_SIZE_16KiB)
- des3_as_data_buf = 1;
- else
+ if (bfsize < BUF_SIZE_16KiB)
bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
txsize, rxsize, bfsize);
- priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
- GFP_KERNEL);
- priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
- GFP_KERNEL);
- priv->dma_rx =
- (struct dma_desc *)dma_alloc_coherent(priv->device,
- rxsize *
+ if (priv->extend_desc) {
+		priv->dma_erx = dma_alloc_coherent(priv->device,
+				rxsize * sizeof(struct dma_extended_desc),
+				&priv->dma_rx_phy, GFP_KERNEL);
+		priv->dma_etx = dma_alloc_coherent(priv->device,
+				txsize * sizeof(struct dma_extended_desc),
+				&priv->dma_tx_phy, GFP_KERNEL);
+ if ((!priv->dma_erx) || (!priv->dma_etx))
+ return;
+ } else {
+ priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
sizeof(struct dma_desc),
&priv->dma_rx_phy,
GFP_KERNEL);
- priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
- GFP_KERNEL);
- priv->dma_tx =
- (struct dma_desc *)dma_alloc_coherent(priv->device,
- txsize *
+ priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
sizeof(struct dma_desc),
&priv->dma_tx_phy,
GFP_KERNEL);
-
- if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) {
- pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__);
- return;
+ if ((!priv->dma_rx) || (!priv->dma_tx))
+ return;
}
- DBG(probe, INFO, "stmmac (%s) DMA desc: virt addr (Rx %p, "
- "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
- dev->name, priv->dma_rx, priv->dma_tx,
- (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
+ priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (netif_msg_drv(priv))
+ pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
+ (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
/* RX INITIALIZATION */
- DBG(probe, INFO, "stmmac: SKB addresses:\n"
- "skb\t\tskb data\tdma data\n");
-
+ DBG(probe, INFO, "stmmac: SKB addresses:\nskb\t\tskb data\tdma data\n");
for (i = 0; i < rxsize; i++) {
- struct dma_desc *p = priv->dma_rx + i;
+ struct dma_desc *p;
+ if (priv->extend_desc)
+ p = &((priv->dma_erx + i)->basic);
+ else
+ p = priv->dma_rx + i;
- skb = __netdev_alloc_skb(dev, bfsize + NET_IP_ALIGN,
- GFP_KERNEL);
- if (unlikely(skb == NULL)) {
- pr_err("%s: Rx init fails; skb is NULL\n", __func__);
+ if (stmmac_init_rx_buffers(priv, p, i))
break;
- }
- skb_reserve(skb, NET_IP_ALIGN);
- priv->rx_skbuff[i] = skb;
- priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
- bfsize, DMA_FROM_DEVICE);
-
- p->des2 = priv->rx_skbuff_dma[i];
-
- priv->hw->ring->init_desc3(des3_as_data_buf, p);
DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
- priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
+ priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
}
priv->cur_rx = 0;
priv->dirty_rx = (unsigned int)(i - rxsize);
priv->dma_buf_sz = bfsize;
buf_sz = bfsize;
+ /* Setup the chained descriptor addresses */
+ if (priv->mode == STMMAC_CHAIN_MODE) {
+ if (priv->extend_desc) {
+ priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy,
+ rxsize, 1);
+ priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy,
+ txsize, 1);
+ } else {
+ priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy,
+ rxsize, 0);
+ priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy,
+ txsize, 0);
+ }
+ }
+
/* TX INITIALIZATION */
for (i = 0; i < txsize; i++) {
+ struct dma_desc *p;
+ if (priv->extend_desc)
+ p = &((priv->dma_etx + i)->basic);
+ else
+ p = priv->dma_tx + i;
+ p->des2 = 0;
+ priv->tx_skbuff_dma[i] = 0;
priv->tx_skbuff[i] = NULL;
- priv->dma_tx[i].des2 = 0;
}
- /* In case of Chained mode this sets the des3 to the next
- * element in the chain */
- priv->hw->ring->init_dma_chain(priv->dma_rx, priv->dma_rx_phy, rxsize);
- priv->hw->ring->init_dma_chain(priv->dma_tx, priv->dma_tx_phy, txsize);
-
priv->dirty_tx = 0;
priv->cur_tx = 0;
- if (priv->use_riwt)
- dis_ic = 1;
- /* Clear the Rx/Tx descriptors */
- priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
- priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
+ stmmac_clear_descriptors(priv);
- if (netif_msg_hw(priv)) {
- pr_info("RX descriptor ring:\n");
- display_ring(priv->dma_rx, rxsize);
- pr_info("TX descriptor ring:\n");
- display_ring(priv->dma_tx, txsize);
- }
+ if (netif_msg_hw(priv))
+ stmmac_display_rings(priv);
}
static void dma_free_rx_skbufs(struct stmmac_priv *priv)
@@ -637,13 +1131,20 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
for (i = 0; i < priv->dma_tx_size; i++) {
if (priv->tx_skbuff[i] != NULL) {
- struct dma_desc *p = priv->dma_tx + i;
- if (p->des2)
- dma_unmap_single(priv->device, p->des2,
+ struct dma_desc *p;
+ if (priv->extend_desc)
+ p = &((priv->dma_etx + i)->basic);
+ else
+ p = priv->dma_tx + i;
+
+ if (priv->tx_skbuff_dma[i])
+ dma_unmap_single(priv->device,
+ priv->tx_skbuff_dma[i],
priv->hw->desc->get_tx_len(p),
DMA_TO_DEVICE);
dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
+ priv->tx_skbuff_dma[i] = 0;
}
}
}
@@ -654,29 +1155,38 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
dma_free_rx_skbufs(priv);
dma_free_tx_skbufs(priv);
- /* Free the region of consistent memory previously allocated for
- * the DMA */
- dma_free_coherent(priv->device,
- priv->dma_tx_size * sizeof(struct dma_desc),
- priv->dma_tx, priv->dma_tx_phy);
- dma_free_coherent(priv->device,
- priv->dma_rx_size * sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
+ /* Free DMA regions of consistent memory previously allocated */
+ if (!priv->extend_desc) {
+ dma_free_coherent(priv->device,
+ priv->dma_tx_size * sizeof(struct dma_desc),
+ priv->dma_tx, priv->dma_tx_phy);
+ dma_free_coherent(priv->device,
+ priv->dma_rx_size * sizeof(struct dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ } else {
+ dma_free_coherent(priv->device, priv->dma_tx_size *
+ sizeof(struct dma_extended_desc),
+ priv->dma_etx, priv->dma_tx_phy);
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ priv->dma_erx, priv->dma_rx_phy);
+ }
kfree(priv->rx_skbuff_dma);
kfree(priv->rx_skbuff);
+ kfree(priv->tx_skbuff_dma);
kfree(priv->tx_skbuff);
}
/**
* stmmac_dma_operation_mode - HW DMA operation mode
- * @priv : pointer to the private device structure.
+ * @priv: driver private structure
* Description: it sets the DMA operation mode: tx/rx DMA thresholds
* or Store-And-Forward capability.
*/
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
if (likely(priv->plat->force_sf_dma_mode ||
- ((priv->plat->tx_coe) && (!priv->no_csum_insertion)))) {
+ ((priv->plat->tx_coe) && (!priv->no_csum_insertion)))) {
/*
* In case of GMAC, SF mode can be enabled
* to perform the TX COE in HW. This depends on:
@@ -684,8 +1194,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
* 2) There is no bugged Jumbo frame support
* that needs to not insert csum in the TDES.
*/
- priv->hw->dma->dma_mode(priv->ioaddr,
- SF_DMA_MODE, SF_DMA_MODE);
+ priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE);
tc = SF_DMA_MODE;
} else
priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
@@ -693,7 +1202,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
/**
* stmmac_tx_clean:
- * @priv: private data pointer
+ * @priv: driver private structure
* Description: it reclaims resources after transmission completes.
*/
static void stmmac_tx_clean(struct stmmac_priv *priv)
@@ -708,40 +1217,50 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
int last;
unsigned int entry = priv->dirty_tx % txsize;
struct sk_buff *skb = priv->tx_skbuff[entry];
- struct dma_desc *p = priv->dma_tx + entry;
+ struct dma_desc *p;
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(priv->dma_etx + entry);
+ else
+ p = priv->dma_tx + entry;
/* Check if the descriptor is owned by the DMA. */
if (priv->hw->desc->get_tx_owner(p))
break;
- /* Verify tx error by looking at the last segment */
+ /* Verify tx error by looking at the last segment. */
last = priv->hw->desc->get_tx_ls(p);
if (likely(last)) {
int tx_error =
- priv->hw->desc->tx_status(&priv->dev->stats,
- &priv->xstats, p,
- priv->ioaddr);
+ priv->hw->desc->tx_status(&priv->dev->stats,
+ &priv->xstats, p,
+ priv->ioaddr);
if (likely(tx_error == 0)) {
priv->dev->stats.tx_packets++;
priv->xstats.tx_pkt_n++;
} else
priv->dev->stats.tx_errors++;
+
+ stmmac_get_tx_hwtstamp(priv, entry, skb);
}
TX_DBG("%s: curr %d, dirty %d\n", __func__,
- priv->cur_tx, priv->dirty_tx);
+ priv->cur_tx, priv->dirty_tx);
- if (likely(p->des2))
- dma_unmap_single(priv->device, p->des2,
+ if (likely(priv->tx_skbuff_dma[entry])) {
+ dma_unmap_single(priv->device,
+ priv->tx_skbuff_dma[entry],
priv->hw->desc->get_tx_len(p),
DMA_TO_DEVICE);
- priv->hw->ring->clean_desc3(p);
+ priv->tx_skbuff_dma[entry] = 0;
+ }
+ priv->hw->ring->clean_desc3(priv, p);
if (likely(skb != NULL)) {
dev_kfree_skb(skb);
priv->tx_skbuff[entry] = NULL;
}
- priv->hw->desc->release_tx_desc(p);
+ priv->hw->desc->release_tx_desc(p, priv->mode);
priv->dirty_tx++;
}
@@ -749,7 +1268,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
netif_tx_lock(priv->dev);
if (netif_queue_stopped(priv->dev) &&
- stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
+ stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
TX_DBG("%s: restart transmit\n", __func__);
netif_wake_queue(priv->dev);
}
@@ -773,20 +1292,29 @@ static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
priv->hw->dma->disable_dma_irq(priv->ioaddr);
}
-
/**
- * stmmac_tx_err:
- * @priv: pointer to the private device structure
+ * stmmac_tx_err: irq tx error management function
+ * @priv: driver private structure
* Description: it cleans the descriptors and restarts the transmission
* in case of errors.
*/
static void stmmac_tx_err(struct stmmac_priv *priv)
{
+ int i;
+ int txsize = priv->dma_tx_size;
netif_stop_queue(priv->dev);
priv->hw->dma->stop_tx(priv->ioaddr);
dma_free_tx_skbufs(priv);
- priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+ for (i = 0; i < txsize; i++)
+ if (priv->extend_desc)
+ priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
+ priv->mode,
+ (i == txsize - 1));
+ else
+ priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
+ priv->mode,
+ (i == txsize - 1));
priv->dirty_tx = 0;
priv->cur_tx = 0;
priv->hw->dma->start_tx(priv->ioaddr);
@@ -795,6 +1323,14 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
netif_wake_queue(priv->dev);
}
+/**
+ * stmmac_dma_interrupt: DMA ISR
+ * @priv: driver private structure
+ * Description: this is the DMA ISR. It is called by the main ISR.
+ * It calls the dwmac dma routine to understand which type of interrupt
+ * happened. If a Normal interrupt occurred and either a TX or RX
+ * interrupt is pending, the NAPI poll is scheduled.
+ */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
int status;
@@ -817,13 +1353,16 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
stmmac_tx_err(priv);
}
+/**
+ * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
+ * @priv: driver private structure
+ * Description: this masks the MMC irq; the counters are managed in SW.
+ */
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
- MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
+ MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
- /* Mask MMC irq, counters are managed in SW and registers
- * are cleared on each READ eventually. */
dwmac_mmc_intr_all_mask(priv->ioaddr);
if (priv->dma_cap.rmon) {
@@ -837,8 +1376,7 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
{
u32 hwid = priv->hw->synopsys_uid;
- /* Only check valid Synopsys Id because old MAC chips
- * have no HW registers where get the ID */
+ /* Check Synopsys Id (not available on old chips) */
if (likely(hwid)) {
u32 uid = ((hwid & 0x0000ff00) >> 8);
u32 synid = (hwid & 0x000000ff);
@@ -852,14 +1390,24 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
}
/**
- * stmmac_selec_desc_mode
- * @priv : private structure
- * Description: select the Enhanced/Alternate or Normal descriptors
+ * stmmac_selec_desc_mode: select among normal/alternate/extended descriptors
+ * @priv: driver private structure
+ * Description: select the Enhanced/Alternate or Normal descriptors.
+ * In case of Enhanced/Alternate, it also checks whether extended
+ * descriptors are supported by the HW cap. register.
*/
static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
{
if (priv->plat->enh_desc) {
pr_info(" Enhanced/Alternate descriptors\n");
+
+ /* GMAC older than 3.50 has no extended descriptors */
+ if (priv->synopsys_id >= DWMAC_CORE_3_50) {
+ pr_info("\tEnabled extended descriptors\n");
+ priv->extend_desc = 1;
+ } else
+ pr_warn("Extended descriptors not supported\n");
+
priv->hw->desc = &enh_desc_ops;
} else {
pr_info(" Normal descriptors\n");
@@ -868,8 +1416,8 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
}
/**
- * stmmac_get_hw_features
- * @priv : private device pointer
+ * stmmac_get_hw_features: get MAC capabilities from the HW cap. register.
+ * @priv: driver private structure
* Description:
* new GMAC chip generations have a new register to indicate the
* presence of the optional feature/functions.
@@ -887,69 +1435,78 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
- priv->dma_cap.multi_addr =
- (hw_cap & DMA_HW_FEAT_ADDMACADRSEL) >> 5;
+ priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
priv->dma_cap.pmt_remote_wake_up =
- (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
+ (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
priv->dma_cap.pmt_magic_frame =
- (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
+ (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
/* MMC */
priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
- /* IEEE 1588-2002*/
+ /* IEEE 1588-2002 */
priv->dma_cap.time_stamp =
- (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
- /* IEEE 1588-2008*/
+ (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
+ /* IEEE 1588-2008 */
priv->dma_cap.atime_stamp =
- (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
+ (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
/* 802.3az - Energy-Efficient Ethernet (EEE) */
priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
/* TX and RX csum */
priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
priv->dma_cap.rx_coe_type1 =
- (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
+ (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
priv->dma_cap.rx_coe_type2 =
- (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
+ (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
priv->dma_cap.rxfifo_over_2048 =
- (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
+ (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
/* TX and RX number of channels */
priv->dma_cap.number_rx_channel =
- (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
+ (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
priv->dma_cap.number_tx_channel =
- (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
- /* Alternate (enhanced) DESC mode*/
- priv->dma_cap.enh_desc =
- (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
+ (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
+ /* Alternate (enhanced) DESC mode */
+ priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
}
return hw_cap;
}
+/**
+ * stmmac_check_ether_addr: check if the MAC addr is valid
+ * @priv: driver private structure
+ * Description:
+ * it verifies that the MAC address is valid; if it is not, it
+ * generates a random MAC address
+ */
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
- /* verify if the MAC address is valid, in case of failures it
- * generates a random MAC address */
if (!is_valid_ether_addr(priv->dev->dev_addr)) {
priv->hw->mac->get_umac_addr((void __iomem *)
priv->dev->base_addr,
priv->dev->dev_addr, 0);
- if (!is_valid_ether_addr(priv->dev->dev_addr))
+ if (!is_valid_ether_addr(priv->dev->dev_addr))
eth_hw_addr_random(priv->dev);
}
- pr_warning("%s: device MAC address %pM\n", priv->dev->name,
- priv->dev->dev_addr);
+ pr_warn("%s: device MAC address %pM\n", priv->dev->name,
+ priv->dev->dev_addr);
}
+/**
+ * stmmac_init_dma_engine: DMA init.
+ * @priv: driver private structure
+ * Description:
+ * It inits the DMA invoking the specific MAC/GMAC callback.
+ * Some DMA parameters can be passed from the platform;
+ * if they are not passed, a default is kept for the MAC or GMAC.
+ */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
int mixed_burst = 0;
+ int atds = 0;
- /* Some DMA parameters can be passed from the platform;
- * in case of these are not passed we keep a default
- * (good for all the chips) and init the DMA! */
if (priv->plat->dma_cfg) {
pbl = priv->plat->dma_cfg->pbl;
fixed_burst = priv->plat->dma_cfg->fixed_burst;
@@ -957,13 +1514,16 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
burst_len = priv->plat->dma_cfg->burst_len;
}
+ if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
+ atds = 1;
+
return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
burst_len, priv->dma_tx_phy,
- priv->dma_rx_phy);
+ priv->dma_rx_phy, atds);
}
/**
- * stmmac_tx_timer:
+ * stmmac_tx_timer: mitigation sw timer for tx.
* @data: data pointer
* Description:
* This is the timer handler to directly invoke the stmmac_tx_clean.
@@ -976,8 +1536,8 @@ static void stmmac_tx_timer(unsigned long data)
}
/**
- * stmmac_tx_timer:
- * @priv: private data structure
+ * stmmac_init_tx_coalesce: init tx mitigation options.
+ * @priv: driver private structure
* Description:
* This inits the transmit coalesce parameters: i.e. timer rate,
* timer handler and default threshold used for enabling the
@@ -1012,10 +1572,14 @@ static int stmmac_open(struct net_device *dev)
stmmac_check_ether_addr(priv);
- ret = stmmac_init_phy(dev);
- if (unlikely(ret)) {
- pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
- goto open_error;
+ if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
+ priv->pcs != STMMAC_PCS_RTBI) {
+ ret = stmmac_init_phy(dev);
+ if (ret) {
+ pr_err("%s: Cannot attach to PHY (error: %d)\n",
+ __func__, ret);
+ goto open_error;
+ }
}
/* Create and initialize the TX/RX descriptors chains. */
@@ -1043,7 +1607,7 @@ static int stmmac_open(struct net_device *dev)
/* Request the IRQ lines */
ret = request_irq(dev->irq, stmmac_interrupt,
- IRQF_SHARED, dev->name, dev);
+ IRQF_SHARED, dev->name, dev);
if (unlikely(ret < 0)) {
pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
__func__, dev->irq, ret);
@@ -1055,8 +1619,8 @@ static int stmmac_open(struct net_device *dev)
ret = request_irq(priv->wol_irq, stmmac_interrupt,
IRQF_SHARED, dev->name, dev);
if (unlikely(ret < 0)) {
- pr_err("%s: ERROR: allocating the ext WoL IRQ %d "
- "(error: %d)\n", __func__, priv->wol_irq, ret);
+ pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
+ __func__, priv->wol_irq, ret);
goto open_error_wolirq;
}
}
@@ -1084,10 +1648,14 @@ static int stmmac_open(struct net_device *dev)
stmmac_mmc_setup(priv);
+ ret = stmmac_init_ptp(priv);
+ if (ret)
+ pr_warn("%s: failed PTP initialisation\n", __func__);
+
#ifdef CONFIG_STMMAC_DEBUG_FS
ret = stmmac_init_fs(dev);
if (ret < 0)
- pr_warning("%s: failed debugFS registration\n", __func__);
+ pr_warn("%s: failed debugFS registration\n", __func__);
#endif
/* Start the ball rolling... */
DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
@@ -1104,7 +1672,13 @@ static int stmmac_open(struct net_device *dev)
phy_start(priv->phydev);
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
- priv->eee_enabled = stmmac_eee_init(priv);
+
+	/* Using PCS we cannot deal with the PHY registers at this stage
+	 * so we do not support extra features like EEE.
+ */
+ if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
+ priv->pcs != STMMAC_PCS_RTBI)
+ priv->eee_enabled = stmmac_eee_init(priv);
stmmac_init_tx_coalesce(priv);
@@ -1113,6 +1687,9 @@ static int stmmac_open(struct net_device *dev)
priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
}
+ if (priv->pcs && priv->hw->mac->ctrl_ane)
+ priv->hw->mac->ctrl_ane(priv->ioaddr, 0);
+
napi_enable(&priv->napi);
netif_start_queue(dev);
@@ -1184,21 +1761,25 @@ static int stmmac_release(struct net_device *dev)
#endif
clk_disable_unprepare(priv->stmmac_clk);
+ stmmac_release_ptp(priv);
+
return 0;
}
/**
- * stmmac_xmit:
+ * stmmac_xmit: Tx entry point of the driver
* @skb : the socket buffer
* @dev : device pointer
- * Description : Tx entry point of the driver.
+ * Description: this is the tx entry point of the driver.
+ * It programs the chain or the ring and supports oversized frames
+ * and the SG feature.
*/
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int txsize = priv->dma_tx_size;
unsigned int entry;
- int i, csum_insertion = 0;
+ int i, csum_insertion = 0, is_jumbo = 0;
int nfrags = skb_shinfo(skb)->nr_frags;
struct dma_desc *desc, *first;
unsigned int nopaged_len = skb_headlen(skb);
@@ -1207,8 +1788,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
/* This is a hard error, log it. */
- pr_err("%s: BUG! Tx Ring full when queue awake\n",
- __func__);
+ pr_err("%s: Tx Ring full when queue awake\n", __func__);
}
return NETDEV_TX_BUSY;
}
@@ -1222,10 +1802,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
#ifdef STMMAC_XMIT_DEBUG
if ((skb->len > ETH_FRAME_LEN) || nfrags)
- pr_debug("stmmac xmit: [entry %d]\n"
- "\tskb addr %p - len: %d - nopaged_len: %d\n"
+ pr_debug("%s: [entry %d]: skb addr %p len: %d nopagedlen: %d\n"
"\tn_frags: %d - ip_summed: %d - %s gso\n"
- "\ttx_count_frames %d\n", entry,
+ "\ttx_count_frames %d\n", __func__, entry,
skb, skb->len, nopaged_len, nfrags, skb->ip_summed,
!skb_is_gso(skb) ? "isn't" : "is",
priv->tx_count_frames);
@@ -1233,7 +1812,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
- desc = priv->dma_tx + entry;
+ if (priv->extend_desc)
+ desc = (struct dma_desc *)(priv->dma_etx + entry);
+ else
+ desc = priv->dma_tx + entry;
+
first = desc;
#ifdef STMMAC_XMIT_DEBUG
@@ -1244,28 +1827,46 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
priv->tx_skbuff[entry] = skb;
- if (priv->hw->ring->is_jumbo_frm(skb->len, priv->plat->enh_desc)) {
- entry = priv->hw->ring->jumbo_frm(priv, skb, csum_insertion);
- desc = priv->dma_tx + entry;
+ /* To program the descriptors according to the size of the frame */
+ if (priv->mode == STMMAC_RING_MODE) {
+ is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
+ priv->plat->enh_desc);
+ if (unlikely(is_jumbo))
+ entry = priv->hw->ring->jumbo_frm(priv, skb,
+ csum_insertion);
} else {
+ is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
+ priv->plat->enh_desc);
+ if (unlikely(is_jumbo))
+ entry = priv->hw->chain->jumbo_frm(priv, skb,
+ csum_insertion);
+ }
+ if (likely(!is_jumbo)) {
desc->des2 = dma_map_single(priv->device, skb->data,
- nopaged_len, DMA_TO_DEVICE);
+ nopaged_len, DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[entry] = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
- csum_insertion);
- }
+ csum_insertion, priv->mode);
+ } else
+ desc = first;
for (i = 0; i < nfrags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int len = skb_frag_size(frag);
entry = (++priv->cur_tx) % txsize;
- desc = priv->dma_tx + entry;
+ if (priv->extend_desc)
+ desc = (struct dma_desc *)(priv->dma_etx + entry);
+ else
+ desc = priv->dma_tx + entry;
TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[entry] = desc->des2;
priv->tx_skbuff[entry] = NULL;
- priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
+ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
+ priv->mode);
wmb();
priv->hw->desc->set_tx_owner(desc);
wmb();
@@ -1298,11 +1899,14 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
#ifdef STMMAC_XMIT_DEBUG
if (netif_msg_pktdata(priv)) {
- pr_info("stmmac xmit: current=%d, dirty=%d, entry=%d, "
- "first=%p, nfrags=%d\n",
- (priv->cur_tx % txsize), (priv->dirty_tx % txsize),
- entry, first, nfrags);
- display_ring(priv->dma_tx, txsize);
+		pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d\n",
+ __func__, (priv->cur_tx % txsize),
+ (priv->dirty_tx % txsize), entry, first, nfrags);
+ if (priv->extend_desc)
+ stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
+ else
+ stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
+
pr_info(">>> frame to be transmitted: ");
print_pkt(skb->data, skb->len);
}
@@ -1314,7 +1918,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += skb->len;
- skb_tx_timestamp(skb);
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ /* declare that device is doing timestamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->hw->desc->enable_tx_timestamp(first);
+ }
+
+ if (!priv->hwts_tx_en)
+ skb_tx_timestamp(skb);
priv->hw->dma->enable_dma_transmission(priv->ioaddr);
@@ -1323,14 +1935,26 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
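The SKBTX_HW_TSTAMP branch above is reached only when user space has asked for hardware TX timestamps; a minimal sketch of doing so with SO_TIMESTAMPING (flag choice illustrative; the timestamp is later read back from the socket error queue):

/* Hedged sketch: request hardware TX timestamps on a UDP socket so
 * that the SKBTX_HW_TSTAMP path in stmmac_xmit() is taken. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	if (fd < 0)
		return 1;

	/* Timestamps for sent packets arrive on the socket error
	 * queue (recvmsg() with MSG_ERRQUEUE). */
	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
		       &flags, sizeof(flags)) < 0)
		perror("SO_TIMESTAMPING");

	close(fd);
	return 0;
}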
+/**
+ * stmmac_rx_refill: refill used skb preallocated buffers
+ * @priv: driver private structure
+ * Description: this is to reallocate the skb for the reception process
+ * that is based on zero-copy.
+ */
static inline void stmmac_rx_refill(struct stmmac_priv *priv)
{
unsigned int rxsize = priv->dma_rx_size;
int bfsize = priv->dma_buf_sz;
- struct dma_desc *p = priv->dma_rx;
for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
unsigned int entry = priv->dirty_rx % rxsize;
+ struct dma_desc *p;
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(priv->dma_erx + entry);
+ else
+ p = priv->dma_rx + entry;
+
if (likely(priv->rx_skbuff[entry] == NULL)) {
struct sk_buff *skb;
@@ -1344,80 +1968,116 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
dma_map_single(priv->device, skb->data, bfsize,
DMA_FROM_DEVICE);
- (p + entry)->des2 = priv->rx_skbuff_dma[entry];
+ p->des2 = priv->rx_skbuff_dma[entry];
- if (unlikely(priv->plat->has_gmac))
- priv->hw->ring->refill_desc3(bfsize, p + entry);
+ priv->hw->ring->refill_desc3(priv, p);
RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
}
wmb();
- priv->hw->desc->set_rx_owner(p + entry);
+ priv->hw->desc->set_rx_owner(p);
wmb();
}
}
+/**
+ * stmmac_rx: receive frames from the remote host
+ * @priv: driver private structure
+ * @limit: napi budget.
+ * Description: this is the function called by the napi poll method.
+ * It gets all the frames inside the ring.
+ */
static int stmmac_rx(struct stmmac_priv *priv, int limit)
{
unsigned int rxsize = priv->dma_rx_size;
unsigned int entry = priv->cur_rx % rxsize;
unsigned int next_entry;
unsigned int count = 0;
- struct dma_desc *p = priv->dma_rx + entry;
- struct dma_desc *p_next;
+ int coe = priv->plat->rx_coe;
#ifdef STMMAC_RX_DEBUG
if (netif_msg_hw(priv)) {
pr_debug(">>> stmmac_rx: descriptor ring:\n");
- display_ring(priv->dma_rx, rxsize);
+ if (priv->extend_desc)
+ stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
+ else
+ stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
}
#endif
- while (!priv->hw->desc->get_rx_owner(p)) {
+ while (count < limit) {
int status;
+ struct dma_desc *p;
- if (count >= limit)
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(priv->dma_erx + entry);
+ else
+ p = priv->dma_rx + entry;
+
+ if (priv->hw->desc->get_rx_owner(p))
break;
count++;
next_entry = (++priv->cur_rx) % rxsize;
- p_next = priv->dma_rx + next_entry;
- prefetch(p_next);
+ if (priv->extend_desc)
+ prefetch(priv->dma_erx + next_entry);
+ else
+ prefetch(priv->dma_rx + next_entry);
/* read the status of the incoming frame */
- status = (priv->hw->desc->rx_status(&priv->dev->stats,
- &priv->xstats, p));
- if (unlikely(status == discard_frame))
+ status = priv->hw->desc->rx_status(&priv->dev->stats,
+ &priv->xstats, p);
+ if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
+ priv->hw->desc->rx_extended_status(&priv->dev->stats,
+ &priv->xstats,
+ priv->dma_erx +
+ entry);
+ if (unlikely(status == discard_frame)) {
priv->dev->stats.rx_errors++;
- else {
+ if (priv->hwts_rx_en && !priv->extend_desc) {
+			if (priv->hwts_rx_en && !priv->extend_desc) {
+				/* DESC2 & DESC3 will be overwritten by the
+				 * device with timestamp values, hence
+				 * reinitialize them in stmmac_rx_refill()
+				 * so that the device can reuse them.
+ */
+ priv->rx_skbuff[entry] = NULL;
+ dma_unmap_single(priv->device,
+ priv->rx_skbuff_dma[entry],
+ priv->dma_buf_sz,
+ DMA_FROM_DEVICE);
+ }
+ } else {
struct sk_buff *skb;
int frame_len;
- frame_len = priv->hw->desc->get_rx_frame_len(p,
- priv->plat->rx_coe);
+ frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
+
/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
- * Type frames (LLC/LLC-SNAP) */
+ * Type frames (LLC/LLC-SNAP)
+ */
if (unlikely(status != llc_snap))
frame_len -= ETH_FCS_LEN;
#ifdef STMMAC_RX_DEBUG
if (frame_len > ETH_FRAME_LEN)
pr_debug("\tRX frame size %d, COE status: %d\n",
- frame_len, status);
+ frame_len, status);
if (netif_msg_hw(priv))
pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
- p, entry, p->des2);
+ p, entry, p->des2);
#endif
skb = priv->rx_skbuff[entry];
if (unlikely(!skb)) {
pr_err("%s: Inconsistent Rx descriptor chain\n",
- priv->dev->name);
+ priv->dev->name);
priv->dev->stats.rx_dropped++;
break;
}
prefetch(skb->data - NET_IP_ALIGN);
priv->rx_skbuff[entry] = NULL;
+ stmmac_get_rx_hwtstamp(priv, entry, skb);
+
skb_put(skb, frame_len);
dma_unmap_single(priv->device,
priv->rx_skbuff_dma[entry],
@@ -1430,7 +2090,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
#endif
skb->protocol = eth_type_trans(skb, priv->dev);
- if (unlikely(!priv->plat->rx_coe))
+ if (unlikely(!coe))
skb_checksum_none_assert(skb);
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1441,7 +2101,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
priv->dev->stats.rx_bytes += frame_len;
}
entry = next_entry;
- p = p_next; /* use prefetched values */
}
stmmac_rx_refill(priv);
@@ -1499,18 +2158,16 @@ static int stmmac_config(struct net_device *dev, struct ifmap *map)
/* Don't allow changing the I/O address */
if (map->base_addr != dev->base_addr) {
- pr_warning("%s: can't change I/O address\n", dev->name);
+ pr_warn("%s: can't change I/O address\n", dev->name);
return -EOPNOTSUPP;
}
/* Don't allow changing the IRQ */
if (map->irq != dev->irq) {
- pr_warning("%s: can't change IRQ number %d\n",
- dev->name, dev->irq);
+		pr_warn("%s: can't change IRQ number %d\n", dev->name, dev->irq);
return -EOPNOTSUPP;
}
- /* ignore other fields */
return 0;
}
@@ -1570,7 +2227,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
}
static netdev_features_t stmmac_fix_features(struct net_device *dev,
- netdev_features_t features)
+ netdev_features_t features)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -1584,13 +2241,22 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
/* Some GMAC devices have a bugged Jumbo frame support that
* needs to have the Tx COE disabled for oversized frames
* (due to limited buffer sizes). In this case we disable
- * the TX csum insertionin the TDES and not use SF. */
+	 * the TX csum insertion in the TDES and do not use SF.
+ */
if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
features &= ~NETIF_F_ALL_CSUM;
return features;
}
+/**
+ * stmmac_interrupt - main ISR
+ * @irq: interrupt number.
+ * @dev_id: to pass the net device pointer.
+ * Description: this is the main driver interrupt service routine.
+ * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
+ * interrupts.
+ */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
@@ -1604,30 +2270,14 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
/* To handle GMAC own interrupts */
if (priv->plat->has_gmac) {
int status = priv->hw->mac->host_irq_status((void __iomem *)
- dev->base_addr);
+ dev->base_addr,
+ &priv->xstats);
if (unlikely(status)) {
- if (status & core_mmc_tx_irq)
- priv->xstats.mmc_tx_irq_n++;
- if (status & core_mmc_rx_irq)
- priv->xstats.mmc_rx_irq_n++;
- if (status & core_mmc_rx_csum_offload_irq)
- priv->xstats.mmc_rx_csum_offload_irq_n++;
- if (status & core_irq_receive_pmt_irq)
- priv->xstats.irq_receive_pmt_irq_n++;
-
/* For LPI we need to save the tx status */
- if (status & core_irq_tx_path_in_lpi_mode) {
- priv->xstats.irq_tx_path_in_lpi_mode_n++;
+ if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
priv->tx_path_in_lpi_mode = true;
- }
- if (status & core_irq_tx_path_exit_lpi_mode) {
- priv->xstats.irq_tx_path_exit_lpi_mode_n++;
+ if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
priv->tx_path_in_lpi_mode = false;
- }
- if (status & core_irq_rx_path_in_lpi_mode)
- priv->xstats.irq_rx_path_in_lpi_mode_n++;
- if (status & core_irq_rx_path_exit_lpi_mode)
- priv->xstats.irq_rx_path_exit_lpi_mode_n++;
}
}
@@ -1639,7 +2289,8 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
- * to allow network I/O with interrupts disabled. */
+ * to allow network I/O with interrupts disabled.
+ */
static void stmmac_poll_controller(struct net_device *dev)
{
disable_irq(dev->irq);
@@ -1655,21 +2306,30 @@ static void stmmac_poll_controller(struct net_device *dev)
* a proprietary structure used to pass information to the driver.
* @cmd: IOCTL command
* Description:
- * Currently there are no special functionality supported in IOCTL, just the
- * phy_mii_ioctl(...) can be invoked.
+ * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
*/
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct stmmac_priv *priv = netdev_priv(dev);
- int ret;
+ int ret = -EOPNOTSUPP;
if (!netif_running(dev))
return -EINVAL;
- if (!priv->phydev)
- return -EINVAL;
-
- ret = phy_mii_ioctl(priv->phydev, rq, cmd);
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ if (!priv->phydev)
+ return -EINVAL;
+ ret = phy_mii_ioctl(priv->phydev, rq, cmd);
+ break;
+ case SIOCSHWTSTAMP:
+ ret = stmmac_hwtstamp_ioctl(dev, rq);
+ break;
+ default:
+ break;
+ }
return ret;
}
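The new SIOCSHWTSTAMP branch is driven from user space through an ifreq carrying a struct hwtstamp_config; a minimal sketch (interface name and filter are illustrative, and the driver copies the possibly adjusted config back):

/* Hedged sketch: enable PTP v2 L4 event RX filtering plus TX
 * timestamping via SIOCSHWTSTAMP. "eth0" is an assumption. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* The driver may adjust rx_filter; the config is copied back. */
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("rx_filter in effect: %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}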
@@ -1679,40 +2339,51 @@ static struct dentry *stmmac_fs_dir;
static struct dentry *stmmac_rings_status;
static struct dentry *stmmac_dma_cap;
-static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
+static void sysfs_display_ring(void *head, int size, int extend_desc,
+ struct seq_file *seq)
{
- struct tmp_s {
- u64 a;
- unsigned int b;
- unsigned int c;
- };
int i;
- struct net_device *dev = seq->private;
- struct stmmac_priv *priv = netdev_priv(dev);
+ struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
+ struct dma_desc *p = (struct dma_desc *)head;
- seq_printf(seq, "=======================\n");
- seq_printf(seq, " RX descriptor ring\n");
- seq_printf(seq, "=======================\n");
-
- for (i = 0; i < priv->dma_rx_size; i++) {
- struct tmp_s *x = (struct tmp_s *)(priv->dma_rx + i);
- seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
- i, (unsigned int)(x->a),
- (unsigned int)((x->a) >> 32), x->b, x->c);
+ for (i = 0; i < size; i++) {
+ u64 x;
+ if (extend_desc) {
+ x = *(u64 *) ep;
+ seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, (unsigned int)virt_to_phys(ep),
+ (unsigned int)x, (unsigned int)(x >> 32),
+ ep->basic.des2, ep->basic.des3);
+ ep++;
+ } else {
+ x = *(u64 *) p;
+ seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+				   i, (unsigned int)virt_to_phys(p),
+ (unsigned int)x, (unsigned int)(x >> 32),
+ p->des2, p->des3);
+ p++;
+ }
seq_printf(seq, "\n");
}
+}
- seq_printf(seq, "\n");
- seq_printf(seq, "=======================\n");
- seq_printf(seq, " TX descriptor ring\n");
- seq_printf(seq, "=======================\n");
+static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
+{
+ struct net_device *dev = seq->private;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int rxsize = priv->dma_rx_size;
- for (i = 0; i < priv->dma_tx_size; i++) {
- struct tmp_s *x = (struct tmp_s *)(priv->dma_tx + i);
- seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
- i, (unsigned int)(x->a),
- (unsigned int)((x->a) >> 32), x->b, x->c);
- seq_printf(seq, "\n");
+ if (priv->extend_desc) {
+ seq_printf(seq, "Extended RX descriptor ring:\n");
+ sysfs_display_ring((void *)priv->dma_erx, rxsize, 1, seq);
+ seq_printf(seq, "Extended TX descriptor ring:\n");
+ sysfs_display_ring((void *)priv->dma_etx, txsize, 1, seq);
+ } else {
+ seq_printf(seq, "RX descriptor ring:\n");
+ sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq);
+ seq_printf(seq, "TX descriptor ring:\n");
+ sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq);
}
return 0;
@@ -1817,8 +2488,8 @@ static int stmmac_init_fs(struct net_device *dev)
/* Entry to report DMA RX/TX rings */
stmmac_rings_status = debugfs_create_file("descriptors_status",
- S_IRUGO, stmmac_fs_dir, dev,
- &stmmac_rings_status_fops);
+ S_IRUGO, stmmac_fs_dir, dev,
+ &stmmac_rings_status_fops);
if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) {
pr_info("ERROR creating stmmac ring debugfs file\n");
@@ -1868,7 +2539,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
/**
* stmmac_hw_init - Init the MAC device
- * @priv : pointer to the private device structure.
+ * @priv: driver private structure
* Description: this function detects which MAC device
* (GMAC/MAC10-100) has to attached, checks the HW capability
* (if supported) and sets the driver's features (for example
@@ -1877,7 +2548,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
*/
static int stmmac_hw_init(struct stmmac_priv *priv)
{
- int ret = 0;
+ int ret;
struct mac_device_info *mac;
/* Identify the MAC HW device */
@@ -1892,12 +2563,23 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
priv->hw = mac;
- /* To use the chained or ring mode */
- priv->hw->ring = &ring_mode_ops;
-
/* Get and dump the chip ID */
priv->synopsys_id = stmmac_get_synopsys_id(priv);
+ /* To use alternate (extended) or normal descriptor structures */
+ stmmac_selec_desc_mode(priv);
+
+ /* To use the chained or ring mode */
+ if (chain_mode) {
+ priv->hw->chain = &chain_mode_ops;
+ pr_info(" Chain mode enabled\n");
+ priv->mode = STMMAC_CHAIN_MODE;
+ } else {
+ priv->hw->ring = &ring_mode_ops;
+ pr_info(" Ring mode enabled\n");
+ priv->mode = STMMAC_RING_MODE;
+ }
+
/* Get the HW capability (new GMAC newer than 3.50a) */
priv->hw_cap_support = stmmac_get_hw_features(priv);
if (priv->hw_cap_support) {
@@ -1921,14 +2603,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
} else
pr_info(" No HW DMA feature register supported");
- /* Select the enhnaced/normal descriptor structures */
- stmmac_selec_desc_mode(priv);
-
- /* Enable the IPC (Checksum Offload) and check if the feature has been
- * enabled during the core configuration. */
ret = priv->hw->mac->rx_ipc(priv->ioaddr);
if (!ret) {
- pr_warning(" RX IPC Checksum Offload not configured.\n");
+ pr_warn(" RX IPC Checksum Offload not configured.\n");
priv->plat->rx_coe = STMMAC_RX_COE_NONE;
}
@@ -1943,7 +2620,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
device_set_wakeup_capable(priv->device, 1);
}
- return ret;
+ return 0;
}
/**
@@ -1984,12 +2661,15 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
stmmac_verify_args();
/* Override with kernel parameters if supplied XXX CRS XXX
- * this needs to have multiple instances */
+ * this needs to have multiple instances
+ */
if ((phyaddr >= 0) && (phyaddr <= 31))
priv->plat->phy_addr = phyaddr;
/* Init MAC and get the capabilities */
- stmmac_hw_init(priv);
+ ret = stmmac_hw_init(priv);
+ if (ret)
+ goto error_free_netdev;
ndev->netdev_ops = &stmmac_netdev_ops;
@@ -1999,7 +2679,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
- ndev->features |= NETIF_F_HW_VLAN_RX;
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
@@ -2029,7 +2709,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME);
if (IS_ERR(priv->stmmac_clk)) {
- pr_warning("%s: warning: cannot get CSR clock\n", __func__);
+ pr_warn("%s: warning: cannot get CSR clock\n", __func__);
goto error_clk_get;
}
@@ -2044,12 +2724,17 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
else
priv->clk_csr = priv->plat->clk_csr;
- /* MDIO bus Registration */
- ret = stmmac_mdio_register(ndev);
- if (ret < 0) {
- pr_debug("%s: MDIO bus (id: %d) registration failed",
- __func__, priv->plat->bus_id);
- goto error_mdio_register;
+ stmmac_check_pcs_mode(priv);
+
+ if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
+ priv->pcs != STMMAC_PCS_RTBI) {
+ /* MDIO bus Registration */
+ ret = stmmac_mdio_register(ndev);
+ if (ret < 0) {
+ pr_debug("%s: MDIO bus (id: %d) registration failed",
+ __func__, priv->plat->bus_id);
+ goto error_mdio_register;
+ }
}
return priv;
@@ -2060,6 +2745,7 @@ error_clk_get:
unregister_netdev(ndev);
error_netdev_register:
netif_napi_del(&priv->napi);
+error_free_netdev:
free_netdev(ndev);
return NULL;
@@ -2081,7 +2767,9 @@ int stmmac_dvr_remove(struct net_device *ndev)
priv->hw->dma->stop_tx(priv->ioaddr);
stmmac_set_mac(priv->ioaddr, false);
- stmmac_mdio_unregister(ndev);
+ if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
+ priv->pcs != STMMAC_PCS_RTBI)
+ stmmac_mdio_unregister(ndev);
netif_carrier_off(ndev);
unregister_netdev(ndev);
free_netdev(ndev);
@@ -2093,7 +2781,6 @@ int stmmac_dvr_remove(struct net_device *ndev)
int stmmac_suspend(struct net_device *ndev)
{
struct stmmac_priv *priv = netdev_priv(ndev);
- int dis_ic = 0;
unsigned long flags;
if (!ndev || !netif_running(ndev))
@@ -2107,18 +2794,13 @@ int stmmac_suspend(struct net_device *ndev)
netif_device_detach(ndev);
netif_stop_queue(ndev);
- if (priv->use_riwt)
- dis_ic = 1;
-
napi_disable(&priv->napi);
/* Stop TX/RX DMA */
priv->hw->dma->stop_tx(priv->ioaddr);
priv->hw->dma->stop_rx(priv->ioaddr);
- /* Clear the Rx/Tx descriptors */
- priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
- dis_ic);
- priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+
+ stmmac_clear_descriptors(priv);
/* Enable Power down mode by programming the PMT regs */
if (device_may_wakeup(priv->device))
@@ -2146,7 +2828,8 @@ int stmmac_resume(struct net_device *ndev)
* automatically as soon as a magic packet or a Wake-up frame
* is received. Anyway, it's better to manually clear
* this bit because it can generate problems while resuming
- * from another devices (e.g. serial console). */
+ * from another devices (e.g. serial console).
+ */
if (device_may_wakeup(priv->device))
priv->hw->mac->pmt(priv->ioaddr, 0);
else
@@ -2257,6 +2940,9 @@ static int __init stmmac_cmdline_opt(char *str)
} else if (!strncmp(opt, "eee_timer:", 10)) {
if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
+ } else if (!strncmp(opt, "chain_mode:", 11)) {
+ if (kstrtoint(opt + 11, 0, &chain_mode))
+ goto err;
}
}
return 0;
@@ -2267,7 +2953,7 @@ err:
}
__setup("stmmaceth=", stmmac_cmdline_opt);
-#endif
+#endif /* MODULE */
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 0b9829fe3eea..cc15039eaa47 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -177,7 +177,7 @@ int stmmac_mdio_register(struct net_device *ndev)
new_bus->write = &stmmac_mdio_write;
new_bus->reset = &stmmac_mdio_reset;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- new_bus->name, priv->plat->bus_id);
+ new_bus->name, priv->plat->bus_id);
new_bus->priv = ndev;
new_bus->irq = irqlist;
new_bus->phy_mask = mdio_bus_data->phy_mask;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 19b3a2567a46..023b7c29cb2f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -88,7 +88,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
continue;
addr = pci_iomap(pdev, i, 0);
if (addr == NULL) {
- pr_err("%s: ERROR: cannot map register memory, aborting",
+ pr_err("%s: ERROR: cannot map register memory aborting",
__func__);
ret = -EIO;
goto err_out_map_failed;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index b43d68b40e50..1d3780f55ba2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -88,11 +88,9 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- addr = devm_request_and_ioremap(dev, res);
- if (!addr) {
- pr_err("%s: ERROR: memory mapping failed", __func__);
- return -ENOMEM;
- }
+ addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
if (pdev->dev.of_node) {
plat_dat = devm_kzalloc(&pdev->dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
new file mode 100644
index 000000000000..b8b0eeed0f92
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -0,0 +1,211 @@
+/*******************************************************************************
+ PTP 1588 clock using the STMMAC.
+
+ Copyright (C) 2013 Vayavya Labs Pvt Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+*******************************************************************************/
+#include "stmmac.h"
+#include "stmmac_ptp.h"
+
+/**
+ * stmmac_adjust_freq
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @ppb: desired period change in parts per billion
+ *
+ * Description: this function will adjust the frequency of the hardware clock.
+ */
+static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+ u32 diff, addend;
+ int neg_adj = 0;
+ u64 adj;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ addend = priv->default_addend;
+ adj = addend;
+ adj *= ppb;
+ diff = div_u64(adj, 1000000000ULL);
+ addend = neg_adj ? (addend - diff) : (addend + diff);
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ priv->hw->ptp->config_addend(priv->ioaddr, addend);
+
+	spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return 0;
+}
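A quick standalone check of the scaling above, with an illustrative addend (the value a 62.5 MHz clock would yield):

/* Sketch of the adjfreq arithmetic; addend and ppb are assumptions. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addend = 0xccccccccU;	/* assumed default_addend */
	int32_t ppb = 100;		/* requested +100 ppb */

	uint64_t adj = (uint64_t)addend * (uint32_t)ppb;
	uint32_t diff = (uint32_t)(adj / 1000000000ULL);

	/* diff is 343 here, i.e. the addend grows by addend * 1e-7,
	 * speeding the clock up by 100 parts per billion. */
	printf("new addend = 0x%08x (diff %u)\n", addend + diff, diff);
	return 0;
}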
+
+/**
+ * stmmac_adjust_time
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @delta: desired change in nanoseconds
+ *
+ * Description: this function will shift/adjust the hardware clock time.
+ */
+static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+ u32 sec, nsec;
+	u32 quotient, remainder;
+ int neg_adj = 0;
+
+ if (delta < 0) {
+ neg_adj = 1;
+ delta = -delta;
+ }
+
+ quotient = div_u64_rem(delta, 1000000000ULL, &remainder);
+ sec = quotient;
+ nsec = remainder;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
+
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * stmmac_get_time
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @ts: pointer to hold time/result
+ *
+ * Description: this function will read the current time from the
+ * hardware clock and store it in @ts.
+ */
+static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+ u64 ns;
+ u32 remainder;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ ns = priv->hw->ptp->get_systime(priv->ioaddr);
+
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+/**
+ * stmmac_set_time
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @ts: time value to set
+ *
+ * Description: this function will set the current time on the
+ * hardware clock.
+ */
+static int stmmac_set_time(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec);
+
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return 0;
+}
+
+static int stmmac_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/* structure describing a PTP hardware clock */
+static struct ptp_clock_info stmmac_ptp_clock_ops = {
+ .owner = THIS_MODULE,
+ .name = "stmmac_ptp_clock",
+ .max_adj = 62500000,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjfreq = stmmac_adjust_freq,
+ .adjtime = stmmac_adjust_time,
+ .gettime = stmmac_get_time,
+ .settime = stmmac_set_time,
+ .enable = stmmac_enable,
+};
+
+/**
+ * stmmac_ptp_register
+ * @priv: driver private structure
+ * Description: this function will register the ptp clock driver
+ * to the kernel. It also does some housekeeping work.
+ */
+int stmmac_ptp_register(struct stmmac_priv *priv)
+{
+ spin_lock_init(&priv->ptp_lock);
+ priv->ptp_clock_ops = stmmac_ptp_clock_ops;
+
+ priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
+ priv->device);
+ if (IS_ERR(priv->ptp_clock)) {
+ priv->ptp_clock = NULL;
+ pr_err("ptp_clock_register() failed on %s\n", priv->dev->name);
+ } else
+ pr_debug("Added PTP HW clock successfully on %s\n",
+ priv->dev->name);
+
+ return 0;
+}
+
+/**
+ * stmmac_ptp_unregister
+ * @priv: driver private structure
+ * Description: this function will remove/unregister the ptp clock driver
+ * from the kernel.
+ */
+void stmmac_ptp_unregister(struct stmmac_priv *priv)
+{
+ if (priv->ptp_clock) {
+ ptp_clock_unregister(priv->ptp_clock);
+ pr_debug("Removed PTP HW clock successfully on %s\n",
+ priv->dev->name);
+ }
+}
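The fine-correction math in stmmac_adjust_freq() above scales the default addend by ppb parts per billion: diff = addend * |ppb| / 10^9, added or subtracted depending on sign. For example, with addend = 0x80000000 and ppb = +100, diff is 214, giving 0x800000d6. A standalone userspace sketch of that computation (purely illustrative):

#include <stdint.h>
#include <stdio.h>

/* The MAC adds 'addend' to an accumulator every input clock cycle,
 * so scaling it by ppb/1e9 shifts the synthesized frequency by ppb. */
static uint32_t scale_addend(uint32_t addend, int32_t ppb)
{
	int neg = ppb < 0;
	uint64_t adj = (uint64_t)addend * (uint64_t)(neg ? -ppb : ppb);
	uint32_t diff = (uint32_t)(adj / 1000000000ULL);

	return neg ? addend - diff : addend + diff;
}

int main(void)
{
	printf("0x%08x\n", scale_addend(0x80000000u, 100)); /* 0x800000d6 */
	return 0;
}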
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
new file mode 100644
index 000000000000..3dbc047622fa
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -0,0 +1,74 @@
+/******************************************************************************
+ PTP Header file
+
+ Copyright (C) 2013 Vayavya Labs Pvt Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+******************************************************************************/
+
+#ifndef __STMMAC_PTP_H__
+#define __STMMAC_PTP_H__
+
+#define STMMAC_SYSCLOCK 62500000
+
+/* IEEE 1588 PTP register offsets */
+#define PTP_TCR 0x0700 /* Timestamp Control Reg */
+#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */
+#define PTP_STSR 0x0708 /* System Time - Seconds Reg */
+#define PTP_STNSR 0x070C /* System Time - Nanoseconds Reg */
+#define PTP_STSUR 0x0710 /* System Time - Seconds Update Reg */
+#define PTP_STNSUR 0x0714 /* System Time - Nanoseconds Update Reg */
+#define PTP_TAR 0x0718 /* Timestamp Addend Reg */
+#define PTP_TTSR 0x071C /* Target Time Seconds Reg */
+#define PTP_TTNSR 0x0720 /* Target Time Nanoseconds Reg */
+#define PTP_STHWSR 0x0724 /* System Time - Higher Word Seconds Reg */
+#define PTP_TSR 0x0728 /* Timestamp Status */
+
+#define PTP_STNSUR_ADDSUB_SHIFT 31
+
+/* PTP TCR defines */
+#define PTP_TCR_TSENA 0x00000001 /* Timestamp Enable */
+#define PTP_TCR_TSCFUPDT 0x00000002 /* Timestamp Fine/Coarse Update */
+#define PTP_TCR_TSINIT 0x00000004 /* Timestamp Initialize */
+#define PTP_TCR_TSUPDT 0x00000008 /* Timestamp Update */
+/* Timestamp Interrupt Trigger Enable */
+#define PTP_TCR_TSTRIG 0x00000010
+#define PTP_TCR_TSADDREG 0x00000020 /* Addend Reg Update */
+#define PTP_TCR_TSENALL 0x00000100 /* Enable Timestamp for All Frames */
+/* Timestamp Digital or Binary Rollover Control */
+#define PTP_TCR_TSCTRLSSR 0x00000200
+
+/* Enable PTP packet Processing for Version 2 Format */
+#define PTP_TCR_TSVER2ENA 0x00000400
+/* Enable Processing of PTP over Ethernet Frames */
+#define PTP_TCR_TSIPENA 0x00000800
+/* Enable Processing of PTP Frames Sent over IPv6-UDP */
+#define PTP_TCR_TSIPV6ENA 0x00001000
+/* Enable Processing of PTP Frames Sent over IPv4-UDP */
+#define PTP_TCR_TSIPV4ENA 0x00002000
+/* Enable Timestamp Snapshot for Event Messages */
+#define PTP_TCR_TSEVNTENA 0x00004000
+/* Enable Snapshot for Messages Relevant to Master */
+#define PTP_TCR_TSMSTRENA 0x00008000
+/* Select PTP packets for Taking Snapshots */
+#define PTP_TCR_SNAPTYPSEL_1 0x00010000
+/* Enable MAC address for PTP Frame Filtering */
+#define PTP_TCR_TSENMACADDR 0x00040000
+
+#endif /* __STMMAC_PTP_H__ */
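As a worked example of combining the PTP_TCR bits defined above: enabling timestamping with fine correction, digital rollover and snapshots of PTPv2-over-IPv4-UDP event messages would compose a value along these lines (a sketch only; the actual TCR programming belongs to the hwtstamp ioctl path, which is not part of this file):

	/* Illustrative TCR composition using the defines above. */
	u32 tcr = PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
		  PTP_TCR_TSVER2ENA | PTP_TCR_TSIPV4ENA | PTP_TCR_TSEVNTENA;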
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index e4c1c88e4c2a..95cff98d8a34 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6618,7 +6618,7 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
(len << TXHDR_LEN_SHIFT) |
((l3off / 2) << TXHDR_L3START_SHIFT) |
(ihl << TXHDR_IHL_SHIFT) |
- ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) |
+ ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) |
((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
(ipv6 ? TXHDR_IP_VER : 0) |
csum_bits);
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 5fafca065305..054975939a18 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1169,10 +1169,8 @@ static int bigmac_ether_init(struct platform_device *op,
bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev,
PAGE_SIZE,
&bp->bblock_dvma, GFP_ATOMIC);
- if (bp->bmac_block == NULL || bp->bblock_dvma == 0) {
- printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n");
+ if (bp->bmac_block == NULL || bp->bblock_dvma == 0)
goto fail_and_cleanup;
- }
/* Get the board revision of this BigMAC. */
bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node,
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index a1bff49a8155..436fa9d5a071 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2752,10 +2752,8 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
&hp->hblock_dvma,
GFP_ATOMIC);
err = -ENOMEM;
- if (!hp->happy_block) {
- printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n");
+ if (!hp->happy_block)
goto err_out_iounmap;
- }
/* Force check of the link first time we are brought up. */
hp->linkcheck = 0;
@@ -3068,14 +3066,11 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
hp->happy_bursts = DMA_BURSTBITS;
#endif
- hp->happy_block = (struct hmeal_init_block *)
- dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &hp->hblock_dvma, GFP_KERNEL);
-
+ hp->happy_block = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &hp->hblock_dvma, GFP_KERNEL);
err = -ENODEV;
- if (!hp->happy_block) {
- printk(KERN_ERR "happymeal(PCI): Cannot get hme init block.\n");
+ if (!hp->happy_block)
goto err_out_iounmap;
- }
hp->linkcheck = 0;
hp->timer_state = asleep;
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 49bf3e2eb652..8182591bc187 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -414,7 +414,7 @@ static void qe_rx(struct sunqe *qep)
struct qe_rxd *this;
struct sunqe_buffers *qbufs = qep->buffers;
__u32 qbufs_dvma = qep->buffers_dvma;
- int elem = qep->rx_new, drops = 0;
+ int elem = qep->rx_new;
u32 flags;
this = &rxbase[elem];
@@ -436,7 +436,6 @@ static void qe_rx(struct sunqe *qep)
} else {
skb = netdev_alloc_skb(dev, len + 2);
if (skb == NULL) {
- drops++;
dev->stats.rx_dropped++;
} else {
skb_reserve(skb, 2);
@@ -456,8 +455,6 @@ static void qe_rx(struct sunqe *qep)
this = &rxbase[elem];
}
qep->rx_new = elem;
- if (drops)
- printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name);
}
static void qe_tx_reclaim(struct sunqe *qep);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index e15cc71b826d..571452e786d5 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -733,7 +733,7 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
* @ndev: network device
* @vid: VLAN vid to add
*/
-static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
+static int bdx_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
__bdx_vlan_rx_vid(ndev, vid, 1);
return 0;
@@ -744,7 +744,7 @@ static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
* @ndev: network device
* @vid: VLAN vid to kill
*/
-static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
+static int bdx_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
__bdx_vlan_rx_vid(ndev, vid, 0);
return 0;
@@ -1102,10 +1102,9 @@ static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
dno = bdx_rxdb_available(db) - 1;
while (dno > 0) {
skb = netdev_alloc_skb(priv->ndev, f->m.pktsz + NET_IP_ALIGN);
- if (!skb) {
- pr_err("NO MEM: netdev_alloc_skb failed\n");
+ if (!skb)
break;
- }
+
skb_reserve(skb, NET_IP_ALIGN);
idx = bdx_rxdb_alloc_elem(db);
@@ -1149,7 +1148,7 @@ NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan,
priv->ndev->name,
GET_RXD_VLAN_ID(rxd_vlan),
GET_RXD_VTAG(rxd_val1));
- __vlan_hwaccel_put_tag(skb, GET_RXD_VLAN_TCI(rxd_vlan));
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), GET_RXD_VLAN_TCI(rxd_vlan));
}
netif_receive_skb(skb);
}
@@ -2018,12 +2017,12 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* so we can have them same for all ports of the board */
ndev->if_port = port;
ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
- | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM
+ | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM
/*| NETIF_F_FRAGLIST */
;
ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_TSO | NETIF_F_HW_VLAN_TX;
+ NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX;
if (pci_using_dac)
ndev->features |= NETIF_F_HIGHDMA;
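The .ndo_vlan_rx_add_vid/.ndo_vlan_rx_kill_vid signature change above follows the core's move to protocol-aware (802.1ad-capable) VLAN offload: the callbacks now receive the tag protocol as a __be16. Since tehuti only advertises NETIF_F_HW_VLAN_CTAG_* features, the core will only ever pass ETH_P_8021Q here; a driver sketch that makes the assumption explicit might look like this (the foo_* names are hypothetical):

static int foo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	/* Only customer tags (802.1Q) are offloaded by this hardware. */
	if (proto != htons(ETH_P_8021Q))
		return -EOPNOTSUPP;

	foo_hw_set_vlan_filter(netdev_priv(ndev), vid, true);
	return 0;
}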
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index df32a090d08e..59c43918883e 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -126,6 +126,13 @@ do { \
#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15)
#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15)
+#define CPSW_INTPACEEN (0x3f << 16)
+#define CPSW_INTPRESCALE_MASK (0x7FF << 0)
+#define CPSW_CMINTMAX_CNT 63
+#define CPSW_CMINTMIN_CNT 2
+#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
+#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
+
#define cpsw_enable_irq(priv) \
do { \
u32 i; \
@@ -139,6 +146,10 @@ do { \
disable_irq_nosync(priv->irqs_table[i]); \
} while (0);
+#define cpsw_slave_index(priv) \
+ ((priv->data.dual_emac) ? priv->emac_port : \
+ priv->data.active_slave)
+
static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
@@ -160,6 +171,15 @@ struct cpsw_wr_regs {
u32 rx_en;
u32 tx_en;
u32 misc_en;
+ u32 mem_allign1[8];
+ u32 rx_thresh_stat;
+ u32 rx_stat;
+ u32 tx_stat;
+ u32 misc_stat;
+ u32 mem_allign2[8];
+ u32 rx_imax;
+ u32 tx_imax;
+
};
struct cpsw_ss_regs {
@@ -314,6 +334,8 @@ struct cpsw_priv {
struct cpsw_host_regs __iomem *host_port_regs;
u32 msg_enable;
u32 version;
+ u32 coal_intvl;
+ u32 bus_freq_mhz;
struct net_device_stats stats;
int rx_packet_max;
int host_port;
@@ -326,6 +348,7 @@ struct cpsw_priv {
/* snapshot of IRQ numbers */
u32 irqs_table[4];
u32 num_irqs;
+ bool irq_enabled;
struct cpts *cpts;
u32 emac_port;
};
@@ -333,12 +356,15 @@ struct cpsw_priv {
#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
#define for_each_slave(priv, func, arg...) \
do { \
- int idx; \
+ struct cpsw_slave *slave; \
+ int n; \
if (priv->data.dual_emac) \
(func)((priv)->slaves + priv->emac_port, ##arg);\
else \
- for (idx = 0; idx < (priv)->data.slaves; idx++) \
- (func)((priv)->slaves + idx, ##arg); \
+ for (n = (priv)->data.slaves, \
+ slave = (priv)->slaves; \
+ n; n--) \
+ (func)(slave++, ##arg); \
} while (0)
#define cpsw_get_slave_ndev(priv, __slave_no__) \
(priv->slaves[__slave_no__].ndev)
@@ -436,7 +462,7 @@ void cpsw_tx_handler(void *token, int len, int status)
* queue is stopped then start the queue as we have free desc for tx
*/
if (unlikely(netif_queue_stopped(ndev)))
- netif_start_queue(ndev);
+ netif_wake_queue(ndev);
cpts_tx_timestamp(priv->cpts, skb);
priv->stats.tx_packets++;
priv->stats.tx_bytes += len;
@@ -446,62 +472,69 @@ void cpsw_tx_handler(void *token, int len, int status)
void cpsw_rx_handler(void *token, int len, int status)
{
struct sk_buff *skb = token;
+ struct sk_buff *new_skb;
struct net_device *ndev = skb->dev;
struct cpsw_priv *priv = netdev_priv(ndev);
int ret = 0;
cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
- /* free and bail if we are shutting down */
- if (unlikely(!netif_running(ndev)) ||
- unlikely(!netif_carrier_ok(ndev))) {
+ if (unlikely(status < 0)) {
+ /* the interface is going down, skbs are purged */
dev_kfree_skb_any(skb);
return;
}
- if (likely(status >= 0)) {
+
+ new_skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
+ if (new_skb) {
skb_put(skb, len);
cpts_rx_timestamp(priv->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
priv->stats.rx_bytes += len;
priv->stats.rx_packets++;
- skb = NULL;
- }
-
- if (unlikely(!netif_running(ndev))) {
- if (skb)
- dev_kfree_skb_any(skb);
- return;
+ } else {
+ priv->stats.rx_dropped++;
+ new_skb = skb;
}
- if (likely(!skb)) {
- skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
- if (WARN_ON(!skb))
- return;
-
- ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
- skb_tailroom(skb), 0, GFP_KERNEL);
- }
- WARN_ON(ret < 0);
+ ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
+ skb_tailroom(new_skb), 0);
+ if (WARN_ON(ret < 0))
+ dev_kfree_skb_any(new_skb);
}
static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
{
struct cpsw_priv *priv = dev_id;
+ u32 rx, tx, rx_thresh;
- if (likely(netif_running(priv->ndev))) {
- cpsw_intr_disable(priv);
+ rx_thresh = __raw_readl(&priv->wr_regs->rx_thresh_stat);
+ rx = __raw_readl(&priv->wr_regs->rx_stat);
+ tx = __raw_readl(&priv->wr_regs->tx_stat);
+ if (!rx_thresh && !rx && !tx)
+ return IRQ_NONE;
+
+ cpsw_intr_disable(priv);
+ if (priv->irq_enabled == true) {
cpsw_disable_irq(priv);
+ priv->irq_enabled = false;
+ }
+
+ if (netif_running(priv->ndev)) {
napi_schedule(&priv->napi);
- } else {
- priv = cpsw_get_slave_priv(priv, 1);
- if (likely(priv) && likely(netif_running(priv->ndev))) {
- cpsw_intr_disable(priv);
- cpsw_disable_irq(priv);
- napi_schedule(&priv->napi);
- }
+ return IRQ_HANDLED;
+ }
+
+ priv = cpsw_get_slave_priv(priv, 1);
+ if (!priv)
+ return IRQ_NONE;
+
+ if (netif_running(priv->ndev)) {
+ napi_schedule(&priv->napi);
+ return IRQ_HANDLED;
}
- return IRQ_HANDLED;
+ return IRQ_NONE;
}
static int cpsw_poll(struct napi_struct *napi, int budget)
@@ -515,10 +548,16 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
num_rx = cpdma_chan_process(priv->rxch, budget);
if (num_rx < budget) {
+ struct cpsw_priv *prim_cpsw;
+
napi_complete(napi);
cpsw_intr_enable(priv);
cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
- cpsw_enable_irq(priv);
+ prim_cpsw = cpsw_get_slave_priv(priv, 0);
+ if (prim_cpsw->irq_enabled == false) {
+ cpsw_enable_irq(priv);
+ prim_cpsw->irq_enabled = true;
+ }
}
if (num_rx || num_tx)
@@ -612,6 +651,77 @@ static void cpsw_adjust_link(struct net_device *ndev)
}
}
+static int cpsw_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ coal->rx_coalesce_usecs = priv->coal_intvl;
+ return 0;
+}
+
+static int cpsw_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ u32 int_ctrl;
+ u32 num_interrupts = 0;
+ u32 prescale = 0;
+ u32 addnl_dvdr = 1;
+ u32 coal_intvl = 0;
+
+ if (!coal->rx_coalesce_usecs)
+ return -EINVAL;
+
+ coal_intvl = coal->rx_coalesce_usecs;
+
+ int_ctrl = readl(&priv->wr_regs->int_control);
+ prescale = priv->bus_freq_mhz * 4;
+
+ if (coal_intvl < CPSW_CMINTMIN_INTVL)
+ coal_intvl = CPSW_CMINTMIN_INTVL;
+
+ if (coal_intvl > CPSW_CMINTMAX_INTVL) {
+ /* Interrupt pacer works with 4us Pulse, we can
+ * throttle further by dilating the 4us pulse.
+ */
+ addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
+
+ if (addnl_dvdr > 1) {
+ prescale *= addnl_dvdr;
+ if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
+ coal_intvl = (CPSW_CMINTMAX_INTVL
+ * addnl_dvdr);
+ } else {
+ addnl_dvdr = 1;
+ coal_intvl = CPSW_CMINTMAX_INTVL;
+ }
+ }
+
+ num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
+ writel(num_interrupts, &priv->wr_regs->rx_imax);
+ writel(num_interrupts, &priv->wr_regs->tx_imax);
+
+ int_ctrl |= CPSW_INTPACEEN;
+ int_ctrl &= (~CPSW_INTPRESCALE_MASK);
+ int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
+ writel(int_ctrl, &priv->wr_regs->int_control);
+
+ cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
+ if (priv->data.dual_emac) {
+ int i;
+
+ for (i = 0; i < priv->data.slaves; i++) {
+ priv = netdev_priv(priv->slaves[i].ndev);
+ priv->coal_intvl = coal_intvl;
+ }
+ } else {
+ priv->coal_intvl = coal_intvl;
+ }
+
+ return 0;
+}
+
static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
{
static char *leader = "........................................";
@@ -643,14 +753,14 @@ static inline int cpsw_tx_packet_submit(struct net_device *ndev,
{
if (!priv->data.dual_emac)
return cpdma_chan_submit(priv->txch, skb, skb->data,
- skb->len, 0, GFP_KERNEL);
+ skb->len, 0);
if (ndev == cpsw_get_slave_ndev(priv, 0))
return cpdma_chan_submit(priv->txch, skb, skb->data,
- skb->len, 1, GFP_KERNEL);
+ skb->len, 1);
else
return cpdma_chan_submit(priv->txch, skb, skb->data,
- skb->len, 2, GFP_KERNEL);
+ skb->len, 2);
}
static inline void cpsw_add_dual_emac_def_ale_entries(
@@ -774,9 +884,19 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
}
}
+static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ if (!slave->phy)
+ return;
+ phy_stop(slave->phy);
+ phy_disconnect(slave->phy);
+ slave->phy = NULL;
+}
+
static int cpsw_ndo_open(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_priv *prim_cpsw;
int i, ret;
u32 reg;
@@ -819,14 +939,16 @@ static int cpsw_ndo_open(struct net_device *ndev)
struct sk_buff *skb;
ret = -ENOMEM;
- skb = netdev_alloc_skb_ip_align(priv->ndev,
- priv->rx_packet_max);
+ skb = __netdev_alloc_skb_ip_align(priv->ndev,
+ priv->rx_packet_max, GFP_KERNEL);
if (!skb)
- break;
+ goto err_cleanup;
ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
- skb_tailroom(skb), 0, GFP_KERNEL);
- if (WARN_ON(ret < 0))
- break;
+ skb_tailroom(skb), 0);
+ if (ret < 0) {
+ kfree_skb(skb);
+ goto err_cleanup;
+ }
}
/* continue even if we didn't manage to submit all
* receive descs
@@ -834,6 +956,22 @@ static int cpsw_ndo_open(struct net_device *ndev)
cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
}
+ /* Enable Interrupt pacing if configured */
+ if (priv->coal_intvl != 0) {
+ struct ethtool_coalesce coal;
+
+ coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+ cpsw_set_coalesce(ndev, &coal);
+ }
+
+ prim_cpsw = cpsw_get_slave_priv(priv, 0);
+ if (prim_cpsw->irq_enabled == false) {
+ if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
+ prim_cpsw->irq_enabled = true;
+ cpsw_enable_irq(prim_cpsw);
+ }
+ }
+
cpdma_ctlr_start(priv->dma);
cpsw_intr_enable(priv);
napi_enable(&priv->napi);
@@ -843,15 +981,13 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (priv->data.dual_emac)
priv->slaves[priv->emac_port].open_stat = true;
return 0;
-}
-static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
-{
- if (!slave->phy)
- return;
- phy_stop(slave->phy);
- phy_disconnect(slave->phy);
- slave->phy = NULL;
+err_cleanup:
+ cpdma_ctlr_stop(priv->dma);
+ for_each_slave(priv, cpsw_slave_stop, priv);
+ pm_runtime_put_sync(&priv->pdev->dev);
+ netif_carrier_off(priv->ndev);
+ return ret;
}
static int cpsw_ndo_stop(struct net_device *ndev)
@@ -942,7 +1078,7 @@ static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
{
- struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
+ struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave];
u32 ts_en, seq_id;
if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) {
@@ -971,7 +1107,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
if (priv->data.dual_emac)
slave = &priv->slaves[priv->emac_port];
else
- slave = &priv->slaves[priv->data.cpts_active_slave];
+ slave = &priv->slaves[priv->data.active_slave];
ctrl = slave_read(slave, CPSW2_CONTROL);
ctrl &= ~CTRL_ALL_TS_MASK;
@@ -1056,14 +1192,26 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(req);
+ int slave_no = cpsw_slave_index(priv);
+
if (!netif_running(dev))
return -EINVAL;
+ switch (cmd) {
#ifdef CONFIG_TI_CPTS
- if (cmd == SIOCSHWTSTAMP)
+ case SIOCSHWTSTAMP:
return cpsw_hwtstamp_ioctl(dev, req);
#endif
- return -ENOTSUPP;
+ case SIOCGMIIPHY:
+ data->phy_id = priv->slaves[slave_no].phy->addr;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
}
static void cpsw_ndo_tx_timeout(struct net_device *ndev)
@@ -1138,7 +1286,7 @@ clean_vid:
}
static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
- unsigned short vid)
+ __be16 proto, u16 vid)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -1150,7 +1298,7 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
}
static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
- unsigned short vid)
+ __be16 proto, u16 vid)
{
struct cpsw_priv *priv = netdev_priv(ndev);
int ret;
@@ -1244,12 +1392,39 @@ static int cpsw_get_ts_info(struct net_device *ndev,
return 0;
}
+static int cpsw_get_settings(struct net_device *ndev,
+ struct ethtool_cmd *ecmd)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int slave_no = cpsw_slave_index(priv);
+
+ if (priv->slaves[slave_no].phy)
+ return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int slave_no = cpsw_slave_index(priv);
+
+ if (priv->slaves[slave_no].phy)
+ return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd);
+ else
+ return -EOPNOTSUPP;
+}
+
static const struct ethtool_ops cpsw_ethtool_ops = {
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
.set_msglevel = cpsw_set_msglevel,
.get_link = ethtool_op_get_link,
.get_ts_info = cpsw_get_ts_info,
+ .get_settings = cpsw_get_settings,
+ .set_settings = cpsw_set_settings,
+ .get_coalesce = cpsw_get_coalesce,
+ .set_coalesce = cpsw_set_coalesce,
};
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
@@ -1282,12 +1457,12 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->slaves = prop;
- if (of_property_read_u32(node, "cpts_active_slave", &prop)) {
- pr_err("Missing cpts_active_slave property in the DT.\n");
+ if (of_property_read_u32(node, "active_slave", &prop)) {
+ pr_err("Missing active_slave property in the DT.\n");
ret = -EINVAL;
goto error_ret;
}
- data->cpts_active_slave = prop;
+ data->active_slave = prop;
if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
pr_err("Missing cpts_clock_mult property in the DT.\n");
@@ -1380,7 +1555,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
if (data->dual_emac) {
- if (of_property_read_u32(node, "dual_emac_res_vlan",
+ if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
&prop)) {
pr_err("Missing dual_emac_res_vlan in DT.\n");
slave_data->dual_emac_res_vlan = i+1;
@@ -1437,6 +1612,9 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
priv_sl2->slaves = priv->slaves;
priv_sl2->clk = priv->clk;
+ priv_sl2->coal_intvl = 0;
+ priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
+
priv_sl2->cpsw_res = priv->cpsw_res;
priv_sl2->regs = priv->regs;
priv_sl2->host_port = priv->host_port;
@@ -1455,8 +1633,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
priv_sl2->irqs_table[i] = priv->irqs_table[i];
priv_sl2->num_irqs = priv->num_irqs;
}
-
- ndev->features |= NETIF_F_HW_VLAN_FILTER;
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops;
SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
@@ -1476,7 +1653,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
static int cpsw_probe(struct platform_device *pdev)
{
- struct cpsw_platform_data *data = pdev->dev.platform_data;
+ struct cpsw_platform_data *data;
struct net_device *ndev;
struct cpsw_priv *priv;
struct cpdma_params dma_params;
@@ -1501,6 +1678,7 @@ static int cpsw_probe(struct platform_device *pdev)
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
priv->rx_packet_max = max(rx_packet_max, 128);
priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
+ priv->irq_enabled = true;
if (!priv->cpts) {
pr_err("error allocating cpts\n");
goto clean_ndev_ret;
@@ -1546,6 +1724,8 @@ static int cpsw_probe(struct platform_device *pdev)
ret = -ENODEV;
goto clean_slave_ret;
}
+ priv->coal_intvl = 0;
+ priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!priv->cpsw_res) {
@@ -1687,12 +1867,12 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_ale_ret;
}
priv->irqs_table[k] = i;
- priv->num_irqs = k;
+ priv->num_irqs = k + 1;
}
k++;
}
- ndev->features |= NETIF_F_HW_VLAN_FILTER;
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops;
SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
@@ -1725,7 +1905,8 @@ static int cpsw_probe(struct platform_device *pdev)
return 0;
clean_irq_ret:
- free_irq(ndev->irq, priv);
+ for (i = 0; i < priv->num_irqs; i++)
+ free_irq(priv->irqs_table[i], priv);
clean_ale_ret:
cpsw_ale_destroy(priv->ale);
clean_dma_ret:
@@ -1748,7 +1929,8 @@ clean_slave_ret:
pm_runtime_disable(&pdev->dev);
kfree(priv->slaves);
clean_ndev_ret:
- free_netdev(ndev);
+ kfree(priv->data.slave_data);
+ free_netdev(priv->ndev);
return ret;
}
@@ -1756,12 +1938,17 @@ static int cpsw_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct cpsw_priv *priv = netdev_priv(ndev);
+ int i;
- pr_info("removing device");
platform_set_drvdata(pdev, NULL);
+ if (priv->data.dual_emac)
+ unregister_netdev(cpsw_get_slave_ndev(priv, 1));
+ unregister_netdev(ndev);
cpts_unregister(priv->cpts);
- free_irq(ndev->irq, priv);
+ for (i = 0; i < priv->num_irqs; i++)
+ free_irq(priv->irqs_table[i], priv);
+
cpsw_ale_destroy(priv->ale);
cpdma_chan_destroy(priv->txch);
cpdma_chan_destroy(priv->rxch);
@@ -1775,8 +1962,10 @@ static int cpsw_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
clk_put(priv->clk);
kfree(priv->slaves);
+ kfree(priv->data.slave_data);
+ if (priv->data.dual_emac)
+ free_netdev(cpsw_get_slave_ndev(priv, 1));
free_netdev(ndev);
-
return 0;
}
@@ -1812,6 +2001,7 @@ static const struct of_device_id cpsw_of_mtable[] = {
{ .compatible = "ti,cpsw", },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
static struct platform_driver cpsw_driver = {
.driver = {
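To make the pacing arithmetic in cpsw_set_coalesce() concrete: with a 250 MHz bus clock, bus_freq_mhz = 250 and prescale = 1000; the constants above bound rx_coalesce_usecs to [16, 500] us, and a request of 500 us programs rx_imax/tx_imax to 1000 / 500 = 2 interrupts per millisecond. A standalone sketch of the clamping (illustrative, and omitting the pulse-dilation branch taken for larger intervals):

#include <stdio.h>

#define CMINTMAX_CNT	63
#define CMINTMIN_CNT	2
#define CMINTMAX_INTVL	(1000 / CMINTMIN_CNT)		/* 500 us */
#define CMINTMIN_INTVL	((1000 / CMINTMAX_CNT) + 1)	/*  16 us */

/* Clamp the requested rx-usecs and derive the per-ms interrupt budget. */
static unsigned int pace_interrupts(unsigned int usecs)
{
	if (usecs < CMINTMIN_INTVL)
		usecs = CMINTMIN_INTVL;
	if (usecs > CMINTMAX_INTVL)
		usecs = CMINTMAX_INTVL;
	return 1000 / usecs;
}

int main(void)
{
	printf("%u\n", pace_interrupts(500)); /* prints 2 */
	return 0;
}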
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 463597f919f1..8c351f100aca 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -94,7 +94,7 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
case CPTS_EV_HW:
break;
default:
- pr_err("cpts: unkown event type\n");
+ pr_err("cpts: unknown event type\n");
break;
}
if (type == match)
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index ee13dc78430c..49dfd592ac1e 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -20,6 +20,7 @@
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
+#include <linux/delay.h>
#include "davinci_cpdma.h"
@@ -312,14 +313,16 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
}
if (ctlr->params.has_soft_reset) {
- unsigned long timeout = jiffies + HZ/10;
+ unsigned timeout = 10 * 100;
dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
- while (time_before(jiffies, timeout)) {
+ while (timeout) {
if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
break;
+ udelay(10);
+ timeout--;
}
- WARN_ON(!time_before(jiffies, timeout));
+ WARN_ON(!timeout);
}
for (i = 0; i < ctlr->num_chan; i++) {
@@ -673,7 +676,7 @@ static void __cpdma_chan_submit(struct cpdma_chan *chan,
}
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
- int len, int directed, gfp_t gfp_mask)
+ int len, int directed)
{
struct cpdma_ctlr *ctlr = chan->ctlr;
struct cpdma_desc __iomem *desc;
@@ -773,6 +776,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
struct cpdma_ctlr *ctlr = chan->ctlr;
struct cpdma_desc __iomem *desc;
int status, outlen;
+ int cb_status = 0;
struct cpdma_desc_pool *pool = ctlr->pool;
dma_addr_t desc_dma;
unsigned long flags;
@@ -808,8 +812,12 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
}
spin_unlock_irqrestore(&chan->lock, flags);
+ if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
+ cb_status = -ENOSYS;
+ else
+ cb_status = status;
- __cpdma_chan_free(chan, desc, outlen, status);
+ __cpdma_chan_free(chan, desc, outlen, cb_status);
return status;
unlock_ret:
@@ -868,7 +876,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
struct cpdma_desc_pool *pool = ctlr->pool;
unsigned long flags;
int ret;
- unsigned long timeout;
+ unsigned timeout;
spin_lock_irqsave(&chan->lock, flags);
if (chan->state != CPDMA_STATE_ACTIVE) {
@@ -883,14 +891,15 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
dma_reg_write(ctlr, chan->td, chan_linear(chan));
/* wait for teardown complete */
- timeout = jiffies + HZ/10; /* 100 msec */
- while (time_before(jiffies, timeout)) {
+ timeout = 100 * 100; /* 100 ms */
+ while (timeout) {
u32 cp = chan_read(chan, cp);
if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
break;
- cpu_relax();
+ udelay(10);
+ timeout--;
}
- WARN_ON(!time_before(jiffies, timeout));
+ WARN_ON(!timeout);
chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
/* handle completed packets */
@@ -1031,3 +1040,5 @@ unlock_ret:
return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);
+
+MODULE_LICENSE("GPL");
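The timeout rework above trades jiffies-based waiting for a fixed number of udelay(10) polls, which keeps the bound accurate even in contexts where the timer tick is not yet reliable. The same bounded-poll shape as a generic sketch (the register name and bit are placeholders):

/* Poll a self-clearing reset bit for up to ~1 ms in 10 us steps. */
static int foo_wait_reset_done(void __iomem *reg)
{
	unsigned int timeout = 100;

	while (timeout--) {
		if (!(readl(reg) & BIT(0)))
			return 0;	/* reset completed */
		udelay(10);
	}
	return -ETIMEDOUT;
}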
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index d9bcc6032fdc..86dee487f2f0 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -89,7 +89,7 @@ int cpdma_chan_dump(struct cpdma_chan *chan);
int cpdma_chan_get_stats(struct cpdma_chan *chan,
struct cpdma_chan_stats *stats);
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
- int len, int directed, gfp_t gfp_mask);
+ int len, int directed);
int cpdma_chan_process(struct cpdma_chan *chan, int quota);
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index ae1b77aa199f..860e15ddfbcb 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1037,7 +1037,7 @@ static void emac_rx_handler(void *token, int len, int status)
recycle:
ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
- skb_tailroom(skb), 0, GFP_KERNEL);
+ skb_tailroom(skb), 0);
WARN_ON(ret == -ENOMEM);
if (unlikely(ret < 0))
@@ -1053,7 +1053,7 @@ static void emac_tx_handler(void *token, int len, int status)
* queue is stopped then start the queue as we have free desc for tx
*/
if (unlikely(netif_queue_stopped(ndev)))
- netif_start_queue(ndev);
+ netif_wake_queue(ndev);
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += len;
dev_kfree_skb_any(skb);
@@ -1092,7 +1092,7 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_tx_timestamp(skb);
ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
- 0, GFP_KERNEL);
+ 0);
if (unlikely(ret_code != 0)) {
if (netif_msg_tx_err(priv) && net_ratelimit())
dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
@@ -1438,7 +1438,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
* Polled functionality used by netconsole and others in non interrupt mode
*
*/
-void emac_poll_controller(struct net_device *ndev)
+static void emac_poll_controller(struct net_device *ndev)
{
struct emac_priv *priv = netdev_priv(ndev);
@@ -1558,7 +1558,7 @@ static int emac_dev_open(struct net_device *ndev)
break;
ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
- skb_tailroom(skb), 0, GFP_KERNEL);
+ skb_tailroom(skb), 0);
if (WARN_ON(ret < 0))
break;
}
@@ -1865,21 +1865,18 @@ static int davinci_emac_probe(struct platform_device *pdev)
/* obtain emac clock from kernel */
- emac_clk = clk_get(&pdev->dev, NULL);
+ emac_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(emac_clk)) {
dev_err(&pdev->dev, "failed to get EMAC clock\n");
return -EBUSY;
}
emac_bus_frequency = clk_get_rate(emac_clk);
- clk_put(emac_clk);
/* TODO: Probe PHY here if possible */
ndev = alloc_etherdev(sizeof(struct emac_priv));
- if (!ndev) {
- rc = -ENOMEM;
- goto no_ndev;
- }
+ if (!ndev)
+ return -ENOMEM;
platform_set_drvdata(pdev, ndev);
priv = netdev_priv(ndev);
@@ -1893,7 +1890,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (!pdata) {
dev_err(&pdev->dev, "no platform data\n");
rc = -ENODEV;
- goto probe_quit;
+ goto no_pdata;
}
/* MAC addr and PHY mask , RMII enable info from platform_data */
@@ -1913,23 +1910,23 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (!res) {
dev_err(&pdev->dev,"error getting res\n");
rc = -ENOENT;
- goto probe_quit;
+ goto no_pdata;
}
priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
size = resource_size(res);
- if (!request_mem_region(res->start, size, ndev->name)) {
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ size, ndev->name)) {
dev_err(&pdev->dev, "failed request_mem_region() for regs\n");
rc = -ENXIO;
- goto probe_quit;
+ goto no_pdata;
}
- priv->remap_addr = ioremap(res->start, size);
+ priv->remap_addr = devm_ioremap(&pdev->dev, res->start, size);
if (!priv->remap_addr) {
dev_err(&pdev->dev, "unable to map IO\n");
rc = -ENOMEM;
- release_mem_region(res->start, size);
- goto probe_quit;
+ goto no_pdata;
}
priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset;
ndev->base_addr = (unsigned long)priv->remap_addr;
@@ -1962,7 +1959,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (!priv->dma) {
dev_err(&pdev->dev, "error initializing DMA\n");
rc = -ENOMEM;
- goto no_dma;
+ goto no_pdata;
}
priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
@@ -1971,14 +1968,14 @@ static int davinci_emac_probe(struct platform_device *pdev)
emac_rx_handler);
if (WARN_ON(!priv->txchan || !priv->rxchan)) {
rc = -ENOMEM;
- goto no_irq_res;
+ goto no_cpdma_chan;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(&pdev->dev, "error getting irq res\n");
rc = -ENOENT;
- goto no_irq_res;
+ goto no_cpdma_chan;
}
ndev->irq = res->start;
@@ -2000,7 +1997,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (rc) {
dev_err(&pdev->dev, "error in register_netdev\n");
rc = -ENODEV;
- goto no_irq_res;
+ goto no_cpdma_chan;
}
@@ -2015,20 +2012,14 @@ static int davinci_emac_probe(struct platform_device *pdev)
return 0;
-no_irq_res:
+no_cpdma_chan:
if (priv->txchan)
cpdma_chan_destroy(priv->txchan);
if (priv->rxchan)
cpdma_chan_destroy(priv->rxchan);
cpdma_ctlr_destroy(priv->dma);
-no_dma:
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
- iounmap(priv->remap_addr);
-
-probe_quit:
+no_pdata:
free_netdev(ndev);
-no_ndev:
return rc;
}
@@ -2041,14 +2032,12 @@ no_ndev:
*/
static int davinci_emac_remove(struct platform_device *pdev)
{
- struct resource *res;
struct net_device *ndev = platform_get_drvdata(pdev);
struct emac_priv *priv = netdev_priv(ndev);
dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n");
platform_set_drvdata(pdev, NULL);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (priv->txchan)
cpdma_chan_destroy(priv->txchan);
@@ -2056,10 +2045,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
cpdma_chan_destroy(priv->rxchan);
cpdma_ctlr_destroy(priv->dma);
- release_mem_region(res->start, resource_size(res));
-
unregister_netdev(ndev);
- iounmap(priv->remap_addr);
free_netdev(ndev);
return 0;
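The davinci_emac changes above are a straight conversion to managed (devm_*) resources: the clock, memory region and mapping are released automatically on probe failure or unbind, which is what lets the no_dma/probe_quit labels and the remove-path release_mem_region()/iounmap() calls disappear. The resulting probe shape, sketched for a hypothetical driver:

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* Managed clk_get(): no clk_put() needed on any exit path. */
	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* ... later failures can simply return an error code ... */
	return 0;
}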
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index d04a622b08d4..12aec173564c 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -485,6 +485,7 @@ static const struct of_device_id davinci_mdio_of_mtable[] = {
{ .compatible = "ti,davinci_mdio", },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
static struct platform_driver davinci_mdio_driver = {
.driver = {
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 22725386c5de..60c400f6d01f 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -320,6 +320,7 @@ static void tlan_remove_one(struct pci_dev *pdev)
free_netdev(dev);
pci_set_drvdata(pdev, NULL);
+ cancel_work_sync(&priv->tlan_tqueue);
}
static void tlan_start(struct net_device *dev)
@@ -1911,10 +1912,8 @@ static void tlan_reset_lists(struct net_device *dev)
list->frame_size = TLAN_MAX_FRAME_SIZE;
list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
- if (!skb) {
- netdev_err(dev, "Out of memory for received data\n");
+ if (!skb)
break;
- }
list->buffer[0].address = pci_map_single(priv->pci_dev,
skb->data,
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 445c0595c997..ad32af67e618 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -58,13 +58,6 @@ MODULE_DESCRIPTION("Gelic Network driver");
MODULE_LICENSE("GPL");
-static inline void gelic_card_enable_rxdmac(struct gelic_card *card);
-static inline void gelic_card_disable_rxdmac(struct gelic_card *card);
-static inline void gelic_card_disable_txdmac(struct gelic_card *card);
-static inline void gelic_card_reset_chain(struct gelic_card *card,
- struct gelic_descr_chain *chain,
- struct gelic_descr *start_descr);
-
/* set irq_mask */
int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask)
{
@@ -78,12 +71,12 @@ int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask)
return status;
}
-static inline void gelic_card_rx_irq_on(struct gelic_card *card)
+static void gelic_card_rx_irq_on(struct gelic_card *card)
{
card->irq_mask |= GELIC_CARD_RXINT;
gelic_card_set_irq_mask(card, card->irq_mask);
}
-static inline void gelic_card_rx_irq_off(struct gelic_card *card)
+static void gelic_card_rx_irq_off(struct gelic_card *card)
{
card->irq_mask &= ~GELIC_CARD_RXINT;
gelic_card_set_irq_mask(card, card->irq_mask);
@@ -127,6 +120,120 @@ static int gelic_card_set_link_mode(struct gelic_card *card, int mode)
return 0;
}
+/**
+ * gelic_card_disable_txdmac - disables the transmit DMA controller
+ * @card: card structure
+ *
+ * gelic_card_disable_txdmac terminates processing on the DMA controller by
+ * turning off DMA and issuing a force end
+ */
+static void gelic_card_disable_txdmac(struct gelic_card *card)
+{
+ int status;
+
+ /* this hvc blocks until the DMA in progress really stopped */
+ status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card));
+ if (status)
+ dev_err(ctodev(card),
+ "lv1_net_stop_tx_dma failed, status=%d\n", status);
+}
+
+/**
+ * gelic_card_enable_rxdmac - enables the receive DMA controller
+ * @card: card structure
+ *
+ * gelic_card_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
+ * in the GDADMACCNTR register
+ */
+static void gelic_card_enable_rxdmac(struct gelic_card *card)
+{
+ int status;
+
+#ifdef DEBUG
+ if (gelic_descr_get_status(card->rx_chain.head) !=
+ GELIC_DESCR_DMA_CARDOWNED) {
+ printk(KERN_ERR "%s: status=%x\n", __func__,
+ be32_to_cpu(card->rx_chain.head->dmac_cmd_status));
+ printk(KERN_ERR "%s: nextphy=%x\n", __func__,
+ be32_to_cpu(card->rx_chain.head->next_descr_addr));
+ printk(KERN_ERR "%s: head=%p\n", __func__,
+ card->rx_chain.head);
+ }
+#endif
+ status = lv1_net_start_rx_dma(bus_id(card), dev_id(card),
+ card->rx_chain.head->bus_addr, 0);
+ if (status)
+ dev_info(ctodev(card),
+ "lv1_net_start_rx_dma failed, status=%d\n", status);
+}
+
+/**
+ * gelic_card_disable_rxdmac - disables the receive DMA controller
+ * @card: card structure
+ *
+ * gelic_card_disable_rxdmac terminates processing on the DMA controller by
+ * turning off DMA and issuing a force end
+ */
+static void gelic_card_disable_rxdmac(struct gelic_card *card)
+{
+ int status;
+
+ /* this hvc blocks until the DMA in progress really stopped */
+ status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card));
+ if (status)
+ dev_err(ctodev(card),
+ "lv1_net_stop_rx_dma failed, %d\n", status);
+}
+
+/**
+ * gelic_descr_set_status -- sets the status of a descriptor
+ * @descr: descriptor to change
+ * @status: status to set in the descriptor
+ *
+ * changes the status to the specified value. Doesn't change other bits
+ * in the status
+ */
+static void gelic_descr_set_status(struct gelic_descr *descr,
+ enum gelic_descr_dma_status status)
+{
+ descr->dmac_cmd_status = cpu_to_be32(status |
+ (be32_to_cpu(descr->dmac_cmd_status) &
+ ~GELIC_DESCR_DMA_STAT_MASK));
+ /*
+ * dma_cmd_status field is used to indicate whether the descriptor
+ * is valid or not.
+ * Usually caller of this function wants to inform that to the
+ * hardware, so we assure here the hardware sees the change.
+ */
+ wmb();
+}
+
+/**
+ * gelic_card_reset_chain - reset status of a descriptor chain
+ * @card: card structure
+ * @chain: address of chain
+ * @start_descr: address of descriptor array
+ *
+ * Reset the status of dma descriptors to ready state
+ * and re-initialize the hardware chain for later use
+ */
+static void gelic_card_reset_chain(struct gelic_card *card,
+ struct gelic_descr_chain *chain,
+ struct gelic_descr *start_descr)
+{
+ struct gelic_descr *descr;
+
+ for (descr = start_descr; start_descr != descr->next; descr++) {
+ gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
+ descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr);
+ }
+
+ chain->head = start_descr;
+ chain->tail = (descr - 1);
+
+ (descr - 1)->next_descr_addr = 0;
+}
+
void gelic_card_up(struct gelic_card *card)
{
pr_debug("%s: called\n", __func__);
@@ -183,29 +290,6 @@ gelic_descr_get_status(struct gelic_descr *descr)
}
/**
- * gelic_descr_set_status -- sets the status of a descriptor
- * @descr: descriptor to change
- * @status: status to set in the descriptor
- *
- * changes the status to the specified value. Doesn't change other bits
- * in the status
- */
-static void gelic_descr_set_status(struct gelic_descr *descr,
- enum gelic_descr_dma_status status)
-{
- descr->dmac_cmd_status = cpu_to_be32(status |
- (be32_to_cpu(descr->dmac_cmd_status) &
- ~GELIC_DESCR_DMA_STAT_MASK));
- /*
- * dma_cmd_status field is used to indicate whether the descriptor
- * is valid or not.
- * Usually caller of this function wants to inform that to the
- * hardware, so we assure here the hardware sees the change.
- */
- wmb();
-}
-
-/**
* gelic_card_free_chain - free descriptor chain
* @card: card structure
* @descr_in: address of desc
@@ -286,31 +370,6 @@ iommu_error:
}
/**
- * gelic_card_reset_chain - reset status of a descriptor chain
- * @card: card structure
- * @chain: address of chain
- * @start_descr: address of descriptor array
- *
- * Reset the status of dma descriptors to ready state
- * and re-initialize the hardware chain for later use
- */
-static void gelic_card_reset_chain(struct gelic_card *card,
- struct gelic_descr_chain *chain,
- struct gelic_descr *start_descr)
-{
- struct gelic_descr *descr;
-
- for (descr = start_descr; start_descr != descr->next; descr++) {
- gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
- descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr);
- }
-
- chain->head = start_descr;
- chain->tail = (descr - 1);
-
- (descr - 1)->next_descr_addr = 0;
-}
-/**
* gelic_descr_prepare_rx - reinitializes a rx descriptor
* @card: card structure
* @descr: descriptor to re-init
@@ -599,71 +658,6 @@ void gelic_net_set_multi(struct net_device *netdev)
}
/**
- * gelic_card_enable_rxdmac - enables the receive DMA controller
- * @card: card structure
- *
- * gelic_card_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
- * in the GDADMACCNTR register
- */
-static inline void gelic_card_enable_rxdmac(struct gelic_card *card)
-{
- int status;
-
-#ifdef DEBUG
- if (gelic_descr_get_status(card->rx_chain.head) !=
- GELIC_DESCR_DMA_CARDOWNED) {
- printk(KERN_ERR "%s: status=%x\n", __func__,
- be32_to_cpu(card->rx_chain.head->dmac_cmd_status));
- printk(KERN_ERR "%s: nextphy=%x\n", __func__,
- be32_to_cpu(card->rx_chain.head->next_descr_addr));
- printk(KERN_ERR "%s: head=%p\n", __func__,
- card->rx_chain.head);
- }
-#endif
- status = lv1_net_start_rx_dma(bus_id(card), dev_id(card),
- card->rx_chain.head->bus_addr, 0);
- if (status)
- dev_info(ctodev(card),
- "lv1_net_start_rx_dma failed, status=%d\n", status);
-}
-
-/**
- * gelic_card_disable_rxdmac - disables the receive DMA controller
- * @card: card structure
- *
- * gelic_card_disable_rxdmac terminates processing on the DMA controller by
- * turing off DMA and issuing a force end
- */
-static inline void gelic_card_disable_rxdmac(struct gelic_card *card)
-{
- int status;
-
- /* this hvc blocks until the DMA in progress really stopped */
- status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card));
- if (status)
- dev_err(ctodev(card),
- "lv1_net_stop_rx_dma failed, %d\n", status);
-}
-
-/**
- * gelic_card_disable_txdmac - disables the transmit DMA controller
- * @card: card structure
- *
- * gelic_card_disable_txdmac terminates processing on the DMA controller by
- * turing off DMA and issuing a force end
- */
-static inline void gelic_card_disable_txdmac(struct gelic_card *card)
-{
- int status;
-
- /* this hvc blocks until the DMA in progress really stopped */
- status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card));
- if (status)
- dev_err(ctodev(card),
- "lv1_net_stop_tx_dma failed, status=%d\n", status);
-}
-
-/**
* gelic_net_stop - called upon ifconfig down
* @netdev: interface device structure
*
@@ -746,7 +740,7 @@ static void gelic_descr_set_tx_cmdstat(struct gelic_descr *descr,
}
}
-static inline struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb,
+static struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb,
unsigned short tag)
{
struct vlan_ethhdr *veth;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index f1b91fd7e41c..c655fe60121e 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -352,8 +352,7 @@ spider_net_init_chain(struct spider_net_card *card,
alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
- &chain->dma_addr, GFP_KERNEL);
-
+ &chain->dma_addr, GFP_KERNEL);
if (!chain->hwring)
return -ENOMEM;
@@ -2330,8 +2329,8 @@ spider_net_setup_netdev(struct spider_net_card *card)
if (SPIDER_NET_RX_CSUM_DEFAULT)
netdev->features |= NETIF_F_RXCSUM;
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX;
- /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
- * NETIF_F_HW_VLAN_FILTER */
+ /* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ * NETIF_F_HW_VLAN_CTAG_FILTER */
netdev->irq = card->pdev->irq;
card->num_rx_ints = 0;
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 8fa947a2d929..3c69a0460832 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1308,27 +1308,16 @@ static int tsi108_open(struct net_device *dev)
data->id, dev->irq, dev->name);
}
- data->rxring = dma_alloc_coherent(NULL, rxring_size,
- &data->rxdma, GFP_KERNEL);
-
- if (!data->rxring) {
- printk(KERN_DEBUG
- "TSI108_ETH: failed to allocate memory for rxring!\n");
+ data->rxring = dma_alloc_coherent(NULL, rxring_size, &data->rxdma,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!data->rxring)
return -ENOMEM;
- } else {
- memset(data->rxring, 0, rxring_size);
- }
-
- data->txring = dma_alloc_coherent(NULL, txring_size,
- &data->txdma, GFP_KERNEL);
+ data->txring = dma_alloc_coherent(NULL, txring_size, &data->txdma,
+ GFP_KERNEL | __GFP_ZERO);
if (!data->txring) {
- printk(KERN_DEBUG
- "TSI108_ETH: failed to allocate memory for txring!\n");
pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);
return -ENOMEM;
- } else {
- memset(data->txring, 0, txring_size);
}
for (i = 0; i < TSI108_RXRING_LEN; i++) {
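Adding __GFP_ZERO to the dma_alloc_coherent() mask, as above, hands back pre-zeroed coherent memory and removes the need for the explicit memset() and its else branch. The resulting allocation idiom, sketched with illustrative names:

	ring = dma_alloc_coherent(dev, ring_size, &ring_dma,
				  GFP_KERNEL | __GFP_ZERO);
	if (!ring)
		return -ENOMEM;
	/* ring_size bytes are already zeroed; no memset() required. */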
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 185c721c52d7..ca98acabf1b4 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -508,8 +508,10 @@ static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
-static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
-static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+static int rhine_vlan_rx_add_vid(struct net_device *dev,
+ __be16 proto, u16 vid);
+static int rhine_vlan_rx_kill_vid(struct net_device *dev,
+ __be16 proto, u16 vid);
static void rhine_restart_tx(struct net_device *dev);
static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
@@ -1026,8 +1028,9 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
if (pdev->revision >= VT6105M)
- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
/* dev->name not defined before register_netdev()! */
rc = register_netdev(dev);
@@ -1414,7 +1417,7 @@ static void rhine_update_vcam(struct net_device *dev)
rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
}
-static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct rhine_private *rp = netdev_priv(dev);
@@ -1425,7 +1428,7 @@ static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
return 0;
}
-static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct rhine_private *rp = netdev_priv(dev);
@@ -1933,7 +1936,7 @@ static int rhine_rx(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
if (unlikely(desc_length & DescTag))
- __vlan_hwaccel_put_tag(skb, vlan_tci);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
netif_receive_skb(skb);
u64_stats_update_begin(&rp->rx_stats.syncp);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 1bc7f9fd2583..fb6248956ee2 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -525,7 +525,8 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
}
-static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int velocity_vlan_rx_add_vid(struct net_device *dev,
+ __be16 proto, u16 vid)
{
struct velocity_info *vptr = netdev_priv(dev);
@@ -536,7 +537,8 @@ static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
return 0;
}
-static int velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int velocity_vlan_rx_kill_vid(struct net_device *dev,
+ __be16 proto, u16 vid)
{
struct velocity_info *vptr = netdev_priv(dev);
@@ -2078,7 +2080,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
if (rd->rdesc0.RSR & RSR_DETAG) {
u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
- __vlan_hwaccel_put_tag(skb, vid);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
netif_rx(skb);
@@ -2810,9 +2812,10 @@ static int velocity_found1(struct pci_dev *pdev,
dev->ethtool_ops = &velocity_ethtool_ops;
netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
- dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HW_VLAN_TX;
- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
- NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_HW_VLAN_CTAG_TX;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_IP_CSUM;
ret = register_netdev(dev);
if (ret < 0)
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 545043cc4c0b..a518dcab396e 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -754,7 +754,7 @@ static int w5100_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int w5100_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -787,7 +787,7 @@ static int w5100_resume(struct device *dev)
}
return 0;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
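Switching the guard from CONFIG_PM to CONFIG_PM_SLEEP, as above, matches SIMPLE_DEV_PM_OPS(): the macro only wires up the callbacks when system sleep is enabled, so under CONFIG_PM alone (e.g. runtime PM only) the handlers would be compiled but unreferenced. The idiom, sketched for a hypothetical driver:

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev) { /* quiesce hardware */ return 0; }
static int foo_resume(struct device *dev) { /* reprogram hardware */ return 0; }
#endif

/* References foo_suspend/foo_resume only when CONFIG_PM_SLEEP is set. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);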
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 7cbd0e6fc6f3..6e00e3f94ce4 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -666,7 +666,7 @@ static int w5300_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int w5300_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -699,7 +699,7 @@ static int w5300_resume(struct device *dev)
}
return 0;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(w5300_pm_ops, w5300_suspend, w5300_resume);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9fc2ada4c3c2..57c2e5ef2804 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -245,39 +245,30 @@ static int temac_dma_bd_init(struct net_device *ndev)
/* returns a virtual address and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p, GFP_KERNEL);
- if (!lp->tx_bd_v) {
- dev_err(&ndev->dev,
- "unable to allocate DMA TX buffer descriptors");
+ &lp->tx_bd_p, GFP_KERNEL | __GFP_ZERO);
+ if (!lp->tx_bd_v)
goto out;
- }
+
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p, GFP_KERNEL);
- if (!lp->rx_bd_v) {
- dev_err(&ndev->dev,
- "unable to allocate DMA RX buffer descriptors");
+ &lp->rx_bd_p, GFP_KERNEL | __GFP_ZERO);
+ if (!lp->rx_bd_v)
goto out;
- }
- memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
for (i = 0; i < TX_BD_NUM; i++) {
lp->tx_bd_v[i].next = lp->tx_bd_p +
sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
}
- memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
for (i = 0; i < RX_BD_NUM; i++) {
lp->rx_bd_v[i].next = lp->rx_bd_p +
sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
skb = netdev_alloc_skb_ip_align(ndev,
XTE_MAX_JUMBO_FRAME_SIZE);
-
- if (skb == 0) {
- dev_err(&ndev->dev, "alloc_skb error %d\n", i);
+ if (!skb)
goto out;
- }
+
lp->rx_skb[i] = skb;
/* returns physical address of skb->data */
lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
@@ -789,9 +780,7 @@ static void ll_temac_recv(struct net_device *ndev)
new_skb = netdev_alloc_skb_ip_align(ndev,
XTE_MAX_JUMBO_FRAME_SIZE);
-
- if (new_skb == 0) {
- dev_err(&ndev->dev, "no memory for new sk_buff\n");
+ if (!new_skb) {
spin_unlock_irqrestore(&lp->rx_lock, flags);
return;
}
@@ -1029,9 +1018,9 @@ static int temac_of_probe(struct platform_device *op)
ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
- ndev->features |= NETIF_F_HW_VLAN_TX; /* Transmit VLAN hw accel */
- ndev->features |= NETIF_F_HW_VLAN_RX; /* Receive VLAN hw acceleration */
- ndev->features |= NETIF_F_HW_VLAN_FILTER; /* Receive VLAN filtering */
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */
ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
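
The descriptor-ring hunks above (and the matching axienet ones below) fold the follow-up memset() into the allocation itself via __GFP_ZERO, and drop the per-driver OOM messages that the allocator already prints on failure. A sketch of the resulting allocation, with a hypothetical foo_bd descriptor layout:

	#include <linux/types.h>
	#include <linux/dma-mapping.h>

	struct foo_bd {
		u32 next;
		u32 phys;
	};

	static struct foo_bd *foo_alloc_ring(struct device *dev, int nbds,
					     dma_addr_t *pa)
	{
		/* zeroed by the allocator; no separate memset() needed,
		 * and no dev_err() on failure since the allocator warns */
		return dma_alloc_coherent(dev, sizeof(struct foo_bd) * nbds,
					  pa, GFP_KERNEL | __GFP_ZERO);
	}
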
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 278c9db3b5b8..24748e8367a1 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -204,41 +204,31 @@ static int axienet_dma_bd_init(struct net_device *ndev)
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM,
&lp->tx_bd_p,
- GFP_KERNEL);
- if (!lp->tx_bd_v) {
- dev_err(&ndev->dev, "unable to allocate DMA Tx buffer "
- "descriptors");
+ GFP_KERNEL | __GFP_ZERO);
+ if (!lp->tx_bd_v)
goto out;
- }
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * RX_BD_NUM,
&lp->rx_bd_p,
- GFP_KERNEL);
- if (!lp->rx_bd_v) {
- dev_err(&ndev->dev, "unable to allocate DMA Rx buffer "
- "descriptors");
+ GFP_KERNEL | __GFP_ZERO);
+ if (!lp->rx_bd_v)
goto out;
- }
- memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
for (i = 0; i < TX_BD_NUM; i++) {
lp->tx_bd_v[i].next = lp->tx_bd_p +
sizeof(*lp->tx_bd_v) *
((i + 1) % TX_BD_NUM);
}
- memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
for (i = 0; i < RX_BD_NUM; i++) {
lp->rx_bd_v[i].next = lp->rx_bd_p +
sizeof(*lp->rx_bd_v) *
((i + 1) % RX_BD_NUM);
skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
- if (!skb) {
- dev_err(&ndev->dev, "alloc_skb error %d\n", i);
+ if (!skb)
goto out;
- }
lp->rx_bd_v[i].sw_id_offset = (u32) skb;
lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
@@ -777,10 +767,9 @@ static void axienet_recv(struct net_device *ndev)
packets++;
new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
- if (!new_skb) {
- dev_err(&ndev->dev, "no memory for new sk_buff\n");
+ if (!new_skb)
return;
- }
+
cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
lp->max_frm_size,
DMA_FROM_DEVICE);
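
The skb-allocation hunks follow the same rule: netdev_alloc_skb_ip_align() failures no longer get a dev_err(), because the core allocator already logs OOM conditions and a second message is noise. The failure path shrinks to a bare NULL check, roughly:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* sketch: frame_size stands in for a driver-specific maximum */
	static struct sk_buff *foo_rx_alloc(struct net_device *ndev,
					    unsigned int frame_size)
	{
		struct sk_buff *skb = netdev_alloc_skb_ip_align(ndev, frame_size);

		/* no dev_err() on failure: the allocator already logged
		 * the OOM, so callers just test for NULL */
		return skb;
	}
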
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 98e09d0d3ce2..bdd20b888cf6 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1041,7 +1041,6 @@ xirc2ps_interrupt(int irq, void *dev_id)
/* 1 extra so we can use insw */
skb = netdev_alloc_skb(dev, pktlen + 3);
if (!skb) {
- pr_notice("low memory, packet dropped (size=%u)\n", pktlen);
dev->stats.rx_dropped++;
} else { /* okay get the packet */
skb_reserve(skb, 2);
@@ -1775,21 +1774,7 @@ static struct pcmcia_driver xirc2ps_cs_driver = {
.suspend = xirc2ps_suspend,
.resume = xirc2ps_resume,
};
-
-static int __init
-init_xirc2ps_cs(void)
-{
- return pcmcia_register_driver(&xirc2ps_cs_driver);
-}
-
-static void __exit
-exit_xirc2ps_cs(void)
-{
- pcmcia_unregister_driver(&xirc2ps_cs_driver);
-}
-
-module_init(init_xirc2ps_cs);
-module_exit(exit_xirc2ps_cs);
+module_pcmcia_driver(xirc2ps_cs_driver);
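
module_pcmcia_driver() is the pcmcia instance of the module_driver() helper family: it generates exactly the __init/__exit register/unregister pair deleted above. The usage pattern, sketched with a hypothetical foo_cs driver:

	#include <linux/module.h>
	#include <pcmcia/ds.h>

	static struct pcmcia_driver foo_cs_driver = {
		.owner	= THIS_MODULE,
		.name	= "foo_cs",
		/* .probe, .remove, .suspend, .resume as needed */
	};

	/* expands to module_init()/module_exit() stubs that call
	 * pcmcia_register_driver()/pcmcia_unregister_driver() */
	module_pcmcia_driver(foo_cs_driver);
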
#ifndef MODULE
static int __init setup_xirc2ps_cs(char *str)