author    Arnd Bergmann <arnd@arndb.de>    2012-10-04 22:57:00 +0200
committer Arnd Bergmann <arnd@arndb.de>    2012-10-04 22:57:51 +0200
commit    c37d6154c0b9163c27e53cc1d0be3867b4abd760 (patch)
tree      7a24522c56d1cb284dff1d3c225bbdaba0901bb5 /drivers/net
parent    asm-generic: Add default clkdev.h (diff)
parent    UAPI: (Scripted) Disintegrate include/asm-generic (diff)
download  linux-c37d6154c0b9163c27e53cc1d0be3867b4abd760.tar.xz
          linux-c37d6154c0b9163c27e53cc1d0be3867b4abd760.zip
Merge branch 'disintegrate-asm-generic' of git://git.infradead.org/users/dhowells/linux-headers into asm-generic
Patches from David Howells <dhowells@redhat.com>:

This is to complete part of the UAPI disintegration for which the
preparatory patches were pulled recently. Note that there are some fixup
patches which are at the base of the branch aimed at you, plus all arches
get the asm-generic branch merged in too.

* 'disintegrate-asm-generic' of git://git.infradead.org/users/dhowells/linux-headers:
  UAPI: (Scripted) Disintegrate include/asm-generic
  UAPI: Fix conditional header installation handling (notably kvm_para.h on m68k)
  c6x: remove c6x signal.h
  UAPI: Split compound conditionals containing __KERNEL__ in Arm64
  UAPI: Fix the guards on various asm/unistd.h files

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
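For readers unfamiliar with the disintegration pattern being merged here: each exported header is split so that its userspace-visible definitions move under include/uapi/, while the kernel-side header keeps only the __KERNEL__-only parts and includes the uapi copy. A minimal hypothetical sketch of that split (example.h is illustrative only, not a file touched by this merge):

    /* Before: include/asm-generic/example.h mixes exported and kernel-only parts. */
    #ifndef __ASM_GENERIC_EXAMPLE_H
    #define __ASM_GENERIC_EXAMPLE_H

    #define EXAMPLE_MAGIC 0x45        /* visible to userspace */

    #ifdef __KERNEL__
    extern int example_setup(void);   /* kernel-internal */
    #endif

    #endif /* __ASM_GENERIC_EXAMPLE_H */

    /* After: the exported part lives in include/uapi/asm-generic/example.h,
     * and the kernel-side header includes it and drops the __KERNEL__ guard.
     */
    #ifndef __ASM_GENERIC_EXAMPLE_H
    #define __ASM_GENERIC_EXAMPLE_H

    #include <uapi/asm-generic/example.h>

    extern int example_setup(void);   /* kernel-internal */

    #endif /* __ASM_GENERIC_EXAMPLE_H */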
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig | 17
-rw-r--r--  drivers/net/Makefile | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 140
-rw-r--r--  drivers/net/can/c_can/c_can.c | 130
-rw-r--r--  drivers/net/can/c_can/c_can.h | 14
-rw-r--r--  drivers/net/can/c_can/c_can_pci.c | 6
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 123
-rw-r--r--  drivers/net/can/flexcan.c | 29
-rw-r--r--  drivers/net/can/mscan/mpc5xxx_can.c | 8
-rw-r--r--  drivers/net/can/sja1000/sja1000.c | 31
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.c | 8
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.h | 2
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 8
-rw-r--r--  drivers/net/ethernet/3com/typhoon.c | 2
-rw-r--r--  drivers/net/ethernet/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 24
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 58
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 109
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 1701
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 54
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 14
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 35
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 116
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.h | 5
-rw-r--r--  drivers/net/ethernet/broadcom/cnic_defs.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/cnic_if.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 586
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 9
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/t3_hw.c | 22
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 51
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 966
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 341
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 740
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 80
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 185
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 97
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 5
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 55
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 6
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 57
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 98
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 7
-rw-r--r--  drivers/net/ethernet/freescale/Makefile | 1
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c | 549
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.h | 52
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 12
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 11
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/xgmac_mdio.c | 274
-rw-r--r--  drivers/net/ethernet/i825xx/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/i825xx/znet.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 39
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 16
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 44
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 48
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 31
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 17
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 11
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c | 29
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.h | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_regs.h | 3
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 41
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 198
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 725
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 677
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 35
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 300
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 577
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 105
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/defines.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 9
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 274
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.c | 15
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.h | 21
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 122
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 242
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 245
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 246
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 173
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 59
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 100
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/reset.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 222
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/sense.c | 2
-rw-r--r--  drivers/net/ethernet/mipsnet.c | 345
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 31
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 2
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-config.c | 4
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 4
-rw-r--r--  drivers/net/ethernet/netx-eth.c | 2
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 17
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 48
-rw-r--r--  drivers/net/ethernet/seeq/ether3.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/Kconfig | 7
-rw-r--r--  drivers/net/ethernet/sfc/Makefile | 1
-rw-r--r--  drivers/net/ethernet/sfc/bitfield.h | 22
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 250
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/sfc/falcon_boards.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/filter.c | 108
-rw-r--r--  drivers/net/ethernet/sfc/filter.h | 7
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c | 49
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h | 12
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h | 29
-rw-r--r--  drivers/net/ethernet/sfc/mtd.c | 7
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 78
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 36
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 1484
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 20
-rw-r--r--  drivers/net/ethernet/sfc/selftest.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/siena.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/siena_sriov.c | 8
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 627
-rw-r--r--  drivers/net/ethernet/sgi/ioc3-eth.c | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 11
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 39
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 2
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 21
-rw-r--r--  drivers/net/ethernet/sun/sunbmac.c | 1
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 179
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 41
-rw-r--r--  drivers/net/ethernet/tundra/tsi108_eth.c | 1
-rw-r--r--  drivers/net/ethernet/wiznet/w5100.c | 3
-rw-r--r--  drivers/net/ethernet/wiznet/w5300.c | 3
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 4
-rw-r--r--  drivers/net/hyperv/netvsc.c | 22
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 4
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 60
-rw-r--r--  drivers/net/ieee802154/Kconfig | 47
-rw-r--r--  drivers/net/ieee802154/Makefile | 4
-rw-r--r--  drivers/net/ieee802154/at86rf230.c | 958
-rw-r--r--  drivers/net/ieee802154/fakehard.c | 448
-rw-r--r--  drivers/net/ieee802154/fakelb.c | 294
-rw-r--r--  drivers/net/ieee802154/mrf24j40.c | 767
-rw-r--r--  drivers/net/irda/irtty-sir.c | 10
-rw-r--r--  drivers/net/irda/pxaficp_ir.c | 30
-rw-r--r--  drivers/net/loopback.c | 3
-rw-r--r--  drivers/net/macvlan.c | 6
-rw-r--r--  drivers/net/phy/Kconfig | 13
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/dp83640.c | 2
-rw-r--r--  drivers/net/phy/lxt.c | 127
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 132
-rw-r--r--  drivers/net/phy/mdio-mux-mmioreg.c | 171
-rw-r--r--  drivers/net/phy/phy.c | 74
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 58
-rw-r--r--  drivers/net/team/Kconfig | 4
-rw-r--r--  drivers/net/team/team.c | 342
-rw-r--r--  drivers/net/team/team_mode_broadcast.c | 8
-rw-r--r--  drivers/net/team/team_mode_roundrobin.c | 8
-rw-r--r--  drivers/net/tun.c | 47
-rw-r--r--  drivers/net/usb/asix_devices.c | 40
-rw-r--r--  drivers/net/usb/catc.c | 55
-rw-r--r--  drivers/net/usb/cx82310_eth.c | 11
-rw-r--r--  drivers/net/usb/gl620a.c | 10
-rw-r--r--  drivers/net/usb/hso.c | 19
-rw-r--r--  drivers/net/usb/kaweth.c | 134
-rw-r--r--  drivers/net/usb/net1080.c | 51
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 47
-rw-r--r--  drivers/net/usb/rtl8150.c | 6
-rw-r--r--  drivers/net/usb/sierra_net.c | 25
-rw-r--r--  drivers/net/usb/smsc75xx.c | 240
-rw-r--r--  drivers/net/usb/smsc95xx.c | 560
-rw-r--r--  drivers/net/usb/smsc95xx.h | 12
-rw-r--r--  drivers/net/veth.c | 3
-rw-r--r--  drivers/net/virtio_net.c | 14
-rw-r--r--  drivers/net/vxlan.c | 1219
-rw-r--r--  drivers/net/wimax/i2400m/driver.c | 3
-rw-r--r--  drivers/net/wireless/adm8211.c | 4
-rw-r--r--  drivers/net/wireless/airo.c | 55
-rw-r--r--  drivers/net/wireless/at76c50x-usb.c | 58
-rw-r--r--  drivers/net/wireless/ath/ath.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath5k/ath5k.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath5k/mac80211-ops.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath5k/phy.c | 45
-rw-r--r--  drivers/net/wireless/ath/ath6kl/cfg80211.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath6kl/cfg80211.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ani.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/antenna.c | 117
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 288
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_hw.c | 57
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_mac.c | 21
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_mci.c | 43
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_mci.h | 14
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.c | 197
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.h | 95
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h | 1231
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 12
-rw-r--r--  drivers/net/wireless/ath/ath9k/btcoex.c | 65
-rw-r--r--  drivers/net/wireless/ath/ath9k/btcoex.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.c | 58
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/gpio.c | 72
-rw-r--r--  drivers/net/wireless/ath/ath9k/hif_usb.c | 38
-rw-r--r--  drivers/net/wireless/ath/ath9k/hif_usb.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_beacon.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_gpio.c | 13
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_init.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 51
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw-ops.h | 7
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 57
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 15
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 66
-rw-r--r--  drivers/net/wireless/ath/ath9k/mci.c | 94
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c | 25
-rw-r--r--  drivers/net/wireless/ath/ath9k/rc.c | 819
-rw-r--r--  drivers/net/wireless/ath/ath9k/rc.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/reg.h | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/wow.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 15
-rw-r--r--  drivers/net/wireless/ath/carl9170/carl9170.h | 5
-rw-r--r--  drivers/net/wireless/ath/carl9170/fw.c | 1
-rw-r--r--  drivers/net/wireless/ath/carl9170/mac.c | 5
-rw-r--r--  drivers/net/wireless/ath/carl9170/main.c | 6
-rw-r--r--  drivers/net/wireless/ath/carl9170/rx.c | 16
-rw-r--r--  drivers/net/wireless/ath/carl9170/tx.c | 16
-rw-r--r--  drivers/net/wireless/b43/Kconfig | 4
-rw-r--r--  drivers/net/wireless/b43/Makefile | 1
-rw-r--r--  drivers/net/wireless/b43/b43.h | 10
-rw-r--r--  drivers/net/wireless/b43/main.c | 54
-rw-r--r--  drivers/net/wireless/b43/phy_common.c | 17
-rw-r--r--  drivers/net/wireless/b43/phy_common.h | 6
-rw-r--r--  drivers/net/wireless/b43/phy_n.c | 668
-rw-r--r--  drivers/net/wireless/b43/phy_n.h | 1
-rw-r--r--  drivers/net/wireless/b43/radio_2057.c | 141
-rw-r--r--  drivers/net/wireless/b43/radio_2057.h | 430
-rw-r--r--  drivers/net/wireless/b43/tables_nphy.c | 75
-rw-r--r--  drivers/net/wireless/b43/tables_nphy.h | 10
-rw-r--r--  drivers/net/wireless/b43legacy/main.c | 5
-rw-r--r--  drivers/net/wireless/brcm80211/Kconfig | 8
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c | 39
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c | 27
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd.h | 62
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c | 73
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | 65
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | 1047
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c | 17
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/usb.c | 353
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 3135
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h | 296
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/aiutils.c | 3
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | 15
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/main.c | 13
-rw-r--r--  drivers/net/wireless/brcm80211/include/brcm_hw_ids.h | 1
-rw-r--r--  drivers/net/wireless/brcm80211/include/brcmu_wifi.h | 5
-rw-r--r--  drivers/net/wireless/hostap/hostap_ap.c | 4
-rw-r--r--  drivers/net/wireless/hostap/hostap_hw.c | 10
-rw-r--r--  drivers/net/wireless/hostap/hostap_info.c | 4
-rw-r--r--  drivers/net/wireless/hostap/hostap_ioctl.c | 15
-rw-r--r--  drivers/net/wireless/hostap/hostap_main.c | 2
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2100.c | 19
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.c | 11
-rw-r--r--  drivers/net/wireless/ipw2x00/libipw_wx.c | 2
-rw-r--r--  drivers/net/wireless/iwlegacy/3945-mac.c | 12
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-mac.c | 26
-rw-r--r--  drivers/net/wireless/iwlegacy/4965.h | 8
-rw-r--r--  drivers/net/wireless/iwlegacy/common.c | 19
-rw-r--r--  drivers/net/wireless/iwlegacy/common.h | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/agn.h | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/commands.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/debugfs.c | 56
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/dev.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/mac80211.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/main.c | 24
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rx.c | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/scan.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/sta.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/tx.c | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/ucode.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-devtrace.h | 34
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c | 167
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.h | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fw.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-op-mode.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans.h | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 19
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c | 91
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 119
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 51
-rw-r--r--  drivers/net/wireless/libertas/cmd.c | 16
-rw-r--r--  drivers/net/wireless/libertas/cmd.h | 1
-rw-r--r--  drivers/net/wireless/libertas/main.c | 4
-rw-r--r--  drivers/net/wireless/libertas_tf/main.c | 4
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 59
-rw-r--r--  drivers/net/wireless/mwifiex/11n.c | 64
-rw-r--r--  drivers/net/wireless/mwifiex/11n.h | 20
-rw-r--r--  drivers/net/wireless/mwifiex/11n_aggr.c | 14
-rw-r--r--  drivers/net/wireless/mwifiex/11n_rxreorder.c | 115
-rw-r--r--  drivers/net/wireless/mwifiex/11n_rxreorder.h | 10
-rw-r--r--  drivers/net/wireless/mwifiex/Makefile | 2
-rw-r--r--  drivers/net/wireless/mwifiex/cfg80211.c | 460
-rw-r--r--  drivers/net/wireless/mwifiex/cmdevt.c | 7
-rw-r--r--  drivers/net/wireless/mwifiex/decl.h | 9
-rw-r--r--  drivers/net/wireless/mwifiex/fw.h | 93
-rw-r--r--  drivers/net/wireless/mwifiex/ie.c | 88
-rw-r--r--  drivers/net/wireless/mwifiex/init.c | 126
-rw-r--r--  drivers/net/wireless/mwifiex/ioctl.h | 14
-rw-r--r--  drivers/net/wireless/mwifiex/main.c | 39
-rw-r--r--  drivers/net/wireless/mwifiex/main.h | 87
-rw-r--r--  drivers/net/wireless/mwifiex/scan.c | 15
-rw-r--r--  drivers/net/wireless/mwifiex/sta_cmd.c | 150
-rw-r--r--  drivers/net/wireless/mwifiex/sta_cmdresp.c | 77
-rw-r--r--  drivers/net/wireless/mwifiex/sta_event.c | 74
-rw-r--r--  drivers/net/wireless/mwifiex/sta_ioctl.c | 124
-rw-r--r--  drivers/net/wireless/mwifiex/sta_rx.c | 44
-rw-r--r--  drivers/net/wireless/mwifiex/sta_tx.c | 12
-rw-r--r--  drivers/net/wireless/mwifiex/txrx.c | 11
-rw-r--r--  drivers/net/wireless/mwifiex/uap_cmd.c | 62
-rw-r--r--  drivers/net/wireless/mwifiex/uap_event.c | 290
-rw-r--r--  drivers/net/wireless/mwifiex/uap_txrx.c | 340
-rw-r--r--  drivers/net/wireless/mwifiex/util.c | 40
-rw-r--r--  drivers/net/wireless/mwifiex/wmm.c | 77
-rw-r--r--  drivers/net/wireless/mwl8k.c | 17
-rw-r--r--  drivers/net/wireless/orinoco/wext.c | 7
-rw-r--r--  drivers/net/wireless/p54/eeprom.c | 108
-rw-r--r--  drivers/net/wireless/p54/eeprom.h | 12
-rw-r--r--  drivers/net/wireless/p54/lmac.h | 4
-rw-r--r--  drivers/net/wireless/p54/main.c | 15
-rw-r--r--  drivers/net/wireless/p54/p54pci.c | 88
-rw-r--r--  drivers/net/wireless/p54/p54pci.h | 1
-rw-r--r--  drivers/net/wireless/p54/txrx.c | 15
-rw-r--r--  drivers/net/wireless/rndis_wlan.c | 3
-rw-r--r--  drivers/net/wireless/rt2x00/rt2400pci.c | 5
-rw-r--r--  drivers/net/wireless/rt2x00/rt2400pci.h | 27
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500pci.c | 3
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500pci.h | 18
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.c | 5
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.h | 27
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800.h | 52
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 397
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.h | 22
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800pci.c | 83
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 62
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00.h | 20
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00dev.c | 35
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00mac.c | 44
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 20
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c | 13
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.h | 28
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 5
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.h | 34
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180/dev.c | 6
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187/dev.c | 6
-rw-r--r--  drivers/net/wireless/rtlwifi/Kconfig | 8
-rw-r--r--  drivers/net/wireless/rtlwifi/base.c | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/core.c | 8
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 24
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c | 41
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c | 9
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/hw.c | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/sw.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/trx.c | 5
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/trx.h | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/trx.c | 7
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/trx.h | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/dm.c | 10
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/fw.c | 9
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/phy.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/trx.c | 5
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/trx.h | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/trx.c | 11
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/trx.h | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/usb.c | 17
-rw-r--r--  drivers/net/wireless/rtlwifi/wifi.h | 121
-rw-r--r--  drivers/net/wireless/ti/wl1251/main.c | 4
-rw-r--r--  drivers/net/wireless/ti/wl12xx/main.c | 79
-rw-r--r--  drivers/net/wireless/ti/wl12xx/wl12xx.h | 7
-rw-r--r--  drivers/net/wireless/ti/wl18xx/debugfs.c | 2
-rw-r--r--  drivers/net/wireless/ti/wl18xx/main.c | 129
-rw-r--r--  drivers/net/wireless/ti/wl18xx/wl18xx.h | 7
-rw-r--r--  drivers/net/wireless/ti/wlcore/cmd.c | 21
-rw-r--r--  drivers/net/wireless/ti/wlcore/cmd.h | 5
-rw-r--r--  drivers/net/wireless/ti/wlcore/conf.h | 3
-rw-r--r--  drivers/net/wireless/ti/wlcore/debug.h | 16
-rw-r--r--  drivers/net/wireless/ti/wlcore/debugfs.c | 32
-rw-r--r--  drivers/net/wireless/ti/wlcore/init.c | 12
-rw-r--r--  drivers/net/wireless/ti/wlcore/io.h | 4
-rw-r--r--  drivers/net/wireless/ti/wlcore/main.c | 372
-rw-r--r--  drivers/net/wireless/ti/wlcore/ps.c | 10
-rw-r--r--  drivers/net/wireless/ti/wlcore/rx.c | 2
-rw-r--r--  drivers/net/wireless/ti/wlcore/scan.c | 20
-rw-r--r--  drivers/net/wireless/ti/wlcore/spi.c | 10
-rw-r--r--  drivers/net/wireless/ti/wlcore/testmode.c | 4
-rw-r--r--  drivers/net/wireless/ti/wlcore/tx.c | 112
-rw-r--r--  drivers/net/wireless/ti/wlcore/tx.h | 4
-rw-r--r--  drivers/net/wireless/ti/wlcore/wlcore.h | 23
-rw-r--r--  drivers/net/wireless/ti/wlcore/wlcore_i.h | 13
-rw-r--r--  drivers/net/wireless/wl3501_cs.c | 3
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c | 9
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 3
-rw-r--r--  drivers/net/xen-netback/netback.c | 11
-rw-r--r--  drivers/net/xen-netfront.c | 2
441 files changed, 27555 insertions, 9817 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0c2bd806950e..6a70184c3f23 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -107,8 +107,6 @@ config MII
or internal device. It is safe to say Y or M here even if your
ethernet card lacks MII.
-source "drivers/ieee802154/Kconfig"
-
config IFB
tristate "Intermediate Functional Block support"
depends on NET_CLS_ACT
@@ -151,6 +149,19 @@ config MACVTAP
To compile this driver as a module, choose M here: the module
will be called macvtap.
+config VXLAN
+ tristate "Virtual eXtensible Local Area Network (VXLAN)"
+ depends on EXPERIMENTAL && INET
+ ---help---
+ This allows one to create vxlan virtual interfaces that provide
+ Layer 2 Networks over Layer 3 Networks. VXLAN is often used
+ to tunnel virtual network infrastructure in virtualized environments.
+ For more information see:
+ http://tools.ietf.org/html/draft-mahalingam-dutt-dcops-vxlan-02
+
+ To compile this driver as a module, choose M here: the module
+ will be called vxlan.
+
config NETCONSOLE
tristate "Network console logging support"
---help---
@@ -290,6 +301,8 @@ source "drivers/net/wimax/Kconfig"
source "drivers/net/wan/Kconfig"
+source "drivers/net/ieee802154/Kconfig"
+
config XEN_NETDEV_FRONTEND
tristate "Xen network device frontend driver"
depends on XEN
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 3d375ca128a6..335db78fd987 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_TEAM) += team/
obj-$(CONFIG_TUN) += tun.o
obj-$(CONFIG_VETH) += veth.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
+obj-$(CONFIG_VXLAN) += vxlan.o
#
# Networking Drivers
@@ -53,6 +54,7 @@ obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
obj-$(CONFIG_WAN) += wan/
obj-$(CONFIG_WLAN) += wireless/
obj-$(CONFIG_WIMAX) += wimax/
+obj-$(CONFIG_IEEE802154) += ieee802154/
obj-$(CONFIG_VMXNET3) += vmxnet3/
obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d688a8af432c..7858c58df4a3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1120,10 +1120,10 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
- netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER);
+ call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
if (should_notify_peers)
- netdev_bonding_change(bond->dev,
- NETDEV_NOTIFY_PEERS);
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+ bond->dev);
read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
@@ -1558,8 +1558,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_dev->name,
bond_dev->type, slave_dev->type);
- res = netdev_bonding_change(bond_dev,
- NETDEV_PRE_TYPE_CHANGE);
+ res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
+ bond_dev);
res = notifier_to_errno(res);
if (res) {
pr_err("%s: refused to change device type\n",
@@ -1579,8 +1579,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
}
- netdev_bonding_change(bond_dev,
- NETDEV_POST_TYPE_CHANGE);
+ call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
+ bond_dev);
}
} else if (bond_dev->type != slave_dev->type) {
pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
@@ -1941,7 +1941,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
}
block_netpoll_tx();
- netdev_bonding_change(bond_dev, NETDEV_RELEASE);
+ call_netdevice_notifiers(NETDEV_RELEASE, bond_dev);
write_lock_bh(&bond->lock);
slave = bond_get_slave_by_dev(bond, slave_dev);
@@ -2584,7 +2584,7 @@ re_arm:
read_unlock(&bond->lock);
return;
}
- netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS);
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
rtnl_unlock();
}
}
@@ -2811,12 +2811,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
arp_work.work);
struct slave *slave, *oldcurrent;
int do_failover = 0;
- int delta_in_ticks;
+ int delta_in_ticks, extra_ticks;
int i;
read_lock(&bond->lock);
delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
+ extra_ticks = delta_in_ticks / 2;
if (bond->slave_cnt == 0)
goto re_arm;
@@ -2839,10 +2840,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
if (slave->link != BOND_LINK_UP) {
if (time_in_range(jiffies,
trans_start - delta_in_ticks,
- trans_start + delta_in_ticks) &&
+ trans_start + delta_in_ticks + extra_ticks) &&
time_in_range(jiffies,
slave->dev->last_rx - delta_in_ticks,
- slave->dev->last_rx + delta_in_ticks)) {
+ slave->dev->last_rx + delta_in_ticks + extra_ticks)) {
slave->link = BOND_LINK_UP;
bond_set_active_slave(slave);
@@ -2872,10 +2873,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
*/
if (!time_in_range(jiffies,
trans_start - delta_in_ticks,
- trans_start + 2 * delta_in_ticks) ||
+ trans_start + 2 * delta_in_ticks + extra_ticks) ||
!time_in_range(jiffies,
slave->dev->last_rx - delta_in_ticks,
- slave->dev->last_rx + 2 * delta_in_ticks)) {
+ slave->dev->last_rx + 2 * delta_in_ticks + extra_ticks)) {
slave->link = BOND_LINK_DOWN;
bond_set_backup_slave(slave);
@@ -2933,6 +2934,14 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
struct slave *slave;
int i, commit = 0;
unsigned long trans_start;
+ int extra_ticks;
+
+ /* All the time comparisons below need some extra time. Otherwise, on
+ * fast networks the ARP probe/reply may arrive within the same jiffy
+ * as it was sent. Then, the next time the ARP monitor is run, one
+ * arp_interval will already have passed in the comparisons.
+ */
+ extra_ticks = delta_in_ticks / 2;
bond_for_each_slave(bond, slave, i) {
slave->new_link = BOND_LINK_NOCHANGE;
@@ -2940,7 +2949,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
if (slave->link != BOND_LINK_UP) {
if (time_in_range(jiffies,
slave_last_rx(bond, slave) - delta_in_ticks,
- slave_last_rx(bond, slave) + delta_in_ticks)) {
+ slave_last_rx(bond, slave) + delta_in_ticks + extra_ticks)) {
slave->new_link = BOND_LINK_UP;
commit++;
@@ -2956,7 +2965,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
*/
if (time_in_range(jiffies,
slave->jiffies - delta_in_ticks,
- slave->jiffies + 2 * delta_in_ticks))
+ slave->jiffies + 2 * delta_in_ticks + extra_ticks))
continue;
/*
@@ -2976,7 +2985,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
!bond->current_arp_slave &&
!time_in_range(jiffies,
slave_last_rx(bond, slave) - delta_in_ticks,
- slave_last_rx(bond, slave) + 3 * delta_in_ticks)) {
+ slave_last_rx(bond, slave) + 3 * delta_in_ticks + extra_ticks)) {
slave->new_link = BOND_LINK_DOWN;
commit++;
@@ -2992,10 +3001,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
if (bond_is_active_slave(slave) &&
(!time_in_range(jiffies,
trans_start - delta_in_ticks,
- trans_start + 2 * delta_in_ticks) ||
+ trans_start + 2 * delta_in_ticks + extra_ticks) ||
!time_in_range(jiffies,
slave_last_rx(bond, slave) - delta_in_ticks,
- slave_last_rx(bond, slave) + 2 * delta_in_ticks))) {
+ slave_last_rx(bond, slave) + 2 * delta_in_ticks + extra_ticks))) {
slave->new_link = BOND_LINK_DOWN;
commit++;
@@ -3027,7 +3036,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
if ((!bond->curr_active_slave &&
time_in_range(jiffies,
trans_start - delta_in_ticks,
- trans_start + delta_in_ticks)) ||
+ trans_start + delta_in_ticks + delta_in_ticks / 2)) ||
bond->curr_active_slave != slave) {
slave->link = BOND_LINK_UP;
if (bond->current_arp_slave) {
@@ -3203,7 +3212,7 @@ re_arm:
read_unlock(&bond->lock);
return;
}
- netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS);
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
rtnl_unlock();
}
}
@@ -3352,56 +3361,93 @@ static struct notifier_block bond_netdev_notifier = {
/*---------------------------- Hashing Policies -----------------------------*/
/*
+ * Hash for the output device based upon layer 2 data
+ */
+static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
+{
+ struct ethhdr *data = (struct ethhdr *)skb->data;
+
+ if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto))
+ return (data->h_dest[5] ^ data->h_source[5]) % count;
+
+ return 0;
+}
+
+/*
* Hash for the output device based upon layer 2 and layer 3 data. If
- * the packet is not IP mimic bond_xmit_hash_policy_l2()
+ * the packet is not IP, fall back on bond_xmit_hash_policy_l2()
*/
static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
{
struct ethhdr *data = (struct ethhdr *)skb->data;
- struct iphdr *iph = ip_hdr(skb);
-
- if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
+ u32 v6hash;
+ __be32 *s, *d;
+
+ if (skb->protocol == htons(ETH_P_IP) &&
+ skb_network_header_len(skb) >= sizeof(*iph)) {
+ iph = ip_hdr(skb);
return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
(data->h_dest[5] ^ data->h_source[5])) % count;
+ } else if (skb->protocol == htons(ETH_P_IPV6) &&
+ skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+ ipv6h = ipv6_hdr(skb);
+ s = &ipv6h->saddr.s6_addr32[0];
+ d = &ipv6h->daddr.s6_addr32[0];
+ v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
+ v6hash ^= (v6hash >> 24) ^ (v6hash >> 16) ^ (v6hash >> 8);
+ return (v6hash ^ data->h_dest[5] ^ data->h_source[5]) % count;
}
- return (data->h_dest[5] ^ data->h_source[5]) % count;
+ return bond_xmit_hash_policy_l2(skb, count);
}
/*
* Hash for the output device based upon layer 3 and layer 4 data. If
* the packet is a frag or not TCP or UDP, just use layer 3 data. If it is
- * altogether not IP, mimic bond_xmit_hash_policy_l2()
+ * altogether not IP, fall back on bond_xmit_hash_policy_l2()
*/
static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
{
- struct ethhdr *data = (struct ethhdr *)skb->data;
- struct iphdr *iph = ip_hdr(skb);
- __be16 *layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
- int layer4_xor = 0;
-
- if (skb->protocol == htons(ETH_P_IP)) {
+ u32 layer4_xor = 0;
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
+ __be32 *s, *d;
+ __be16 *layer4hdr;
+
+ if (skb->protocol == htons(ETH_P_IP) &&
+ skb_network_header_len(skb) >= sizeof(*iph)) {
+ iph = ip_hdr(skb);
if (!ip_is_fragment(iph) &&
(iph->protocol == IPPROTO_TCP ||
- iph->protocol == IPPROTO_UDP)) {
- layer4_xor = ntohs((*layer4hdr ^ *(layer4hdr + 1)));
+ iph->protocol == IPPROTO_UDP) &&
+ (skb_headlen(skb) - skb_network_offset(skb) >=
+ iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) {
+ layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
+ layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
}
return (layer4_xor ^
((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
-
+ } else if (skb->protocol == htons(ETH_P_IPV6) &&
+ skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+ ipv6h = ipv6_hdr(skb);
+ if ((ipv6h->nexthdr == IPPROTO_TCP ||
+ ipv6h->nexthdr == IPPROTO_UDP) &&
+ (skb_headlen(skb) - skb_network_offset(skb) >=
+ sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) {
+ layer4hdr = (__be16 *)(ipv6h + 1);
+ layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
+ }
+ s = &ipv6h->saddr.s6_addr32[0];
+ d = &ipv6h->daddr.s6_addr32[0];
+ layer4_xor ^= (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
+ layer4_xor ^= (layer4_xor >> 24) ^ (layer4_xor >> 16) ^
+ (layer4_xor >> 8);
+ return layer4_xor % count;
}
- return (data->h_dest[5] ^ data->h_source[5]) % count;
-}
-
-/*
- * Hash for the output device based upon layer 2 data
- */
-static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
-{
- struct ethhdr *data = (struct ethhdr *)skb->data;
-
- return (data->h_dest[5] ^ data->h_source[5]) % count;
+ return bond_xmit_hash_policy_l2(skb, count);
}
/*-------------------------- Device entry points ----------------------------*/
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 4c538e388655..e5180dfddba5 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -34,6 +34,7 @@
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/io.h>
+#include <linux/pm_runtime.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -45,6 +46,9 @@
#define IF_ENUM_REG_LEN 11
#define C_CAN_IFACE(reg, iface) (C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)
+/* control extension register D_CAN specific */
+#define CONTROL_EX_PDR BIT(8)
+
/* control register */
#define CONTROL_TEST BIT(7)
#define CONTROL_CCE BIT(6)
@@ -64,6 +68,7 @@
#define TEST_BASIC BIT(2)
/* status register */
+#define STATUS_PDA BIT(10)
#define STATUS_BOFF BIT(7)
#define STATUS_EWARN BIT(6)
#define STATUS_EPASS BIT(5)
@@ -163,6 +168,9 @@
/* minimum timeout for checking BUSY status */
#define MIN_TIMEOUT_VALUE 6
+/* Wait for ~1 sec for INIT bit */
+#define INIT_WAIT_MS 1000
+
/* napi related */
#define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM
@@ -201,6 +209,30 @@ static const struct can_bittiming_const c_can_bittiming_const = {
.brp_inc = 1,
};
+static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
+{
+ if (priv->device)
+ pm_runtime_enable(priv->device);
+}
+
+static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
+{
+ if (priv->device)
+ pm_runtime_disable(priv->device);
+}
+
+static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
+{
+ if (priv->device)
+ pm_runtime_get_sync(priv->device);
+}
+
+static inline void c_can_pm_runtime_put_sync(const struct c_can_priv *priv)
+{
+ if (priv->device)
+ pm_runtime_put_sync(priv->device);
+}
+
static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
{
return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
@@ -673,11 +705,15 @@ static int c_can_get_berr_counter(const struct net_device *dev,
unsigned int reg_err_counter;
struct c_can_priv *priv = netdev_priv(dev);
+ c_can_pm_runtime_get_sync(priv);
+
reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
ERR_CNT_REC_SHIFT;
bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
+ c_can_pm_runtime_put_sync(priv);
+
return 0;
}
@@ -1053,11 +1089,13 @@ static int c_can_open(struct net_device *dev)
int err;
struct c_can_priv *priv = netdev_priv(dev);
+ c_can_pm_runtime_get_sync(priv);
+
/* open the can device */
err = open_candev(dev);
if (err) {
netdev_err(dev, "failed to open can device\n");
- return err;
+ goto exit_open_fail;
}
/* register interrupt handler */
@@ -1079,6 +1117,8 @@ static int c_can_open(struct net_device *dev)
exit_irq_fail:
close_candev(dev);
+exit_open_fail:
+ c_can_pm_runtime_put_sync(priv);
return err;
}
@@ -1091,6 +1131,7 @@ static int c_can_close(struct net_device *dev)
c_can_stop(dev);
free_irq(dev->irq, dev);
close_candev(dev);
+ c_can_pm_runtime_put_sync(priv);
return 0;
}
@@ -1119,6 +1160,77 @@ struct net_device *alloc_c_can_dev(void)
}
EXPORT_SYMBOL_GPL(alloc_c_can_dev);
+#ifdef CONFIG_PM
+int c_can_power_down(struct net_device *dev)
+{
+ u32 val;
+ unsigned long time_out;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ if (!(dev->flags & IFF_UP))
+ return 0;
+
+ WARN_ON(priv->type != BOSCH_D_CAN);
+
+ /* set PDR value so the device goes to power down mode */
+ val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
+ val |= CONTROL_EX_PDR;
+ priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
+
+ /* Wait for the PDA bit to get set */
+ time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
+ while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
+ time_after(time_out, jiffies))
+ cpu_relax();
+
+ if (time_after(jiffies, time_out))
+ return -ETIMEDOUT;
+
+ c_can_stop(dev);
+
+ c_can_pm_runtime_put_sync(priv);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(c_can_power_down);
+
+int c_can_power_up(struct net_device *dev)
+{
+ u32 val;
+ unsigned long time_out;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ if (!(dev->flags & IFF_UP))
+ return 0;
+
+ WARN_ON(priv->type != BOSCH_D_CAN);
+
+ c_can_pm_runtime_get_sync(priv);
+
+ /* Clear PDR and INIT bits */
+ val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
+ val &= ~CONTROL_EX_PDR;
+ priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
+ val = priv->read_reg(priv, C_CAN_CTRL_REG);
+ val &= ~CONTROL_INIT;
+ priv->write_reg(priv, C_CAN_CTRL_REG, val);
+
+ /* Wait for the PDA bit to get clear */
+ time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
+ while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
+ time_after(time_out, jiffies))
+ cpu_relax();
+
+ if (time_after(jiffies, time_out))
+ return -ETIMEDOUT;
+
+ c_can_start(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(c_can_power_up);
+#endif
+
void free_c_can_dev(struct net_device *dev)
{
free_candev(dev);
@@ -1133,10 +1245,19 @@ static const struct net_device_ops c_can_netdev_ops = {
int register_c_can_dev(struct net_device *dev)
{
+ struct c_can_priv *priv = netdev_priv(dev);
+ int err;
+
+ c_can_pm_runtime_enable(priv);
+
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &c_can_netdev_ops;
- return register_candev(dev);
+ err = register_candev(dev);
+ if (err)
+ c_can_pm_runtime_disable(priv);
+
+ return err;
}
EXPORT_SYMBOL_GPL(register_c_can_dev);
@@ -1144,10 +1265,9 @@ void unregister_c_can_dev(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
- /* disable all interrupts */
- c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
-
unregister_candev(dev);
+
+ c_can_pm_runtime_disable(priv);
}
EXPORT_SYMBOL_GPL(unregister_c_can_dev);
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 01a7049ab990..e5ed41dafa1b 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -24,6 +24,7 @@
enum reg {
C_CAN_CTRL_REG = 0,
+ C_CAN_CTRL_EX_REG,
C_CAN_STS_REG,
C_CAN_ERR_CNT_REG,
C_CAN_BTR_REG,
@@ -104,6 +105,7 @@ static const u16 reg_map_c_can[] = {
static const u16 reg_map_d_can[] = {
[C_CAN_CTRL_REG] = 0x00,
+ [C_CAN_CTRL_EX_REG] = 0x02,
[C_CAN_STS_REG] = 0x04,
[C_CAN_ERR_CNT_REG] = 0x08,
[C_CAN_BTR_REG] = 0x0C,
@@ -143,8 +145,9 @@ static const u16 reg_map_d_can[] = {
};
enum c_can_dev_id {
- C_CAN_DEVTYPE,
- D_CAN_DEVTYPE,
+ BOSCH_C_CAN_PLATFORM,
+ BOSCH_C_CAN,
+ BOSCH_D_CAN,
};
/* c_can private data structure */
@@ -152,6 +155,7 @@ struct c_can_priv {
struct can_priv can; /* must be the first member */
struct napi_struct napi;
struct net_device *dev;
+ struct device *device;
int tx_object;
int current_status;
int last_status;
@@ -164,6 +168,7 @@ struct c_can_priv {
unsigned int tx_echo;
void *priv; /* for board-specific data */
u16 irqstatus;
+ enum c_can_dev_id type;
};
struct net_device *alloc_c_can_dev(void);
@@ -171,4 +176,9 @@ void free_c_can_dev(struct net_device *dev);
int register_c_can_dev(struct net_device *dev);
void unregister_c_can_dev(struct net_device *dev);
+#ifdef CONFIG_PM
+int c_can_power_up(struct net_device *dev);
+int c_can_power_down(struct net_device *dev);
+#endif
+
#endif /* C_CAN_H */
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index 1011146ea513..3d7830bcd2bf 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -120,10 +120,10 @@ static int __devinit c_can_pci_probe(struct pci_dev *pdev,
/* Configure CAN type */
switch (c_can_pci_data->type) {
- case C_CAN_DEVTYPE:
+ case BOSCH_C_CAN:
priv->regs = reg_map_c_can;
break;
- case D_CAN_DEVTYPE:
+ case BOSCH_D_CAN:
priv->regs = reg_map_d_can;
priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
break;
@@ -192,7 +192,7 @@ static void __devexit c_can_pci_remove(struct pci_dev *pdev)
}
static struct c_can_pci_data c_can_sta2x11= {
- .type = C_CAN_DEVTYPE,
+ .type = BOSCH_C_CAN,
.reg_align = C_CAN_REG_ALIGN_32,
.freq = 52000000, /* 52 Mhz */
};
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 6ff7ad006c30..ee1416132aba 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -30,6 +30,9 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/can/dev.h>
@@ -65,17 +68,58 @@ static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
writew(val, priv->base + 2 * priv->regs[index]);
}
+static struct platform_device_id c_can_id_table[] = {
+ [BOSCH_C_CAN_PLATFORM] = {
+ .name = KBUILD_MODNAME,
+ .driver_data = BOSCH_C_CAN,
+ },
+ [BOSCH_C_CAN] = {
+ .name = "c_can",
+ .driver_data = BOSCH_C_CAN,
+ },
+ [BOSCH_D_CAN] = {
+ .name = "d_can",
+ .driver_data = BOSCH_D_CAN,
+ }, {
+ }
+};
+
+static const struct of_device_id c_can_of_table[] = {
+ { .compatible = "bosch,c_can", .data = &c_can_id_table[BOSCH_C_CAN] },
+ { .compatible = "bosch,d_can", .data = &c_can_id_table[BOSCH_D_CAN] },
+ { /* sentinel */ },
+};
+
static int __devinit c_can_plat_probe(struct platform_device *pdev)
{
int ret;
void __iomem *addr;
struct net_device *dev;
struct c_can_priv *priv;
+ const struct of_device_id *match;
const struct platform_device_id *id;
+ struct pinctrl *pinctrl;
struct resource *mem;
int irq;
struct clk *clk;
+ if (pdev->dev.of_node) {
+ match = of_match_device(c_can_of_table, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Failed to find matching dt id\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+ id = match->data;
+ } else {
+ id = platform_get_device_id(pdev);
+ }
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl))
+ dev_warn(&pdev->dev,
+ "failed to configure pins from driver\n");
+
/* get the appropriate clk */
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
@@ -114,9 +158,8 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
}
priv = netdev_priv(dev);
- id = platform_get_device_id(pdev);
switch (id->driver_data) {
- case C_CAN_DEVTYPE:
+ case BOSCH_C_CAN:
priv->regs = reg_map_c_can;
switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
case IORESOURCE_MEM_32BIT:
@@ -130,7 +173,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
break;
}
break;
- case D_CAN_DEVTYPE:
+ case BOSCH_D_CAN:
priv->regs = reg_map_d_can;
priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
@@ -143,8 +186,10 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
dev->irq = irq;
priv->base = addr;
+ priv->device = &pdev->dev;
priv->can.clock.freq = clk_get_rate(clk);
priv->priv = clk;
+ priv->type = id->driver_data;
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -195,27 +240,75 @@ static int __devexit c_can_plat_remove(struct platform_device *pdev)
return 0;
}
-static const struct platform_device_id c_can_id_table[] = {
- {
- .name = KBUILD_MODNAME,
- .driver_data = C_CAN_DEVTYPE,
- }, {
- .name = "c_can",
- .driver_data = C_CAN_DEVTYPE,
- }, {
- .name = "d_can",
- .driver_data = D_CAN_DEVTYPE,
- }, {
+#ifdef CONFIG_PM
+static int c_can_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int ret;
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct c_can_priv *priv = netdev_priv(ndev);
+
+ if (priv->type != BOSCH_D_CAN) {
+ dev_warn(&pdev->dev, "Not supported\n");
+ return 0;
}
-};
+
+ if (netif_running(ndev)) {
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+ }
+
+ ret = c_can_power_down(ndev);
+ if (ret) {
+ netdev_err(ndev, "failed to enter power down mode\n");
+ return ret;
+ }
+
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ return 0;
+}
+
+static int c_can_resume(struct platform_device *pdev)
+{
+ int ret;
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct c_can_priv *priv = netdev_priv(ndev);
+
+ if (priv->type != BOSCH_D_CAN) {
+ dev_warn(&pdev->dev, "Not supported\n");
+ return 0;
+ }
+
+ ret = c_can_power_up(ndev);
+ if (ret) {
+ netdev_err(ndev, "Still in power down mode\n");
+ return ret;
+ }
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (netif_running(ndev)) {
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ }
+
+ return 0;
+}
+#else
+#define c_can_suspend NULL
+#define c_can_resume NULL
+#endif
static struct platform_driver c_can_plat_driver = {
.driver = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(c_can_of_table),
},
.probe = c_can_plat_probe,
.remove = __devexit_p(c_can_plat_remove),
+ .suspend = c_can_suspend,
+ .resume = c_can_resume,
.id_table = c_can_id_table,
};
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index c5f143165f80..c78ecfca1e45 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -144,6 +144,10 @@
#define FLEXCAN_MB_CODE_MASK (0xf0ffffff)
+/* FLEXCAN hardware feature flags */
+#define FLEXCAN_HAS_V10_FEATURES BIT(1) /* For core version >= 10 */
+#define FLEXCAN_HAS_BROKEN_ERR_STATE BIT(2) /* Broken error state handling */
+
/* Structure of the message buffer */
struct flexcan_mb {
u32 can_ctrl;
@@ -178,7 +182,7 @@ struct flexcan_regs {
};
struct flexcan_devtype_data {
- u32 hw_ver; /* hardware controller version */
+ u32 features; /* hardware controller features */
};
struct flexcan_priv {
@@ -197,11 +201,11 @@ struct flexcan_priv {
};
static struct flexcan_devtype_data fsl_p1010_devtype_data = {
- .hw_ver = 3,
+ .features = FLEXCAN_HAS_BROKEN_ERR_STATE,
};
-
+static struct flexcan_devtype_data fsl_imx28_devtype_data;
static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
- .hw_ver = 10,
+ .features = FLEXCAN_HAS_V10_FEATURES | FLEXCAN_HAS_BROKEN_ERR_STATE,
};
static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -741,15 +745,19 @@ static int flexcan_chip_start(struct net_device *dev)
* enable tx and rx warning interrupt
* enable bus off interrupt
* (== FLEXCAN_CTRL_ERR_STATE)
- *
- * _note_: we enable the "error interrupt"
- * (FLEXCAN_CTRL_ERR_MSK), too. Otherwise we don't get any
- * warning or bus passive interrupts.
*/
reg_ctrl = flexcan_read(&regs->ctrl);
reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
- FLEXCAN_CTRL_ERR_STATE | FLEXCAN_CTRL_ERR_MSK;
+ FLEXCAN_CTRL_ERR_STATE;
+ /*
+ * enable the "error interrupt" (FLEXCAN_CTRL_ERR_MSK),
+ * on most Flexcan cores, too. Otherwise we don't get
+ * any error warning or passive interrupts.
+ */
+ if (priv->devtype_data->features & FLEXCAN_HAS_BROKEN_ERR_STATE ||
+ priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
/* save for later use */
priv->reg_ctrl_default = reg_ctrl;
@@ -772,7 +780,7 @@ static int flexcan_chip_start(struct net_device *dev)
flexcan_write(0x0, &regs->rx14mask);
flexcan_write(0x0, &regs->rx15mask);
- if (priv->devtype_data->hw_ver >= 10)
+ if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES)
flexcan_write(0x0, &regs->rxfgmask);
flexcan_transceiver_switch(priv, 1);
@@ -954,6 +962,7 @@ static void __devexit unregister_flexcandev(struct net_device *dev)
static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
+ { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
{ /* sentinel */ },
};
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 06adf881ea24..c975999bb055 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -181,7 +181,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
if (!clock_name || !strcmp(clock_name, "sys")) {
sys_clk = clk_get(&ofdev->dev, "sys_clk");
- if (!sys_clk) {
+ if (IS_ERR(sys_clk)) {
dev_err(&ofdev->dev, "couldn't get sys_clk\n");
goto exit_unmap;
}
@@ -204,7 +204,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
if (clocksrc < 0) {
ref_clk = clk_get(&ofdev->dev, "ref_clk");
- if (!ref_clk) {
+ if (IS_ERR(ref_clk)) {
dev_err(&ofdev->dev, "couldn't get ref_clk\n");
goto exit_unmap;
}
@@ -380,12 +380,12 @@ static int mpc5xxx_can_resume(struct platform_device *ofdev)
}
#endif
-static struct mpc5xxx_can_data __devinitdata mpc5200_can_data = {
+static const struct mpc5xxx_can_data __devinitdata mpc5200_can_data = {
.type = MSCAN_TYPE_MPC5200,
.get_clock = mpc52xx_can_get_clock,
};
-static struct mpc5xxx_can_data __devinitdata mpc5121_can_data = {
+static const struct mpc5xxx_can_data __devinitdata mpc5121_can_data = {
.type = MSCAN_TYPE_MPC5121,
.get_clock = mpc512x_can_get_clock,
};
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 4c4f33d482d2..25011dbe1b96 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -156,8 +156,13 @@ static void set_normal_mode(struct net_device *dev)
}
/* set chip to normal mode */
- priv->write_reg(priv, REG_MOD, 0x00);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ priv->write_reg(priv, REG_MOD, MOD_LOM);
+ else
+ priv->write_reg(priv, REG_MOD, 0x00);
+
udelay(10);
+
status = priv->read_reg(priv, REG_MOD);
}
@@ -310,7 +315,10 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
can_put_echo_skb(skb, dev, 0);
- sja1000_write_cmdreg(priv, CMD_TR);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ sja1000_write_cmdreg(priv, CMD_TR | CMD_AT);
+ else
+ sja1000_write_cmdreg(priv, CMD_TR);
return NETDEV_TX_OK;
}
@@ -505,10 +513,18 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
netdev_warn(dev, "wakeup interrupt\n");
if (isrc & IRQ_TI) {
- /* transmission complete interrupt */
- stats->tx_bytes += priv->read_reg(priv, REG_FI) & 0xf;
- stats->tx_packets++;
- can_get_echo_skb(dev, 0);
+ /* transmission buffer released */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT &&
+ !(status & SR_TCS)) {
+ stats->tx_errors++;
+ can_free_echo_skb(dev, 0);
+ } else {
+ /* transmission complete */
+ stats->tx_bytes +=
+ priv->read_reg(priv, REG_FI) & 0xf;
+ stats->tx_packets++;
+ can_get_echo_skb(dev, 0);
+ }
netif_wake_queue(dev);
}
if (isrc & IRQ_RI) {
@@ -605,7 +621,8 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
priv->can.do_set_mode = sja1000_set_mode;
priv->can.do_get_berr_counter = sja1000_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
- CAN_CTRLMODE_BERR_REPORTING;
+ CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_ONE_SHOT;
spin_lock_init(&priv->cmdreg_lock);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index d2f91f737871..c4643c400d46 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -53,7 +53,7 @@ static struct peak_usb_adapter *peak_usb_adapters_list[] = {
* dump memory
*/
#define DUMP_WIDTH 16
-void dump_mem(char *prompt, void *p, int l)
+void pcan_dump_mem(char *prompt, void *p, int l)
{
pr_info("%s dumping %s (%d bytes):\n",
PCAN_USB_DRIVER_NAME, prompt ? prompt : "memory", l);
@@ -203,9 +203,9 @@ static void peak_usb_read_bulk_callback(struct urb *urb)
if (dev->state & PCAN_USB_STATE_STARTED) {
err = dev->adapter->dev_decode_buf(dev, urb);
if (err)
- dump_mem("received usb message",
- urb->transfer_buffer,
- urb->transfer_buffer_length);
+ pcan_dump_mem("received usb message",
+ urb->transfer_buffer,
+ urb->transfer_buffer_length);
}
}
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index 4c775b620be2..c8e5e91d7cb5 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -131,7 +131,7 @@ struct peak_usb_device {
struct peak_usb_device *next_siblings;
};
-void dump_mem(char *prompt, void *p, int l);
+void pcan_dump_mem(char *prompt, void *p, int l);
/* common timestamp management */
void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 629c4ba5d49d..e1626d92511a 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -292,8 +292,8 @@ static int pcan_usb_pro_wait_rsp(struct peak_usb_device *dev,
if (!rec_len) {
netdev_err(dev->netdev,
"got unprocessed record in msg\n");
- dump_mem("rcvd rsp msg", pum->u.rec_buffer,
- actual_length);
+ pcan_dump_mem("rcvd rsp msg", pum->u.rec_buffer,
+ actual_length);
break;
}
@@ -756,8 +756,8 @@ static int pcan_usb_pro_decode_buf(struct peak_usb_device *dev, struct urb *urb)
fail:
if (err)
- dump_mem("received msg",
- urb->transfer_buffer, urb->actual_length);
+ pcan_dump_mem("received msg",
+ urb->transfer_buffer, urb->actual_length);
return err;
}
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index b15366635147..bb9670f29b59 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -364,7 +364,7 @@ typhoon_inc_rxfree_index(u32 *index, const int count)
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{
- /* if we start using the Hi Tx ring, this needs updateing */
+ /* if we start using the Hi Tx ring, this needs updating */
typhoon_inc_index(index, count, TXLO_ENTRIES);
}
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index a11af5cc4844..e4ff38949112 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -89,15 +89,6 @@ source "drivers/net/ethernet/marvell/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"
source "drivers/net/ethernet/micrel/Kconfig"
source "drivers/net/ethernet/microchip/Kconfig"
-
-config MIPS_SIM_NET
- tristate "MIPS simulator Network device"
- depends on MIPS_SIM
- ---help---
- The MIPSNET device is a simple Ethernet network device which is
- emulated by the MIPS Simulator.
- If you are not using a MIPSsim or are unsure, say N.
-
source "drivers/net/ethernet/myricom/Kconfig"
config FEALNX
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 878ad32b93f2..d4473072654a 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -40,7 +40,6 @@ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
-obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 1bf5bbfe778e..55a2e3795055 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -149,7 +149,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
/* clear error status */
- pci_write_config_word(pdev, pci_pcie_cap(pdev) + PCI_EXP_DEVSTA,
+ pcie_capability_write_word(pdev, PCI_EXP_DEVSTA,
PCI_EXP_DEVSTA_NFED |
PCI_EXP_DEVSTA_FED |
PCI_EXP_DEVSTA_CED |
@@ -2685,7 +2685,7 @@ static void atl1c_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
}
-static struct pci_error_handlers atl1c_err_handler = {
+static const struct pci_error_handlers atl1c_err_handler = {
.error_detected = atl1c_io_error_detected,
.slot_reset = atl1c_io_slot_reset,
.resume = atl1c_io_resume,
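Constifying atl1c_err_handler is safe because the PCI core only ever reads the callback table. Here is a small stand-alone sketch of the same idiom — a const table of function pointers dispatched through at runtime — with hypothetical handler names; it is not the kernel's struct pci_error_handlers.

```c
#include <stdio.h>

struct err_handlers {
	void (*error_detected)(int err);
	void (*resume)(void);
};

static void demo_error_detected(int err) { printf("error %d detected\n", err); }
static void demo_resume(void)            { printf("resumed\n"); }

/* const: never written after initialization, so it may live in read-only
 * data; users only dispatch through the pointers. */
static const struct err_handlers demo_err_handler = {
	.error_detected	= demo_error_detected,
	.resume		= demo_resume,
};

int main(void)
{
	demo_err_handler.error_detected(42);
	demo_err_handler.resume();
	return 0;
}
```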
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index a98acc8a956f..e213da29e73d 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2489,7 +2489,7 @@ static void atl1e_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
}
-static struct pci_error_handlers atl1e_err_handler = {
+static const struct pci_error_handlers atl1e_err_handler = {
.error_detected = atl1e_io_error_detected,
.slot_reset = atl1e_io_slot_reset,
.resume = atl1e_io_resume,
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index f15e72e81ac4..4bd416b72e65 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -101,6 +101,7 @@ config TIGON3
tristate "Broadcom Tigon3 support"
depends on PCI
select PHYLIB
+ select HWMON
---help---
This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index e48312f2305d..d4310700c7a7 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8742,7 +8742,7 @@ static void bnx2_io_resume(struct pci_dev *pdev)
rtnl_unlock();
}
-static struct pci_error_handlers bnx2_err_handler = {
+static const struct pci_error_handlers bnx2_err_handler = {
.error_detected = bnx2_io_error_detected,
.slot_reset = bnx2_io_slot_reset,
.resume = bnx2_io_resume,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 6d1a24acb77e..72897c47b8c8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,8 +23,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.72.51-0"
-#define DRV_MODULE_RELDATE "2012/06/18"
+#define DRV_MODULE_VERSION "1.78.00-0"
+#define DRV_MODULE_RELDATE "2012/09/27"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -1458,7 +1458,7 @@ struct bnx2x {
int fw_stats_req_sz;
/*
- * FW statistics data shortcut (points at the begining of
+ * FW statistics data shortcut (points at the beginning of
* fw_stats buffer + fw_stats_req_sz).
*/
struct bnx2x_fw_stats_data *fw_stats_data;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e8e97a7d1d06..30f04a389227 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2285,7 +2285,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Wait for all pending SP commands to complete */
if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
BNX2X_ERR("Timeout waiting for SP elements to complete\n");
- bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+ bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
return -EBUSY;
}
@@ -2333,7 +2333,7 @@ load_error0:
}
/* must be called with rtnl_lock */
-int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
{
int i;
bool global = false;
@@ -2395,7 +2395,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
/* Cleanup the chip if needed */
if (unload_mode != UNLOAD_RECOVERY)
- bnx2x_chip_cleanup(bp, unload_mode);
+ bnx2x_chip_cleanup(bp, unload_mode, keep_link);
else {
/* Send the UNLOAD_REQUEST to the MCP */
bnx2x_send_unload_req(bp, unload_mode);
@@ -2419,7 +2419,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
bnx2x_free_irq(bp);
/* Report UNLOAD_DONE to MCP */
- bnx2x_send_unload_done(bp);
+ bnx2x_send_unload_done(bp, false);
}
/*
@@ -3026,8 +3026,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
first_bd = tx_start_bd;
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
- SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
- mac_type);
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_PARSE_NBDS,
+ 0);
/* header nbd */
SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
@@ -3077,13 +3078,20 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
&pbd_e2->dst_mac_addr_lo,
eth->h_dest);
}
+
+ SET_FLAG(pbd_e2_parsing_data,
+ ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
} else {
+ u16 global_data = 0;
pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
/* Set PBD in checksum offload case */
if (xmit_type & XMIT_CSUM)
hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
+ SET_FLAG(global_data,
+ ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
+ pbd_e1x->global_data |= cpu_to_le16(global_data);
}
/* Setup the data pointer of the first BD of the packet */
@@ -3770,7 +3778,7 @@ int bnx2x_reload_if_running(struct net_device *dev)
if (unlikely(!netif_running(dev)))
return 0;
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
return bnx2x_nic_load(bp, LOAD_NORMAL);
}
@@ -3967,7 +3975,7 @@ int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_detach(dev);
- bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+ bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
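The bnx2x_start_xmit hunks above move the Ethernet address type out of the start BD and pack it into the parse BD via SET_FLAG and the *_ETH_ADDR_TYPE mask/shift pairs. Below is a compilable sketch of that mask/shift packing; SET_FIELD is an illustrative stand-in for the driver's SET_FLAG, not its actual definition, and the field values are hypothetical.

```c
#include <stdio.h>

/* Illustrative field layout in the style of the *_ETH_ADDR_TYPE defines above. */
#define ADDR_TYPE_MASK	(0x3 << 4)
#define ADDR_TYPE_SHIFT	4

/* Clear the field, then OR in the shifted value, clipped to the field width. */
#define SET_FIELD(word, mask, shift, val) \
	((word) = ((word) & ~(mask)) | (((val) << (shift)) & (mask)))

int main(void)
{
	unsigned short global_data = 0;	/* plays the role of pbd_e1x->global_data */

	/* Pack a 2-bit address type (1 = hypothetical "unicast") into bits 5:4. */
	SET_FIELD(global_data, ADDR_TYPE_MASK, ADDR_TYPE_SHIFT, 1);

	printf("global_data = 0x%04x\n", (unsigned)global_data);	/* 0x0010 */
	return 0;
}
```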
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index dfd86a55f1dc..9c5ea6c5b4c7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -83,8 +83,9 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
* bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
*
* @bp: driver handle
+ * @keep_link: true iff link should be kept up
*/
-void bnx2x_send_unload_done(struct bnx2x *bp);
+void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);
/**
* bnx2x_config_rss_pf - configure RSS parameters in a PF.
@@ -153,6 +154,14 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
void bnx2x_link_set(struct bnx2x *bp);
/**
+ * bnx2x_force_link_reset - Forces link reset, and puts the PHY
+ * in reset as well.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_force_link_reset(struct bnx2x *bp);
+
+/**
* bnx2x_link_test - query link status.
*
* @bp: driver handle
@@ -312,12 +321,13 @@ void bnx2x_set_num_queues(struct bnx2x *bp);
*
* @bp: driver handle
* @unload_mode: COMMON, PORT, FUNCTION
+ * @keep_link: true iff link should be kept up.
*
* - Cleanup MAC configuration.
* - Closes clients.
* - etc.
*/
-void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link);
/**
* bnx2x_acquire_hw_lock - acquire HW lock.
@@ -446,7 +456,7 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err);
/* dev_close main block */
-int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link);
/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 8a73374e52a7..2245c3895409 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -91,25 +91,21 @@ static void bnx2x_pfc_set(struct bnx2x *bp)
/*
* Rx COS configuration
* Changing PFC RX configuration .
- * In RX COS0 will always be configured to lossy and COS1 to lossless
+ * In RX COS0 will always be configured to lossless and COS1 to lossy
*/
for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) {
pri_bit = 1 << i;
- if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))
+ if (!(pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp)))
val |= 1 << (i * 4);
}
pfc_params.pkt_priority_to_cos = val;
/* RX COS0 */
- pfc_params.llfc_low_priority_classes = 0;
+ pfc_params.llfc_low_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
/* RX COS1 */
- pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
-
- /* BRB configuration */
- pfc_params.cos0_pauseable = false;
- pfc_params.cos1_pauseable = true;
+ pfc_params.llfc_high_priority_classes = 0;
bnx2x_acquire_phy_lock(bp);
bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED;
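The bnx2x_pfc_set hunk above rebuilds pkt_priority_to_cos so that lossless priorities map to COS0 and lossy ones to COS1, one nibble per priority. A stand-alone sketch of that loop follows, with a hypothetical pause mask; the register semantics are only mimicked, not reproduced.

```c
#include <stdio.h>

#define MAX_PFC_PRIORITIES 8

int main(void)
{
	unsigned int pause_mask = 0x28;	/* hypothetical: priorities 3 and 5 are lossless */
	unsigned int val = 0;
	int i;

	/* One nibble per priority: lossless priorities keep 0 (COS0), while every
	 * priority outside the pause mask gets 1 (COS1), as in the rewritten loop. */
	for (i = 0; i < MAX_PFC_PRIORITIES; i++) {
		unsigned int pri_bit = 1u << i;

		if (!(pause_mask & pri_bit))
			val |= 1u << (i * 4);
	}

	printf("pkt_priority_to_cos = 0x%08x\n", val);
	return 0;
}
```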
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index ebf40cd7aa10..c65295dded39 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -905,6 +905,7 @@ static int bnx2x_nway_reset(struct net_device *dev)
if (netif_running(dev)) {
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_force_link_reset(bp);
bnx2x_link_set(bp);
}
@@ -1606,7 +1607,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
return 0;
}
-static char *bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF] = {
+static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = {
"register_test (offline) ",
"memory_test (offline) ",
"int_loopback_test (offline)",
@@ -1653,7 +1654,7 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
+ eee_cfg = bp->link_vars.eee_status;
edata->supported =
bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
@@ -1690,7 +1691,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
+ eee_cfg = bp->link_vars.eee_status;
if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
@@ -1739,6 +1740,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
/* Restart link to propogate changes */
if (netif_running(dev)) {
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_force_link_reset(bp);
bnx2x_link_set(bp);
}
@@ -2038,8 +2040,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
u16 pkt_prod, bd_prod;
struct sw_tx_bd *tx_buf;
struct eth_tx_start_bd *tx_start_bd;
- struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
- struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
dma_addr_t mapping;
union eth_rx_cqe *cqe;
u8 cqe_fp_flags, cqe_fp_type;
@@ -2131,21 +2131,32 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
SET_FLAG(tx_start_bd->general_data,
- ETH_TX_START_BD_ETH_ADDR_TYPE,
- UNICAST_ADDRESS);
- SET_FLAG(tx_start_bd->general_data,
ETH_TX_START_BD_HDR_NBDS,
1);
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_PARSE_NBDS,
+ 0);
/* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
- pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
- pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
-
- memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
- memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
-
+ if (CHIP_IS_E1x(bp)) {
+ u16 global_data = 0;
+ struct eth_tx_parse_bd_e1x *pbd_e1x =
+ &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
+ memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
+ SET_FLAG(global_data,
+ ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, UNICAST_ADDRESS);
+ pbd_e1x->global_data = cpu_to_le16(global_data);
+ } else {
+ u32 parsing_data = 0;
+ struct eth_tx_parse_bd_e2 *pbd_e2 =
+ &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
+ memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
+ SET_FLAG(parsing_data,
+ ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, UNICAST_ADDRESS);
+ pbd_e2->parsing_data = cpu_to_le32(parsing_data);
+ }
wmb();
txdata->tx_db.data.prod += 2;
@@ -2263,7 +2274,7 @@ static int bnx2x_test_ext_loopback(struct bnx2x *bp)
if (!netif_running(bp->dev))
return BNX2X_EXT_LOOPBACK_FAILED;
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
if (rc) {
DP(BNX2X_MSG_ETHTOOL,
@@ -2414,7 +2425,7 @@ static void bnx2x_self_test(struct net_device *dev,
link_up = bp->link_vars.link_up;
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
rc = bnx2x_nic_load(bp, LOAD_DIAG);
if (rc) {
etest->flags |= ETH_TEST_FL_FAILED;
@@ -2446,7 +2457,7 @@ static void bnx2x_self_test(struct net_device *dev,
etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
}
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
/* restore input for TX port IF */
REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
@@ -2534,7 +2545,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
- int i, j, k, offset, start;
+ int i, j, k, start;
char queue_name[MAX_QUEUE_NAME_LEN+1];
switch (stringset) {
@@ -2570,13 +2581,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
start = 0;
else
start = 4;
- for (i = 0, j = start; j < (start + BNX2X_NUM_TESTS(bp));
- i++, j++) {
- offset = sprintf(buf+32*i, "%s",
- bnx2x_tests_str_arr[j]);
- *(buf+offset) = '\0';
- }
- break;
+ memcpy(buf, bnx2x_tests_str_arr + start,
+ ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
}
}
@@ -2940,7 +2946,7 @@ static int bnx2x_set_channels(struct net_device *dev,
bnx2x_change_num_queues(bp, channels->combined_count);
return 0;
}
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
bnx2x_change_num_queues(bp, channels->combined_count);
return bnx2x_nic_load(bp, LOAD_NORMAL);
}
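The bnx2x_get_strings hunk above swaps a per-entry sprintf loop for a single memcpy, which works because bnx2x_tests_str_arr now stores each name in a fixed ETH_GSTRING_LEN slot. A small sketch of that fixed-width table layout, assuming a 32-byte slot width and reusing test names from the array above; the start/count values are arbitrary.

```c
#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32	/* stand-in for ETH_GSTRING_LEN; assumed slot width */

/* Fixed-width rows: each name sits in its own GSTRING_LEN-byte slot,
 * zero-padded, like the rewritten bnx2x_tests_str_arr. */
static const char tests[][GSTRING_LEN] = {
	"register_test (offline) ",
	"memory_test (offline) ",
	"int_loopback_test (offline)",
};

int main(void)
{
	char buf[2 * GSTRING_LEN];
	int start = 1, count = 2;	/* arbitrary slice of the table */

	/* One memcpy replaces the old per-entry sprintf loop. */
	memcpy(buf, tests + start, (size_t)count * GSTRING_LEN);

	printf("%s\n", buf);			/* "memory_test (offline) " */
	printf("%s\n", buf + GSTRING_LEN);	/* "int_loopback_test (offline)" */
	return 0;
}
```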
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index bbc66ced9c25..620fe939ecfd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -88,9 +88,6 @@
#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[101].base + ((assertListEntry) * IRO[101].m1))
-#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base)
-#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
- (IRO[108].base)
#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
(IRO[201].base + ((pfId) * IRO[201].m1))
#define TSTORM_FUNC_EN_OFFSET(funcId) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 76b6e65790f8..18704929e642 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1286,6 +1286,9 @@ struct drv_func_mb {
#define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000
#define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000
+ #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002
+
+ #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
#define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
@@ -1909,6 +1912,54 @@ struct lldp_local_mib {
};
/***END OF DCBX STRUCTURES DECLARATIONS***/
+/***********************************************************/
+/* Elink section */
+/***********************************************************/
+#define SHMEM_LINK_CONFIG_SIZE 2
+struct shmem_lfa {
+ u32 req_duplex;
+ #define REQ_DUPLEX_PHY0_MASK 0x0000ffff
+ #define REQ_DUPLEX_PHY0_SHIFT 0
+ #define REQ_DUPLEX_PHY1_MASK 0xffff0000
+ #define REQ_DUPLEX_PHY1_SHIFT 16
+ u32 req_flow_ctrl;
+ #define REQ_FLOW_CTRL_PHY0_MASK 0x0000ffff
+ #define REQ_FLOW_CTRL_PHY0_SHIFT 0
+ #define REQ_FLOW_CTRL_PHY1_MASK 0xffff0000
+ #define REQ_FLOW_CTRL_PHY1_SHIFT 16
+ u32 req_line_speed; /* Also determine AutoNeg */
+ #define REQ_LINE_SPD_PHY0_MASK 0x0000ffff
+ #define REQ_LINE_SPD_PHY0_SHIFT 0
+ #define REQ_LINE_SPD_PHY1_MASK 0xffff0000
+ #define REQ_LINE_SPD_PHY1_SHIFT 16
+ u32 speed_cap_mask[SHMEM_LINK_CONFIG_SIZE];
+ u32 additional_config;
+ #define REQ_FC_AUTO_ADV_MASK 0x0000ffff
+ #define REQ_FC_AUTO_ADV0_SHIFT 0
+ #define NO_LFA_DUE_TO_DCC_MASK 0x00010000
+ u32 lfa_sts;
+ #define LFA_LINK_FLAP_REASON_OFFSET 0
+ #define LFA_LINK_FLAP_REASON_MASK 0x000000ff
+ #define LFA_LINK_DOWN 0x1
+ #define LFA_LOOPBACK_ENABLED 0x2
+ #define LFA_DUPLEX_MISMATCH 0x3
+ #define LFA_MFW_IS_TOO_OLD 0x4
+ #define LFA_LINK_SPEED_MISMATCH 0x5
+ #define LFA_FLOW_CTRL_MISMATCH 0x6
+ #define LFA_SPEED_CAP_MISMATCH 0x7
+ #define LFA_DCC_LFA_DISABLED 0x8
+ #define LFA_EEE_MISMATCH 0x9
+
+ #define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8
+ #define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00
+
+ #define LINK_FLAP_COUNT_OFFSET 16
+ #define LINK_FLAP_COUNT_MASK 0x00ff0000
+
+ #define LFA_FLAGS_MASK 0xff000000
+ #define SHMEM_LFA_DONT_CLEAR_STAT (1<<24)
+};
+
struct ncsi_oem_fcoe_features {
u32 fcoe_features1;
#define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF
@@ -2738,8 +2789,8 @@ struct afex_stats {
};
#define BCM_5710_FW_MAJOR_VERSION 7
-#define BCM_5710_FW_MINOR_VERSION 2
-#define BCM_5710_FW_REVISION_VERSION 51
+#define BCM_5710_FW_MINOR_VERSION 8
+#define BCM_5710_FW_REVISION_VERSION 2
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -3861,10 +3912,8 @@ struct eth_rss_update_ramrod_data {
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<6)
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 6
-#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0 (0x1<<7)
-#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0_SHIFT 7
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7)
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7
u8 rss_result_mask;
u8 rss_mode;
__le32 __reserved2;
@@ -4080,27 +4129,29 @@ struct eth_tx_start_bd {
#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4)
#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
-#define ETH_TX_START_BD_RESREVED (0x1<<5)
-#define ETH_TX_START_BD_RESREVED_SHIFT 5
-#define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6)
-#define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6
+#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
+#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5
+#define ETH_TX_START_BD_RESREVED (0x1<<7)
+#define ETH_TX_START_BD_RESREVED_SHIFT 7
};
/*
* Tx parsing BD structure for ETH E1/E1h
*/
struct eth_tx_parse_bd_e1x {
- u8 global_data;
+ __le16 global_data;
#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0)
#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0
-#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4)
-#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4
-#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5)
-#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5
-#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6)
-#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6
-#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7)
-#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7
+#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE (0x3<<4)
+#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<6)
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<7)
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 7
+#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<8)
+#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 8
+#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x7F<<9)
+#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 9
u8 tcp_flags;
#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0)
#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0
@@ -4119,7 +4170,6 @@ struct eth_tx_parse_bd_e1x {
#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7)
#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7
u8 ip_hlen_w;
- s8 reserved;
__le16 total_hlen_w;
__le16 tcp_pseudo_csum;
__le16 lso_mss;
@@ -4138,14 +4188,16 @@ struct eth_tx_parse_bd_e2 {
__le16 src_mac_addr_mid;
__le16 src_mac_addr_hi;
__le32 parsing_data;
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0)
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x7FF<<0)
#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13)
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13
-#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17)
-#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17
-#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31)
-#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11)
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15)
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 15
+#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<16)
+#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 16
+#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE (0x3<<30)
+#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT 30
};
/*
@@ -4913,7 +4965,8 @@ struct flow_control_configuration {
*
*/
struct function_start_data {
- __le16 function_mode;
+ u8 function_mode;
+ u8 reserved;
__le16 sd_vlan_tag;
__le16 vif_id;
u8 path_id;
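The new shmem_lfa block packs the link-flap reason code and two counters into the single lfa_sts word. The short sketch below decodes such a word with the masks and offsets defined in the hunk (copied verbatim); the sample value is hypothetical, chosen so the reason decodes to LFA_LINK_SPEED_MISMATCH (0x5).

```c
#include <stdio.h>

/* Masks/offsets copied from the shmem_lfa definitions above. */
#define LFA_LINK_FLAP_REASON_OFFSET		0
#define LFA_LINK_FLAP_REASON_MASK		0x000000ff
#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET	8
#define LINK_FLAP_AVOIDANCE_COUNT_MASK		0x0000ff00
#define LINK_FLAP_COUNT_OFFSET			16
#define LINK_FLAP_COUNT_MASK			0x00ff0000

int main(void)
{
	unsigned int lfa_sts = 0x00030205;	/* hypothetical sample status word */

	printf("flap reason    = 0x%x\n",	/* 0x5: LFA_LINK_SPEED_MISMATCH */
	       (lfa_sts & LFA_LINK_FLAP_REASON_MASK) >> LFA_LINK_FLAP_REASON_OFFSET);
	printf("flaps avoided  = %u\n",
	       (lfa_sts & LINK_FLAP_AVOIDANCE_COUNT_MASK) >>
	       LINK_FLAP_AVOIDANCE_COUNT_OFFSET);
	printf("link flaps     = %u\n",
	       (lfa_sts & LINK_FLAP_COUNT_MASK) >> LINK_FLAP_COUNT_OFFSET);
	return 0;
}
```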
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 559c396d45cc..c8f10f0e8a0d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -566,7 +566,7 @@ static const struct {
u32 e2; /* 57712 */
u32 e3; /* 578xx */
} reg_mask; /* Register mask (all valid bits) */
- char name[7]; /* Block's longest name is 6 characters long
+ char name[8]; /* Block's longest name is 7 characters long
* (name + suffix)
*/
} bnx2x_blocks_parity_data[] = {
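The bnx2x_init.h hunk grows the name field from 7 to 8 bytes so the longest block name plus suffix still leaves room for the terminating NUL. A one-line reminder of that sizing rule, with a hypothetical 7-character name:

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[8];				/* 7 visible characters + '\0' */
	const char *longest = "ABCDEFG";	/* hypothetical 7-character block name */

	snprintf(name, sizeof(name), "%s", longest);	/* fits exactly, no truncation */
	printf("%s (len %zu of max %zu)\n", name, strlen(name), sizeof(name) - 1);
	return 0;
}
```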
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index b046beb435b2..e2e45ee5df33 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -161,120 +161,6 @@
#define EDC_MODE_LIMITING 0x0044
#define EDC_MODE_PASSIVE_DAC 0x0055
-/* BRB default for class 0 E2 */
-#define DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR 170
-#define DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR 250
-#define DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR 10
-#define DEFAULT0_E2_BRB_MAC_FULL_XON_THR 50
-
-/* BRB thresholds for E2*/
-#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE 170
-#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
-
-#define PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE 250
-#define PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
-
-#define PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE 10
-#define PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 90
-
-#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE 50
-#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE 250
-
-/* BRB default for class 0 E3A0 */
-#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR 290
-#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR 410
-#define DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR 10
-#define DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR 50
-
-/* BRB thresholds for E3A0 */
-#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE 290
-#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
-
-#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE 410
-#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
-
-#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE 10
-#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 170
-
-#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE 50
-#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE 410
-
-/* BRB default for E3B0 */
-#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR 330
-#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR 490
-#define DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR 15
-#define DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR 55
-
-/* BRB thresholds for E3B0 2 port mode*/
-#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 1025
-#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
-
-#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE 1025
-#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
-
-#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE 10
-#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 1025
-
-#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE 50
-#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE 1025
-
-/* only for E3B0*/
-#define PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR 1025
-#define PFC_E3B0_2P_BRB_FULL_LB_XON_THR 1025
-
-/* Lossy +Lossless GUARANTIED == GUART */
-#define PFC_E3B0_2P_MIX_PAUSE_LB_GUART 284
-/* Lossless +Lossless*/
-#define PFC_E3B0_2P_PAUSE_LB_GUART 236
-/* Lossy +Lossy*/
-#define PFC_E3B0_2P_NON_PAUSE_LB_GUART 342
-
-/* Lossy +Lossless*/
-#define PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART 284
-/* Lossless +Lossless*/
-#define PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART 236
-/* Lossy +Lossy*/
-#define PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART 336
-#define PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST 80
-
-#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART 0
-#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST 0
-
-/* BRB thresholds for E3B0 4 port mode */
-#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 304
-#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
-
-#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE 384
-#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
-
-#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE 10
-#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 304
-
-#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE 50
-#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE 384
-
-/* only for E3B0*/
-#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR 304
-#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR 384
-#define PFC_E3B0_4P_LB_GUART 120
-
-#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART 120
-#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80
-
-#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART 80
-#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120
-
-/* Pause defines*/
-#define DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR 330
-#define DEFAULT_E3B0_BRB_FULL_LB_XON_THR 490
-#define DEFAULT_E3B0_LB_GUART 40
-
-#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART 40
-#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST 0
-
-#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART 40
-#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST 0
-
/* ETS defines*/
#define DCBX_INVALID_COS (0xFF)
@@ -321,6 +207,127 @@ static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
return val;
}
+/*
+ * bnx2x_check_lfa - This function checks if link reinitialization is required,
+ * or link flap can be avoided.
+ *
+ * @params: link parameters
+ * Returns 0 if Link Flap Avoidance conditions are met; otherwise, the failed
+ * condition code.
+ */
+static int bnx2x_check_lfa(struct link_params *params)
+{
+ u32 link_status, cfg_idx, lfa_mask, cfg_size;
+ u32 cur_speed_cap_mask, cur_req_fc_auto_adv, additional_config;
+ u32 saved_val, req_val, eee_status;
+ struct bnx2x *bp = params->bp;
+
+ additional_config =
+ REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config));
+
+ /* NOTE: must be first condition checked -
+ * to verify DCC bit is cleared in any case!
+ */
+ if (additional_config & NO_LFA_DUE_TO_DCC_MASK) {
+ DP(NETIF_MSG_LINK, "No LFA due to DCC flap after clp exit\n");
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config),
+ additional_config & ~NO_LFA_DUE_TO_DCC_MASK);
+ return LFA_DCC_LFA_DISABLED;
+ }
+
+ /* Verify that link is up */
+ link_status = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[params->port].link_status));
+ if (!(link_status & LINK_STATUS_LINK_UP))
+ return LFA_LINK_DOWN;
+
+ /* Verify that loopback mode is not set */
+ if (params->loopback_mode)
+ return LFA_LOOPBACK_ENABLED;
+
+ /* Verify that MFW supports LFA */
+ if (!params->lfa_base)
+ return LFA_MFW_IS_TOO_OLD;
+
+ if (params->num_phys == 3) {
+ cfg_size = 2;
+ lfa_mask = 0xffffffff;
+ } else {
+ cfg_size = 1;
+ lfa_mask = 0xffff;
+ }
+
+ /* Compare Duplex */
+ saved_val = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, req_duplex));
+ req_val = params->req_duplex[0] | (params->req_duplex[1] << 16);
+ if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+ DP(NETIF_MSG_LINK, "Duplex mismatch %x vs. %x\n",
+ (saved_val & lfa_mask), (req_val & lfa_mask));
+ return LFA_DUPLEX_MISMATCH;
+ }
+ /* Compare Flow Control */
+ saved_val = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, req_flow_ctrl));
+ req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16);
+ if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+ DP(NETIF_MSG_LINK, "Flow control mismatch %x vs. %x\n",
+ (saved_val & lfa_mask), (req_val & lfa_mask));
+ return LFA_FLOW_CTRL_MISMATCH;
+ }
+ /* Compare Link Speed */
+ saved_val = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, req_line_speed));
+ req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16);
+ if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
+ DP(NETIF_MSG_LINK, "Link speed mismatch %x vs. %x\n",
+ (saved_val & lfa_mask), (req_val & lfa_mask));
+ return LFA_LINK_SPEED_MISMATCH;
+ }
+
+ for (cfg_idx = 0; cfg_idx < cfg_size; cfg_idx++) {
+ cur_speed_cap_mask = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa,
+ speed_cap_mask[cfg_idx]));
+
+ if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) {
+ DP(NETIF_MSG_LINK, "Speed Cap mismatch %x vs. %x\n",
+ cur_speed_cap_mask,
+ params->speed_cap_mask[cfg_idx]);
+ return LFA_SPEED_CAP_MISMATCH;
+ }
+ }
+
+ cur_req_fc_auto_adv =
+ REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config)) &
+ REQ_FC_AUTO_ADV_MASK;
+
+ if ((u16)cur_req_fc_auto_adv != params->req_fc_auto_adv) {
+ DP(NETIF_MSG_LINK, "Flow Ctrl AN mismatch %x vs. %x\n",
+ cur_req_fc_auto_adv, params->req_fc_auto_adv);
+ return LFA_FLOW_CTRL_MISMATCH;
+ }
+
+ eee_status = REG_RD(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ eee_status[params->port]));
+
+ if (((eee_status & SHMEM_EEE_LPI_REQUESTED_BIT) ^
+ (params->eee_mode & EEE_MODE_ENABLE_LPI)) ||
+ ((eee_status & SHMEM_EEE_REQUESTED_BIT) ^
+ (params->eee_mode & EEE_MODE_ADV_LPI))) {
+ DP(NETIF_MSG_LINK, "EEE mismatch %x vs. %x\n", params->eee_mode,
+ eee_status);
+ return LFA_EEE_MISMATCH;
+ }
+
+ /* LFA conditions are met */
+ return 0;
+}
/******************************************************************/
/* EPIO/GPIO section */
/******************************************************************/
@@ -1307,93 +1314,6 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
}
/******************************************************************/
-/* EEE section */
-/******************************************************************/
-static u8 bnx2x_eee_has_cap(struct link_params *params)
-{
- struct bnx2x *bp = params->bp;
-
- if (REG_RD(bp, params->shmem2_base) <=
- offsetof(struct shmem2_region, eee_status[params->port]))
- return 0;
-
- return 1;
-}
-
-static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
-{
- switch (nvram_mode) {
- case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
- *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
- break;
- case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
- *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
- break;
- case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
- *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
- break;
- default:
- *idle_timer = 0;
- break;
- }
-
- return 0;
-}
-
-static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
-{
- switch (idle_timer) {
- case EEE_MODE_NVRAM_BALANCED_TIME:
- *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
- break;
- case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
- *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
- break;
- case EEE_MODE_NVRAM_LATENCY_TIME:
- *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
- break;
- default:
- *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
- break;
- }
-
- return 0;
-}
-
-static u32 bnx2x_eee_calc_timer(struct link_params *params)
-{
- u32 eee_mode, eee_idle;
- struct bnx2x *bp = params->bp;
-
- if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
- if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
- /* time value in eee_mode --> used directly*/
- eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
- } else {
- /* hsi value in eee_mode --> time */
- if (bnx2x_eee_nvram_to_time(params->eee_mode &
- EEE_MODE_NVRAM_MASK,
- &eee_idle))
- return 0;
- }
- } else {
- /* hsi values in nvram --> time*/
- eee_mode = ((REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
- port_feature_config[params->port].
- eee_power_mode)) &
- PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
- PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
-
- if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
- return 0;
- }
-
- return eee_idle;
-}
-
-
-/******************************************************************/
/* PFC section */
/******************************************************************/
static void bnx2x_update_pfc_xmac(struct link_params *params,
@@ -1606,16 +1526,23 @@ static void bnx2x_set_xumac_nig(struct link_params *params,
NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
}
-static void bnx2x_umac_disable(struct link_params *params)
+static void bnx2x_set_umac_rxtx(struct link_params *params, u8 en)
{
u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+ u32 val;
struct bnx2x *bp = params->bp;
if (!(REG_RD(bp, MISC_REG_RESET_REG_2) &
(MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)))
return;
-
+ val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG);
+ if (en)
+ val |= (UMAC_COMMAND_CONFIG_REG_TX_ENA |
+ UMAC_COMMAND_CONFIG_REG_RX_ENA);
+ else
+ val &= ~(UMAC_COMMAND_CONFIG_REG_TX_ENA |
+ UMAC_COMMAND_CONFIG_REG_RX_ENA);
/* Disable RX and TX */
- REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, 0);
+ REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
}
static void bnx2x_umac_enable(struct link_params *params,
@@ -1671,6 +1598,16 @@ static void bnx2x_umac_enable(struct link_params *params,
REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
udelay(50);
+ /* Configure UMAC for EEE */
+ if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
+ DP(NETIF_MSG_LINK, "configured UMAC for EEE\n");
+ REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL,
+ UMAC_UMAC_EEE_CTRL_REG_EEE_EN);
+ REG_WR(bp, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11);
+ } else {
+ REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0);
+ }
+
/* Set MAC address for source TX Pause/PFC frames (under SW reset) */
REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
((params->mac_addr[2] << 24) |
@@ -1766,11 +1703,12 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
}
-static void bnx2x_xmac_disable(struct link_params *params)
+static void bnx2x_set_xmac_rxtx(struct link_params *params, u8 en)
{
u8 port = params->port;
struct bnx2x *bp = params->bp;
u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+ u32 val;
if (REG_RD(bp, MISC_REG_RESET_REG_2) &
MISC_REGISTERS_RESET_REG_2_XMAC) {
@@ -1784,7 +1722,12 @@ static void bnx2x_xmac_disable(struct link_params *params)
REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
(pfc_ctrl | (1<<1)));
DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
- REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0);
+ val = REG_RD(bp, xmac_base + XMAC_REG_CTRL);
+ if (en)
+ val |= (XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
+ else
+ val &= ~(XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
}
}
@@ -2087,391 +2030,6 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
}
-/* PFC BRB internal port configuration params */
-struct bnx2x_pfc_brb_threshold_val {
- u32 pause_xoff;
- u32 pause_xon;
- u32 full_xoff;
- u32 full_xon;
-};
-
-struct bnx2x_pfc_brb_e3b0_val {
- u32 per_class_guaranty_mode;
- u32 lb_guarantied_hyst;
- u32 full_lb_xoff_th;
- u32 full_lb_xon_threshold;
- u32 lb_guarantied;
- u32 mac_0_class_t_guarantied;
- u32 mac_0_class_t_guarantied_hyst;
- u32 mac_1_class_t_guarantied;
- u32 mac_1_class_t_guarantied_hyst;
-};
-
-struct bnx2x_pfc_brb_th_val {
- struct bnx2x_pfc_brb_threshold_val pauseable_th;
- struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
- struct bnx2x_pfc_brb_threshold_val default_class0;
- struct bnx2x_pfc_brb_threshold_val default_class1;
-
-};
-static int bnx2x_pfc_brb_get_config_params(
- struct link_params *params,
- struct bnx2x_pfc_brb_th_val *config_val)
-{
- struct bnx2x *bp = params->bp;
- DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
-
- config_val->default_class1.pause_xoff = 0;
- config_val->default_class1.pause_xon = 0;
- config_val->default_class1.full_xoff = 0;
- config_val->default_class1.full_xon = 0;
-
- if (CHIP_IS_E2(bp)) {
- /* Class0 defaults */
- config_val->default_class0.pause_xoff =
- DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR;
- config_val->default_class0.pause_xon =
- DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR;
- config_val->default_class0.full_xoff =
- DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR;
- config_val->default_class0.full_xon =
- DEFAULT0_E2_BRB_MAC_FULL_XON_THR;
- /* Pause able*/
- config_val->pauseable_th.pause_xoff =
- PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
- config_val->pauseable_th.pause_xon =
- PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
- config_val->pauseable_th.full_xoff =
- PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
- config_val->pauseable_th.full_xon =
- PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
- /* Non pause able*/
- config_val->non_pauseable_th.pause_xoff =
- PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.pause_xon =
- PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xoff =
- PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xon =
- PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
- } else if (CHIP_IS_E3A0(bp)) {
- /* Class0 defaults */
- config_val->default_class0.pause_xoff =
- DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR;
- config_val->default_class0.pause_xon =
- DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR;
- config_val->default_class0.full_xoff =
- DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR;
- config_val->default_class0.full_xon =
- DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR;
- /* Pause able */
- config_val->pauseable_th.pause_xoff =
- PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
- config_val->pauseable_th.pause_xon =
- PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
- config_val->pauseable_th.full_xoff =
- PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
- config_val->pauseable_th.full_xon =
- PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
- /* Non pause able*/
- config_val->non_pauseable_th.pause_xoff =
- PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.pause_xon =
- PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xoff =
- PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xon =
- PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
- } else if (CHIP_IS_E3B0(bp)) {
- /* Class0 defaults */
- config_val->default_class0.pause_xoff =
- DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR;
- config_val->default_class0.pause_xon =
- DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR;
- config_val->default_class0.full_xoff =
- DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR;
- config_val->default_class0.full_xon =
- DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR;
-
- if (params->phy[INT_PHY].flags &
- FLAGS_4_PORT_MODE) {
- config_val->pauseable_th.pause_xoff =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
- config_val->pauseable_th.pause_xon =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
- config_val->pauseable_th.full_xoff =
- PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
- config_val->pauseable_th.full_xon =
- PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
- /* Non pause able*/
- config_val->non_pauseable_th.pause_xoff =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.pause_xon =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xoff =
- PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xon =
- PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
- } else {
- config_val->pauseable_th.pause_xoff =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
- config_val->pauseable_th.pause_xon =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
- config_val->pauseable_th.full_xoff =
- PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
- config_val->pauseable_th.full_xon =
- PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
- /* Non pause able*/
- config_val->non_pauseable_th.pause_xoff =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.pause_xon =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xoff =
- PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xon =
- PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
- }
- } else
- return -EINVAL;
-
- return 0;
-}
-
-static void bnx2x_pfc_brb_get_e3b0_config_params(
- struct link_params *params,
- struct bnx2x_pfc_brb_e3b0_val
- *e3b0_val,
- struct bnx2x_nig_brb_pfc_port_params *pfc_params,
- const u8 pfc_enabled)
-{
- if (pfc_enabled && pfc_params) {
- e3b0_val->per_class_guaranty_mode = 1;
- e3b0_val->lb_guarantied_hyst = 80;
-
- if (params->phy[INT_PHY].flags &
- FLAGS_4_PORT_MODE) {
- e3b0_val->full_lb_xoff_th =
- PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
- e3b0_val->full_lb_xon_threshold =
- PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
- e3b0_val->lb_guarantied =
- PFC_E3B0_4P_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
- e3b0_val->mac_0_class_t_guarantied_hyst =
- PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
- e3b0_val->mac_1_class_t_guarantied =
- PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
- e3b0_val->mac_1_class_t_guarantied_hyst =
- PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
- } else {
- e3b0_val->full_lb_xoff_th =
- PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
- e3b0_val->full_lb_xon_threshold =
- PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
- e3b0_val->mac_0_class_t_guarantied_hyst =
- PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
- e3b0_val->mac_1_class_t_guarantied =
- PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
- e3b0_val->mac_1_class_t_guarantied_hyst =
- PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
-
- if (pfc_params->cos0_pauseable !=
- pfc_params->cos1_pauseable) {
- /* Nonpauseable= Lossy + pauseable = Lossless*/
- e3b0_val->lb_guarantied =
- PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
- } else if (pfc_params->cos0_pauseable) {
- /* Lossless +Lossless*/
- e3b0_val->lb_guarantied =
- PFC_E3B0_2P_PAUSE_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
- } else {
- /* Lossy +Lossy*/
- e3b0_val->lb_guarantied =
- PFC_E3B0_2P_NON_PAUSE_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
- }
- }
- } else {
- e3b0_val->per_class_guaranty_mode = 0;
- e3b0_val->lb_guarantied_hyst = 0;
- e3b0_val->full_lb_xoff_th =
- DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR;
- e3b0_val->full_lb_xon_threshold =
- DEFAULT_E3B0_BRB_FULL_LB_XON_THR;
- e3b0_val->lb_guarantied =
- DEFAULT_E3B0_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART;
- e3b0_val->mac_0_class_t_guarantied_hyst =
- DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST;
- e3b0_val->mac_1_class_t_guarantied =
- DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART;
- e3b0_val->mac_1_class_t_guarantied_hyst =
- DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST;
- }
-}
-static int bnx2x_update_pfc_brb(struct link_params *params,
- struct link_vars *vars,
- struct bnx2x_nig_brb_pfc_port_params
- *pfc_params)
-{
- struct bnx2x *bp = params->bp;
- struct bnx2x_pfc_brb_th_val config_val = { {0} };
- struct bnx2x_pfc_brb_threshold_val *reg_th_config =
- &config_val.pauseable_th;
- struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
- const int set_pfc = params->feature_config_flags &
- FEATURE_CONFIG_PFC_ENABLED;
- const u8 pfc_enabled = (set_pfc && pfc_params);
- int bnx2x_status = 0;
- u8 port = params->port;
-
- /* default - pause configuration */
- reg_th_config = &config_val.pauseable_th;
- bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
- if (bnx2x_status)
- return bnx2x_status;
-
- if (pfc_enabled) {
- /* First COS */
- if (pfc_params->cos0_pauseable)
- reg_th_config = &config_val.pauseable_th;
- else
- reg_th_config = &config_val.non_pauseable_th;
- } else
- reg_th_config = &config_val.default_class0;
- /* The number of free blocks below which the pause signal to class 0
- * of MAC #n is asserted. n=0,1
- */
- REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 :
- BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 ,
- reg_th_config->pause_xoff);
- /* The number of free blocks above which the pause signal to class 0
- * of MAC #n is de-asserted. n=0,1
- */
- REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 :
- BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon);
- /* The number of free blocks below which the full signal to class 0
- * of MAC #n is asserted. n=0,1
- */
- REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 :
- BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff);
- /* The number of free blocks above which the full signal to class 0
- * of MAC #n is de-asserted. n=0,1
- */
- REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
- BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon);
-
- if (pfc_enabled) {
- /* Second COS */
- if (pfc_params->cos1_pauseable)
- reg_th_config = &config_val.pauseable_th;
- else
- reg_th_config = &config_val.non_pauseable_th;
- } else
- reg_th_config = &config_val.default_class1;
- /* The number of free blocks below which the pause signal to
- * class 1 of MAC #n is asserted. n=0,1
- */
- REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
- BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
- reg_th_config->pause_xoff);
-
- /* The number of free blocks above which the pause signal to
- * class 1 of MAC #n is de-asserted. n=0,1
- */
- REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
- BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
- reg_th_config->pause_xon);
- /* The number of free blocks below which the full signal to
- * class 1 of MAC #n is asserted. n=0,1
- */
- REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
- BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
- reg_th_config->full_xoff);
- /* The number of free blocks above which the full signal to
- * class 1 of MAC #n is de-asserted. n=0,1
- */
- REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
- BRB1_REG_FULL_1_XON_THRESHOLD_0,
- reg_th_config->full_xon);
-
- if (CHIP_IS_E3B0(bp)) {
- bnx2x_pfc_brb_get_e3b0_config_params(
- params,
- &e3b0_val,
- pfc_params,
- pfc_enabled);
-
- REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE,
- e3b0_val.per_class_guaranty_mode);
-
- /* The hysteresis on the guarantied buffer space for the Lb
- * port before signaling XON.
- */
- REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST,
- e3b0_val.lb_guarantied_hyst);
-
- /* The number of free blocks below which the full signal to the
- * LB port is asserted.
- */
- REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
- e3b0_val.full_lb_xoff_th);
- /* The number of free blocks above which the full signal to the
- * LB port is de-asserted.
- */
- REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
- e3b0_val.full_lb_xon_threshold);
- /* The number of blocks guarantied for the MAC #n port. n=0,1
- */
-
- /* The number of blocks guarantied for the LB port. */
- REG_WR(bp, BRB1_REG_LB_GUARANTIED,
- e3b0_val.lb_guarantied);
-
- /* The number of blocks guarantied for the MAC #n port. */
- REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
- 2 * e3b0_val.mac_0_class_t_guarantied);
- REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
- 2 * e3b0_val.mac_1_class_t_guarantied);
- /* The number of blocks guarantied for class #t in MAC0. t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
- e3b0_val.mac_0_class_t_guarantied);
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
- e3b0_val.mac_0_class_t_guarantied);
- /* The hysteresis on the guarantied buffer space for class in
- * MAC0. t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
- e3b0_val.mac_0_class_t_guarantied_hyst);
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
- e3b0_val.mac_0_class_t_guarantied_hyst);
-
- /* The number of blocks guarantied for class #t in MAC1.t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
- e3b0_val.mac_1_class_t_guarantied);
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
- e3b0_val.mac_1_class_t_guarantied);
- /* The hysteresis on the guarantied buffer space for class #t
- * in MAC1. t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
- e3b0_val.mac_1_class_t_guarantied_hyst);
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
- e3b0_val.mac_1_class_t_guarantied_hyst);
- }
-
- return bnx2x_status;
-}
-
/******************************************************************************
* Description:
* This function is needed because NIG ARB_CREDIT_WEIGHT_X are
@@ -2529,16 +2087,6 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
port_mb[params->port].link_status), link_status);
}
-static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
-{
- struct bnx2x *bp = params->bp;
-
- if (bnx2x_eee_has_cap(params))
- REG_WR(bp, params->shmem2_base +
- offsetof(struct shmem2_region,
- eee_status[params->port]), eee_status);
-}
-
static void bnx2x_update_pfc_nig(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *nig_params)
@@ -2658,11 +2206,6 @@ int bnx2x_update_pfc(struct link_params *params,
/* Update NIG params */
bnx2x_update_pfc_nig(params, vars, pfc_params);
- /* Update BRB params */
- bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
- if (bnx2x_status)
- return bnx2x_status;
-
if (!vars->link_up)
return bnx2x_status;
@@ -2827,16 +2370,18 @@ static int bnx2x_bmac2_enable(struct link_params *params,
static int bnx2x_bmac_enable(struct link_params *params,
struct link_vars *vars,
- u8 is_lb)
+ u8 is_lb, u8 reset_bmac)
{
int rc = 0;
u8 port = params->port;
struct bnx2x *bp = params->bp;
u32 val;
/* Reset and unreset the BigMac */
- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- usleep_range(1000, 2000);
+ if (reset_bmac) {
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ usleep_range(1000, 2000);
+ }
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -2868,37 +2413,28 @@ static int bnx2x_bmac_enable(struct link_params *params,
return rc;
}
-static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
+static void bnx2x_set_bmac_rx(struct bnx2x *bp, u32 chip_id, u8 port, u8 en)
{
u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
NIG_REG_INGRESS_BMAC0_MEM;
u32 wb_data[2];
u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
+ if (CHIP_IS_E2(bp))
+ bmac_addr += BIGMAC2_REGISTER_BMAC_CONTROL;
+ else
+ bmac_addr += BIGMAC_REGISTER_BMAC_CONTROL;
/* Only if the bmac is out of reset */
if (REG_RD(bp, MISC_REG_RESET_REG_2) &
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
nig_bmac_enable) {
-
- if (CHIP_IS_E2(bp)) {
- /* Clear Rx Enable bit in BMAC_CONTROL register */
- REG_RD_DMAE(bp, bmac_addr +
- BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
- wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
- REG_WR_DMAE(bp, bmac_addr +
- BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
- } else {
- /* Clear Rx Enable bit in BMAC_CONTROL register */
- REG_RD_DMAE(bp, bmac_addr +
- BIGMAC_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ /* Set or clear the Rx Enable bit in the BMAC_CONTROL register */
+ REG_RD_DMAE(bp, bmac_addr, wb_data, 2);
+ if (en)
+ wb_data[0] |= BMAC_CONTROL_RX_ENABLE;
+ else
wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
- REG_WR_DMAE(bp, bmac_addr +
- BIGMAC_REGISTER_BMAC_CONTROL,
- wb_data, 2);
- }
+ REG_WR_DMAE(bp, bmac_addr, wb_data, 2);
usleep_range(1000, 2000);
}
}
@@ -3233,6 +2769,245 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
EMAC_MDIO_STATUS_10MB);
return rc;
}
+
+/******************************************************************/
+/* EEE section */
+/******************************************************************/
+static u8 bnx2x_eee_has_cap(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (REG_RD(bp, params->shmem2_base) <=
+ offsetof(struct shmem2_region, eee_status[params->port]))
+ return 0;
+
+ return 1;
+}
+
+static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
+{
+ switch (nvram_mode) {
+ case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
+ *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
+ break;
+ case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
+ *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
+ break;
+ case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
+ *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
+ break;
+ default:
+ *idle_timer = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
+{
+ switch (idle_timer) {
+ case EEE_MODE_NVRAM_BALANCED_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
+ break;
+ case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
+ break;
+ case EEE_MODE_NVRAM_LATENCY_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
+ break;
+ default:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
+ break;
+ }
+
+ return 0;
+}
+
+static u32 bnx2x_eee_calc_timer(struct link_params *params)
+{
+ u32 eee_mode, eee_idle;
+ struct bnx2x *bp = params->bp;
+
+ if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
+ if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+ /* time value in eee_mode --> used directly*/
+ eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
+ } else {
+ /* hsi value in eee_mode --> time */
+ if (bnx2x_eee_nvram_to_time(params->eee_mode &
+ EEE_MODE_NVRAM_MASK,
+ &eee_idle))
+ return 0;
+ }
+ } else {
+ /* hsi values in nvram --> time*/
+ eee_mode = ((REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].
+ eee_power_mode)) &
+ PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+ PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+
+ if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
+ return 0;
+ }
+
+ return eee_idle;
+}
+
+static int bnx2x_eee_set_timers(struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 eee_idle = 0, eee_mode;
+ struct bnx2x *bp = params->bp;
+
+ eee_idle = bnx2x_eee_calc_timer(params);
+
+ if (eee_idle) {
+ REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
+ eee_idle);
+ } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
+ (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
+ (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
+ DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
+ return -EINVAL;
+ }
+
+ vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
+ if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+ /* eee_idle in 1u --> eee_status in 16u */
+ eee_idle >>= 4;
+ vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
+ SHMEM_EEE_TIME_OUTPUT_BIT;
+ } else {
+ if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
+ return -EINVAL;
+ vars->eee_status |= eee_mode;
+ }
+
+ return 0;
+}
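
The `eee_idle >>= 4` above converts the idle timer from the 1-microsecond units used by the CPMU register into the 16-microsecond units that the shared-memory status field stores. A hedged, standalone sketch of that packing step (the mask and flag values are placeholders, not the driver's constants):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_TIMER_MASK      0x00ffffffu  /* illustrative, not the driver's constant */
#define EXAMPLE_TIME_OUTPUT_BIT 0x80000000u  /* illustrative */

/* Pack an idle time given in 1us units into a status word that uses 16us units. */
static uint32_t pack_idle_time_us(uint32_t idle_us)
{
	uint32_t in_16us = idle_us >> 4;        /* 1us units -> 16us units */
	return (in_16us & EXAMPLE_TIMER_MASK) | EXAMPLE_TIME_OUTPUT_BIT;
}

int main(void)
{
	/* 0x400 us packs as 0x40 ticks of 16us plus the "time output" flag. */
	printf("0x%08x\n", (unsigned)pack_idle_time_us(0x400));
	return 0;
}
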
+
+static int bnx2x_eee_initial_config(struct link_params *params,
+ struct link_vars *vars, u8 mode)
+{
+ vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT;
+
+ /* Propagate params' bits --> vars (for migration exposure) */
+ if (params->eee_mode & EEE_MODE_ENABLE_LPI)
+ vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
+ else
+ vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
+
+ if (params->eee_mode & EEE_MODE_ADV_LPI)
+ vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
+ else
+ vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
+
+ return bnx2x_eee_set_timers(params, vars);
+}
+
+static int bnx2x_eee_disable(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+
+ /* Make certain LPI is disabled */
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0);
+
+ vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+
+ return 0;
+}
+
+static int bnx2x_eee_advertise(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars, u8 modes)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val = 0;
+
+ /* Mask events preventing LPI generation */
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
+
+ if (modes & SHMEM_EEE_10G_ADV) {
+ DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
+ val |= 0x8;
+ }
+ if (modes & SHMEM_EEE_1G_ADV) {
+ DP(NETIF_MSG_LINK, "Advertise 1GBase-T EEE\n");
+ val |= 0x4;
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val);
+
+ vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+ vars->eee_status |= (modes << SHMEM_EEE_ADV_STATUS_SHIFT);
+
+ return 0;
+}
+
+static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (bnx2x_eee_has_cap(params))
+ REG_WR(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ eee_status[params->port]), eee_status);
+}
+
+static void bnx2x_eee_an_resolve(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 adv = 0, lp = 0;
+ u32 lp_adv = 0;
+ u8 neg = 0;
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp);
+
+ if (lp & 0x2) {
+ lp_adv |= SHMEM_EEE_100M_ADV;
+ if (adv & 0x2) {
+ if (vars->line_speed == SPEED_100)
+ neg = 1;
+ DP(NETIF_MSG_LINK, "EEE negotiated - 100M\n");
+ }
+ }
+ if (lp & 0x14) {
+ lp_adv |= SHMEM_EEE_1G_ADV;
+ if (adv & 0x14) {
+ if (vars->line_speed == SPEED_1000)
+ neg = 1;
+ DP(NETIF_MSG_LINK, "EEE negotiated - 1G\n");
+ }
+ }
+ if (lp & 0x68) {
+ lp_adv |= SHMEM_EEE_10G_ADV;
+ if (adv & 0x68) {
+ if (vars->line_speed == SPEED_10000)
+ neg = 1;
+ DP(NETIF_MSG_LINK, "EEE negotiated - 10G\n");
+ }
+ }
+
+ vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
+ vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+
+ if (neg) {
+ DP(NETIF_MSG_LINK, "EEE is active\n");
+ vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
+ }
+
+}
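
EEE resolution above follows the standard autonegotiation rule: a speed's EEE mode becomes active only when the corresponding bit group is set in both the local advertisement and the link partner's advertisement, and the negotiated line speed matches that group. The sketch below only illustrates that AND-style check; the mask names are assumptions, with values mirroring the groupings used above:

#include <stdint.h>

#define EEE_ADV_100M_MASK 0x0002 /* assumed grouping, mirrors the masks used above */
#define EEE_ADV_1G_MASK   0x0014
#define EEE_ADV_10G_MASK  0x0068

/* Returns nonzero when both sides advertise EEE for the given speed group. */
static int eee_resolves(uint16_t local_adv, uint16_t lp_adv, uint16_t speed_mask)
{
	return (local_adv & speed_mask) && (lp_adv & speed_mask);
}
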
+
/******************************************************************/
/* BSC access functions from E3 */
/******************************************************************/
@@ -3754,6 +3529,19 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
* init configuration, and set/clear SGMII flag. Internal
* phy init is done purely in phy_init stage.
*/
+
+static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+
+ DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_EEE_COMBO_CONTROL0, 0x7c);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
+}
+
static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars) {
@@ -4013,13 +3801,7 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL4_MISC3, 0x8080);
- /* Enable LPI pass through */
- DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_EEE_COMBO_CONTROL0,
- 0x7c);
- bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
+ bnx2x_warpcore_set_lpi_passthrough(phy, params);
/* 10G XFI Full Duplex */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -4116,6 +3898,8 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
+ bnx2x_warpcore_set_lpi_passthrough(phy, params);
+
if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
/* SGMII Autoneg */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4409,7 +4193,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
"serdes_net_if = 0x%x\n",
vars->line_speed, serdes_net_if);
bnx2x_set_aer_mmd(params, phy);
-
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
vars->phy_flags |= PHY_XGXS_FLAG;
if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) ||
(phy->req_line_speed &&
@@ -4718,6 +4502,10 @@ void bnx2x_link_status_update(struct link_params *params,
vars->link_status = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region,
port_mb[port].link_status));
+ if (bnx2x_eee_has_cap(params))
+ vars->eee_status = REG_RD(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ eee_status[params->port]));
vars->phy_flags = PHY_XGXS_FLAG;
bnx2x_sync_link(params, vars);
@@ -6530,25 +6318,21 @@ static int bnx2x_update_link_down(struct link_params *params,
usleep_range(10000, 20000);
/* Reset BigMac/Xmac */
if (CHIP_IS_E1x(bp) ||
- CHIP_IS_E2(bp)) {
- bnx2x_bmac_rx_disable(bp, params->port);
- REG_WR(bp, GRCBASE_MISC +
- MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- }
+ CHIP_IS_E2(bp))
+ bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0);
+
if (CHIP_IS_E3(bp)) {
/* Prevent LPI Generation by chip */
REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2),
0);
- REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2),
0);
vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
SHMEM_EEE_ACTIVE_BIT);
bnx2x_update_mng_eee(params, vars->eee_status);
- bnx2x_xmac_disable(params);
- bnx2x_umac_disable(params);
+ bnx2x_set_xmac_rxtx(params, 0);
+ bnx2x_set_umac_rxtx(params, 0);
}
return 0;
@@ -6600,7 +6384,7 @@ static int bnx2x_update_link_up(struct link_params *params,
if ((CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp))) {
if (link_10g) {
- if (bnx2x_bmac_enable(params, vars, 0) ==
+ if (bnx2x_bmac_enable(params, vars, 0, 1) ==
-ESRCH) {
DP(NETIF_MSG_LINK, "Found errors on BMAC\n");
vars->link_up = 0;
@@ -7207,6 +6991,22 @@ static void bnx2x_8073_set_pause_cl37(struct link_params *params,
msleep(500);
}
+static void bnx2x_8073_specific_func(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u32 action)
+{
+ struct bnx2x *bp = params->bp;
+ switch (action) {
+ case PHY_INIT:
+ /* Enable LASI */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004);
+ break;
+ }
+}
+
static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -7227,12 +7027,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
- /* Enable LASI */
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004);
-
+ bnx2x_8073_specific_func(phy, params, PHY_INIT);
bnx2x_8073_set_pause_cl37(params, phy, vars);
bnx2x_cl45_read(bp, phy,
@@ -8267,7 +8062,7 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
u32 action)
{
struct bnx2x *bp = params->bp;
-
+ u16 val;
switch (action) {
case DISABLE_TX:
bnx2x_sfp_set_transmitter(params, phy, 0);
@@ -8276,6 +8071,40 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
bnx2x_sfp_set_transmitter(params, phy, 1);
break;
+ case PHY_INIT:
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+ (1<<2) | (1<<5));
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+ 0);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0006);
+ /* Make MOD_ABS give interrupt on change */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ &val);
+ val |= (1<<12);
+ if (phy->flags & FLAGS_NOC)
+ val |= (3<<5);
+ /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
+ * status which reflects SFP+ module over-current
+ */
+ if (!(phy->flags & FLAGS_NOC))
+ val &= 0xff8f; /* Reset bits 4-6 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+ val);
+
+ /* Set 2-wire transfer rate of SFP+ module EEPROM
+ * to 100 kHz since some DACs (direct attached cables) do
+ * not work at 400 kHz.
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
+ 0xa001);
+ break;
default:
DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
action);
@@ -9058,28 +8887,15 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
struct link_vars *vars)
{
u32 tx_en_mode;
- u16 tmp1, val, mod_abs, tmp2;
- u16 rx_alarm_ctrl_val;
- u16 lasi_ctrl_val;
+ u16 tmp1, mod_abs, tmp2;
struct bnx2x *bp = params->bp;
/* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
bnx2x_wait_reset_complete(bp, phy, params);
- rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
- /* Should be 0x6 to enable XS on Tx side. */
- lasi_ctrl_val = 0x0006;
DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
- /* Enable LASI */
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
- rx_alarm_ctrl_val);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
- 0);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
+ bnx2x_8727_specific_func(phy, params, PHY_INIT);
/* Initially configure MOD_ABS to interrupt when module is
* present (bit 8)
*/
@@ -9095,25 +8911,9 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
-
/* Enable/Disable PHY transmitter output */
bnx2x_set_disable_pmd_transmit(params, phy, 0);
- /* Make MOD_ABS give interrupt on change */
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
- &val);
- val |= (1<<12);
- if (phy->flags & FLAGS_NOC)
- val |= (3<<5);
-
- /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
- * status which reflect SFP+ module over-current
- */
- if (!(phy->flags & FLAGS_NOC))
- val &= 0xff8f; /* Reset bits 4-6 */
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val);
-
bnx2x_8727_power_module(bp, phy, 1);
bnx2x_cl45_read(bp, phy,
@@ -9123,13 +8923,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
bnx2x_8727_config_speed(phy, params);
- /* Set 2-wire transfer rate of SFP+ module EEPROM
- * to 100Khz since some DACs(direct attached cables) do
- * not work at 400Khz.
- */
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
- 0xa001);
+
/* Set TX PreEmphasis if needed */
if ((params->feature_config_flags &
@@ -9558,6 +9352,29 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
0xFFFB, 0xFFFD);
}
+static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u32 action)
+{
+ struct bnx2x *bp = params->bp;
+ switch (action) {
+ case PHY_INIT:
+ if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ /* Save spirom version */
+ bnx2x_save_848xx_spirom_version(phy, bp, params->port);
+ }
+ /* This phy uses the NIG latch mechanism since link indication
+ * arrives through its LED4 and not via its LASI signal, so we
+ * get a steady signal instead of clear-on-read
+ */
+ bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
+ 1 << NIG_LATCH_BC_ENABLE_MI_INT);
+
+ bnx2x_848xx_set_led(bp, phy);
+ break;
+ }
+}
+
static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -9565,22 +9382,10 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val;
- if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
- /* Save spirom version */
- bnx2x_save_848xx_spirom_version(phy, bp, params->port);
- }
- /* This phy uses the NIG latch mechanism since link indication
- * arrives through its LED4 and not via its LASI signal, so we
- * get steady signal instead of clear on read
- */
- bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
- 1 << NIG_LATCH_BC_ENABLE_MI_INT);
-
+ bnx2x_848xx_specific_func(phy, params, PHY_INIT);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
- bnx2x_848xx_set_led(bp, phy);
-
/* set 1000 speed advertisement */
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
@@ -9887,39 +9692,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
return 0;
}
-static int bnx2x_8483x_eee_timers(struct link_params *params,
- struct link_vars *vars)
-{
- u32 eee_idle = 0, eee_mode;
- struct bnx2x *bp = params->bp;
-
- eee_idle = bnx2x_eee_calc_timer(params);
-
- if (eee_idle) {
- REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
- eee_idle);
- } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
- (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
- (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
- DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
- return -EINVAL;
- }
-
- vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
- if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
- /* eee_idle in 1u --> eee_status in 16u */
- eee_idle >>= 4;
- vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
- SHMEM_EEE_TIME_OUTPUT_BIT;
- } else {
- if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
- return -EINVAL;
- vars->eee_status |= eee_mode;
- }
-
- return 0;
-}
-
static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -9930,10 +9702,6 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
- /* Make Certain LPI is disabled */
- REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
- REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
-
/* Prevent Phy from working in EEE and advertising it */
rc = bnx2x_84833_cmd_hdlr(phy, params,
PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
@@ -9942,10 +9710,7 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
return rc;
}
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0);
- vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
-
- return 0;
+ return bnx2x_eee_disable(phy, params, vars);
}
static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
@@ -9956,8 +9721,6 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 cmd_args = 1;
- DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
-
rc = bnx2x_84833_cmd_hdlr(phy, params,
PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
if (rc) {
@@ -9965,15 +9728,7 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
return rc;
}
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x8);
-
- /* Mask events preventing LPI generation */
- REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
-
- vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
- vars->eee_status |= (SHMEM_EEE_10G_ADV << SHMEM_EEE_ADV_STATUS_SHIFT);
-
- return 0;
+ return bnx2x_eee_advertise(phy, params, vars, SHMEM_EEE_10G_ADV);
}
#define PHY84833_CONSTANT_LATENCY 1193
@@ -10105,22 +9860,10 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
MDIO_84833_TOP_CFG_FW_REV, &val);
/* Configure EEE support */
- if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && bnx2x_eee_has_cap(params)) {
- phy->flags |= FLAGS_EEE_10GBT;
- vars->eee_status |= SHMEM_EEE_10G_ADV <<
- SHMEM_EEE_SUPPORTED_SHIFT;
- /* Propogate params' bits --> vars (for migration exposure) */
- if (params->eee_mode & EEE_MODE_ENABLE_LPI)
- vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
- else
- vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
-
- if (params->eee_mode & EEE_MODE_ADV_LPI)
- vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
- else
- vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
-
- rc = bnx2x_8483x_eee_timers(params, vars);
+ if ((val >= MDIO_84833_TOP_CFG_FW_EEE) &&
+ (val != MDIO_84833_TOP_CFG_FW_NO_EEE) &&
+ bnx2x_eee_has_cap(params)) {
+ rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_10G_ADV);
if (rc) {
DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
bnx2x_8483x_disable_eee(phy, params, vars);
@@ -10139,7 +9882,6 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
return rc;
}
} else {
- phy->flags &= ~FLAGS_EEE_10GBT;
vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
}
@@ -10278,29 +10020,8 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
/* Determine if EEE was negotiated */
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
- u32 eee_shmem = 0;
-
- bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
- MDIO_AN_REG_EEE_ADV, &val1);
- bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
- MDIO_AN_REG_LP_EEE_ADV, &val2);
- if ((val1 & val2) & 0x8) {
- DP(NETIF_MSG_LINK, "EEE negotiated\n");
- vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
- }
-
- if (val2 & 0x12)
- eee_shmem |= SHMEM_EEE_100M_ADV;
- if (val2 & 0x4)
- eee_shmem |= SHMEM_EEE_1G_ADV;
- if (val2 & 0x68)
- eee_shmem |= SHMEM_EEE_10G_ADV;
-
- vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
- vars->eee_status |= (eee_shmem <<
- SHMEM_EEE_LP_ADV_STATUS_SHIFT);
- }
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ bnx2x_eee_an_resolve(phy, params, vars);
}
return link_up;
@@ -10569,6 +10290,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
/******************************************************************/
/* 54618SE PHY SECTION */
/******************************************************************/
+static void bnx2x_54618se_specific_func(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u32 action)
+{
+ struct bnx2x *bp = params->bp;
+ u16 temp;
+ switch (action) {
+ case PHY_INIT:
+ /* Configure LED4: set to INTR (0x6). */
+ /* Accessing shadow register 0xe. */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_LED_SEL2);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
+ temp &= ~(0xf << 4);
+ temp |= (0x6 << 4);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+ /* Configure INTR based on link status change. */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_INTR_MASK,
+ ~MDIO_REG_INTR_MASK_LINK_STATUS);
+ break;
+ }
+}
+
static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -10606,24 +10356,8 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
/* Wait for GPHY to reset */
msleep(50);
- /* Configure LED4: set to INTR (0x6). */
- /* Accessing shadow register 0xe. */
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_GPHY_SHADOW,
- MDIO_REG_GPHY_SHADOW_LED_SEL2);
- bnx2x_cl22_read(bp, phy,
- MDIO_REG_GPHY_SHADOW,
- &temp);
- temp &= ~(0xf << 4);
- temp |= (0x6 << 4);
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_GPHY_SHADOW,
- MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
- /* Configure INTR based on link status change. */
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_INTR_MASK,
- ~MDIO_REG_INTR_MASK_LINK_STATUS);
+ bnx2x_54618se_specific_func(phy, params, PHY_INIT);
/* Flip the signal detect polarity (set 0x1c.0x1e[8]). */
bnx2x_cl22_write(bp, phy,
MDIO_REG_GPHY_SHADOW,
@@ -10728,28 +10462,52 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Setting 10M force\n");
}
- /* Check if we should turn on Auto-GrEEEn */
- bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &temp);
- if (temp == MDIO_REG_GPHY_ID_54618SE) {
- if (params->feature_config_flags &
- FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
- temp = 6;
- DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n");
+ if ((phy->flags & FLAGS_EEE) && bnx2x_eee_has_cap(params)) {
+ int rc;
+
+ bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS,
+ MDIO_REG_GPHY_EXP_ACCESS_TOP |
+ MDIO_REG_GPHY_EXP_TOP_2K_BUF);
+ bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, &temp);
+ temp &= 0xfffe;
+ bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, temp);
+
+ rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_1G_ADV);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
+ bnx2x_eee_disable(phy, params, vars);
+ } else if ((params->eee_mode & EEE_MODE_ADV_LPI) &&
+ (phy->req_duplex == DUPLEX_FULL) &&
+ (bnx2x_eee_calc_timer(params) ||
+ !(params->eee_mode & EEE_MODE_ENABLE_LPI))) {
+ /* Need to advertise EEE only when requested,
+ * and either no LPI assertion was requested,
+ * or it was requested and a valid timer was set.
+ * Also notice full duplex is required for EEE.
+ */
+ bnx2x_eee_advertise(phy, params, vars,
+ SHMEM_EEE_1G_ADV);
} else {
- temp = 0;
- DP(NETIF_MSG_LINK, "Disabling Auto-GrEEEn\n");
+ DP(NETIF_MSG_LINK, "Don't Advertise 1GBase-T EEE\n");
+ bnx2x_eee_disable(phy, params, vars);
+ }
+ } else {
+ vars->eee_status &= ~(SHMEM_EEE_1G_ADV <<
+ SHMEM_EEE_SUPPORTED_SHIFT);
+
+ if (phy->flags & FLAGS_EEE) {
+ /* Handle legacy auto-grEEEn */
+ if (params->feature_config_flags &
+ FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
+ temp = 6;
+ DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n");
+ } else {
+ temp = 0;
+ DP(NETIF_MSG_LINK, "Don't Adv. EEE\n");
+ }
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_EEE_ADV, temp);
}
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_GPHY_CL45_ADDR_REG, MDIO_AN_DEVAD);
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_GPHY_CL45_DATA_REG,
- MDIO_REG_GPHY_EEE_ADV);
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_GPHY_CL45_ADDR_REG,
- (0x1 << 14) | MDIO_AN_DEVAD);
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_GPHY_CL45_DATA_REG,
- temp);
}
bnx2x_cl22_write(bp, phy,
@@ -10896,29 +10654,6 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n",
vars->line_speed);
- /* Report whether EEE is resolved. */
- bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &val);
- if (val == MDIO_REG_GPHY_ID_54618SE) {
- if (vars->link_status &
- LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
- val = 0;
- else {
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_GPHY_CL45_ADDR_REG,
- MDIO_AN_DEVAD);
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_GPHY_CL45_DATA_REG,
- MDIO_REG_GPHY_EEE_RESOLVED);
- bnx2x_cl22_write(bp, phy,
- MDIO_REG_GPHY_CL45_ADDR_REG,
- (0x1 << 14) | MDIO_AN_DEVAD);
- bnx2x_cl22_read(bp, phy,
- MDIO_REG_GPHY_CL45_DATA_REG,
- &val);
- }
- DP(NETIF_MSG_LINK, "EEE resolution: 0x%x\n", val);
- }
-
bnx2x_ext_phy_resolve_fc(phy, params, vars);
if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
@@ -10948,6 +10683,10 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
if (val & (1<<11))
vars->link_status |=
LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+
+ if ((phy->flags & FLAGS_EEE) &&
+ bnx2x_eee_has_cap(params))
+ bnx2x_eee_an_resolve(phy, params, vars);
}
}
return link_up;
@@ -11353,7 +11092,7 @@ static struct bnx2x_phy phy_8073 = {
.format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
.hw_reset = (hw_reset_t)NULL,
.set_link_led = (set_link_led_t)NULL,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func
};
static struct bnx2x_phy phy_8705 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
@@ -11546,7 +11285,7 @@ static struct bnx2x_phy phy_84823 = {
.format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
.hw_reset = (hw_reset_t)NULL,
.set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
};
static struct bnx2x_phy phy_84833 = {
@@ -11555,8 +11294,7 @@ static struct bnx2x_phy phy_84833 = {
.def_md_devad = 0,
.flags = (FLAGS_FAN_FAILURE_DET_REQ |
FLAGS_REARM_LATCH_SIGNAL |
- FLAGS_TX_ERROR_CHECK |
- FLAGS_EEE_10GBT),
+ FLAGS_TX_ERROR_CHECK),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -11582,7 +11320,7 @@ static struct bnx2x_phy phy_84833 = {
.format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
.hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
.set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
};
static struct bnx2x_phy phy_54618se = {
@@ -11616,7 +11354,7 @@ static struct bnx2x_phy phy_54618se = {
.format_fw_ver = (format_fw_ver_t)NULL,
.hw_reset = (hw_reset_t)NULL,
.set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led,
- .phy_specific_func = (phy_specific_func_t)NULL
+ .phy_specific_func = (phy_specific_func_t)bnx2x_54618se_specific_func
};
/*****************************************************************/
/* */
@@ -11862,6 +11600,8 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
*phy = phy_54618se;
+ if (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
+ phy->flags |= FLAGS_EEE;
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
*phy = phy_7101;
@@ -12141,7 +11881,7 @@ void bnx2x_init_bmac_loopback(struct link_params *params,
bnx2x_xgxs_deassert(params);
/* set bmac loopback */
- bnx2x_bmac_enable(params, vars, 1);
+ bnx2x_bmac_enable(params, vars, 1, 1);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
}
@@ -12233,7 +11973,7 @@ void bnx2x_init_xgxs_loopback(struct link_params *params,
if (USES_WARPCORE(bp))
bnx2x_xmac_enable(params, vars, 0);
else
- bnx2x_bmac_enable(params, vars, 0);
+ bnx2x_bmac_enable(params, vars, 0, 1);
}
if (params->loopback_mode == LOOPBACK_XGXS) {
@@ -12258,8 +11998,161 @@ void bnx2x_init_xgxs_loopback(struct link_params *params,
bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
}
+static void bnx2x_set_rx_filter(struct link_params *params, u8 en)
+{
+ struct bnx2x *bp = params->bp;
+ u8 val = en * 0x1F;
+
+ /* Open the gate between the NIG to the BRB */
+ if (!CHIP_IS_E1x(bp))
+ val |= en * 0x20;
+ REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val);
+
+ if (!CHIP_IS_E1(bp)) {
+ REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port*4,
+ en*0x3);
+ }
+
+ REG_WR(bp, (params->port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+ NIG_REG_LLH0_BRB1_NOT_MCP), en);
+}
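
bnx2x_set_rx_filter() builds its register values branchlessly: multiplying the 0/1 `en` flag by a mask yields either the full mask or zero, so the same write path serves both the open and the close case. A tiny hedged illustration of that idiom:

#include <stdint.h>

/* en is expected to be 0 or 1; en * mask is mask when enabled, 0 otherwise. */
static uint32_t filter_value(uint8_t en, int has_extra_bit)
{
	uint32_t val = en * 0x1Fu;
	if (has_extra_bit)      /* e.g. a chip family that exposes one more filter bit */
		val |= en * 0x20u;
	return val;
}
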
+static int bnx2x_avoid_link_flap(struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 phy_idx;
+ u32 dont_clear_stat, lfa_sts;
+ struct bnx2x *bp = params->bp;
+
+ /* Sync the link parameters */
+ bnx2x_link_status_update(params, vars);
+
+ /*
+ * The module verification was already done by the previous link owner,
+ * so this call is meant only to get a warning message
+ */
+
+ for (phy_idx = INT_PHY; phy_idx < params->num_phys; phy_idx++) {
+ struct bnx2x_phy *phy = &params->phy[phy_idx];
+ if (phy->phy_specific_func) {
+ DP(NETIF_MSG_LINK, "Calling PHY specific func\n");
+ phy->phy_specific_func(phy, params, PHY_INIT);
+ }
+ if ((phy->media_type == ETH_PHY_SFPP_10G_FIBER) ||
+ (phy->media_type == ETH_PHY_SFP_1G_FIBER) ||
+ (phy->media_type == ETH_PHY_DA_TWINAX))
+ bnx2x_verify_sfp_module(phy, params);
+ }
+ lfa_sts = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa,
+ lfa_sts));
+
+ dont_clear_stat = lfa_sts & SHMEM_LFA_DONT_CLEAR_STAT;
+
+ /* Re-enable the NIG/MAC */
+ if (CHIP_IS_E3(bp)) {
+ if (!dont_clear_stat) {
+ REG_WR(bp, GRCBASE_MISC +
+ MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
+ params->port));
+ REG_WR(bp, GRCBASE_MISC +
+ MISC_REGISTERS_RESET_REG_2_SET,
+ (MISC_REGISTERS_RESET_REG_2_MSTAT0 <<
+ params->port));
+ }
+ if (vars->line_speed < SPEED_10000)
+ bnx2x_umac_enable(params, vars, 0);
+ else
+ bnx2x_xmac_enable(params, vars, 0);
+ } else {
+ if (vars->line_speed < SPEED_10000)
+ bnx2x_emac_enable(params, vars, 0);
+ else
+ bnx2x_bmac_enable(params, vars, 0, !dont_clear_stat);
+ }
+
+ /* Increment LFA count */
+ lfa_sts = ((lfa_sts & ~LINK_FLAP_AVOIDANCE_COUNT_MASK) |
+ (((((lfa_sts & LINK_FLAP_AVOIDANCE_COUNT_MASK) >>
+ LINK_FLAP_AVOIDANCE_COUNT_OFFSET) + 1) & 0xff)
+ << LINK_FLAP_AVOIDANCE_COUNT_OFFSET));
+ /* Clear link flap reason */
+ lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;
+
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, lfa_sts), lfa_sts);
+
+ /* Disable NIG DRAIN */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+
+ /* Enable interrupts */
+ bnx2x_link_int_enable(params);
+ return 0;
+}
+
+static void bnx2x_cannot_avoid_link_flap(struct link_params *params,
+ struct link_vars *vars,
+ int lfa_status)
+{
+ u32 lfa_sts, cfg_idx, tmp_val;
+ struct bnx2x *bp = params->bp;
+
+ bnx2x_link_reset(params, vars, 1);
+
+ if (!params->lfa_base)
+ return;
+ /* Store the new link parameters */
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, req_duplex),
+ params->req_duplex[0] | (params->req_duplex[1] << 16));
+
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, req_flow_ctrl),
+ params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16));
+
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, req_line_speed),
+ params->req_line_speed[0] | (params->req_line_speed[1] << 16));
+
+ for (cfg_idx = 0; cfg_idx < SHMEM_LINK_CONFIG_SIZE; cfg_idx++) {
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa,
+ speed_cap_mask[cfg_idx]),
+ params->speed_cap_mask[cfg_idx]);
+ }
+
+ tmp_val = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config));
+ tmp_val &= ~REQ_FC_AUTO_ADV_MASK;
+ tmp_val |= params->req_fc_auto_adv;
+
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, additional_config), tmp_val);
+
+ lfa_sts = REG_RD(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, lfa_sts));
+
+ /* Clear the "Don't Clear Statistics" bit, and set reason */
+ lfa_sts &= ~SHMEM_LFA_DONT_CLEAR_STAT;
+
+ /* Set link flap reason */
+ lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK;
+ lfa_sts |= ((lfa_status & LFA_LINK_FLAP_REASON_MASK) <<
+ LFA_LINK_FLAP_REASON_OFFSET);
+
+ /* Increment link flap counter */
+ lfa_sts = ((lfa_sts & ~LINK_FLAP_COUNT_MASK) |
+ (((((lfa_sts & LINK_FLAP_COUNT_MASK) >>
+ LINK_FLAP_COUNT_OFFSET) + 1) & 0xff)
+ << LINK_FLAP_COUNT_OFFSET));
+ REG_WR(bp, params->lfa_base +
+ offsetof(struct shmem_lfa, lfa_sts), lfa_sts);
+ /* Proceed with regular link initialization */
+}
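
Both LFA counters above (the avoidance count and the flap count) are updated with the same read-modify-write idiom: extract the 8-bit field, increment it with wrap-around, and splice it back without touching the neighbouring bits. A standalone sketch of that helper, using placeholder mask/offset values rather than the driver's:

#include <stdint.h>
#include <assert.h>

/* Increment an 8-bit counter embedded in a status word at the given bit offset. */
static uint32_t inc_counter_field(uint32_t word, uint32_t mask, unsigned int offset)
{
	uint32_t count = ((word & mask) >> offset) + 1;
	return (word & ~mask) | ((count & 0xff) << offset);
}

int main(void)
{
	/* Counter lives in bits 16..23; 0xff wraps to 0, the other bits stay intact. */
	assert(inc_counter_field(0x00ff1234, 0x00ff0000, 16) == 0x00001234);
	return 0;
}
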
+
int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
{
+ int lfa_status;
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "Phy Initialization started\n");
DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
@@ -12274,6 +12167,19 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
vars->mac_type = MAC_TYPE_NONE;
vars->phy_flags = 0;
+ /* Driver opens NIG-BRB filters */
+ bnx2x_set_rx_filter(params, 1);
+ /* Check if link flap can be avoided */
+ lfa_status = bnx2x_check_lfa(params);
+
+ if (lfa_status == 0) {
+ DP(NETIF_MSG_LINK, "Link Flap Avoidance in progress\n");
+ return bnx2x_avoid_link_flap(params, vars);
+ }
+
+ DP(NETIF_MSG_LINK, "Cannot avoid link flap lfa_sta=0x%x\n",
+ lfa_status);
+ bnx2x_cannot_avoid_link_flap(params, vars, lfa_status);
/* Disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
@@ -12356,13 +12262,12 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
}
- /* Stop BigMac rx */
- if (!CHIP_IS_E3(bp))
- bnx2x_bmac_rx_disable(bp, port);
- else {
- bnx2x_xmac_disable(params);
- bnx2x_umac_disable(params);
- }
+ if (!CHIP_IS_E3(bp)) {
+ bnx2x_set_bmac_rx(bp, params->chip_id, port, 0);
+ } else {
+ bnx2x_set_xmac_rxtx(params, 0);
+ bnx2x_set_umac_rxtx(params, 0);
+ }
/* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
@@ -12420,6 +12325,56 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
vars->phy_flags = 0;
return 0;
}
+int bnx2x_lfa_reset(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ vars->link_up = 0;
+ vars->phy_flags = 0;
+ if (!params->lfa_base)
+ return bnx2x_link_reset(params, vars, 1);
+ /*
+ * Activate NIG drain so that during this time the device won't send
+ * anything while it is unable to respond.
+ */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
+
+ /*
+ * Gracefully close the gate from the BMAC to the NIG so that no half
+ * packets are passed.
+ */
+ if (!CHIP_IS_E3(bp))
+ bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0);
+
+ if (CHIP_IS_E3(bp)) {
+ bnx2x_set_xmac_rxtx(params, 0);
+ bnx2x_set_umac_rxtx(params, 0);
+ }
+ /* Wait 10ms for the pipe to clean up */
+ usleep_range(10000, 20000);
+
+ /* Clean the NIG-BRB using the network filters in a way that will
+ * not cut a packet in the middle.
+ */
+ bnx2x_set_rx_filter(params, 0);
+
+ /*
+ * Re-open the gate between the BMAC and the NIG, after verifying the
+ * gate to the BRB is closed, otherwise packets may arrive at the
+ * firmware before the driver has initialized it. The target is to achieve
+ * minimum management protocol down time.
+ */
+ if (!CHIP_IS_E3(bp))
+ bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 1);
+
+ if (CHIP_IS_E3(bp)) {
+ bnx2x_set_xmac_rxtx(params, 1);
+ bnx2x_set_umac_rxtx(params, 1);
+ }
+ /* Disable NIG drain */
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+ return 0;
+}
/****************************************************************************/
/* Common function */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 51cac8130051..9165b89a4b19 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -155,7 +155,7 @@ struct bnx2x_phy {
#define FLAGS_DUMMY_READ (1<<9)
#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
#define FLAGS_TX_ERROR_CHECK (1<<12)
-#define FLAGS_EEE_10GBT (1<<13)
+#define FLAGS_EEE (1<<13)
/* preemphasis values for the rx side */
u16 rx_preemphasis[4];
@@ -216,6 +216,7 @@ struct bnx2x_phy {
phy_specific_func_t phy_specific_func;
#define DISABLE_TX 1
#define ENABLE_TX 2
+#define PHY_INIT 3
};
/* Inputs parameters to the CLC */
@@ -304,6 +305,8 @@ struct link_params {
struct bnx2x *bp;
u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
req_flow_ctrl is set to AUTO */
+ u16 rsrv1;
+ u32 lfa_base;
};
/* Output parameters */
@@ -356,7 +359,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars);
to 0 */
int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
u8 reset_ext_phy);
-
+int bnx2x_lfa_reset(struct link_params *params, struct link_vars *vars);
/* bnx2x_link_update should be called upon link interrupt */
int bnx2x_link_update(struct link_params *params, struct link_vars *vars);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 0875ecfe3372..f7ed122f4071 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1162,14 +1162,9 @@ static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
static u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
- int pos;
u16 status;
- pos = pci_pcie_cap(dev);
- if (!pos)
- return false;
-
- pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
+ pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
return status & PCI_EXP_DEVSTA_TRPND;
}
@@ -2171,7 +2166,6 @@ void bnx2x_link_set(struct bnx2x *bp)
{
if (!BP_NOMCP(bp)) {
bnx2x_acquire_phy_lock(bp);
- bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
bnx2x_release_phy_lock(bp);
@@ -2184,12 +2178,19 @@ static void bnx2x__link_reset(struct bnx2x *bp)
{
if (!BP_NOMCP(bp)) {
bnx2x_acquire_phy_lock(bp);
- bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+ bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
bnx2x_release_phy_lock(bp);
} else
BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
+void bnx2x_force_link_reset(struct bnx2x *bp)
+{
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+ bnx2x_release_phy_lock(bp);
+}
+
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
u8 rc = 0;
@@ -6135,8 +6136,7 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
u16 devctl;
int r_order, w_order;
- pci_read_config_word(bp->pdev,
- pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl);
+ pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
if (bp->mrrs == -1)
@@ -6757,7 +6757,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
u32 low, high;
u32 val;
- bnx2x__link_reset(bp);
DP(NETIF_MSG_HW, "starting port init port %d\n", port);
@@ -8250,12 +8249,15 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
* bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
*
* @bp: driver handle
+ * @keep_link: true iff link should be kept up
*/
-void bnx2x_send_unload_done(struct bnx2x *bp)
+void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
{
+ u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
+
/* Report UNLOAD_DONE to MCP */
if (!BP_NOMCP(bp))
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
}
static int bnx2x_func_wait_started(struct bnx2x *bp)
@@ -8324,7 +8326,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
return 0;
}
-void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
{
int port = BP_PORT(bp);
int i, rc = 0;
@@ -8446,7 +8448,7 @@ unload_error:
/* Report UNLOAD_DONE to MCP */
- bnx2x_send_unload_done(bp);
+ bnx2x_send_unload_done(bp, keep_link);
}
void bnx2x_disable_close_the_gate(struct bnx2x *bp)
@@ -8858,7 +8860,8 @@ int bnx2x_leader_reset(struct bnx2x *bp)
* driver is owner of the HW
*/
if (!global && !BP_NOMCP(bp)) {
- load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
+ load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
+ DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
if (!load_code) {
BNX2X_ERR("MCP response failure, aborting\n");
rc = -EAGAIN;
@@ -8964,7 +8967,7 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
/* Stop the driver */
/* If interface has been removed - break */
- if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
+ if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
return;
bp->recovery_state = BNX2X_RECOVERY_WAIT;
@@ -9130,7 +9133,7 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
bp->sp_rtnl_state = 0;
smp_mb();
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
bnx2x_nic_load(bp, LOAD_NORMAL);
goto sp_rtnl_exit;
@@ -9316,7 +9319,8 @@ static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port,
static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp)
{
- u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+ u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
+ DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
if (!rc) {
BNX2X_ERR("MCP response failure, aborting\n");
return -EBUSY;
@@ -9380,7 +9384,7 @@ static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
static int __devinit bnx2x_do_flr(struct bnx2x *bp)
{
- int i, pos;
+ int i;
u16 status;
struct pci_dev *dev = bp->pdev;
@@ -9397,16 +9401,12 @@ static int __devinit bnx2x_do_flr(struct bnx2x *bp)
return -EINVAL;
}
- pos = pci_pcie_cap(dev);
- if (!pos)
- return -ENOTTY;
-
/* Wait for Transaction Pending bit clean */
for (i = 0; i < 4; i++) {
if (i)
msleep((1 << (i - 1)) * 100);
- pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
+ pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
if (!(status & PCI_EXP_DEVSTA_TRPND))
goto clear;
}
@@ -11010,7 +11010,7 @@ static int bnx2x_close(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
/* Unload the driver, release IRQs */
- bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+ bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
/* Power off */
bnx2x_set_power_state(bp, PCI_D3hot);
@@ -12167,7 +12167,7 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
rtnl_unlock();
}
-static struct pci_error_handlers bnx2x_err_handler = {
+static const struct pci_error_handlers bnx2x_err_handler = {
.error_detected = bnx2x_io_error_detected,
.slot_reset = bnx2x_io_slot_reset,
.resume = bnx2x_io_resume,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 28a0bcfe61ff..1b1999d34c71 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -4949,6 +4949,10 @@
#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13)
#define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0)
#define UMAC_REG_COMMAND_CONFIG 0x8
+/* [RW 16] This is the duration for which the MAC must wait to go back to the
+ * ACTIVE state from the LPI state when it receives a packet for transmission.
+ * The decrement unit is 1 microsecond. */
+#define UMAC_REG_EEE_WAKE_TIMER 0x6c
/* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers
* to bit 17 of the MAC address etc. */
#define UMAC_REG_MAC_ADDR0 0xc
@@ -4958,6 +4962,8 @@
/* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive
* logic to check frames. */
#define UMAC_REG_MAXFR 0x14
+#define UMAC_REG_UMAC_EEE_CTRL 0x64
+#define UMAC_UMAC_EEE_CTRL_REG_EEE_EN (0x1<<3)
/* [RW 8] The event id for aggregated interrupt 0 */
#define USDM_REG_AGG_INT_EVENT_0 0xc4038
#define USDM_REG_AGG_INT_EVENT_1 0xc403c
@@ -6992,6 +6998,7 @@ Theotherbitsarereservedandshouldbezero*/
/* BCM84833 only */
#define MDIO_84833_TOP_CFG_FW_REV 0x400f
#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1
+#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81
#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
#define MDIO_84833_SUPER_ISOLATE 0x8000
/* These are mailbox register set used by 84833. */
@@ -7160,10 +7167,11 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_REG_GPHY_ID_54618SE 0x5cd5
#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd
#define MDIO_REG_GPHY_CL45_DATA_REG 0xe
-#define MDIO_REG_GPHY_EEE_ADV 0x3c
-#define MDIO_REG_GPHY_EEE_1G (0x1 << 2)
-#define MDIO_REG_GPHY_EEE_100 (0x1 << 1)
#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
+#define MDIO_REG_GPHY_EXP_ACCESS_GATE 0x15
+#define MDIO_REG_GPHY_EXP_ACCESS 0x17
+#define MDIO_REG_GPHY_EXP_ACCESS_TOP 0xd00
+#define MDIO_REG_GPHY_EXP_TOP_2K_BUF 0x40
#define MDIO_REG_GPHY_AUX_STATUS 0x19
#define MDIO_REG_INTR_STATUS 0x1a
#define MDIO_REG_INTR_MASK 0x1b
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 62f754bd0dfe..71971a161bd1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -229,8 +229,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
*/
list_add_tail(&spacer.link, &o->pending_comp);
mb();
- list_del(&elem->link);
- list_add_tail(&elem->link, &o->pending_comp);
+ list_move_tail(&elem->link, &o->pending_comp);
list_del(&spacer.link);
} else
break;
@@ -5620,7 +5619,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
memset(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data with provided parameters */
- rdata->function_mode = cpu_to_le16(start_params->mf_mode);
+ rdata->function_mode = (u8)start_params->mf_mode;
rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
rdata->path_id = BP_PATH(bp);
rdata->network_cos_mode = start_params->network_cos_mode;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index f83e033da6da..acf2fe4ca608 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -1321,7 +1321,7 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
* the current command will be enqueued to the tail of the
* pending commands list.
*
- * Return: 0 is operation was sucessfull and there are no pending completions,
+ * Return: 0 if operation was successful and there are no pending completions,
* negative if there were errors, positive if there are pending
* completions.
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index a1d0446b39b3..348ed02d3c69 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -39,14 +39,39 @@ static inline long bnx2x_hilo(u32 *hiref)
#endif
}
-static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
+static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
{
- u16 res = sizeof(struct host_port_stats) >> 2;
+ u16 res = 0;
- /* if PFC stats are not supported by the MFW, don't DMA them */
- if (!(bp->flags & BC_SUPPORTS_PFC_STATS))
- res -= (sizeof(u32)*4) >> 2;
+ /* 'newest' convention - shmem2 contains the size of the port stats */
+ if (SHMEM2_HAS(bp, sizeof_port_stats)) {
+ u32 size = SHMEM2_RD(bp, sizeof_port_stats);
+ if (size)
+ res = size;
+ /* prevent newer BC from causing buffer overflow */
+ if (res > sizeof(struct host_port_stats))
+ res = sizeof(struct host_port_stats);
+ }
+
+ /* Older convention - all BCs support the port stats' fields up until
+ * the 'not_used' field
+ */
+ if (!res) {
+ res = offsetof(struct host_port_stats, not_used) + 4;
+
+ /* if PFC stats are supported by the MFW, DMA them as well */
+ if (bp->flags & BC_SUPPORTS_PFC_STATS) {
+ res += offsetof(struct host_port_stats,
+ pfc_frames_rx_lo) -
+ offsetof(struct host_port_stats,
+ pfc_frames_tx_hi) + 4;
+ }
+ }
+
+ res >>= 2;
+
+ WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
return res;
}
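
The reworked bnx2x_get_port_stats_dma_len() computes the DMA length in bytes with offsetof(), clamps it to the local structure size so a newer bootcode can never make the driver copy past its own buffer, and only then converts to 32-bit words. A hedged sketch of the same clamp-then-convert flow over a made-up stats layout (field names are illustrative):

#include <stddef.h>
#include <stdint.h>

struct example_port_stats {
	uint32_t rx_bytes_hi, rx_bytes_lo;
	uint32_t tx_bytes_hi, tx_bytes_lo;
	uint32_t not_used;
	uint32_t pfc_frames_tx_hi, pfc_frames_tx_lo;
	uint32_t pfc_frames_rx_hi, pfc_frames_rx_lo;
};

/* firmware_len is what the firmware reports; 0 means "fall back to the legacy layout". */
static uint16_t stats_dma_len_dwords(uint32_t firmware_len, int has_pfc)
{
	uint32_t res = firmware_len;

	if (res > sizeof(struct example_port_stats))   /* never trust a larger value */
		res = sizeof(struct example_port_stats);
	if (!res) {
		res = offsetof(struct example_port_stats, not_used) + 4;
		if (has_pfc)                           /* PFC fields follow in this layout */
			res = offsetof(struct example_port_stats, pfc_frames_rx_lo) + 4;
	}
	return (uint16_t)(res >> 2);                   /* bytes -> 32-bit words */
}
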
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 3b4fc61f24cf..cc8434fd606e 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -823,10 +823,8 @@ static void cnic_free_context(struct cnic_dev *dev)
}
}
-static void __cnic_free_uio(struct cnic_uio_dev *udev)
+static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
{
- uio_unregister_device(&udev->cnic_uinfo);
-
if (udev->l2_buf) {
dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
udev->l2_buf, udev->l2_buf_map);
@@ -839,6 +837,14 @@ static void __cnic_free_uio(struct cnic_uio_dev *udev)
udev->l2_ring = NULL;
}
+}
+
+static void __cnic_free_uio(struct cnic_uio_dev *udev)
+{
+ uio_unregister_device(&udev->cnic_uinfo);
+
+ __cnic_free_uio_rings(udev);
+
pci_dev_put(udev->pdev);
kfree(udev);
}
@@ -862,6 +868,8 @@ static void cnic_free_resc(struct cnic_dev *dev)
if (udev) {
udev->dev = NULL;
cp->udev = NULL;
+ if (udev->uio_dev == -1)
+ __cnic_free_uio_rings(udev);
}
cnic_free_context(dev);
@@ -996,6 +1004,34 @@ static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
return 0;
}
+static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
+{
+ struct cnic_local *cp = udev->dev->cnic_priv;
+
+ if (udev->l2_ring)
+ return 0;
+
+ udev->l2_ring_size = pages * BCM_PAGE_SIZE;
+ udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
+ &udev->l2_ring_map,
+ GFP_KERNEL | __GFP_COMP);
+ if (!udev->l2_ring)
+ return -ENOMEM;
+
+ udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
+ udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
+ udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
+ &udev->l2_buf_map,
+ GFP_KERNEL | __GFP_COMP);
+ if (!udev->l2_buf) {
+ __cnic_free_uio_rings(udev);
+ return -ENOMEM;
+ }
+
+ return 0;
+
+}
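
__cnic_alloc_uio_rings() is deliberately idempotent — the early return when l2_ring is already set lets the device-reuse path call it unconditionally — and it unwinds the ring allocation if the buffer allocation fails. A userspace sketch of the same shape, with plain malloc/free standing in for the coherent DMA calls:

#include <stdlib.h>

struct rings {
	void *ring;
	void *buf;
};

/* Idempotent allocate-both-or-nothing helper: safe to call on an already
 * populated structure, and leaves it untouched on failure.
 */
static int alloc_rings(struct rings *r, size_t ring_size, size_t buf_size)
{
	if (r->ring)
		return 0;               /* already allocated by a previous user */

	r->ring = malloc(ring_size);
	if (!r->ring)
		return -1;

	r->buf = malloc(buf_size);
	if (!r->buf) {
		free(r->ring);          /* unwind the first allocation */
		r->ring = NULL;
		return -1;
	}
	return 0;
}
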
+
static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
struct cnic_local *cp = dev->cnic_priv;
@@ -1005,6 +1041,11 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
list_for_each_entry(udev, &cnic_udev_list, list) {
if (udev->pdev == dev->pcidev) {
udev->dev = dev;
+ if (__cnic_alloc_uio_rings(udev, pages)) {
+ udev->dev = NULL;
+ read_unlock(&cnic_dev_lock);
+ return -ENOMEM;
+ }
cp->udev = udev;
read_unlock(&cnic_dev_lock);
return 0;
@@ -1020,20 +1061,9 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
udev->dev = dev;
udev->pdev = dev->pcidev;
- udev->l2_ring_size = pages * BCM_PAGE_SIZE;
- udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
- &udev->l2_ring_map,
- GFP_KERNEL | __GFP_COMP);
- if (!udev->l2_ring)
- goto err_udev;
- udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
- udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
- udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
- &udev->l2_buf_map,
- GFP_KERNEL | __GFP_COMP);
- if (!udev->l2_buf)
- goto err_dma;
+ if (__cnic_alloc_uio_rings(udev, pages))
+ goto err_udev;
write_lock(&cnic_dev_lock);
list_add(&udev->list, &cnic_udev_list);
@@ -1044,9 +1074,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
cp->udev = udev;
return 0;
- err_dma:
- dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
- udev->l2_ring, udev->l2_ring_map);
+
err_udev:
kfree(udev);
return -ENOMEM;
@@ -1260,7 +1288,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
if (ret)
goto error;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (CNIC_SUPPORTS_FCOE(cp)) {
ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
if (ret)
goto error;
@@ -1275,6 +1303,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
if (ret)
goto error;
+ if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
+ return 0;
+
cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
cp->l2_rx_ring_size = 15;
@@ -3050,6 +3081,22 @@ static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
IGU_INT_DISABLE, 0);
}
+static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+
+ cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
+ IGU_INT_ENABLE, 1);
+}
+
+static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+
+ cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
+ IGU_INT_ENABLE, 1);
+}
+
static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
u32 last_status = *info->status_idx_ptr;
@@ -3086,9 +3133,8 @@ static void cnic_service_bnx2x_bh(unsigned long data)
CNIC_WR16(dev, cp->kcq1.io_addr,
cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
- if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
- cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
- status_idx, IGU_INT_ENABLE, 1);
+ if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE) {
+ cp->arm_int(dev, status_idx);
break;
}
@@ -4845,6 +4891,9 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
buf_map = udev->l2_buf_map;
for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
struct eth_tx_start_bd *start_bd = &txbd->start_bd;
+ struct eth_tx_parse_bd_e1x *pbd_e1x =
+ &((txbd + 1)->parse_bd_e1x);
+ struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
@@ -4854,10 +4903,15 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
start_bd->nbytes = cpu_to_le16(0x10);
start_bd->nbd = cpu_to_le16(3);
start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
- start_bd->general_data = (UNICAST_ADDRESS <<
- ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
+ start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+ pbd_e2->parsing_data = (UNICAST_ADDRESS <<
+ ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
+ else
+ pbd_e1x->global_data = (UNICAST_ADDRESS <<
+ ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
}
val = (u64) ring_map >> 32;
@@ -5308,7 +5362,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
/* Need to wait for the ring shutdown event to complete
* before clearing the CNIC_UP flag.
*/
- while (cp->udev->uio_dev != -1 && i < 15) {
+ while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
msleep(100);
i++;
}
@@ -5473,8 +5527,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
- !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+ if (CNIC_SUPPORTS_FCOE(cp))
cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
@@ -5492,10 +5545,13 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
cp->stop_cm = cnic_cm_stop_bnx2x_hw;
cp->enable_int = cnic_enable_bnx2x_int;
cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
cp->ack_int = cnic_ack_bnx2x_e2_msix;
- else
+ cp->arm_int = cnic_arm_bnx2x_e2_msix;
+ } else {
cp->ack_int = cnic_ack_bnx2x_msix;
+ cp->arm_int = cnic_arm_bnx2x_msix;
+ }
cp->close_conn = cnic_close_bnx2x_conn;
return cdev;
}
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 30328097f516..148604c3fa0c 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -334,6 +334,7 @@ struct cnic_local {
void (*enable_int)(struct cnic_dev *);
void (*disable_int_sync)(struct cnic_dev *);
void (*ack_int)(struct cnic_dev *);
+ void (*arm_int)(struct cnic_dev *, u32 index);
void (*close_conn)(struct cnic_sock *, u32 opcode);
};
@@ -474,6 +475,10 @@ struct bnx2x_bd_chain_next {
MAX_STAT_COUNTER_ID_E1))
#endif
+#define CNIC_SUPPORTS_FCOE(cp) \
+ (BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id) && \
+ !((cp)->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+
#define CNIC_RAMROD_TMO (HZ / 4)
#endif
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index 382c98b0cc0c..ede3db35d757 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -896,7 +896,7 @@ struct tstorm_tcp_tcp_ag_context_section {
u32 snd_nxt;
u32 rtt_seq;
u32 rtt_time;
- u32 __reserved66;
+ u32 wnd_right_edge_local;
u32 wnd_right_edge;
u32 tcp_agg_vars1;
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 5cb88881bba1..865095aad1f6 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -14,8 +14,8 @@
#include "bnx2x/bnx2x_mfw_req.h"
-#define CNIC_MODULE_VERSION "2.5.12"
-#define CNIC_MODULE_RELDATE "June 29, 2012"
+#define CNIC_MODULE_VERSION "2.5.14"
+#define CNIC_MODULE_RELDATE "Sep 30, 2012"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index bf906c51d82a..46280ba4c5d4 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -44,10 +44,8 @@
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
-#if IS_ENABLED(CONFIG_HWMON)
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-#endif
#include <net/checksum.h>
#include <net/ip.h>
@@ -92,10 +90,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 124
+#define TG3_MIN_NUM 125
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "March 21, 2012"
+#define DRV_MODULE_RELDATE "September 26, 2012"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -3653,17 +3651,9 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tg3_enable_register_access(tp);
/* Restore the CLKREQ setting. */
- if (tg3_flag(tp, CLKREQ_BUG)) {
- u16 lnkctl;
-
- pci_read_config_word(tp->pdev,
- pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
- &lnkctl);
- lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
- pci_write_config_word(tp->pdev,
- pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
- lnkctl);
- }
+ if (tg3_flag(tp, CLKREQ_BUG))
+ pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CLKREQ_EN);
misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
tw32(TG3PCI_MISC_HOST_CTRL,
@@ -4434,20 +4424,13 @@ relink:
/* Prevent send BD corruption. */
if (tg3_flag(tp, CLKREQ_BUG)) {
- u16 oldlnkctl, newlnkctl;
-
- pci_read_config_word(tp->pdev,
- pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
- &oldlnkctl);
if (tp->link_config.active_speed == SPEED_100 ||
tp->link_config.active_speed == SPEED_10)
- newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
+ pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CLKREQ_EN);
else
- newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
- if (newlnkctl != oldlnkctl)
- pci_write_config_word(tp->pdev,
- pci_pcie_cap(tp->pdev) +
- PCI_EXP_LNKCTL, newlnkctl);
+ pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CLKREQ_EN);
}
if (current_link_up != netif_carrier_ok(tp->dev)) {
@@ -6278,7 +6261,7 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
tp->rx_refill = false;
- for (i = 1; i < tp->irq_cnt; i++)
+ for (i = 1; i <= tp->rxq_cnt; i++)
err |= tg3_rx_prodring_xfer(tp, dpr,
&tp->napi[i].prodring);
@@ -7607,15 +7590,11 @@ static int tg3_init_rings(struct tg3 *tp)
return 0;
}
-/*
- * Must not be invoked with interrupt sources disabled and
- * the hardware shutdown down.
- */
-static void tg3_free_consistent(struct tg3 *tp)
+static void tg3_mem_tx_release(struct tg3 *tp)
{
int i;
- for (i = 0; i < tp->irq_cnt; i++) {
+ for (i = 0; i < tp->irq_max; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
if (tnapi->tx_ring) {
@@ -7626,17 +7605,114 @@ static void tg3_free_consistent(struct tg3 *tp)
kfree(tnapi->tx_buffers);
tnapi->tx_buffers = NULL;
+ }
+}
- if (tnapi->rx_rcb) {
- dma_free_coherent(&tp->pdev->dev,
- TG3_RX_RCB_RING_BYTES(tp),
- tnapi->rx_rcb,
- tnapi->rx_rcb_mapping);
- tnapi->rx_rcb = NULL;
- }
+static int tg3_mem_tx_acquire(struct tg3 *tp)
+{
+ int i;
+ struct tg3_napi *tnapi = &tp->napi[0];
+
+ /* If multivector TSS is enabled, vector 0 does not handle
+ * tx interrupts. Don't allocate any resources for it.
+ */
+ if (tg3_flag(tp, ENABLE_TSS))
+ tnapi++;
+
+ for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
+ tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
+ TG3_TX_RING_SIZE, GFP_KERNEL);
+ if (!tnapi->tx_buffers)
+ goto err_out;
+
+ tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_TX_RING_BYTES,
+ &tnapi->tx_desc_mapping,
+ GFP_KERNEL);
+ if (!tnapi->tx_ring)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ tg3_mem_tx_release(tp);
+ return -ENOMEM;
+}
+
+static void tg3_mem_rx_release(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < tp->irq_max; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
tg3_rx_prodring_fini(tp, &tnapi->prodring);
+ if (!tnapi->rx_rcb)
+ continue;
+
+ dma_free_coherent(&tp->pdev->dev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ tnapi->rx_rcb,
+ tnapi->rx_rcb_mapping);
+ tnapi->rx_rcb = NULL;
+ }
+}
+
+static int tg3_mem_rx_acquire(struct tg3 *tp)
+{
+ unsigned int i, limit;
+
+ limit = tp->rxq_cnt;
+
+ /* If RSS is enabled, we need a (dummy) producer ring
+ * set on vector zero. This is the true hw prodring.
+ */
+ if (tg3_flag(tp, ENABLE_RSS))
+ limit++;
+
+ for (i = 0; i < limit; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ if (tg3_rx_prodring_init(tp, &tnapi->prodring))
+ goto err_out;
+
+ /* If multivector RSS is enabled, vector 0
+ * does not handle rx or tx interrupts.
+ * Don't allocate any resources for it.
+ */
+ if (!i && tg3_flag(tp, ENABLE_RSS))
+ continue;
+
+ tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ &tnapi->rx_rcb_mapping,
+ GFP_KERNEL);
+ if (!tnapi->rx_rcb)
+ goto err_out;
+
+ memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+ }
+
+ return 0;
+
+err_out:
+ tg3_mem_rx_release(tp);
+ return -ENOMEM;
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shutdown down.
+ */
+static void tg3_free_consistent(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
if (tnapi->hw_status) {
dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
tnapi->hw_status,
@@ -7645,6 +7721,9 @@ static void tg3_free_consistent(struct tg3 *tp)
}
}
+ tg3_mem_rx_release(tp);
+ tg3_mem_tx_release(tp);
+
if (tp->hw_stats) {
dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
tp->hw_stats, tp->stats_mapping);
@@ -7683,72 +7762,38 @@ static int tg3_alloc_consistent(struct tg3 *tp)
memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
sblk = tnapi->hw_status;
- if (tg3_rx_prodring_init(tp, &tnapi->prodring))
- goto err_out;
+ if (tg3_flag(tp, ENABLE_RSS)) {
+ u16 *prodptr = 0;
- /* If multivector TSS is enabled, vector 0 does not handle
- * tx interrupts. Don't allocate any resources for it.
- */
- if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
- (i && tg3_flag(tp, ENABLE_TSS))) {
- tnapi->tx_buffers = kzalloc(
- sizeof(struct tg3_tx_ring_info) *
- TG3_TX_RING_SIZE, GFP_KERNEL);
- if (!tnapi->tx_buffers)
- goto err_out;
-
- tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
- TG3_TX_RING_BYTES,
- &tnapi->tx_desc_mapping,
- GFP_KERNEL);
- if (!tnapi->tx_ring)
- goto err_out;
- }
-
- /*
- * When RSS is enabled, the status block format changes
- * slightly. The "rx_jumbo_consumer", "reserved",
- * and "rx_mini_consumer" members get mapped to the
- * other three rx return ring producer indexes.
- */
- switch (i) {
- default:
- if (tg3_flag(tp, ENABLE_RSS)) {
- tnapi->rx_rcb_prod_idx = NULL;
+ /*
+ * When RSS is enabled, the status block format changes
+ * slightly. The "rx_jumbo_consumer", "reserved",
+ * and "rx_mini_consumer" members get mapped to the
+ * other three rx return ring producer indexes.
+ */
+ switch (i) {
+ case 1:
+ prodptr = &sblk->idx[0].rx_producer;
+ break;
+ case 2:
+ prodptr = &sblk->rx_jumbo_consumer;
+ break;
+ case 3:
+ prodptr = &sblk->reserved;
+ break;
+ case 4:
+ prodptr = &sblk->rx_mini_consumer;
break;
}
- /* Fall through */
- case 1:
+ tnapi->rx_rcb_prod_idx = prodptr;
+ } else {
tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
- break;
- case 2:
- tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
- break;
- case 3:
- tnapi->rx_rcb_prod_idx = &sblk->reserved;
- break;
- case 4:
- tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
- break;
}
-
- /*
- * If multivector RSS is enabled, vector 0 does not handle
- * rx or tx interrupts. Don't allocate any resources for it.
- */
- if (!i && tg3_flag(tp, ENABLE_RSS))
- continue;
-
- tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
- TG3_RX_RCB_RING_BYTES(tp),
- &tnapi->rx_rcb_mapping,
- GFP_KERNEL);
- if (!tnapi->rx_rcb)
- goto err_out;
-
- memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
}
+ if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
+ goto err_out;
+
return 0;
err_out:
@@ -8054,7 +8099,7 @@ static int tg3_chip_reset(struct tg3 *tp)
udelay(120);
- if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
+ if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
u16 val16;
if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
@@ -8071,24 +8116,17 @@ static int tg3_chip_reset(struct tg3 *tp)
}
/* Clear the "no snoop" and "relaxed ordering" bits. */
- pci_read_config_word(tp->pdev,
- pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
- &val16);
- val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
- PCI_EXP_DEVCTL_NOSNOOP_EN);
+ val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
/*
* Older PCIe devices only support the 128 byte
* MPS setting. Enforce the restriction.
*/
if (!tg3_flag(tp, CPMU_PRESENT))
- val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
- pci_write_config_word(tp->pdev,
- pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
- val16);
+ val16 |= PCI_EXP_DEVCTL_PAYLOAD;
+ pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
/* Clear error status */
- pci_write_config_word(tp->pdev,
- pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
+ pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
PCI_EXP_DEVSTA_CED |
PCI_EXP_DEVSTA_NFED |
PCI_EXP_DEVSTA_FED |
@@ -8269,9 +8307,10 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
nic_addr);
}
-static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+
+static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
- int i;
+ int i = 0;
if (!tg3_flag(tp, ENABLE_TSS)) {
tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
@@ -8281,31 +8320,43 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
tw32(HOSTCC_TXCOL_TICKS, 0);
tw32(HOSTCC_TXMAX_FRAMES, 0);
tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+
+ for (; i < tp->txq_cnt; i++) {
+ u32 reg;
+
+ reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
+ tw32(reg, ec->tx_coalesce_usecs);
+ reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
+ tw32(reg, ec->tx_max_coalesced_frames);
+ reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
+ tw32(reg, ec->tx_max_coalesced_frames_irq);
+ }
}
+ for (; i < tp->irq_max - 1; i++) {
+ tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+ }
+}
+
+static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+ int i = 0;
+ u32 limit = tp->rxq_cnt;
+
if (!tg3_flag(tp, ENABLE_RSS)) {
tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+ limit--;
} else {
tw32(HOSTCC_RXCOL_TICKS, 0);
tw32(HOSTCC_RXMAX_FRAMES, 0);
tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
}
- if (!tg3_flag(tp, 5705_PLUS)) {
- u32 val = ec->stats_block_coalesce_usecs;
-
- tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
- tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
-
- if (!netif_carrier_ok(tp->dev))
- val = 0;
-
- tw32(HOSTCC_STAT_COAL_TICKS, val);
- }
-
- for (i = 0; i < tp->irq_cnt - 1; i++) {
+ for (; i < limit; i++) {
u32 reg;
reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
@@ -8314,27 +8365,30 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
tw32(reg, ec->rx_max_coalesced_frames);
reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
tw32(reg, ec->rx_max_coalesced_frames_irq);
-
- if (tg3_flag(tp, ENABLE_TSS)) {
- reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
- tw32(reg, ec->tx_coalesce_usecs);
- reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
- tw32(reg, ec->tx_max_coalesced_frames);
- reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
- tw32(reg, ec->tx_max_coalesced_frames_irq);
- }
}
for (; i < tp->irq_max - 1; i++) {
tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+ }
+}
- if (tg3_flag(tp, ENABLE_TSS)) {
- tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
- tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
- tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
- }
+static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+ tg3_coal_tx_init(tp, ec);
+ tg3_coal_rx_init(tp, ec);
+
+ if (!tg3_flag(tp, 5705_PLUS)) {
+ u32 val = ec->stats_block_coalesce_usecs;
+
+ tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+ tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+
+ if (!netif_carrier_ok(tp->dev))
+ val = 0;
+
+ tw32(HOSTCC_STAT_COAL_TICKS, val);
}
}
@@ -8592,13 +8646,12 @@ static void __tg3_set_rx_mode(struct net_device *dev)
}
}
-static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
+static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
int i;
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
- tp->rss_ind_tbl[i] =
- ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
+ tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}
static void tg3_rss_check_indir_tbl(struct tg3 *tp)
@@ -8620,7 +8673,7 @@ static void tg3_rss_check_indir_tbl(struct tg3 *tp)
}
if (i != TG3_RSS_INDIR_TBL_SIZE)
- tg3_rss_init_dflt_indir_tbl(tp);
+ tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
@@ -9517,7 +9570,6 @@ static int tg3_init_hw(struct tg3 *tp, int reset_phy)
return tg3_reset_hw(tp, reset_phy);
}
-#if IS_ENABLED(CONFIG_HWMON)
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
int i;
@@ -9570,22 +9622,17 @@ static const struct attribute_group tg3_group = {
.attrs = tg3_attributes,
};
-#endif
-
static void tg3_hwmon_close(struct tg3 *tp)
{
-#if IS_ENABLED(CONFIG_HWMON)
if (tp->hwmon_dev) {
hwmon_device_unregister(tp->hwmon_dev);
tp->hwmon_dev = NULL;
sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
}
-#endif
}
static void tg3_hwmon_open(struct tg3 *tp)
{
-#if IS_ENABLED(CONFIG_HWMON)
int i, err;
u32 size = 0;
struct pci_dev *pdev = tp->pdev;
@@ -9617,7 +9664,6 @@ static void tg3_hwmon_open(struct tg3 *tp)
dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
}
-#endif
}
@@ -10141,21 +10187,43 @@ static int tg3_request_firmware(struct tg3 *tp)
return 0;
}
-static bool tg3_enable_msix(struct tg3 *tp)
+static u32 tg3_irq_count(struct tg3 *tp)
{
- int i, rc;
- struct msix_entry msix_ent[tp->irq_max];
+ u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
- tp->irq_cnt = netif_get_num_default_rss_queues();
- if (tp->irq_cnt > 1) {
+ if (irq_cnt > 1) {
/* We want as many rx rings enabled as there are cpus.
* In multiqueue MSI-X mode, the first MSI-X vector
* only deals with link interrupts, etc, so we add
* one to the number of vectors we are requesting.
*/
- tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
+ irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
}
+ return irq_cnt;
+}
+
+static bool tg3_enable_msix(struct tg3 *tp)
+{
+ int i, rc;
+ struct msix_entry msix_ent[tp->irq_max];
+
+ tp->txq_cnt = tp->txq_req;
+ tp->rxq_cnt = tp->rxq_req;
+ if (!tp->rxq_cnt)
+ tp->rxq_cnt = netif_get_num_default_rss_queues();
+ if (tp->rxq_cnt > tp->rxq_max)
+ tp->rxq_cnt = tp->rxq_max;
+
+ /* Disable multiple TX rings by default. Simple round-robin hardware
+ * scheduling of the TX rings can cause starvation of rings with
+ * small packets when other rings have TSO or jumbo packets.
+ */
+ if (!tp->txq_req)
+ tp->txq_cnt = 1;
+
+ tp->irq_cnt = tg3_irq_count(tp);
+
for (i = 0; i < tp->irq_max; i++) {
msix_ent[i].entry = i;
msix_ent[i].vector = 0;
@@ -10170,27 +10238,28 @@ static bool tg3_enable_msix(struct tg3 *tp)
netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
tp->irq_cnt, rc);
tp->irq_cnt = rc;
+ tp->rxq_cnt = max(rc - 1, 1);
+ if (tp->txq_cnt)
+ tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
}
for (i = 0; i < tp->irq_max; i++)
tp->napi[i].irq_vec = msix_ent[i].vector;
- netif_set_real_num_tx_queues(tp->dev, 1);
- rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
- if (netif_set_real_num_rx_queues(tp->dev, rc)) {
+ if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
pci_disable_msix(tp->pdev);
return false;
}
- if (tp->irq_cnt > 1) {
- tg3_flag_set(tp, ENABLE_RSS);
+ if (tp->irq_cnt == 1)
+ return true;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
- tg3_flag_set(tp, ENABLE_TSS);
- netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
- }
- }
+ tg3_flag_set(tp, ENABLE_RSS);
+
+ if (tp->txq_cnt > 1)
+ tg3_flag_set(tp, ENABLE_TSS);
+
+ netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
return true;
}
@@ -10224,6 +10293,11 @@ defcfg:
if (!tg3_flag(tp, USING_MSIX)) {
tp->irq_cnt = 1;
tp->napi[0].irq_vec = tp->pdev->irq;
+ }
+
+ if (tp->irq_cnt == 1) {
+ tp->txq_cnt = 1;
+ tp->rxq_cnt = 1;
netif_set_real_num_tx_queues(tp->dev, 1);
netif_set_real_num_rx_queues(tp->dev, 1);
}
@@ -10241,38 +10315,11 @@ static void tg3_ints_fini(struct tg3 *tp)
tg3_flag_clear(tp, ENABLE_TSS);
}
-static int tg3_open(struct net_device *dev)
+static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
{
- struct tg3 *tp = netdev_priv(dev);
+ struct net_device *dev = tp->dev;
int i, err;
- if (tp->fw_needed) {
- err = tg3_request_firmware(tp);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
- if (err)
- return err;
- } else if (err) {
- netdev_warn(tp->dev, "TSO capability disabled\n");
- tg3_flag_clear(tp, TSO_CAPABLE);
- } else if (!tg3_flag(tp, TSO_CAPABLE)) {
- netdev_notice(tp->dev, "TSO capability restored\n");
- tg3_flag_set(tp, TSO_CAPABLE);
- }
- }
-
- netif_carrier_off(tp->dev);
-
- err = tg3_power_up(tp);
- if (err)
- return err;
-
- tg3_full_lock(tp, 0);
-
- tg3_disable_ints(tp);
- tg3_flag_clear(tp, INIT_COMPLETE);
-
- tg3_full_unlock(tp);
-
/*
* Setup interrupts first so we know how
* many NAPI resources to allocate
@@ -10306,7 +10353,7 @@ static int tg3_open(struct net_device *dev)
tg3_full_lock(tp, 0);
- err = tg3_init_hw(tp, 1);
+ err = tg3_init_hw(tp, reset_phy);
if (err) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_free_rings(tp);
@@ -10317,7 +10364,7 @@ static int tg3_open(struct net_device *dev)
if (err)
goto err_out3;
- if (tg3_flag(tp, USING_MSI)) {
+ if (test_irq && tg3_flag(tp, USING_MSI)) {
err = tg3_test_msi(tp);
if (err) {
@@ -10373,20 +10420,18 @@ err_out2:
err_out1:
tg3_ints_fini(tp);
- tg3_frob_aux_power(tp, false);
- pci_set_power_state(tp->pdev, PCI_D3hot);
+
return err;
}
-static int tg3_close(struct net_device *dev)
+static void tg3_stop(struct tg3 *tp)
{
int i;
- struct tg3 *tp = netdev_priv(dev);
tg3_napi_disable(tp);
tg3_reset_task_cancel(tp);
- netif_tx_stop_all_queues(dev);
+ netif_tx_disable(tp->dev);
tg3_timer_stop(tp);
@@ -10411,13 +10456,60 @@ static int tg3_close(struct net_device *dev)
tg3_ints_fini(tp);
- /* Clear stats across close / open calls */
- memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
- memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
-
tg3_napi_fini(tp);
tg3_free_consistent(tp);
+}
+
+static int tg3_open(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int err;
+
+ if (tp->fw_needed) {
+ err = tg3_request_firmware(tp);
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+ if (err)
+ return err;
+ } else if (err) {
+ netdev_warn(tp->dev, "TSO capability disabled\n");
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ } else if (!tg3_flag(tp, TSO_CAPABLE)) {
+ netdev_notice(tp->dev, "TSO capability restored\n");
+ tg3_flag_set(tp, TSO_CAPABLE);
+ }
+ }
+
+ netif_carrier_off(tp->dev);
+
+ err = tg3_power_up(tp);
+ if (err)
+ return err;
+
+ tg3_full_lock(tp, 0);
+
+ tg3_disable_ints(tp);
+ tg3_flag_clear(tp, INIT_COMPLETE);
+
+ tg3_full_unlock(tp);
+
+ err = tg3_start(tp, true, true);
+ if (err) {
+ tg3_frob_aux_power(tp, false);
+ pci_set_power_state(tp->pdev, PCI_D3hot);
+ }
+ return err;
+}
+
+static int tg3_close(struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ tg3_stop(tp);
+
+ /* Clear stats across close / open calls */
+ memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
+ memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
tg3_power_down(tp);
@@ -11207,11 +11299,11 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
if (netif_running(tp->dev))
- info->data = tp->irq_cnt;
+ info->data = tp->rxq_cnt;
else {
info->data = num_online_cpus();
- if (info->data > TG3_IRQ_MAX_VECS_RSS)
- info->data = TG3_IRQ_MAX_VECS_RSS;
+ if (info->data > TG3_RSS_MAX_NUM_QS)
+ info->data = TG3_RSS_MAX_NUM_QS;
}
/* The first interrupt vector only
@@ -11268,6 +11360,58 @@ static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
return 0;
}
+static void tg3_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ u32 deflt_qs = netif_get_num_default_rss_queues();
+
+ channel->max_rx = tp->rxq_max;
+ channel->max_tx = tp->txq_max;
+
+ if (netif_running(dev)) {
+ channel->rx_count = tp->rxq_cnt;
+ channel->tx_count = tp->txq_cnt;
+ } else {
+ if (tp->rxq_req)
+ channel->rx_count = tp->rxq_req;
+ else
+ channel->rx_count = min(deflt_qs, tp->rxq_max);
+
+ if (tp->txq_req)
+ channel->tx_count = tp->txq_req;
+ else
+ channel->tx_count = min(deflt_qs, tp->txq_max);
+ }
+}
+
+static int tg3_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (!tg3_flag(tp, SUPPORT_MSIX))
+ return -EOPNOTSUPP;
+
+ if (channel->rx_count > tp->rxq_max ||
+ channel->tx_count > tp->txq_max)
+ return -EINVAL;
+
+ tp->rxq_req = channel->rx_count;
+ tp->txq_req = channel->tx_count;
+
+ if (!netif_running(dev))
+ return 0;
+
+ tg3_stop(tp);
+
+ netif_carrier_off(dev);
+
+ tg3_start(tp, true, false);
+
+ return 0;
+}
+
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
switch (stringset) {
@@ -12516,6 +12660,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.get_rxfh_indir_size = tg3_get_rxfh_indir_size,
.get_rxfh_indir = tg3_get_rxfh_indir,
.set_rxfh_indir = tg3_set_rxfh_indir,
+ .get_channels = tg3_get_channels,
+ .set_channels = tg3_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
};
@@ -14532,10 +14678,20 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tg3_flag(tp, 57765_PLUS)) {
tg3_flag_set(tp, SUPPORT_MSIX);
tp->irq_max = TG3_IRQ_MAX_VECS;
- tg3_rss_init_dflt_indir_tbl(tp);
}
}
+ tp->txq_max = 1;
+ tp->rxq_max = 1;
+ if (tp->irq_max > 1) {
+ tp->rxq_max = TG3_RSS_MAX_NUM_QS;
+ tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ tp->txq_max = tp->irq_max - 1;
+ }
+
if (tg3_flag(tp, 5755_PLUS) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
tg3_flag_set(tp, SHORT_DMA_BUG);
@@ -14565,9 +14721,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag_set(tp, PCI_EXPRESS);
- pci_read_config_word(tp->pdev,
- pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
- &lnkctl);
+ pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
ASIC_REV_5906) {
@@ -16397,7 +16551,7 @@ done:
rtnl_unlock();
}
-static struct pci_error_handlers tg3_err_handler = {
+static const struct pci_error_handlers tg3_err_handler = {
.error_detected = tg3_io_error_detected,
.slot_reset = tg3_io_slot_reset,
.resume = tg3_io_resume
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 6d52cb286826..d9308c32102e 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2860,7 +2860,8 @@ struct tg3_rx_prodring_set {
dma_addr_t rx_jmb_mapping;
};
-#define TG3_IRQ_MAX_VECS_RSS 5
+#define TG3_RSS_MAX_NUM_QS 4
+#define TG3_IRQ_MAX_VECS_RSS (TG3_RSS_MAX_NUM_QS + 1)
#define TG3_IRQ_MAX_VECS TG3_IRQ_MAX_VECS_RSS
struct tg3_napi {
@@ -3037,6 +3038,9 @@ struct tg3 {
void (*write32_tx_mbox) (struct tg3 *, u32,
u32);
u32 dma_limit;
+ u32 txq_req;
+ u32 txq_cnt;
+ u32 txq_max;
/* begin "rx thread" cacheline section */
struct tg3_napi napi[TG3_IRQ_MAX_VECS];
@@ -3051,6 +3055,9 @@ struct tg3 {
u32 rx_std_max_post;
u32 rx_offset;
u32 rx_pkt_map_sz;
+ u32 rxq_req;
+ u32 rxq_cnt;
+ u32 rxq_max;
bool rx_refill;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index b441f33258e7..ce1eac529470 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3268,6 +3268,7 @@ bnad_pci_probe(struct pci_dev *pdev,
* Output : using_dac = 1 for 64 bit DMA
* = 0 for 32 bit DMA
*/
+ using_dac = false;
err = bnad_pci_init(bnad, pdev, &using_dac);
if (err)
goto unlock_mutex;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 6505070abcfa..9c9f3260344a 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1394,7 +1394,7 @@ static int offload_close(struct t3cdev *tdev)
sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
/* Flush work scheduled while releasing TIDs */
- flush_work_sync(&td->tid_release_task);
+ flush_work(&td->tid_release_task);
tdev->lldev = NULL;
cxgb3_set_dummy_ops(tdev);
@@ -3036,7 +3036,7 @@ static void t3_io_resume(struct pci_dev *pdev)
t3_resume_ports(adapter);
}
-static struct pci_error_handlers t3_err_handler = {
+static const struct pci_error_handlers t3_err_handler = {
.error_detected = t3_io_error_detected,
.slot_reset = t3_io_slot_reset,
.resume = t3_io_resume,
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index bff8a3cdd3df..aef45d3113ba 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -3289,22 +3289,18 @@ static void config_pcie(struct adapter *adap)
unsigned int log2_width, pldsize;
unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
- pci_read_config_word(adap->pdev,
- adap->pdev->pcie_cap + PCI_EXP_DEVCTL,
- &val);
+ pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL, &val);
pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
pci_read_config_word(adap->pdev, 0x2, &devid);
if (devid == 0x37) {
- pci_write_config_word(adap->pdev,
- adap->pdev->pcie_cap + PCI_EXP_DEVCTL,
- val & ~PCI_EXP_DEVCTL_READRQ &
- ~PCI_EXP_DEVCTL_PAYLOAD);
+ pcie_capability_write_word(adap->pdev, PCI_EXP_DEVCTL,
+ val & ~PCI_EXP_DEVCTL_READRQ &
+ ~PCI_EXP_DEVCTL_PAYLOAD);
pldsize = 0;
}
- pci_read_config_word(adap->pdev, adap->pdev->pcie_cap + PCI_EXP_LNKCTL,
- &val);
+ pcie_capability_read_word(adap->pdev, PCI_EXP_LNKCTL, &val);
fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
@@ -3425,15 +3421,13 @@ out_err:
static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
static unsigned short speed_map[] = { 33, 66, 100, 133 };
- u32 pci_mode, pcie_cap;
+ u32 pci_mode;
- pcie_cap = pci_pcie_cap(adapter->pdev);
- if (pcie_cap) {
+ if (pci_is_pcie(adapter->pdev)) {
u16 val;
p->variant = PCI_VARIANT_PCIE;
- pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
- &val);
+ pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
p->width = (val >> 4) & 0x3f;
return;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index ec2dafe8ae5b..745a1f53361f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -67,12 +67,12 @@ enum {
};
enum {
- MEMWIN0_APERTURE = 65536,
- MEMWIN0_BASE = 0x30000,
+ MEMWIN0_APERTURE = 2048,
+ MEMWIN0_BASE = 0x1b800,
MEMWIN1_APERTURE = 32768,
MEMWIN1_BASE = 0x28000,
- MEMWIN2_APERTURE = 2048,
- MEMWIN2_BASE = 0x1b800,
+ MEMWIN2_APERTURE = 65536,
+ MEMWIN2_BASE = 0x30000,
};
enum dev_master {
@@ -211,6 +211,9 @@ struct tp_err_stats {
struct tp_params {
unsigned int ntxchan; /* # of Tx channels */
unsigned int tre; /* log2 of core clocks per TP tick */
+
+ uint32_t dack_re; /* DACK timer resolution */
+ unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
};
struct vpd_params {
@@ -315,6 +318,10 @@ enum { /* adapter flags */
USING_MSI = (1 << 1),
USING_MSIX = (1 << 2),
FW_OK = (1 << 4),
+ RSS_TNLALLLOOKUP = (1 << 5),
+ USING_SOFT_PARAMS = (1 << 6),
+ MASTER_PF = (1 << 7),
+ FW_OFLD_CONN = (1 << 9),
};
struct rx_sw_desc;
@@ -467,6 +474,11 @@ struct sge {
u16 rdma_rxq[NCHAN];
u16 timer_val[SGE_NTIMERS];
u8 counter_val[SGE_NCOUNTERS];
+ u32 fl_pg_order; /* large page allocation size */
+ u32 stat_len; /* length of status page at ring end */
+ u32 pktshift; /* padding between CPL & packet data */
+ u32 fl_align; /* response queue message alignment */
+ u32 fl_starve_thres; /* Free List starvation threshold */
unsigned int starve_thres;
u8 idma_state[2];
unsigned int egr_start;
@@ -511,6 +523,8 @@ struct adapter {
struct net_device *port[MAX_NPORTS];
u8 chan_map[NCHAN]; /* channel -> port map */
+ unsigned int l2t_start;
+ unsigned int l2t_end;
struct l2t_data *l2t;
void *uld_handle[CXGB4_ULD_MAX];
struct list_head list_node;
@@ -619,7 +633,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
struct net_device *dev, unsigned int iqid);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
-void t4_sge_init(struct adapter *adap);
+int t4_sge_init(struct adapter *adap);
void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
extern int dbfifo_int_thresh;
@@ -638,6 +652,14 @@ static inline unsigned int us_to_core_ticks(const struct adapter *adap,
return (us * adap->params.vpd.cclk) / 1000;
}
+static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
+ unsigned int ticks)
+{
+ /* add Core Clock / 2 to round ticks to nearest uS */
+ return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
+ adapter->params.vpd.cclk);
+}
+
void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
u32 val);
@@ -656,6 +678,9 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
}
+void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, const u32 *vals,
+ unsigned int nregs, unsigned int start_idx);
void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);
@@ -664,8 +689,12 @@ int t4_wait_dev_ready(struct adapter *adap);
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
+int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
+ __be32 *buf);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
+int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
+unsigned int t4_flash_cfg_addr(struct adapter *adapter);
int t4_check_fw_version(struct adapter *adapter);
int t4_prep_adapter(struct adapter *adapter);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
@@ -680,6 +709,8 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
+void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
+ unsigned int mask, unsigned int val);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
@@ -695,6 +726,16 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_early_init(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
+int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force);
+int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
+int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+ const u8 *fw_data, unsigned int size, int force);
+int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
+ unsigned int mtype, unsigned int maddr,
+ u32 *finiver, u32 *finicsum, u32 *cfcsum);
+int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
+ unsigned int cache_line_size);
+int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
u32 *val);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 5ed49af23d6a..6b9f6bb2f7ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -78,28 +78,45 @@
*/
#define MAX_SGE_TIMERVAL 200U
-#ifdef CONFIG_PCI_IOV
-/*
- * Virtual Function provisioning constants. We need two extra Ingress Queues
- * with Interrupt capability to serve as the VF's Firmware Event Queue and
- * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free
- * Lists associated with them). For each Ethernet/Control Egress Queue and
- * for each Free List, we need an Egress Context.
- */
enum {
+ /*
+ * Physical Function provisioning constants.
+ */
+ PFRES_NVI = 4, /* # of Virtual Interfaces */
+ PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
+ PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr
+ */
+ PFRES_NEQ = 256, /* # of egress queues */
+ PFRES_NIQ = 0, /* # of ingress queues */
+ PFRES_TC = 0, /* PCI-E traffic class */
+ PFRES_NEXACTF = 128, /* # of exact MPS filters */
+
+ PFRES_R_CAPS = FW_CMD_CAP_PF,
+ PFRES_WX_CAPS = FW_CMD_CAP_PF,
+
+#ifdef CONFIG_PCI_IOV
+ /*
+ * Virtual Function provisioning constants. We need two extra Ingress
+ * Queues with Interrupt capability to serve as the VF's Firmware
+ * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
+ * neither will have Free Lists associated with them). For each
+ * Ethernet/Control Egress Queue and for each Free List, we need an
+ * Egress Context.
+ */
VFRES_NPORTS = 1, /* # of "ports" per VF */
VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
- VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
+ VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
VFRES_TC = 0, /* PCI-E traffic class */
VFRES_NEXACTF = 16, /* # of exact MPS filters */
VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
+#endif
};
/*
@@ -146,7 +163,6 @@ static unsigned int pfvfres_pmask(struct adapter *adapter,
}
/*NOTREACHED*/
}
-#endif
enum {
MAX_TXQ_ENTRIES = 16384,
@@ -193,6 +209,7 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
};
#define FW_FNAME "cxgb4/t4fw.bin"
+#define FW_CFNAME "cxgb4/t4-config.txt"
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
@@ -201,6 +218,28 @@ MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);
+/*
+ * Normally we're willing to become the firmware's Master PF but will be happy
+ * if another PF has already become the Master and initialized the adapter.
+ * Setting "force_init" will cause this driver to forcibly establish itself as
+ * the Master PF and initialize the adapter.
+ */
+static uint force_init;
+
+module_param(force_init, uint, 0644);
+MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
+
+/*
+ * Normally if the firmware we connect to has Configuration File support, we
+ * use that and only fall back to the old Driver-based initialization if the
+ * Configuration File fails for some reason. If force_old_init is set, then
+ * we'll always use the old Driver-based initialization sequence.
+ */
+static uint force_old_init;
+
+module_param(force_old_init, uint, 0644);
+MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
+
static int dflt_msg_enable = DFLT_MSG_ENABLE;
module_param(dflt_msg_enable, int, 0644);
@@ -236,6 +275,20 @@ module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
"thresholds 1..3 for queue interrupt packet counters");
+/*
+ * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
+ * offset by 2 bytes in order to have the IP headers line up on 4-byte
+ * boundaries. This is a requirement for many architectures which will throw
+ * a machine check fault if an attempt is made to access one of the 4-byte IP
+ * header fields on a non-4-byte boundary. And it's a major performance issue
+ * even on some architectures which allow it like some implementations of the
+ * x86 ISA. However, some architectures don't mind this and for some very
+ * edge-case performance sensitive applications (like forwarding large volumes
+ * of small packets), setting this DMA offset to 0 will decrease the number of
+ * PCI-E Bus transfers enough to measurably affect performance.
+ */
+static int rx_dma_offset = 2;
+
static bool vf_acls;
#ifdef CONFIG_PCI_IOV
@@ -248,6 +301,30 @@ module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif
+/*
+ * The filter TCAM has a fixed portion and a variable portion. The fixed
+ * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
+ * ports. The variable portion is 36 bits which can include things like Exact
+ * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
+ * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
+ * far exceed the 36-bit budget for this "compressed" header portion of the
+ * filter. Thus, we have a scarce resource which must be carefully managed.
+ *
+ * By default we set this up to mostly match the set of filter matching
+ * capabilities of T3 but with accommodations for some of T4's more
+ * interesting features:
+ *
+ * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
+ * [Inner] VLAN (17), Port (3), FCoE (1) }
+ */
+enum {
+ TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
+ TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
+ TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
+};
+
+static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
+
static struct dentry *cxgb4_debugfs_root;
static LIST_HEAD(adapter_list);
@@ -852,11 +929,25 @@ static int upgrade_fw(struct adapter *adap)
*/
if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
vers > adap->params.fw_vers) {
- ret = -t4_load_fw(adap, fw->data, fw->size);
+ dev_info(dev, "upgrading firmware ...\n");
+ ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
+ /*force=*/false);
if (!ret)
- dev_info(dev, "firmware upgraded to version %pI4 from "
- FW_FNAME "\n", &hdr->fw_ver);
+ dev_info(dev, "firmware successfully upgraded to "
+ FW_FNAME " (%d.%d.%d.%d)\n",
+ FW_HDR_FW_VER_MAJOR_GET(vers),
+ FW_HDR_FW_VER_MINOR_GET(vers),
+ FW_HDR_FW_VER_MICRO_GET(vers),
+ FW_HDR_FW_VER_BUILD_GET(vers));
+ else
+ dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
+ } else {
+ /*
+ * Tell our caller that we didn't upgrade the firmware.
+ */
+ ret = -EINVAL;
}
+
out: release_firmware(fw);
return ret;
}
@@ -2470,8 +2561,8 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
else
delta = size - hw_pidx + pidx;
wmb();
- t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
- V_QID(qid) | V_PIDX(delta));
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+ QID(qid) | PIDX(delta));
}
out:
return ret;
@@ -2579,8 +2670,8 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
else
delta = q->size - hw_pidx + q->db_pidx;
wmb();
- t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
- V_QID(q->cntxt_id) | V_PIDX(delta));
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+ QID(q->cntxt_id) | PIDX(delta));
}
out:
q->db_disabled = 0;
@@ -2617,9 +2708,9 @@ static void process_db_full(struct work_struct *work)
notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
drain_db_fifo(adap, dbfifo_drain_delay);
- t4_set_reg_field(adap, A_SGE_INT_ENABLE3,
- F_DBFIFO_HP_INT | F_DBFIFO_LP_INT,
- F_DBFIFO_HP_INT | F_DBFIFO_LP_INT);
+ t4_set_reg_field(adap, SGE_INT_ENABLE3,
+ DBFIFO_HP_INT | DBFIFO_LP_INT,
+ DBFIFO_HP_INT | DBFIFO_LP_INT);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
}
@@ -2639,8 +2730,8 @@ static void process_db_drop(struct work_struct *work)
void t4_db_full(struct adapter *adap)
{
- t4_set_reg_field(adap, A_SGE_INT_ENABLE3,
- F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, 0);
+ t4_set_reg_field(adap, SGE_INT_ENABLE3,
+ DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
queue_work(workq, &adap->db_full_task);
}
@@ -3076,6 +3167,10 @@ static void setup_memwin(struct adapter *adap)
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
(bar0 + MEMWIN2_BASE) | BIR(0) |
WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
+}
+
+static void setup_memwin_rdma(struct adapter *adap)
+{
if (adap->vres.ocq.size) {
unsigned int start, sz_kb;
@@ -3155,6 +3250,488 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
/*
* Phase 0 of initialization: contact FW, obtain config, perform basic init.
+ *
+ * If the firmware we're dealing with has Configuration File support, then
+ * we use that to perform all configuration
+ */
+
+/*
+ * Tweak configuration based on module parameters, etc. Most of these have
+ * defaults assigned to them by Firmware Configuration Files (if we're using
+ * them) but need to be explicitly set if we're using hard-coded
+ * initialization. But even in the case of using Firmware Configuration
+ * Files, we'd like to expose the ability to change these via module
+ * parameters so these are essentially common tweaks/settings for
+ * Configuration Files and hard-coded initialization ...
+ */
+static int adap_init0_tweaks(struct adapter *adapter)
+{
+ /*
+ * Fix up various Host-Dependent Parameters like Page Size, Cache
+ * Line Size, etc. The firmware default is for a 4KB Page Size and
+ * 64B Cache Line Size ...
+ */
+ t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
+
+ /*
+ * Process module parameters which affect early initialization.
+ */
+ if (rx_dma_offset != 2 && rx_dma_offset != 0) {
+ dev_err(&adapter->pdev->dev,
+ "Ignoring illegal rx_dma_offset=%d, using 2\n",
+ rx_dma_offset);
+ rx_dma_offset = 2;
+ }
+ t4_set_reg_field(adapter, SGE_CONTROL,
+ PKTSHIFT_MASK,
+ PKTSHIFT(rx_dma_offset));
+
+ /*
+ * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
+ * adds the pseudo header itself.
+ */
+ t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
+ CSUM_HAS_PSEUDO_HDR, 0);
+
+ return 0;
+}
+
+/*
+ * Attempt to initialize the adapter via a Firmware Configuration File.
+ */
+static int adap_init0_config(struct adapter *adapter, int reset)
+{
+ struct fw_caps_config_cmd caps_cmd;
+ const struct firmware *cf;
+ unsigned long mtype = 0, maddr = 0;
+ u32 finiver, finicsum, cfcsum;
+ int ret, using_flash;
+
+ /*
+ * Reset device if necessary.
+ */
+ if (reset) {
+ ret = t4_fw_reset(adapter, adapter->mbox,
+ PIORSTMODE | PIORST);
+ if (ret < 0)
+ goto bye;
+ }
+
+ /*
+ * If we have a T4 configuration file under /lib/firmware/cxgb4/,
+ * then use that. Otherwise, use the configuration file stored
+ * in the adapter flash ...
+ */
+ ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev);
+ if (ret < 0) {
+ using_flash = 1;
+ mtype = FW_MEMTYPE_CF_FLASH;
+ maddr = t4_flash_cfg_addr(adapter);
+ } else {
+ u32 params[7], val[7];
+
+ using_flash = 0;
+ if (cf->size >= FLASH_CFG_MAX_SIZE)
+ ret = -ENOMEM;
+ else {
+ params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
+ ret = t4_query_params(adapter, adapter->mbox,
+ adapter->fn, 0, 1, params, val);
+ if (ret == 0) {
+ /*
+ * For t4_memory_write() below addresses and
+ * sizes have to be in terms of multiples of 4
+ * bytes. So, if the Configuration File isn't
+ * a multiple of 4 bytes in length we'll have
+ * to write that out separately since we can't
+ * guarantee that the bytes following the
+ * residual byte in the buffer returned by
+ * request_firmware() are zeroed out ...
+ */
+ size_t resid = cf->size & 0x3;
+ size_t size = cf->size & ~0x3;
+ __be32 *data = (__be32 *)cf->data;
+
+ mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
+ maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
+
+ ret = t4_memory_write(adapter, mtype, maddr,
+ size, data);
+ if (ret == 0 && resid != 0) {
+ union {
+ __be32 word;
+ char buf[4];
+ } last;
+ int i;
+
+ last.word = data[size >> 2];
+ for (i = resid; i < 4; i++)
+ last.buf[i] = 0;
+ ret = t4_memory_write(adapter, mtype,
+ maddr + size,
+ 4, &last.word);
+ }
+ }
+ }
+
+ release_firmware(cf);
+ if (ret)
+ goto bye;
+ }
+
+ /*
+ * Issue a Capability Configuration command to the firmware to get it
+ * to parse the Configuration File. We don't use t4_fw_config_file()
+ * because we want the ability to modify various features after we've
+ * processed the configuration file ...
+ */
+ memset(&caps_cmd, 0, sizeof(caps_cmd));
+ caps_cmd.op_to_write =
+ htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ);
+ caps_cmd.retval_len16 =
+ htonl(FW_CAPS_CONFIG_CMD_CFVALID |
+ FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
+ FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
+ FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+ &caps_cmd);
+ if (ret < 0)
+ goto bye;
+
+ finiver = ntohl(caps_cmd.finiver);
+ finicsum = ntohl(caps_cmd.finicsum);
+ cfcsum = ntohl(caps_cmd.cfcsum);
+ if (finicsum != cfcsum)
+ dev_warn(adapter->pdev_dev, "Configuration File checksum "\
+ "mismatch: [fini] csum=%#x, computed csum=%#x\n",
+ finicsum, cfcsum);
+
+ /*
+ * If we're a pure NIC driver then disable all offloading facilities.
+ * This will allow the firmware to optimize aspects of the hardware
+ * configuration which will result in improved performance.
+ */
+ caps_cmd.ofldcaps = 0;
+ caps_cmd.iscsicaps = 0;
+ caps_cmd.rdmacaps = 0;
+ caps_cmd.fcoecaps = 0;
+
+ /*
+ * And now tell the firmware to use the configuration we just loaded.
+ */
+ caps_cmd.op_to_write =
+ htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE);
+ caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+ NULL);
+ if (ret < 0)
+ goto bye;
+
+ /*
+ * Tweak configuration based on system architecture, module
+ * parameters, etc.
+ */
+ ret = adap_init0_tweaks(adapter);
+ if (ret < 0)
+ goto bye;
+
+ /*
+ * And finally tell the firmware to initialize itself using the
+ * parameters from the Configuration File.
+ */
+ ret = t4_fw_initialize(adapter, adapter->mbox);
+ if (ret < 0)
+ goto bye;
+
+ /*
+ * Return successfully and note that we're operating with parameters
+ * not supplied by the driver, rather than from hard-wired
+ * initialization constants buried in the driver.
+ */
+ adapter->flags |= USING_SOFT_PARAMS;
+ dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
+ "Configuration File %s, version %#x, computed checksum %#x\n",
+ (using_flash
+ ? "in device FLASH"
+ : "/lib/firmware/" FW_CFNAME),
+ finiver, cfcsum);
+ return 0;
+
+ /*
+ * Something bad happened. Return the error ... (If the "error"
+ * is that there's no Configuration File on the adapter we don't
+ * want to issue a warning since this is fairly common.)
+ */
+bye:
+ if (ret != -ENOENT)
+ dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
+ -ret);
+ return ret;
+}
+
+/*
+ * Attempt to initialize the adapter via hard-coded, driver supplied
+ * parameters ...
+ */
+static int adap_init0_no_config(struct adapter *adapter, int reset)
+{
+ struct sge *s = &adapter->sge;
+ struct fw_caps_config_cmd caps_cmd;
+ u32 v;
+ int i, ret;
+
+ /*
+ * Reset device if necessary
+ */
+ if (reset) {
+ ret = t4_fw_reset(adapter, adapter->mbox,
+ PIORSTMODE | PIORST);
+ if (ret < 0)
+ goto bye;
+ }
+
+ /*
+ * Get device capabilities and select which we'll be using.
+ */
+ memset(&caps_cmd, 0, sizeof(caps_cmd));
+ caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST | FW_CMD_READ);
+ caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+ &caps_cmd);
+ if (ret < 0)
+ goto bye;
+
+#ifndef CONFIG_CHELSIO_T4_OFFLOAD
+ /*
+ * If we're a pure NIC driver then disable all offloading facilities.
+ * This will allow the firmware to optimize aspects of the hardware
+ * configuration which will result in improved performance.
+ */
+ caps_cmd.ofldcaps = 0;
+ caps_cmd.iscsicaps = 0;
+ caps_cmd.rdmacaps = 0;
+ caps_cmd.fcoecaps = 0;
+#endif
+
+ if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
+ if (!vf_acls)
+ caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
+ else
+ caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
+ } else if (vf_acls) {
+ dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
+ goto bye;
+ }
+ caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+ NULL);
+ if (ret < 0)
+ goto bye;
+
+ /*
+ * Tweak configuration based on system architecture, module
+ * parameters, etc.
+ */
+ ret = adap_init0_tweaks(adapter);
+ if (ret < 0)
+ goto bye;
+
+ /*
+ * Select RSS Global Mode we want to use. We use "Basic Virtual"
+ * mode which maps each Virtual Interface to its own section of
+ * the RSS Table and we turn on all map and hash enables ...
+ */
+ adapter->flags |= RSS_TNLALLLOOKUP;
+ ret = t4_config_glbl_rss(adapter, adapter->mbox,
+ FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
+ FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
+ FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
+ ((adapter->flags & RSS_TNLALLLOOKUP) ?
+ FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
+ if (ret < 0)
+ goto bye;
+
+ /*
+ * Set up our own fundamental resource provisioning ...
+ */
+ ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
+ PFRES_NEQ, PFRES_NETHCTRL,
+ PFRES_NIQFLINT, PFRES_NIQ,
+ PFRES_TC, PFRES_NVI,
+ FW_PFVF_CMD_CMASK_MASK,
+ pfvfres_pmask(adapter, adapter->fn, 0),
+ PFRES_NEXACTF,
+ PFRES_R_CAPS, PFRES_WX_CAPS);
+ if (ret < 0)
+ goto bye;
+
+ /*
+ * Perform low level SGE initialization. We need to do this before we
+ * send the firmware the INITIALIZE command because that will cause
+ * any other PF Drivers which are waiting for the Master
+ * Initialization to proceed forward.
+ */
+ for (i = 0; i < SGE_NTIMERS - 1; i++)
+ s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
+ s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
+ s->counter_val[0] = 1;
+ for (i = 1; i < SGE_NCOUNTERS; i++)
+ s->counter_val[i] = min(intr_cnt[i - 1],
+ THRESHOLD_0_GET(THRESHOLD_0_MASK));
+ t4_sge_init(adapter);
+
+#ifdef CONFIG_PCI_IOV
+ /*
+ * Provision resource limits for Virtual Functions. We currently
+ * grant them all the same static resource limits except for the Port
+ * Access Rights Mask which we're assigning based on the PF. All of
+ * the static provisioning stuff for both the PF and VF really needs
+ * to be managed in a persistent manner for each device which the
+ * firmware controls.
+ */
+ {
+ int pf, vf;
+
+ for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
+ if (num_vf[pf] <= 0)
+ continue;
+
+ /* VF numbering starts at 1! */
+ for (vf = 1; vf <= num_vf[pf]; vf++) {
+ ret = t4_cfg_pfvf(adapter, adapter->mbox,
+ pf, vf,
+ VFRES_NEQ, VFRES_NETHCTRL,
+ VFRES_NIQFLINT, VFRES_NIQ,
+ VFRES_TC, VFRES_NVI,
+ FW_PFVF_CMD_CMASK_GET(
+ FW_PFVF_CMD_CMASK_MASK),
+ pfvfres_pmask(
+ adapter, pf, vf),
+ VFRES_NEXACTF,
+ VFRES_R_CAPS, VFRES_WX_CAPS);
+ if (ret < 0)
+ dev_warn(adapter->pdev_dev,
+ "failed to "\
+ "provision pf/vf=%d/%d; "
+ "err=%d\n", pf, vf, ret);
+ }
+ }
+ }
+#endif
+
+ /*
+ * Set up the default filter mode. Later we'll want to implement this
+ * via a firmware command, etc. ... This needs to be done before the
+ * firmware initialization command ... If the selected set of fields
+ * isn't equal to the default value, we'll need to make sure that the
+ * field selections will fit in the 36-bit budget.
+ */
+ if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
+ int i, bits = 0;
+
+ for (i = TP_VLAN_PRI_MAP_FIRST; i <= TP_VLAN_PRI_MAP_LAST; i++)
+ switch (tp_vlan_pri_map & (1 << i)) {
+ case 0:
+ /* compressed filter field not enabled */
+ break;
+ case FCOE_MASK:
+ bits += 1;
+ break;
+ case PORT_MASK:
+ bits += 3;
+ break;
+ case VNIC_ID_MASK:
+ bits += 17;
+ break;
+ case VLAN_MASK:
+ bits += 17;
+ break;
+ case TOS_MASK:
+ bits += 8;
+ break;
+ case PROTOCOL_MASK:
+ bits += 8;
+ break;
+ case ETHERTYPE_MASK:
+ bits += 16;
+ break;
+ case MACMATCH_MASK:
+ bits += 9;
+ break;
+ case MPSHITTYPE_MASK:
+ bits += 3;
+ break;
+ case FRAGMENTATION_MASK:
+ bits += 1;
+ break;
+ }
+
+ if (bits > 36) {
+ dev_err(adapter->pdev_dev,
+ "tp_vlan_pri_map=%#x needs %d bits > 36;"\
+ " using %#x\n", tp_vlan_pri_map, bits,
+ TP_VLAN_PRI_MAP_DEFAULT);
+ tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
+ }
+ }
+ v = tp_vlan_pri_map;
+ t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
+ &v, 1, TP_VLAN_PRI_MAP);
+
+ /*
+ * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in
+ * order to support any of the compressed filter fields above. Newer
+ * versions of the firmware do this automatically but it doesn't hurt
+ * to set it here. Meanwhile, we do _not_ need to set Lookup Every
+ * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
+ * since the firmware automatically turns this on and off when we have
+ * a non-zero number of filters active (since it does have a
+ * performance impact).
+ */
+ if (tp_vlan_pri_map)
+ t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
+ FIVETUPLELOOKUP_MASK,
+ FIVETUPLELOOKUP_MASK);
+
+ /*
+ * Tweak some settings.
+ */
+ t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
+ RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
+ PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
+ KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
+
+ /*
+ * Get basic stuff going by issuing the Firmware Initialize command.
+ * Note that this _must_ be after all PFVF commands ...
+ */
+ ret = t4_fw_initialize(adapter, adapter->mbox);
+ if (ret < 0)
+ goto bye;
+
+ /*
+ * Return successfully!
+ */
+ dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
+ "driver parameters\n");
+ return 0;
+
+ /*
+ * Something bad happened. Return the error ...
+ */
+bye:
+ return ret;
+}
+
+/*
+ * Phase 0 of initialization: contact FW, obtain config, perform basic init.
*/
static int adap_init0(struct adapter *adap)
{
@@ -3162,72 +3739,216 @@ static int adap_init0(struct adapter *adap)
u32 v, port_vec;
enum dev_state state;
u32 params[7], val[7];
- struct fw_caps_config_cmd c;
-
- ret = t4_check_fw_version(adap);
- if (ret == -EINVAL || ret > 0) {
- if (upgrade_fw(adap) >= 0) /* recache FW version */
- ret = t4_check_fw_version(adap);
- }
- if (ret < 0)
- return ret;
+ int reset = 1, j;
- /* contact FW, request master */
- ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state);
+ /*
+ * Contact FW, advertising Master capability (and potentially forcing
+ * ourselves as the Master PF if our module parameter force_init is
+ * set).
+ */
+ ret = t4_fw_hello(adap, adap->mbox, adap->fn,
+ force_init ? MASTER_MUST : MASTER_MAY,
+ &state);
if (ret < 0) {
dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
ret);
return ret;
}
+ if (ret == adap->mbox)
+ adap->flags |= MASTER_PF;
+ if (force_init && state == DEV_STATE_INIT)
+ state = DEV_STATE_UNINIT;
- /* reset device */
- ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST);
- if (ret < 0)
- goto bye;
-
- for (v = 0; v < SGE_NTIMERS - 1; v++)
- adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
- adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
- adap->sge.counter_val[0] = 1;
- for (v = 1; v < SGE_NCOUNTERS; v++)
- adap->sge.counter_val[v] = min(intr_cnt[v - 1],
- THRESHOLD_3_MASK);
-#define FW_PARAM_DEV(param) \
- (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+ /*
+ * If we're the Master PF Driver and the device is uninitialized,
+ * then let's consider upgrading the firmware ... (We always want
+ * to check the firmware version number in order to A. get it for
+ * later reporting and B. to warn if the currently loaded firmware
+ * is excessively mismatched relative to the driver.)
+ */
+ ret = t4_check_fw_version(adap);
+ if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
+ if (ret == -EINVAL || ret > 0) {
+ if (upgrade_fw(adap) >= 0) {
+ /*
+ * Note that the chip was reset as part of the
+ * firmware upgrade so we don't reset it again
+ * below and grab the new firmware version.
+ */
+ reset = 0;
+ ret = t4_check_fw_version(adap);
+ }
+ }
+ if (ret < 0)
+ return ret;
+ }
- params[0] = FW_PARAM_DEV(CCLK);
- ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val);
+ /*
+ * Grab VPD parameters. This should be done after we establish a
+ * connection to the firmware since some of the VPD parameters
+ * (notably the Core Clock frequency) are retrieved via requests to
+ * the firmware. On the other hand, we need these fairly early on
+ * so we do this right after getting ahold of the firmware.
+ */
+ ret = get_vpd_params(adap, &adap->params.vpd);
if (ret < 0)
goto bye;
- adap->params.vpd.cclk = val[0];
- ret = adap_init1(adap, &c);
+ /*
+ * Find out what ports are available to us. Note that we need to do
+ * this before calling adap_init0_no_config() since it needs nports
+ * and portvec ...
+ */
+ v =
+ FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
if (ret < 0)
goto bye;
+ adap->params.nports = hweight32(port_vec);
+ adap->params.portvec = port_vec;
+
+ /*
+ * If the firmware is initialized already (and we're not forcing a
+ * master initialization), note that we're living with existing
+ * adapter parameters. Otherwise, it's time to try initializing the
+ * adapter ...
+ */
+ if (state == DEV_STATE_INIT) {
+ dev_info(adap->pdev_dev, "Coming up as %s: "\
+ "Adapter already initialized\n",
+ adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
+ adap->flags |= USING_SOFT_PARAMS;
+ } else {
+ dev_info(adap->pdev_dev, "Coming up as MASTER: "\
+ "Initializing adapter\n");
+
+ /*
+		 * If the firmware doesn't support Configuration
+		 * Files, warn the user.
+ */
+ if (ret < 0)
+ dev_warn(adap->pdev_dev, "Firmware doesn't support "
+ "configuration file.\n");
+ if (force_old_init)
+ ret = adap_init0_no_config(adap, reset);
+ else {
+ /*
+ * Find out whether we're dealing with a version of
+ * the firmware which has configuration file support.
+ */
+ params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
+ params, val);
+
+ /*
+ * If the firmware doesn't support Configuration
+ * Files, use the old Driver-based, hard-wired
+ * initialization. Otherwise, try using the
+ * Configuration File support and fall back to the
+ * Driver-based initialization if there's no
+ * Configuration File found.
+ */
+ if (ret < 0)
+ ret = adap_init0_no_config(adap, reset);
+ else {
+ /*
+ * The firmware provides us with a memory
+ * buffer where we can load a Configuration
+ * File from the host if we want to override
+ * the Configuration File in flash.
+ */
+
+ ret = adap_init0_config(adap, reset);
+ if (ret == -ENOENT) {
+ dev_info(adap->pdev_dev,
+ "No Configuration File present "
+ "on adapter. Using hard-wired "
+ "configuration parameters.\n");
+ ret = adap_init0_no_config(adap, reset);
+ }
+ }
+ }
+ if (ret < 0) {
+ dev_err(adap->pdev_dev,
+ "could not initialize adapter, error %d\n",
+ -ret);
+ goto bye;
+ }
+ }
+
+ /*
+ * If we're living with non-hard-coded parameters (either from a
+ * Firmware Configuration File or values programmed by a different PF
+ * Driver), give the SGE code a chance to pull in anything that it
+ * needs ... Note that this must be called after we retrieve our VPD
+ * parameters in order to know how to convert core ticks to seconds.
+ */
+ if (adap->flags & USING_SOFT_PARAMS) {
+ ret = t4_sge_init(adap);
+ if (ret < 0)
+ goto bye;
+ }
+
+ /*
+ * Grab some of our basic fundamental operating parameters.
+ */
+#define FW_PARAM_DEV(param) \
+ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+
#define FW_PARAM_PFVF(param) \
- (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
- FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
- FW_PARAMS_PARAM_Y(adap->fn))
+ FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
+ FW_PARAMS_PARAM_Y(0) | \
+ FW_PARAMS_PARAM_Z(0)
- params[0] = FW_PARAM_DEV(PORTVEC);
+ params[0] = FW_PARAM_PFVF(EQ_START);
params[1] = FW_PARAM_PFVF(L2T_START);
params[2] = FW_PARAM_PFVF(L2T_END);
params[3] = FW_PARAM_PFVF(FILTER_START);
params[4] = FW_PARAM_PFVF(FILTER_END);
params[5] = FW_PARAM_PFVF(IQFLINT_START);
- params[6] = FW_PARAM_PFVF(EQ_START);
- ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
if (ret < 0)
goto bye;
- port_vec = val[0];
+ adap->sge.egr_start = val[0];
+ adap->l2t_start = val[1];
+ adap->l2t_end = val[2];
adap->tids.ftid_base = val[3];
adap->tids.nftids = val[4] - val[3] + 1;
adap->sge.ingr_start = val[5];
- adap->sge.egr_start = val[6];
- if (c.ofldcaps) {
+ /* query params related to active filter region */
+ params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
+ params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+	/* If the Active filter region is non-empty, we enable establishing
+	 * offload connections through firmware work requests.
+ */
+ if ((val[0] != val[1]) && (ret >= 0)) {
+ adap->flags |= FW_OFLD_CONN;
+ adap->tids.aftid_base = val[0];
+ adap->tids.aftid_end = val[1];
+ }
+
+#ifdef CONFIG_CHELSIO_T4_OFFLOAD
+ /*
+ * Get device capabilities so we can determine what resources we need
+ * to manage.
+ */
+ memset(&caps_cmd, 0, sizeof(caps_cmd));
+ caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST | FW_CMD_READ);
+ caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
+ &caps_cmd);
+ if (ret < 0)
+ goto bye;
+
+ if (caps_cmd.ofldcaps) {
/* query offload-related parameters */
params[0] = FW_PARAM_DEV(NTID);
params[1] = FW_PARAM_PFVF(SERVER_START);
@@ -3235,28 +3956,55 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(TDDP_START);
params[4] = FW_PARAM_PFVF(TDDP_END);
params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
- ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
- val);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
+ params, val);
if (ret < 0)
goto bye;
adap->tids.ntids = val[0];
adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
adap->tids.stid_base = val[1];
adap->tids.nstids = val[2] - val[1] + 1;
+ /*
+		 * Set up the server filter region.  Divide the available
+		 * filter region into two parts: regular filters get 1/3rd
+		 * and server filters get the remaining 2/3rd.  This split is
+		 * only applied when the workaround path is enabled.
+		 * 1. Regular filters.
+		 * 2. Server filters: special filters used to redirect SYN
+		 *    packets to the offload queue.
+ */
+ if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
+ adap->tids.sftid_base = adap->tids.ftid_base +
+ DIV_ROUND_UP(adap->tids.nftids, 3);
+ adap->tids.nsftids = adap->tids.nftids -
+ DIV_ROUND_UP(adap->tids.nftids, 3);
+ adap->tids.nftids = adap->tids.sftid_base -
+ adap->tids.ftid_base;
+ }
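
The 1/3rd-vs-2/3rd split above works out as in the following standalone
sketch (illustrative only; the example TID values and local names are
assumptions, not driver state):

	#include <stdio.h>

	int main(void)
	{
		/* Example: a 96-entry filter region starting at TID 100. */
		unsigned int ftid_base = 100, nftids = 96;
		unsigned int third = (nftids + 2) / 3;		/* DIV_ROUND_UP(nftids, 3) */
		unsigned int sftid_base = ftid_base + third;	/* server filters start here */
		unsigned int nsftids = nftids - third;		/* server filters: ~2/3rd */
		unsigned int new_nftids = sftid_base - ftid_base;	/* regular: ~1/3rd */

		printf("regular: base=%u count=%u, server: base=%u count=%u\n",
		       ftid_base, new_nftids, sftid_base, nsftids);
		return 0;
	}
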
adap->vres.ddp.start = val[3];
adap->vres.ddp.size = val[4] - val[3] + 1;
adap->params.ofldq_wr_cred = val[5];
+
+ params[0] = FW_PARAM_PFVF(ETHOFLD_START);
+ params[1] = FW_PARAM_PFVF(ETHOFLD_END);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+ params, val);
+ if ((val[0] != val[1]) && (ret >= 0)) {
+ adap->tids.uotid_base = val[0];
+ adap->tids.nuotids = val[1] - val[0] + 1;
+ }
+
adap->params.offload = 1;
}
- if (c.rdmacaps) {
+ if (caps_cmd.rdmacaps) {
params[0] = FW_PARAM_PFVF(STAG_START);
params[1] = FW_PARAM_PFVF(STAG_END);
params[2] = FW_PARAM_PFVF(RQ_START);
params[3] = FW_PARAM_PFVF(RQ_END);
params[4] = FW_PARAM_PFVF(PBL_START);
params[5] = FW_PARAM_PFVF(PBL_END);
- ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
- val);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
+ params, val);
if (ret < 0)
goto bye;
adap->vres.stag.start = val[0];
@@ -3272,8 +4020,7 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(CQ_END);
params[4] = FW_PARAM_PFVF(OCQ_START);
params[5] = FW_PARAM_PFVF(OCQ_END);
- ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
- val);
+ ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
if (ret < 0)
goto bye;
adap->vres.qp.start = val[0];
@@ -3283,11 +4030,11 @@ static int adap_init0(struct adapter *adap)
adap->vres.ocq.start = val[4];
adap->vres.ocq.size = val[5] - val[4] + 1;
}
- if (c.iscsicaps) {
+ if (caps_cmd.iscsicaps) {
params[0] = FW_PARAM_PFVF(ISCSI_START);
params[1] = FW_PARAM_PFVF(ISCSI_END);
- ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params,
- val);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+ params, val);
if (ret < 0)
goto bye;
adap->vres.iscsi.start = val[0];
@@ -3295,63 +4042,33 @@ static int adap_init0(struct adapter *adap)
}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV
+#endif /* CONFIG_CHELSIO_T4_OFFLOAD */
- adap->params.nports = hweight32(port_vec);
- adap->params.portvec = port_vec;
- adap->flags |= FW_OK;
-
- /* These are finalized by FW initialization, load their values now */
+ /*
+ * These are finalized by FW initialization, load their values now.
+ */
v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
adap->params.tp.tre = TIMERRESOLUTION_GET(v);
+ adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
adap->params.b_wnd);
-#ifdef CONFIG_PCI_IOV
- /*
- * Provision resource limits for Virtual Functions. We currently
- * grant them all the same static resource limits except for the Port
- * Access Rights Mask which we're assigning based on the PF. All of
- * the static provisioning stuff for both the PF and VF really needs
- * to be managed in a persistent manner for each device which the
- * firmware controls.
- */
- {
- int pf, vf;
-
- for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
- if (num_vf[pf] <= 0)
- continue;
+ /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
+ for (j = 0; j < NCHAN; j++)
+ adap->params.tp.tx_modq[j] = j;
- /* VF numbering starts at 1! */
- for (vf = 1; vf <= num_vf[pf]; vf++) {
- ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
- VFRES_NEQ, VFRES_NETHCTRL,
- VFRES_NIQFLINT, VFRES_NIQ,
- VFRES_TC, VFRES_NVI,
- FW_PFVF_CMD_CMASK_MASK,
- pfvfres_pmask(adap, pf, vf),
- VFRES_NEXACTF,
- VFRES_R_CAPS, VFRES_WX_CAPS);
- if (ret < 0)
- dev_warn(adap->pdev_dev, "failed to "
- "provision pf/vf=%d/%d; "
- "err=%d\n", pf, vf, ret);
- }
- }
- }
-#endif
-
- setup_memwin(adap);
+ adap->flags |= FW_OK;
return 0;
/*
- * If a command timed out or failed with EIO FW does not operate within
- * its spec or something catastrophic happened to HW/FW, stop issuing
- * commands.
+	 * Something bad happened.  If a command timed out or failed with EIO,
+	 * the FW is not operating within its spec or something catastrophic
+	 * happened to the HW/FW, so stop issuing commands.
*/
-bye: if (ret != -ETIMEDOUT && ret != -EIO)
- t4_fw_bye(adap, adap->fn);
+bye:
+ if (ret != -ETIMEDOUT && ret != -EIO)
+ t4_fw_bye(adap, adap->mbox);
return ret;
}
@@ -3453,7 +4170,7 @@ static void eeh_resume(struct pci_dev *pdev)
rtnl_unlock();
}
-static struct pci_error_handlers cxgb4_eeh = {
+static const struct pci_error_handlers cxgb4_eeh = {
.error_detected = eeh_err_detected,
.slot_reset = eeh_slot_reset,
.resume = eeh_resume,
@@ -3694,15 +4411,7 @@ static void __devinit print_port_info(const struct net_device *dev)
static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
- u16 v;
- int pos;
-
- pos = pci_pcie_cap(dev);
- if (pos > 0) {
- pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v);
- v |= PCI_EXP_DEVCTL_RELAX_EN;
- pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v);
- }
+ pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
/*
@@ -3814,7 +4523,9 @@ static int __devinit init_one(struct pci_dev *pdev,
err = t4_prep_adapter(adapter);
if (err)
goto out_unmap_bar;
+ setup_memwin(adapter);
err = adap_init0(adapter);
+ setup_memwin_rdma(adapter);
if (err)
goto out_unmap_bar;
@@ -3956,8 +4667,11 @@ static void __devexit remove_one(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
+#ifdef CONFIG_PCI_IOV
pci_disable_sriov(pdev);
+#endif
+
if (adapter) {
int i;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index d79980c5fc63..1b899fea1a91 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -100,6 +100,8 @@ struct tid_info {
unsigned int nftids;
unsigned int ftid_base;
+ unsigned int aftid_base;
+ unsigned int aftid_end;
spinlock_t atid_lock ____cacheline_aligned_in_smp;
union aopen_entry *afree;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index d49933ed551f..3ecc087d732d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -68,9 +68,6 @@
*/
#define RX_PKT_SKB_LEN 512
-/* Ethernet header padding prepended to RX_PKTs */
-#define RX_PKT_PAD 2
-
/*
* Max number of Tx descriptors we clean up at a time. Should be modest as
* freeing skbs isn't cheap and it happens while holding locks. We just need
@@ -137,13 +134,6 @@
*/
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
-enum {
- /* packet alignment in FL buffers */
- FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES,
- /* egress status entry size */
- STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64
-};
-
struct tx_sw_desc { /* SW state per Tx descriptor */
struct sk_buff *skb;
struct ulptx_sgl *sgl;
@@ -155,16 +145,57 @@ struct rx_sw_desc { /* SW state per Rx descriptor */
};
/*
- * The low bits of rx_sw_desc.dma_addr have special meaning.
+ * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
+ * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs.
+ * We could easily support more but there doesn't seem to be much need for
+ * that ...
+ */
+#define FL_MTU_SMALL 1500
+#define FL_MTU_LARGE 9000
+
+static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
+ unsigned int mtu)
+{
+ struct sge *s = &adapter->sge;
+
+ return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
+}
+
+#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
+#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
+
+/*
+ * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
+ * these to specify the buffer size as an index into the SGE Free List Buffer
+ * Size register array. We also use bit 4, when the buffer has been unmapped
+ * for DMA, but this is of course never sent to the hardware and is only used
+ * to prevent double unmappings. All of the above requires that the Free List
+ * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
+ * aligned to 32 bytes or a larger power of 2.  Since the SGE's minimal
+ * Free List Buffer alignment is 32 bytes, this works out for us ...
*/
enum {
- RX_LARGE_BUF = 1 << 0, /* buffer is larger than PAGE_SIZE */
- RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
+ RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */
+	RX_BUF_SIZE = 0x0f, /* bottom four bits are for buf sizes */
+ RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */
+
+ /*
+ * XXX We shouldn't depend on being able to use these indices.
+ * XXX Especially when some other Master PF has initialized the
+ * XXX adapter or we use the Firmware Configuration File. We
+ * XXX should really search through the Host Buffer Size register
+ * XXX array for the appropriately sized buffer indices.
+ */
+ RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */
+	RX_LARGE_PG_BUF = 0x1, /* large (FL_PG_ORDER) page buffer */
+
+ RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */
+ RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */
};
static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
- return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
+ return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
}
static inline bool is_buf_mapped(const struct rx_sw_desc *d)
@@ -392,14 +423,35 @@ static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
}
}
-static inline int get_buf_size(const struct rx_sw_desc *d)
+static inline int get_buf_size(struct adapter *adapter,
+ const struct rx_sw_desc *d)
{
-#if FL_PG_ORDER > 0
- return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) :
- PAGE_SIZE;
-#else
- return PAGE_SIZE;
-#endif
+ struct sge *s = &adapter->sge;
+ unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
+ int buf_size;
+
+ switch (rx_buf_size_idx) {
+ case RX_SMALL_PG_BUF:
+ buf_size = PAGE_SIZE;
+ break;
+
+ case RX_LARGE_PG_BUF:
+ buf_size = PAGE_SIZE << s->fl_pg_order;
+ break;
+
+ case RX_SMALL_MTU_BUF:
+ buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
+ break;
+
+ case RX_LARGE_MTU_BUF:
+ buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
+ break;
+
+ default:
+ BUG_ON(1);
+ }
+
+ return buf_size;
}
/**
@@ -418,7 +470,8 @@ static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
if (is_buf_mapped(d))
dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
- get_buf_size(d), PCI_DMA_FROMDEVICE);
+ get_buf_size(adap, d),
+ PCI_DMA_FROMDEVICE);
put_page(d->page);
d->page = NULL;
if (++q->cidx == q->size)
@@ -444,7 +497,7 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
if (is_buf_mapped(d))
dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
- get_buf_size(d), PCI_DMA_FROMDEVICE);
+ get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
d->page = NULL;
if (++q->cidx == q->size)
q->cidx = 0;
@@ -485,6 +538,7 @@ static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
gfp_t gfp)
{
+ struct sge *s = &adap->sge;
struct page *pg;
dma_addr_t mapping;
unsigned int cred = q->avail;
@@ -493,25 +547,27 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
gfp |= __GFP_NOWARN | __GFP_COLD;
-#if FL_PG_ORDER > 0
+ if (s->fl_pg_order == 0)
+ goto alloc_small_pages;
+
/*
* Prefer large buffers
*/
while (n) {
- pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER);
+ pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order);
if (unlikely(!pg)) {
q->large_alloc_failed++;
break; /* fall back to single pages */
}
mapping = dma_map_page(adap->pdev_dev, pg, 0,
- PAGE_SIZE << FL_PG_ORDER,
+ PAGE_SIZE << s->fl_pg_order,
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
- __free_pages(pg, FL_PG_ORDER);
+ __free_pages(pg, s->fl_pg_order);
goto out; /* do not try small pages for this error */
}
- mapping |= RX_LARGE_BUF;
+ mapping |= RX_LARGE_PG_BUF;
*d++ = cpu_to_be64(mapping);
set_rx_sw_desc(sd, pg, mapping);
@@ -525,8 +581,8 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
}
n--;
}
-#endif
+alloc_small_pages:
while (n--) {
pg = __skb_alloc_page(gfp, NULL);
if (unlikely(!pg)) {
@@ -769,8 +825,8 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
wmb(); /* write descriptors before telling HW */
spin_lock(&q->db_lock);
if (!q->db_disabled) {
- t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
- V_QID(q->cntxt_id) | V_PIDX(n));
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+ QID(q->cntxt_id) | PIDX(n));
}
q->db_pidx = q->pidx;
spin_unlock(&q->db_lock);
@@ -1519,6 +1575,8 @@ static noinline int handle_trace_pkt(struct adapter *adap,
static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
const struct cpl_rx_pkt *pkt)
{
+ struct adapter *adapter = rxq->rspq.adap;
+ struct sge *s = &adapter->sge;
int ret;
struct sk_buff *skb;
@@ -1529,8 +1587,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
return;
}
- copy_frags(skb, gl, RX_PKT_PAD);
- skb->len = gl->tot_len - RX_PKT_PAD;
+ copy_frags(skb, gl, s->pktshift);
+ skb->len = gl->tot_len - s->pktshift;
skb->data_len = skb->len;
skb->truesize += skb->data_len;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1566,6 +1624,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
struct sk_buff *skb;
const struct cpl_rx_pkt *pkt;
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+ struct sge *s = &q->adap->sge;
if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
return handle_trace_pkt(q->adap, si);
@@ -1585,7 +1644,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
return 0;
}
- __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */
+ __skb_pull(skb, s->pktshift); /* remove ethernet header padding */
skb->protocol = eth_type_trans(skb, q->netdev);
skb_record_rx_queue(skb, q->idx);
if (skb->dev->features & NETIF_F_RXHASH)
@@ -1696,6 +1755,8 @@ static int process_responses(struct sge_rspq *q, int budget)
int budget_left = budget;
const struct rsp_ctrl *rc;
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+ struct adapter *adapter = q->adap;
+ struct sge *s = &adapter->sge;
while (likely(budget_left)) {
rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
@@ -1722,7 +1783,7 @@ static int process_responses(struct sge_rspq *q, int budget)
/* gather packet fragments */
for (frags = 0, fp = si.frags; ; frags++, fp++) {
rsd = &rxq->fl.sdesc[rxq->fl.cidx];
- bufsz = get_buf_size(rsd);
+ bufsz = get_buf_size(adapter, rsd);
fp->page = rsd->page;
fp->offset = q->offset;
fp->size = min(bufsz, len);
@@ -1747,7 +1808,7 @@ static int process_responses(struct sge_rspq *q, int budget)
si.nfrags = frags + 1;
ret = q->handler(q, q->cur_desc, &si);
if (likely(ret == 0))
- q->offset += ALIGN(fp->size, FL_ALIGN);
+ q->offset += ALIGN(fp->size, s->fl_align);
else
restore_rx_bufs(&si, &rxq->fl, frags);
} else if (likely(rsp_type == RSP_TYPE_CPL)) {
@@ -1983,6 +2044,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
{
int ret, flsz = 0;
struct fw_iq_cmd c;
+ struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
/* Size needs to be multiple of 16, including status entry. */
@@ -2015,11 +2077,11 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
fl->size = roundup(fl->size, 8);
fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
sizeof(struct rx_sw_desc), &fl->addr,
- &fl->sdesc, STAT_LEN, NUMA_NO_NODE);
+ &fl->sdesc, s->stat_len, NUMA_NO_NODE);
if (!fl->desc)
goto fl_nomem;
- flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
+ flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
FW_IQ_CMD_FL0FETCHRO(1) |
FW_IQ_CMD_FL0DATARO(1) |
@@ -2096,14 +2158,15 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
{
int ret, nentries;
struct fw_eq_eth_cmd c;
+ struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
/* Add status entries */
- nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
- &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
+ &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
netdev_queue_numa_node_read(netdevq));
if (!txq->q.desc)
return -ENOMEM;
@@ -2149,10 +2212,11 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
{
int ret, nentries;
struct fw_eq_ctrl_cmd c;
+ struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
/* Add status entries */
- nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
sizeof(struct tx_desc), 0, &txq->q.phys_addr,
@@ -2200,14 +2264,15 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
{
int ret, nentries;
struct fw_eq_ofld_cmd c;
+ struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
/* Add status entries */
- nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
- &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
+ &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
NUMA_NO_NODE);
if (!txq->q.desc)
return -ENOMEM;
@@ -2251,8 +2316,10 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
static void free_txq(struct adapter *adap, struct sge_txq *q)
{
+ struct sge *s = &adap->sge;
+
dma_free_coherent(adap->pdev_dev,
- q->size * sizeof(struct tx_desc) + STAT_LEN,
+ q->size * sizeof(struct tx_desc) + s->stat_len,
q->desc, q->phys_addr);
q->cntxt_id = 0;
q->sdesc = NULL;
@@ -2262,6 +2329,7 @@ static void free_txq(struct adapter *adap, struct sge_txq *q)
static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
struct sge_fl *fl)
{
+ struct sge *s = &adap->sge;
unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
@@ -2276,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
if (fl) {
free_rx_bufs(adap, fl, fl->avail);
- dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN,
+ dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
fl->desc, fl->addr);
kfree(fl->sdesc);
fl->sdesc = NULL;
@@ -2408,18 +2476,112 @@ void t4_sge_stop(struct adapter *adap)
* Performs SGE initialization needed every time after a chip reset.
* We do not initialize any of the queues here, instead the driver
* top-level must request them individually.
+ *
+ * Called in two different modes:
+ *
+ * 1. Perform actual hardware initialization and record hard-coded
+ * parameters which were used. This gets used when we're the
+ * Master PF and the Firmware Configuration File support didn't
+ * work for some reason.
+ *
+ * 2. We're not the Master PF or initialization was performed with
+ * a Firmware Configuration File. In this case we need to grab
+ * any of the SGE operating parameters that we need to have in
+ * order to do our job and make sure we can live with them ...
*/
-void t4_sge_init(struct adapter *adap)
+
+static int t4_sge_init_soft(struct adapter *adap)
{
- unsigned int i, v;
struct sge *s = &adap->sge;
- unsigned int fl_align_log = ilog2(FL_ALIGN);
+ u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
+ u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
+ u32 ingress_rx_threshold;
- t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK |
- INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE,
- INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
- RXPKTCPLMODE |
- (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
+ /*
+ * Verify that CPL messages are going to the Ingress Queue for
+ * process_responses() and that only packet data is going to the
+ * Free Lists.
+ */
+ if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) !=
+ RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+ dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Validate the Host Buffer Register Array indices that we want to
+ * use ...
+ *
+ * XXX Note that we should really read through the Host Buffer Size
+ * XXX register array and find the indices of the Buffer Sizes which
+ * XXX meet our needs!
+ */
+ #define READ_FL_BUF(x) \
+ t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32))
+
+ fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
+ fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
+ fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
+ fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
+
+ #undef READ_FL_BUF
+
+ if (fl_small_pg != PAGE_SIZE ||
+ (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg ||
+ (fl_large_pg & (fl_large_pg-1)) != 0))) {
+ dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
+ fl_small_pg, fl_large_pg);
+ return -EINVAL;
+ }
+ if (fl_large_pg)
+ s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
+
+ if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
+ fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
+ dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
+ fl_small_mtu, fl_large_mtu);
+ return -EINVAL;
+ }
+
+ /*
+ * Retrieve our RX interrupt holdoff timer values and counter
+ * threshold values from the SGE parameters.
+ */
+ timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1);
+ timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3);
+ timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5);
+ s->timer_val[0] = core_ticks_to_us(adap,
+ TIMERVALUE0_GET(timer_value_0_and_1));
+ s->timer_val[1] = core_ticks_to_us(adap,
+ TIMERVALUE1_GET(timer_value_0_and_1));
+ s->timer_val[2] = core_ticks_to_us(adap,
+ TIMERVALUE2_GET(timer_value_2_and_3));
+ s->timer_val[3] = core_ticks_to_us(adap,
+ TIMERVALUE3_GET(timer_value_2_and_3));
+ s->timer_val[4] = core_ticks_to_us(adap,
+ TIMERVALUE4_GET(timer_value_4_and_5));
+ s->timer_val[5] = core_ticks_to_us(adap,
+ TIMERVALUE5_GET(timer_value_4_and_5));
+
+ ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD);
+ s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
+ s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
+ s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
+ s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
+
+ return 0;
+}
+
+static int t4_sge_init_hard(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+
+ /*
+ * Set up our basic SGE mode to deliver CPL messages to our Ingress
+	 * Queue and packet data to the Free Lists.
+ */
+ t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK,
+ RXPKTCPLMODE_MASK);
/*
* Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
@@ -2433,13 +2595,24 @@ void t4_sge_init(struct adapter *adap)
t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
F_ENABLE_DROP);
- for (i = v = 0; i < 32; i += 4)
- v |= (PAGE_SHIFT - 10) << i;
- t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);
- t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
-#if FL_PG_ORDER > 0
- t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
-#endif
+ /*
+ * SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by
+ * t4_fixup_host_params().
+ */
+ s->fl_pg_order = FL_PG_ORDER;
+ if (s->fl_pg_order)
+ t4_write_reg(adap,
+ SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32),
+ PAGE_SIZE << FL_PG_ORDER);
+ t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32),
+ FL_MTU_SMALL_BUFSIZE(adap));
+ t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32),
+ FL_MTU_LARGE_BUFSIZE(adap));
+
+ /*
+ * Note that the SGE Ingress Packet Count Interrupt Threshold and
+ * Timer Holdoff values must be supplied by our caller.
+ */
t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
THRESHOLD_0(s->counter_val[0]) |
THRESHOLD_1(s->counter_val[1]) |
@@ -2449,14 +2622,54 @@ void t4_sge_init(struct adapter *adap)
TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
- TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) |
- TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3])));
+ TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) |
+ TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3])));
t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
- TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) |
- TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5])));
+ TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) |
+ TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5])));
+
+ return 0;
+}
+
+int t4_sge_init(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+ u32 sge_control;
+ int ret;
+
+ /*
+ * Ingress Padding Boundary and Egress Status Page Size are set up by
+ * t4_fixup_host_params().
+ */
+ sge_control = t4_read_reg(adap, SGE_CONTROL);
+ s->pktshift = PKTSHIFT_GET(sge_control);
+ s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
+ s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
+ X_INGPADBOUNDARY_SHIFT);
+
+ if (adap->flags & USING_SOFT_PARAMS)
+ ret = t4_sge_init_soft(adap);
+ else
+ ret = t4_sge_init_hard(adap);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * A FL with <= fl_starve_thres buffers is starving and a periodic
+ * timer will attempt to refill it. This needs to be larger than the
+ * SGE's Egress Congestion Threshold. If it isn't, then we can get
+ * stuck waiting for new packets while the SGE is waiting for us to
+ * give it more Free List entries. (Note that the SGE's Egress
+ * Congestion Threshold is in units of 2 Free List pointers.)
+ */
+ s->fl_starve_thres
+ = EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1;
+
setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */
s->idma_state[0] = s->idma_state[1] = 0;
spin_lock_init(&s->intrq_lock);
+
+ return 0;
}
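
For reference, the buffer-size and starvation-threshold arithmetic used in
t4_sge_init_soft()/t4_sge_init() above can be reproduced with a standalone
sketch (illustrative only: register field extraction is elided and the input
values are assumptions, not values read from hardware):

	#include <stdio.h>

	/* Mirror of fl_mtu_bufsize(): packet shift + Ethernet header + VLAN
	 * tag + MTU, rounded up to the Free List alignment (ALIGN() in the
	 * driver). */
	static unsigned int fl_mtu_bufsize(unsigned int pktshift,
					   unsigned int fl_align,
					   unsigned int mtu)
	{
		unsigned int raw = pktshift + 14 /* ETH_HLEN */ +
				   4 /* VLAN_HLEN */ + mtu;

		return (raw + fl_align - 1) & ~(fl_align - 1);
	}

	int main(void)
	{
		unsigned int pktshift = 2, fl_align = 64;  /* assumed SGE_CONTROL fields */
		unsigned int egrthreshold = 64;            /* assumed SGE_CONM_CTRL field */

		printf("1500-byte MTU buffer: %u\n",
		       fl_mtu_bufsize(pktshift, fl_align, 1500));
		printf("9000-byte MTU buffer: %u\n",
		       fl_mtu_bufsize(pktshift, fl_align, 9000));
		/* The Egress Congestion Threshold is in units of 2 Free List
		 * pointers, hence the *2 + 1. */
		printf("fl_starve_thres: %u\n", egrthreshold * 2 + 1);
		return 0;
	}
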
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index fa947dfa4c30..35b81d8b59e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -120,6 +120,28 @@ static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
}
}
+/**
+ * t4_write_indirect - write indirectly addressed registers
+ * @adap: the adapter
+ * @addr_reg: register holding the indirect addresses
+ * @data_reg: register holding the value for the indirect registers
+ * @vals: values to write
+ * @nregs: how many indirect registers to write
+ * @start_idx: address of first indirect register to write
+ *
+ * Writes a sequential block of registers that are accessed indirectly
+ * through an address/data register pair.
+ */
+void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, const u32 *vals,
+ unsigned int nregs, unsigned int start_idx)
+{
+ while (nregs--) {
+ t4_write_reg(adap, addr_reg, start_idx++);
+ t4_write_reg(adap, data_reg, *vals++);
+ }
+}
+
/*
* Get the reply to a mailbox command and store it in @rpl in big-endian order.
*/
@@ -330,6 +352,143 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
return 0;
}
+/*
+ * t4_mem_win_rw - read/write memory through PCIE memory window
+ * @adap: the adapter
+ * @addr: address of first byte requested
+ * @data: MEMWIN0_APERTURE bytes of data containing the requested address
+ * @dir: direction of transfer 1 => read, 0 => write
+ *
+ * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
+ * MEMWIN0_APERTURE-byte-aligned address that covers the requested
+ * address @addr.
+ */
+static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
+{
+ int i;
+
+ /*
+ * Setup offset into PCIE memory window. Address must be a
+ * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to
+ * ensure that changes propagate before we attempt to use the new
+ * values.)
+ */
+ t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
+ addr & ~(MEMWIN0_APERTURE - 1));
+ t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
+
+	/* Transfer data 4 bytes at a time, up to MEMWIN0_APERTURE bytes */
+ for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) {
+ if (dir)
+ *data++ = t4_read_reg(adap, (MEMWIN0_BASE + i));
+ else
+ t4_write_reg(adap, (MEMWIN0_BASE + i), *data++);
+ }
+
+ return 0;
+}
+
+/**
+ * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
+ * @adap: the adapter
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
+ * @addr: address within indicated memory type
+ * @len: amount of memory to transfer
+ * @buf: host memory buffer
+ * @dir: direction of transfer 1 => read, 0 => write
+ *
+ * Reads/writes an [almost] arbitrary memory region in the firmware: the
+ * firmware memory address, length and host buffer must be aligned on
+ * 32-bit boundaries.  The memory is transferred as a raw byte sequence
+ * from/to the firmware's memory.  If this memory contains data
+ * structures which contain multi-byte integers, it's the caller's
+ * responsibility to perform appropriate byte order conversions.
+ */
+static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
+ __be32 *buf, int dir)
+{
+ u32 pos, start, end, offset, memoffset;
+ int ret;
+
+ /*
+ * Argument sanity checks ...
+ */
+ if ((addr & 0x3) || (len & 0x3))
+ return -EINVAL;
+
+ /*
+ * Offset into the region of memory which is being accessed
+ * MEM_EDC0 = 0
+ * MEM_EDC1 = 1
+ * MEM_MC = 2
+ */
+ memoffset = (mtype * (5 * 1024 * 1024));
+
+ /* Determine the PCIE_MEM_ACCESS_OFFSET */
+ addr = addr + memoffset;
+
+ /*
+	 * The underlying EDC/MC read routines read MEMWIN0_APERTURE bytes
+ * at a time so we need to round down the start and round up the end.
+ * We'll start copying out of the first line at (addr - start) a word
+ * at a time.
+ */
+ start = addr & ~(MEMWIN0_APERTURE-1);
+ end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
+ offset = (addr - start)/sizeof(__be32);
+
+ for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
+ __be32 data[MEMWIN0_APERTURE/sizeof(__be32)];
+
+ /*
+ * If we're writing, copy the data from the caller's memory
+ * buffer
+ */
+ if (!dir) {
+ /*
+ * If we're doing a partial write, then we need to do
+ * a read-modify-write ...
+ */
+ if (offset || len < MEMWIN0_APERTURE) {
+ ret = t4_mem_win_rw(adap, pos, data, 1);
+ if (ret)
+ return ret;
+ }
+ while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
+ len > 0) {
+ data[offset++] = *buf++;
+ len -= sizeof(__be32);
+ }
+ }
+
+ /*
+ * Transfer a block of memory and bail if there's an error.
+ */
+ ret = t4_mem_win_rw(adap, pos, data, dir);
+ if (ret)
+ return ret;
+
+ /*
+ * If we're reading, copy the data into the caller's memory
+ * buffer.
+ */
+ if (dir)
+ while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
+ len > 0) {
+ *buf++ = data[offset++];
+ len -= sizeof(__be32);
+ }
+ }
+
+ return 0;
+}
+
+int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
+ __be32 *buf)
+{
+ return t4_memory_rw(adap, mtype, addr, len, buf, 0);
+}
+
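
The window rounding that t4_memory_rw() performs before looping can be
checked with a short standalone sketch (illustrative only; the aperture size
and addresses below are assumed example values):

	#include <stdio.h>

	#define APERTURE 65536u	/* stand-in for MEMWIN0_APERTURE */

	int main(void)
	{
		unsigned int addr = 0x12345678;	/* 32-bit aligned request */
		unsigned int len = 1024;

		/* Round the window down/up to the aperture size and compute
		 * the starting word offset within the first window. */
		unsigned int start = addr & ~(APERTURE - 1);
		unsigned int end = (addr + len + APERTURE - 1) & ~(APERTURE - 1);
		unsigned int offset = (addr - start) / 4;	/* sizeof(__be32) */

		printf("start=0x%x end=0x%x first-word-offset=%u windows=%u\n",
		       start, end, offset, (end - start) / APERTURE);
		return 0;
	}
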
#define EEPROM_STAT_ADDR 0x7bfc
#define VPD_BASE 0
#define VPD_LEN 512
@@ -355,8 +514,9 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
*
* Reads card parameters stored in VPD EEPROM.
*/
-static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
+ u32 cclk_param, cclk_val;
int i, ret;
int ec, sn;
u8 vpd[VPD_LEN], csum;
@@ -418,6 +578,19 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
strim(p->sn);
+
+ /*
+ * Ask firmware for the Core Clock since it knows how to translate the
+ * Reference Clock ('V2') VPD field into a Core Clock value ...
+ */
+ cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+ ret = t4_query_params(adapter, adapter->mbox, 0, 0,
+ 1, &cclk_param, &cclk_val);
+ if (ret)
+ return ret;
+ p->cclk = cclk_val;
+
return 0;
}
@@ -718,6 +891,77 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
}
/**
+ * t4_flash_cfg_addr - return the address of the flash configuration file
+ * @adapter: the adapter
+ *
+ * Return the address within the flash where the Firmware Configuration
+ * File is stored.
+ */
+unsigned int t4_flash_cfg_addr(struct adapter *adapter)
+{
+ if (adapter->params.sf_size == 0x100000)
+ return FLASH_FPGA_CFG_START;
+ else
+ return FLASH_CFG_START;
+}
+
+/**
+ * t4_load_cfg - download config file
+ * @adap: the adapter
+ * @cfg_data: the cfg text file to write
+ * @size: text file size
+ *
+ * Write the supplied config text file to the card's serial flash.
+ */
+int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+{
+ int ret, i, n;
+ unsigned int addr;
+ unsigned int flash_cfg_start_sec;
+ unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+
+ addr = t4_flash_cfg_addr(adap);
+ flash_cfg_start_sec = addr / SF_SEC_SIZE;
+
+ if (size > FLASH_CFG_MAX_SIZE) {
+ dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
+ FLASH_CFG_MAX_SIZE);
+ return -EFBIG;
+ }
+
+ i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
+ sf_sec_size);
+ ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
+ flash_cfg_start_sec + i - 1);
+ /*
+ * If size == 0 then we're simply erasing the FLASH sectors associated
+ * with the on-adapter Firmware Configuration File.
+ */
+ if (ret || size == 0)
+ goto out;
+
+ /* this will write to the flash up to SF_PAGE_SIZE at a time */
+ for (i = 0; i < size; i += SF_PAGE_SIZE) {
+ if ((size - i) < SF_PAGE_SIZE)
+ n = size - i;
+ else
+ n = SF_PAGE_SIZE;
+ ret = t4_write_flash(adap, addr, n, cfg_data);
+ if (ret)
+ goto out;
+
+ addr += SF_PAGE_SIZE;
+ cfg_data += SF_PAGE_SIZE;
+ }
+
+out:
+ if (ret)
+ dev_err(adap->pdev_dev, "config file %s failed %d\n",
+ (size == 0 ? "clear" : "download"), ret);
+ return ret;
+}
+
+/**
* t4_load_fw - download firmware
* @adap: the adapter
* @fw_data: the firmware image to write
@@ -1018,9 +1262,9 @@ static void sge_intr_handler(struct adapter *adapter)
{ ERR_INVALID_CIDX_INC,
"SGE GTS CIDX increment too large", -1, 0 },
{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
- { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
- { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
- { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
+ { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
+ { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
+ { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
"SGE IQID > 1023 received CPL for FL", -1, 0 },
{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
@@ -1520,7 +1764,7 @@ void t4_intr_enable(struct adapter *adapter)
ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
- F_DBFIFO_HP_INT | F_DBFIFO_LP_INT |
+ DBFIFO_HP_INT | DBFIFO_LP_INT |
EGRESS_SIZE_ERR);
t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
@@ -1717,6 +1961,23 @@ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
}
/**
+ * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
+ * @adap: the adapter
+ * @addr: the indirect TP register address
+ * @mask: specifies the field within the register to modify
+ * @val: new value for the field
+ *
+ * Sets a field of an indirect TP register to the given value.
+ */
+void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
+ unsigned int mask, unsigned int val)
+{
+ t4_write_reg(adap, TP_PIO_ADDR, addr);
+ val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
+ t4_write_reg(adap, TP_PIO_DATA, val);
+}
+
+/**
* init_cong_ctrl - initialize congestion control parameters
* @a: the alpha values for congestion control
* @b: the beta values for congestion control
@@ -2000,9 +2261,9 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
- F_FW_CMD_WRITE |
- V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
+ c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
c.cycles_to_len16 = htonl(FW_LEN16(c));
c.u.addrval.addr = htonl(addr);
c.u.addrval.val = htonl(val);
@@ -2033,8 +2294,8 @@ int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
return -EINVAL;
- t4_write_reg(adap, A_PCIE_MEM_ACCESS_OFFSET, addr & ~15);
- t4_read_reg(adap, A_PCIE_MEM_ACCESS_OFFSET);
+ t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15);
+ t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
for (i = 0; i < len; i += 4)
*data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i));
@@ -2102,39 +2363,129 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
}
/**
- * t4_fw_hello - establish communication with FW
- * @adap: the adapter
- * @mbox: mailbox to use for the FW command
- * @evt_mbox: mailbox to receive async FW events
- * @master: specifies the caller's willingness to be the device master
- * @state: returns the current device state
+ * t4_fw_hello - establish communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @evt_mbox: mailbox to receive async FW events
+ * @master: specifies the caller's willingness to be the device master
+ * @state: returns the current device state (if non-NULL)
*
- * Issues a command to establish communication with FW.
+ * Issues a command to establish communication with FW. Returns either
+ * an error (negative integer) or the mailbox of the Master PF.
*/
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
enum dev_master master, enum dev_state *state)
{
int ret;
struct fw_hello_cmd c;
+ u32 v;
+ unsigned int master_mbox;
+ int retries = FW_CMD_HELLO_RETRIES;
+retry:
+ memset(&c, 0, sizeof(c));
INIT_CMD(c, HELLO, WRITE);
c.err_to_mbasyncnot = htonl(
FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
- FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
- FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
+ FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
+ FW_HELLO_CMD_MBMASTER_MASK) |
+ FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
+ FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
+ FW_HELLO_CMD_CLEARINIT);
+ /*
+ * Issue the HELLO command to the firmware. If it's not successful
+ * but indicates that we got a "busy" or "timeout" condition, retry
+ * the HELLO until we exhaust our retry limit.
+ */
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
- if (ret == 0 && state) {
- u32 v = ntohl(c.err_to_mbasyncnot);
- if (v & FW_HELLO_CMD_INIT)
- *state = DEV_STATE_INIT;
- else if (v & FW_HELLO_CMD_ERR)
+ if (ret < 0) {
+ if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
+ goto retry;
+ return ret;
+ }
+
+ v = ntohl(c.err_to_mbasyncnot);
+ master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
+ if (state) {
+ if (v & FW_HELLO_CMD_ERR)
*state = DEV_STATE_ERR;
+ else if (v & FW_HELLO_CMD_INIT)
+ *state = DEV_STATE_INIT;
else
*state = DEV_STATE_UNINIT;
}
- return ret;
+
+ /*
+ * If we're not the Master PF then we need to wait around for the
+ * Master PF Driver to finish setting up the adapter.
+ *
+ * Note that we also do this wait if we're a non-Master-capable PF and
+ * there is no current Master PF; a Master PF may show up momentarily
+ * and we wouldn't want to fail pointlessly. (This can happen when an
+ * OS loads lots of different drivers rapidly at the same time). In
+ * this case, the Master PF returned by the firmware will be
+ * FW_PCIE_FW_MASTER_MASK so the test below will work ...
+ */
+ if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
+ master_mbox != mbox) {
+ int waiting = FW_CMD_HELLO_TIMEOUT;
+
+ /*
+ * Wait for the firmware to either indicate an error or
+ * initialized state. If we see either of these we bail out
+ * and report the issue to the caller. If we exhaust the
+ * "hello timeout" and we haven't exhausted our retries, try
+ * again. Otherwise bail with a timeout error.
+ */
+ for (;;) {
+ u32 pcie_fw;
+
+ msleep(50);
+ waiting -= 50;
+
+ /*
+			 * If neither Error nor Initialized is indicated
+			 * by the firmware, keep waiting till we exhaust our
+			 * timeout ... and then retry if we haven't exhausted
+ * our retries ...
+ */
+ pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
+ if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
+ if (waiting <= 0) {
+ if (retries-- > 0)
+ goto retry;
+
+ return -ETIMEDOUT;
+ }
+ continue;
+ }
+
+ /*
+			 * We either have an Error or Initialized condition;
+			 * report errors preferentially.
+ */
+ if (state) {
+ if (pcie_fw & FW_PCIE_FW_ERR)
+ *state = DEV_STATE_ERR;
+ else if (pcie_fw & FW_PCIE_FW_INIT)
+ *state = DEV_STATE_INIT;
+ }
+
+ /*
+			 * If we came in before a Master PF was selected and
+			 * one has since become valid, grab its identity
+			 * for our caller.
+ */
+ if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
+ (pcie_fw & FW_PCIE_FW_MASTER_VLD))
+ master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
+ break;
+ }
+ }
+
+ return master_mbox;
}
/**
@@ -2186,6 +2537,334 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
}
/**
+ * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW RESET command (if desired)
+ * @force: force uP into RESET even if FW RESET command fails
+ *
+ * Issues a RESET command to firmware (if desired) with a HALT indication
+ * and then puts the microprocessor into RESET state. The RESET command
+ * will only be issued if a legitimate mailbox is provided (mbox <=
+ * FW_PCIE_FW_MASTER_MASK).
+ *
+ * This is generally used in order for the host to safely manipulate the
+ * adapter without fear of conflicting with whatever the firmware might
+ * be doing. The only way out of this state is to RESTART the firmware
+ * ...
+ */
+int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
+{
+ int ret = 0;
+
+ /*
+ * If a legitimate mailbox is provided, issue a RESET command
+ * with a HALT indication.
+ */
+ if (mbox <= FW_PCIE_FW_MASTER_MASK) {
+ struct fw_reset_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, RESET, WRITE);
+ c.val = htonl(PIORST | PIORSTMODE);
+ c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ }
+
+ /*
+ * Normally we won't complete the operation if the firmware RESET
+ * command fails but if our caller insists we'll go ahead and put the
+ * uP into RESET. This can be useful if the firmware is hung or even
+ * missing ... We'll have to take the risk of putting the uP into
+ * RESET without the cooperation of firmware in that case.
+ *
+ * We also force the firmware's HALT flag to be on in case we bypassed
+ * the firmware RESET command above or we're dealing with old firmware
+ * which doesn't have the HALT capability. This will serve as a flag
+ * for the incoming firmware to know that it's coming out of a HALT
+ * rather than a RESET ... if it's new enough to understand that ...
+ */
+ if (ret == 0 || force) {
+ t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
+ t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
+ FW_PCIE_FW_HALT);
+ }
+
+ /*
+ * And we always return the result of the firmware RESET command
+ * even when we force the uP into RESET ...
+ */
+ return ret;
+}
+
+/**
+ * t4_fw_restart - restart the firmware by taking the uP out of RESET
+ * @adap: the adapter
+ * @mbox: mailbox to use for any firmware RESET command (if desired)
+ * @reset: if we want to do a RESET to restart things
+ *
+ * Restart firmware previously halted by t4_fw_halt(). On successful
+ * return the previous PF Master remains as the new PF Master and there
+ * is no need to issue a new HELLO command, etc.
+ *
+ * We do this in two ways:
+ *
+ * 1. If we're dealing with newer firmware we'll simply want to take
+ * the chip's microprocessor out of RESET. This will cause the
+ * firmware to start up from its start vector. And then we'll loop
+ * until the firmware indicates it's started again (PCIE_FW.HALT
+ * reset to 0) or we timeout.
+ *
+ * 2. If we're dealing with older firmware then we'll need to RESET
+ * the chip since older firmware won't recognize the PCIE_FW.HALT
+ * flag and automatically RESET itself on startup.
+ */
+int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
+{
+ if (reset) {
+ /*
+ * Since we're directing the RESET instead of the firmware
+ * doing it automatically, we need to clear the PCIE_FW.HALT
+ * bit.
+ */
+ t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
+
+ /*
+ * If we've been given a valid mailbox, first try to get the
+ * firmware to do the RESET. If that works, great and we can
+ * return success. Otherwise, if we haven't been given a
+ * valid mailbox or the RESET command failed, fall back to
+ * hitting the chip with a hammer.
+ */
+ if (mbox <= FW_PCIE_FW_MASTER_MASK) {
+ t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
+ msleep(100);
+ if (t4_fw_reset(adap, mbox,
+ PIORST | PIORSTMODE) == 0)
+ return 0;
+ }
+
+ t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
+ msleep(2000);
+ } else {
+ int ms;
+
+ t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
+ for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
+ if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
+ return 0;
+ msleep(100);
+ ms += 100;
+ }
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/**
+ * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW RESET command (if desired)
+ * @fw_data: the firmware image to write
+ * @size: image size
+ * @force: force upgrade even if firmware doesn't cooperate
+ *
+ * Perform all of the steps necessary for upgrading an adapter's
+ * firmware image. Normally this requires the cooperation of the
+ * existing firmware in order to halt all existing activities
+ * but if an invalid mailbox token is passed in we skip that step
+ * (though we'll still put the adapter microprocessor into RESET in
+ * that case).
+ *
+ * On successful return the new firmware will have been loaded and
+ * the adapter will have been fully RESET losing all previous setup
+ * state. On unsuccessful return the adapter may be completely hosed ...
+ * positive errno indicates that the adapter is ~probably~ intact, a
+ * negative errno indicates that things are looking bad ...
+ */
+int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
+ const u8 *fw_data, unsigned int size, int force)
+{
+ const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
+ int reset, ret;
+
+ ret = t4_fw_halt(adap, mbox, force);
+ if (ret < 0 && !force)
+ return ret;
+
+ ret = t4_load_fw(adap, fw_data, size);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Older versions of the firmware don't understand the new
+ * PCIE_FW.HALT flag and so won't know to perform a RESET when they
+ * restart. So for newly loaded older firmware we'll have to do the
+ * RESET for it so it starts up on a clean slate. We can tell if
+ * the newly loaded firmware will handle this right by checking
+ * its header flags to see if it advertises the capability.
+ */
+ reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
+ return t4_fw_restart(adap, mbox, reset);
+}
+
+
+/**
+ * t4_fw_config_file - setup an adapter via a Configuration File
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @mtype: the memory type where the Configuration File is located
+ * @maddr: the memory address where the Configuration File is located
+ * @finiver: return value for CF [fini] version
+ * @finicsum: return value for CF [fini] checksum
+ * @cfcsum: return value for CF computed checksum
+ *
+ * Issue a command to get the firmware to process the Configuration
+ * File located at the specified mtype/maddress. If the Configuration
+ * File is processed successfully and return value pointers are
+ * provided, the Configuration File's "[fini]" section version and
+ * checksum values will be returned along with the computed checksum.
+ * It's up to the caller to decide how it wants to respond to the
+ * checksums not matching, but it is recommended that a prominent warning
+ * be emitted in order to help people rapidly identify changed or
+ * corrupted Configuration Files.
+ *
+ * Also note that it's possible to modify things like "niccaps",
+ * "toecaps", etc. between processing the Configuration File and telling
+ * the firmware to use the new configuration.  Callers which want to do
+ * this will need to "hand-roll" their own CAPS_CONFIGS commands for
+ * Configuration Files.
+ */
+int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
+ unsigned int mtype, unsigned int maddr,
+ u32 *finiver, u32 *finicsum, u32 *cfcsum)
+{
+ struct fw_caps_config_cmd caps_cmd;
+ int ret;
+
+ /*
+ * Tell the firmware to process the indicated Configuration File.
+ * If there are no errors and the caller has provided return value
+ * pointers for the [fini] section version, checksum and computed
+ * checksum, pass those back to the caller.
+ */
+ memset(&caps_cmd, 0, sizeof(caps_cmd));
+ caps_cmd.op_to_write =
+ htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ);
+ caps_cmd.retval_len16 =
+ htonl(FW_CAPS_CONFIG_CMD_CFVALID |
+ FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
+ FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
+ FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd);
+ if (ret < 0)
+ return ret;
+
+ if (finiver)
+ *finiver = ntohl(caps_cmd.finiver);
+ if (finicsum)
+ *finicsum = ntohl(caps_cmd.finicsum);
+ if (cfcsum)
+ *cfcsum = ntohl(caps_cmd.cfcsum);
+
+ /*
+ * And now tell the firmware to use the configuration we just loaded.
+ */
+ caps_cmd.op_to_write =
+ htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE);
+ caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
+ return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
+}
+
+/**
+ * t4_fixup_host_params - fix up host-dependent parameters
+ * @adap: the adapter
+ * @page_size: the host's Base Page Size
+ * @cache_line_size: the host's Cache Line Size
+ *
+ * Various registers in T4 contain values which are dependent on the
+ * host's Base Page and Cache Line Sizes. This function will fix all of
+ * those registers with the appropriate values as passed in ...
+ */
+int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
+ unsigned int cache_line_size)
+{
+ unsigned int page_shift = fls(page_size) - 1;
+ unsigned int sge_hps = page_shift - 10;
+ unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
+ unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
+ unsigned int fl_align_log = fls(fl_align) - 1;
+
+ t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
+ HOSTPAGESIZEPF0(sge_hps) |
+ HOSTPAGESIZEPF1(sge_hps) |
+ HOSTPAGESIZEPF2(sge_hps) |
+ HOSTPAGESIZEPF3(sge_hps) |
+ HOSTPAGESIZEPF4(sge_hps) |
+ HOSTPAGESIZEPF5(sge_hps) |
+ HOSTPAGESIZEPF6(sge_hps) |
+ HOSTPAGESIZEPF7(sge_hps));
+
+ t4_set_reg_field(adap, SGE_CONTROL,
+ INGPADBOUNDARY(INGPADBOUNDARY_MASK) |
+ EGRSTATUSPAGESIZE_MASK,
+ INGPADBOUNDARY(fl_align_log - 5) |
+ EGRSTATUSPAGESIZE(stat_len != 64));
+
+ /*
+ * Adjust various SGE Free List Host Buffer Sizes.
+ *
+ * This is something of a crock since we're using fixed indices into
+ * the array which are also known by the sge.c code and the T4
+ * Firmware Configuration File. We need to come up with a much better
+ * approach to managing this array. For now, the first four entries
+ * are:
+ *
+ * 0: Host Page Size
+ * 1: 64KB
+ * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
+ * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
+ *
+ * For the single-MTU buffers in unpacked mode we need to include
+ * space for the SGE Control Packet Shift, 14 byte Ethernet header,
+ * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
+ * Padding boundary.  All of these are accommodated in the Factory
+ * Default Firmware Configuration File, but we need to adjust it for
+ * this host's cache line size.
+ */
+ t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
+ t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
+ (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
+ & ~(fl_align-1));
+ t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
+ (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
+ & ~(fl_align-1));
+
+ t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
+
+ return 0;
+}
+
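A hedged usage sketch: a host driver would typically call this early in adapter initialization with its own memory geometry. PAGE_SIZE and L1_CACHE_BYTES are the usual kernel sources for those two values; the wrapper function here is only illustrative.

/*
 * Illustrative only: fix up the SGE host-dependent registers using this
 * host's base page size and cache line size.
 */
static int example_fixup_host_params(struct adapter *adap)
{
        return t4_fixup_host_params(adap, PAGE_SIZE, L1_CACHE_BYTES);
}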
+/**
+ * t4_fw_initialize - ask FW to initialize the device
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to FW to partially initialize the device. This
+ * performs initialization that generally doesn't depend on user input.
+ */
+int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
+{
+ struct fw_initialize_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, INITIALIZE, WRITE);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
* t4_query_params - query FW or device parameters
* @adap: the adapter
* @mbox: mailbox to use for the FW command
@@ -2741,11 +3420,9 @@ static void __devinit get_pci_mode(struct adapter *adapter,
struct pci_params *p)
{
u16 val;
- u32 pcie_cap = pci_pcie_cap(adapter->pdev);
- if (pcie_cap) {
- pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
- &val);
+ if (pci_is_pcie(adapter->pdev)) {
+ pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
p->speed = val & PCI_EXP_LNKSTA_CLS;
p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
}
@@ -2837,10 +3514,6 @@ int __devinit t4_prep_adapter(struct adapter *adapter)
return ret;
}
- ret = get_vpd_params(adapter, &adapter->params.vpd);
- if (ret < 0)
- return ret;
-
init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
/*
@@ -2848,6 +3521,7 @@ int __devinit t4_prep_adapter(struct adapter *adapter)
*/
adapter->params.nports = 1;
adapter->params.portvec = 1;
+ adapter->params.vpd.cclk = 50000;
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index c26b455f37de..f534ed7e10e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -58,6 +58,7 @@ enum {
enum {
SF_PAGE_SIZE = 256, /* serial flash page size */
+ SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
};
enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */
@@ -137,4 +138,83 @@ struct rsp_ctrl {
#define QINTR_CNT_EN 0x1
#define QINTR_TIMER_IDX(x) ((x) << 1)
#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7)
+
+/*
+ * Flash layout.
+ */
+#define FLASH_START(start) ((start) * SF_SEC_SIZE)
+#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE)
+
+enum {
+ /*
+ * Various Expansion-ROM boot images, etc.
+ */
+ FLASH_EXP_ROM_START_SEC = 0,
+ FLASH_EXP_ROM_NSECS = 6,
+ FLASH_EXP_ROM_START = FLASH_START(FLASH_EXP_ROM_START_SEC),
+ FLASH_EXP_ROM_MAX_SIZE = FLASH_MAX_SIZE(FLASH_EXP_ROM_NSECS),
+
+ /*
+ * iSCSI Boot Firmware Table (iBFT) and other driver-related
+ * parameters ...
+ */
+ FLASH_IBFT_START_SEC = 6,
+ FLASH_IBFT_NSECS = 1,
+ FLASH_IBFT_START = FLASH_START(FLASH_IBFT_START_SEC),
+ FLASH_IBFT_MAX_SIZE = FLASH_MAX_SIZE(FLASH_IBFT_NSECS),
+
+ /*
+ * Boot configuration data.
+ */
+ FLASH_BOOTCFG_START_SEC = 7,
+ FLASH_BOOTCFG_NSECS = 1,
+ FLASH_BOOTCFG_START = FLASH_START(FLASH_BOOTCFG_START_SEC),
+ FLASH_BOOTCFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_BOOTCFG_NSECS),
+
+ /*
+ * Location of firmware image in FLASH.
+ */
+ FLASH_FW_START_SEC = 8,
+ FLASH_FW_NSECS = 8,
+ FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
+ FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+
+ /*
+ * iSCSI persistent/crash information.
+ */
+ FLASH_ISCSI_CRASH_START_SEC = 29,
+ FLASH_ISCSI_CRASH_NSECS = 1,
+ FLASH_ISCSI_CRASH_START = FLASH_START(FLASH_ISCSI_CRASH_START_SEC),
+ FLASH_ISCSI_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_ISCSI_CRASH_NSECS),
+
+ /*
+ * FCoE persistent/crash information.
+ */
+ FLASH_FCOE_CRASH_START_SEC = 30,
+ FLASH_FCOE_CRASH_NSECS = 1,
+ FLASH_FCOE_CRASH_START = FLASH_START(FLASH_FCOE_CRASH_START_SEC),
+ FLASH_FCOE_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FCOE_CRASH_NSECS),
+
+ /*
+ * Location of Firmware Configuration File in FLASH. Since the FPGA
+ * "FLASH" is smaller we need to store the Configuration File in a
+ * different location -- which will overlap the end of the firmware
+ * image if firmware ever gets that large ...
+ */
+ FLASH_CFG_START_SEC = 31,
+ FLASH_CFG_NSECS = 1,
+ FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
+ FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS),
+
+ FLASH_FPGA_CFG_START_SEC = 15,
+ FLASH_FPGA_CFG_START = FLASH_START(FLASH_FPGA_CFG_START_SEC),
+
+ /*
+ * Sectors 32-63 are reserved for FLASH failover.
+ */
+};
+
+#undef FLASH_START
+#undef FLASH_MAX_SIZE
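For orientation, the sector-based layout above resolves to concrete byte offsets once the 64 KB sector size is applied; the worked arithmetic below is an editorial comment, not additional definitions.

/*
 * Worked example of the layout above (1 sector = 64 KB = 0x10000):
 *   FLASH_FW_START       =  8 * 0x10000 = 0x080000   (512 KB into flash)
 *   FLASH_FW_MAX_SIZE    =  8 * 0x10000 = 0x080000   (512 KB)
 *   FLASH_CFG_START      = 31 * 0x10000 = 0x1f0000
 *   FLASH_FPGA_CFG_START = 15 * 0x10000 = 0x0f0000
 */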
+
#endif /* __T4_HW_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 111fc323f155..a1a8b57200f6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -86,10 +86,17 @@
#define CIDXINC_SHIFT 0
#define CIDXINC(x) ((x) << CIDXINC_SHIFT)
+#define X_RXPKTCPLMODE_SPLIT 1
+#define X_INGPADBOUNDARY_SHIFT 5
+
#define SGE_CONTROL 0x1008
#define DCASYSTYPE 0x00080000U
-#define RXPKTCPLMODE 0x00040000U
-#define EGRSTATUSPAGESIZE 0x00020000U
+#define RXPKTCPLMODE_MASK 0x00040000U
+#define RXPKTCPLMODE_SHIFT 18
+#define RXPKTCPLMODE(x) ((x) << RXPKTCPLMODE_SHIFT)
+#define EGRSTATUSPAGESIZE_MASK 0x00020000U
+#define EGRSTATUSPAGESIZE_SHIFT 17
+#define EGRSTATUSPAGESIZE(x) ((x) << EGRSTATUSPAGESIZE_SHIFT)
#define PKTSHIFT_MASK 0x00001c00U
#define PKTSHIFT_SHIFT 10
#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
@@ -108,6 +115,35 @@
#define GLOBALENABLE 0x00000001U
#define SGE_HOST_PAGE_SIZE 0x100c
+
+#define HOSTPAGESIZEPF7_MASK 0x0000000fU
+#define HOSTPAGESIZEPF7_SHIFT 28
+#define HOSTPAGESIZEPF7(x) ((x) << HOSTPAGESIZEPF7_SHIFT)
+
+#define HOSTPAGESIZEPF6_MASK 0x0000000fU
+#define HOSTPAGESIZEPF6_SHIFT 24
+#define HOSTPAGESIZEPF6(x) ((x) << HOSTPAGESIZEPF6_SHIFT)
+
+#define HOSTPAGESIZEPF5_MASK 0x0000000fU
+#define HOSTPAGESIZEPF5_SHIFT 20
+#define HOSTPAGESIZEPF5(x) ((x) << HOSTPAGESIZEPF5_SHIFT)
+
+#define HOSTPAGESIZEPF4_MASK 0x0000000fU
+#define HOSTPAGESIZEPF4_SHIFT 16
+#define HOSTPAGESIZEPF4(x) ((x) << HOSTPAGESIZEPF4_SHIFT)
+
+#define HOSTPAGESIZEPF3_MASK 0x0000000fU
+#define HOSTPAGESIZEPF3_SHIFT 12
+#define HOSTPAGESIZEPF3(x) ((x) << HOSTPAGESIZEPF3_SHIFT)
+
+#define HOSTPAGESIZEPF2_MASK 0x0000000fU
+#define HOSTPAGESIZEPF2_SHIFT 8
+#define HOSTPAGESIZEPF2(x) ((x) << HOSTPAGESIZEPF2_SHIFT)
+
+#define HOSTPAGESIZEPF1_MASK 0x0000000fU
+#define HOSTPAGESIZEPF1_SHIFT 4
+#define HOSTPAGESIZEPF1(x) ((x) << HOSTPAGESIZEPF1_SHIFT)
+
#define HOSTPAGESIZEPF0_MASK 0x0000000fU
#define HOSTPAGESIZEPF0_SHIFT 0
#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT)
@@ -155,6 +191,8 @@
#define SGE_INT_ENABLE3 0x1040
#define SGE_FL_BUFFER_SIZE0 0x1044
#define SGE_FL_BUFFER_SIZE1 0x1048
+#define SGE_FL_BUFFER_SIZE2 0x104c
+#define SGE_FL_BUFFER_SIZE3 0x1050
#define SGE_INGRESS_RX_THRESHOLD 0x10a0
#define THRESHOLD_0_MASK 0x3f000000U
#define THRESHOLD_0_SHIFT 24
@@ -173,6 +211,12 @@
#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT)
#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT)
+#define SGE_CONM_CTRL 0x1094
+#define EGRTHRESHOLD_MASK 0x00003f00U
+#define EGRTHRESHOLDshift 8
+#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift)
+#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
+
#define SGE_TIMER_VALUE_0_AND_1 0x10b8
#define TIMERVALUE0_MASK 0xffff0000U
#define TIMERVALUE0_SHIFT 16
@@ -184,64 +228,54 @@
#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT)
#define SGE_TIMER_VALUE_2_AND_3 0x10bc
+#define TIMERVALUE2_MASK 0xffff0000U
+#define TIMERVALUE2_SHIFT 16
+#define TIMERVALUE2(x) ((x) << TIMERVALUE2_SHIFT)
+#define TIMERVALUE2_GET(x) (((x) & TIMERVALUE2_MASK) >> TIMERVALUE2_SHIFT)
+#define TIMERVALUE3_MASK 0x0000ffffU
+#define TIMERVALUE3_SHIFT 0
+#define TIMERVALUE3(x) ((x) << TIMERVALUE3_SHIFT)
+#define TIMERVALUE3_GET(x) (((x) & TIMERVALUE3_MASK) >> TIMERVALUE3_SHIFT)
+
#define SGE_TIMER_VALUE_4_AND_5 0x10c0
+#define TIMERVALUE4_MASK 0xffff0000U
+#define TIMERVALUE4_SHIFT 16
+#define TIMERVALUE4(x) ((x) << TIMERVALUE4_SHIFT)
+#define TIMERVALUE4_GET(x) (((x) & TIMERVALUE4_MASK) >> TIMERVALUE4_SHIFT)
+#define TIMERVALUE5_MASK 0x0000ffffU
+#define TIMERVALUE5_SHIFT 0
+#define TIMERVALUE5(x) ((x) << TIMERVALUE5_SHIFT)
+#define TIMERVALUE5_GET(x) (((x) & TIMERVALUE5_MASK) >> TIMERVALUE5_SHIFT)
+
#define SGE_DEBUG_INDEX 0x10cc
#define SGE_DEBUG_DATA_HIGH 0x10d0
#define SGE_DEBUG_DATA_LOW 0x10d4
#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
-#define S_LP_INT_THRESH 12
-#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
#define S_HP_INT_THRESH 28
+#define M_HP_INT_THRESH 0xfU
#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
+#define M_HP_COUNT 0x7ffU
+#define S_HP_COUNT 16
+#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
+#define S_LP_INT_THRESH 12
+#define M_LP_INT_THRESH 0xfU
+#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
+#define M_LP_COUNT 0x7ffU
+#define S_LP_COUNT 0
+#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
#define A_SGE_DBFIFO_STATUS 0x10a4
#define S_ENABLE_DROP 13
#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
#define F_ENABLE_DROP V_ENABLE_DROP(1U)
-#define A_SGE_DOORBELL_CONTROL 0x10a8
-
-#define A_SGE_CTXT_CMD 0x11fc
-#define A_SGE_DBQ_CTXT_BADDR 0x1084
-
-#define A_SGE_PF_KDOORBELL 0x0
-
-#define S_QID 15
-#define V_QID(x) ((x) << S_QID)
-
-#define S_PIDX 0
-#define V_PIDX(x) ((x) << S_PIDX)
-
-#define M_LP_COUNT 0x7ffU
-#define S_LP_COUNT 0
-#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
-
-#define M_HP_COUNT 0x7ffU
-#define S_HP_COUNT 16
-#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
-
-#define A_SGE_INT_ENABLE3 0x1040
-
-#define S_DBFIFO_HP_INT 8
-#define V_DBFIFO_HP_INT(x) ((x) << S_DBFIFO_HP_INT)
-#define F_DBFIFO_HP_INT V_DBFIFO_HP_INT(1U)
-
-#define S_DBFIFO_LP_INT 7
-#define V_DBFIFO_LP_INT(x) ((x) << S_DBFIFO_LP_INT)
-#define F_DBFIFO_LP_INT V_DBFIFO_LP_INT(1U)
-
#define S_DROPPED_DB 0
#define V_DROPPED_DB(x) ((x) << S_DROPPED_DB)
#define F_DROPPED_DB V_DROPPED_DB(1U)
+#define A_SGE_DOORBELL_CONTROL 0x10a8
-#define S_ERR_DROPPED_DB 18
-#define V_ERR_DROPPED_DB(x) ((x) << S_ERR_DROPPED_DB)
-#define F_ERR_DROPPED_DB V_ERR_DROPPED_DB(1U)
-
-#define A_PCIE_MEM_ACCESS_OFFSET 0x306c
-
-#define M_HP_INT_THRESH 0xfU
-#define M_LP_INT_THRESH 0xfU
+#define A_SGE_CTXT_CMD 0x11fc
+#define A_SGE_DBQ_CTXT_BADDR 0x1084
#define PCIE_PF_CLI 0x44
#define PCIE_INT_CAUSE 0x3004
@@ -287,6 +321,8 @@
#define WINDOW(x) ((x) << WINDOW_SHIFT)
#define PCIE_MEM_ACCESS_OFFSET 0x306c
+#define PCIE_FW 0x30b8
+
#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
#define RNPP 0x80000000U
#define RPCP 0x20000000U
@@ -364,7 +400,7 @@
#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU
#define MEM_WRAP_CLIENT_NUM_SHIFT 0
#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
-
+#define MA_PCIE_FW 0x30b8
#define MA_PARITY_ERROR_STATUS 0x77f4
#define EDC_0_BASE_ADDR 0x7900
@@ -385,6 +421,7 @@
#define CIM_BOOT_CFG 0x7b00
#define BOOTADDR_MASK 0xffffff00U
+#define UPCRST 0x1U
#define CIM_PF_MAILBOX_DATA 0x240
#define CIM_PF_MAILBOX_CTRL 0x280
@@ -457,6 +494,13 @@
#define VLANEXTENABLE_MASK 0x0000f000U
#define VLANEXTENABLE_SHIFT 12
+#define TP_GLOBAL_CONFIG 0x7d08
+#define FIVETUPLELOOKUP_SHIFT 17
+#define FIVETUPLELOOKUP_MASK 0x00060000U
+#define FIVETUPLELOOKUP(x) ((x) << FIVETUPLELOOKUP_SHIFT)
+#define FIVETUPLELOOKUP_GET(x) (((x) & FIVETUPLELOOKUP_MASK) >> \
+ FIVETUPLELOOKUP_SHIFT)
+
#define TP_PARA_REG2 0x7d68
#define MAXRXDATA_MASK 0xffff0000U
#define MAXRXDATA_SHIFT 16
@@ -466,8 +510,47 @@
#define TIMERRESOLUTION_MASK 0x00ff0000U
#define TIMERRESOLUTION_SHIFT 16
#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT)
+#define DELAYEDACKRESOLUTION_MASK 0x000000ffU
+#define DELAYEDACKRESOLUTION_SHIFT 0
+#define DELAYEDACKRESOLUTION_GET(x) \
+ (((x) & DELAYEDACKRESOLUTION_MASK) >> DELAYEDACKRESOLUTION_SHIFT)
#define TP_SHIFT_CNT 0x7dc0
+#define SYNSHIFTMAX_SHIFT 24
+#define SYNSHIFTMAX_MASK 0xff000000U
+#define SYNSHIFTMAX(x) ((x) << SYNSHIFTMAX_SHIFT)
+#define SYNSHIFTMAX_GET(x) (((x) & SYNSHIFTMAX_MASK) >> \
+ SYNSHIFTMAX_SHIFT)
+#define RXTSHIFTMAXR1_SHIFT 20
+#define RXTSHIFTMAXR1_MASK 0x00f00000U
+#define RXTSHIFTMAXR1(x) ((x) << RXTSHIFTMAXR1_SHIFT)
+#define RXTSHIFTMAXR1_GET(x) (((x) & RXTSHIFTMAXR1_MASK) >> \
+ RXTSHIFTMAXR1_SHIFT)
+#define RXTSHIFTMAXR2_SHIFT 16
+#define RXTSHIFTMAXR2_MASK 0x000f0000U
+#define RXTSHIFTMAXR2(x) ((x) << RXTSHIFTMAXR2_SHIFT)
+#define RXTSHIFTMAXR2_GET(x) (((x) & RXTSHIFTMAXR2_MASK) >> \
+ RXTSHIFTMAXR2_SHIFT)
+#define PERSHIFTBACKOFFMAX_SHIFT 12
+#define PERSHIFTBACKOFFMAX_MASK 0x0000f000U
+#define PERSHIFTBACKOFFMAX(x) ((x) << PERSHIFTBACKOFFMAX_SHIFT)
+#define PERSHIFTBACKOFFMAX_GET(x) (((x) & PERSHIFTBACKOFFMAX_MASK) >> \
+ PERSHIFTBACKOFFMAX_SHIFT)
+#define PERSHIFTMAX_SHIFT 8
+#define PERSHIFTMAX_MASK 0x00000f00U
+#define PERSHIFTMAX(x) ((x) << PERSHIFTMAX_SHIFT)
+#define PERSHIFTMAX_GET(x) (((x) & PERSHIFTMAX_MASK) >> \
+ PERSHIFTMAX_SHIFT)
+#define KEEPALIVEMAXR1_SHIFT 4
+#define KEEPALIVEMAXR1_MASK 0x000000f0U
+#define KEEPALIVEMAXR1(x) ((x) << KEEPALIVEMAXR1_SHIFT)
+#define KEEPALIVEMAXR1_GET(x) (((x) & KEEPALIVEMAXR1_MASK) >> \
+ KEEPALIVEMAXR1_SHIFT)
+#define KEEPALIVEMAXR2_SHIFT 0
+#define KEEPALIVEMAXR2_MASK 0x0000000fU
+#define KEEPALIVEMAXR2(x) ((x) << KEEPALIVEMAXR2_SHIFT)
+#define KEEPALIVEMAXR2_GET(x) (((x) & KEEPALIVEMAXR2_MASK) >> \
+ KEEPALIVEMAXR2_SHIFT)
#define TP_CCTRL_TABLE 0x7ddc
#define TP_MTU_TABLE 0x7de4
@@ -501,6 +584,20 @@
#define TP_INT_CAUSE 0x7e74
#define FLMTXFLSTEMPTY 0x40000000U
+#define TP_VLAN_PRI_MAP 0x140
+#define FRAGMENTATION_SHIFT 9
+#define FRAGMENTATION_MASK 0x00000200U
+#define MPSHITTYPE_MASK 0x00000100U
+#define MACMATCH_MASK 0x00000080U
+#define ETHERTYPE_MASK 0x00000040U
+#define PROTOCOL_MASK 0x00000020U
+#define TOS_MASK 0x00000010U
+#define VLAN_MASK 0x00000008U
+#define VNIC_ID_MASK 0x00000004U
+#define PORT_MASK 0x00000002U
+#define FCOE_SHIFT 0
+#define FCOE_MASK 0x00000001U
+
#define TP_INGRESS_CONFIG 0x141
#define VNIC 0x00000800U
#define CSUM_HAS_PSEUDO_HDR 0x00000400U
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index ad53f796b574..a6364632b490 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -79,6 +79,8 @@ struct fw_wr_hdr {
#define FW_WR_FLOWID(x) ((x) << 8)
#define FW_WR_LEN16(x) ((x) << 0)
+#define HW_TPL_FR_MT_PR_IV_P_FC 0X32B
+
struct fw_ulptx_wr {
__be32 op_to_compl;
__be32 flowid_len16;
@@ -155,6 +157,17 @@ struct fw_eth_tx_pkt_vm_wr {
#define FW_CMD_MAX_TIMEOUT 3000
+/*
+ * If a host driver does a HELLO and discovers that there's already a MASTER
+ * selected, we may have to wait for that MASTER to finish issuing RESET,
+ * configuration and INITIALIZE commands. Also, there's a possibility that
+ * our own HELLO may get lost if it happens right as the MASTER is issuing a
+ * RESET command, so we need to be willing to make a few retries of our HELLO.
+ */
+#define FW_CMD_HELLO_TIMEOUT (3 * FW_CMD_MAX_TIMEOUT)
+#define FW_CMD_HELLO_RETRIES 3
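A hedged sketch of the retry intent behind these two constants: bound the number of HELLO attempts while allowing each attempt an extended timeout in case another PF currently holds MASTER. issue_hello() is a hypothetical stand-in, not a real helper in this driver.

/*
 * Sketch only: retry a lost HELLO a bounded number of times.  The
 * stand-in issue_hello() is assumed to honour FW_CMD_HELLO_TIMEOUT
 * internally for each attempt.
 */
static int example_hello_with_retries(struct adapter *adap, unsigned int mbox)
{
        int retries = FW_CMD_HELLO_RETRIES;
        int ret;

        do {
                ret = issue_hello(adap, mbox);
        } while (ret == -EBUSY && --retries > 0);

        return ret;
}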
+
+
enum fw_cmd_opcodes {
FW_LDST_CMD = 0x01,
FW_RESET_CMD = 0x03,
@@ -304,7 +317,17 @@ struct fw_reset_cmd {
__be32 op_to_write;
__be32 retval_len16;
__be32 val;
- __be32 r3;
+ __be32 halt_pkd;
+};
+
+#define FW_RESET_CMD_HALT_SHIFT 31
+#define FW_RESET_CMD_HALT_MASK 0x1
+#define FW_RESET_CMD_HALT(x) ((x) << FW_RESET_CMD_HALT_SHIFT)
+#define FW_RESET_CMD_HALT_GET(x) \
+ (((x) >> FW_RESET_CMD_HALT_SHIFT) & FW_RESET_CMD_HALT_MASK)
+
+enum fw_hellow_cmd {
+ fw_hello_cmd_stage_os = 0x0
};
struct fw_hello_cmd {
@@ -315,8 +338,14 @@ struct fw_hello_cmd {
#define FW_HELLO_CMD_INIT (1U << 30)
#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29)
#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28)
-#define FW_HELLO_CMD_MBMASTER(x) ((x) << 24)
+#define FW_HELLO_CMD_MBMASTER_MASK 0xfU
+#define FW_HELLO_CMD_MBMASTER_SHIFT 24
+#define FW_HELLO_CMD_MBMASTER(x) ((x) << FW_HELLO_CMD_MBMASTER_SHIFT)
+#define FW_HELLO_CMD_MBMASTER_GET(x) \
+ (((x) >> FW_HELLO_CMD_MBMASTER_SHIFT) & FW_HELLO_CMD_MBMASTER_MASK)
#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20)
+#define FW_HELLO_CMD_STAGE(x) ((x) << 17)
+#define FW_HELLO_CMD_CLEARINIT (1U << 16)
__be32 fwrev;
};
@@ -401,6 +430,14 @@ enum fw_caps_config_fcoe {
FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002,
};
+enum fw_memtype_cf {
+ FW_MEMTYPE_CF_EDC0 = 0x0,
+ FW_MEMTYPE_CF_EDC1 = 0x1,
+ FW_MEMTYPE_CF_EXTMEM = 0x2,
+ FW_MEMTYPE_CF_FLASH = 0x4,
+ FW_MEMTYPE_CF_INTERNAL = 0x5,
+};
+
struct fw_caps_config_cmd {
__be32 op_to_write;
__be32 retval_len16;
@@ -416,10 +453,15 @@ struct fw_caps_config_cmd {
__be16 r4;
__be16 iscsicaps;
__be16 fcoecaps;
- __be32 r5;
- __be64 r6;
+ __be32 cfcsum;
+ __be32 finiver;
+ __be32 finicsum;
};
+#define FW_CAPS_CONFIG_CMD_CFVALID (1U << 27)
+#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) ((x) << 24)
+#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) ((x) << 16)
+
/*
* params command mnemonics
*/
@@ -451,6 +493,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A,
FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
+ FW_PARAMS_PARAM_DEV_CF = 0x0D,
};
/*
@@ -492,6 +535,8 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A,
FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B,
FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C,
+ FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D,
+ FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E
};
/*
@@ -507,8 +552,16 @@ enum fw_params_param_dmaq {
#define FW_PARAMS_MNEM(x) ((x) << 24)
#define FW_PARAMS_PARAM_X(x) ((x) << 16)
-#define FW_PARAMS_PARAM_Y(x) ((x) << 8)
-#define FW_PARAMS_PARAM_Z(x) ((x) << 0)
+#define FW_PARAMS_PARAM_Y_SHIFT 8
+#define FW_PARAMS_PARAM_Y_MASK 0xffU
+#define FW_PARAMS_PARAM_Y(x) ((x) << FW_PARAMS_PARAM_Y_SHIFT)
+#define FW_PARAMS_PARAM_Y_GET(x) (((x) >> FW_PARAMS_PARAM_Y_SHIFT) &\
+ FW_PARAMS_PARAM_Y_MASK)
+#define FW_PARAMS_PARAM_Z_SHIFT 0
+#define FW_PARAMS_PARAM_Z_MASK 0xffu
+#define FW_PARAMS_PARAM_Z(x) ((x) << FW_PARAMS_PARAM_Z_SHIFT)
+#define FW_PARAMS_PARAM_Z_GET(x) (((x) >> FW_PARAMS_PARAM_Z_SHIFT) &\
+ FW_PARAMS_PARAM_Z_MASK)
#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0)
#define FW_PARAMS_PARAM_YZ(x) ((x) << 0)
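A hedged illustration of how these mnemonic macros compose a 32-bit parameter selector, here for the new FW_PARAMS_PARAM_DEV_CF value; FW_PARAMS_MNEM_DEV, t4_query_params(), adap->mbox and adap->fn are assumed from the rest of the driver and are not defined in this hunk.

/*
 * Sketch only: query the firmware for the "device Configuration File"
 * parameter using the X/Y/Z mnemonic encoding above.
 */
static int example_query_cf_param(struct adapter *adap, u32 *val)
{
        u32 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
                    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF) |
                    FW_PARAMS_PARAM_Y(0) |
                    FW_PARAMS_PARAM_Z(0);

        return t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &param, val);
}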
@@ -1599,6 +1652,16 @@ struct fw_debug_cmd {
} u;
};
+#define FW_PCIE_FW_ERR (1U << 31)
+#define FW_PCIE_FW_INIT (1U << 30)
+#define FW_PCIE_FW_HALT (1U << 29)
+#define FW_PCIE_FW_MASTER_VLD (1U << 15)
+#define FW_PCIE_FW_MASTER_MASK 0x7
+#define FW_PCIE_FW_MASTER_SHIFT 12
+#define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT)
+#define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \
+ FW_PCIE_FW_MASTER_MASK)
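A brief sketch of decoding these bits from the PCIE_FW scratch register defined in t4_regs.h earlier in this patch; t4_read_reg() and adap->pdev_dev are assumed from the rest of the driver, and the reporting function is hypothetical.

/*
 * Illustrative only: read PCIE_FW and report the error and master-PF
 * fields using the macros above.
 */
static void example_report_fw_state(struct adapter *adap)
{
        u32 pcie_fw = t4_read_reg(adap, PCIE_FW);

        if (pcie_fw & FW_PCIE_FW_ERR)
                dev_err(adap->pdev_dev, "firmware reports an error\n");
        if (pcie_fw & FW_PCIE_FW_MASTER_VLD)
                dev_info(adap->pdev_dev, "master PF is %u\n",
                         FW_PCIE_FW_MASTER_GET(pcie_fw));
}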
+
struct fw_hdr {
u8 ver;
u8 reserved1;
@@ -1613,7 +1676,11 @@ struct fw_hdr {
u8 intfver_iscsi;
u8 intfver_fcoe;
u8 reserved2;
- __be32 reserved3[27];
+ __u32 reserved3;
+ __u32 reserved4;
+ __u32 reserved5;
+ __be32 flags;
+ __be32 reserved6[23];
};
#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
@@ -1621,18 +1688,8 @@ struct fw_hdr {
#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff)
-#define S_FW_CMD_OP 24
-#define V_FW_CMD_OP(x) ((x) << S_FW_CMD_OP)
-
-#define S_FW_CMD_REQUEST 23
-#define V_FW_CMD_REQUEST(x) ((x) << S_FW_CMD_REQUEST)
-#define F_FW_CMD_REQUEST V_FW_CMD_REQUEST(1U)
-
-#define S_FW_CMD_WRITE 21
-#define V_FW_CMD_WRITE(x) ((x) << S_FW_CMD_WRITE)
-#define F_FW_CMD_WRITE V_FW_CMD_WRITE(1U)
-
-#define S_FW_LDST_CMD_ADDRSPACE 0
-#define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE)
+enum fw_hdr_flags {
+ FW_HDR_FLAGS_RESET_HALT = 0x00000001,
+};
#endif /* _T4FW_INTERFACE_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 8877fbfefb63..f16745f4b36b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2421,7 +2421,7 @@ int t4vf_sge_init(struct adapter *adapter)
fl0, fl1);
return -EINVAL;
}
- if ((sge_params->sge_control & RXPKTCPLMODE) == 0) {
+ if ((sge_params->sge_control & RXPKTCPLMODE_MASK) == 0) {
dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
return -EINVAL;
}
@@ -2431,7 +2431,8 @@ int t4vf_sge_init(struct adapter *adapter)
*/
if (fl1)
FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
- STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE) ? 128 : 64);
+ STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
+ ? 128 : 64);
PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
SGE_INGPADBOUNDARY_SHIFT);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index d266c86a53f7..cf4c05bdf5fe 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -110,6 +110,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
+#define MAX_VFS 30 /* Max VFs supported by BE3 FW */
#define FW_VER_LEN 32
struct be_dma_mem {
@@ -336,7 +337,6 @@ struct phy_info {
u16 auto_speeds_supported;
u16 fixed_speeds_supported;
int link_speed;
- int forced_port_speed;
u32 dac_cable_len;
u32 advertising;
u32 supported;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 8c63d06ab12b..af60bb26e330 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -120,7 +120,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
dev_warn(&adapter->pdev->dev,
- "opcode %d-%d is not permitted\n",
+ "VF is not privileged to issue opcode %d-%d\n",
opcode, subsystem);
} else {
extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
@@ -165,14 +165,13 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
}
}
-/* Grp5 QOS Speed evt */
+/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
struct be_async_event_grp5_qos_link_speed *evt)
{
- if (evt->physical_port == adapter->port_num) {
- /* qos_link_speed is in units of 10 Mbps */
- adapter->phy.link_speed = evt->qos_link_speed * 10;
- }
+ if (adapter->phy.link_speed >= 0 &&
+ evt->physical_port == adapter->port_num)
+ adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}
/*Grp5 PVID evt*/
@@ -717,7 +716,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
- u8 type, bool permanent, u32 if_handle, u32 pmac_id)
+ bool permanent, u32 if_handle, u32 pmac_id)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mac_query *req;
@@ -734,7 +733,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
- req->type = type;
+ req->type = MAC_ADDRESS_TYPE_NETWORK;
if (permanent) {
req->permanent = 1;
} else {
@@ -1326,9 +1325,28 @@ err:
return status;
}
-/* Uses synchronous mcc */
-int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
- u16 *link_speed, u8 *link_status, u32 dom)
+static int be_mac_to_link_speed(int mac_speed)
+{
+ switch (mac_speed) {
+ case PHY_LINK_SPEED_ZERO:
+ return 0;
+ case PHY_LINK_SPEED_10MBPS:
+ return 10;
+ case PHY_LINK_SPEED_100MBPS:
+ return 100;
+ case PHY_LINK_SPEED_1GBPS:
+ return 1000;
+ case PHY_LINK_SPEED_10GBPS:
+ return 10000;
+ }
+ return 0;
+}
+
+/* Uses synchronous mcc
+ * Returns link_speed in Mbps
+ */
+int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
+ u8 *link_status, u32 dom)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_link_status *req;
@@ -1357,11 +1375,13 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
- if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
- if (link_speed)
- *link_speed = le16_to_cpu(resp->link_speed);
- if (mac_speed)
- *mac_speed = resp->mac_speed;
+ if (link_speed) {
+ *link_speed = resp->link_speed ?
+ le16_to_cpu(resp->link_speed) * 10 :
+ be_mac_to_link_speed(resp->mac_speed);
+
+ if (!resp->logical_link_status)
+ *link_speed = 0;
}
if (link_status)
*link_status = resp->logical_link_status;
@@ -2405,6 +2425,9 @@ int be_cmd_req_native_mode(struct be_adapter *adapter)
struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
adapter->be3_native = le32_to_cpu(resp->cap_flags) &
CAPABILITY_BE3_NATIVE_ERX_API;
+ if (!adapter->be3_native)
+ dev_warn(&adapter->pdev->dev,
+ "adapter not in advanced mode\n");
}
err:
mutex_unlock(&adapter->mbox_lock);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 250f19b5f7b6..0936e21e3cff 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1687,7 +1687,7 @@ struct be_cmd_req_set_ext_fat_caps {
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_fw_wait_ready(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
- u8 type, bool permanent, u32 if_handle, u32 pmac_id);
+ bool permanent, u32 if_handle, u32 pmac_id);
extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
u32 if_id, u32 *pmac_id, u32 domain);
extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
@@ -1714,8 +1714,8 @@ extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
int type);
extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
struct be_queue_info *q);
-extern int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
- u16 *link_speed, u8 *link_status, u32 dom);
+extern int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
+ u8 *link_status, u32 dom);
extern int be_cmd_reset(struct be_adapter *adapter);
extern int be_cmd_get_stats(struct be_adapter *adapter,
struct be_dma_mem *nonemb_cmd);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index c0e700653f96..8e6fb0ba6aa9 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -512,28 +512,6 @@ static u32 convert_to_et_setting(u32 if_type, u32 if_speeds)
return val;
}
-static int convert_to_et_speed(u32 be_speed)
-{
- int et_speed = SPEED_10000;
-
- switch (be_speed) {
- case PHY_LINK_SPEED_10MBPS:
- et_speed = SPEED_10;
- break;
- case PHY_LINK_SPEED_100MBPS:
- et_speed = SPEED_100;
- break;
- case PHY_LINK_SPEED_1GBPS:
- et_speed = SPEED_1000;
- break;
- case PHY_LINK_SPEED_10GBPS:
- et_speed = SPEED_10000;
- break;
- }
-
- return et_speed;
-}
-
bool be_pause_supported(struct be_adapter *adapter)
{
return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB ||
@@ -544,27 +522,16 @@ bool be_pause_supported(struct be_adapter *adapter)
static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
struct be_adapter *adapter = netdev_priv(netdev);
- u8 port_speed = 0;
- u16 link_speed = 0;
u8 link_status;
- u32 et_speed = 0;
+ u16 link_speed = 0;
int status;
- if (adapter->phy.link_speed < 0 || !(netdev->flags & IFF_UP)) {
- if (adapter->phy.forced_port_speed < 0) {
- status = be_cmd_link_status_query(adapter, &port_speed,
- &link_speed, &link_status, 0);
- if (!status)
- be_link_status_update(adapter, link_status);
- if (link_speed)
- et_speed = link_speed * 10;
- else if (link_status)
- et_speed = convert_to_et_speed(port_speed);
- } else {
- et_speed = adapter->phy.forced_port_speed;
- }
-
- ethtool_cmd_speed_set(ecmd, et_speed);
+ if (adapter->phy.link_speed < 0) {
+ status = be_cmd_link_status_query(adapter, &link_speed,
+ &link_status, 0);
+ if (!status)
+ be_link_status_update(adapter, link_status);
+ ethtool_cmd_speed_set(ecmd, link_speed);
status = be_cmd_get_phy_info(adapter);
if (status)
@@ -773,8 +740,8 @@ static void
be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
- u8 mac_speed = 0;
- u16 qos_link_speed = 0;
+ int status;
+ u8 link_status = 0;
memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
@@ -798,11 +765,11 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
test->flags |= ETH_TEST_FL_FAILED;
}
- if (be_cmd_link_status_query(adapter, &mac_speed,
- &qos_link_speed, NULL, 0) != 0) {
+ status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
+ if (status) {
test->flags |= ETH_TEST_FL_FAILED;
data[4] = -1;
- } else if (!mac_speed) {
+ } else if (!link_status) {
test->flags |= ETH_TEST_FL_FAILED;
data[4] = 1;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 78b8aa8069f0..eb3f2cb3b93b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -20,6 +20,7 @@
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
+#include <linux/aer.h>
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -240,9 +241,8 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- status = be_cmd_mac_addr_query(adapter, current_mac,
- MAC_ADDRESS_TYPE_NETWORK, false,
- adapter->if_handle, 0);
+ status = be_cmd_mac_addr_query(adapter, current_mac, false,
+ adapter->if_handle, 0);
if (status)
goto err;
@@ -1075,7 +1075,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
struct pci_dev *dev, *pdev = adapter->pdev;
- int vfs = 0, assigned_vfs = 0, pos, vf_fn;
+ int vfs = 0, assigned_vfs = 0, pos;
u16 offset, stride;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
@@ -1086,9 +1086,7 @@ static int be_find_vfs(struct be_adapter *adapter, int vf_state)
dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
while (dev) {
- vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
- if (dev->is_virtfn && dev->devfn == vf_fn &&
- dev->bus->number == pdev->bus->number) {
+ if (dev->is_virtfn && pci_physfn(dev) == pdev) {
vfs++;
if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
assigned_vfs++;
@@ -1896,6 +1894,8 @@ static int be_tx_qs_create(struct be_adapter *adapter)
return status;
}
+ dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
+ adapter->num_tx_qs);
return 0;
}
@@ -1946,10 +1946,9 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
return rc;
}
- if (adapter->num_rx_qs != MAX_RX_QS)
- dev_info(&adapter->pdev->dev,
- "Created only %d receive queues\n", adapter->num_rx_qs);
-
+ dev_info(&adapter->pdev->dev,
+ "created %d RSS queue(s) and 1 default RX queue\n",
+ adapter->num_rx_qs - 1);
return 0;
}
@@ -2176,8 +2175,7 @@ static uint be_num_rss_want(struct be_adapter *adapter)
{
u32 num = 0;
if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
- !sriov_want(adapter) && be_physfn(adapter) &&
- !be_is_mc(adapter)) {
+ !sriov_want(adapter) && be_physfn(adapter)) {
num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
}
@@ -2188,6 +2186,7 @@ static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
int i, status, num_vec, num_roce_vec = 0;
+ struct device *dev = &adapter->pdev->dev;
/* If RSS queues are not used, need a vec for default RX Q */
num_vec = min(be_num_rss_want(adapter), num_online_cpus());
@@ -2212,6 +2211,8 @@ static void be_msix_enable(struct be_adapter *adapter)
num_vec) == 0)
goto done;
}
+
+ dev_warn(dev, "MSIx enable failed\n");
return;
done:
if (be_roce_supported(adapter)) {
@@ -2225,6 +2226,7 @@ done:
}
} else
adapter->num_msix_vec = num_vec;
+ dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
return;
}
@@ -2441,8 +2443,7 @@ static int be_open(struct net_device *netdev)
be_eq_notify(adapter, eqo->q.id, true, false, 0);
}
- status = be_cmd_link_status_query(adapter, NULL, NULL,
- &link_status, 0);
+ status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
if (!status)
be_link_status_update(adapter, link_status);
@@ -2646,8 +2647,8 @@ static int be_vf_setup(struct be_adapter *adapter)
}
for_all_vfs(adapter, vf_cfg, vf) {
- status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
- NULL, vf + 1);
+ lnk_speed = 1000;
+ status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
if (status)
goto err;
vf_cfg->tx_rate = lnk_speed * 10;
@@ -2671,7 +2672,6 @@ static void be_setup_init(struct be_adapter *adapter)
adapter->be3_native = false;
adapter->promiscuous = false;
adapter->eq_next_idx = 0;
- adapter->phy.forced_port_speed = -1;
}
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
@@ -2693,21 +2693,16 @@ static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
status = be_cmd_get_mac_from_list(adapter, mac,
active_mac, pmac_id, 0);
if (*active_mac) {
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK,
- false, if_handle,
- *pmac_id);
+ status = be_cmd_mac_addr_query(adapter, mac, false,
+ if_handle, *pmac_id);
}
} else if (be_physfn(adapter)) {
/* For BE3, for PF get permanent MAC */
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK, true,
- 0, 0);
+ status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
*active_mac = false;
} else {
/* For BE3, for VF get soft MAC assigned by PF*/
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK, false,
+ status = be_cmd_mac_addr_query(adapter, mac, false,
if_handle, 0);
*active_mac = true;
}
@@ -2724,6 +2719,8 @@ static int be_get_config(struct be_adapter *adapter)
if (pos) {
pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
&dev_num_vfs);
+ if (!lancer_chip(adapter))
+ dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
adapter->dev_num_vfs = dev_num_vfs;
}
return 0;
@@ -3437,6 +3434,7 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
if (mem->va)
dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
mem->dma);
+ kfree(adapter->pmac_id);
}
static int be_ctrl_init(struct be_adapter *adapter)
@@ -3473,6 +3471,12 @@ static int be_ctrl_init(struct be_adapter *adapter)
}
memset(rx_filter->va, 0, rx_filter->size);
+ /* primary mac needs 1 pmac entry */
+ adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
+ sizeof(*adapter->pmac_id), GFP_KERNEL);
+ if (!adapter->pmac_id)
+ return -ENOMEM;
+
mutex_init(&adapter->mbox_lock);
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
@@ -3543,6 +3547,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
be_ctrl_cleanup(adapter);
+ pci_disable_pcie_error_reporting(pdev);
+
pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -3609,12 +3615,6 @@ static int be_get_initial_config(struct be_adapter *adapter)
else
adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
- /* primary mac needs 1 pmac entry */
- adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
- sizeof(u32), GFP_KERNEL);
- if (!adapter->pmac_id)
- return -ENOMEM;
-
status = be_cmd_get_cntl_attributes(adapter);
if (status)
return status;
@@ -3800,6 +3800,23 @@ static bool be_reset_required(struct be_adapter *adapter)
return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
}
+static char *mc_name(struct be_adapter *adapter)
+{
+ if (adapter->function_mode & FLEX10_MODE)
+ return "FLEX10";
+ else if (adapter->function_mode & VNIC_MODE)
+ return "vNIC";
+ else if (adapter->function_mode & UMC_ENABLED)
+ return "UMC";
+ else
+ return "";
+}
+
+static inline char *func_name(struct be_adapter *adapter)
+{
+ return be_physfn(adapter) ? "PF" : "VF";
+}
+
static int __devinit be_probe(struct pci_dev *pdev,
const struct pci_device_id *pdev_id)
{
@@ -3844,6 +3861,10 @@ static int __devinit be_probe(struct pci_dev *pdev,
}
}
+ status = pci_enable_pcie_error_reporting(pdev);
+ if (status)
+ dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
+
status = be_ctrl_init(adapter);
if (status)
goto free_netdev;
@@ -3886,7 +3907,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
status = be_setup(adapter);
if (status)
- goto msix_disable;
+ goto stats_clean;
be_netdev_init(netdev);
status = register_netdev(netdev);
@@ -3900,15 +3921,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
be_cmd_query_port_name(adapter, &port_name);
- dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev),
- port_name);
+ dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
+ func_name(adapter), mc_name(adapter), port_name);
return 0;
unsetup:
be_clear(adapter);
-msix_disable:
- be_msix_disable(adapter);
stats_clean:
be_stats_cleanup(adapter);
ctrl_clean:
@@ -4066,6 +4085,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
if (status)
return PCI_ERS_RESULT_DISCONNECT;
+ pci_cleanup_aer_uncorrect_error_status(pdev);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -4106,7 +4126,7 @@ err:
dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
-static struct pci_error_handlers be_eeh_handlers = {
+static const struct pci_error_handlers be_eeh_handlers = {
.error_detected = be_eeh_err_detected,
.slot_reset = be_eeh_reset,
.resume = be_eeh_resume,
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 3574e1499dfc..feff51664dcf 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -62,6 +62,13 @@ config FSL_PQ_MDIO
---help---
This driver supports the MDIO bus used by the gianfar and UCC drivers.
+config FSL_XGMAC_MDIO
+ tristate "Freescale XGMAC MDIO"
+ depends on FSL_SOC
+ select PHYLIB
+ ---help---
+ This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
+
config UCC_GETH
tristate "Freescale QE Gigabit Ethernet"
depends on QUICC_ENGINE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 1752488c9ee5..3d1839afff65 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -9,6 +9,7 @@ ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
endif
obj-$(CONFIG_FS_ENET) += fs_enet/
obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
+obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
obj-$(CONFIG_GIANFAR) += gianfar_driver.o
obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
gianfar_driver-objs := gianfar.o \
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 9527b28d70d1..c93a05654b46 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -19,54 +19,90 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
-#include <linux/unistd.h>
#include <linux/slab.h>
-#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/crc32.h>
#include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
-#include <linux/of_platform.h>
+#include <linux/of_device.h>
#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-#include <asm/ucc.h>
+#include <asm/ucc.h> /* for ucc_set_qe_mux_mii_mng() */
#include "gianfar.h"
-#include "fsl_pq_mdio.h"
+
+#define MIIMIND_BUSY 0x00000001
+#define MIIMIND_NOTVALID 0x00000004
+#define MIIMCFG_INIT_VALUE 0x00000007
+#define MIIMCFG_RESET 0x80000000
+
+#define MII_READ_COMMAND 0x00000001
+
+struct fsl_pq_mii {
+ u32 miimcfg; /* MII management configuration reg */
+ u32 miimcom; /* MII management command reg */
+ u32 miimadd; /* MII management address reg */
+ u32 miimcon; /* MII management control reg */
+ u32 miimstat; /* MII management status reg */
+ u32 miimind; /* MII management indication reg */
+};
+
+struct fsl_pq_mdio {
+ u8 res1[16];
+ u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/
+ u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/
+ u8 res2[4];
+ u32 emapm; /* MDIO Event mapping register (for etsec2)*/
+ u8 res3[1280];
+ struct fsl_pq_mii mii;
+ u8 res4[28];
+ u32 utbipar; /* TBI phy address reg (only on UCC) */
+ u8 res5[2728];
+} __packed;
/* Number of microseconds to wait for an MII register to respond */
#define MII_TIMEOUT 1000
struct fsl_pq_mdio_priv {
void __iomem *map;
- struct fsl_pq_mdio __iomem *regs;
+ struct fsl_pq_mii __iomem *regs;
+ int irqs[PHY_MAX_ADDR];
+};
+
+/*
+ * Per-device-type data. Each type of device tree node that we support gets
+ * one of these.
+ *
+ * @mii_offset: the offset of the MII registers within the memory map of the
+ * node. Some nodes define only the MII registers, and some define the whole
+ * MAC (which includes the MII registers).
+ *
+ * @get_tbipa: determines the address of the TBIPA register
+ *
+ * @ucc_configure: a special function for extra QE configuration
+ */
+struct fsl_pq_mdio_data {
+ unsigned int mii_offset; /* offset of the MII registers */
+ uint32_t __iomem * (*get_tbipa)(void __iomem *p);
+ void (*ucc_configure)(phys_addr_t start, phys_addr_t end);
};
/*
- * Write value to the PHY at mii_id at register regnum,
- * on the bus attached to the local interface, which may be different from the
- * generic mdio bus (tied to a single interface), waiting until the write is
- * done before returning. This is helpful in programming interfaces like
- * the TBI which control interfaces like onchip SERDES and are always tied to
- * the local mdio pins, which may not be the same as system mdio bus, used for
+ * Write value to the PHY at mii_id at register regnum, on the bus attached
+ * to the local interface, which may be different from the generic mdio bus
+ * (tied to a single interface), waiting until the write is done before
+ * returning. This is helpful in programming interfaces like the TBI which
+ * control interfaces like onchip SERDES and are always tied to the local
+ * mdio pins, which may not be the same as system mdio bus, used for
* controlling the external PHYs, for example.
*/
-int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
- int regnum, u16 value)
+static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
{
+ struct fsl_pq_mdio_priv *priv = bus->priv;
+ struct fsl_pq_mii __iomem *regs = priv->regs;
u32 status;
/* Set the PHY address and the register address we want to write */
@@ -83,20 +119,21 @@ int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
}
/*
- * Read the bus for PHY at addr mii_id, register regnum, and
- * return the value. Clears miimcom first. All PHY operation
- * done on the bus attached to the local interface,
- * which may be different from the generic mdio bus
- * This is helpful in programming interfaces like
- * the TBI which, in turn, control interfaces like onchip SERDES
- * and are always tied to the local mdio pins, which may not be the
+ * Read the bus for PHY at addr mii_id, register regnum, and return the value.
+ * Clears miimcom first.
+ *
+ * All PHY operations are done on the bus attached to the local interface,
+ * may be different from the generic mdio bus. This is helpful in programming
+ * interfaces like the TBI which, in turn, control interfaces like on-chip
+ * SERDES and are always tied to the local mdio pins, which may not be the
* same as system mdio bus, used for controlling the external PHYs, for eg.
*/
-int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
- int mii_id, int regnum)
+static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
- u16 value;
+ struct fsl_pq_mdio_priv *priv = bus->priv;
+ struct fsl_pq_mii __iomem *regs = priv->regs;
u32 status;
+ u16 value;
/* Set the PHY address and the register address we want to read */
out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@@ -115,44 +152,15 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
/* Grab the value of the register from miimstat */
value = in_be32(&regs->miimstat);
+ dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum);
return value;
}
-static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
-{
- struct fsl_pq_mdio_priv *priv = bus->priv;
-
- return priv->regs;
-}
-
-/*
- * Write value to the PHY at mii_id at register regnum,
- * on the bus, waiting until the write is done before returning.
- */
-int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
-{
- struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
-
- /* Write to the local MII regs */
- return fsl_pq_local_mdio_write(regs, mii_id, regnum, value);
-}
-
-/*
- * Read the bus for PHY at addr mii_id, register regnum, and
- * return the value. Clears miimcom first.
- */
-int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
-{
- struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
-
- /* Read the local MII regs */
- return fsl_pq_local_mdio_read(regs, mii_id, regnum);
-}
-
/* Reset the MIIM registers, and wait for the bus to free */
static int fsl_pq_mdio_reset(struct mii_bus *bus)
{
- struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
+ struct fsl_pq_mdio_priv *priv = bus->priv;
+ struct fsl_pq_mii __iomem *regs = priv->regs;
u32 status;
mutex_lock(&bus->mdio_lock);
@@ -170,234 +178,291 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
mutex_unlock(&bus->mdio_lock);
if (!status) {
- printk(KERN_ERR "%s: The MII Bus is stuck!\n",
- bus->name);
+ dev_err(&bus->dev, "timeout waiting for MII bus\n");
return -EBUSY;
}
return 0;
}
-void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+/*
+ * This is mildly evil, but so is our hardware for doing this.
+ * Also, we have to cast back to struct gfar because of
+ * definition weirdness done in gianfar.h.
+ */
+static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
{
- const u32 *addr;
- u64 taddr = OF_BAD_ADDR;
-
- addr = of_get_address(np, 0, NULL, NULL);
- if (addr)
- taddr = of_translate_address(np, addr);
+ struct gfar __iomem *enet_regs = p;
- snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
- (unsigned long long)taddr);
+ return &enet_regs->tbipa;
}
-EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
+/*
+ * Return the TBIPAR address for an eTSEC2 node
+ */
+static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
+{
+ return p;
+}
+#endif
-static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+/*
+ * Return the TBIPAR address for a QE MDIO node
+ */
+static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
{
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
- struct gfar __iomem *enet_regs;
+ struct fsl_pq_mdio __iomem *mdio = p;
- /*
- * This is mildly evil, but so is our hardware for doing this.
- * Also, we have to cast back to struct gfar because of
- * definition weirdness done in gianfar.h.
- */
- if(of_device_is_compatible(np, "fsl,gianfar-mdio") ||
- of_device_is_compatible(np, "fsl,gianfar-tbi") ||
- of_device_is_compatible(np, "gianfar")) {
- enet_regs = (struct gfar __iomem *)regs;
- return &enet_regs->tbipa;
- } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
- of_device_is_compatible(np, "fsl,etsec2-tbi")) {
- return of_iomap(np, 1);
- }
-#endif
- return NULL;
+ return &mdio->utbipar;
}
-
-static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
+/*
+ * Find the UCC node that controls the given MDIO node
+ *
+ * For some reason, the QE MDIO nodes are not children of the UCC devices
+ * that control them. Therefore, we need to scan all UCC nodes looking for
+ * the one that encompasses the given MDIO node.  We do this by comparing
+ * physical addresses. The 'start' and 'end' addresses of the MDIO node are
+ * passed, and the correct UCC node will cover the entire address range.
+ *
+ * This assumes that there is only one QE MDIO node in the entire device tree.
+ */
+static void ucc_configure(phys_addr_t start, phys_addr_t end)
{
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+ static bool found_mii_master;
struct device_node *np = NULL;
- int err = 0;
- for_each_compatible_node(np, NULL, "ucc_geth") {
- struct resource tempres;
+ if (found_mii_master)
+ return;
- err = of_address_to_resource(np, 0, &tempres);
- if (err)
+ for_each_compatible_node(np, NULL, "ucc_geth") {
+ struct resource res;
+ const uint32_t *iprop;
+ uint32_t id;
+ int ret;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret < 0) {
+ pr_debug("fsl-pq-mdio: no address range in node %s\n",
+ np->full_name);
continue;
+ }
/* if our mdio regs fall within this UCC regs range */
- if ((start >= tempres.start) && (end <= tempres.end)) {
- /* Find the id of the UCC */
- const u32 *id;
-
- id = of_get_property(np, "cell-index", NULL);
- if (!id) {
- id = of_get_property(np, "device-id", NULL);
- if (!id)
- continue;
+ if ((start < res.start) || (end > res.end))
+ continue;
+
+ iprop = of_get_property(np, "cell-index", NULL);
+ if (!iprop) {
+ iprop = of_get_property(np, "device-id", NULL);
+ if (!iprop) {
+ pr_debug("fsl-pq-mdio: no UCC ID in node %s\n",
+ np->full_name);
+ continue;
}
+ }
- *ucc_id = *id;
+ id = be32_to_cpup(iprop);
- return 0;
+ /*
+ * cell-index and device-id for QE nodes are
+ * numbered from 1, not 0.
+ */
+ if (ucc_set_qe_mux_mii_mng(id - 1) < 0) {
+ pr_debug("fsl-pq-mdio: invalid UCC ID in node %s\n",
+ np->full_name);
+ continue;
}
+
+ pr_debug("fsl-pq-mdio: setting node UCC%u to MII master\n", id);
+ found_mii_master = true;
}
+}
- if (err)
- return err;
- else
- return -EINVAL;
-#else
- return -ENODEV;
#endif
-}
-static int fsl_pq_mdio_probe(struct platform_device *ofdev)
+static struct of_device_id fsl_pq_mdio_match[] = {
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+ {
+ .compatible = "fsl,gianfar-tbi",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = 0,
+ .get_tbipa = get_gfar_tbipa,
+ },
+ },
+ {
+ .compatible = "fsl,gianfar-mdio",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = 0,
+ .get_tbipa = get_gfar_tbipa,
+ },
+ },
+ {
+ .type = "mdio",
+ .compatible = "gianfar",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = offsetof(struct fsl_pq_mdio, mii),
+ .get_tbipa = get_gfar_tbipa,
+ },
+ },
+ {
+ .compatible = "fsl,etsec2-tbi",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = offsetof(struct fsl_pq_mdio, mii),
+ .get_tbipa = get_etsec_tbipa,
+ },
+ },
+ {
+ .compatible = "fsl,etsec2-mdio",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = offsetof(struct fsl_pq_mdio, mii),
+ .get_tbipa = get_etsec_tbipa,
+ },
+ },
+#endif
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+ {
+ .compatible = "fsl,ucc-mdio",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = 0,
+ .get_tbipa = get_ucc_tbipa,
+ .ucc_configure = ucc_configure,
+ },
+ },
+ {
+ /* Legacy UCC MDIO node */
+ .type = "mdio",
+ .compatible = "ucc_geth_phy",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = 0,
+ .get_tbipa = get_ucc_tbipa,
+ .ucc_configure = ucc_configure,
+ },
+ },
+#endif
+ /* No Kconfig option for Fman support yet */
+ {
+ .compatible = "fsl,fman-mdio",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = 0,
+ /* Fman TBI operations are handled elsewhere */
+ },
+ },
+
+ {},
+};
+MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
+
+static int fsl_pq_mdio_probe(struct platform_device *pdev)
{
- struct device_node *np = ofdev->dev.of_node;
+ const struct of_device_id *id =
+ of_match_device(fsl_pq_mdio_match, &pdev->dev);
+ const struct fsl_pq_mdio_data *data = id->data;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource res;
struct device_node *tbi;
struct fsl_pq_mdio_priv *priv;
- struct fsl_pq_mdio __iomem *regs = NULL;
- void __iomem *map;
- u32 __iomem *tbipa;
struct mii_bus *new_bus;
- int tbiaddr = -1;
- const u32 *addrp;
- u64 addr = 0, size = 0;
int err;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
- new_bus = mdiobus_alloc();
- if (!new_bus) {
- err = -ENOMEM;
- goto err_free_priv;
- }
+ new_bus = mdiobus_alloc_size(sizeof(*priv));
+ if (!new_bus)
+ return -ENOMEM;
+ priv = new_bus->priv;
new_bus->name = "Freescale PowerQUICC MII Bus",
- new_bus->read = &fsl_pq_mdio_read,
- new_bus->write = &fsl_pq_mdio_write,
- new_bus->reset = &fsl_pq_mdio_reset,
- new_bus->priv = priv;
- fsl_pq_mdio_bus_name(new_bus->id, np);
-
- addrp = of_get_address(np, 0, &size, NULL);
- if (!addrp) {
- err = -EINVAL;
- goto err_free_bus;
+ new_bus->read = &fsl_pq_mdio_read;
+ new_bus->write = &fsl_pq_mdio_write;
+ new_bus->reset = &fsl_pq_mdio_reset;
+ new_bus->irq = priv->irqs;
+
+ err = of_address_to_resource(np, 0, &res);
+ if (err < 0) {
+ dev_err(&pdev->dev, "could not obtain address information\n");
+ goto error;
}
- /* Set the PHY base address */
- addr = of_translate_address(np, addrp);
- if (addr == OF_BAD_ADDR) {
- err = -EINVAL;
- goto err_free_bus;
- }
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name,
+ (unsigned long long)res.start);
- map = ioremap(addr, size);
- if (!map) {
+ priv->map = of_iomap(np, 0);
+ if (!priv->map) {
err = -ENOMEM;
- goto err_free_bus;
+ goto error;
}
- priv->map = map;
-
- if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
- of_device_is_compatible(np, "fsl,gianfar-tbi") ||
- of_device_is_compatible(np, "fsl,ucc-mdio") ||
- of_device_is_compatible(np, "ucc_geth_phy"))
- map -= offsetof(struct fsl_pq_mdio, miimcfg);
- regs = map;
- priv->regs = regs;
-
- new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
- if (NULL == new_bus->irq) {
- err = -ENOMEM;
- goto err_unmap_regs;
+ /*
+ * Some device tree nodes represent only the MII registers, and
+ * others represent the MAC and MII registers. The 'mii_offset' field
+ * contains the offset of the MII registers inside the mapped register
+ * space.
+ */
+ if (data->mii_offset > resource_size(&res)) {
+ dev_err(&pdev->dev, "invalid register map\n");
+ err = -EINVAL;
+ goto error;
}
+ priv->regs = priv->map + data->mii_offset;
- new_bus->parent = &ofdev->dev;
- dev_set_drvdata(&ofdev->dev, new_bus);
-
- if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
- of_device_is_compatible(np, "fsl,gianfar-tbi") ||
- of_device_is_compatible(np, "fsl,etsec2-mdio") ||
- of_device_is_compatible(np, "fsl,etsec2-tbi") ||
- of_device_is_compatible(np, "gianfar")) {
- tbipa = get_gfar_tbipa(regs, np);
- if (!tbipa) {
- err = -EINVAL;
- goto err_free_irqs;
- }
- } else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
- of_device_is_compatible(np, "ucc_geth_phy")) {
- u32 id;
- static u32 mii_mng_master;
-
- tbipa = &regs->utbipar;
-
- if ((err = get_ucc_id_for_range(addr, addr + size, &id)))
- goto err_free_irqs;
+ new_bus->parent = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, new_bus);
- if (!mii_mng_master) {
- mii_mng_master = id;
- ucc_set_qe_mux_mii_mng(id - 1);
+ if (data->get_tbipa) {
+ for_each_child_of_node(np, tbi) {
+ if (strcmp(tbi->type, "tbi-phy") == 0) {
+ dev_dbg(&pdev->dev, "found TBI PHY node %s\n",
+ strrchr(tbi->full_name, '/') + 1);
+ break;
+ }
}
- } else {
- err = -ENODEV;
- goto err_free_irqs;
- }
- for_each_child_of_node(np, tbi) {
- if (!strncmp(tbi->type, "tbi-phy", 8))
- break;
- }
+ if (tbi) {
+ const u32 *prop = of_get_property(tbi, "reg", NULL);
+ uint32_t __iomem *tbipa;
- if (tbi) {
- const u32 *prop = of_get_property(tbi, "reg", NULL);
+ if (!prop) {
+ dev_err(&pdev->dev,
+ "missing 'reg' property in node %s\n",
+ tbi->full_name);
+ err = -EBUSY;
+ goto error;
+ }
- if (prop)
- tbiaddr = *prop;
+ tbipa = data->get_tbipa(priv->map);
- if (tbiaddr == -1) {
- err = -EBUSY;
- goto err_free_irqs;
- } else {
- out_be32(tbipa, tbiaddr);
+ out_be32(tbipa, be32_to_cpup(prop));
}
}
+ if (data->ucc_configure)
+ data->ucc_configure(res.start, res.end);
+
err = of_mdiobus_register(new_bus, np);
if (err) {
- printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
- new_bus->name);
- goto err_free_irqs;
+ dev_err(&pdev->dev, "cannot register %s as MDIO bus\n",
+ new_bus->name);
+ goto error;
}
return 0;
-err_free_irqs:
- kfree(new_bus->irq);
-err_unmap_regs:
- iounmap(priv->map);
-err_free_bus:
+error:
+ if (priv->map)
+ iounmap(priv->map);
+
kfree(new_bus);
-err_free_priv:
- kfree(priv);
+
return err;
}
-static int fsl_pq_mdio_remove(struct platform_device *ofdev)
+static int fsl_pq_mdio_remove(struct platform_device *pdev)
{
- struct device *device = &ofdev->dev;
+ struct device *device = &pdev->dev;
struct mii_bus *bus = dev_get_drvdata(device);
struct fsl_pq_mdio_priv *priv = bus->priv;
@@ -406,41 +471,11 @@ static int fsl_pq_mdio_remove(struct platform_device *ofdev)
dev_set_drvdata(device, NULL);
iounmap(priv->map);
- bus->priv = NULL;
mdiobus_free(bus);
- kfree(priv);
return 0;
}
-static struct of_device_id fsl_pq_mdio_match[] = {
- {
- .type = "mdio",
- .compatible = "ucc_geth_phy",
- },
- {
- .type = "mdio",
- .compatible = "gianfar",
- },
- {
- .compatible = "fsl,ucc-mdio",
- },
- {
- .compatible = "fsl,gianfar-tbi",
- },
- {
- .compatible = "fsl,gianfar-mdio",
- },
- {
- .compatible = "fsl,etsec2-tbi",
- },
- {
- .compatible = "fsl,etsec2-mdio",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
-
static struct platform_driver fsl_pq_mdio_driver = {
.driver = {
.name = "fsl-pq_mdio",
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.h b/drivers/net/ethernet/freescale/fsl_pq_mdio.h
deleted file mode 100644
index bd17a2a0139b..000000000000
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Freescale PowerQUICC MDIO Driver -- MII Management Bus Implementation
- * Driver for the MDIO bus controller on Freescale PowerQUICC processors
- *
- * Author: Andy Fleming
- * Modifier: Sandeep Gopalpet
- *
- * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-#ifndef __FSL_PQ_MDIO_H
-#define __FSL_PQ_MDIO_H
-
-#define MIIMIND_BUSY 0x00000001
-#define MIIMIND_NOTVALID 0x00000004
-#define MIIMCFG_INIT_VALUE 0x00000007
-#define MIIMCFG_RESET 0x80000000
-
-#define MII_READ_COMMAND 0x00000001
-
-struct fsl_pq_mdio {
- u8 res1[16];
- u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/
- u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/
- u8 res2[4];
- u32 emapm; /* MDIO Event mapping register (for etsec2)*/
- u8 res3[1280];
- u32 miimcfg; /* MII management configuration reg */
- u32 miimcom; /* MII management command reg */
- u32 miimadd; /* MII management address reg */
- u32 miimcon; /* MII management control reg */
- u32 miimstat; /* MII management status reg */
- u32 miimind; /* MII management indication reg */
- u8 reserved[28]; /* Space holder */
- u32 utbipar; /* TBI phy address reg (only on UCC) */
- u8 res4[2728];
-} __packed;
-
-int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
-int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
-int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
- int regnum, u16 value);
-int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, int mii_id, int regnum);
-int __init fsl_pq_mdio_init(void);
-void fsl_pq_mdio_exit(void);
-void fsl_pq_mdio_bus_name(char *name, struct device_node *np);
-#endif /* FSL_PQ_MDIO_H */
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d3233f59a82e..a1b52ec3b930 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -100,7 +100,6 @@
#include <linux/of_net.h>
#include "gianfar.h"
-#include "fsl_pq_mdio.h"
#define TX_TIMEOUT (1*HZ)
@@ -395,7 +394,13 @@ static void gfar_init_mac(struct net_device *ndev)
if (ndev->features & NETIF_F_IP_CSUM)
tctrl |= TCTRL_INIT_CSUM;
- tctrl |= TCTRL_TXSCHED_PRIO;
+ if (priv->prio_sched_en)
+ tctrl |= TCTRL_TXSCHED_PRIO;
+ else {
+ tctrl |= TCTRL_TXSCHED_WRRS;
+ gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
+ gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
+ }
gfar_write(&regs->tctrl, tctrl);
@@ -1161,6 +1166,9 @@ static int gfar_probe(struct platform_device *ofdev)
priv->rx_filer_enable = 1;
/* Enable most messages by default */
priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
+ /* use priority h/w tx queue scheduling for single queue devices */
+ if (priv->num_tx_queues == 1)
+ priv->prio_sched_en = 1;
/* Carrier starts down, phylib will bring it up */
netif_carrier_off(dev);
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 2136c7ff5e6d..4141ef2ddafc 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -301,8 +301,16 @@ extern const char gfar_driver_version[];
#define TCTRL_TFCPAUSE 0x00000008
#define TCTRL_TXSCHED_MASK 0x00000006
#define TCTRL_TXSCHED_INIT 0x00000000
+/* priority scheduling */
#define TCTRL_TXSCHED_PRIO 0x00000002
+/* weighted round-robin scheduling (WRRS) */
#define TCTRL_TXSCHED_WRRS 0x00000004
+/* default WRRS weight and policy setting,
+ * tailored to the tr03wt and tr47wt registers:
+ * equal weight for all Tx Qs, measured in 64-byte units
+ */
+#define DEFAULT_WRRS_WEIGHT 0x18181818
+
#define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN)
#define IEVENT_INIT_CLEAR 0xffffffff
@@ -1098,7 +1106,8 @@ struct gfar_private {
extended_hash:1,
bd_stash_en:1,
rx_filer_enable:1,
- wol_en:1; /* Wake-on-LAN enabled */
+ wol_en:1, /* Wake-on-LAN enabled */
+ prio_sched_en:1; /* Enable priority-based Tx scheduling in HW */
unsigned short padding;
/* PHY stuff */
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 0daa66b8eca0..b9db0e040563 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -510,7 +510,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
spin_unlock_irqrestore(&etsects->lock, flags);
- etsects->clock = ptp_clock_register(&etsects->caps);
+ etsects->clock = ptp_clock_register(&etsects->caps, &dev->dev);
if (IS_ERR(etsects->clock)) {
err = PTR_ERR(etsects->clock);
goto no_clock;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 21c6574c5f15..164288439220 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -42,7 +42,6 @@
#include <asm/machdep.h>
#include "ucc_geth.h"
-#include "fsl_pq_mdio.h"
#undef DEBUG
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
new file mode 100644
index 000000000000..1afb5ea2a984
--- /dev/null
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -0,0 +1,274 @@
+/*
+ * QorIQ 10G MDIO Controller
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * Authors: Andy Fleming <afleming@freescale.com>
+ * Timur Tabi <timur@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+
+/* Number of microseconds to wait for a register to respond */
+#define TIMEOUT 1000
+
+struct tgec_mdio_controller {
+ __be32 reserved[12];
+ __be32 mdio_stat; /* MDIO configuration and status */
+ __be32 mdio_ctl; /* MDIO control */
+ __be32 mdio_data; /* MDIO data */
+ __be32 mdio_addr; /* MDIO address */
+} __packed;
+
+#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8)
+#define MDIO_STAT_BSY (1 << 0)
+#define MDIO_STAT_RD_ER (1 << 1)
+#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
+#define MDIO_CTL_PRE_DIS (1 << 10)
+#define MDIO_CTL_SCAN_EN (1 << 11)
+#define MDIO_CTL_POST_INC (1 << 14)
+#define MDIO_CTL_READ (1 << 15)
+
+#define MDIO_DATA(x) (x & 0xffff)
+#define MDIO_DATA_BSY (1 << 31)
+
+/*
+ * Wait until the MDIO bus is free
+ */
+static int xgmac_wait_until_free(struct device *dev,
+ struct tgec_mdio_controller __iomem *regs)
+{
+ uint32_t status;
+
+ /* Wait till the bus is free */
+ status = spin_event_timeout(
+ !((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0);
+ if (!status) {
+ dev_err(dev, "timeout waiting for bus to be free\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Wait till the MDIO read or write operation is complete
+ */
+static int xgmac_wait_until_done(struct device *dev,
+ struct tgec_mdio_controller __iomem *regs)
+{
+ uint32_t status;
+
+ /* Wait till the MDIO write is complete */
+ status = spin_event_timeout(
+ !((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0);
+ if (!status) {
+ dev_err(dev, "timeout waiting for operation to complete\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Write value to the PHY for this device to the register at regnum, waiting
+ * until the write is done before it returns. All PHY configuration has to be
+ * done through the TSEC1 MIIM regs.
+ */
+static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+{
+ struct tgec_mdio_controller __iomem *regs = bus->priv;
+ uint16_t dev_addr = regnum >> 16;
+ int ret;
+
+ /* Setup the MII Mgmt clock speed */
+ out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+ ret = xgmac_wait_until_free(&bus->dev, regs);
+ if (ret)
+ return ret;
+
+ /* Set the port and dev addr */
+ out_be32(&regs->mdio_ctl,
+ MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr));
+
+ /* Set the register address */
+ out_be32(&regs->mdio_addr, regnum & 0xffff);
+
+ ret = xgmac_wait_until_free(&bus->dev, regs);
+ if (ret)
+ return ret;
+
+ /* Write the value to the register */
+ out_be32(&regs->mdio_data, MDIO_DATA(value));
+
+ ret = xgmac_wait_until_done(&bus->dev, regs);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Reads from register regnum in the PHY for device dev, returning the value.
+ * Clears miimcom first. All PHY configuration has to be done through the
+ * TSEC1 MIIM regs.
+ */
+static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+ struct tgec_mdio_controller __iomem *regs = bus->priv;
+ uint16_t dev_addr = regnum >> 16;
+ uint32_t mdio_ctl;
+ uint16_t value;
+ int ret;
+
+ /* Setup the MII Mgmt clock speed */
+ out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+ ret = xgmac_wait_until_free(&bus->dev, regs);
+ if (ret)
+ return ret;
+
+ /* Set the Port and Device Addrs */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ out_be32(&regs->mdio_ctl, mdio_ctl);
+
+ /* Set the register address */
+ out_be32(&regs->mdio_addr, regnum & 0xffff);
+
+ ret = xgmac_wait_until_free(&bus->dev, regs);
+ if (ret)
+ return ret;
+
+ /* Initiate the read */
+ out_be32(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
+
+ ret = xgmac_wait_until_done(&bus->dev, regs);
+ if (ret)
+ return ret;
+
+ /* Return all Fs if nothing was there */
+ if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
+ dev_err(&bus->dev, "MDIO read error\n");
+ return 0xffff;
+ }
+
+ value = in_be32(&regs->mdio_data) & 0xffff;
+ dev_dbg(&bus->dev, "read %04x\n", value);
+
+ return value;
+}
+
+/* Reset the MIIM registers, and wait for the bus to be free */
+static int xgmac_mdio_reset(struct mii_bus *bus)
+{
+ struct tgec_mdio_controller __iomem *regs = bus->priv;
+ int ret;
+
+ mutex_lock(&bus->mdio_lock);
+
+ /* Setup the MII Mgmt clock speed */
+ out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+ ret = xgmac_wait_until_free(&bus->dev, regs);
+
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int __devinit xgmac_mdio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mii_bus *bus;
+ struct resource res;
+ int ret;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
+ dev_err(&pdev->dev, "could not obtain address\n");
+ return ret;
+ }
+
+ bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale XGMAC MDIO Bus";
+ bus->read = xgmac_mdio_read;
+ bus->write = xgmac_mdio_write;
+ bus->reset = xgmac_mdio_reset;
+ bus->irq = bus->priv;
+ bus->parent = &pdev->dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
+
+ /* Set the PHY base address */
+ bus->priv = of_iomap(np, 0);
+ if (!bus->priv) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ ret = of_mdiobus_register(bus, np);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register MDIO bus\n");
+ goto err_registration;
+ }
+
+ dev_set_drvdata(&pdev->dev, bus);
+
+ return 0;
+
+err_registration:
+ iounmap(bus->priv);
+
+err_ioremap:
+ mdiobus_free(bus);
+
+ return ret;
+}
+
+static int __devexit xgmac_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus = dev_get_drvdata(&pdev->dev);
+
+ mdiobus_unregister(bus);
+ iounmap(bus->priv);
+ mdiobus_free(bus);
+
+ return 0;
+}
+
+static struct of_device_id xgmac_mdio_match[] = {
+ {
+ .compatible = "fsl,fman-xmdio",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
+
+static struct platform_driver xgmac_mdio_driver = {
+ .driver = {
+ .name = "fsl-fman_xmdio",
+ .of_match_table = xgmac_mdio_match,
+ },
+ .probe = xgmac_mdio_probe,
+ .remove = xgmac_mdio_remove,
+};
+
+module_platform_driver(xgmac_mdio_driver);
+
+MODULE_DESCRIPTION("Freescale QorIQ 10G MDIO Controller");
+MODULE_LICENSE("GPL v2");
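For reference, a minimal user-space sketch of how the MDIO_* bit-field macros in the new xgmac_mdio.c fit together: a clause-45 register number carries the MMD (device) address in its upper 16 bits, the port and device addresses are packed into mdio_ctl, and the low 16 bits go to mdio_addr. The macros are copied from the file above; phy_id and regnum are made-up example values, and this is not kernel code.

#include <stdint.h>
#include <stdio.h>

#define MDIO_STAT_CLKDIV(x)	(((x>>1) & 0xff) << 8)
#define MDIO_CTL_DEV_ADDR(x)	(x & 0x1f)
#define MDIO_CTL_PORT_ADDR(x)	((x & 0x1f) << 5)
#define MDIO_CTL_READ		(1 << 15)

int main(void)
{
	int phy_id = 4;				/* example port (PHY) address */
	int regnum = (1 << 16) | 0x0007;	/* example: MMD 1, register 7 */
	uint16_t dev_addr = regnum >> 16;
	uint32_t ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);

	printf("mdio_stat clkdiv field: 0x%08x\n", (unsigned)MDIO_STAT_CLKDIV(100));
	printf("mdio_ctl (write):       0x%08x\n", (unsigned)ctl);
	printf("mdio_ctl (read):        0x%08x\n", (unsigned)(ctl | MDIO_CTL_READ));
	printf("mdio_addr:              0x%08x\n", (unsigned)(regnum & 0xffff));
	return 0;
}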
diff --git a/drivers/net/ethernet/i825xx/Kconfig b/drivers/net/ethernet/i825xx/Kconfig
index fed5080a6b62..959faf7388e2 100644
--- a/drivers/net/ethernet/i825xx/Kconfig
+++ b/drivers/net/ethernet/i825xx/Kconfig
@@ -150,7 +150,7 @@ config SUN3_82586
config ZNET
tristate "Zenith Z-Note support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && ISA_DMA_API
+ depends on EXPERIMENTAL && ISA_DMA_API && X86
---help---
The Zenith Z-Note notebook computer has a built-in network
(Ethernet) card, and this is the Linux driver for it. Note that the
diff --git a/drivers/net/ethernet/i825xx/znet.c b/drivers/net/ethernet/i825xx/znet.c
index ba4e0cea3506..c9479e081b8a 100644
--- a/drivers/net/ethernet/i825xx/znet.c
+++ b/drivers/net/ethernet/i825xx/znet.c
@@ -865,14 +865,14 @@ static void hardware_init(struct net_device *dev)
disable_dma(znet->rx_dma); /* reset by an interrupting task. */
clear_dma_ff(znet->rx_dma);
set_dma_mode(znet->rx_dma, DMA_RX_MODE);
- set_dma_addr(znet->rx_dma, (unsigned int) znet->rx_start);
+ set_dma_addr(znet->rx_dma, isa_virt_to_bus(znet->rx_start));
set_dma_count(znet->rx_dma, RX_BUF_SIZE);
enable_dma(znet->rx_dma);
/* Now set up the Tx channel. */
disable_dma(znet->tx_dma);
clear_dma_ff(znet->tx_dma);
set_dma_mode(znet->tx_dma, DMA_TX_MODE);
- set_dma_addr(znet->tx_dma, (unsigned int) znet->tx_start);
+ set_dma_addr(znet->tx_dma, isa_virt_to_bus(znet->tx_start));
set_dma_count(znet->tx_dma, znet->tx_buf_len<<1);
enable_dma(znet->tx_dma);
release_dma_lock(flags);
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 535f94fac4a1..29ce9bd27f94 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -3157,7 +3157,7 @@ static void e100_io_resume(struct pci_dev *pdev)
}
}
-static struct pci_error_handlers e100_err_handler = {
+static const struct pci_error_handlers e100_err_handler = {
.error_detected = e100_io_error_detected,
.slot_reset = e100_io_slot_reset,
.resume = e100_io_resume,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 736a7d987db5..9089d00f1421 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -174,6 +174,20 @@ static int e1000_get_settings(struct net_device *netdev,
ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ /* MDI-X => 1; MDI => 0 */
+ if ((hw->media_type == e1000_media_type_copper) &&
+ netif_carrier_ok(netdev))
+ ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
+ ETH_TP_MDI_X :
+ ETH_TP_MDI);
+ else
+ ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+ if (hw->mdix == AUTO_ALL_MODES)
+ ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ else
+ ecmd->eth_tp_mdix_ctrl = hw->mdix;
return 0;
}
@@ -183,6 +197,22 @@ static int e1000_set_settings(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ /*
+ * MDI setting is only allowed when autoneg is enabled because
+ * some hardware doesn't allow MDI setting when speed or
+ * duplex is forced.
+ */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ if (hw->media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
+
+ if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (ecmd->autoneg != AUTONEG_ENABLE)) {
+ e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+ return -EINVAL;
+ }
+ }
+
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
@@ -199,12 +229,21 @@ static int e1000_set_settings(struct net_device *netdev,
ecmd->advertising = hw->autoneg_advertised;
} else {
u32 speed = ethtool_cmd_speed(ecmd);
+ /* calling this overrides forced MDI setting */
if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
clear_bit(__E1000_RESETTING, &adapter->flags);
return -EINVAL;
}
}
+ /* MDI-X => 2; MDI => 1; Auto => 3 */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ hw->mdix = AUTO_ALL_MODES;
+ else
+ hw->mdix = ecmd->eth_tp_mdix_ctrl;
+ }
+
/* reset the link */
if (netif_running(adapter->netdev)) {
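For reference, a sketch of the MDI/MDI-X mapping these ethtool hunks implement: user space passes MDI = 1, MDI-X = 2, Auto = 3 in eth_tp_mdix_ctrl, and the drivers store Auto as 0 internally (AUTO_ALL_MODES), as the "MDI-X => 2; MDI => 1; Auto => 3" remapping above shows. The ETH_TP_MDI* values come from linux/ethtool.h and AUTO_ALL_MODES = 0 is assumed from the driver comments; the helper below is a stand-alone user-space illustration, not driver code.

#include <stdio.h>

#define ETH_TP_MDI	1
#define ETH_TP_MDI_X	2
#define ETH_TP_MDI_AUTO	3
#define AUTO_ALL_MODES	0	/* assumed: zero means "auto" inside the driver */

static int mdix_ctrl_to_hw(int eth_tp_mdix_ctrl)
{
	/* auto (3) is remapped to 0; MDI (1) and MDI-X (2) pass through */
	if (eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
		return AUTO_ALL_MODES;
	return eth_tp_mdix_ctrl;
}

int main(void)
{
	printf("MDI   -> hw mdix = %d\n", mdix_ctrl_to_hw(ETH_TP_MDI));
	printf("MDI-X -> hw mdix = %d\n", mdix_ctrl_to_hw(ETH_TP_MDI_X));
	printf("Auto  -> hw mdix = %d\n", mdix_ctrl_to_hw(ETH_TP_MDI_AUTO));
	return 0;
}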
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index bde337ee1a34..222bfaff4622 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -192,7 +192,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);
-static struct pci_error_handlers e1000_err_handler = {
+static const struct pci_error_handlers e1000_err_handler = {
.error_detected = e1000_io_error_detected,
.slot_reset = e1000_io_slot_reset,
.resume = e1000_io_resume,
@@ -2014,6 +2014,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
e1000_unmap_and_free_tx_resource(adapter, buffer_info);
}
+ netdev_reset_queue(adapter->netdev);
size = sizeof(struct e1000_buffer) * tx_ring->count;
memset(tx_ring->buffer_info, 0, size);
@@ -3273,6 +3274,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
nr_frags, mss);
if (count) {
+ netdev_sent_queue(netdev, skb->len);
skb_tx_timestamp(skb);
e1000_tx_queue(adapter, tx_ring, tx_flags, count);
@@ -3860,6 +3862,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
unsigned int i, eop;
unsigned int count = 0;
unsigned int total_tx_bytes=0, total_tx_packets=0;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch;
@@ -3877,6 +3880,11 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
if (cleaned) {
total_tx_packets += buffer_info->segs;
total_tx_bytes += buffer_info->bytecount;
+ if (buffer_info->skb) {
+ bytes_compl += buffer_info->skb->len;
+ pkts_compl++;
+ }
+
}
e1000_unmap_and_free_tx_resource(adapter, buffer_info);
tx_desc->upper.data = 0;
@@ -3890,6 +3898,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
tx_ring->next_to_clean = i;
+ netdev_completed_queue(netdev, pkts_compl, bytes_compl);
+
#define TX_WAKE_THRESHOLD 32
if (unlikely(count && netif_carrier_ok(netdev) &&
E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
@@ -4950,6 +4960,10 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
default:
goto err_inval;
}
+
+ /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+ hw->mdix = AUTO_ALL_MODES;
+
return 0;
err_inval:
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 080c89093feb..c98586408005 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -653,7 +653,7 @@ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
**/
static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
{
- u16 data = er32(POEMB);
+ u32 data = er32(POEMB);
if (active)
data |= E1000_PHY_CTRL_D0A_LPLU;
@@ -677,7 +677,7 @@ static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
**/
static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
{
- u16 data = er32(POEMB);
+ u32 data = er32(POEMB);
if (!active) {
data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 0349e2478df8..c11ac2756667 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -199,6 +199,11 @@ static int e1000_get_settings(struct net_device *netdev,
else
ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ if (hw->phy.mdix == AUTO_ALL_MODES)
+ ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ else
+ ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+
return 0;
}
@@ -241,6 +246,10 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
default:
goto err_inval;
}
+
+ /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+ adapter->hw.phy.mdix = AUTO_ALL_MODES;
+
return 0;
err_inval:
@@ -264,6 +273,22 @@ static int e1000_set_settings(struct net_device *netdev,
return -EINVAL;
}
+ /*
+ * MDI setting is only allowed when autoneg is enabled because
+ * some hardware doesn't allow MDI setting when speed or
+ * duplex is forced.
+ */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ if (hw->phy.media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
+
+ if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (ecmd->autoneg != AUTONEG_ENABLE)) {
+ e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+ return -EINVAL;
+ }
+ }
+
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
usleep_range(1000, 2000);
@@ -282,20 +307,32 @@ static int e1000_set_settings(struct net_device *netdev,
hw->fc.requested_mode = e1000_fc_default;
} else {
u32 speed = ethtool_cmd_speed(ecmd);
+ /* calling this overrides forced MDI setting */
if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
clear_bit(__E1000_RESETTING, &adapter->state);
return -EINVAL;
}
}
+ /* MDI-X => 2; MDI => 1; Auto => 3 */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ /*
+ * fix up the value for auto (3 => 0) as zero is mapped
+ * internally to auto
+ */
+ if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ hw->phy.mdix = AUTO_ALL_MODES;
+ else
+ hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+ }
+
/* reset the link */
if (netif_running(adapter->netdev)) {
e1000e_down(adapter);
e1000e_up(adapter);
- } else {
+ } else
e1000e_reset(adapter);
- }
clear_bit(__E1000_RESETTING, &adapter->state);
return 0;
@@ -1905,7 +1942,8 @@ static int e1000_set_coalesce(struct net_device *netdev,
return -EINVAL;
if (ec->rx_coalesce_usecs == 4) {
- adapter->itr = adapter->itr_setting = 4;
+ adapter->itr_setting = 4;
+ adapter->itr = adapter->itr_setting;
} else if (ec->rx_coalesce_usecs <= 3) {
adapter->itr = 20000;
adapter->itr_setting = ec->rx_coalesce_usecs;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index d01a099475a1..fb659dd8db03 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -56,7 +56,7 @@
#define DRV_EXTRAVERSION "-k"
-#define DRV_VERSION "2.0.0" DRV_EXTRAVERSION
+#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -3446,7 +3446,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
/*
* if short on Rx space, Rx wins and must trump Tx
- * adjustment or use Early Receive if available
+ * adjustment
*/
if (pba < min_rx_space)
pba = min_rx_space;
@@ -3755,6 +3755,10 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data)
e_dbg("icr is %08X\n", icr);
if (icr & E1000_ICR_RXSEQ) {
adapter->flags &= ~FLAG_MSI_TEST_FAILED;
+ /*
+ * Force memory writes to complete before acknowledging the
+ * interrupt is handled.
+ */
wmb();
}
@@ -3796,6 +3800,10 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
goto msi_test_failed;
}
+ /*
+ * Force memory writes to complete before enabling and firing an
+ * interrupt.
+ */
wmb();
e1000_irq_enable(adapter);
@@ -3807,7 +3815,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
e1000_irq_disable(adapter);
- rmb();
+ rmb(); /* read flags after interrupt has been fired */
if (adapter->flags & FLAG_MSI_TEST_FAILED) {
adapter->int_mode = E1000E_INT_MODE_LEGACY;
@@ -4670,7 +4678,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
struct e1000_buffer *buffer_info;
unsigned int i;
u32 cmd_length = 0;
- u16 ipcse = 0, tucse, mss;
+ u16 ipcse = 0, mss;
u8 ipcss, ipcso, tucss, tucso, hdr_len;
if (!skb_is_gso(skb))
@@ -4704,7 +4712,6 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
tucss = skb_transport_offset(skb);
tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
- tucse = 0;
cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
@@ -4718,7 +4725,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
context_desc->upper_setup.tcp_fields.tucss = tucss;
context_desc->upper_setup.tcp_fields.tucso = tucso;
- context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+ context_desc->upper_setup.tcp_fields.tucse = 0;
context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
context_desc->cmd_and_length = cpu_to_le32(cmd_length);
@@ -5584,16 +5591,15 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
*/
if (adapter->flags & FLAG_IS_QUAD_PORT) {
struct pci_dev *us_dev = pdev->bus->self;
- int pos = pci_pcie_cap(us_dev);
u16 devctl;
- pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
- pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
- (devctl & ~PCI_EXP_DEVCTL_CERE));
+ pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
+ pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
+ (devctl & ~PCI_EXP_DEVCTL_CERE));
e1000_power_off(pdev, sleep, wake);
- pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
+ pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
} else {
e1000_power_off(pdev, sleep, wake);
}
@@ -5607,25 +5613,15 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
#else
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
- int pos;
- u16 reg16;
-
/*
* Both device and parent should have the same ASPM setting.
* Disable ASPM in downstream component first and then upstream.
*/
- pos = pci_pcie_cap(pdev);
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
- reg16 &= ~state;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
-
- if (!pdev->bus->self)
- return;
+ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state);
- pos = pci_pcie_cap(pdev->bus->self);
- pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
- reg16 &= ~state;
- pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
+ if (pdev->bus->self)
+ pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL,
+ state);
}
#endif
static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
@@ -6486,7 +6482,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
}
/* PCI Error Recovery (ERS) */
-static struct pci_error_handlers e1000_err_handler = {
+static const struct pci_error_handlers e1000_err_handler = {
.error_detected = e1000_io_error_detected,
.slot_reset = e1000_io_slot_reset,
.resume = e1000_io_resume,
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index b860d4f7ea2a..fc62a3f3a5be 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -84,8 +84,9 @@ static const u16 e1000_igp_2_cable_length_table[] = {
#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
/* I82577 PHY Control 2 */
-#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400
-#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200
+#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
/* I82577 PHY Diagnostics Status */
#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
@@ -702,6 +703,32 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
if (ret_val)
return ret_val;
+ /* Set MDI/MDIX mode */
+ ret_val = e1e_rphy(hw, I82577_PHY_CTRL_2, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
+ /*
+ * Options:
+ * 0 - Auto (default)
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ */
+ switch (hw->phy.mdix) {
+ case 1:
+ break;
+ case 2:
+ phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX;
+ break;
+ case 0:
+ default:
+ phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX;
+ break;
+ }
+ ret_val = e1e_wphy(hw, I82577_PHY_CTRL_2, phy_data);
+ if (ret_val)
+ return ret_val;
+
return e1000_set_master_slave_mode(hw);
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index ba994fb4cec6..ca4641e2f748 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -2223,11 +2223,10 @@ out:
s32 igb_set_eee_i350(struct e1000_hw *hw)
{
s32 ret_val = 0;
- u32 ipcnfg, eeer, ctrl_ext;
+ u32 ipcnfg, eeer;
- ctrl_ext = rd32(E1000_CTRL_EXT);
- if ((hw->mac.type != e1000_i350) ||
- (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
goto out;
ipcnfg = rd32(E1000_IPCNFG);
eeer = rd32(E1000_EEER);
@@ -2240,6 +2239,14 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
E1000_EEER_RX_LPI_EN |
E1000_EEER_LPI_FC);
+ /* keep the LPI clock running before EEE is enabled */
+ if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
+ u32 eee_su;
+ eee_su = rd32(E1000_EEE_SU);
+ eee_su &= ~E1000_EEE_SU_LPI_CLK_STP;
+ wr32(E1000_EEE_SU, eee_su);
+ }
+
} else {
ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
E1000_IPCNFG_EEE_100M_AN);
@@ -2249,6 +2256,8 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
}
wr32(E1000_IPCNFG, ipcnfg);
wr32(E1000_EEER, eeer);
+ rd32(E1000_IPCNFG);
+ rd32(E1000_EEER);
out:
return ret_val;
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index ec7e4fe3e3ee..de4b41ec3c40 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -322,6 +322,9 @@
#define E1000_FCRTC_RTH_COAL_SHIFT 4
#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
+/* Timestamp in Rx buffer */
+#define E1000_RXPBS_CFG_TS_EN 0x80000000
+
/* SerDes Control */
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
@@ -360,6 +363,7 @@
#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
+#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */
#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
/* If this bit asserted, the driver should claim the interrupt */
#define E1000_ICR_INT_ASSERTED 0x80000000
@@ -399,6 +403,7 @@
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
+#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */
#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
@@ -510,6 +515,9 @@
#define E1000_TIMINCA_16NS_SHIFT 24
+#define E1000_TSICR_TXTS 0x00000002
+#define E1000_TSIM_TXTS 0x00000002
+
#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
#define E1000_MDICNFG_PHY_MASK 0x03E00000
@@ -849,8 +857,9 @@
#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */
#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */
#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */
-#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */
+#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */
#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
+#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */
/* SerDes Control */
#define E1000_GEN_CTL_READY 0x80000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 7be98b6f1052..3404bc79f4ca 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -464,6 +464,32 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
+ if (ret_val)
+ goto out;
+
+ /* Set MDI/MDIX mode */
+ ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
+ if (ret_val)
+ goto out;
+ phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
+ /*
+ * Options:
+ * 0 - Auto (default)
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ */
+ switch (hw->phy.mdix) {
+ case 1:
+ break;
+ case 2:
+ phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX;
+ break;
+ case 0:
+ default:
+ phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX;
+ break;
+ }
+ ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
out:
return ret_val;
@@ -2246,8 +2272,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
if (ret_val)
goto out;
- phy_data &= ~I82580_PHY_CTRL2_AUTO_MDIX;
- phy_data &= ~I82580_PHY_CTRL2_FORCE_MDI_MDIX;
+ phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 34e40619f16b..6ac3299bfcb9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -111,8 +111,9 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100
/* I82580 PHY Control 2 */
-#define I82580_PHY_CTRL2_AUTO_MDIX 0x0400
-#define I82580_PHY_CTRL2_FORCE_MDI_MDIX 0x0200
+#define I82580_PHY_CTRL2_MANUAL_MDIX 0x0200
+#define I82580_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
+#define I82580_PHY_CTRL2_MDIX_CFG_MASK 0x0600
/* I82580 PHY Diagnostics Status */
#define I82580_DSTATUS_CABLE_LENGTH 0x03FC
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 28394bea5253..e5db48594e8a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -91,6 +91,8 @@
#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
+#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
+#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
/* Filtering Registers */
#define E1000_SAQF(_n) (0x5980 + 4 * (_n))
@@ -347,6 +349,7 @@
/* Energy Efficient Ethernet "EEE" register */
#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */
+#define E1000_EEE_SU 0x0E34 /* EEE Setup */
/* Thermal Sensor Register */
#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 9e572dd29ab2..8aad230c0592 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -34,9 +34,11 @@
#include "e1000_mac.h"
#include "e1000_82575.h"
+#ifdef CONFIG_IGB_PTP
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
+#endif /* CONFIG_IGB_PTP */
#include <linux/bitops.h>
#include <linux/if_vlan.h>
@@ -99,7 +101,6 @@ struct vf_data_storage {
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
u16 tx_rate;
- struct pci_dev *vfdev;
};
#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
@@ -131,9 +132,9 @@ struct vf_data_storage {
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Supported Rx Buffer Sizes */
-#define IGB_RXBUFFER_512 512
+#define IGB_RXBUFFER_256 256
#define IGB_RXBUFFER_16384 16384
-#define IGB_RX_HDR_LEN IGB_RXBUFFER_512
+#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define IGB_TX_QUEUE_WAKE 16
@@ -167,8 +168,8 @@ struct igb_tx_buffer {
unsigned int bytecount;
u16 gso_segs;
__be16 protocol;
- dma_addr_t dma;
- u32 length;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
u32 tx_flags;
};
@@ -212,7 +213,6 @@ struct igb_q_vector {
struct igb_ring_container rx, tx;
struct napi_struct napi;
- int numa_node;
u16 itr_val;
u8 set_itr;
@@ -257,7 +257,6 @@ struct igb_ring {
};
/* Items past this point are only used during ring alloc / free */
dma_addr_t dma; /* phys address of the ring */
- int numa_node; /* node to alloc ring memory on */
};
enum e1000_ring_flags_t {
@@ -342,7 +341,6 @@ struct igb_adapter {
/* OS defined structs */
struct pci_dev *pdev;
- struct hwtstamp_config hwtstamp_config;
spinlock_t stats64_lock;
struct rtnl_link_stats64 stats64;
@@ -373,15 +371,19 @@ struct igb_adapter {
int vf_rate_link_speed;
u32 rss_queues;
u32 wvbr;
- int node;
u32 *shadow_vfta;
+#ifdef CONFIG_IGB_PTP
struct ptp_clock *ptp_clock;
- struct ptp_clock_info caps;
- struct delayed_work overflow_work;
+ struct ptp_clock_info ptp_caps;
+ struct delayed_work ptp_overflow_work;
+ struct work_struct ptp_tx_work;
+ struct sk_buff *ptp_tx_skb;
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
+#endif /* CONFIG_IGB_PTP */
+
char fw_version[32];
};
@@ -390,6 +392,7 @@ struct igb_adapter {
#define IGB_FLAG_QUAD_PORT_A (1 << 2)
#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
#define IGB_FLAG_DMAC (1 << 4)
+#define IGB_FLAG_PTP (1 << 5)
/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE 20408
@@ -435,13 +438,17 @@ extern void igb_power_up_link(struct igb_adapter *);
extern void igb_set_fw_version(struct igb_adapter *);
#ifdef CONFIG_IGB_PTP
extern void igb_ptp_init(struct igb_adapter *adapter);
-extern void igb_ptp_remove(struct igb_adapter *adapter);
-
-extern void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
- struct skb_shared_hwtstamps *hwtstamps,
- u64 systim);
+extern void igb_ptp_stop(struct igb_adapter *adapter);
+extern void igb_ptp_reset(struct igb_adapter *adapter);
+extern void igb_ptp_tx_work(struct work_struct *work);
+extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+extern void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb);
+extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd);
+#endif /* CONFIG_IGB_PTP */
-#endif
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
if (hw->phy.ops.reset)
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 70591117051b..2ea012849825 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -148,9 +148,9 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full|
SUPPORTED_Autoneg |
- SUPPORTED_TP);
- ecmd->advertising = (ADVERTISED_TP |
- ADVERTISED_Pause);
+ SUPPORTED_TP |
+ SUPPORTED_Pause);
+ ecmd->advertising = ADVERTISED_TP;
if (hw->mac.autoneg == 1) {
ecmd->advertising |= ADVERTISED_Autoneg;
@@ -158,6 +158,21 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->advertising |= hw->phy.autoneg_advertised;
}
+ if (hw->mac.autoneg != 1)
+ ecmd->advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+
+ if (hw->fc.requested_mode == e1000_fc_full)
+ ecmd->advertising |= ADVERTISED_Pause;
+ else if (hw->fc.requested_mode == e1000_fc_rx_pause)
+ ecmd->advertising |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ else if (hw->fc.requested_mode == e1000_fc_tx_pause)
+ ecmd->advertising |= ADVERTISED_Asym_Pause;
+ else
+ ecmd->advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+
ecmd->port = PORT_TP;
ecmd->phy_address = hw->phy.addr;
} else {
@@ -198,6 +213,19 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ /* MDI-X => 2; MDI => 1; Invalid => 0 */
+ if (hw->phy.media_type == e1000_media_type_copper)
+ ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
+ ETH_TP_MDI;
+ else
+ ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+ if (hw->phy.mdix == AUTO_ALL_MODES)
+ ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ else
+ ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+
return 0;
}
@@ -214,6 +242,22 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
return -EINVAL;
}
+ /*
+ * MDI setting is only allowed when autoneg is enabled because
+ * some hardware doesn't allow MDI setting when speed or
+ * duplex is forced.
+ */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ if (hw->phy.media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
+
+ if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (ecmd->autoneg != AUTONEG_ENABLE)) {
+ dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+ return -EINVAL;
+ }
+ }
+
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
msleep(1);
@@ -227,12 +271,25 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
hw->fc.requested_mode = e1000_fc_default;
} else {
u32 speed = ethtool_cmd_speed(ecmd);
+ /* calling this overrides forced MDI setting */
if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
clear_bit(__IGB_RESETTING, &adapter->state);
return -EINVAL;
}
}
+ /* MDI-X => 2; MDI => 1; Auto => 3 */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ /*
+ * fix up the value for auto (3 => 0) as zero is mapped
+ * internally to auto
+ */
+ if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ hw->phy.mdix = AUTO_ALL_MODES;
+ else
+ hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+ }
+
/* reset the link */
if (netif_running(adapter->netdev)) {
igb_down(adapter);
@@ -1469,33 +1526,22 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl_reg = 0;
- u16 phy_reg = 0;
hw->mac.autoneg = false;
- switch (hw->phy.type) {
- case e1000_phy_m88:
- /* Auto-MDI/MDIX Off */
- igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
- /* reset to update Auto-MDI/MDIX */
- igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
- /* autoneg off */
- igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
- break;
- case e1000_phy_82580:
- /* enable MII loopback */
- igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
- break;
- case e1000_phy_i210:
- /* set loopback speed in PHY */
- igb_read_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2),
- &phy_reg);
- phy_reg |= GS40G_MAC_SPEED_1G;
- igb_write_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2),
- phy_reg);
- ctrl_reg = rd32(E1000_CTRL_EXT);
- default:
- break;
+ if (hw->phy.type == e1000_phy_m88) {
+ if (hw->phy.id != I210_I_PHY_ID) {
+ /* Auto-MDI/MDIX Off */
+ igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
+ /* reset to update Auto-MDI/MDIX */
+ igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
+ /* autoneg off */
+ igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
+ } else {
+ /* force 1000, set loopback */
+ igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
+ igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
+ }
}
/* add small delay to avoid loopback test failure */
@@ -1513,7 +1559,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
E1000_CTRL_FD | /* Force Duplex to FULL */
E1000_CTRL_SLU); /* Set link up enable bit */
- if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210))
+ if (hw->phy.type == e1000_phy_m88)
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
wr32(E1000_CTRL, ctrl_reg);
@@ -1521,11 +1567,10 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
/* Disable the receiver on the PHY so when a cable is plugged in, the
* PHY does not begin to autoneg when a cable is reconnected to the NIC.
*/
- if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210))
+ if (hw->phy.type == e1000_phy_m88)
igb_phy_disable_receiver(adapter);
- udelay(500);
-
+ mdelay(500);
return 0;
}
@@ -1785,13 +1830,6 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
*data = 0;
goto out;
}
- if ((adapter->hw.mac.type == e1000_i210)
- || (adapter->hw.mac.type == e1000_i211)) {
- dev_err(&adapter->pdev->dev,
- "Loopback test not supported on this part at this time.\n");
- *data = 0;
- goto out;
- }
*data = igb_setup_desc_rings(adapter);
if (*data)
goto out;
@@ -2257,6 +2295,54 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
}
+static int igb_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
+ switch (adapter->hw.mac.type) {
+#ifdef CONFIG_IGB_PTP
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;
+
+ /* 82576 does not support timestamping all packets. */
+ if (adapter->hw.mac.type >= e1000_82580)
+ info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
+ else
+ info->rx_filters |=
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+#endif /* CONFIG_IGB_PTP */
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int igb_ethtool_begin(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2270,38 +2356,6 @@ static void igb_ethtool_complete(struct net_device *netdev)
pm_runtime_put(&adapter->pdev->dev);
}
-#ifdef CONFIG_IGB_PTP
-static int igb_ethtool_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
-{
- struct igb_adapter *adapter = netdev_priv(dev);
-
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
-
- if (adapter->ptp_clock)
- info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
-
- info->tx_types =
- (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
-
- info->rx_filters =
- (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_ALL) |
- (1 << HWTSTAMP_FILTER_SOME) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
-
- return 0;
-}
-
-#endif
static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
@@ -2328,11 +2382,9 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_ethtool_stats = igb_get_ethtool_stats,
.get_coalesce = igb_get_coalesce,
.set_coalesce = igb_set_coalesce,
+ .get_ts_info = igb_get_ts_info,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
-#ifdef CONFIG_IGB_PTP
- .get_ts_info = igb_ethtool_get_ts_info,
-#endif
};
void igb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 48cc4fb1a307..e1ceb37ef12e 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -172,8 +172,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *);
#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
-static int igb_find_enabled_vfs(struct igb_adapter *adapter);
-static int igb_check_vf_assignment(struct igb_adapter *adapter);
+static bool igb_vfs_are_assigned(struct igb_adapter *adapter);
#endif
#ifdef CONFIG_PM
@@ -217,7 +216,7 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);
-static struct pci_error_handlers igb_err_handler = {
+static const struct pci_error_handlers igb_err_handler = {
.error_detected = igb_io_error_detected,
.slot_reset = igb_io_slot_reset,
.resume = igb_io_resume,
@@ -404,8 +403,8 @@ static void igb_dump(struct igb_adapter *adapter)
buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
n, tx_ring->next_to_use, tx_ring->next_to_clean,
- (u64)buffer_info->dma,
- buffer_info->length,
+ (u64)dma_unmap_addr(buffer_info, dma),
+ dma_unmap_len(buffer_info, len),
buffer_info->next_to_watch,
(u64)buffer_info->time_stamp);
}
@@ -456,8 +455,8 @@ static void igb_dump(struct igb_adapter *adapter)
" %04X %p %016llX %p%s\n", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- (u64)buffer_info->dma,
- buffer_info->length,
+ (u64)dma_unmap_addr(buffer_info, dma),
+ dma_unmap_len(buffer_info, len),
buffer_info->next_to_watch,
(u64)buffer_info->time_stamp,
buffer_info->skb, next_desc);
@@ -466,7 +465,8 @@ static void igb_dump(struct igb_adapter *adapter)
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1, buffer_info->skb->data,
- buffer_info->length, true);
+ dma_unmap_len(buffer_info, len),
+ true);
}
}
@@ -683,52 +683,29 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
{
struct igb_ring *ring;
int i;
- int orig_node = adapter->node;
for (i = 0; i < adapter->num_tx_queues; i++) {
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
- ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
- adapter->node);
- if (!ring)
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+ ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
if (!ring)
goto err;
ring->count = adapter->tx_ring_count;
ring->queue_index = i;
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
- ring->numa_node = adapter->node;
/* For 82575, context index must be unique per ring. */
if (adapter->hw.mac.type == e1000_82575)
set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
adapter->tx_ring[i] = ring;
}
- /* Restore the adapter's original node */
- adapter->node = orig_node;
for (i = 0; i < adapter->num_rx_queues; i++) {
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
- ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
- adapter->node);
- if (!ring)
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+ ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
if (!ring)
goto err;
ring->count = adapter->rx_ring_count;
ring->queue_index = i;
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
- ring->numa_node = adapter->node;
/* set flag indicating ring supports SCTP checksum offload */
if (adapter->hw.mac.type >= e1000_82576)
set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
@@ -742,16 +719,12 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
adapter->rx_ring[i] = ring;
}
- /* Restore the adapter's original node */
- adapter->node = orig_node;
igb_cache_ring_register(adapter);
return 0;
err:
- /* Restore the adapter's original node */
- adapter->node = orig_node;
igb_free_queues(adapter);
return -ENOMEM;
@@ -1117,24 +1090,10 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
struct igb_q_vector *q_vector;
struct e1000_hw *hw = &adapter->hw;
int v_idx;
- int orig_node = adapter->node;
for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
- if ((adapter->num_q_vectors == (adapter->num_rx_queues +
- adapter->num_tx_queues)) &&
- (adapter->num_rx_queues == v_idx))
- adapter->node = orig_node;
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
- q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
- adapter->node);
- if (!q_vector)
- q_vector = kzalloc(sizeof(struct igb_q_vector),
- GFP_KERNEL);
+ q_vector = kzalloc(sizeof(struct igb_q_vector),
+ GFP_KERNEL);
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
@@ -1143,14 +1102,10 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
adapter->q_vector[v_idx] = q_vector;
}
- /* Restore the adapter's original node */
- adapter->node = orig_node;
return 0;
err_out:
- /* Restore the adapter's original node */
- adapter->node = orig_node;
igb_free_q_vectors(adapter);
return -ENOMEM;
}
@@ -1751,6 +1706,11 @@ void igb_reset(struct igb_adapter *adapter)
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
+#ifdef CONFIG_IGB_PTP
+ /* Re-enable PTP, where applicable. */
+ igb_ptp_reset(adapter);
+#endif /* CONFIG_IGB_PTP */
+
igb_get_phy_info(hw);
}
@@ -2180,11 +2140,12 @@ static int __devinit igb_probe(struct pci_dev *pdev,
}
#endif
+
#ifdef CONFIG_IGB_PTP
/* do hw tstamp init after resetting */
igb_ptp_init(adapter);
+#endif /* CONFIG_IGB_PTP */
-#endif
dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
/* print bus type/speed/width info */
dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -2259,9 +2220,9 @@ static void __devexit igb_remove(struct pci_dev *pdev)
pm_runtime_get_noresume(&pdev->dev);
#ifdef CONFIG_IGB_PTP
- igb_ptp_remove(adapter);
+ igb_ptp_stop(adapter);
+#endif /* CONFIG_IGB_PTP */
-#endif
/*
* The watchdog timer may be rescheduled, so explicitly
* disable watchdog from being rescheduled.
@@ -2294,11 +2255,11 @@ static void __devexit igb_remove(struct pci_dev *pdev)
/* reclaim resources allocated to VFs */
if (adapter->vf_data) {
/* disable iov and allow time for transactions to clear */
- if (!igb_check_vf_assignment(adapter)) {
+ if (igb_vfs_are_assigned(adapter)) {
+ dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+ } else {
pci_disable_sriov(pdev);
msleep(500);
- } else {
- dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
}
kfree(adapter->vf_data);
@@ -2338,7 +2299,7 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
#ifdef CONFIG_PCI_IOV
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
- int old_vfs = igb_find_enabled_vfs(adapter);
+ int old_vfs = pci_num_vf(adapter->pdev);
int i;
/* Virtualization features not supported on i210 family. */
@@ -2418,8 +2379,6 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
VLAN_HLEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
- adapter->node = -1;
-
spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
@@ -2666,13 +2625,11 @@ static int igb_close(struct net_device *netdev)
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
- int orig_node = dev_to_node(dev);
int size;
size = sizeof(struct igb_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
- if (!tx_ring->tx_buffer_info)
- tx_ring->tx_buffer_info = vzalloc(size);
+
+ tx_ring->tx_buffer_info = vzalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
@@ -2680,18 +2637,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- set_dev_node(dev, tx_ring->numa_node);
tx_ring->desc = dma_alloc_coherent(dev,
tx_ring->size,
&tx_ring->dma,
GFP_KERNEL);
- set_dev_node(dev, orig_node);
- if (!tx_ring->desc)
- tx_ring->desc = dma_alloc_coherent(dev,
- tx_ring->size,
- &tx_ring->dma,
- GFP_KERNEL);
-
if (!tx_ring->desc)
goto err;
@@ -2702,8 +2651,8 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
err:
vfree(tx_ring->tx_buffer_info);
- dev_err(dev,
- "Unable to allocate memory for the transmit descriptor ring\n");
+ tx_ring->tx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
return -ENOMEM;
}
@@ -2820,34 +2769,23 @@ static void igb_configure_tx(struct igb_adapter *adapter)
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- int orig_node = dev_to_node(dev);
- int size, desc_len;
+ int size;
size = sizeof(struct igb_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
- if (!rx_ring->rx_buffer_info)
- rx_ring->rx_buffer_info = vzalloc(size);
+
+ rx_ring->rx_buffer_info = vzalloc(size);
if (!rx_ring->rx_buffer_info)
goto err;
- desc_len = sizeof(union e1000_adv_rx_desc);
/* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * desc_len;
+ rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- set_dev_node(dev, rx_ring->numa_node);
rx_ring->desc = dma_alloc_coherent(dev,
rx_ring->size,
&rx_ring->dma,
GFP_KERNEL);
- set_dev_node(dev, orig_node);
- if (!rx_ring->desc)
- rx_ring->desc = dma_alloc_coherent(dev,
- rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL);
-
if (!rx_ring->desc)
goto err;
@@ -2859,8 +2797,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
err:
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- dev_err(dev, "Unable to allocate memory for the receive descriptor"
- " ring\n");
+ dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
return -ENOMEM;
}
@@ -2898,57 +2835,48 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 mrqc, rxcsum;
- u32 j, num_rx_queues, shift = 0, shift2 = 0;
- union e1000_reta {
- u32 dword;
- u8 bytes[4];
- } reta;
- static const u8 rsshash[40] = {
- 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
- 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
- 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
- 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
+ u32 j, num_rx_queues, shift = 0;
+ static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
+ 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
+ 0xA32DCB77, 0x0CF23080, 0x3BB7426A,
+ 0xFA01ACBE };
/* Fill out hash function seeds */
- for (j = 0; j < 10; j++) {
- u32 rsskey = rsshash[(j * 4)];
- rsskey |= rsshash[(j * 4) + 1] << 8;
- rsskey |= rsshash[(j * 4) + 2] << 16;
- rsskey |= rsshash[(j * 4) + 3] << 24;
- array_wr32(E1000_RSSRK(0), j, rsskey);
- }
+ for (j = 0; j < 10; j++)
+ wr32(E1000_RSSRK(j), rsskey[j]);
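The rsskey[] table introduced above is the old 40-byte rsshash[] seed pre-packed into ten little-endian 32-bit words, so the per-byte packing that used to run in igb_setup_mrqc() now happens at build time. A minimal standalone check of that equivalence (my own sketch, not part of the patch):

	/*
	 * Standalone sketch, not part of the patch: verifies that the
	 * pre-packed rsskey[] words equal the removed rsshash[] bytes
	 * packed little-endian four at a time, exactly as the old loop
	 * did at runtime.
	 */
	#include <inttypes.h>
	#include <stdio.h>

	static const uint8_t rsshash[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

	int main(void)
	{
		for (int j = 0; j < 10; j++) {
			uint32_t w = rsshash[j * 4];

			w |= (uint32_t)rsshash[j * 4 + 1] << 8;
			w |= (uint32_t)rsshash[j * 4 + 2] << 16;
			w |= (uint32_t)rsshash[j * 4 + 3] << 24;
			/* prints 0xDA565A6D, 0xC20E5B25, ... matching rsskey[] above */
			printf("rsskey[%d] = 0x%08" PRIX32 "\n", j, w);
		}
		return 0;
	}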
num_rx_queues = adapter->rss_queues;
- if (adapter->vfs_allocated_count) {
- /* 82575 and 82576 supports 2 RSS queues for VMDq */
- switch (hw->mac.type) {
- case e1000_i350:
- case e1000_82580:
- num_rx_queues = 1;
- shift = 0;
- break;
- case e1000_82576:
+ switch (hw->mac.type) {
+ case e1000_82575:
+ shift = 6;
+ break;
+ case e1000_82576:
+ /* 82576 supports 2 RSS queues for SR-IOV */
+ if (adapter->vfs_allocated_count) {
shift = 3;
num_rx_queues = 2;
- break;
- case e1000_82575:
- shift = 2;
- shift2 = 6;
- default:
- break;
}
- } else {
- if (hw->mac.type == e1000_82575)
- shift = 6;
+ break;
+ default:
+ break;
}
- for (j = 0; j < (32 * 4); j++) {
- reta.bytes[j & 3] = (j % num_rx_queues) << shift;
- if (shift2)
- reta.bytes[j & 3] |= num_rx_queues << shift2;
- if ((j & 3) == 3)
- wr32(E1000_RETA(j >> 2), reta.dword);
+ /*
+ * Populate the indirection table 4 entries at a time. To do this
+ * we are generating the results for n and n+2 and then interleaving
+ * those with the results with n+1 and n+3.
+ */
+ for (j = 0; j < 32; j++) {
+ /* first pass generates n and n+2 */
+ u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
+ u32 reta = (base & 0x07800780) >> (7 - shift);
+
+ /* second pass generates n+1 and n+3 */
+ base += 0x00010001 * num_rx_queues;
+ reta |= (base & 0x07800780) << (1 + shift);
+
+ wr32(E1000_RETA(j), reta);
}
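The comment above describes the interleaving trick only briefly, so here is a standalone cross-check (my own sketch, not from the patch): it fills a 128-entry table with the multiply-and-mask loop and compares every entry against the plain per-entry form the trick appears to encode, entry i = ((i * num_rx_queues) / 128) << shift.

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	static void build_reta(uint8_t reta[128], uint32_t q, uint32_t shift)
	{
		for (uint32_t j = 0; j < 32; j++) {
			/* first pass generates entries 4j and 4j+2 */
			uint32_t base = ((j * 0x00040004) + 0x00020000) * q;
			uint32_t word = (base & 0x07800780) >> (7 - shift);

			/* second pass generates entries 4j+1 and 4j+3 */
			base += 0x00010001 * q;
			word |= (base & 0x07800780) << (1 + shift);

			reta[4 * j + 0] = word & 0xff;
			reta[4 * j + 1] = (word >> 8) & 0xff;
			reta[4 * j + 2] = (word >> 16) & 0xff;
			reta[4 * j + 3] = (word >> 24) & 0xff;
		}
	}

	int main(void)
	{
		/* shift = 0 covers 82580/i350/i210; q = 2, shift = 3 covers 82576 SR-IOV */
		const struct { uint32_t q, shift; } cfg[] = {
			{1, 0}, {2, 0}, {4, 0}, {8, 0}, {2, 3}
		};
		uint8_t reta[128];

		for (unsigned int c = 0; c < sizeof(cfg) / sizeof(cfg[0]); c++) {
			build_reta(reta, cfg[c].q, cfg[c].shift);
			for (uint32_t i = 0; i < 128; i++)
				assert(reta[i] ==
				       (((i * cfg[c].q) / 128) << cfg[c].shift));
		}
		printf("redirection table matches the per-entry computation\n");
		return 0;
	}

Note this produces an evenly split block layout (128/num_rx_queues consecutive entries per queue), whereas the removed code produced the interleaved (i % num_rx_queues) layout; both distribute the redirection table uniformly across the queues.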
/*
@@ -3184,8 +3112,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+#ifdef CONFIG_IGB_PTP
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
+#endif /* CONFIG_IGB_PTP */
/* Only set Drop Enable if we are supporting multiple queues */
if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
srrctl |= E1000_SRRCTL_DROP_EN;
@@ -3269,20 +3199,20 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
{
if (tx_buffer->skb) {
dev_kfree_skb_any(tx_buffer->skb);
- if (tx_buffer->dma)
+ if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
- tx_buffer->dma,
- tx_buffer->length,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
- } else if (tx_buffer->dma) {
+ } else if (dma_unmap_len(tx_buffer, len)) {
dma_unmap_page(ring->dev,
- tx_buffer->dma,
- tx_buffer->length,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
}
tx_buffer->next_to_watch = NULL;
tx_buffer->skb = NULL;
- tx_buffer->dma = 0;
+ dma_unmap_len_set(tx_buffer, len, 0);
/* buffer_info must be completely set up in the transmit path */
}
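The change from the raw dma/length fields to the dma_unmap_len()/dma_unmap_addr() accessors, here and in the Tx map and clean paths further down, means the unmap bookkeeping only occupies storage on architectures that actually need it for dma_unmap_single()/dma_unmap_page(). A kernel-style sketch of the pattern, using a made-up example_tx_buffer structure (illustration only; it builds only inside a kernel tree):

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/skbuff.h>

	struct example_tx_buffer {
		struct sk_buff *skb;
		DEFINE_DMA_UNMAP_ADDR(dma);	/* replaces a bare dma_addr_t field */
		DEFINE_DMA_UNMAP_LEN(len);	/* replaces a bare length field */
	};

	static int example_map(struct device *dev, struct example_tx_buffer *buf,
			       void *data, unsigned int size)
	{
		dma_addr_t dma = dma_map_single(dev, data, size, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma))
			return -ENOMEM;

		dma_unmap_addr_set(buf, dma, dma);
		dma_unmap_len_set(buf, len, size);
		return 0;
	}

	static void example_unmap(struct device *dev, struct example_tx_buffer *buf)
	{
		if (dma_unmap_len(buf, len)) {
			dma_unmap_single(dev, dma_unmap_addr(buf, dma),
					 dma_unmap_len(buf, len), DMA_TO_DEVICE);
			dma_unmap_len_set(buf, len, 0);
		}
	}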
@@ -4229,9 +4159,11 @@ static __le32 igb_tx_cmd_type(u32 tx_flags)
if (tx_flags & IGB_TX_FLAGS_VLAN)
cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
+#ifdef CONFIG_IGB_PTP
/* set timestamp bit if present */
- if (tx_flags & IGB_TX_FLAGS_TSTAMP)
+ if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP))
cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
+#endif /* CONFIG_IGB_PTP */
/* set segmentation bits for TSO */
if (tx_flags & IGB_TX_FLAGS_TSO)
@@ -4275,7 +4207,7 @@ static void igb_tx_map(struct igb_ring *tx_ring,
const u8 hdr_len)
{
struct sk_buff *skb = first->skb;
- struct igb_tx_buffer *tx_buffer_info;
+ struct igb_tx_buffer *tx_buffer;
union e1000_adv_tx_desc *tx_desc;
dma_addr_t dma;
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
@@ -4296,8 +4228,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,
goto dma_error;
/* record length, and DMA address */
- first->length = size;
- first->dma = dma;
+ dma_unmap_len_set(first, len, size);
+ dma_unmap_addr_set(first, dma, dma);
tx_desc->read.buffer_addr = cpu_to_le64(dma);
for (;;) {
@@ -4339,9 +4271,9 @@ static void igb_tx_map(struct igb_ring *tx_ring,
if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- tx_buffer_info->length = size;
- tx_buffer_info->dma = dma;
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
tx_desc->read.olinfo_status = 0;
tx_desc->read.buffer_addr = cpu_to_le64(dma);
@@ -4392,9 +4324,9 @@ dma_error:
/* clear dma mappings for failed tx_buffer_info map */
for (;;) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
- if (tx_buffer_info == first)
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+ if (tx_buffer == first)
break;
if (i == 0)
i = tx_ring->count;
@@ -4440,6 +4372,9 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_ring *tx_ring)
{
+#ifdef CONFIG_IGB_PTP
+ struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
+#endif /* CONFIG_IGB_PTP */
struct igb_tx_buffer *first;
int tso;
u32 tx_flags = 0;
@@ -4462,10 +4397,17 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+#ifdef CONFIG_IGB_PTP
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ !(adapter->ptp_tx_skb))) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGB_TX_FLAGS_TSTAMP;
+
+ adapter->ptp_tx_skb = skb_get(skb);
+ if (adapter->hw.mac.type == e1000_82576)
+ schedule_work(&adapter->ptp_tx_work);
}
+#endif /* CONFIG_IGB_PTP */
if (vlan_tx_tag_present(skb)) {
tx_flags |= IGB_TX_FLAGS_VLAN;
@@ -4661,11 +4603,13 @@ void igb_update_stats(struct igb_adapter *adapter,
bytes = 0;
packets = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
+ u32 rqdpc = rd32(E1000_RQDPC(i));
struct igb_ring *ring = adapter->rx_ring[i];
- ring->rx_stats.drops += rqdpc_tmp;
- net_stats->rx_fifo_errors += rqdpc_tmp;
+ if (rqdpc) {
+ ring->rx_stats.drops += rqdpc;
+ net_stats->rx_fifo_errors += rqdpc;
+ }
do {
start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
@@ -4755,7 +4699,11 @@ void igb_update_stats(struct igb_adapter *adapter,
reg = rd32(E1000_CTRL_EXT);
if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
adapter->stats.rxerrc += rd32(E1000_RXERRC);
- adapter->stats.tncrs += rd32(E1000_TNCRS);
+
+ /* this stat has invalid values on i210/i211 */
+ if ((hw->mac.type != e1000_i210) &&
+ (hw->mac.type != e1000_i211))
+ adapter->stats.tncrs += rd32(E1000_TNCRS);
}
adapter->stats.tsctc += rd32(E1000_TSCTC);
@@ -4852,6 +4800,19 @@ static irqreturn_t igb_msix_other(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+#ifdef CONFIG_IGB_PTP
+ if (icr & E1000_ICR_TS) {
+ u32 tsicr = rd32(E1000_TSICR);
+
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* acknowledge the interrupt */
+ wr32(E1000_TSICR, E1000_TSICR_TXTS);
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+ }
+ }
+#endif /* CONFIG_IGB_PTP */
+
wr32(E1000_EIMS, adapter->eims_other);
return IRQ_HANDLED;
@@ -5002,102 +4963,43 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
unsigned char mac_addr[ETH_ALEN];
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_hw *hw = &adapter->hw;
- struct pci_dev *pvfdev;
- unsigned int device_id;
- u16 thisvf_devfn;
eth_random_addr(mac_addr);
igb_set_vf_mac(adapter, vf, mac_addr);
- switch (adapter->hw.mac.type) {
- case e1000_82576:
- device_id = IGB_82576_VF_DEV_ID;
- /* VF Stride for 82576 is 2 */
- thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
- (pdev->devfn & 1);
- break;
- case e1000_i350:
- device_id = IGB_I350_VF_DEV_ID;
- /* VF Stride for I350 is 4 */
- thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
- (pdev->devfn & 3);
- break;
- default:
- device_id = 0;
- thisvf_devfn = 0;
- break;
- }
-
- pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
- while (pvfdev) {
- if (pvfdev->devfn == thisvf_devfn)
- break;
- pvfdev = pci_get_device(hw->vendor_id,
- device_id, pvfdev);
- }
-
- if (pvfdev)
- adapter->vf_data[vf].vfdev = pvfdev;
- else
- dev_err(&pdev->dev,
- "Couldn't find pci dev ptr for VF %4.4x\n",
- thisvf_devfn);
- return pvfdev != NULL;
+ return 0;
}
-static int igb_find_enabled_vfs(struct igb_adapter *adapter)
+static bool igb_vfs_are_assigned(struct igb_adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- struct pci_dev *pvfdev;
- u16 vf_devfn = 0;
- u16 vf_stride;
- unsigned int device_id;
- int vfs_found = 0;
+ struct pci_dev *vfdev;
+ int dev_id;
switch (adapter->hw.mac.type) {
case e1000_82576:
- device_id = IGB_82576_VF_DEV_ID;
- /* VF Stride for 82576 is 2 */
- vf_stride = 2;
+ dev_id = IGB_82576_VF_DEV_ID;
break;
case e1000_i350:
- device_id = IGB_I350_VF_DEV_ID;
- /* VF Stride for I350 is 4 */
- vf_stride = 4;
+ dev_id = IGB_I350_VF_DEV_ID;
break;
default:
- device_id = 0;
- vf_stride = 0;
- break;
- }
-
- vf_devfn = pdev->devfn + 0x80;
- pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
- while (pvfdev) {
- if (pvfdev->devfn == vf_devfn &&
- (pvfdev->bus->number >= pdev->bus->number))
- vfs_found++;
- vf_devfn += vf_stride;
- pvfdev = pci_get_device(hw->vendor_id,
- device_id, pvfdev);
+ return false;
}
- return vfs_found;
-}
-
-static int igb_check_vf_assignment(struct igb_adapter *adapter)
-{
- int i;
- for (i = 0; i < adapter->vfs_allocated_count; i++) {
- if (adapter->vf_data[i].vfdev) {
- if (adapter->vf_data[i].vfdev->dev_flags &
- PCI_DEV_FLAGS_ASSIGNED)
+ /* loop through all the VFs to see if we own any that are assigned */
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
+ while (vfdev) {
+ /* if we don't own it we don't care */
+ if (vfdev->is_virtfn && vfdev->physfn == pdev) {
+ /* if it is assigned we cannot release it */
+ if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
return true;
}
+
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
}
+
return false;
}
@@ -5643,6 +5545,19 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+#ifdef CONFIG_IGB_PTP
+ if (icr & E1000_ICR_TS) {
+ u32 tsicr = rd32(E1000_TSICR);
+
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* acknowledge the interrupt */
+ wr32(E1000_TSICR, E1000_TSICR_TXTS);
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+ }
+ }
+#endif /* CONFIG_IGB_PTP */
+
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -5684,6 +5599,19 @@ static irqreturn_t igb_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+#ifdef CONFIG_IGB_PTP
+ if (icr & E1000_ICR_TS) {
+ u32 tsicr = rd32(E1000_TSICR);
+
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* acknowledge the interrupt */
+ wr32(E1000_TSICR, E1000_TSICR_TXTS);
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+ }
+ }
+#endif /* CONFIG_IGB_PTP */
+
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -5743,37 +5671,6 @@ static int igb_poll(struct napi_struct *napi, int budget)
return 0;
}
-#ifdef CONFIG_IGB_PTP
-/**
- * igb_tx_hwtstamp - utility function which checks for TX time stamp
- * @q_vector: pointer to q_vector containing needed info
- * @buffer: pointer to igb_tx_buffer structure
- *
- * If we were asked to do hardware stamping and such a time stamp is
- * available, then it must have been for this skb here because we only
- * allow only one such packet into the queue.
- */
-static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
- struct igb_tx_buffer *buffer_info)
-{
- struct igb_adapter *adapter = q_vector->adapter;
- struct e1000_hw *hw = &adapter->hw;
- struct skb_shared_hwtstamps shhwtstamps;
- u64 regval;
-
- /* if skb does not support hw timestamp or TX stamp not valid exit */
- if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
- !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
- return;
-
- regval = rd32(E1000_TXSTMPL);
- regval |= (u64)rd32(E1000_TXSTMPH) << 32;
-
- igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
- skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
-}
-
-#endif
/**
* igb_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: pointer to q_vector containing needed info
@@ -5785,7 +5682,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *tx_ring = q_vector->tx.ring;
struct igb_tx_buffer *tx_buffer;
- union e1000_adv_tx_desc *tx_desc, *eop_desc;
+ union e1000_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
unsigned int budget = q_vector->tx.work_limit;
unsigned int i = tx_ring->next_to_clean;
@@ -5797,16 +5694,16 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
tx_desc = IGB_TX_DESC(tx_ring, i);
i -= tx_ring->count;
- for (; budget; budget--) {
- eop_desc = tx_buffer->next_to_watch;
-
- /* prevent any other reads prior to eop_desc */
- rmb();
+ do {
+ union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
/* if next_to_watch is not set then there is no work pending */
if (!eop_desc)
break;
+ /* prevent any other reads prior to eop_desc */
+ rmb();
+
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
break;
@@ -5818,25 +5715,21 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
-#ifdef CONFIG_IGB_PTP
- /* retrieve hardware timestamp */
- igb_tx_hwtstamp(q_vector, tx_buffer);
-
-#endif
/* free the skb */
dev_kfree_skb_any(tx_buffer->skb);
- tx_buffer->skb = NULL;
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
- tx_buffer->dma,
- tx_buffer->length,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
+ /* clear tx_buffer data */
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+
/* clear last DMA location and unmap remaining buffers */
while (tx_desc != eop_desc) {
- tx_buffer->dma = 0;
-
tx_buffer++;
tx_desc++;
i++;
@@ -5847,17 +5740,15 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
}
/* unmap any remaining paged data */
- if (tx_buffer->dma) {
+ if (dma_unmap_len(tx_buffer, len)) {
dma_unmap_page(tx_ring->dev,
- tx_buffer->dma,
- tx_buffer->length,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
}
}
- /* clear last DMA location */
- tx_buffer->dma = 0;
-
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
tx_desc++;
@@ -5867,7 +5758,13 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
tx_buffer = tx_ring->tx_buffer_info;
tx_desc = IGB_TX_DESC(tx_ring, 0);
}
- }
+
+ /* issue prefetch for next Tx descriptor */
+ prefetch(tx_desc);
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
@@ -5883,12 +5780,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
struct e1000_hw *hw = &adapter->hw;
- eop_desc = tx_buffer->next_to_watch;
-
/* Detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i */
clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
- if (eop_desc &&
+ if (tx_buffer->next_to_watch &&
time_after(jiffies, tx_buffer->time_stamp +
(adapter->tx_timeout_factor * HZ)) &&
!(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
@@ -5912,9 +5807,9 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
tx_ring->next_to_use,
tx_ring->next_to_clean,
tx_buffer->time_stamp,
- eop_desc,
+ tx_buffer->next_to_watch,
jiffies,
- eop_desc->wb.status);
+ tx_buffer->next_to_watch->wb.status);
netif_stop_subqueue(tx_ring->netdev,
tx_ring->queue_index);
@@ -5994,47 +5889,6 @@ static inline void igb_rx_hash(struct igb_ring *ring,
skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}
-#ifdef CONFIG_IGB_PTP
-static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- struct igb_adapter *adapter = q_vector->adapter;
- struct e1000_hw *hw = &adapter->hw;
- u64 regval;
-
- if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
- E1000_RXDADV_STAT_TS))
- return;
-
- /*
- * If this bit is set, then the RX registers contain the time stamp. No
- * other packet will be time stamped until we read these registers, so
- * read the registers to make them available again. Because only one
- * packet can be time stamped at a time, we know that the register
- * values must belong to this one here and therefore we don't need to
- * compare any of the additional attributes stored for it.
- *
- * If nothing went wrong, then it should have a shared tx_flags that we
- * can turn into a skb_shared_hwtstamps.
- */
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- u32 *stamp = (u32 *)skb->data;
- regval = le32_to_cpu(*(stamp + 2));
- regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
- skb_pull(skb, IGB_TS_HDR_LEN);
- } else {
- if(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
- return;
-
- regval = rd32(E1000_RXSTMPL);
- regval |= (u64)rd32(E1000_RXSTMPH) << 32;
- }
-
- igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
-}
-
-#endif
static void igb_rx_vlan(struct igb_ring *ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -6146,8 +6000,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
}
#ifdef CONFIG_IGB_PTP
- igb_rx_hwtstamp(q_vector, rx_desc, skb);
-#endif
+ igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
+#endif /* CONFIG_IGB_PTP */
igb_rx_hash(rx_ring, rx_desc, skb);
igb_rx_checksum(rx_ring, rx_desc, skb);
igb_rx_vlan(rx_ring, rx_desc, skb);
@@ -6341,181 +6195,6 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
/**
- * igb_hwtstamp_ioctl - control hardware time stamping
- * @netdev:
- * @ifreq:
- * @cmd:
- *
- * Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't case any overhead
- * when no packet needs it. At most one packet in the queue may be
- * marked for time stamping, otherwise it would be impossible to tell
- * for sure to which packet the hardware time stamp belongs.
- *
- * Incoming time stamping has to be configured via the hardware
- * filters. Not all combinations are supported, in particular event
- * type has to be specified. Matching the kind of event packet is
- * not supported, with the exception of "all V2 events regardless of
- * level 2 or 4".
- *
- **/
-static int igb_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- struct hwtstamp_config config;
- u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
- u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
- u32 tsync_rx_cfg = 0;
- bool is_l4 = false;
- bool is_l2 = false;
- u32 regval;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- tsync_tx_ctl = 0;
- case HWTSTAMP_TX_ON:
- break;
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- tsync_rx_ctl = 0;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_ALL:
- /*
- * register TSYNCRXCFG must be set, therefore it is not
- * possible to time stamp both Sync and Delay_Req messages
- * => fall back to time stamping all packets
- */
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
- is_l4 = true;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
- is_l4 = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- is_l2 = true;
- is_l4 = true;
- break;
- default:
- return -ERANGE;
- }
-
- if (hw->mac.type == e1000_82575) {
- if (tsync_rx_ctl | tsync_tx_ctl)
- return -EINVAL;
- return 0;
- }
-
- /*
- * Per-packet timestamping only works if all packets are
- * timestamped, so enable timestamping in all packets as
- * long as one rx filter was configured.
- */
- if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
- tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- }
-
- /* enable/disable TX */
- regval = rd32(E1000_TSYNCTXCTL);
- regval &= ~E1000_TSYNCTXCTL_ENABLED;
- regval |= tsync_tx_ctl;
- wr32(E1000_TSYNCTXCTL, regval);
-
- /* enable/disable RX */
- regval = rd32(E1000_TSYNCRXCTL);
- regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
- regval |= tsync_rx_ctl;
- wr32(E1000_TSYNCRXCTL, regval);
-
- /* define which PTP packets are time stamped */
- wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
-
- /* define ethertype filter for timestamped packets */
- if (is_l2)
- wr32(E1000_ETQF(3),
- (E1000_ETQF_FILTER_ENABLE | /* enable filter */
- E1000_ETQF_1588 | /* enable timestamping */
- ETH_P_1588)); /* 1588 eth protocol type */
- else
- wr32(E1000_ETQF(3), 0);
-
-#define PTP_PORT 319
- /* L4 Queue Filter[3]: filter by destination port and protocol */
- if (is_l4) {
- u32 ftqf = (IPPROTO_UDP /* UDP */
- | E1000_FTQF_VF_BP /* VF not compared */
- | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
- | E1000_FTQF_MASK); /* mask all inputs */
- ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
-
- wr32(E1000_IMIR(3), htons(PTP_PORT));
- wr32(E1000_IMIREXT(3),
- (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
- if (hw->mac.type == e1000_82576) {
- /* enable source port check */
- wr32(E1000_SPQF(3), htons(PTP_PORT));
- ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
- }
- wr32(E1000_FTQF(3), ftqf);
- } else {
- wr32(E1000_FTQF(3), E1000_FTQF_MASK);
- }
- wrfl();
-
- adapter->hwtstamp_config = config;
-
- /* clear TX/RX time stamp registers, just to be sure */
- regval = rd32(E1000_TXSTMPH);
- regval = rd32(E1000_RXSTMPH);
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-
-/**
* igb_ioctl -
* @netdev:
* @ifreq:
@@ -6528,8 +6207,10 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG:
case SIOCSMIIREG:
return igb_mii_ioctl(netdev, ifr, cmd);
+#ifdef CONFIG_IGB_PTP
case SIOCSHWTSTAMP:
- return igb_hwtstamp_ioctl(netdev, ifr, cmd);
+ return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
+#endif /* CONFIG_IGB_PTP */
default:
return -EOPNOTSUPP;
}
@@ -6538,28 +6219,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
struct igb_adapter *adapter = hw->back;
- u16 cap_offset;
- cap_offset = adapter->pdev->pcie_cap;
- if (!cap_offset)
+ if (pcie_capability_read_word(adapter->pdev, reg, value))
return -E1000_ERR_CONFIG;
- pci_read_config_word(adapter->pdev, cap_offset + reg, value);
-
return 0;
}
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
struct igb_adapter *adapter = hw->back;
- u16 cap_offset;
- cap_offset = adapter->pdev->pcie_cap;
- if (!cap_offset)
+ if (pcie_capability_write_word(adapter->pdev, reg, *value))
return -E1000_ERR_CONFIG;
- pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
-
return 0;
}
@@ -6675,6 +6348,10 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
default:
goto err_inval;
}
+
+ /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+ adapter->hw.phy.mdix = AUTO_ALL_MODES;
+
return 0;
err_inval:
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index c846ea9131a3..ee21445157a3 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -69,22 +69,22 @@
* 2^40 * 10^-9 / 60 = 18.3 minutes.
*/
-#define IGB_OVERFLOW_PERIOD (HZ * 60 * 9)
-#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
-#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
-#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
-#define IGB_NBITS_82580 40
+#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
+#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
+#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
+#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
+#define IGB_NBITS_82580 40
/*
* SYSTIM read access for the 82576
*/
-static cycle_t igb_82576_systim_read(const struct cyclecounter *cc)
+static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
{
- u64 val;
- u32 lo, hi;
struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
struct e1000_hw *hw = &igb->hw;
+ u64 val;
+ u32 lo, hi;
lo = rd32(E1000_SYSTIML);
hi = rd32(E1000_SYSTIMH);
@@ -99,12 +99,12 @@ static cycle_t igb_82576_systim_read(const struct cyclecounter *cc)
* SYSTIM read access for the 82580
*/
-static cycle_t igb_82580_systim_read(const struct cyclecounter *cc)
+static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
{
- u64 val;
- u32 lo, hi, jk;
struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
struct e1000_hw *hw = &igb->hw;
+ u64 val;
+ u32 lo, hi, jk;
/*
* The timestamp latches on lowest register read. For the 82580
@@ -122,16 +122,101 @@ static cycle_t igb_82580_systim_read(const struct cyclecounter *cc)
}
/*
+ * SYSTIM read access for I210/I211
+ */
+
+static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 sec, nsec, jk;
+
+ /*
+ * The timestamp latches on lowest register read. For I210/I211, the
+ * lowest register is SYSTIMR. Since we only need to provide nanosecond
+ * resolution, we can ignore it.
+ */
+ jk = rd32(E1000_SYSTIMR);
+ nsec = rd32(E1000_SYSTIML);
+ sec = rd32(E1000_SYSTIMH);
+
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+}
+
+static void igb_ptp_write_i210(struct igb_adapter *adapter,
+ const struct timespec *ts)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /*
+ * Writing the SYSTIMR register is not necessary as it only provides
+ * sub-nanosecond resolution.
+ */
+ wr32(E1000_SYSTIML, ts->tv_nsec);
+ wr32(E1000_SYSTIMH, ts->tv_sec);
+}
+
+/**
+ * igb_ptp_systim_to_hwtstamp - convert system time value to hw timestamp
+ * @adapter: board private structure
+ * @hwtstamps: timestamp structure to update
+ * @systim: unsigned 64bit system time value.
+ *
+ * We need to convert the system time value stored in the RX/TXSTMP registers
+ * into a hwtstamp which can be used by the upper level timestamping functions.
+ *
+ * The 'tmreg_lock' spinlock is used to protect the consistency of the
+ * system time value. This is needed because reading the 64 bit time
+ * value involves reading two (or three) 32 bit registers. The first
+ * read latches the value. Ditto for writing.
+ *
+ * In addition, here we have extended the system time with an overflow
+ * counter in software.
+ **/
+static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
+ struct skb_shared_hwtstamps *hwtstamps,
+ u64 systim)
+{
+ unsigned long flags;
+ u64 ns;
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
+ ns = timecounter_cyc2time(&adapter->tc, systim);
+
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ /* Upper 32 bits contain s, lower 32 bits contain ns. */
+ hwtstamps->hwtstamp = ktime_set(systim >> 32,
+ systim & 0xFFFFFFFF);
+ break;
+ default:
+ break;
+ }
+}
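The software overflow counter mentioned above is what keeps the 40-bit SYSTIM of the 82580-class parts usable: the extended time stays correct as long as the raw counter is sampled more often than it wraps (2^40 ns, about 18.3 minutes), which is what the nine-minute ptp_overflow_work scheduled later in igb_ptp_init() guarantees. A standalone model of that argument (my own illustration, no kernel APIs):

	/*
	 * Standalone model, illustration only: a 40-bit nanosecond counter
	 * that wraps every ~18.3 minutes can be extended in software
	 * indefinitely, provided it is sampled at least once per wrap,
	 * here every 9 minutes, mirroring IGB_SYSTIM_OVERFLOW_PERIOD.
	 */
	#include <inttypes.h>
	#include <stdio.h>

	#define NBITS 40
	#define MASK  ((UINT64_C(1) << NBITS) - 1)

	static uint64_t last_raw;	/* last raw counter sample */
	static uint64_t total_ns;	/* software-extended time */

	static uint64_t extend(uint64_t raw)
	{
		/* the masked subtraction is wrap-safe while samples are < 2^40 ns apart */
		uint64_t delta = (raw - last_raw) & MASK;

		last_raw = raw;
		total_ns += delta;
		return total_ns;
	}

	int main(void)
	{
		const uint64_t nine_min = UINT64_C(9) * 60 * 1000000000;
		uint64_t real_ns = 0;

		for (int i = 0; i < 5; i++) {
			real_ns += nine_min;			/* time marches on */
			uint64_t ext = extend(real_ns & MASK);	/* periodic check */
			printf("raw=0x%010" PRIx64 " extended=%" PRIu64 " ns match=%d\n",
			       real_ns & MASK, ext, ext == real_ns);
		}
		return 0;
	}

The i210/i211 keep seconds and nanoseconds in separate registers, which is why the igb_ptp_init() hunk further down skips the overflow work for those parts.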
+
+/*
* PTP clock operations
*/
-static int ptp_82576_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ struct e1000_hw *hw = &igb->hw;
+ int neg_adj = 0;
u64 rate;
u32 incvalue;
- int neg_adj = 0;
- struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
- struct e1000_hw *hw = &igb->hw;
if (ppb < 0) {
neg_adj = 1;
@@ -153,13 +238,14 @@ static int ptp_82576_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
return 0;
}
-static int ptp_82580_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int igb_ptp_adjfreq_82580(struct ptp_clock_info *ptp, s32 ppb)
{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ struct e1000_hw *hw = &igb->hw;
+ int neg_adj = 0;
u64 rate;
u32 inca;
- int neg_adj = 0;
- struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
- struct e1000_hw *hw = &igb->hw;
if (ppb < 0) {
neg_adj = 1;
@@ -178,11 +264,12 @@ static int ptp_82580_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
return 0;
}
-static int igb_adjtime(struct ptp_clock_info *ptp, s64 delta)
+static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta)
{
- s64 now;
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
unsigned long flags;
- struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+ s64 now;
spin_lock_irqsave(&igb->tmreg_lock, flags);
@@ -195,12 +282,32 @@ static int igb_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int igb_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta)
{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ unsigned long flags;
+ struct timespec now, then = ns_to_timespec(delta);
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ igb_ptp_read_i210(igb, &now);
+ now = timespec_add(now, then);
+ igb_ptp_write_i210(igb, (const struct timespec *)&now);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
+ struct timespec *ts)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ unsigned long flags;
u64 ns;
u32 remainder;
- unsigned long flags;
- struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
spin_lock_irqsave(&igb->tmreg_lock, flags);
@@ -214,11 +321,29 @@ static int igb_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
return 0;
}
-static int igb_settime(struct ptp_clock_info *ptp, const struct timespec *ts)
+static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
+ struct timespec *ts)
{
- u64 ns;
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
unsigned long flags;
- struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ igb_ptp_read_i210(igb, ts);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ return 0;
+}
+
+static int igb_ptp_settime_82576(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ unsigned long flags;
+ u64 ns;
ns = ts->tv_sec * 1000000000ULL;
ns += ts->tv_nsec;
@@ -232,77 +357,369 @@ static int igb_settime(struct ptp_clock_info *ptp, const struct timespec *ts)
return 0;
}
-static int ptp_82576_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
+static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
{
- return -EOPNOTSUPP;
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ unsigned long flags;
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ igb_ptp_write_i210(igb, ts);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ return 0;
}
-static int ptp_82580_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
+static int igb_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
{
return -EOPNOTSUPP;
}
-static void igb_overflow_check(struct work_struct *work)
+/**
+ * igb_ptp_tx_work
+ * @work: pointer to work struct
+ *
+ * This work function polls the TSYNCTXCTL valid bit to determine when a
+ * timestamp has been taken for the current stored skb.
+ */
+void igb_ptp_tx_work(struct work_struct *work)
+{
+ struct igb_adapter *adapter = container_of(work, struct igb_adapter,
+ ptp_tx_work);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 tsynctxctl;
+
+ if (!adapter->ptp_tx_skb)
+ return;
+
+ tsynctxctl = rd32(E1000_TSYNCTXCTL);
+ if (tsynctxctl & E1000_TSYNCTXCTL_VALID)
+ igb_ptp_tx_hwtstamp(adapter);
+ else
+ /* reschedule to check later */
+ schedule_work(&adapter->ptp_tx_work);
+}
+
+static void igb_ptp_overflow_check(struct work_struct *work)
{
- struct timespec ts;
struct igb_adapter *igb =
- container_of(work, struct igb_adapter, overflow_work.work);
+ container_of(work, struct igb_adapter, ptp_overflow_work.work);
+ struct timespec ts;
- igb_gettime(&igb->caps, &ts);
+ igb->ptp_caps.gettime(&igb->ptp_caps, &ts);
pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
- schedule_delayed_work(&igb->overflow_work, IGB_OVERFLOW_PERIOD);
+ schedule_delayed_work(&igb->ptp_overflow_work,
+ IGB_SYSTIM_OVERFLOW_PERIOD);
+}
+
+/**
+ * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp
+ * @adapter: Board private structure.
+ *
+ * If we were asked to do hardware stamping and such a time stamp is
+ * available, then it must have been for this skb here because we only
+ * allow one such packet into the queue.
+ */
+void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 regval;
+
+ regval = rd32(E1000_TXSTMPL);
+ regval |= (u64)rd32(E1000_TXSTMPH) << 32;
+
+ igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+ skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+}
+
+void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ u64 regval;
+
+ if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
+ E1000_RXDADV_STAT_TS))
+ return;
+
+ /*
+ * If this bit is set, then the RX registers contain the time stamp. No
+ * other packet will be time stamped until we read these registers, so
+ * read the registers to make them available again. Because only one
+ * packet can be time stamped at a time, we know that the register
+ * values must belong to this one here and therefore we don't need to
+ * compare any of the additional attributes stored for it.
+ *
+ * If nothing went wrong, then it should have a shared tx_flags that we
+ * can turn into a skb_shared_hwtstamps.
+ */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ u32 *stamp = (u32 *)skb->data;
+ regval = le32_to_cpu(*(stamp + 2));
+ regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
+ skb_pull(skb, IGB_TS_HDR_LEN);
+ } else {
+ if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+ return;
+
+ regval = rd32(E1000_RXSTMPL);
+ regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+ }
+
+ igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+}
+
+/**
+ * igb_ptp_hwtstamp_ioctl - control hardware time stamping
+ * @netdev:
+ * @ifreq:
+ * @cmd:
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware
+ * filters. Not all combinations are supported, in particular event
+ * type has to be specified. Matching the kind of event packet is
+ * not supported, with the exception of "all V2 events regardless of
+ * level 2 or 4".
+ *
+ **/
+int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct hwtstamp_config config;
+ u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
+ u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+ u32 tsync_rx_cfg = 0;
+ bool is_l4 = false;
+ bool is_l2 = false;
+ u32 regval;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tsync_tx_ctl = 0;
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tsync_rx_ctl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_ALL:
+ /*
+ * register TSYNCRXCFG must be set, therefore it is not
+ * possible to time stamp both Sync and Delay_Req messages
+ * => fall back to time stamping all packets
+ */
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
+ is_l2 = true;
+ is_l4 = true;
+ config.rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
+ is_l2 = true;
+ is_l4 = true;
+ config.rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ is_l2 = true;
+ is_l4 = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (hw->mac.type == e1000_82575) {
+ if (tsync_rx_ctl | tsync_tx_ctl)
+ return -EINVAL;
+ return 0;
+ }
+
+ /*
+ * Per-packet timestamping only works if all packets are
+ * timestamped, so enable timestamping in all packets as
+ * long as one rx filter was configured.
+ */
+ if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
+ tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+
+ if ((hw->mac.type == e1000_i210) ||
+ (hw->mac.type == e1000_i211)) {
+ regval = rd32(E1000_RXPBS);
+ regval |= E1000_RXPBS_CFG_TS_EN;
+ wr32(E1000_RXPBS, regval);
+ }
+ }
+
+ /* enable/disable TX */
+ regval = rd32(E1000_TSYNCTXCTL);
+ regval &= ~E1000_TSYNCTXCTL_ENABLED;
+ regval |= tsync_tx_ctl;
+ wr32(E1000_TSYNCTXCTL, regval);
+
+ /* enable/disable RX */
+ regval = rd32(E1000_TSYNCRXCTL);
+ regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
+ regval |= tsync_rx_ctl;
+ wr32(E1000_TSYNCRXCTL, regval);
+
+ /* define which PTP packets are time stamped */
+ wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
+
+ /* define ethertype filter for timestamped packets */
+ if (is_l2)
+ wr32(E1000_ETQF(3),
+ (E1000_ETQF_FILTER_ENABLE | /* enable filter */
+ E1000_ETQF_1588 | /* enable timestamping */
+ ETH_P_1588)); /* 1588 eth protocol type */
+ else
+ wr32(E1000_ETQF(3), 0);
+
+#define PTP_PORT 319
+ /* L4 Queue Filter[3]: filter by destination port and protocol */
+ if (is_l4) {
+ u32 ftqf = (IPPROTO_UDP /* UDP */
+ | E1000_FTQF_VF_BP /* VF not compared */
+ | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
+ | E1000_FTQF_MASK); /* mask all inputs */
+ ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
+
+ wr32(E1000_IMIR(3), htons(PTP_PORT));
+ wr32(E1000_IMIREXT(3),
+ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
+ if (hw->mac.type == e1000_82576) {
+ /* enable source port check */
+ wr32(E1000_SPQF(3), htons(PTP_PORT));
+ ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
+ }
+ wr32(E1000_FTQF(3), ftqf);
+ } else {
+ wr32(E1000_FTQF(3), E1000_FTQF_MASK);
+ }
+ wrfl();
+
+ /* clear TX/RX time stamp registers, just to be sure */
+ regval = rd32(E1000_TXSTMPL);
+ regval = rd32(E1000_TXSTMPH);
+ regval = rd32(E1000_RXSTMPL);
+ regval = rd32(E1000_RXSTMPH);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
}
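For context, the handler above is reached from userspace via the SIOCSHWTSTAMP ioctl on any socket, with the request carried in a struct hwtstamp_config hung off struct ifreq; as in the code above, the driver may rewrite rx_filter before copying the structure back. A minimal userspace sketch (my own illustration; "eth0" is a placeholder interface name):

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
		};
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0) {
			perror("socket");
			return 1;
		}

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
			perror("SIOCSHWTSTAMP");
		else	/* the driver may have adjusted rx_filter, as above */
			printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

		close(fd);
		return 0;
	}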
void igb_ptp_init(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
switch (hw->mac.type) {
- case e1000_i210:
- case e1000_i211:
- case e1000_i350:
+ case e1000_82576:
+ snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+ adapter->ptp_caps.owner = THIS_MODULE;
+ adapter->ptp_caps.max_adj = 1000000000;
+ adapter->ptp_caps.n_ext_ts = 0;
+ adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
+ adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
+ adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
+ adapter->ptp_caps.settime = igb_ptp_settime_82576;
+ adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->cc.read = igb_ptp_read_82576;
+ adapter->cc.mask = CLOCKSOURCE_MASK(64);
+ adapter->cc.mult = 1;
+ adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
+ /* Dial the nominal frequency. */
+ wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
+ break;
case e1000_82580:
- adapter->caps.owner = THIS_MODULE;
- strcpy(adapter->caps.name, "igb-82580");
- adapter->caps.max_adj = 62499999;
- adapter->caps.n_ext_ts = 0;
- adapter->caps.pps = 0;
- adapter->caps.adjfreq = ptp_82580_adjfreq;
- adapter->caps.adjtime = igb_adjtime;
- adapter->caps.gettime = igb_gettime;
- adapter->caps.settime = igb_settime;
- adapter->caps.enable = ptp_82580_enable;
- adapter->cc.read = igb_82580_systim_read;
- adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
- adapter->cc.mult = 1;
- adapter->cc.shift = 0;
+ case e1000_i350:
+ snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+ adapter->ptp_caps.owner = THIS_MODULE;
+ adapter->ptp_caps.max_adj = 62499999;
+ adapter->ptp_caps.n_ext_ts = 0;
+ adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
+ adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
+ adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
+ adapter->ptp_caps.settime = igb_ptp_settime_82576;
+ adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->cc.read = igb_ptp_read_82580;
+ adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
+ adapter->cc.mult = 1;
+ adapter->cc.shift = 0;
/* Enable the timer functions by clearing bit 31. */
wr32(E1000_TSAUXC, 0x0);
break;
-
- case e1000_82576:
- adapter->caps.owner = THIS_MODULE;
- strcpy(adapter->caps.name, "igb-82576");
- adapter->caps.max_adj = 1000000000;
- adapter->caps.n_ext_ts = 0;
- adapter->caps.pps = 0;
- adapter->caps.adjfreq = ptp_82576_adjfreq;
- adapter->caps.adjtime = igb_adjtime;
- adapter->caps.gettime = igb_gettime;
- adapter->caps.settime = igb_settime;
- adapter->caps.enable = ptp_82576_enable;
- adapter->cc.read = igb_82576_systim_read;
- adapter->cc.mask = CLOCKSOURCE_MASK(64);
- adapter->cc.mult = 1;
- adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
- /* Dial the nominal frequency. */
- wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
+ case e1000_i210:
+ case e1000_i211:
+ snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+ adapter->ptp_caps.owner = THIS_MODULE;
+ adapter->ptp_caps.max_adj = 62499999;
+ adapter->ptp_caps.n_ext_ts = 0;
+ adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
+ adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
+ adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
+ adapter->ptp_caps.settime = igb_ptp_settime_i210;
+ adapter->ptp_caps.enable = igb_ptp_enable;
+ /* Enable the timer functions by clearing bit 31. */
+ wr32(E1000_TSAUXC, 0x0);
break;
-
default:
adapter->ptp_clock = NULL;
return;
@@ -310,86 +727,114 @@ void igb_ptp_init(struct igb_adapter *adapter)
wrfl();
- timecounter_init(&adapter->tc, &adapter->cc,
- ktime_to_ns(ktime_get_real()));
+ spin_lock_init(&adapter->tmreg_lock);
+ INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
+
+ /* Initialize the clock and overflow work for devices that need it. */
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
+ struct timespec ts = ktime_to_timespec(ktime_get_real());
- INIT_DELAYED_WORK(&adapter->overflow_work, igb_overflow_check);
+ igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
+ } else {
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
- spin_lock_init(&adapter->tmreg_lock);
+ INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
+ igb_ptp_overflow_check);
- schedule_delayed_work(&adapter->overflow_work, IGB_OVERFLOW_PERIOD);
+ schedule_delayed_work(&adapter->ptp_overflow_work,
+ IGB_SYSTIM_OVERFLOW_PERIOD);
+ }
+
+ /* Initialize the time sync interrupts for devices that support it. */
+ if (hw->mac.type >= e1000_82580) {
+ wr32(E1000_TSIM, E1000_TSIM_TXTS);
+ wr32(E1000_IMS, E1000_IMS_TS);
+ }
- adapter->ptp_clock = ptp_clock_register(&adapter->caps);
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
+ &adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
- } else
+ } else {
dev_info(&adapter->pdev->dev, "added PHC on %s\n",
adapter->netdev->name);
+ adapter->flags |= IGB_FLAG_PTP;
+ }
}
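Once ptp_clock_register() succeeds and the "added PHC on %s" message is printed, the clock shows up in userspace as a /dev/ptpN character device that can be read as a dynamic POSIX clock. A small sketch (illustration only; /dev/ptp0 is a placeholder and FD_TO_CLOCKID is the usual open-coded helper, not a libc definition):

	#include <fcntl.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	#define CLOCKFD 3
	#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

	int main(void)
	{
		int fd = open("/dev/ptp0", O_RDONLY);
		struct timespec ts;

		if (fd < 0) {
			perror("open /dev/ptp0");
			return 1;
		}

		if (clock_gettime(FD_TO_CLOCKID(fd), &ts))
			perror("clock_gettime");
		else
			printf("PHC time: %lld.%09ld\n",
			       (long long)ts.tv_sec, ts.tv_nsec);

		close(fd);
		return 0;
	}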
-void igb_ptp_remove(struct igb_adapter *adapter)
+/**
+ * igb_ptp_stop - Disable PTP device and stop the overflow check.
+ * @adapter: Board private structure.
+ *
+ * This function stops the PTP support and cancels the delayed work.
+ **/
+void igb_ptp_stop(struct igb_adapter *adapter)
{
switch (adapter->hw.mac.type) {
- case e1000_i211:
- case e1000_i210:
- case e1000_i350:
- case e1000_82580:
case e1000_82576:
- cancel_delayed_work_sync(&adapter->overflow_work);
+ case e1000_82580:
+ case e1000_i350:
+ cancel_delayed_work_sync(&adapter->ptp_overflow_work);
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ /* No delayed work to cancel. */
break;
default:
return;
}
+ cancel_work_sync(&adapter->ptp_tx_work);
+
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
adapter->netdev->name);
+ adapter->flags &= ~IGB_FLAG_PTP;
}
}
/**
- * igb_systim_to_hwtstamp - convert system time value to hw timestamp
- * @adapter: board private structure
- * @hwtstamps: timestamp structure to update
- * @systim: unsigned 64bit system time value.
- *
- * We need to convert the system time value stored in the RX/TXSTMP registers
- * into a hwtstamp which can be used by the upper level timestamping functions.
+ * igb_ptp_reset - Re-enable the adapter for PTP following a reset.
+ * @adapter: Board private structure.
*
- * The 'tmreg_lock' spinlock is used to protect the consistency of the
- * system time value. This is needed because reading the 64 bit time
- * value involves reading two (or three) 32 bit registers. The first
- * read latches the value. Ditto for writing.
- *
- * In addition, here have extended the system time with an overflow
- * counter in software.
+ * This function handles the reset work required to re-enable the PTP device.
**/
-void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
- struct skb_shared_hwtstamps *hwtstamps,
- u64 systim)
+void igb_ptp_reset(struct igb_adapter *adapter)
{
- u64 ns;
- unsigned long flags;
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (!(adapter->flags & IGB_FLAG_PTP))
+ return;
switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ /* Dial the nominal frequency. */
+ wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
+ break;
+ case e1000_82580:
+ case e1000_i350:
case e1000_i210:
case e1000_i211:
- case e1000_i350:
- case e1000_82580:
- case e1000_82576:
+ /* Enable the timer functions and interrupts. */
+ wr32(E1000_TSAUXC, 0x0);
+ wr32(E1000_TSIM, E1000_TSIM_TXTS);
+ wr32(E1000_IMS, E1000_IMS_TS);
break;
default:
+ /* No work to do. */
return;
}
- spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ /* Re-initialize the timer. */
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
+ struct timespec ts = ktime_to_timespec(ktime_get_real());
- ns = timecounter_cyc2time(&adapter->tc, systim);
-
- spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
-
- memset(hwtstamps, 0, sizeof(*hwtstamps));
- hwtstamps->hwtstamp = ns_to_ktime(ns);
+ igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
+ } else {
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
+ }
}
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 0696abfe9944..0ac11f527a84 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2833,7 +2833,7 @@ static void __devexit igbvf_remove(struct pci_dev *pdev)
}
/* PCI Error Recovery (ERS) */
-static struct pci_error_handlers igbvf_err_handler = {
+static const struct pci_error_handlers igbvf_err_handler = {
.error_detected = igbvf_io_error_detected,
.slot_reset = igbvf_io_slot_reset,
.resume = igbvf_io_resume,
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index d05fc95befc5..d99a2d51b948 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -115,7 +115,7 @@ static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
static void ixgb_io_resume (struct pci_dev *pdev);
-static struct pci_error_handlers ixgb_err_handler = {
+static const struct pci_error_handlers ixgb_err_handler = {
.error_detected = ixgb_io_error_detected,
.slot_reset = ixgb_io_slot_reset,
.resume = ixgb_io_resume,
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 5fd5d04c26c9..89f40e51fc13 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -32,7 +32,7 @@
obj-$(CONFIG_IXGBE) += ixgbe.o
-ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
+ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index b9623e9ea895..5bd26763554c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -78,6 +78,9 @@
/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */
+#define IXGBE_RXBUFFER_2K 2048
+#define IXGBE_RXBUFFER_3K 3072
+#define IXGBE_RXBUFFER_4K 4096
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
/*
@@ -104,6 +107,7 @@
#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8)
+#define IXGBE_TX_FLAGS_NO_IFCS (u32)(1 << 9)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -293,16 +297,25 @@ struct ixgbe_ring_feature {
* this is twice the size of a half page we need to double the page order
* for FCoE enabled Rx queues.
*/
-#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
-static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
+static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
{
- return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0;
+#ifdef IXGBE_FCOE
+ if (test_bit(__IXGBE_RX_FCOE, &ring->state))
+ return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
+ IXGBE_RXBUFFER_3K;
+#endif
+ return IXGBE_RXBUFFER_2K;
}
-#else
-#define ixgbe_rx_pg_order(_ring) 0
+
+static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
+{
+#ifdef IXGBE_FCOE
+ if (test_bit(__IXGBE_RX_FCOE, &ring->state))
+ return (PAGE_SIZE < 8192) ? 1 : 0;
#endif
+ return 0;
+}
#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
-#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring))
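With the old half-page macro removed, the receive buffer size and page order now follow a fixed 2K/3K/4K scheme. A quick standalone enumeration of what the two inline helpers above resolve to (my own sketch, assuming IXGBE_FCOE is compiled in):

	#include <stdio.h>

	static unsigned int bufsz(int fcoe_ring, unsigned long page_size)
	{
		if (fcoe_ring)
			return (page_size < 8192) ? 4096 /* IXGBE_RXBUFFER_4K */
						  : 3072 /* IXGBE_RXBUFFER_3K */;
		return 2048; /* IXGBE_RXBUFFER_2K */
	}

	static unsigned int pg_order(int fcoe_ring, unsigned long page_size)
	{
		return (fcoe_ring && page_size < 8192) ? 1 : 0;
	}

	int main(void)
	{
		const unsigned long pages[] = { 4096, 65536 };

		for (int i = 0; i < 2; i++)
			for (int fcoe = 0; fcoe <= 1; fcoe++)
				printf("PAGE_SIZE=%-5lu fcoe=%d -> bufsz=%u, page order=%u\n",
				       pages[i], fcoe, bufsz(fcoe, pages[i]),
				       pg_order(fcoe, pages[i]));
		return 0;
	}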
struct ixgbe_ring_container {
struct ixgbe_ring *ring; /* pointer to linked list of rings */
@@ -584,6 +597,9 @@ struct ixgbe_adapter {
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *ixgbe_dbg_adapter;
+#endif /*CONFIG_DEBUG_FS*/
};
struct ixgbe_fdir_filter {
@@ -712,7 +728,12 @@ extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
struct netdev_fcoe_hbainfo *info);
extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */
-
+#ifdef CONFIG_DEBUG_FS
+extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
+extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
+extern void ixgbe_dbg_init(void);
+extern void ixgbe_dbg_exit(void);
+#endif /* CONFIG_DEBUG_FS */
static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
{
return netdev_get_tx_queue(ring->netdev, ring->queue_index);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
new file mode 100644
index 000000000000..8d3a21889099
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -0,0 +1,300 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2012 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+#include "ixgbe.h"
+
+static struct dentry *ixgbe_dbg_root;
+
+static char ixgbe_dbg_reg_ops_buf[256] = "";
+
+/**
+ * ixgbe_dbg_reg_ops_open - prep the debugfs pokee data item when opened
+ * @inode: inode that was opened
+ * @filp: file info
+ *
+ * Stash the adapter pointer hiding in the inode into the file pointer where
+ * we can find it later in the read and write calls
+ **/
+static int ixgbe_dbg_reg_ops_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+/**
+ * ixgbe_dbg_reg_ops_read - read for reg_ops datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct ixgbe_adapter *adapter = filp->private_data;
+ char buf[256];
+ int bytes_not_copied;
+ int len;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+
+ len = snprintf(buf, sizeof(buf), "%s: %s\n",
+ adapter->netdev->name, ixgbe_dbg_reg_ops_buf);
+ if (count < len)
+ return -ENOSPC;
+ bytes_not_copied = copy_to_user(buffer, buf, len);
+ if (bytes_not_copied < 0)
+ return bytes_not_copied;
+
+ *ppos = len;
+ return len;
+}
+
+/**
+ * ixgbe_dbg_reg_ops_write - write into reg_ops datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct ixgbe_adapter *adapter = filp->private_data;
+ int bytes_not_copied;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+ if (count >= sizeof(ixgbe_dbg_reg_ops_buf))
+ return -ENOSPC;
+
+ bytes_not_copied = copy_from_user(ixgbe_dbg_reg_ops_buf, buffer, count);
+ if (bytes_not_copied < 0)
+ return bytes_not_copied;
+ else if (bytes_not_copied < count)
+ count -= bytes_not_copied;
+ else
+ return -ENOSPC;
+ ixgbe_dbg_reg_ops_buf[count] = '\0';
+
+ if (strncmp(ixgbe_dbg_reg_ops_buf, "write", 5) == 0) {
+ u32 reg, value;
+ int cnt;
+ cnt = sscanf(&ixgbe_dbg_reg_ops_buf[5], "%x %x", &reg, &value);
+ if (cnt == 2) {
+ IXGBE_WRITE_REG(&adapter->hw, reg, value);
+ value = IXGBE_READ_REG(&adapter->hw, reg);
+ e_dev_info("write: 0x%08x = 0x%08x\n", reg, value);
+ } else {
+ e_dev_info("write <reg> <value>\n");
+ }
+ } else if (strncmp(ixgbe_dbg_reg_ops_buf, "read", 4) == 0) {
+ u32 reg, value;
+ int cnt;
+ cnt = sscanf(&ixgbe_dbg_reg_ops_buf[4], "%x", &reg);
+ if (cnt == 1) {
+ value = IXGBE_READ_REG(&adapter->hw, reg);
+ e_dev_info("read 0x%08x = 0x%08x\n", reg, value);
+ } else {
+ e_dev_info("read <reg>\n");
+ }
+ } else {
+ e_dev_info("Unknown command %s\n", ixgbe_dbg_reg_ops_buf);
+ e_dev_info("Available commands:\n");
+ e_dev_info(" read <reg>\n");
+ e_dev_info(" write <reg> <value>\n");
+ }
+ return count;
+}
+
+static const struct file_operations ixgbe_dbg_reg_ops_fops = {
+ .owner = THIS_MODULE,
+ .open = ixgbe_dbg_reg_ops_open,
+ .read = ixgbe_dbg_reg_ops_read,
+ .write = ixgbe_dbg_reg_ops_write,
+};
+
+static char ixgbe_dbg_netdev_ops_buf[256] = "";
+
+/**
+ * ixgbe_dbg_netdev_ops_open - prep the debugfs netdev_ops data item
+ * @inode: inode that was opened
+ * @filp: file info
+ *
+ * Stash the adapter pointer hiding in the inode into the file pointer
+ * where we can find it later in the read and write calls
+ **/
+static int ixgbe_dbg_netdev_ops_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+/**
+ * ixgbe_dbg_netdev_ops_read - read for netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct ixgbe_adapter *adapter = filp->private_data;
+ char buf[256];
+ int bytes_not_copied;
+ int len;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+
+ len = snprintf(buf, sizeof(buf), "%s: %s\n",
+ adapter->netdev->name, ixgbe_dbg_netdev_ops_buf);
+ if (count < len)
+ return -ENOSPC;
+ bytes_not_copied = copy_to_user(buffer, buf, len);
+ if (bytes_not_copied < 0)
+ return bytes_not_copied;
+
+ *ppos = len;
+ return len;
+}
+
+/**
+ * ixgbe_dbg_netdev_ops_write - write into netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct ixgbe_adapter *adapter = filp->private_data;
+ int bytes_not_copied;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+ if (count >= sizeof(ixgbe_dbg_netdev_ops_buf))
+ return -ENOSPC;
+
+ bytes_not_copied = copy_from_user(ixgbe_dbg_netdev_ops_buf,
+ buffer, count);
+ if (bytes_not_copied < 0)
+ return bytes_not_copied;
+ else if (bytes_not_copied < count)
+ count -= bytes_not_copied;
+ else
+ return -ENOSPC;
+ ixgbe_dbg_netdev_ops_buf[count] = '\0';
+
+ if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
+ adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);
+ e_dev_info("tx_timeout called\n");
+ } else {
+ e_dev_info("Unknown command: %s\n", ixgbe_dbg_netdev_ops_buf);
+ e_dev_info("Available commands:\n");
+ e_dev_info(" tx_timeout\n");
+ }
+ return count;
+}
+
+static const struct file_operations ixgbe_dbg_netdev_ops_fops = {
+ .owner = THIS_MODULE,
+ .open = ixgbe_dbg_netdev_ops_open,
+ .read = ixgbe_dbg_netdev_ops_read,
+ .write = ixgbe_dbg_netdev_ops_write,
+};
+
+/**
+ * ixgbe_dbg_adapter_init - setup the debugfs directory for the adapter
+ * @adapter: the adapter that is starting up
+ **/
+void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
+{
+ const char *name = pci_name(adapter->pdev);
+ struct dentry *pfile;
+ adapter->ixgbe_dbg_adapter = debugfs_create_dir(name, ixgbe_dbg_root);
+ if (adapter->ixgbe_dbg_adapter) {
+ pfile = debugfs_create_file("reg_ops", 0600,
+ adapter->ixgbe_dbg_adapter, adapter,
+ &ixgbe_dbg_reg_ops_fops);
+ if (!pfile)
+ e_dev_err("debugfs reg_ops for %s failed\n", name);
+ pfile = debugfs_create_file("netdev_ops", 0600,
+ adapter->ixgbe_dbg_adapter, adapter,
+ &ixgbe_dbg_netdev_ops_fops);
+ if (!pfile)
+ e_dev_err("debugfs netdev_ops for %s failed\n", name);
+ } else {
+ e_dev_err("debugfs entry for %s failed\n", name);
+ }
+}
+
+/**
+ * ixgbe_dbg_adapter_exit - clear out the adapter's debugfs entries
+ * @adapter: the adapter that is stopping
+ **/
+void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
+{
+ if (adapter->ixgbe_dbg_adapter)
+ debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
+ adapter->ixgbe_dbg_adapter = NULL;
+}
+
+/**
+ * ixgbe_dbg_init - start up debugfs for the driver
+ **/
+void ixgbe_dbg_init(void)
+{
+ ixgbe_dbg_root = debugfs_create_dir(ixgbe_driver_name, NULL);
+ if (ixgbe_dbg_root == NULL)
+ pr_err("init of debugfs failed\n");
+}
+
+/**
+ * ixgbe_dbg_exit - clean out the driver's debugfs entries
+ **/
+void ixgbe_dbg_exit(void)
+{
+ debugfs_remove_recursive(ixgbe_dbg_root);
+}
+
+#endif /* CONFIG_DEBUG_FS */
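The reg_ops write handler in this new file is essentially a two-command parser ("read <reg>" and "write <reg> <value>") driven by strncmp and sscanf. A minimal userspace sketch of the same parsing, with the register access replaced by printf since no hardware is involved:

/* Userspace sketch of the "read"/"write" command parsing used by reg_ops. */
#include <stdio.h>
#include <string.h>

static void handle_cmd(const char *buf)
{
	unsigned int reg, value;

	if (strncmp(buf, "write", 5) == 0) {
		if (sscanf(buf + 5, "%x %x", &reg, &value) == 2)
			printf("write: 0x%08x = 0x%08x\n", reg, value);
		else
			printf("write <reg> <value>\n");
	} else if (strncmp(buf, "read", 4) == 0) {
		if (sscanf(buf + 4, "%x", &reg) == 1)
			printf("read 0x%08x\n", reg);
		else
			printf("read <reg>\n");
	} else {
		printf("Unknown command %s\n", buf);
	}
}

int main(void)
{
	handle_cmd("write 0x4000 0x1");
	handle_cmd("read 0x4000");
	handle_cmd("dump");
	return 0;
}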
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 4326f74f7137..868af6938219 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1167,7 +1167,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
}
bi->dma = dma;
- bi->page_offset ^= ixgbe_rx_bufsz(rx_ring);
+ bi->page_offset = 0;
return true;
}
@@ -1320,29 +1320,6 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
return max_len;
}
-static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- __le32 rsc_enabled;
- u32 rsc_cnt;
-
- if (!ring_is_rsc_enabled(rx_ring))
- return;
-
- rsc_enabled = rx_desc->wb.lower.lo_dword.data &
- cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
-
- /* If this is an RSC frame rsc_cnt should be non-zero */
- if (!rsc_enabled)
- return;
-
- rsc_cnt = le32_to_cpu(rsc_enabled);
- rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
-
- IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
-}
-
static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
struct sk_buff *skb)
{
@@ -1440,16 +1417,28 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
prefetch(IXGBE_RX_DESC(rx_ring, ntc));
- if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
- return false;
+ /* update RSC append count if present */
+ if (ring_is_rsc_enabled(rx_ring)) {
+ __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
+ cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
+
+ if (unlikely(rsc_enabled)) {
+ u32 rsc_cnt = le32_to_cpu(rsc_enabled);
+
+ rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
+ IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
- /* append_cnt indicates packet is RSC, if so fetch nextp */
- if (IXGBE_CB(skb)->append_cnt) {
- ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
- ntc &= IXGBE_RXDADV_NEXTP_MASK;
- ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
+ /* update ntc based on RSC value */
+ ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
+ ntc &= IXGBE_RXDADV_NEXTP_MASK;
+ ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
+ }
}
+ /* if we are the last buffer then there is nothing else to do */
+ if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
+ return false;
+
/* place skb in next buffer to be received */
rx_ring->rx_buffer_info[ntc].skb = skb;
rx_ring->rx_stats.non_eop_descs++;
@@ -1458,6 +1447,78 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
}
/**
+ * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an ixgbe specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
+ struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+ unsigned int pull_len;
+
+ /*
+ * it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lowmem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
+
+ /*
+ * we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
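A rough model of the pull-tail bookkeeping may help here: the header bytes are copied into the linear area in long-sized chunks, while the frag's size and offset are adjusted by the unaligned pull length. The sketch below uses plain arrays and integers in place of skb frags, which is an assumption for illustration only.

/* Simplified model of pulling a header out of a paged frag into linear data. */
#include <stdio.h>
#include <string.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	char frag[256];               /* stands in for the first page frag */
	char linear[128] = { 0 };     /* stands in for skb->data */
	unsigned int frag_size = sizeof(frag);
	unsigned int pull_len = 54;   /* e.g. Ethernet + IP + TCP headers */
	unsigned int copy_len = ALIGN_UP(pull_len, sizeof(long));

	memset(frag, 'h', sizeof(frag));

	/* copy slightly more than pull_len so memcpy works on long-sized chunks */
	memcpy(linear, frag, copy_len);

	/* the frag shrinks by pull_len and its offset advances by the same amount */
	frag_size -= pull_len;
	printf("copied %u bytes, frag now holds %u bytes at offset %u\n",
	       copy_len, frag_size, pull_len);
	return 0;
}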
+
+/**
+ * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being updated
+ *
+ * This function provides a basic DMA sync up for the first fragment of an
+ * skb. The reason for doing this is that the first fragment cannot be
+ * unmapped until we have reached the end of packet descriptor for a buffer
+ * chain.
+ */
+static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
+ struct sk_buff *skb)
+{
+ /* if the page was released unmap it, else just sync our portion */
+ if (unlikely(IXGBE_CB(skb)->page_released)) {
+ dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
+ ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+ IXGBE_CB(skb)->page_released = false;
+ } else {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ IXGBE_CB(skb)->dma,
+ frag->page_offset,
+ ixgbe_rx_bufsz(rx_ring),
+ DMA_FROM_DEVICE);
+ }
+ IXGBE_CB(skb)->dma = 0;
+}
+
+/**
* ixgbe_cleanup_headers - Correct corrupted or empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
@@ -1479,24 +1540,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
struct net_device *netdev = rx_ring->netdev;
- unsigned char *va;
- unsigned int pull_len;
-
- /* if the page was released unmap it, else just sync our portion */
- if (unlikely(IXGBE_CB(skb)->page_released)) {
- dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
- ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
- IXGBE_CB(skb)->page_released = false;
- } else {
- dma_sync_single_range_for_cpu(rx_ring->dev,
- IXGBE_CB(skb)->dma,
- frag->page_offset,
- ixgbe_rx_bufsz(rx_ring),
- DMA_FROM_DEVICE);
- }
- IXGBE_CB(skb)->dma = 0;
/* verify that the packet does not have any known errors */
if (unlikely(ixgbe_test_staterr(rx_desc,
@@ -1506,40 +1550,9 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
return true;
}
- /*
- * it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- /*
- * we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = skb_frag_size(frag);
- if (pull_len > IXGBE_RX_HDR_SIZE)
- pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-
- /*
- * if we sucked the frag empty then we should free it,
- * if there are other frags here something is screwed up in hardware
- */
- if (skb_frag_size(frag) == 0) {
- BUG_ON(skb_shinfo(skb)->nr_frags != 1);
- skb_shinfo(skb)->nr_frags = 0;
- __skb_frag_unref(frag);
- skb->truesize -= ixgbe_rx_bufsz(rx_ring);
- }
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ ixgbe_pull_tail(rx_ring, skb);
#ifdef IXGBE_FCOE
/* do not attempt to pad FCoE Frames as this will disrupt DDP */
@@ -1560,33 +1573,17 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
}
/**
- * ixgbe_can_reuse_page - determine if we can reuse a page
- * @rx_buffer: pointer to rx_buffer containing the page we want to reuse
- *
- * Returns true if page can be reused in another Rx buffer
- **/
-static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer)
-{
- struct page *page = rx_buffer->page;
-
- /* if we are only owner of page and it is local we can reuse it */
- return likely(page_count(page) == 1) &&
- likely(page_to_nid(page) == numa_node_id());
-}
-
-/**
* ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: rx descriptor ring to store buffers on
* @old_buff: donor buffer to have page reused
*
- * Syncronizes page for reuse by the adapter
+ * Synchronizes page for reuse by the adapter
**/
static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *old_buff)
{
struct ixgbe_rx_buffer *new_buff;
u16 nta = rx_ring->next_to_alloc;
- u16 bufsz = ixgbe_rx_bufsz(rx_ring);
new_buff = &rx_ring->rx_buffer_info[nta];
@@ -1597,17 +1594,13 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
/* transfer page from old buffer to new buffer */
new_buff->page = old_buff->page;
new_buff->dma = old_buff->dma;
-
- /* flip page offset to other buffer and store to new_buff */
- new_buff->page_offset = old_buff->page_offset ^ bufsz;
+ new_buff->page_offset = old_buff->page_offset;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
- new_buff->page_offset, bufsz,
+ new_buff->page_offset,
+ ixgbe_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
-
- /* bump ref count on page before it is given to the stack */
- get_page(new_buff->page);
}
/**
@@ -1617,20 +1610,159 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
* @rx_desc: descriptor containing length of buffer written by hardware
* @skb: sk_buff to place the data into
*
- * This function is based on skb_add_rx_frag. I would have used that
- * function however it doesn't handle the truesize case correctly since we
- * are allocating more memory than might be used for a single receive.
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
**/
-static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
+static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
- struct sk_buff *skb, int size)
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- rx_buffer->page, rx_buffer->page_offset,
- size);
- skb->len += size;
- skb->data_len += size;
- skb->truesize += ixgbe_rx_bufsz(rx_ring);
+ struct page *page = rx_buffer->page;
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
+ ixgbe_rx_bufsz(rx_ring);
+#endif
+
+ if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
+
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+ /* we can reuse buffer as-is, just make sure it is local */
+ if (likely(page_to_nid(page) == numa_node_id()))
+ return true;
+
+ /* this page cannot be reused so discard it */
+ put_page(page);
+ return false;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_buffer->page_offset, size, truesize);
+
+ /* avoid re-using remote pages */
+ if (unlikely(page_to_nid(page) != numa_node_id()))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= truesize;
+
+ /*
+ * since we are the only owner of the page and we need to
+ * increment it, just set the value to 2 in order to avoid
+ * an unnecessary locked operation
+ */
+ atomic_set(&page->_count, 2);
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > last_offset)
+ return false;
+
+ /* bump ref count on page before it is given to the stack */
+ get_page(page);
+#endif
+
+ return true;
+}
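The reuse decision above boils down to offset arithmetic: on 4K pages the half-page offset is XOR-flipped between the two buffers, while on larger pages the offset walks forward by truesize until less than one buffer remains. A hedged sketch of just that arithmetic, with page size, buffer size and truesize passed in as parameters (an assumption for illustration):

/* Sketch of the Rx page-offset reuse arithmetic for small and large pages. */
#include <stdio.h>

/* small-page case: two half-page buffers share one page, offset is XOR-flipped */
static unsigned int flip_offset(unsigned int offset, unsigned int bufsz)
{
	return offset ^ bufsz;
}

/* large-page case: offset advances by truesize until less than bufsz is left */
static int advance_offset(unsigned int *offset, unsigned int truesize,
			  unsigned int page_size, unsigned int bufsz)
{
	*offset += truesize;
	return *offset <= page_size - bufsz;   /* non-zero => page can be reused */
}

int main(void)
{
	unsigned int off = 0;

	printf("4K page:  0 -> %u -> %u\n",
	       flip_offset(0, 2048), flip_offset(flip_offset(0, 2048), 2048));

	while (advance_offset(&off, 3072, 16384, 3072))
		printf("16K page: next buffer at offset %u\n", off);
	return 0;
}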
+
+static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ struct ixgbe_rx_buffer *rx_buffer;
+ struct sk_buff *skb;
+ struct page *page;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ page = rx_buffer->page;
+ prefetchw(page);
+
+ skb = rx_buffer->skb;
+
+ if (likely(!skb)) {
+ void *page_addr = page_address(page) +
+ rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ IXGBE_RX_HDR_SIZE);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ return NULL;
+ }
+
+ /*
+ * we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+
+ /*
+ * Delay unmapping of the first packet. It carries the
+ * header information, HW may still access the header
+ * after the writeback. Only unmap it when EOP is
+ * reached
+ */
+ if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
+ goto dma_sync;
+
+ IXGBE_CB(skb)->dma = rx_buffer->dma;
+ } else {
+ if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+ ixgbe_dma_sync_frag(rx_ring, skb);
+
+dma_sync:
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ ixgbe_rx_bufsz(rx_ring),
+ DMA_FROM_DEVICE);
+ }
+
+ /* pull page into skb */
+ if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ /* hand second half of page back to the ring */
+ ixgbe_reuse_rx_page(rx_ring, rx_buffer);
+ } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
+ /* the page has been released from the ring */
+ IXGBE_CB(skb)->page_released = true;
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of buffer_info */
+ rx_buffer->skb = NULL;
+ rx_buffer->dma = 0;
+ rx_buffer->page = NULL;
+
+ return skb;
}
/**
@@ -1653,16 +1785,14 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
struct ixgbe_adapter *adapter = q_vector->adapter;
- int ddp_bytes = 0;
+ int ddp_bytes;
+ unsigned int mss = 0;
#endif /* IXGBE_FCOE */
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
do {
- struct ixgbe_rx_buffer *rx_buffer;
union ixgbe_adv_rx_desc *rx_desc;
struct sk_buff *skb;
- struct page *page;
- u16 ntc;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
@@ -1670,9 +1800,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
cleaned_count = 0;
}
- ntc = rx_ring->next_to_clean;
- rx_desc = IXGBE_RX_DESC(rx_ring, ntc);
- rx_buffer = &rx_ring->rx_buffer_info[ntc];
+ rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
break;
@@ -1684,75 +1812,12 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
*/
rmb();
- page = rx_buffer->page;
- prefetchw(page);
-
- skb = rx_buffer->skb;
-
- if (likely(!skb)) {
- void *page_addr = page_address(page) +
- rx_buffer->page_offset;
-
- /* prefetch first cache line of first page */
- prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
-#endif
+ /* retrieve a buffer from the ring */
+ skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
- /* allocate a skb to store the frags */
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- IXGBE_RX_HDR_SIZE);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_rx_buff_failed++;
- break;
- }
-
- /*
- * we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
-
- /*
- * Delay unmapping of the first packet. It carries the
- * header information, HW may still access the header
- * after the writeback. Only unmap it when EOP is
- * reached
- */
- IXGBE_CB(skb)->dma = rx_buffer->dma;
- } else {
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- ixgbe_rx_bufsz(rx_ring),
- DMA_FROM_DEVICE);
- }
-
- /* pull page into skb */
- ixgbe_add_rx_frag(rx_ring, rx_buffer, skb,
- le16_to_cpu(rx_desc->wb.upper.length));
-
- if (ixgbe_can_reuse_page(rx_buffer)) {
- /* hand second half of page back to the ring */
- ixgbe_reuse_rx_page(rx_ring, rx_buffer);
- } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
- /* the page has been released from the ring */
- IXGBE_CB(skb)->page_released = true;
- } else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma,
- ixgbe_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE);
- }
-
- /* clear contents of buffer_info */
- rx_buffer->skb = NULL;
- rx_buffer->dma = 0;
- rx_buffer->page = NULL;
-
- ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
+ /* exit if we failed to retrieve a buffer */
+ if (!skb)
+ break;
cleaned_count++;
@@ -1775,6 +1840,20 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
/* if ddp, not passing to ULD unless for FCP_RSP or error */
if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
+ /* include DDPed FCoE data */
+ if (ddp_bytes > 0) {
+ if (!mss) {
+ mss = rx_ring->netdev->mtu -
+ sizeof(struct fcoe_hdr) -
+ sizeof(struct fc_frame_header) -
+ sizeof(struct fcoe_crc_eof);
+ if (mss > 512)
+ mss &= ~511;
+ }
+ total_rx_bytes += ddp_bytes;
+ total_rx_packets += DIV_ROUND_UP(ddp_bytes,
+ mss);
+ }
if (!ddp_bytes) {
dev_kfree_skb_any(skb);
continue;
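The DDP accounting moved into the loop derives an approximate MSS from the MTU, rounds it down to a multiple of 512, and then counts DIV_ROUND_UP(ddp_bytes, mss) packets. A small arithmetic sketch follows; the MTU and header sizes are hard-coded stand-ins for the real FCoE structure sizes and should be treated as assumptions.

/* Sketch of the FCoE DDP byte/packet accounting above; values are examples. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int mtu = 2500;                              /* arbitrary example */
	unsigned int fcoe_hdr = 14, fc_hdr = 24, crc_eof = 8; /* assumed sizes */
	unsigned int mss = mtu - fcoe_hdr - fc_hdr - crc_eof;
	unsigned int ddp_bytes = 65536;

	/* round mss down to a multiple of 512 so the packet estimate is stable */
	if (mss > 512)
		mss &= ~511U;

	printf("mss=%u packets=%u\n", mss, DIV_ROUND_UP(ddp_bytes, mss));
	return 0;
}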
@@ -1788,21 +1867,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
budget--;
} while (likely(budget));
-#ifdef IXGBE_FCOE
- /* include DDPed FCoE data */
- if (ddp_bytes > 0) {
- unsigned int mss;
-
- mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
- sizeof(struct fc_frame_header) -
- sizeof(struct fcoe_crc_eof);
- if (mss > 512)
- mss &= ~511;
- total_rx_bytes += ddp_bytes;
- total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
- }
-
-#endif /* IXGBE_FCOE */
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes;
@@ -2868,11 +2932,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
/* configure the packet buffer length */
-#if PAGE_SIZE > IXGBE_MAX_RXBUFFER
- srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-#else
srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-#endif
/* configure descriptor type */
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -2980,13 +3040,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
* total size of max desc * buf_len is not greater
* than 65536
*/
-#if (PAGE_SIZE <= 8192)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-#elif (PAGE_SIZE <= 16384)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-#else
- rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
-#endif
IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
}
@@ -3606,8 +3660,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
if (hw->mac.type == ixgbe_mac_82598EB)
netif_set_gso_max_size(adapter->netdev, 32768);
- hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
-
#ifdef IXGBE_FCOE
if (adapter->netdev->features & NETIF_F_FCOE_MTU)
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
@@ -3807,6 +3859,11 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
#ifdef CONFIG_IXGBE_DCB
ixgbe_configure_dcb(adapter);
#endif
+ /*
+ * We must restore virtualization before VLANs or else
+ * the VLVF registers will not be populated
+ */
+ ixgbe_configure_virtualization(adapter);
ixgbe_set_rx_mode(adapter->netdev);
ixgbe_restore_vlan(adapter);
@@ -3838,8 +3895,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
break;
}
- ixgbe_configure_virtualization(adapter);
-
#ifdef IXGBE_FCOE
/* configure FCoE L2 filters, redirection table, and Rx control */
ixgbe_configure_fcoe(adapter);
@@ -4130,27 +4185,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers
- * @rx_ring: ring to setup
- *
- * On many IA platforms the L1 cache has a critical stride of 4K, this
- * results in each receive buffer starting in the same cache set. To help
- * reduce the pressure on this cache set we can interleave the offsets so
- * that only every other buffer will be in the same cache set.
- **/
-static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring)
-{
- struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info;
- u16 i;
-
- for (i = 0; i < rx_ring->count; i += 2) {
- rx_buffer[0].page_offset = 0;
- rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring);
- rx_buffer = &rx_buffer[2];
- }
-}
-
-/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
**/
@@ -4195,8 +4229,6 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
memset(rx_ring->rx_buffer_info, 0, size);
- ixgbe_init_rx_page_offset(rx_ring);
-
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
@@ -4646,8 +4678,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
- ixgbe_init_rx_page_offset(rx_ring);
-
return 0;
err:
vfree(rx_ring->rx_buffer_info);
@@ -5530,8 +5560,9 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
{
u32 ssvpc;
- /* Do not perform spoof check for 82598 */
- if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ /* Do not perform spoof check for 82598 or if not in IOV mode */
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
+ adapter->num_vfs == 0)
return;
ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
@@ -5543,7 +5574,7 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
if (!ssvpc)
return;
- e_warn(drv, "%d Spoofed packets detected\n", ssvpc);
+ e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
}
/**
@@ -5874,9 +5905,12 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
- if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
- !(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
- return;
+ if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) {
+ if (unlikely(skb->no_fcs))
+ first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS;
+ if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
+ return;
+ }
} else {
u8 l4_hdr = 0;
switch (first->protocol) {
@@ -5938,7 +5972,6 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
- IXGBE_ADVTXD_DCMD_IFCS |
IXGBE_ADVTXD_DCMD_DEXT);
/* set HW vlan bit if vlan is present */
@@ -5958,6 +5991,10 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
#endif
cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
+ /* insert frame checksum */
+ if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS))
+ cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS);
+
return cmd_type;
}
@@ -6063,8 +6100,6 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
if (likely(!data_len))
break;
- if (unlikely(skb->no_fcs))
- cmd_type &= ~(cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS));
tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
i++;
@@ -6854,9 +6889,9 @@ static int ixgbe_set_features(struct net_device *netdev,
return 0;
}
-static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
+static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- unsigned char *addr,
+ const unsigned char *addr,
u16 flags)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -6893,7 +6928,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
static int ixgbe_ndo_fdb_del(struct ndmsg *ndm,
struct net_device *dev,
- unsigned char *addr)
+ const unsigned char *addr)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
int err = -EOPNOTSUPP;
@@ -7136,11 +7171,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_ioremap;
}
- for (i = 1; i <= 5; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- }
-
netdev->netdev_ops = &ixgbe_netdev_ops;
ixgbe_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
@@ -7419,6 +7449,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
e_err(probe, "failed to allocate sysfs resources\n");
#endif /* CONFIG_IXGBE_HWMON */
+#ifdef CONFIG_DEBUG_FS
+ ixgbe_dbg_adapter_init(adapter);
+#endif /* CONFIG_DEBUG_FS */
+
return 0;
err_register:
@@ -7453,6 +7487,10 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
+#ifdef CONFIG_DEBUG_FS
+ ixgbe_dbg_adapter_exit(adapter);
+#endif /*CONFIG_DEBUG_FS */
+
set_bit(__IXGBE_DOWN, &adapter->state);
cancel_work_sync(&adapter->service_task);
@@ -7527,7 +7565,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
goto skip_bad_vf_detection;
bdev = pdev->bus->self;
- while (bdev && (bdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT))
+ while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
bdev = bdev->bus->self;
if (!bdev)
@@ -7677,7 +7715,7 @@ static void ixgbe_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
}
-static struct pci_error_handlers ixgbe_err_handler = {
+static const struct pci_error_handlers ixgbe_err_handler = {
.error_detected = ixgbe_io_error_detected,
.slot_reset = ixgbe_io_slot_reset,
.resume = ixgbe_io_resume,
@@ -7708,6 +7746,10 @@ static int __init ixgbe_init_module(void)
pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
pr_info("%s\n", ixgbe_copyright);
+#ifdef CONFIG_DEBUG_FS
+ ixgbe_dbg_init();
+#endif /* CONFIG_DEBUG_FS */
+
#ifdef CONFIG_IXGBE_DCA
dca_register_notify(&dca_notifier);
#endif
@@ -7730,6 +7772,11 @@ static void __exit ixgbe_exit_module(void)
dca_unregister_notify(&dca_notifier);
#endif
pci_unregister_driver(&ixgbe_driver);
+
+#ifdef CONFIG_DEBUG_FS
+ ixgbe_dbg_exit();
+#endif /* CONFIG_DEBUG_FS */
+
rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 3456d5617143..39881cb17a4b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -960,7 +960,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
/* (Re)start the overflow check */
adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;
- adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps);
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
+ &adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
e_dev_err("ptp_clock_register failed\n");
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 4fea8716ab64..dce48bf64d96 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -346,6 +346,10 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
u32 vf)
{
+ /* VLAN 0 is a special case, don't allow it to be removed */
+ if (!vid && !add)
+ return 0;
+
return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
}
@@ -414,6 +418,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
VLAN_PRIO_SHIFT)), vf);
ixgbe_set_vmolr(hw, vf, false);
} else {
+ ixgbe_set_vf_vlan(adapter, true, 0, vf);
ixgbe_set_vmvir(adapter, 0, vf);
ixgbe_set_vmolr(hw, vf, true);
}
@@ -810,9 +815,9 @@ out:
return err;
}
-static int ixgbe_link_mbps(int internal_link_speed)
+static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
{
- switch (internal_link_speed) {
+ switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_100_FULL:
return 100;
case IXGBE_LINK_SPEED_1GB_FULL:
@@ -824,27 +829,30 @@ static int ixgbe_link_mbps(int internal_link_speed)
}
}
-static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
- int link_speed)
+static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf)
{
- int rf_dec, rf_int;
- u32 bcnrc_val;
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 bcnrc_val = 0;
+ u16 queue, queues_per_pool;
+ u16 tx_rate = adapter->vfinfo[vf].tx_rate;
+
+ if (tx_rate) {
+ /* start with base link speed value */
+ bcnrc_val = adapter->vf_rate_link_speed;
- if (tx_rate != 0) {
/* Calculate the rate factor values to set */
- rf_int = link_speed / tx_rate;
- rf_dec = (link_speed - (rf_int * tx_rate));
- rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
-
- bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
- bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) &
- IXGBE_RTTBCNRC_RF_INT_MASK);
- bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
- } else {
- bcnrc_val = 0;
+ bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
+ bcnrc_val /= tx_rate;
+
+ /* clear everything but the rate factor */
+ bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
+ IXGBE_RTTBCNRC_RF_DEC_MASK;
+
+ /* enable the rate scheduler */
+ bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
}
- IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
/*
* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
* register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
@@ -861,53 +869,68 @@ static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
break;
}
- IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+ /* determine how many queues per pool based on VMDq mask */
+ queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+
+ /* write value for all Tx queues belonging to VF */
+ for (queue = 0; queue < queues_per_pool; queue++) {
+ unsigned int reg_idx = (vf * queues_per_pool) + queue;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+ }
}
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
{
- int actual_link_speed, i;
- bool reset_rate = false;
+ int i;
/* VF Tx rate limit was not set */
- if (adapter->vf_rate_link_speed == 0)
+ if (!adapter->vf_rate_link_speed)
return;
- actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
- if (actual_link_speed != adapter->vf_rate_link_speed) {
- reset_rate = true;
+ if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) {
adapter->vf_rate_link_speed = 0;
dev_info(&adapter->pdev->dev,
- "Link speed has been changed. VF Transmit rate "
- "is disabled\n");
+ "Link speed has been changed. VF Transmit rate is disabled\n");
}
for (i = 0; i < adapter->num_vfs; i++) {
- if (reset_rate)
+ if (!adapter->vf_rate_link_speed)
adapter->vfinfo[i].tx_rate = 0;
- ixgbe_set_vf_rate_limit(&adapter->hw, i,
- adapter->vfinfo[i].tx_rate,
- actual_link_speed);
+ ixgbe_set_vf_rate_limit(adapter, i);
}
}
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- int actual_link_speed;
+ int link_speed;
+
+ /* verify VF is active */
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
- actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
- if ((vf >= adapter->num_vfs) || (!adapter->link_up) ||
- (tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
- ((tx_rate != 0) && (tx_rate <= 10)))
- /* rate limit cannot be set to 10Mb or less in 10Gb adapters */
+ /* verify link is up */
+ if (!adapter->link_up)
return -EINVAL;
- adapter->vf_rate_link_speed = actual_link_speed;
- adapter->vfinfo[vf].tx_rate = (u16)tx_rate;
- ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+ /* verify we are linked at 10Gbps */
+ link_speed = ixgbe_link_mbps(adapter);
+ if (link_speed != 10000)
+ return -EINVAL;
+
+ /* rate limit cannot be less than 10Mb/s or greater than link speed */
+ if (tx_rate && ((tx_rate <= 10) || (tx_rate > link_speed)))
+ return -EINVAL;
+
+ /* store values */
+ adapter->vf_rate_link_speed = link_speed;
+ adapter->vfinfo[vf].tx_rate = tx_rate;
+
+ /* update hardware configuration */
+ ixgbe_set_vf_rate_limit(adapter, vf);
return 0;
}
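The reworked rate limiter computes the RTTBCNRC value as a fixed-point ratio of the stored link speed to the requested VF rate and then replicates it across every queue in the VF's pool. A standalone sketch of the fixed-point part is below; the shift, mask and enable-bit constants mirror my reading of the register layout and are assumptions, not authoritative values.

/* Sketch of the fixed-point rate-factor computation used for VF Tx limiting. */
#include <stdio.h>
#include <stdint.h>

#define RF_INT_SHIFT 14                    /* assumed: 14 fractional bits */
#define RF_DEC_MASK  0x00003FFFu
#define RF_INT_MASK  (RF_DEC_MASK << RF_INT_SHIFT)
#define RS_ENA       0x80000000u           /* assumed rate-scheduler enable bit */

static uint32_t rate_factor(uint32_t link_mbps, uint32_t tx_rate_mbps)
{
	uint32_t val;

	if (!tx_rate_mbps)
		return 0;                  /* zero disables the limiter */

	/* link_speed / tx_rate as a ratio scaled by 2^14 (fixed point) */
	val = (link_mbps << RF_INT_SHIFT) / tx_rate_mbps;
	val &= RF_INT_MASK | RF_DEC_MASK;
	return val | RS_ENA;
}

int main(void)
{
	printf("10G link, 1000 Mb/s cap -> 0x%08x\n", rate_factor(10000, 1000));
	printf("10G link, no cap        -> 0x%08x\n", rate_factor(10000, 0));
	return 0;
}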
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 418af827b230..da17ccf5c09d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -272,5 +272,6 @@ struct ixgbe_adv_tx_context_desc {
/* Error Codes */
#define IXGBE_ERR_INVALID_MAC_ADDR -1
#define IXGBE_ERR_RESET_FAILED -2
+#define IXGBE_ERR_INVALID_ARGUMENT -3
#endif /* _IXGBEVF_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 98cadb0c4dab..383b4e1cd175 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -101,7 +101,9 @@ struct ixgbevf_ring {
/* Supported Rx Buffer Sizes */
#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
-#define IXGBEVF_RXBUFFER_2048 2048
+#define IXGBEVF_RXBUFFER_3K 3072
+#define IXGBEVF_RXBUFFER_7K 7168
+#define IXGBEVF_RXBUFFER_15K 15360
#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
@@ -259,6 +261,11 @@ enum ixbgevf_state_t {
__IXGBEVF_DOWN
};
+struct ixgbevf_cb {
+ struct sk_buff *prev;
+};
+#define IXGBE_CB(skb) ((struct ixgbevf_cb *)(skb)->cb)
+
enum ixgbevf_boards {
board_82599_vf,
board_X540_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 60ef64587412..0ee9bd4819f4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -263,6 +263,8 @@ cont_loop:
tx_ring->total_bytes += total_bytes;
tx_ring->total_packets += total_packets;
u64_stats_update_end(&tx_ring->syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
return count < tx_ring->count;
}
@@ -272,12 +274,10 @@ cont_loop:
* @q_vector: structure containing interrupt and ring information
* @skb: packet to send up
* @status: hardware indication of status of receive
- * @rx_ring: rx descriptor ring (for a specific queue) to setup
* @rx_desc: rx descriptor
**/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
struct sk_buff *skb, u8 status,
- struct ixgbevf_ring *ring,
union ixgbe_adv_rx_desc *rx_desc)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
@@ -433,11 +433,21 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
if (!(staterr & IXGBE_RXD_STAT_EOP)) {
skb->next = next_buffer->skb;
- skb->next->prev = skb;
+ IXGBE_CB(skb->next)->prev = skb;
adapter->non_eop_descs++;
goto next_desc;
}
+ /* we should not be chaining buffers, if we did drop the skb */
+ if (IXGBE_CB(skb)->prev) {
+ do {
+ struct sk_buff *this = skb;
+ skb = IXGBE_CB(skb)->prev;
+ dev_kfree_skb(this);
+ } while (skb);
+ goto next_desc;
+ }
+
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
dev_kfree_skb_irq(skb);
@@ -461,7 +471,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
}
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
- ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
+ ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
next_desc:
rx_desc->wb.upper.status_error = 0;
@@ -490,6 +500,8 @@ next_desc:
rx_ring->total_packets += total_rx_packets;
rx_ring->total_bytes += total_rx_bytes;
u64_stats_update_end(&rx_ring->syncp);
+ q_vector->rx.total_packets += total_rx_packets;
+ q_vector->rx.total_bytes += total_rx_bytes;
return !!budget;
}
@@ -716,40 +728,15 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
}
}
-static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
+static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
struct ixgbevf_adapter *adapter = data;
struct ixgbe_hw *hw = &adapter->hw;
- u32 msg;
- bool got_ack = false;
-
- if (!hw->mbx.ops.check_for_ack(hw))
- got_ack = true;
- if (!hw->mbx.ops.check_for_msg(hw)) {
- hw->mbx.ops.read(hw, &msg, 1);
+ hw->mac.get_link_status = 1;
- if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies + 1));
-
- if (msg & IXGBE_VT_MSGTYPE_NACK)
- pr_warn("Last Request of type %2.2x to PF Nacked\n",
- msg & 0xFF);
- /*
- * Restore the PFSTS bit in case someone is polling for a
- * return message from the PF
- */
- hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
- }
-
- /*
- * checking for the ack clears the PFACK bit. Place
- * it back in the v2p_mailbox cache so that anyone
- * polling for an ack will not miss it
- */
- if (got_ack)
- hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
@@ -899,10 +886,10 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
}
err = request_irq(adapter->msix_entries[vector].vector,
- &ixgbevf_msix_mbx, 0, netdev->name, adapter);
+ &ixgbevf_msix_other, 0, netdev->name, adapter);
if (err) {
hw_dbg(&adapter->hw,
- "request_irq for msix_mbx failed: %d\n", err);
+ "request_irq for msix_other failed: %d\n", err);
goto free_queue_irqs;
}
@@ -1057,15 +1044,46 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
- srrctl |= IXGBEVF_RXBUFFER_2048 >>
- IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- else
- srrctl |= rx_ring->rx_buf_len >>
- IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
+static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ int i;
+ u16 rx_buf_len;
+
+ /* notify the PF of our intent to use this size of frame */
+ ixgbevf_rlpml_set_vf(hw, max_frame);
+
+ /* PF will allow an extra 4 bytes past for vlan tagged frames */
+ max_frame += VLAN_HLEN;
+
+ /*
+ * Make best use of allocation by using all but 1K of a
+ * power of 2 allocation that will be used for skb->head.
+ */
+ if ((hw->mac.type == ixgbe_mac_X540_vf) &&
+ (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
+ rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ else if (max_frame <= IXGBEVF_RXBUFFER_3K)
+ rx_buf_len = IXGBEVF_RXBUFFER_3K;
+ else if (max_frame <= IXGBEVF_RXBUFFER_7K)
+ rx_buf_len = IXGBEVF_RXBUFFER_7K;
+ else if (max_frame <= IXGBEVF_RXBUFFER_15K)
+ rx_buf_len = IXGBEVF_RXBUFFER_15K;
+ else
+ rx_buf_len = IXGBEVF_MAX_RXBUFFER;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+}
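Instead of aligning max_frame to 1K as before, the VF driver now buckets the receive buffer into a few fixed sizes chosen to waste little of a power-of-two skb allocation. A sketch of just the threshold selection, with the X540 special case reduced to a boolean parameter (an assumption for illustration):

/* Sketch of the Rx buffer-length bucketing introduced for ixgbevf. */
#include <stdio.h>

#define RXBUF_VLAN 1522    /* MAXIMUM_ETHERNET_VLAN_SIZE */
#define RXBUF_3K   3072
#define RXBUF_7K   7168
#define RXBUF_15K  15360
#define RXBUF_MAX  16384

static unsigned int pick_rx_buf_len(unsigned int max_frame, int is_x540_vf)
{
	/* special case kept from the driver: X540 VFs with standard frames
	 * stay on the small buffer */
	if (is_x540_vf && max_frame <= RXBUF_VLAN)
		return RXBUF_VLAN;
	if (max_frame <= RXBUF_3K)
		return RXBUF_3K;
	if (max_frame <= RXBUF_7K)
		return RXBUF_7K;
	if (max_frame <= RXBUF_15K)
		return RXBUF_15K;
	return RXBUF_MAX;
}

int main(void)
{
	unsigned int frames[] = { 1518, 4096, 9018, 16000 };
	int i;

	for (i = 0; i < 4; i++)
		printf("max_frame=%u -> rx_buf_len=%u\n",
		       frames[i], pick_rx_buf_len(frames[i], 0));
	return 0;
}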
+
/**
* ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
* @adapter: board private structure
@@ -1076,18 +1094,14 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
u64 rdba;
struct ixgbe_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i, j;
u32 rdlen;
- int rx_buf_len;
/* PSRTYPE must be initialized in 82599 */
IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
- if (netdev->mtu <= ETH_DATA_LEN)
- rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- else
- rx_buf_len = ALIGN(max_frame, 1024);
+
+ /* set_rx_buffer_len must be called before ring initialization */
+ ixgbevf_set_rx_buffer_len(adapter);
rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
@@ -1103,7 +1117,6 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
adapter->rx_ring[i].head = IXGBE_VFRDH(j);
adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
- adapter->rx_ring[i].rx_buf_len = rx_buf_len;
ixgbevf_configure_srrctl(adapter, j);
}
@@ -1113,36 +1126,47 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ if (!hw->mac.ops.set_vfta)
+ return -EOPNOTSUPP;
spin_lock(&adapter->mbx_lock);
/* add VID to filter table */
- if (hw->mac.ops.set_vfta)
- hw->mac.ops.set_vfta(hw, vid, 0, true);
+ err = hw->mac.ops.set_vfta(hw, vid, 0, true);
spin_unlock(&adapter->mbx_lock);
+ /* translate error return types so error makes sense */
+ if (err == IXGBE_ERR_MBX)
+ return -EIO;
+
+ if (err == IXGBE_ERR_INVALID_ARGUMENT)
+ return -EACCES;
+
set_bit(vid, adapter->active_vlans);
- return 0;
+ return err;
}
static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ int err = -EOPNOTSUPP;
spin_lock(&adapter->mbx_lock);
/* remove VID from filter table */
if (hw->mac.ops.set_vfta)
- hw->mac.ops.set_vfta(hw, vid, 0, false);
+ err = hw->mac.ops.set_vfta(hw, vid, 0, false);
spin_unlock(&adapter->mbx_lock);
clear_bit(vid, adapter->active_vlans);
- return 0;
+ return err;
}
static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
@@ -1308,6 +1332,25 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
+static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int api[] = { ixgbe_mbox_api_10,
+ ixgbe_mbox_api_unknown };
+ int err = 0, idx = 0;
+
+ spin_lock(&adapter->mbx_lock);
+
+ while (api[idx] != ixgbe_mbox_api_unknown) {
+ err = ixgbevf_negotiate_api_version(hw, api[idx]);
+ if (!err)
+ break;
+ idx++;
+ }
+
+ spin_unlock(&adapter->mbx_lock);
+}
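ixgbevf_negotiate_api() walks a priority-ordered list of mailbox API versions until the PF accepts one. The generic pattern is sketched below; the enum values and the try_version() callback are placeholders, not real driver symbols.

/* Generic sketch of negotiating the highest mutually supported API version. */
#include <stdio.h>

enum api_rev { API_11, API_10, API_UNKNOWN };

/* placeholder for the real negotiation call; pretend the peer only speaks API_10 */
static int try_version(enum api_rev rev)
{
	return rev == API_10 ? 0 : -1;
}

int main(void)
{
	enum api_rev api[] = { API_11, API_10, API_UNKNOWN };
	int idx = 0, err = -1;

	while (api[idx] != API_UNKNOWN) {
		err = try_version(api[idx]);
		if (!err)
			break;
		idx++;
	}

	if (!err)
		printf("negotiated API index %d\n", idx);
	else
		printf("no common API version\n");
	return 0;
}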
+
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -1315,7 +1358,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
int i, j = 0;
int num_rx_rings = adapter->num_rx_queues;
u32 txdctl, rxdctl;
- u32 msg[2];
for (i = 0; i < adapter->num_tx_queues; i++) {
j = adapter->tx_ring[i].reg_idx;
@@ -1356,10 +1398,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
}
- msg[0] = IXGBE_VF_SET_LPE;
- msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
- hw->mbx.ops.write_posted(hw, msg, 2);
-
spin_unlock(&adapter->mbx_lock);
clear_bit(__IXGBEVF_DOWN, &adapter->state);
@@ -1371,6 +1409,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
ixgbevf_save_reset_stats(adapter);
ixgbevf_init_last_counter_stats(adapter);
+ hw->mac.get_link_status = 1;
mod_timer(&adapter->watchdog_timer, jiffies);
}
@@ -1378,6 +1417,8 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ ixgbevf_negotiate_api(adapter);
+
ixgbevf_configure(adapter);
ixgbevf_up_complete(adapter);
@@ -1419,7 +1460,7 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
rx_buffer_info->skb = NULL;
do {
struct sk_buff *this = skb;
- skb = skb->prev;
+ skb = IXGBE_CB(skb)->prev;
dev_kfree_skb(this);
} while (skb);
}
@@ -1547,8 +1588,6 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
- struct ixgbe_hw *hw = &adapter->hw;
-
WARN_ON(in_interrupt());
while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
@@ -1561,10 +1600,8 @@ void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
* watchdog task will continue to schedule reset tasks until
* the PF is up and running.
*/
- if (!hw->mac.ops.reset_hw(hw)) {
- ixgbevf_down(adapter);
- ixgbevf_up(adapter);
- }
+ ixgbevf_down(adapter);
+ ixgbevf_up(adapter);
clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}
@@ -1867,6 +1904,22 @@ err_set_interrupt:
}
/**
+ * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @adapter: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
+{
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+
+ ixgbevf_free_q_vectors(adapter);
+ ixgbevf_reset_interrupt_capability(adapter);
+}
+
+/**
* ixgbevf_sw_init - Initialize general software structures
* (struct ixgbevf_adapter)
* @adapter: board private structure to initialize
@@ -2351,6 +2404,8 @@ static int ixgbevf_open(struct net_device *netdev)
}
}
+ ixgbevf_negotiate_api(adapter);
+
/* allocate transmit descriptors */
err = ixgbevf_setup_all_tx_resources(adapter);
if (err)
@@ -2860,10 +2915,8 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
- u32 msg[2];
if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
@@ -2877,35 +2930,91 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
- if (!netif_running(netdev)) {
- msg[0] = IXGBE_VF_SET_LPE;
- msg[1] = max_frame;
- hw->mbx.ops.write_posted(hw, msg, 2);
- }
-
if (netif_running(netdev))
ixgbevf_reinit_locked(adapter);
return 0;
}
-static void ixgbevf_shutdown(struct pci_dev *pdev)
+static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_PM
+ int retval = 0;
+#endif
netif_device_detach(netdev);
if (netif_running(netdev)) {
+ rtnl_lock();
ixgbevf_down(adapter);
ixgbevf_free_irq(adapter);
ixgbevf_free_all_tx_resources(adapter);
ixgbevf_free_all_rx_resources(adapter);
+ rtnl_unlock();
}
- pci_save_state(pdev);
+ ixgbevf_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_PM
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+#endif
pci_disable_device(pdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ixgbevf_resume(struct pci_dev *pdev)
+{
+ struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ u32 err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ /*
+ * pci_restore_state clears dev->state_saved so call
+ * pci_save_state to restore it.
+ */
+ pci_save_state(pdev);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ rtnl_lock();
+ err = ixgbevf_init_interrupt_scheme(adapter);
+ rtnl_unlock();
+ if (err) {
+ dev_err(&pdev->dev, "Cannot initialize interrupts\n");
+ return err;
+ }
+
+ ixgbevf_reset(adapter);
+
+ if (netif_running(netdev)) {
+ err = ixgbevf_open(netdev);
+ if (err)
+ return err;
+ }
+
+ netif_device_attach(netdev);
+
+ return err;
+}
+
+#endif /* CONFIG_PM */
+static void ixgbevf_shutdown(struct pci_dev *pdev)
+{
+ ixgbevf_suspend(pdev, PMSG_SUSPEND);
}
static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
@@ -2946,7 +3055,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
return stats;
}
-static const struct net_device_ops ixgbe_netdev_ops = {
+static const struct net_device_ops ixgbevf_netdev_ops = {
.ndo_open = ixgbevf_open,
.ndo_stop = ixgbevf_close,
.ndo_start_xmit = ixgbevf_xmit_frame,
@@ -2962,7 +3071,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
- dev->netdev_ops = &ixgbe_netdev_ops;
+ dev->netdev_ops = &ixgbevf_netdev_ops;
ixgbevf_set_ethtool_ops(dev);
dev->watchdog_timeo = 5 * HZ;
}
@@ -3131,6 +3240,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
return 0;
err_register:
+ ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
ixgbevf_reset_interrupt_capability(adapter);
iounmap(hw->hw_addr);
@@ -3168,6 +3278,7 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ ixgbevf_clear_interrupt_scheme(adapter);
ixgbevf_reset_interrupt_capability(adapter);
iounmap(adapter->hw.hw_addr);
@@ -3256,7 +3367,7 @@ static void ixgbevf_io_resume(struct pci_dev *pdev)
}
/* PCI Error Recovery (ERS) */
-static struct pci_error_handlers ixgbevf_err_handler = {
+static const struct pci_error_handlers ixgbevf_err_handler = {
.error_detected = ixgbevf_io_error_detected,
.slot_reset = ixgbevf_io_slot_reset,
.resume = ixgbevf_io_resume,
@@ -3267,6 +3378,11 @@ static struct pci_driver ixgbevf_driver = {
.id_table = ixgbevf_pci_tbl,
.probe = ixgbevf_probe,
.remove = __devexit_p(ixgbevf_remove),
+#ifdef CONFIG_PM
+ /* Power Management Hooks */
+ .suspend = ixgbevf_suspend,
+ .resume = ixgbevf_resume,
+#endif
.shutdown = ixgbevf_shutdown,
.err_handler = &ixgbevf_err_handler
};
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index 9c955900fe64..d5028ddf4b31 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -86,14 +86,17 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
+ s32 ret_val = -IXGBE_ERR_MBX;
+
+ if (!mbx->ops.read)
+ goto out;
ret_val = ixgbevf_poll_for_msg(hw);
/* if ack received read message, otherwise we timed out */
if (!ret_val)
ret_val = mbx->ops.read(hw, msg, size);
-
+out:
return ret_val;
}
@@ -109,7 +112,11 @@ static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val;
+ s32 ret_val = -IXGBE_ERR_MBX;
+
+ /* exit if either we can't write or there isn't a defined timeout */
+ if (!mbx->ops.write || !mbx->timeout)
+ goto out;
/* send msg */
ret_val = mbx->ops.write(hw, msg, size);
@@ -117,7 +124,7 @@ static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
/* if msg sent wait until we receive an ack */
if (!ret_val)
ret_val = ixgbevf_poll_for_ack(hw);
-
+out:
return ret_val;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index cf9131c5c115..946ce86f337f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -76,12 +76,29 @@
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+/* definitions to support mailbox API version negotiation */
+
+/*
+ * each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum ixgbe_pfvf_api_rev {
+ ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
+ ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
+ /* This value should always be last */
+ ixgbe_mbox_api_unknown, /* indicates that API version is not known */
+};
+
+/* mailbox API, legacy requests */
#define IXGBE_VF_RESET 0x01 /* VF requests reset */
#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
-#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
-#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+
+/* mailbox API, version 1.0 VF requests */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index ec89b86f7ca4..0c7447e6fcc8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -79,6 +79,9 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
/* Call adapter stop to disable tx/rx and clear interrupts */
hw->mac.ops.stop_adapter(hw);
+ /* reset the api version */
+ hw->api_version = ixgbe_mbox_api_10;
+
IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
IXGBE_WRITE_FLUSH(hw);
@@ -97,7 +100,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
msgbuf[0] = IXGBE_VF_RESET;
mbx->ops.write_posted(hw, msgbuf, 1);
- msleep(10);
+ mdelay(10);
/* set our "perm_addr" based on info provided by PF */
/* also set up the mc_filter_type which is piggy backed
@@ -346,16 +349,32 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on)
{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
+ s32 err;
msgbuf[0] = IXGBE_VF_SET_VLAN;
msgbuf[1] = vlan;
/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
- ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
+ err = mbx->ops.write_posted(hw, msgbuf, 2);
+ if (err)
+ goto mbx_err;
- return 0;
+ err = mbx->ops.read_posted(hw, msgbuf, 2);
+ if (err)
+ goto mbx_err;
+
+ /* remove extra bits from the message */
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+ msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
+
+ if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
+ err = IXGBE_ERR_INVALID_ARGUMENT;
+
+mbx_err:
+ return err;
}
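Aside: the sequence above (write_posted, read_posted, strip the CTS and MSGINFO bits, then require the opcode echoed back with the ACK bit set) is the general VF-to-PF request pattern in this driver. Below is a minimal standalone sketch of that handshake, with the PF side stubbed out and stand-in bit values rather than the real IXGBE_VT_* constants:

#include <stdint.h>
#include <stdio.h>

#define OP_SET_VLAN    0x04u        /* stand-in for IXGBE_VF_SET_VLAN */
#define MSGTYPE_ACK    0x80000000u  /* stand-in ACK bit */
#define MSGTYPE_CTS    0x20000000u  /* stand-in CTS bit */
#define MSGINFO_SHIFT  16

/* stub PF: acknowledge the request and echo the opcode back */
static uint32_t pf_reply(uint32_t req)
{
	return (req & 0xffu) | MSGTYPE_ACK | MSGTYPE_CTS;
}

int main(void)
{
	uint32_t msg = OP_SET_VLAN | (1u << MSGINFO_SHIFT);  /* "add" flag */
	uint32_t reply = pf_reply(msg);  /* stands in for write_posted + read_posted */

	/* remove extra bits from the reply, as the driver does */
	reply &= ~MSGTYPE_CTS;
	reply &= ~(0xffu << MSGINFO_SHIFT);

	if (reply == (OP_SET_VLAN | MSGTYPE_ACK))
		printf("PF acknowledged the request\n");
	else
		printf("request rejected or lost\n");
	return 0;
}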
/**
@@ -389,20 +408,23 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
bool *link_up,
bool autoneg_wait_to_complete)
{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ s32 ret_val = 0;
u32 links_reg;
+ u32 in_msg = 0;
- if (!(hw->mbx.ops.check_for_rst(hw))) {
- *link_up = false;
- *speed = 0;
- return -1;
- }
+ /* If we were hit with a reset, drop the link */
+ if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
+ mac->get_link_status = true;
- links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ if (!mac->get_link_status)
+ goto out;
- if (links_reg & IXGBE_LINKS_UP)
- *link_up = true;
- else
- *link_up = false;
+ /* if link status is down there is no point in checking whether the PF is up */
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
switch (links_reg & IXGBE_LINKS_SPEED_82599) {
case IXGBE_LINKS_SPEED_10G_82599:
@@ -416,7 +438,79 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
break;
}
- return 0;
+ /* if the read failed it could just be a mailbox collision; best to wait
+ * until we are called again, and do not report an error */
+ if (mbx->ops.read(hw, &in_msg, 1))
+ goto out;
+
+ if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
+ /* msg is not CTS but is a NACK, so we must have lost CTS status */
+ if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+ ret_val = -1;
+ goto out;
+ }
+
+ /* the PF is talking; if we timed out in the past, we reinit */
+ if (!mbx->timeout) {
+ ret_val = -1;
+ goto out;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link */
+ mac->get_link_status = false;
+
+out:
+ *link_up = !mac->get_link_status;
+ return ret_val;
+}
+
+/**
+ * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ **/
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+ u32 msgbuf[2];
+
+ msgbuf[0] = IXGBE_VF_SET_LPE;
+ msgbuf[1] = max_size;
+ ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
+}
+
+/**
+ * ixgbevf_negotiate_api_version - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version
+ **/
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+{
+ int err;
+ u32 msg[3];
+
+ /* Negotiate the mailbox API version */
+ msg[0] = IXGBE_VF_API_NEGOTIATE;
+ msg[1] = api;
+ msg[2] = 0;
+ err = hw->mbx.ops.write_posted(hw, msg, 3);
+
+ if (!err)
+ err = hw->mbx.ops.read_posted(hw, msg, 3);
+
+ if (!err) {
+ msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /* Store value and return 0 on success */
+ if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
+ hw->api_version = api;
+ return 0;
+ }
+
+ err = IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ return err;
}
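Aside: ixgbevf_negotiate_api(), called from ixgbevf_open() earlier in this patch, is not shown in this hunk; one plausible calling pattern for ixgbevf_negotiate_api_version() is to try a newer mailbox API first and fall back to 1.0 when the PF rejects it. A standalone sketch of that fallback loop, with the mailbox negotiation stubbed out (illustrative only, not the driver's code):

#include <stdio.h>

enum api_rev { api_10, api_20, api_unknown };  /* mirrors ixgbe_pfvf_api_rev */

/* stub negotiation: pretend the PF only ACKs API 1.0 */
static int negotiate(enum api_rev api)
{
	return api == api_10 ? 0 : -1;
}

int main(void)
{
	static const enum api_rev try_order[] = { api_20, api_10 };
	enum api_rev negotiated = api_unknown;
	unsigned int i;

	for (i = 0; i < sizeof(try_order) / sizeof(try_order[0]); i++) {
		if (!negotiate(try_order[i])) {
			negotiated = try_order[i];
			break;
		}
	}
	printf("negotiated mailbox API: %d\n", negotiated);
	return 0;
}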
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 25c951daee5d..47f11a584d8c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -137,6 +137,8 @@ struct ixgbe_hw {
u8 revision_id;
bool adapter_stopped;
+
+ int api_version;
};
struct ixgbevf_hw_stats {
@@ -170,5 +172,7 @@ struct ixgbevf_info {
const struct ixgbe_mac_operations *mac_ops;
};
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
#endif /* __IXGBE_VF_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index c8fef4353021..3d1899ff1076 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -40,6 +40,7 @@
#include <linux/mlx4/cmd.h>
#include <linux/semaphore.h>
+#include <rdma/ib_smi.h>
#include <asm/io.h>
@@ -394,7 +395,8 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
int ret;
- down(&priv->cmd.slave_sem);
+ mutex_lock(&priv->cmd.slave_cmd_mutex);
+
vhcr->in_param = cpu_to_be64(in_param);
vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
vhcr->in_modifier = cpu_to_be32(in_modifier);
@@ -402,6 +404,7 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
vhcr->status = 0;
vhcr->flags = !!(priv->cmd.use_events) << 6;
+
if (mlx4_is_master(dev)) {
ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
if (!ret) {
@@ -438,7 +441,8 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
mlx4_err(dev, "failed execution of VHCR_POST command"
"opcode 0x%x\n", op);
}
- up(&priv->cmd.slave_sem);
+
+ mutex_unlock(&priv->cmd.slave_cmd_mutex);
return ret;
}
@@ -627,6 +631,162 @@ static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
+static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox)
+{
+ struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
+ struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
+ int err;
+ int i;
+
+ if (index & 0x1f)
+ return -EINVAL;
+
+ in_mad->attr_mod = cpu_to_be32(index / 32);
+
+ err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+
+ for (i = 0; i < 32; ++i)
+ pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);
+
+ return err;
+}
+
+static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox)
+{
+ int i;
+ int err;
+
+ for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
+ err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+#define PORT_CAPABILITY_LOCATION_IN_SMP 20
+#define PORT_STATE_OFFSET 32
+
+static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
+{
+ if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
+ return IB_PORT_ACTIVE;
+ else
+ return IB_PORT_DOWN;
+}
+
+static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct ib_smp *smp = inbox->buf;
+ u32 index;
+ u8 port;
+ u16 *table;
+ int err;
+ int vidx, pidx;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct ib_smp *outsmp = outbox->buf;
+ __be16 *outtab = (__be16 *)(outsmp->data);
+ __be32 slave_cap_mask;
+ __be64 slave_node_guid;
+ port = vhcr->in_modifier;
+
+ if (smp->base_version == 1 &&
+ smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
+ smp->class_version == 1) {
+ if (smp->method == IB_MGMT_METHOD_GET) {
+ if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
+ index = be32_to_cpu(smp->attr_mod);
+ if (port < 1 || port > dev->caps.num_ports)
+ return -EINVAL;
+ table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+ /* need to get the full pkey table because the paravirtualized
+ * pkeys may be scattered among several pkey blocks.
+ */
+ err = get_full_pkey_table(dev, port, table, inbox, outbox);
+ if (!err) {
+ for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
+ pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
+ outtab[vidx % 32] = cpu_to_be16(table[pidx]);
+ }
+ }
+ kfree(table);
+ return err;
+ }
+ if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
+ /* get the slave-specific caps: */
+ /* do the command */
+ err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
+ vhcr->in_modifier, vhcr->op_modifier,
+ vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
+ /* modify the response for slaves */
+ if (!err && slave != mlx4_master_func_num(dev)) {
+ u8 *state = outsmp->data + PORT_STATE_OFFSET;
+
+ *state = (*state & 0xf0) | vf_port_state(dev, port, slave);
+ slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
+ memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
+ }
+ return err;
+ }
+ if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
+ /* compute slave's gid block */
+ smp->attr_mod = cpu_to_be32(slave / 8);
+ /* execute cmd */
+ err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
+ vhcr->in_modifier, vhcr->op_modifier,
+ vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
+ if (!err) {
+ /* if needed, move slave gid to index 0 */
+ if (slave % 8)
+ memcpy(outsmp->data,
+ outsmp->data + (slave % 8) * 8, 8);
+ /* delete all other gids */
+ memset(outsmp->data + 8, 0, 56);
+ }
+ return err;
+ }
+ if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
+ err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
+ vhcr->in_modifier, vhcr->op_modifier,
+ vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
+ if (!err) {
+ slave_node_guid = mlx4_get_slave_node_guid(dev, slave);
+ memcpy(outsmp->data + 12, &slave_node_guid, 8);
+ }
+ return err;
+ }
+ }
+ }
+ if (slave != mlx4_master_func_num(dev) &&
+ ((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) ||
+ (smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
+ smp->method == IB_MGMT_METHOD_SET))) {
+ mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, "
+ "class 0x%x, method 0x%x for attr 0x%x. Rejecting\n",
+ slave, smp->method, smp->mgmt_class,
+ be16_to_cpu(smp->attr_id));
+ return -EPERM;
+ }
+ /*default:*/
+ return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
+ vhcr->in_modifier, vhcr->op_modifier,
+ vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
+}
+
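Aside: the PKEY handling above amounts to fetching the full physical table in 32-entry blocks and then remapping every virtual index of the requested block through virt2phys_pkey. A standalone sketch of just that index arithmetic follows; table sizes, contents and the mapping are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define PKEY_BLOCK 32

int main(void)
{
	uint16_t phys_table[64];   /* pretend physical pkey table */
	int virt2phys[64];         /* pretend per-slave index mapping */
	uint16_t out[PKEY_BLOCK];
	int block = 1;             /* guest asked for virtual indexes 32..63 */
	int vidx;

	for (vidx = 0; vidx < 64; vidx++) {
		phys_table[vidx] = (uint16_t)(0x8000 | vidx);  /* fake pkeys */
		virt2phys[vidx] = 63 - vidx;                   /* fake scattered mapping */
	}

	/* copy the requested virtual block, one remapped entry at a time */
	for (vidx = block * PKEY_BLOCK; vidx < (block + 1) * PKEY_BLOCK; vidx++)
		out[vidx % PKEY_BLOCK] = phys_table[virt2phys[vidx]];

	printf("virtual pkey index 32 -> physical index %d -> 0x%04x\n",
	       virt2phys[32], (unsigned int)out[0]);
	return 0;
}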
int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -950,7 +1110,7 @@ static struct mlx4_cmd_info cmd_info[] = {
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = mlx4_GEN_QP_wrapper
+ .wrapper = mlx4_INIT2INIT_QP_wrapper
},
{
.opcode = MLX4_CMD_INIT2RTR_QP,
@@ -968,7 +1128,7 @@ static struct mlx4_cmd_info cmd_info[] = {
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = mlx4_GEN_QP_wrapper
+ .wrapper = mlx4_RTR2RTS_QP_wrapper
},
{
.opcode = MLX4_CMD_RTS2RTS_QP,
@@ -977,7 +1137,7 @@ static struct mlx4_cmd_info cmd_info[] = {
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = mlx4_GEN_QP_wrapper
+ .wrapper = mlx4_RTS2RTS_QP_wrapper
},
{
.opcode = MLX4_CMD_SQERR2RTS_QP,
@@ -986,7 +1146,7 @@ static struct mlx4_cmd_info cmd_info[] = {
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = mlx4_GEN_QP_wrapper
+ .wrapper = mlx4_SQERR2RTS_QP_wrapper
},
{
.opcode = MLX4_CMD_2ERR_QP,
@@ -1013,7 +1173,7 @@ static struct mlx4_cmd_info cmd_info[] = {
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = mlx4_GEN_QP_wrapper
+ .wrapper = mlx4_SQD2SQD_QP_wrapper
},
{
.opcode = MLX4_CMD_SQD2RTS_QP,
@@ -1022,7 +1182,7 @@ static struct mlx4_cmd_info cmd_info[] = {
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = mlx4_GEN_QP_wrapper
+ .wrapper = mlx4_SQD2RTS_QP_wrapper
},
{
.opcode = MLX4_CMD_2RST_QP,
@@ -1061,6 +1221,24 @@ static struct mlx4_cmd_info cmd_info[] = {
.wrapper = mlx4_GEN_QP_wrapper
},
{
+ .opcode = MLX4_CMD_CONF_SPECIAL_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL, /* XXX verify: only demux can do this */
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_MAD_IFC,
+ .has_inbox = true,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_MAD_IFC_wrapper
+ },
+ {
.opcode = MLX4_CMD_QUERY_IF_STAT,
.has_inbox = false,
.has_outbox = true,
@@ -1340,6 +1518,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
goto inform_slave_state;
+ mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);
+
/* write the version in the event field */
reply |= mlx4_comm_get_version();
@@ -1376,19 +1556,21 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
goto reset_slave;
slave_state[slave].vhcr_dma |= param;
slave_state[slave].active = true;
+ mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
break;
case MLX4_COMM_CMD_VHCR_POST:
if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
(slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
goto reset_slave;
- down(&priv->cmd.slave_sem);
+
+ mutex_lock(&priv->cmd.slave_cmd_mutex);
if (mlx4_master_process_vhcr(dev, slave, NULL)) {
mlx4_err(dev, "Failed processing vhcr for slave:%d,"
" resetting slave.\n", slave);
- up(&priv->cmd.slave_sem);
+ mutex_unlock(&priv->cmd.slave_cmd_mutex);
goto reset_slave;
}
- up(&priv->cmd.slave_sem);
+ mutex_unlock(&priv->cmd.slave_cmd_mutex);
break;
default:
mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
@@ -1529,14 +1711,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
struct mlx4_slave_state *s_state;
int i, j, err, port;
- priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
- &priv->mfunc.vhcr_dma,
- GFP_KERNEL);
- if (!priv->mfunc.vhcr) {
- mlx4_err(dev, "Couldn't allocate vhcr.\n");
- return -ENOMEM;
- }
-
if (mlx4_is_master(dev))
priv->mfunc.comm =
ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
@@ -1590,6 +1764,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
mlx4_master_handle_slave_flr);
spin_lock_init(&priv->mfunc.master.slave_state_lock);
+ spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
priv->mfunc.master.comm_wq =
create_singlethread_workqueue("mlx4_comm");
if (!priv->mfunc.master.comm_wq)
@@ -1598,7 +1773,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
if (mlx4_init_resource_tracker(dev))
goto err_thread;
- sema_init(&priv->cmd.slave_sem, 1);
err = mlx4_ARM_COMM_CHANNEL(dev);
if (err) {
mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
@@ -1612,8 +1786,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
mlx4_err(dev, "Couldn't sync toggles\n");
goto err_comm;
}
-
- sema_init(&priv->cmd.slave_sem, 1);
}
return 0;
@@ -1643,6 +1815,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
mutex_init(&priv->cmd.hcr_mutex);
+ mutex_init(&priv->cmd.slave_cmd_mutex);
sema_init(&priv->cmd.poll_sem, 1);
priv->cmd.use_events = 0;
priv->cmd.toggle = 1;
@@ -1659,14 +1832,30 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
}
}
+ if (mlx4_is_mfunc(dev)) {
+ priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ &priv->mfunc.vhcr_dma,
+ GFP_KERNEL);
+ if (!priv->mfunc.vhcr) {
+ mlx4_err(dev, "Couldn't allocate VHCR.\n");
+ goto err_hcr;
+ }
+ }
+
priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
MLX4_MAILBOX_SIZE,
MLX4_MAILBOX_SIZE, 0);
if (!priv->cmd.pool)
- goto err_hcr;
+ goto err_vhcr;
return 0;
+err_vhcr:
+ if (mlx4_is_mfunc(dev))
+ dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
+ priv->mfunc.vhcr = NULL;
+
err_hcr:
if (!mlx4_is_slave(dev))
iounmap(priv->cmd.hcr);
@@ -1689,9 +1878,6 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
}
iounmap(priv->mfunc.comm);
- dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
- priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
- priv->mfunc.vhcr = NULL;
}
void mlx4_cmd_cleanup(struct mlx4_dev *dev)
@@ -1702,6 +1888,10 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev)
if (!mlx4_is_slave(dev))
iounmap(priv->cmd.hcr);
+ if (mlx4_is_mfunc(dev))
+ dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
+ priv->mfunc.vhcr = NULL;
}
/*
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 10bba09c44ea..c10e3a6de09f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -712,10 +712,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (bounce)
tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
- /* Run destructor before passing skb to HW */
- if (likely(!skb_shared(skb)))
- skb_orphan(skb);
-
if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
*(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
op_own |= htonl((bf_index & 0xffff) << 8);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 99a04648fab0..51c764901ad2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -164,13 +164,16 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
- struct mlx4_eqe *s_eqe =
- &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
+ struct mlx4_eqe *s_eqe;
+ unsigned long flags;
+ spin_lock_irqsave(&slave_eq->event_lock, flags);
+ s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
if ((!!(s_eqe->owner & 0x80)) ^
(!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
"No free EQE on slave events queue\n", slave);
+ spin_unlock_irqrestore(&slave_eq->event_lock, flags);
return;
}
@@ -183,6 +186,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
queue_work(priv->mfunc.master.comm_wq,
&priv->mfunc.master.slave_event_work);
+ spin_unlock_irqrestore(&slave_eq->event_lock, flags);
}
static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
@@ -200,6 +204,196 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
slave_event(dev, slave, eqe);
}
+int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
+{
+ struct mlx4_eqe eqe;
+
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave];
+
+ if (!s_slave->active)
+ return 0;
+
+ memset(&eqe, 0, sizeof eqe);
+
+ eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
+ eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
+ eqe.event.port_mgmt_change.port = port;
+
+ return mlx4_GEN_EQE(dev, slave, &eqe);
+}
+EXPORT_SYMBOL(mlx4_gen_pkey_eqe);
+
+int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
+{
+ struct mlx4_eqe eqe;
+
+ /* don't send if we don't have that slave */
+ if (dev->num_vfs < slave)
+ return 0;
+ memset(&eqe, 0, sizeof eqe);
+
+ eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
+ eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
+ eqe.event.port_mgmt_change.port = port;
+
+ return mlx4_GEN_EQE(dev, slave, &eqe);
+}
+EXPORT_SYMBOL(mlx4_gen_guid_change_eqe);
+
+int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
+ u8 port_subtype_change)
+{
+ struct mlx4_eqe eqe;
+
+ /* don't send if we don't have that slave */
+ if (dev->num_vfs < slave)
+ return 0;
+ memset(&eqe, 0, sizeof eqe);
+
+ eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
+ eqe.subtype = port_subtype_change;
+ eqe.event.port_change.port = cpu_to_be32(port << 28);
+
+ mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
+ port_subtype_change, slave, port);
+ return mlx4_GEN_EQE(dev, slave, &eqe);
+}
+EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe);
+
+enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
+ if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS) {
+ pr_err("%s: Error: asking for slave:%d, port:%d\n",
+ __func__, slave, port);
+ return SLAVE_PORT_DOWN;
+ }
+ return s_state[slave].port_state[port];
+}
+EXPORT_SYMBOL(mlx4_get_slave_port_state);
+
+static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
+ enum slave_port_state state)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
+
+ if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
+ pr_err("%s: Error: asking for slave:%d, port:%d\n",
+ __func__, slave, port);
+ return -1;
+ }
+ s_state[slave].port_state[port] = state;
+
+ return 0;
+}
+
+static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
+{
+ int i;
+ enum slave_port_gen_event gen_event;
+
+ for (i = 0; i < dev->num_slaves; i++)
+ set_and_calc_slave_port_state(dev, i, port, event, &gen_event);
+}
+/**************************************************************************
+ The function gets as input the new event for that port, and changes
+ the slave's port state according to the previous state.
+ The events are:
+ MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
+ MLX4_PORT_STATE_DEV_EVENT_PORT_UP
+ MLX4_PORT_STATE_IB_EVENT_GID_VALID
+ MLX4_PORT_STATE_IB_EVENT_GID_INVALID
+***************************************************************************/
+int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
+ u8 port, int event,
+ enum slave_port_gen_event *gen_event)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *ctx = NULL;
+ unsigned long flags;
+ int ret = -1;
+ enum slave_port_state cur_state =
+ mlx4_get_slave_port_state(dev, slave, port);
+
+ *gen_event = SLAVE_PORT_GEN_EVENT_NONE;
+
+ if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
+ pr_err("%s: Error: asking for slave:%d, port:%d\n",
+ __func__, slave, port);
+ return ret;
+ }
+
+ ctx = &priv->mfunc.master.slave_state[slave];
+ spin_lock_irqsave(&ctx->lock, flags);
+
+ mlx4_dbg(dev, "%s: slave: %d, current state: %d new event :%d\n",
+ __func__, slave, cur_state, event);
+
+ switch (cur_state) {
+ case SLAVE_PORT_DOWN:
+ if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
+ mlx4_set_slave_port_state(dev, slave, port,
+ SLAVE_PENDING_UP);
+ break;
+ case SLAVE_PENDING_UP:
+ if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event)
+ mlx4_set_slave_port_state(dev, slave, port,
+ SLAVE_PORT_DOWN);
+ else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) {
+ mlx4_set_slave_port_state(dev, slave, port,
+ SLAVE_PORT_UP);
+ *gen_event = SLAVE_PORT_GEN_EVENT_UP;
+ }
+ break;
+ case SLAVE_PORT_UP:
+ if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) {
+ mlx4_set_slave_port_state(dev, slave, port,
+ SLAVE_PORT_DOWN);
+ *gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
+ } else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID ==
+ event) {
+ mlx4_set_slave_port_state(dev, slave, port,
+ SLAVE_PENDING_UP);
+ *gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
+ }
+ break;
+ default:
+ pr_err("%s: BUG!!! UNKNOWN state: "
+ "slave:%d, port:%d\n", __func__, slave, port);
+ goto out;
+ }
+ ret = mlx4_get_slave_port_state(dev, slave, port);
+ mlx4_dbg(dev, "%s: slave: %d, current state: %d new event"
+ " :%d gen_event: %d\n",
+ __func__, slave, cur_state, event, *gen_event);
+
+out:
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ return ret;
+}
+
+EXPORT_SYMBOL(set_and_calc_slave_port_state);
+
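Aside: the slave port state machine above is easier to read as a bare transition function: a port-up event moves DOWN to PENDING_UP, a valid GID moves PENDING_UP to UP, and port-down or GID-invalid events move it back. The standalone sketch below mirrors those transitions; the enum names echo the driver's but the code is illustrative only:

#include <stdio.h>

enum state { PORT_DOWN, PENDING_UP, PORT_UP };
enum event { EV_PORT_UP, EV_PORT_DOWN, EV_GID_VALID, EV_GID_INVALID };

static enum state step(enum state s, enum event e)
{
	switch (s) {
	case PORT_DOWN:
		return e == EV_PORT_UP ? PENDING_UP : PORT_DOWN;
	case PENDING_UP:
		if (e == EV_PORT_DOWN)
			return PORT_DOWN;
		return e == EV_GID_VALID ? PORT_UP : PENDING_UP;
	case PORT_UP:
		if (e == EV_PORT_DOWN)
			return PORT_DOWN;
		return e == EV_GID_INVALID ? PENDING_UP : PORT_UP;
	}
	return s;
}

int main(void)
{
	enum state s = PORT_DOWN;

	s = step(s, EV_PORT_UP);    /* physical port came up: pending */
	s = step(s, EV_GID_VALID);  /* alias GUID assigned: port is up */
	printf("final state: %d (2 == PORT_UP)\n", s);
	return 0;
}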
+int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr)
+{
+ struct mlx4_eqe eqe;
+
+ memset(&eqe, 0, sizeof eqe);
+
+ eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
+ eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO;
+ eqe.event.port_mgmt_change.port = port;
+ eqe.event.port_mgmt_change.params.port_info.changed_attr =
+ cpu_to_be32((u32) attr);
+
+ slave_event(dev, ALL_SLAVES, &eqe);
+ return 0;
+}
+EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev);
+
void mlx4_master_handle_slave_flr(struct work_struct *work)
{
struct mlx4_mfunc_master_ctx *master =
@@ -251,6 +445,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
u32 flr_slave;
u8 update_slave_state;
int i;
+ enum slave_port_gen_event gen_event;
while ((eqe = next_eqe_sw(eq))) {
/*
@@ -347,35 +542,49 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_PORT_CHANGE:
port = be32_to_cpu(eqe->event.port_change.port) >> 28;
if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
- mlx4_dispatch_event(dev,
- MLX4_DEV_EVENT_PORT_DOWN,
+ mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
port);
mlx4_priv(dev)->sense.do_sense_port[port] = 1;
- if (mlx4_is_master(dev))
- /*change the state of all slave's port
- * to down:*/
- for (i = 0; i < dev->num_slaves; i++) {
- mlx4_dbg(dev, "%s: Sending "
- "MLX4_PORT_CHANGE_SUBTYPE_DOWN"
+ if (!mlx4_is_master(dev))
+ break;
+ for (i = 0; i < dev->num_slaves; i++) {
+ if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
+ if (i == mlx4_master_func_num(dev))
+ continue;
+ mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
" to slave: %d, port:%d\n",
__func__, i, port);
- if (i == dev->caps.function)
- continue;
mlx4_slave_event(dev, i, eqe);
+ } else { /* IB port */
+ set_and_calc_slave_port_state(dev, i, port,
+ MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
+ &gen_event);
+ /* we may be in the pending state, in which case do not send the port_down event */
+ if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
+ if (i == mlx4_master_func_num(dev))
+ continue;
+ mlx4_slave_event(dev, i, eqe);
+ }
}
+ }
} else {
- mlx4_dispatch_event(dev,
- MLX4_DEV_EVENT_PORT_UP,
- port);
+ mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port);
+
mlx4_priv(dev)->sense.do_sense_port[port] = 0;
- if (mlx4_is_master(dev)) {
+ if (!mlx4_is_master(dev))
+ break;
+ if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
for (i = 0; i < dev->num_slaves; i++) {
- if (i == dev->caps.function)
+ if (i == mlx4_master_func_num(dev))
continue;
mlx4_slave_event(dev, i, eqe);
}
- }
+ else /* IB port */
+ /* port-up event will be sent to a slave when the
+ * slave's alias-guid is set. This is done in alias_GUID.c
+ */
+ set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
}
break;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index c69648487321..4f30b99324cf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -183,7 +183,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
-#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0X30
+#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
#define QUERY_FUNC_CAP_FMR_FLAG 0x80
#define QUERY_FUNC_CAP_FLAG_RDMA 0x40
@@ -194,21 +194,39 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET 0x8
#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc
+#define QUERY_FUNC_CAP_QP0_TUNNEL 0x10
+#define QUERY_FUNC_CAP_QP0_PROXY 0x14
+#define QUERY_FUNC_CAP_QP1_TUNNEL 0x18
+#define QUERY_FUNC_CAP_QP1_PROXY 0x1c
+
#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC 0x40
#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN 0x80
#define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80
if (vhcr->op_modifier == 1) {
- field = vhcr->in_modifier;
- MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
-
field = 0;
/* ensure force vlan and force mac bits are not set */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
/* ensure that phy_wqe_gid bit is not set */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
+ field = vhcr->in_modifier; /* phys-port = logical-port */
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+
+ /* size is now the QP number */
+ size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);
+
+ size += 2;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);
+
+ size = dev->phys_caps.base_proxy_sqpn + 8 * slave + field - 1;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_PROXY);
+
+ size += 2;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
+
} else if (vhcr->op_modifier == 0) {
/* enable rdma and ethernet interfaces */
field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA);
@@ -253,99 +271,118 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
return err;
}
-int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
+ struct mlx4_func_cap *func_cap)
{
struct mlx4_cmd_mailbox *mailbox;
u32 *outbox;
- u8 field;
+ u8 field, op_modifier;
u32 size;
- int i;
int err = 0;
+ op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FUNC_CAP,
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier,
+ MLX4_CMD_QUERY_FUNC_CAP,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err)
goto out;
outbox = mailbox->buf;
- MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
- if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
- mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
- err = -EPROTONOSUPPORT;
- goto out;
- }
- func_cap->flags = field;
+ if (!op_modifier) {
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
+ if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
+ mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+ func_cap->flags = field;
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+ func_cap->num_ports = field;
- MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
- func_cap->num_ports = field;
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+ func_cap->pf_context_behaviour = size;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
- func_cap->pf_context_behaviour = size;
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+ func_cap->qp_quota = size & 0xFFFFFF;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
- func_cap->qp_quota = size & 0xFFFFFF;
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+ func_cap->srq_quota = size & 0xFFFFFF;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
- func_cap->srq_quota = size & 0xFFFFFF;
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+ func_cap->cq_quota = size & 0xFFFFFF;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
- func_cap->cq_quota = size & 0xFFFFFF;
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+ func_cap->max_eq = size & 0xFFFFFF;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
- func_cap->max_eq = size & 0xFFFFFF;
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+ func_cap->reserved_eq = size & 0xFFFFFF;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
- func_cap->reserved_eq = size & 0xFFFFFF;
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+ func_cap->mpt_quota = size & 0xFFFFFF;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
- func_cap->mpt_quota = size & 0xFFFFFF;
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+ func_cap->mtt_quota = size & 0xFFFFFF;
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
- func_cap->mtt_quota = size & 0xFFFFFF;
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+ func_cap->mcg_quota = size & 0xFFFFFF;
+ goto out;
+ }
- MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
- func_cap->mcg_quota = size & 0xFFFFFF;
+ /* logical port query */
+ if (gen_or_port > dev->caps.num_ports) {
+ err = -EINVAL;
+ goto out;
+ }
- for (i = 1; i <= func_cap->num_ports; ++i) {
- err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 1,
- MLX4_CMD_QUERY_FUNC_CAP,
- MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
- if (err)
+ if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+ if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
+ mlx4_err(dev, "VLAN is enforced on this port\n");
+ err = -EPROTONOSUPPORT;
goto out;
+ }
- if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
- MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
- if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
- mlx4_err(dev, "VLAN is enforced on this port\n");
- err = -EPROTONOSUPPORT;
- goto out;
- }
-
- if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
- mlx4_err(dev, "Force mac is enabled on this port\n");
- err = -EPROTONOSUPPORT;
- goto out;
- }
- } else if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB) {
- MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
- if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
- mlx4_err(dev, "phy_wqe_gid is "
- "enforced on this ib port\n");
- err = -EPROTONOSUPPORT;
- goto out;
- }
+ if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
+ mlx4_err(dev, "Force mac is enabled on this port\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
}
+ } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
+ if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
+ mlx4_err(dev, "phy_wqe_gid is "
+ "enforced on this ib port\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+ }
- MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
- func_cap->physical_port[i] = field;
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+ func_cap->physical_port = field;
+ if (func_cap->physical_port != gen_or_port) {
+ err = -ENOSYS;
+ goto out;
}
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
+ func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
+ func_cap->qp0_proxy_qpn = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
+ func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
+ func_cap->qp1_proxy_qpn = size & 0xFFFFFF;
+
/* All other resources are allocated by the master, but we still report
* 'num' and 'reserved' capabilities as follows:
* - num remains the maximum resource index
@@ -559,7 +596,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->max_pds = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
dev_cap->reserved_xrcds = field >> 4;
- MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
dev_cap->max_xrcds = 1 << (field & 0x1f);
MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
@@ -715,6 +752,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
+ u64 flags;
int err = 0;
u8 field;
@@ -723,6 +761,11 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
if (err)
return err;
+ /* add port mng change event capability unconditionally to slaves */
+ MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
+ flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
+ MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
+
/* For guests, report Blueflame disabled */
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
field &= 0x7f;
@@ -1345,6 +1388,19 @@ out:
return err;
}
+/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
+ * and real QP0 are active, so that the paravirtualized QP0 is ready
+ * to operate */
+static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ /* irrelevant if not infiniband */
+ if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
+ priv->mfunc.master.qp0_state[port].qp0_active)
+ return 1;
+ return 0;
+}
+
int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -1358,17 +1414,29 @@ int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
return 0;
- if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
- return -ENODEV;
-
- /* Enable port only if it was previously disabled */
- if (!priv->mfunc.master.init_port_ref[port]) {
- err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
- MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
- if (err)
- return err;
+ if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
+ /* Enable port only if it was previously disabled */
+ if (!priv->mfunc.master.init_port_ref[port]) {
+ err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ }
+ priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
+ } else {
+ if (slave == mlx4_master_func_num(dev)) {
+ if (check_qp0_state(dev, slave, port) &&
+ !priv->mfunc.master.qp0_state[port].port_active) {
+ err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ priv->mfunc.master.qp0_state[port].port_active = 1;
+ priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
+ }
+ } else
+ priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
}
- priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
++priv->mfunc.master.init_port_ref[port];
return 0;
}
@@ -1441,15 +1509,29 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
(1 << port)))
return 0;
- if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
- return -ENODEV;
- if (priv->mfunc.master.init_port_ref[port] == 1) {
- err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
- MLX4_CMD_NATIVE);
- if (err)
- return err;
+ if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
+ if (priv->mfunc.master.init_port_ref[port] == 1) {
+ err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
+ 1000, MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ }
+ priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
+ } else {
+ /* infiniband port */
+ if (slave == mlx4_master_func_num(dev)) {
+ if (!priv->mfunc.master.qp0_state[port].qp0_active &&
+ priv->mfunc.master.qp0_state[port].port_active) {
+ err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
+ 1000, MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
+ priv->mfunc.master.qp0_state[port].port_active = 0;
+ }
+ } else
+ priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
}
- priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
--priv->mfunc.master.init_port_ref[port];
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 83fcbbf1b169..85abe9c11a22 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -134,8 +134,12 @@ struct mlx4_func_cap {
int max_eq;
int reserved_eq;
int mcg_quota;
- u8 physical_port[MLX4_MAX_PORTS + 1];
- u8 port_flags[MLX4_MAX_PORTS + 1];
+ u32 qp0_tunnel_qpn;
+ u32 qp0_proxy_qpn;
+ u32 qp1_tunnel_qpn;
+ u32 qp1_proxy_qpn;
+ u8 physical_port;
+ u8 port_flags;
};
struct mlx4_adapter {
@@ -192,7 +196,8 @@ struct mlx4_set_ib_param {
};
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
-int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap);
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
+ struct mlx4_func_cap *func_cap);
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2f816c6aed72..80df2ab0177c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -95,8 +95,6 @@ MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
" Not in use with device managed"
" flow steering");
-#define MLX4_VF (1 << 0)
-
#define HCA_GLOBAL_CAP_MASK 0
#define PF_CONTEXT_BEHAVIOUR_MASK 0
@@ -299,9 +297,12 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
mlx4_dbg(dev, "Steering mode is: %s\n",
mlx4_steering_mode_str(dev->caps.steering_mode));
- /* Sense port always allowed on supported devices for ConnectX1 and 2 */
- if (dev->pdev->device != 0x1003)
+ /* Sense port always allowed on supported devices for ConnectX-1 and -2 */
+ if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
+ /* Don't do sense port on multifunction devices (for now at least) */
+ if (mlx4_is_mfunc(dev))
+ dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
dev->caps.log_num_macs = log_num_mac;
dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
@@ -384,6 +385,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
+ dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
return 0;
}
/*The function checks if there are live vf, return the num of them*/
@@ -409,20 +411,54 @@ static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
u32 qk = MLX4_RESERVED_QKEY_BASE;
- if (qpn >= dev->caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
- qpn < dev->caps.sqp_start)
+
+ if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
+ qpn < dev->phys_caps.base_proxy_sqpn)
return -EINVAL;
- if (qpn >= dev->caps.base_tunnel_sqpn)
+ if (qpn >= dev->phys_caps.base_tunnel_sqpn)
/* tunnel qp */
- qk += qpn - dev->caps.base_tunnel_sqpn;
+ qk += qpn - dev->phys_caps.base_tunnel_sqpn;
else
- qk += qpn - dev->caps.sqp_start;
+ qk += qpn - dev->phys_caps.base_proxy_sqpn;
*qkey = qk;
return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);
+void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
+{
+ struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
+
+ if (!mlx4_is_master(dev))
+ return;
+
+ priv->virt2phys_pkey[slave][port - 1][i] = val;
+}
+EXPORT_SYMBOL(mlx4_sync_pkey_table);
+
+void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
+{
+ struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
+
+ if (!mlx4_is_master(dev))
+ return;
+
+ priv->slave_node_guids[slave] = guid;
+}
+EXPORT_SYMBOL(mlx4_put_slave_node_guid);
+
+__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
+
+ if (!mlx4_is_master(dev))
+ return 0;
+
+ return priv->slave_node_guids[slave];
+}
+EXPORT_SYMBOL(mlx4_get_slave_node_guid);
+
int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -493,9 +529,10 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
}
memset(&func_cap, 0, sizeof(func_cap));
- err = mlx4_QUERY_FUNC_CAP(dev, &func_cap);
+ err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
if (err) {
- mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n",
+ err);
return err;
}
@@ -523,12 +560,33 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
return -ENODEV;
}
+ dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+ dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+ dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+ dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+
+ if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
+ !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
+
for (i = 1; i <= dev->caps.num_ports; ++i) {
+ err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_FUNC_CAP port command failed for"
+ " port %d, aborting (%d).\n", i, err);
+ goto err_mem;
+ }
+ dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
+ dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
+ dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
+ dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
dev->caps.port_mask[i] = dev->caps.port_type[i];
if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
&dev->caps.gid_table_len[i],
&dev->caps.pkey_table_len[i]))
- return -ENODEV;
+ goto err_mem;
}
if (dev->caps.uar_page_size * (dev->caps.num_uars -
@@ -538,10 +596,20 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
"PCI resource 2 size of 0x%llx, aborting.\n",
dev->caps.uar_page_size * dev->caps.num_uars,
(unsigned long long) pci_resource_len(dev->pdev, 2));
- return -ENODEV;
+ goto err_mem;
}
return 0;
+
+err_mem:
+ kfree(dev->caps.qp0_tunnel);
+ kfree(dev->caps.qp0_proxy);
+ kfree(dev->caps.qp1_tunnel);
+ kfree(dev->caps.qp1_proxy);
+ dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
+ dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
+
+ return err;
}
/*
@@ -1092,10 +1160,10 @@ static void mlx4_slave_exit(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- down(&priv->cmd.slave_sem);
+ mutex_lock(&priv->cmd.slave_cmd_mutex);
if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
mlx4_warn(dev, "Failed to close slave function.\n");
- up(&priv->cmd.slave_sem);
+ mutex_unlock(&priv->cmd.slave_cmd_mutex);
}
static int map_bf_area(struct mlx4_dev *dev)
@@ -1147,7 +1215,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
u32 slave_read;
u32 cmd_channel_ver;
- down(&priv->cmd.slave_sem);
+ mutex_lock(&priv->cmd.slave_cmd_mutex);
priv->cmd.max_cmds = 1;
mlx4_warn(dev, "Sending reset\n");
ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
@@ -1196,12 +1264,13 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
goto err;
if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
goto err;
- up(&priv->cmd.slave_sem);
+
+ mutex_unlock(&priv->cmd.slave_cmd_mutex);
return 0;
err:
mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
- up(&priv->cmd.slave_sem);
+ mutex_unlock(&priv->cmd.slave_cmd_mutex);
return -EIO;
}
@@ -1848,7 +1917,7 @@ static void mlx4_free_ownership(struct mlx4_dev *dev)
iounmap(owner);
}
-static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
{
struct mlx4_priv *priv;
struct mlx4_dev *dev;
@@ -1871,12 +1940,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
/*
* Check for BARs.
*/
- if (((id == NULL) || !(id->driver_data & MLX4_VF)) &&
+ if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "Missing DCS, aborting."
- "(id == 0X%p, id->driver_data: 0x%lx,"
- " pci_resource_flags(pdev, 0):0x%lx)\n", id,
- id ? id->driver_data : 0, pci_resource_flags(pdev, 0));
+ "(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
+ pci_dev_data, pci_resource_flags(pdev, 0));
err = -ENODEV;
goto err_disable_pdev;
}
@@ -1941,7 +2009,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
dev->rev_id = pdev->revision;
/* Detect if this device is a virtual function */
- if (id && id->driver_data & MLX4_VF) {
+ if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
/* When acting as pf, we normally skip vfs unless explicitly
* requested to probe them. */
if (num_vfs && extended_func_num(pdev) > probe_vf) {
@@ -1969,12 +2037,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
if (num_vfs) {
- mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs);
+ mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs);
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
- mlx4_err(dev, "Failed to enable sriov,"
- "continuing without sriov enabled"
- " (err = %d).\n", err);
+ mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
+ err);
err = 0;
} else {
mlx4_warn(dev, "Running in master mode\n");
@@ -2089,6 +2156,7 @@ slave_start:
mlx4_sense_init(dev);
mlx4_start_sense(dev);
+ priv->pci_dev_data = pci_dev_data;
pci_set_drvdata(pdev, dev);
return 0;
@@ -2158,7 +2226,7 @@ static int __devinit mlx4_init_one(struct pci_dev *pdev,
{
printk_once(KERN_INFO "%s", mlx4_version);
- return __mlx4_init_one(pdev, id);
+ return __mlx4_init_one(pdev, id->driver_data);
}
static void mlx4_remove_one(struct pci_dev *pdev)
@@ -2217,12 +2285,18 @@ static void mlx4_remove_one(struct pci_dev *pdev)
if (dev->flags & MLX4_FLAG_MSI_X)
pci_disable_msix(pdev);
if (dev->flags & MLX4_FLAG_SRIOV) {
- mlx4_warn(dev, "Disabling sriov\n");
+ mlx4_warn(dev, "Disabling SR-IOV\n");
pci_disable_sriov(pdev);
}
if (!mlx4_is_slave(dev))
mlx4_free_ownership(dev);
+
+ kfree(dev->caps.qp0_tunnel);
+ kfree(dev->caps.qp0_proxy);
+ kfree(dev->caps.qp1_tunnel);
+ kfree(dev->caps.qp1_proxy);
+
kfree(priv);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -2232,41 +2306,46 @@ static void mlx4_remove_one(struct pci_dev *pdev)
int mlx4_restart_one(struct pci_dev *pdev)
{
+ struct mlx4_dev *dev = pci_get_drvdata(pdev);
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int pci_dev_data;
+
+ pci_dev_data = priv->pci_dev_data;
mlx4_remove_one(pdev);
- return __mlx4_init_one(pdev, NULL);
+ return __mlx4_init_one(pdev, pci_dev_data);
}
static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
/* MT25408 "Hermon" SDR */
- { PCI_VDEVICE(MELLANOX, 0x6340), 0 },
+ { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25408 "Hermon" DDR */
- { PCI_VDEVICE(MELLANOX, 0x634a), 0 },
+ { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25408 "Hermon" QDR */
- { PCI_VDEVICE(MELLANOX, 0x6354), 0 },
+ { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25408 "Hermon" DDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6732), 0 },
+ { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25408 "Hermon" QDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x673c), 0 },
+ { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25408 "Hermon" EN 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6368), 0 },
+ { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6750), 0 },
+ { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25458 ConnectX EN 10GBASE-T 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6372), 0 },
+ { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x675a), 0 },
+ { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT26468 ConnectX EN 10GigE PCIe gen2*/
- { PCI_VDEVICE(MELLANOX, 0x6764), 0 },
+ { PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
- { PCI_VDEVICE(MELLANOX, 0x6746), 0 },
+ { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT26478 ConnectX2 40GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x676e), 0 },
+ { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
/* MT25400 Family [ConnectX-2 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF },
+ { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
/* MT27500 Family [ConnectX-3] */
{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
/* MT27500 Family [ConnectX-3 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF },
+ { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
@@ -2295,12 +2374,12 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
- int ret = __mlx4_init_one(pdev, NULL);
+ int ret = __mlx4_init_one(pdev, 0);
return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
-static struct pci_error_handlers mlx4_err_handler = {
+static const struct pci_error_handlers mlx4_err_handler = {
.error_detected = mlx4_pci_err_detected,
.slot_reset = mlx4_pci_slot_reset,
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index dba69d98734a..1cf42036d7bb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -452,6 +452,7 @@ struct mlx4_slave_state {
/*initialized via the kzalloc*/
u8 is_slave_going_down;
u32 cookie;
+ enum slave_port_state port_state[MLX4_MAX_PORTS + 1];
};
struct slave_list {
@@ -472,6 +473,7 @@ struct mlx4_slave_event_eq {
u32 eqn;
u32 cons;
u32 prod;
+ spinlock_t event_lock;
struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
};
@@ -511,9 +513,9 @@ struct mlx4_cmd {
struct pci_pool *pool;
void __iomem *hcr;
struct mutex hcr_mutex;
+ struct mutex slave_cmd_mutex;
struct semaphore poll_sem;
struct semaphore event_sem;
- struct semaphore slave_sem;
int max_cmds;
spinlock_t context_lock;
int free_head;
@@ -766,6 +768,11 @@ struct _rule_hw {
};
};
+enum {
+ MLX4_PCI_DEV_IS_VF = 1 << 0,
+ MLX4_PCI_DEV_FORCE_SENSE_PORT = 1 << 1,
+};
+
struct mlx4_priv {
struct mlx4_dev dev;
@@ -773,6 +780,8 @@ struct mlx4_priv {
struct list_head ctx_list;
spinlock_t ctx_lock;
+ int pci_dev_data;
+
struct list_head pgdir_list;
struct mutex pgdir_mutex;
@@ -807,6 +816,9 @@ struct mlx4_priv {
struct io_mapping *bf_mapping;
int reserved_mtts;
int fs_hash_mode;
+ u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
+ __be64 slave_node_guids[MLX4_MFUNC_MAX];
+
};
static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -1011,16 +1023,61 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_2ERR_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_RTS2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
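
The MLX4_PCI_DEV_IS_VF and MLX4_PCI_DEV_FORCE_SENSE_PORT flags added above are carried in the driver_data field of the PCI device table and cached in priv->pci_dev_data, which is what lets mlx4_restart_one() re-run __mlx4_init_one() with the same per-device flags. As a standalone illustration of that bit-flag pattern, not part of the patch (flag values mirror the enum; probe_one_sketch() is a made-up stand-in for the probe path):

/* Standalone sketch: per-device flags passed via pci_device_id.driver_data.
 * Values mirror the MLX4_PCI_DEV_* enum above; the printed behaviour is only
 * indicative of what the flag names suggest. */
#include <stdio.h>

enum {
	SKETCH_DEV_IS_VF            = 1 << 0,
	SKETCH_DEV_FORCE_SENSE_PORT = 1 << 1,
};

static void probe_one_sketch(unsigned long driver_data)
{
	if (driver_data & SKETCH_DEV_IS_VF)
		printf("probing a virtual function\n");
	if (driver_data & SKETCH_DEV_FORCE_SENSE_PORT)
		printf("port sensing forced for this device\n");
}

int main(void)
{
	probe_one_sketch(SKETCH_DEV_IS_VF);            /* e.g. the 0x1002/0x1004 VF entries */
	probe_one_sketch(SKETCH_DEV_FORCE_SENSE_PORT); /* e.g. the Hermon/ConnectX EN entries */
	return 0;
}
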
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index e36dd0f2fa73..4c51b05efa28 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -732,6 +732,16 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
new_cap_mask = ((__be32 *) inbox->buf)[1];
}
+ /* slave may not set the IS_SM capability for the port */
+ if (slave != mlx4_master_func_num(dev) &&
+ (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
+ return -EINVAL;
+
+ /* No DEV_MGMT in multifunc mode */
+ if (mlx4_is_mfunc(dev) &&
+ (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
+ return -EINVAL;
+
agg_cap_mask = 0;
slave_cap_mask =
priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index fb2b36759cbf..81e2abe07bbb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -67,10 +67,18 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
complete(&qp->free);
}
-static int is_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp)
+/* used for INIT/CLOSE port logic */
+static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0)
{
- return qp->qpn >= dev->caps.sqp_start &&
- qp->qpn <= dev->caps.sqp_start + 1;
+ /* this procedure is called after we already know we are on the master */
+ /* qp0 is either the proxy qp0, or the real qp0 */
+ u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev);
+ *proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1;
+
+ *real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn &&
+ qp->qpn <= dev->phys_caps.base_sqpn + 1;
+
+ return *real_qp0 || *proxy_qp0;
}
static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
@@ -122,6 +130,8 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
int ret = 0;
+ int real_qp0 = 0;
+ int proxy_qp0 = 0;
u8 port;
if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
@@ -133,9 +143,12 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
cur_state != MLX4_QP_STATE_RST &&
- is_qp0(dev, qp)) {
+ is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
port = (qp->qpn & 1) + 1;
- priv->mfunc.master.qp0_state[port].qp0_active = 0;
+ if (proxy_qp0)
+ priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
+ else
+ priv->mfunc.master.qp0_state[port].qp0_active = 0;
}
return ret;
}
@@ -162,6 +175,23 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
new_state == MLX4_QP_STATE_RST ? 2 : 0,
op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);
+ if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
+ port = (qp->qpn & 1) + 1;
+ if (cur_state != MLX4_QP_STATE_ERR &&
+ cur_state != MLX4_QP_STATE_RST &&
+ new_state == MLX4_QP_STATE_ERR) {
+ if (proxy_qp0)
+ priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
+ else
+ priv->mfunc.master.qp0_state[port].qp0_active = 0;
+ } else if (new_state == MLX4_QP_STATE_RTR) {
+ if (proxy_qp0)
+ priv->mfunc.master.qp0_state[port].proxy_qp0_active = 1;
+ else
+ priv->mfunc.master.qp0_state[port].qp0_active = 1;
+ }
+ }
+
mlx4_free_cmd_mailbox(dev, mailbox);
return ret;
}
@@ -392,6 +422,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
int err;
int reserved_from_top = 0;
+ int k;
spin_lock_init(&qp_table->lock);
INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
@@ -406,7 +437,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
* We also reserve the MSB of the 24-bit QP number to indicate
* that a QP is an XRC QP.
*/
- dev->caps.sqp_start =
+ dev->phys_caps.base_sqpn =
ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
{
@@ -437,13 +468,66 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
}
+ /* Reserve 8 real SQPs in both native and SRIOV modes.
+ * In addition, in SRIOV mode, reserve 8 proxy SQPs per function
+ * (for all PFs and VFs), and 8 corresponding tunnel QPs.
+ * Each proxy SQP works opposite its own tunnel QP.
+ *
+ * The QPs are arranged as follows:
+ * a. 8 real SQPs
+ * b. All the proxy SQPs (8 per function)
+ * c. All the tunnel QPs (8 per function)
+ */
+
err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
- (1 << 23) - 1, dev->caps.sqp_start + 8,
+ (1 << 23) - 1, dev->phys_caps.base_sqpn + 8 +
+ 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev),
reserved_from_top);
if (err)
return err;
- return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start);
+ if (mlx4_is_mfunc(dev)) {
+ /* for PPF use */
+ dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8;
+ dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX;
+
+ /* In mfunc, calculate proxy and tunnel qp offsets for the PF here,
+ * since the PF does not call mlx4_slave_caps */
+ dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+ dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+ dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+ dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
+
+ if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
+ !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
+
+ for (k = 0; k < dev->caps.num_ports; k++) {
+ dev->caps.qp0_proxy[k] = dev->phys_caps.base_proxy_sqpn +
+ 8 * mlx4_master_func_num(dev) + k;
+ dev->caps.qp0_tunnel[k] = dev->caps.qp0_proxy[k] + 8 * MLX4_MFUNC_MAX;
+ dev->caps.qp1_proxy[k] = dev->phys_caps.base_proxy_sqpn +
+ 8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k;
+ dev->caps.qp1_tunnel[k] = dev->caps.qp1_proxy[k] + 8 * MLX4_MFUNC_MAX;
+ }
+ }
+
+
+ err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn);
+ if (err)
+ goto err_mem;
+ return 0;
+
+err_mem:
+ kfree(dev->caps.qp0_tunnel);
+ kfree(dev->caps.qp0_proxy);
+ kfree(dev->caps.qp1_tunnel);
+ kfree(dev->caps.qp1_proxy);
+ dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
+ dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
+ return err;
}
void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
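
The layout comment in the hunk above (8 real SQPs, then 8 proxy SQPs per function, then 8 tunnel QPs per function) reduces to simple arithmetic. Below is a standalone sketch of that numbering, with an assumed base_sqpn and illustrative constants standing in for MLX4_MFUNC_MAX and MLX4_MAX_PORTS; the real values come from the firmware-provided capabilities:

/* Standalone sketch of the special-QP numbering described above.
 * base_sqpn, SKETCH_MFUNC_MAX and SKETCH_MAX_PORTS are assumed example
 * values, not the driver's actual constants. */
#include <stdio.h>

#define SKETCH_MFUNC_MAX 64
#define SKETCH_MAX_PORTS 2

int main(void)
{
	unsigned int base_sqpn = 64;	/* assumed ALIGN(reserved FW QPs, 8) */
	unsigned int func = 0;		/* master (PF) function number */
	unsigned int base_proxy = base_sqpn + 8;
	unsigned int base_tunnel = base_sqpn + 8 + 8 * SKETCH_MFUNC_MAX;

	printf("real SQPs %u..%u, proxies start at %u, tunnels start at %u\n",
	       base_sqpn, base_sqpn + 7, base_proxy, base_tunnel);

	for (unsigned int k = 0; k < SKETCH_MAX_PORTS; k++) {
		unsigned int qp0_proxy = base_proxy + 8 * func + k;
		unsigned int qp0_tunnel = qp0_proxy + 8 * SKETCH_MFUNC_MAX;
		unsigned int qp1_proxy = base_proxy + 8 * func + SKETCH_MAX_PORTS + k;
		unsigned int qp1_tunnel = qp1_proxy + 8 * SKETCH_MFUNC_MAX;

		printf("port %u: qp0 proxy %u tunnel %u, qp1 proxy %u tunnel %u\n",
		       k + 1, qp0_proxy, qp0_tunnel, qp1_proxy, qp1_tunnel);
	}
	return 0;
}

This also makes the bitmap call above concrete: on the master, the first base_sqpn + 8 + 16 * MLX4_MFUNC_MAX QP numbers are kept out of the general allocator.
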
diff --git a/drivers/net/ethernet/mellanox/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c
index 11e7c1cb99bf..dd1b5093d8b1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/reset.c
+++ b/drivers/net/ethernet/mellanox/mlx4/reset.c
@@ -141,16 +141,16 @@ int mlx4_reset(struct mlx4_dev *dev)
/* Now restore the PCI headers */
if (pcie_cap) {
devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
- if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_DEVCTL,
- devctl)) {
+ if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL,
+ devctl)) {
err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA PCI Express "
"Device Control register, aborting.\n");
goto out;
}
linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
- if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_LNKCTL,
- linkctl)) {
+ if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL,
+ linkctl)) {
err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA PCI Express "
"Link control register, aborting.\n");
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 293c9e820c49..ba6506ff4abb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -242,6 +242,15 @@ static int res_tracker_insert(struct rb_root *root, struct res_common *res)
return 0;
}
+enum qp_transition {
+ QP_TRANS_INIT2RTR,
+ QP_TRANS_RTR2RTS,
+ QP_TRANS_RTS2RTS,
+ QP_TRANS_SQERR2RTS,
+ QP_TRANS_SQD2SQD,
+ QP_TRANS_SQD2RTS
+};
+
/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
@@ -308,14 +317,41 @@ void mlx4_free_resource_tracker(struct mlx4_dev *dev,
}
}
-static void update_ud_gid(struct mlx4_dev *dev,
- struct mlx4_qp_context *qp_ctx, u8 slave)
+static void update_pkey_index(struct mlx4_dev *dev, int slave,
+ struct mlx4_cmd_mailbox *inbox)
{
- u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
+ u8 sched = *(u8 *)(inbox->buf + 64);
+ u8 orig_index = *(u8 *)(inbox->buf + 35);
+ u8 new_index;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int port;
+
+ port = (sched >> 6 & 1) + 1;
+
+ new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
+ *(u8 *)(inbox->buf + 35) = new_index;
+
+ mlx4_dbg(dev, "port = %d, orig pkey index = %d, "
+ "new pkey index = %d\n", port, orig_index, new_index);
+}
+
+static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
+ u8 slave)
+{
+ struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
+ enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
+ u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
if (MLX4_QP_ST_UD == ts)
qp_ctx->pri_path.mgid_index = 0x80 | slave;
+ if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
+ if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
+ qp_ctx->pri_path.mgid_index = slave & 0x7F;
+ if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
+ qp_ctx->alt_path.mgid_index = slave & 0x7F;
+ }
+
mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
slave, qp_ctx->pri_path.mgid_index);
}
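
update_pkey_index() above shows the paravirtualization step: the slave's P_Key index in the QP context is looked up in the new virt2phys_pkey table, and the port is decoded from bit 6 of the schedule-queue byte. A standalone sketch of that remapping with invented table contents (the driver fills the table elsewhere):

/* Standalone sketch of per-slave P_Key index remapping.  Table contents
 * and the slave/port numbers are invented for illustration. */
#include <stdio.h>

#define SKETCH_SLAVES 4
#define SKETCH_PORTS  2
#define SKETCH_PKEYS  8

static unsigned char virt2phys_pkey[SKETCH_SLAVES][SKETCH_PORTS][SKETCH_PKEYS];

int main(void)
{
	unsigned char sched = 0x40;		/* bit 6 selects the port */
	int port = ((sched >> 6) & 1) + 1;	/* 1-based port number */
	int slave = 2, virt_index = 0;

	virt2phys_pkey[slave][port - 1][virt_index] = 5;	/* pretend mapping */

	printf("slave %d, port %d: virtual pkey index %d -> physical index %d\n",
	       slave, port, virt_index,
	       virt2phys_pkey[slave][port - 1][virt_index]);
	return 0;
}
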
@@ -360,8 +396,6 @@ static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
r->from_state = r->state;
r->state = RES_ANY_BUSY;
- mlx4_dbg(dev, "res %s id 0x%llx to busy\n",
- ResourceType(type), r->res_id);
if (res)
*((struct res_common **)res) = r;
@@ -1105,7 +1139,13 @@ static void res_end_move(struct mlx4_dev *dev, int slave,
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
- return mlx4_is_qp_reserved(dev, qpn);
+ return mlx4_is_qp_reserved(dev, qpn) &&
+ (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
+}
+
+static int fw_reserved(struct mlx4_dev *dev, int qpn)
+{
+ return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
@@ -1145,7 +1185,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (err)
return err;
- if (!valid_reserved(dev, slave, qpn)) {
+ if (!fw_reserved(dev, qpn)) {
err = __mlx4_qp_alloc_icm(dev, qpn);
if (err) {
res_abort_move(dev, slave, RES_QP, qpn);
@@ -1498,7 +1538,7 @@ static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (err)
return err;
- if (!valid_reserved(dev, slave, qpn))
+ if (!fw_reserved(dev, qpn))
__mlx4_qp_free_icm(dev, qpn);
res_end_move(dev, slave, RES_QP, qpn);
@@ -1938,6 +1978,19 @@ static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}
+static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
+ struct mlx4_qp_context *context)
+{
+ u32 qpn = vhcr->in_modifier & 0xffffff;
+ u32 qkey = 0;
+
+ if (mlx4_get_parav_qkey(dev, qpn, &qkey))
+ return;
+
+ /* adjust qkey in qp context */
+ context->qkey = cpu_to_be32(qkey);
+}
+
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -1990,6 +2043,8 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
goto ex_put_scq;
}
+ adjust_proxy_tun_qkey(dev, vhcr, qpc);
+ update_pkey_index(dev, slave, inbox);
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto ex_put_srq;
@@ -2135,6 +2190,48 @@ static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
return err;
}
+static int verify_qp_parameters(struct mlx4_dev *dev,
+ struct mlx4_cmd_mailbox *inbox,
+ enum qp_transition transition, u8 slave)
+{
+ u32 qp_type;
+ struct mlx4_qp_context *qp_ctx;
+ enum mlx4_qp_optpar optpar;
+
+ qp_ctx = inbox->buf + 8;
+ qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
+ optpar = be32_to_cpu(*(__be32 *) inbox->buf);
+
+ switch (qp_type) {
+ case MLX4_QP_ST_RC:
+ case MLX4_QP_ST_UC:
+ switch (transition) {
+ case QP_TRANS_INIT2RTR:
+ case QP_TRANS_RTR2RTS:
+ case QP_TRANS_RTS2RTS:
+ case QP_TRANS_SQD2SQD:
+ case QP_TRANS_SQD2RTS:
+ if (slave != mlx4_master_func_num(dev))
+ /* slaves have only gid index 0 */
+ if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
+ if (qp_ctx->pri_path.mgid_index)
+ return -EINVAL;
+ if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
+ if (qp_ctx->alt_path.mgid_index)
+ return -EINVAL;
+ break;
+ default:
+ break;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -2622,16 +2719,123 @@ out:
return err;
}
+int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_qp_context *context = inbox->buf + 8;
+ adjust_proxy_tun_qkey(dev, vhcr, context);
+ update_pkey_index(dev, slave, inbox);
+ return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
+ int err;
struct mlx4_qp_context *qpc = inbox->buf + 8;
- update_ud_gid(dev, qpc, (u8)slave);
+ err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
+ if (err)
+ return err;
+
+ update_pkey_index(dev, slave, inbox);
+ update_gid(dev, inbox, (u8)slave);
+ adjust_proxy_tun_qkey(dev, vhcr, qpc);
+
+ return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
+int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ struct mlx4_qp_context *context = inbox->buf + 8;
+
+ err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
+ if (err)
+ return err;
+
+ update_pkey_index(dev, slave, inbox);
+ update_gid(dev, inbox, (u8)slave);
+ adjust_proxy_tun_qkey(dev, vhcr, context);
+ return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
+int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ struct mlx4_qp_context *context = inbox->buf + 8;
+
+ err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
+ if (err)
+ return err;
+
+ update_pkey_index(dev, slave, inbox);
+ update_gid(dev, inbox, (u8)slave);
+ adjust_proxy_tun_qkey(dev, vhcr, context);
+ return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
+
+int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_qp_context *context = inbox->buf + 8;
+ adjust_proxy_tun_qkey(dev, vhcr, context);
+ return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
+int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ struct mlx4_qp_context *context = inbox->buf + 8;
+
+ err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
+ if (err)
+ return err;
+
+ adjust_proxy_tun_qkey(dev, vhcr, context);
+ update_gid(dev, inbox, (u8)slave);
+ update_pkey_index(dev, slave, inbox);
+ return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
+int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ struct mlx4_qp_context *context = inbox->buf + 8;
+
+ err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
+ if (err)
+ return err;
+ adjust_proxy_tun_qkey(dev, vhcr, context);
+ update_gid(dev, inbox, (u8)slave);
+ update_pkey_index(dev, slave, inbox);
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c
index 34ee09bae36e..094773d88f80 100644
--- a/drivers/net/ethernet/mellanox/mlx4/sense.c
+++ b/drivers/net/ethernet/mellanox/mlx4/sense.c
@@ -139,5 +139,5 @@ void mlx4_sense_init(struct mlx4_dev *dev)
for (port = 1; port <= dev->caps.num_ports; port++)
sense->do_sense_port[port] = 1;
- INIT_DELAYED_WORK_DEFERRABLE(&sense->sense_poll, mlx4_sense_port);
+ INIT_DEFERRABLE_WORK(&sense->sense_poll, mlx4_sense_port);
}
diff --git a/drivers/net/ethernet/mipsnet.c b/drivers/net/ethernet/mipsnet.c
deleted file mode 100644
index db5285befe2a..000000000000
--- a/drivers/net/ethernet/mipsnet.c
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/platform_device.h>
-#include <asm/mips-boards/simint.h>
-
-#define MIPSNET_VERSION "2007-11-17"
-
-/*
- * Net status/control block as seen by sw in the core.
- */
-struct mipsnet_regs {
- /*
- * Device info for probing, reads as MIPSNET%d where %d is some
- * form of version.
- */
- u64 devId; /*0x00 */
-
- /*
- * read only busy flag.
- * Set and cleared by the Net Device to indicate that an rx or a tx
- * is in progress.
- */
- u32 busy; /*0x08 */
-
- /*
- * Set by the Net Device.
- * The device will set it once data has been received.
- * The value is the number of bytes that should be read from
- * rxDataBuffer. The value will decrease till 0 until all the data
- * from rxDataBuffer has been read.
- */
- u32 rxDataCount; /*0x0c */
-#define MIPSNET_MAX_RXTX_DATACOUNT (1 << 16)
-
- /*
- * Settable from the MIPS core, cleared by the Net Device.
- * The core should set the number of bytes it wants to send,
- * then it should write those bytes of data to txDataBuffer.
- * The device will clear txDataCount once the data has been
- * processed (not necessarily sent).
- */
- u32 txDataCount; /*0x10 */
-
- /*
- * Interrupt control
- *
- * Used to clear the interrupt generated by this dev.
- * Write a 1 to clear the interrupt. (except bit31).
- *
- * Bit0 is set if it was a tx-done interrupt.
- * Bit1 is set when new rx-data is available.
- * Until this bit is cleared there will be no other RXs.
- *
- * Bit31 is used for testing, it clears after a read.
- * Writing 1 to this bit will cause an interrupt to be generated.
- * To clear the test interrupt, write 0 to this register.
- */
- u32 interruptControl; /*0x14 */
-#define MIPSNET_INTCTL_TXDONE (1u << 0)
-#define MIPSNET_INTCTL_RXDONE (1u << 1)
-#define MIPSNET_INTCTL_TESTBIT (1u << 31)
-
- /*
- * Readonly core-specific interrupt info for the device to signal
- * the core. The meaning of the contents of this field might change.
- */
- /* XXX: the whole memIntf interrupt scheme is messy: the device
- * should have no control what so ever of what VPE/register set is
- * being used.
- * The MemIntf should only expose interrupt lines, and something in
- * the config should be responsible for the line<->core/vpe bindings.
- */
- u32 interruptInfo; /*0x18 */
-
- /*
- * This is where the received data is read out.
- * There is more data to read until rxDataReady is 0.
- * Only 1 byte at this regs offset is used.
- */
- u32 rxDataBuffer; /*0x1c */
-
- /*
- * This is where the data to transmit is written.
- * Data should be written for the amount specified in the
- * txDataCount register.
- * Only 1 byte at this regs offset is used.
- */
- u32 txDataBuffer; /*0x20 */
-};
-
-#define regaddr(dev, field) \
- (dev->base_addr + offsetof(struct mipsnet_regs, field))
-
-static char mipsnet_string[] = "mipsnet";
-
-/*
- * Copy data from the MIPSNET rx data port
- */
-static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata,
- int len)
-{
- for (; len > 0; len--, kdata++)
- *kdata = inb(regaddr(dev, rxDataBuffer));
-
- return inl(regaddr(dev, rxDataCount));
-}
-
-static inline void mipsnet_put_todevice(struct net_device *dev,
- struct sk_buff *skb)
-{
- int count_to_go = skb->len;
- char *buf_ptr = skb->data;
-
- outl(skb->len, regaddr(dev, txDataCount));
-
- for (; count_to_go; buf_ptr++, count_to_go--)
- outb(*buf_ptr, regaddr(dev, txDataBuffer));
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
- dev_kfree_skb(skb);
-}
-
-static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- /*
- * Only one packet at a time. Once TXDONE interrupt is serviced, the
- * queue will be restarted.
- */
- netif_stop_queue(dev);
- mipsnet_put_todevice(dev, skb);
-
- return NETDEV_TX_OK;
-}
-
-static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t len)
-{
- struct sk_buff *skb;
-
- if (!len)
- return len;
-
- skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
- if (!skb) {
- dev->stats.rx_dropped++;
- return -ENOMEM;
- }
-
- skb_reserve(skb, NET_IP_ALIGN);
- if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len))
- return -EFAULT;
-
- skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- netif_rx(skb);
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
-
- return len;
-}
-
-static irqreturn_t mipsnet_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- u32 int_flags;
- irqreturn_t ret = IRQ_NONE;
-
- if (irq != dev->irq)
- goto out_badirq;
-
- /* TESTBIT is cleared on read. */
- int_flags = inl(regaddr(dev, interruptControl));
- if (int_flags & MIPSNET_INTCTL_TESTBIT) {
- /* TESTBIT takes effect after a write with 0. */
- outl(0, regaddr(dev, interruptControl));
- ret = IRQ_HANDLED;
- } else if (int_flags & MIPSNET_INTCTL_TXDONE) {
- /* Only one packet at a time, we are done. */
- dev->stats.tx_packets++;
- netif_wake_queue(dev);
- outl(MIPSNET_INTCTL_TXDONE,
- regaddr(dev, interruptControl));
- ret = IRQ_HANDLED;
- } else if (int_flags & MIPSNET_INTCTL_RXDONE) {
- mipsnet_get_fromdev(dev, inl(regaddr(dev, rxDataCount)));
- outl(MIPSNET_INTCTL_RXDONE, regaddr(dev, interruptControl));
- ret = IRQ_HANDLED;
- }
- return ret;
-
-out_badirq:
- printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
- dev->name, __func__, irq);
- return ret;
-}
-
-static int mipsnet_open(struct net_device *dev)
-{
- int err;
-
- err = request_irq(dev->irq, mipsnet_interrupt,
- IRQF_SHARED, dev->name, (void *) dev);
- if (err) {
- release_region(dev->base_addr, sizeof(struct mipsnet_regs));
- return err;
- }
-
- netif_start_queue(dev);
-
- /* test interrupt handler */
- outl(MIPSNET_INTCTL_TESTBIT, regaddr(dev, interruptControl));
-
- return 0;
-}
-
-static int mipsnet_close(struct net_device *dev)
-{
- netif_stop_queue(dev);
- free_irq(dev->irq, dev);
- return 0;
-}
-
-static void mipsnet_set_mclist(struct net_device *dev)
-{
-}
-
-static const struct net_device_ops mipsnet_netdev_ops = {
- .ndo_open = mipsnet_open,
- .ndo_stop = mipsnet_close,
- .ndo_start_xmit = mipsnet_xmit,
- .ndo_set_rx_mode = mipsnet_set_mclist,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
-};
-
-static int __devinit mipsnet_probe(struct platform_device *dev)
-{
- struct net_device *netdev;
- int err;
-
- netdev = alloc_etherdev(0);
- if (!netdev) {
- err = -ENOMEM;
- goto out;
- }
-
- platform_set_drvdata(dev, netdev);
-
- netdev->netdev_ops = &mipsnet_netdev_ops;
-
- /*
- * TODO: probe for these or load them from PARAM
- */
- netdev->base_addr = 0x4200;
- netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 +
- inl(regaddr(netdev, interruptInfo));
-
- /* Get the io region now, get irq on open() */
- if (!request_region(netdev->base_addr, sizeof(struct mipsnet_regs),
- "mipsnet")) {
- err = -EBUSY;
- goto out_free_netdev;
- }
-
- /*
- * Lacking any better mechanism to allocate a MAC address we use a
- * random one ...
- */
- eth_hw_addr_random(netdev);
-
- err = register_netdev(netdev);
- if (err) {
- printk(KERN_ERR "MIPSNet: failed to register netdev.\n");
- goto out_free_region;
- }
-
- return 0;
-
-out_free_region:
- release_region(netdev->base_addr, sizeof(struct mipsnet_regs));
-
-out_free_netdev:
- free_netdev(netdev);
-
-out:
- return err;
-}
-
-static int __devexit mipsnet_device_remove(struct platform_device *device)
-{
- struct net_device *dev = platform_get_drvdata(device);
-
- unregister_netdev(dev);
- release_region(dev->base_addr, sizeof(struct mipsnet_regs));
- free_netdev(dev);
- platform_set_drvdata(device, NULL);
-
- return 0;
-}
-
-static struct platform_driver mipsnet_driver = {
- .driver = {
- .name = mipsnet_string,
- .owner = THIS_MODULE,
- },
- .probe = mipsnet_probe,
- .remove = __devexit_p(mipsnet_device_remove),
-};
-
-static int __init mipsnet_init_module(void)
-{
- int err;
-
- printk(KERN_INFO "MIPSNet Ethernet driver. Version: %s. "
- "(c)2005 MIPS Technologies, Inc.\n", MIPSNET_VERSION);
-
- err = platform_driver_register(&mipsnet_driver);
- if (err)
- printk(KERN_ERR "Driver registration failed\n");
-
- return err;
-}
-
-static void __exit mipsnet_exit_module(void)
-{
- platform_driver_unregister(&mipsnet_driver);
-}
-
-module_init(mipsnet_init_module);
-module_exit(mipsnet_exit_module);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index fa85cf1353fd..83516e3369c9 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1078,22 +1078,16 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
#ifdef CONFIG_MYRI10GE_DCA
static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
{
- int ret, cap, err;
+ int ret;
u16 ctl;
- cap = pci_pcie_cap(pdev);
- if (!cap)
- return 0;
-
- err = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
- if (err)
- return 0;
+ pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &ctl);
ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
if (ret != on) {
ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
ctl |= (on << 4);
- pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
+ pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, ctl);
}
return ret;
}
@@ -3192,18 +3186,13 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
struct device *dev = &mgp->pdev->dev;
int cap;
unsigned err_cap;
- u16 val;
- u8 ext_type;
int ret;
if (!myri10ge_ecrc_enable || !bridge)
return;
/* check that the bridge is a root port */
- cap = pci_pcie_cap(bridge);
- pci_read_config_word(bridge, cap + PCI_CAP_FLAGS, &val);
- ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
- if (ext_type != PCI_EXP_TYPE_ROOT_PORT) {
+ if (pci_pcie_type(bridge) != PCI_EXP_TYPE_ROOT_PORT) {
if (myri10ge_ecrc_enable > 1) {
struct pci_dev *prev_bridge, *old_bridge = bridge;
@@ -3218,11 +3207,8 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
" to force ECRC\n");
return;
}
- cap = pci_pcie_cap(bridge);
- pci_read_config_word(bridge,
- cap + PCI_CAP_FLAGS, &val);
- ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
- } while (ext_type != PCI_EXP_TYPE_ROOT_PORT);
+ } while (pci_pcie_type(bridge) !=
+ PCI_EXP_TYPE_ROOT_PORT);
dev_info(dev,
"Forcing ECRC on non-root port %s"
@@ -3335,11 +3321,10 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
int overridden = 0;
if (myri10ge_force_firmware == 0) {
- int link_width, exp_cap;
+ int link_width;
u16 lnk;
- exp_cap = pci_pcie_cap(mgp->pdev);
- pci_read_config_word(mgp->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
+ pcie_capability_read_word(mgp->pdev, PCI_EXP_LNKSTA, &lnk);
link_width = (lnk >> 4) & 0x3f;
/* Check to see if Link is less than 8 or if the
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index d958c2299372..de50547c187d 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -484,7 +484,7 @@ static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
MODULE_DEVICE_TABLE(pci, s2io_tbl);
-static struct pci_error_handlers s2io_err_handler = {
+static const struct pci_error_handlers s2io_err_handler = {
.error_detected = s2io_io_error_detected,
.slot_reset = s2io_io_slot_reset,
.resume = s2io_io_resume,
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 32d06824fe3e..c2e420a84d22 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -757,7 +757,7 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
u16 lnk;
/* Get the negotiated link width and speed from PCI config space */
- pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk);
+ pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk);
if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
return VXGE_HW_ERR_INVALID_PCI_INFO;
@@ -1982,7 +1982,7 @@ u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
struct pci_dev *dev = hldev->pdev;
u16 lnk;
- pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk);
+ pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk);
return (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index de2190443510..3e5b7509502c 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3521,7 +3521,7 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
strncpy(buf, dev->name, IFNAMSIZ);
- flush_work_sync(&vdev->reset_task);
+ flush_work(&vdev->reset_task);
/* in 2.6 will call stop() if device is up */
unregister_netdev(dev);
@@ -4799,7 +4799,7 @@ static void __devexit vxge_remove(struct pci_dev *pdev)
__LINE__);
}
-static struct pci_error_handlers vxge_err_handler = {
+static const struct pci_error_handlers vxge_err_handler = {
.error_detected = vxge_io_error_detected,
.slot_reset = vxge_io_slot_reset,
.resume = vxge_io_resume,
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index 9d11ab7521bc..63e7af44366f 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -34,7 +34,7 @@
#include <mach/netx-regs.h>
#include <mach/pfifo.h>
#include <mach/xc.h>
-#include <mach/eth.h>
+#include <linux/platform_data/eth-netx.h>
/* XC Fifo Offsets */
#define EMPTY_PTR_FIFO(xcno) (0 + ((xcno) << 3)) /* Index of the empty pointer FIFO */
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index f45def01a98e..876beceaf2d7 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3409,7 +3409,7 @@ set_speed:
pause_flags = 0;
/* setup pause frame */
- if (np->duplex != 0) {
+ if (netif_running(dev) && (np->duplex != 0)) {
if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
@@ -4435,7 +4435,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
regs->version = FORCEDETH_REGS_VER;
spin_lock_irq(&np->lock);
- for (i = 0; i <= np->register_size/sizeof(u32); i++)
+ for (i = 0; i < np->register_size/sizeof(u32); i++)
rbuf[i] = readl(base + i*sizeof(u32));
spin_unlock_irq(&np->lock);
}
@@ -5455,6 +5455,7 @@ static int nv_close(struct net_device *dev)
netif_stop_queue(dev);
spin_lock_irq(&np->lock);
+ nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
nv_stop_rxtx(dev);
nv_txrx_reset(dev);
@@ -5904,11 +5905,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
goto out_error;
}
+ netif_carrier_off(dev);
+
+ /* Some NICs freeze when TX pause is enabled while NIC is
+ * down, and this stays across warm reboots. The sequence
+ * below should be enough to recover from that state.
+ */
+ nv_update_pause(dev, 0);
+ nv_start_tx(dev);
+ nv_stop_tx(dev);
+
if (id->driver_data & DEV_HAS_VLAN)
nv_vlan_mode(dev, dev->features);
- netif_carrier_off(dev);
-
dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index feb85d56c750..b2a94d02a521 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2795,7 +2795,7 @@ static const struct dev_pm_ops pch_gbe_pm_ops = {
};
#endif
-static struct pci_error_handlers pch_gbe_err_handler = {
+static const struct pci_error_handlers pch_gbe_err_handler = {
.error_detected = pch_gbe_io_error_detected,
.slot_reset = pch_gbe_io_slot_reset,
.resume = pch_gbe_io_resume
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index a77c558d8f40..df450616ab37 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1386,7 +1386,7 @@ static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP)
return;
- if (root->pcie_type != PCI_EXP_TYPE_ROOT_PORT)
+ if (pci_pcie_type(root) != PCI_EXP_TYPE_ROOT_PORT)
return;
aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR);
@@ -3340,7 +3340,7 @@ netxen_free_vlan_ip_list(struct netxen_adapter *adapter)
{ }
#endif
-static struct pci_error_handlers netxen_err_handler = {
+static const struct pci_error_handlers netxen_err_handler = {
.error_detected = netxen_io_error_detected,
.slot_reset = netxen_io_slot_reset,
.resume = netxen_io_resume,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 212c12193275..473ce134ca63 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -4522,7 +4522,7 @@ static void
qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
{ }
#endif
-static struct pci_error_handlers qlcnic_err_handler = {
+static const struct pci_error_handlers qlcnic_err_handler = {
.error_detected = qlcnic_io_error_detected,
.slot_reset = qlcnic_io_slot_reset,
.resume = qlcnic_io_resume,
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index b53a3b60b648..b262d6156816 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4847,7 +4847,7 @@ static void qlge_io_resume(struct pci_dev *pdev)
netif_device_attach(ndev);
}
-static struct pci_error_handlers qlge_err_handler = {
+static const struct pci_error_handlers qlge_err_handler = {
.error_detected = qlge_io_error_detected,
.slot_reset = qlge_io_slot_reset,
.resume = qlge_io_resume,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index b47d5b35024e..e7ff886e8047 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -77,7 +77,7 @@
static const int multicast_filter_limit = 32;
#define MAX_READ_REQUEST_SHIFT 12
-#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
+#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -287,6 +287,8 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
+ { PCI_VENDOR_ID_DLINK, 0x4300,
+ PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
@@ -833,15 +835,8 @@ static void rtl_unlock_work(struct rtl8169_private *tp)
static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
{
- int cap = pci_pcie_cap(pdev);
-
- if (cap) {
- u16 ctl;
-
- pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
- ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
- pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
- }
+ pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_READRQ, force);
}
struct rtl_cond {
@@ -4739,28 +4734,14 @@ static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
static void rtl_disable_clock_request(struct pci_dev *pdev)
{
- int cap = pci_pcie_cap(pdev);
-
- if (cap) {
- u16 ctl;
-
- pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
- ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
- pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
- }
+ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CLKREQ_EN);
}
static void rtl_enable_clock_request(struct pci_dev *pdev)
{
- int cap = pci_pcie_cap(pdev);
-
- if (cap) {
- u16 ctl;
-
- pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
- ctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
- pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
- }
+ pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CLKREQ_EN);
}
#define R8168_CPCMD_QUIRK_MASK (\
@@ -5405,14 +5386,9 @@ static void rtl_hw_start_8101(struct net_device *dev)
tp->event_slow &= ~RxFIFOOver;
if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
- tp->mac_version == RTL_GIGA_MAC_VER_16) {
- int cap = pci_pcie_cap(pdev);
-
- if (cap) {
- pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_NOSNOOP_EN);
- }
- }
+ tp->mac_version == RTL_GIGA_MAC_VER_16)
+ pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_NOSNOOP_EN);
RTL_W8(Cfg9346, Cfg9346_Unlock);
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index df808ac8cb65..6a40dd03a32f 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -99,13 +99,13 @@ typedef enum {
* The SEEQ8005 doesn't like us writing to its registers
* too quickly.
*/
-static inline void ether3_outb(int v, const void __iomem *r)
+static inline void ether3_outb(int v, void __iomem *r)
{
writeb(v, r);
udelay(1);
}
-static inline void ether3_outw(int v, const void __iomem *r)
+static inline void ether3_outw(int v, void __iomem *r)
{
writew(v, r);
udelay(1);
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index fb3cbc27063c..25906c1d1b15 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -34,3 +34,10 @@ config SFC_SRIOV
This enables support for the SFC9000 I/O Virtualization
features, allowing accelerated network performance in
virtualized environments.
+config SFC_PTP
+ bool "Solarflare SFC9000-family PTP support"
+ depends on SFC && PTP_1588_CLOCK && !(SFC=y && PTP_1588_CLOCK=m)
+ default y
+ ---help---
+ This enables support for the Precision Time Protocol (PTP)
+ on SFC9000-family NICs
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index ea1f8db57318..e11f2ecf69d9 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -5,5 +5,6 @@ sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
mcdi.o mcdi_phy.o mcdi_mon.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
+sfc-$(CONFIG_SFC_PTP) += ptp.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
index b26a954c27fc..5400a33f254f 100644
--- a/drivers/net/ethernet/sfc/bitfield.h
+++ b/drivers/net/ethernet/sfc/bitfield.h
@@ -120,10 +120,10 @@ typedef union efx_oword {
* [0,high-low), with garbage in bits [high-low+1,...).
*/
#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \
- (((low > max) || (high < min)) ? 0 : \
- ((low > min) ? \
- ((native_element) >> (low - min)) : \
- ((native_element) << (min - low))))
+ ((low) > (max) || (high) < (min) ? 0 : \
+ (low) > (min) ? \
+ (native_element) >> ((low) - (min)) : \
+ (native_element) << ((min) - (low)))
/*
* Extract bit field portion [low,high) from the 64-bit little-endian
@@ -142,27 +142,27 @@ typedef union efx_oword {
#define EFX_EXTRACT_OWORD64(oword, low, high) \
((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \
- EFX_MASK64(high + 1 - low))
+ EFX_MASK64((high) + 1 - (low)))
#define EFX_EXTRACT_QWORD64(qword, low, high) \
(EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \
- EFX_MASK64(high + 1 - low))
+ EFX_MASK64((high) + 1 - (low)))
#define EFX_EXTRACT_OWORD32(oword, low, high) \
((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \
- EFX_MASK32(high + 1 - low))
+ EFX_MASK32((high) + 1 - (low)))
#define EFX_EXTRACT_QWORD32(qword, low, high) \
((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \
- EFX_MASK32(high + 1 - low))
+ EFX_MASK32((high) + 1 - (low)))
#define EFX_EXTRACT_DWORD(dword, low, high) \
(EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \
- EFX_MASK32(high + 1 - low))
+ EFX_MASK32((high) + 1 - (low)))
#define EFX_OWORD_FIELD64(oword, field) \
EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \
@@ -442,10 +442,10 @@ typedef union efx_oword {
cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value))
#define EFX_INPLACE_MASK64(min, max, low, high) \
- EFX_INSERT64(min, max, low, high, EFX_MASK64(high + 1 - low))
+ EFX_INSERT64(min, max, low, high, EFX_MASK64((high) + 1 - (low)))
#define EFX_INPLACE_MASK32(min, max, low, high) \
- EFX_INSERT32(min, max, low, high, EFX_MASK32(high + 1 - low))
+ EFX_INSERT32(min, max, low, high, EFX_MASK32((high) + 1 - (low)))
#define EFX_SET_OWORD64(oword, low, high, value) do { \
(oword).u64[0] = (((oword).u64[0] \
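
The bitfield.h changes only add parentheses around the macro parameters; without them, a caller that passes an expression such as a difference gets the wrong result because the subtraction associates with the surrounding arithmetic. A standalone sketch of the failure mode, using simplified stand-ins rather than the real EFX_* macros:

/* Standalone sketch of the macro-hygiene bug fixed above. */
#include <stdio.h>

/* Width of the bit field [low, high], without and with parameter parentheses. */
#define FIELD_WIDTH_BAD(low, high)   (high + 1 - low)
#define FIELD_WIDTH_GOOD(low, high)  ((high) + 1 - (low))

int main(void)
{
	/* field from bit (8 - 4) = 4 up to bit 11: expected width is 8 */
	printf("bad:  %d\n", FIELD_WIDTH_BAD(8 - 4, 11));	/* 11 + 1 - 8 - 4 = 0 */
	printf("good: %d\n", FIELD_WIDTH_GOOD(8 - 4, 11));	/* 11 + 1 - (8 - 4) = 8 */
	return 0;
}

Running this prints 0 for the unparenthesised form and 8 for the fixed one.
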
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 65a8d49106a4..96bd980e828d 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -202,11 +202,21 @@ static void efx_stop_all(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \
- if ((efx->state == STATE_RUNNING) || \
+ if ((efx->state == STATE_READY) || \
(efx->state == STATE_DISABLED)) \
ASSERT_RTNL(); \
} while (0)
+static int efx_check_disabled(struct efx_nic *efx)
+{
+ if (efx->state == STATE_DISABLED) {
+ netif_err(efx, drv, efx->net_dev,
+ "device is disabled due to earlier errors\n");
+ return -EIO;
+ }
+ return 0;
+}
+
/**************************************************************************
*
* Event queue processing
@@ -630,6 +640,16 @@ static void efx_start_datapath(struct efx_nic *efx)
efx->rx_buffer_order = get_order(efx->rx_buffer_len +
sizeof(struct efx_rx_page_state));
+ /* We must keep at least one descriptor in a TX ring empty.
+ * We could avoid this when the queue size does not exactly
+ * match the hardware ring size, but it's not that important.
+ * Therefore we stop the queue when one more skb might fill
+ * the ring completely. We wake it when half way back to
+ * empty.
+ */
+ efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
+ efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
+
/* Initialise the channels */
efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel)
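
The new comment above describes the TX queue hysteresis: stop the queue once one more worst-case skb could fill the ring, and wake it again at half that level. A standalone sketch with assumed numbers (the ring size and the per-skb descriptor bound are illustrative; the driver uses efx->txq_entries and efx_tx_max_skb_descs()):

/* Standalone sketch of the TX queue stop/wake thresholds computed above.
 * 512 entries and 18 descriptors per skb are assumed example values. */
#include <stdio.h>

int main(void)
{
	unsigned int txq_entries = 512;		/* ring size, illustrative */
	unsigned int max_skb_descs = 18;	/* worst-case descriptors per skb, illustrative */

	unsigned int stop_thresh = txq_entries - max_skb_descs;	/* 494 */
	unsigned int wake_thresh = stop_thresh / 2;		/* 247 */

	printf("stop queue at fill level %u, wake at %u\n", stop_thresh, wake_thresh);
	return 0;
}
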
@@ -714,6 +734,7 @@ static void efx_remove_channel(struct efx_channel *channel)
efx_for_each_possible_channel_tx_queue(tx_queue, channel)
efx_remove_tx_queue(tx_queue);
efx_remove_eventq(channel);
+ channel->type->post_remove(channel);
}
static void efx_remove_channels(struct efx_nic *efx)
@@ -730,7 +751,11 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
u32 old_rxq_entries, old_txq_entries;
unsigned i, next_buffer_table = 0;
- int rc = 0;
+ int rc;
+
+ rc = efx_check_disabled(efx);
+ if (rc)
+ return rc;
/* Not all channels should be reallocated. We must avoid
* reallocating their buffer table entries.
@@ -828,6 +853,7 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
static const struct efx_channel_type efx_default_channel_type = {
.pre_probe = efx_channel_dummy_op_int,
+ .post_remove = efx_channel_dummy_op_void,
.get_name = efx_get_channel_name,
.copy = efx_copy_channel,
.keep_eventq = false,
@@ -838,6 +864,10 @@ int efx_channel_dummy_op_int(struct efx_channel *channel)
return 0;
}
+void efx_channel_dummy_op_void(struct efx_channel *channel)
+{
+}
+
/**************************************************************************
*
* Port handling
@@ -1365,6 +1395,8 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{
struct efx_channel *channel;
+ BUG_ON(efx->state == STATE_DISABLED);
+
if (efx->legacy_irq)
efx->legacy_irq_enabled = true;
efx_nic_enable_interrupts(efx);
@@ -1382,6 +1414,9 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{
struct efx_channel *channel;
+ if (efx->state == STATE_DISABLED)
+ return;
+
efx_mcdi_mode_poll(efx);
efx_nic_disable_interrupts(efx);
@@ -1422,10 +1457,16 @@ static void efx_set_channels(struct efx_nic *efx)
efx->tx_channel_offset =
separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
- /* We need to adjust the TX queue numbers if we have separate
+ /* We need to mark which channels really have RX and TX
+ * queues, and adjust the TX queue numbers if we have separate
* RX-only and TX-only channels.
*/
efx_for_each_channel(channel, efx) {
+ if (channel->channel < efx->n_rx_channels)
+ channel->rx_queue.core_index = channel->channel;
+ else
+ channel->rx_queue.core_index = -1;
+
efx_for_each_channel_tx_queue(tx_queue, channel)
tx_queue->queue -= (efx->tx_channel_offset *
EFX_TXQ_TYPES);
@@ -1533,22 +1574,21 @@ static int efx_probe_all(struct efx_nic *efx)
return rc;
}
-/* Called after previous invocation(s) of efx_stop_all, restarts the port,
- * kernel transmit queues and NAPI processing, and ensures that the port is
- * scheduled to be reconfigured. This function is safe to call multiple
- * times when the NIC is in any state.
+/* If the interface is supposed to be running but is not, start
+ * the hardware and software data path, regular activity for the port
+ * (MAC statistics, link polling, etc.) and schedule the port to be
+ * reconfigured. Interrupts must already be enabled. This function
+ * is safe to call multiple times, so long as the NIC is not disabled.
+ * Requires the RTNL lock.
*/
static void efx_start_all(struct efx_nic *efx)
{
EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->state == STATE_DISABLED);
/* Check that it is appropriate to restart the interface. All
* of these flags are safe to read under just the rtnl lock */
- if (efx->port_enabled)
- return;
- if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
- return;
- if (!netif_running(efx->net_dev))
+ if (efx->port_enabled || !netif_running(efx->net_dev))
return;
efx_start_port(efx);
@@ -1582,11 +1622,11 @@ static void efx_flush_all(struct efx_nic *efx)
cancel_work_sync(&efx->mac_work);
}
-/* Quiesce hardware and software without bringing the link down.
- * Safe to call multiple times, when the nic and interface is in any
- * state. The caller is guaranteed to subsequently be in a position
- * to modify any hardware and software state they see fit without
- * taking locks. */
+/* Quiesce the hardware and software data path, and regular activity
+ * for the port without bringing the link down. Safe to call multiple
+ * times with the NIC in almost any state, but interrupts should be
+ * enabled. Requires the RTNL lock.
+ */
static void efx_stop_all(struct efx_nic *efx)
{
EFX_ASSERT_RESET_SERIALISED(efx);
@@ -1739,7 +1779,8 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
struct efx_nic *efx = netdev_priv(net_dev);
struct mii_ioctl_data *data = if_mii(ifr);
- EFX_ASSERT_RESET_SERIALISED(efx);
+ if (cmd == SIOCSHWTSTAMP)
+ return efx_ptp_ioctl(efx, ifr, cmd);
/* Convert phy_id from older PRTAD/DEVAD format */
if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
@@ -1820,13 +1861,14 @@ static void efx_netpoll(struct net_device *net_dev)
static int efx_net_open(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- EFX_ASSERT_RESET_SERIALISED(efx);
+ int rc;
netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
raw_smp_processor_id());
- if (efx->state == STATE_DISABLED)
- return -EIO;
+ rc = efx_check_disabled(efx);
+ if (rc)
+ return rc;
if (efx->phy_mode & PHY_MODE_SPECIAL)
return -EBUSY;
if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
@@ -1852,10 +1894,8 @@ static int efx_net_stop(struct net_device *net_dev)
netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
raw_smp_processor_id());
- if (efx->state != STATE_DISABLED) {
- /* Stop the device and flush all the channels */
- efx_stop_all(efx);
- }
+ /* Stop the device and flush all the channels */
+ efx_stop_all(efx);
return 0;
}
@@ -1915,9 +1955,11 @@ static void efx_watchdog(struct net_device *net_dev)
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
- EFX_ASSERT_RESET_SERIALISED(efx);
-
+ rc = efx_check_disabled(efx);
+ if (rc)
+ return rc;
if (new_mtu > EFX_MAX_MTU)
return -EINVAL;
@@ -1926,8 +1968,6 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
mutex_lock(&efx->mac_lock);
- /* Reconfigure the MAC before enabling the dma queues so that
- * the RX buffers don't overflow */
net_dev->mtu = new_mtu;
efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock);
@@ -1942,8 +1982,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
struct sockaddr *addr = data;
char *new_addr = addr->sa_data;
- EFX_ASSERT_RESET_SERIALISED(efx);
-
if (!is_valid_ether_addr(new_addr)) {
netif_err(efx, drv, efx->net_dev,
"invalid ethernet MAC address requested: %pM\n",
@@ -2079,11 +2117,27 @@ static int efx_register_netdev(struct efx_nic *efx)
rtnl_lock();
+ /* Enable resets to be scheduled and check whether any were
+ * already requested. If so, the NIC is probably hosed so we
+ * abort.
+ */
+ efx->state = STATE_READY;
+ smp_mb(); /* ensure we change state before checking reset_pending */
+ if (efx->reset_pending) {
+ netif_err(efx, probe, efx->net_dev,
+ "aborting probe due to scheduled reset\n");
+ rc = -EIO;
+ goto fail_locked;
+ }
+
rc = dev_alloc_name(net_dev, net_dev->name);
if (rc < 0)
goto fail_locked;
efx_update_name(efx);
+ /* Always start with carrier off; PHY events will detect the link */
+ netif_carrier_off(net_dev);
+
rc = register_netdevice(net_dev);
if (rc)
goto fail_locked;
@@ -2094,9 +2148,6 @@ static int efx_register_netdev(struct efx_nic *efx)
efx_init_tx_queue_core_txq(tx_queue);
}
- /* Always start with carrier off; PHY events will detect the link */
- netif_carrier_off(net_dev);
-
rtnl_unlock();
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2108,14 +2159,14 @@ static int efx_register_netdev(struct efx_nic *efx)
return 0;
+fail_registered:
+ rtnl_lock();
+ unregister_netdevice(net_dev);
fail_locked:
+ efx->state = STATE_UNINIT;
rtnl_unlock();
netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
return rc;
-
-fail_registered:
- unregister_netdev(net_dev);
- return rc;
}
static void efx_unregister_netdev(struct efx_nic *efx)
@@ -2138,7 +2189,11 @@ static void efx_unregister_netdev(struct efx_nic *efx)
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
- unregister_netdev(efx->net_dev);
+
+ rtnl_lock();
+ unregister_netdevice(efx->net_dev);
+ efx->state = STATE_UNINIT;
+ rtnl_unlock();
}
/**************************************************************************
@@ -2154,9 +2209,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
EFX_ASSERT_RESET_SERIALISED(efx);
efx_stop_all(efx);
- mutex_lock(&efx->mac_lock);
-
efx_stop_interrupts(efx, false);
+
+ mutex_lock(&efx->mac_lock);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
efx->phy_op->fini(efx);
efx->type->fini(efx);
@@ -2276,16 +2331,15 @@ static void efx_reset_work(struct work_struct *data)
if (!pending)
return;
- /* If we're not RUNNING then don't reset. Leave the reset_pending
- * flags set so that efx_pci_probe_main will be retried */
- if (efx->state != STATE_RUNNING) {
- netif_info(efx, drv, efx->net_dev,
- "scheduled reset quenched. NIC not RUNNING\n");
- return;
- }
-
rtnl_lock();
- (void)efx_reset(efx, fls(pending) - 1);
+
+ /* We checked the state in efx_schedule_reset() but it may
+ * have changed by now. Now that we have the RTNL lock,
+ * it cannot change again.
+ */
+ if (efx->state == STATE_READY)
+ (void)efx_reset(efx, fls(pending) - 1);
+
rtnl_unlock();
}
@@ -2311,6 +2365,13 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
}
set_bit(method, &efx->reset_pending);
+ smp_mb(); /* ensure we change reset_pending before checking state */
+
+ /* If we're not READY then just leave the flags set as the cue
+ * to abort probing or reschedule the reset later.
+ */
+ if (ACCESS_ONCE(efx->state) != STATE_READY)
+ return;
/* efx_process_channel() will no longer read events once a
* reset is scheduled. So switch back to poll'd MCDI completions. */
@@ -2376,13 +2437,12 @@ static const struct efx_phy_operations efx_dummy_phy_operations = {
/* This zeroes out and then fills in the invariants in a struct
* efx_nic (including all sub-structures).
*/
-static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
+static int efx_init_struct(struct efx_nic *efx,
struct pci_dev *pci_dev, struct net_device *net_dev)
{
int i;
/* Initialise common structures */
- memset(efx, 0, sizeof(*efx));
spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
INIT_LIST_HEAD(&efx->mtd_list);
@@ -2392,7 +2452,7 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
efx->pci_dev = pci_dev;
efx->msg_enable = debug;
- efx->state = STATE_INIT;
+ efx->state = STATE_UNINIT;
strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev;
@@ -2409,8 +2469,6 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
goto fail;
}
- efx->type = type;
-
EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
/* Higher numbered interrupt modes are less capable! */
@@ -2455,6 +2513,12 @@ static void efx_fini_struct(struct efx_nic *efx)
*/
static void efx_pci_remove_main(struct efx_nic *efx)
{
+ /* Flush reset_work. It can no longer be scheduled since we
+ * are not READY.
+ */
+ BUG_ON(efx->state == STATE_READY);
+ cancel_work_sync(&efx->reset_work);
+
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
efx->net_dev->rx_cpu_rmap = NULL;
@@ -2480,24 +2544,15 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
/* Mark the NIC as fini, then stop the interface */
rtnl_lock();
- efx->state = STATE_FINI;
dev_close(efx->net_dev);
-
- /* Allow any queued efx_resets() to complete */
+ efx_stop_interrupts(efx, false);
rtnl_unlock();
- efx_stop_interrupts(efx, false);
efx_sriov_fini(efx);
efx_unregister_netdev(efx);
efx_mtd_remove(efx);
- /* Wait for any scheduled resets to complete. No more will be
- * scheduled from this point because efx_stop_all() has been
- * called, we are no longer registered with driverlink, and
- * the net_device's have been removed. */
- cancel_work_sync(&efx->reset_work);
-
efx_pci_remove_main(efx);
efx_fini_io(efx);
@@ -2617,7 +2672,6 @@ static int efx_pci_probe_main(struct efx_nic *efx)
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *entry)
{
- const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
struct net_device *net_dev;
struct efx_nic *efx;
int rc;
@@ -2627,10 +2681,12 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
EFX_MAX_RX_QUEUES);
if (!net_dev)
return -ENOMEM;
- net_dev->features |= (type->offload_features | NETIF_F_SG |
+ efx = netdev_priv(net_dev);
+ efx->type = (const struct efx_nic_type *) entry->driver_data;
+ net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_TSO |
NETIF_F_RXCSUM);
- if (type->offload_features & NETIF_F_V6_CSUM)
+ if (efx->type->offload_features & NETIF_F_V6_CSUM)
net_dev->features |= NETIF_F_TSO6;
/* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
@@ -2638,10 +2694,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
NETIF_F_RXCSUM);
/* All offloads can be toggled */
net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
- efx = netdev_priv(net_dev);
pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
- rc = efx_init_struct(efx, type, pci_dev, net_dev);
+ rc = efx_init_struct(efx, pci_dev, net_dev);
if (rc)
goto fail1;
@@ -2656,28 +2711,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
goto fail2;
rc = efx_pci_probe_main(efx);
-
- /* Serialise against efx_reset(). No more resets will be
- * scheduled since efx_stop_all() has been called, and we have
- * not and never have been registered.
- */
- cancel_work_sync(&efx->reset_work);
-
if (rc)
goto fail3;
- /* If there was a scheduled reset during probe, the NIC is
- * probably hosed anyway.
- */
- if (efx->reset_pending) {
- rc = -EIO;
- goto fail4;
- }
-
- /* Switch to the running state before we expose the device to the OS,
- * so that dev_open()|efx_start_all() will actually start the device */
- efx->state = STATE_RUNNING;
-
rc = efx_register_netdev(efx);
if (rc)
goto fail4;
@@ -2717,12 +2753,18 @@ static int efx_pm_freeze(struct device *dev)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
- efx->state = STATE_FINI;
+ rtnl_lock();
- netif_device_detach(efx->net_dev);
+ if (efx->state != STATE_DISABLED) {
+ efx->state = STATE_UNINIT;
- efx_stop_all(efx);
- efx_stop_interrupts(efx, false);
+ netif_device_detach(efx->net_dev);
+
+ efx_stop_all(efx);
+ efx_stop_interrupts(efx, false);
+ }
+
+ rtnl_unlock();
return 0;
}
@@ -2731,21 +2773,25 @@ static int efx_pm_thaw(struct device *dev)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
- efx->state = STATE_INIT;
+ rtnl_lock();
- efx_start_interrupts(efx, false);
+ if (efx->state != STATE_DISABLED) {
+ efx_start_interrupts(efx, false);
- mutex_lock(&efx->mac_lock);
- efx->phy_op->reconfigure(efx);
- mutex_unlock(&efx->mac_lock);
+ mutex_lock(&efx->mac_lock);
+ efx->phy_op->reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
- efx_start_all(efx);
+ efx_start_all(efx);
- netif_device_attach(efx->net_dev);
+ netif_device_attach(efx->net_dev);
- efx->state = STATE_RUNNING;
+ efx->state = STATE_READY;
- efx->type->resume_wol(efx);
+ efx->type->resume_wol(efx);
+ }
+
+ rtnl_unlock();
/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
queue_work(reset_workqueue, &efx->reset_work);
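For context, the open/MTU paths above now call efx_check_disabled() instead of open-coding the STATE_DISABLED test. The helper itself is added in a part of efx.c outside this hunk; a minimal sketch of what it amounts to, based on the open-coded test it replaces, is:

static int efx_check_disabled(struct efx_nic *efx)
{
        if (efx->state == STATE_DISABLED) {
                netif_err(efx, drv, efx->net_dev,
                          "device is disabled due to earlier errors\n");
                return -EIO;
        }
        return 0;
}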
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 70755c97251a..f11170bc48bf 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -102,6 +102,7 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
/* Channels */
extern int efx_channel_dummy_op_int(struct efx_channel *channel);
+extern void efx_channel_dummy_op_void(struct efx_channel *channel);
extern void efx_process_channel_now(struct efx_channel *channel);
extern int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 5faedd855b77..90f078eff8e6 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -337,7 +337,8 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
unsigned int test_index,
struct ethtool_string *strings, u64 *data)
{
- struct efx_channel *channel = efx_get_channel(efx, 0);
+ struct efx_channel *channel =
+ efx_get_channel(efx, efx->tx_channel_offset);
struct efx_tx_queue *tx_queue;
efx_for_each_channel_tx_queue(tx_queue, channel) {
@@ -529,9 +530,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
if (!efx_tests)
goto fail;
-
- ASSERT_RTNL();
- if (efx->state != STATE_RUNNING) {
+ if (efx->state != STATE_READY) {
rc = -EIO;
goto fail1;
}
@@ -962,9 +961,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
int rc;
/* Check that user wants us to choose the location */
- if (rule->location != RX_CLS_LOC_ANY &&
- rule->location != RX_CLS_LOC_FIRST &&
- rule->location != RX_CLS_LOC_LAST)
+ if (rule->location != RX_CLS_LOC_ANY)
return -EINVAL;
/* Range-check ring_cookie */
@@ -978,9 +975,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
rule->m_ext.data[1]))
return -EINVAL;
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
- (rule->location == RX_CLS_LOC_FIRST) ?
- EFX_FILTER_FLAG_RX_OVERRIDE_IP : 0,
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0,
(rule->ring_cookie == RX_CLS_FLOW_DISC) ?
0xfff : rule->ring_cookie);
@@ -1176,6 +1171,7 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_indir = efx_ethtool_get_rxfh_indir,
.set_rxfh_indir = efx_ethtool_set_rxfh_indir,
+ .get_ts_info = efx_ptp_get_ts_info,
.get_module_info = efx_ethtool_get_module_info,
.get_module_eeprom = efx_ethtool_get_module_eeprom,
};
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index 8687a6c3db0d..ec1e99d0dcad 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -380,7 +380,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
new_mode = PHY_MODE_SPECIAL;
if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
err = 0;
- } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
+ } else if (efx->state != STATE_READY || netif_running(efx->net_dev)) {
err = -EBUSY;
} else {
/* Reset the PHY, reconfigure the MAC and enable/disable
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index c3fd61f0a95c..8af42cd1feda 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -162,20 +162,12 @@ static void efx_filter_push_rx_config(struct efx_nic *efx)
!!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
EFX_FILTER_FLAG_RX_RSS));
EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE,
- !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
- EFX_FILTER_FLAG_RX_OVERRIDE_IP));
- EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
!!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
EFX_FILTER_FLAG_RX_RSS));
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE,
- !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
- EFX_FILTER_FLAG_RX_OVERRIDE_IP));
}
efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
@@ -480,14 +472,12 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
case EFX_FILTER_TABLE_RX_MAC: {
bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
- EFX_POPULATE_OWORD_8(
+ EFX_POPULATE_OWORD_7(
*filter,
FRF_CZ_RMFT_RSS_EN,
!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
FRF_CZ_RMFT_SCATTER_EN,
!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
- FRF_CZ_RMFT_IP_OVERRIDE,
- !!(spec->flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP),
FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
@@ -567,49 +557,62 @@ static int efx_filter_search(struct efx_filter_table *table,
}
/*
- * Construct/deconstruct external filter IDs. These must be ordered
- * by matching priority, for RX NFC semantics.
+ * Construct/deconstruct external filter IDs. At least the RX filter
+ * IDs must be ordered by matching priority, for RX NFC semantics.
*
- * Each RX MAC filter entry has a flag for whether it can override an
- * RX IP filter that also matches. So we assign locations for MAC
- * filters with overriding behaviour, then for IP filters, then for
- * MAC filters without overriding behaviour.
+ * Deconstruction needs to be robust against invalid IDs so that
+ * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
+ * accept user-provided IDs.
*/
-#define EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP 0
-#define EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP 1
-#define EFX_FILTER_MATCH_PRI_NORMAL_BASE 2
+#define EFX_FILTER_MATCH_PRI_COUNT 5
+
+static const u8 efx_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = {
+ [EFX_FILTER_TCP_FULL] = 0,
+ [EFX_FILTER_UDP_FULL] = 0,
+ [EFX_FILTER_TCP_WILD] = 1,
+ [EFX_FILTER_UDP_WILD] = 1,
+ [EFX_FILTER_MAC_FULL] = 2,
+ [EFX_FILTER_MAC_WILD] = 3,
+ [EFX_FILTER_UC_DEF] = 4,
+ [EFX_FILTER_MC_DEF] = 4,
+};
+
+static const enum efx_filter_table_id efx_filter_range_table[] = {
+ EFX_FILTER_TABLE_RX_IP, /* RX match pri 0 */
+ EFX_FILTER_TABLE_RX_IP,
+ EFX_FILTER_TABLE_RX_MAC,
+ EFX_FILTER_TABLE_RX_MAC,
+ EFX_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
+ EFX_FILTER_TABLE_COUNT, /* TX match pri 0; invalid */
+ EFX_FILTER_TABLE_COUNT, /* invalid */
+ EFX_FILTER_TABLE_TX_MAC,
+ EFX_FILTER_TABLE_TX_MAC, /* TX match pri 3 */
+};
#define EFX_FILTER_INDEX_WIDTH 13
#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
-static inline u32 efx_filter_make_id(enum efx_filter_table_id table_id,
- unsigned int index, u8 flags)
+static inline u32
+efx_filter_make_id(const struct efx_filter_spec *spec, unsigned int index)
{
- unsigned int match_pri = EFX_FILTER_MATCH_PRI_NORMAL_BASE + table_id;
+ unsigned int range;
- if (flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) {
- if (table_id == EFX_FILTER_TABLE_RX_MAC)
- match_pri = EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP;
- else if (table_id == EFX_FILTER_TABLE_RX_DEF)
- match_pri = EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP;
- }
+ range = efx_filter_type_match_pri[spec->type];
+ if (!(spec->flags & EFX_FILTER_FLAG_RX))
+ range += EFX_FILTER_MATCH_PRI_COUNT;
- return match_pri << EFX_FILTER_INDEX_WIDTH | index;
+ return range << EFX_FILTER_INDEX_WIDTH | index;
}
static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
{
- unsigned int match_pri = id >> EFX_FILTER_INDEX_WIDTH;
+ unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
- switch (match_pri) {
- case EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP:
- return EFX_FILTER_TABLE_RX_MAC;
- case EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP:
- return EFX_FILTER_TABLE_RX_DEF;
- default:
- return match_pri - EFX_FILTER_MATCH_PRI_NORMAL_BASE;
- }
+ if (range < ARRAY_SIZE(efx_filter_range_table))
+ return efx_filter_range_table[range];
+ else
+ return EFX_FILTER_TABLE_COUNT; /* invalid */
}
static inline unsigned int efx_filter_id_index(u32 id)
@@ -619,12 +622,9 @@ static inline unsigned int efx_filter_id_index(u32 id)
static inline u8 efx_filter_id_flags(u32 id)
{
- unsigned int match_pri = id >> EFX_FILTER_INDEX_WIDTH;
+ unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
- if (match_pri < EFX_FILTER_MATCH_PRI_NORMAL_BASE)
- return EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP;
- else if (match_pri <=
- EFX_FILTER_MATCH_PRI_NORMAL_BASE + EFX_FILTER_TABLE_RX_DEF)
+ if (range < EFX_FILTER_MATCH_PRI_COUNT)
return EFX_FILTER_FLAG_RX;
else
return EFX_FILTER_FLAG_TX;
@@ -633,14 +633,15 @@ static inline u8 efx_filter_id_flags(u32 id)
u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
struct efx_filter_state *state = efx->filter_state;
- unsigned int table_id = EFX_FILTER_TABLE_RX_DEF;
+ unsigned int range = EFX_FILTER_MATCH_PRI_COUNT - 1;
+ enum efx_filter_table_id table_id;
do {
+ table_id = efx_filter_range_table[range];
if (state->table[table_id].size != 0)
- return ((EFX_FILTER_MATCH_PRI_NORMAL_BASE + table_id)
- << EFX_FILTER_INDEX_WIDTH) +
+ return range << EFX_FILTER_INDEX_WIDTH |
state->table[table_id].size;
- } while (table_id--);
+ } while (range--);
return 0;
}
@@ -718,7 +719,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
netif_vdbg(efx, hw, efx->net_dev,
"%s: filter type %d index %d rxq %u set",
__func__, spec->type, filter_idx, spec->dmaq_id);
- rc = efx_filter_make_id(table->id, filter_idx, spec->flags);
+ rc = efx_filter_make_id(spec, filter_idx);
out:
spin_unlock_bh(&state->lock);
@@ -781,8 +782,7 @@ int efx_filter_remove_id_safe(struct efx_nic *efx,
spin_lock_bh(&state->lock);
if (test_bit(filter_idx, table->used_bitmap) &&
- spec->priority == priority &&
- !((spec->flags ^ filter_flags) & EFX_FILTER_FLAG_RX_OVERRIDE_IP)) {
+ spec->priority == priority) {
efx_filter_table_clear_entry(efx, table, filter_idx);
if (table->used == 0)
efx_filter_table_reset_search_depth(table);
@@ -833,8 +833,7 @@ int efx_filter_get_filter_safe(struct efx_nic *efx,
spin_lock_bh(&state->lock);
if (test_bit(filter_idx, table->used_bitmap) &&
- spec->priority == priority &&
- !((spec->flags ^ filter_flags) & EFX_FILTER_FLAG_RX_OVERRIDE_IP)) {
+ spec->priority == priority) {
*spec_buf = *spec;
rc = 0;
} else {
@@ -927,8 +926,7 @@ s32 efx_filter_get_rx_ids(struct efx_nic *efx,
goto out;
}
buf[count++] = efx_filter_make_id(
- table_id, filter_idx,
- table->spec[filter_idx].flags);
+ &table->spec[filter_idx], filter_idx);
}
}
}
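The new ID scheme above packs a match-priority "range" into the top bits and the hardware table index into the low EFX_FILTER_INDEX_WIDTH bits; TX filters are offset by EFX_FILTER_MATCH_PRI_COUNT so the RX IDs remain ordered by matching priority. As a worked example (a standalone sketch, not part of the patch): an RX EFX_FILTER_MAC_WILD entry has match priority 3, so at table index 7 it gets id = (3 << 13) | 7 = 24583, and deconstruction recovers EFX_FILTER_TABLE_RX_MAC from efx_filter_range_table[3].

#include <stdio.h>

#define EFX_FILTER_INDEX_WIDTH 13
#define EFX_FILTER_INDEX_MASK  ((1 << EFX_FILTER_INDEX_WIDTH) - 1)

int main(void)
{
        unsigned int range = 3;  /* match priority of an RX MAC_WILD filter */
        unsigned int index = 7;  /* hardware table index */
        unsigned int id = range << EFX_FILTER_INDEX_WIDTH | index;

        printf("id=%u -> range=%u index=%u\n",
               id, id >> EFX_FILTER_INDEX_WIDTH, id & EFX_FILTER_INDEX_MASK);
        return 0;
}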
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 3c77802aed6c..5cb54723b824 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -61,16 +61,12 @@ enum efx_filter_priority {
* according to the indirection table.
* @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
* queue.
- * @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override
- * any IP filter that matches the same packet. By default, IP
- * filters take precedence.
* @EFX_FILTER_FLAG_RX: Filter is for RX
* @EFX_FILTER_FLAG_TX: Filter is for TX
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,
EFX_FILTER_FLAG_RX_SCATTER = 0x02,
- EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
EFX_FILTER_FLAG_RX = 0x08,
EFX_FILTER_FLAG_TX = 0x10,
};
@@ -88,8 +84,7 @@ enum efx_filter_flags {
*
* The @priority field is used by software to determine whether a new
* filter may replace an old one. The hardware priority of a filter
- * depends on the filter type and %EFX_FILTER_FLAG_RX_OVERRIDE_IP
- * flag.
+ * depends on the filter type.
*/
struct efx_filter_spec {
u8 type:4;
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index fc5e7bbcbc9e..aea43cbd0520 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -320,14 +320,20 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
efx_mcdi_complete(mcdi);
}
-/* Issue the given command by writing the data into the shared memory PDU,
- * ring the doorbell and wait for completion. Copyout the result. */
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen,
size_t *outlen_actual)
{
+ efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+ return efx_mcdi_rpc_finish(efx, cmd, inlen,
+ outbuf, outlen, outlen_actual);
+}
+
+void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
+ size_t inlen)
+{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- int rc;
+
BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
efx_mcdi_acquire(mcdi);
@@ -338,6 +344,15 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
spin_unlock_bh(&mcdi->iface_lock);
efx_mcdi_copyin(efx, cmd, inbuf, inlen);
+}
+
+int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+ u8 *outbuf, size_t outlen, size_t *outlen_actual)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ int rc;
+
+ BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
if (mcdi->mode == MCDI_MODE_POLL)
rc = efx_mcdi_poll(efx);
@@ -563,6 +578,11 @@ void efx_mcdi_process_event(struct efx_channel *channel,
case MCDI_EVENT_CODE_FLR:
efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
break;
+ case MCDI_EVENT_CODE_PTP_RX:
+ case MCDI_EVENT_CODE_PTP_FAULT:
+ case MCDI_EVENT_CODE_PTP_PPS:
+ efx_ptp_event(efx, event);
+ break;
default:
netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
@@ -641,9 +661,8 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
u16 *fw_subtype_list, u32 *capabilities)
{
uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN];
- size_t outlen;
+ size_t outlen, offset, i;
int port_num = efx_port_num(efx);
- int offset;
int rc;
BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
@@ -663,11 +682,18 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
: MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
if (mac_address)
memcpy(mac_address, outbuf + offset, ETH_ALEN);
- if (fw_subtype_list)
- memcpy(fw_subtype_list,
- outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST,
- MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM *
- sizeof(fw_subtype_list[0]));
+ if (fw_subtype_list) {
+ /* Byte-swap and truncate or zero-pad as necessary */
+ offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST;
+ for (i = 0;
+ i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM;
+ i++) {
+ fw_subtype_list[i] =
+ (offset + 2 <= outlen) ?
+ le16_to_cpup((__le16 *)(outbuf + offset)) : 0;
+ offset += 2;
+ }
+ }
if (capabilities) {
if (port_num)
*capabilities = MCDI_DWORD(outbuf,
@@ -1169,6 +1195,9 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx)
__le32 *qid;
int rc, count;
+ BUILD_BUG_ON(EFX_MAX_CHANNELS >
+ MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
+
qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL);
if (qid == NULL)
return -ENOMEM;
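The efx_mcdi_rpc() split above lets a caller overlap its own work with MC command execution; efx_mcdi_rpc() itself is now just the start step followed immediately by the finish step. A hypothetical caller (sketch only; everything here apart from the two new helpers is illustrative) would look roughly like the following, and the PTP synchronisation code added below uses exactly this pattern with MC_CMD_PTP:

static int example_split_mcdi_call(struct efx_nic *efx, unsigned cmd,
                                   const u8 *inbuf, size_t inlen)
{
        u8 outbuf[16];          /* illustrative response buffer */
        size_t outlen;

        efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);

        /* ... work that must proceed while the MC runs the command ... */

        return efx_mcdi_rpc_finish(efx, cmd, inlen,
                                   outbuf, sizeof(outbuf), &outlen);
}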
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 0bdf3e331832..3ba2e5b5a9cc 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -71,6 +71,12 @@ extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
size_t inlen, u8 *outbuf, size_t outlen,
size_t *outlen_actual);
+extern void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+ const u8 *inbuf, size_t inlen);
+extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+ u8 *outbuf, size_t outlen,
+ size_t *outlen_actual);
+
extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
extern void efx_mcdi_mode_poll(struct efx_nic *efx);
extern void efx_mcdi_mode_event(struct efx_nic *efx);
@@ -107,11 +113,13 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
#define MCDI_EVENT_FIELD(_ev, _field) \
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
- EFX_DWORD_FIELD( \
+ EFX_EXTRACT_DWORD( \
*((efx_dword_t *) \
(MCDI_ARRAY_PTR(_buf, _field1, _type, _index) + \
(MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _OFST & ~3))), \
- MC_CMD_ ## _type ## _TYPEDEF_ ## _field2)
+ MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f, \
+ (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f) + \
+ MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _WIDTH - 1)
extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index db4beed97669..9d426d0457bd 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -289,6 +289,7 @@
#define MCDI_EVENT_CODE_TX_FLUSH 0xc /* enum */
#define MCDI_EVENT_CODE_PTP_RX 0xd /* enum */
#define MCDI_EVENT_CODE_PTP_FAULT 0xe /* enum */
+#define MCDI_EVENT_CODE_PTP_PPS 0xf /* enum */
#define MCDI_EVENT_CMDDONE_DATA_OFST 0
#define MCDI_EVENT_CMDDONE_DATA_LBN 0
#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
@@ -491,12 +492,12 @@
/* MC_CMD_GET_FPGAREG_OUT msgresponse */
#define MC_CMD_GET_FPGAREG_OUT_LENMIN 1
-#define MC_CMD_GET_FPGAREG_OUT_LENMAX 255
+#define MC_CMD_GET_FPGAREG_OUT_LENMAX 252
#define MC_CMD_GET_FPGAREG_OUT_LEN(num) (0+1*(num))
#define MC_CMD_GET_FPGAREG_OUT_BUFFER_OFST 0
#define MC_CMD_GET_FPGAREG_OUT_BUFFER_LEN 1
#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MINNUM 1
-#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 255
+#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 252
/***********************************/
@@ -507,13 +508,13 @@
/* MC_CMD_PUT_FPGAREG_IN msgrequest */
#define MC_CMD_PUT_FPGAREG_IN_LENMIN 5
-#define MC_CMD_PUT_FPGAREG_IN_LENMAX 255
+#define MC_CMD_PUT_FPGAREG_IN_LENMAX 252
#define MC_CMD_PUT_FPGAREG_IN_LEN(num) (4+1*(num))
#define MC_CMD_PUT_FPGAREG_IN_ADDR_OFST 0
#define MC_CMD_PUT_FPGAREG_IN_BUFFER_OFST 4
#define MC_CMD_PUT_FPGAREG_IN_BUFFER_LEN 1
#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MINNUM 1
-#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 251
+#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 248
/* MC_CMD_PUT_FPGAREG_OUT msgresponse */
#define MC_CMD_PUT_FPGAREG_OUT_LEN 0
@@ -560,7 +561,7 @@
/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
-#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 255
+#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
@@ -568,7 +569,7 @@
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
-#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 243
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240
/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
@@ -1145,7 +1146,7 @@
/* MC_CMD_PUTS_IN msgrequest */
#define MC_CMD_PUTS_IN_LENMIN 13
-#define MC_CMD_PUTS_IN_LENMAX 255
+#define MC_CMD_PUTS_IN_LENMAX 252
#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
#define MC_CMD_PUTS_IN_DEST_OFST 0
#define MC_CMD_PUTS_IN_UART_LBN 0
@@ -1157,7 +1158,7 @@
#define MC_CMD_PUTS_IN_STRING_OFST 12
#define MC_CMD_PUTS_IN_STRING_LEN 1
#define MC_CMD_PUTS_IN_STRING_MINNUM 1
-#define MC_CMD_PUTS_IN_STRING_MAXNUM 243
+#define MC_CMD_PUTS_IN_STRING_MAXNUM 240
/* MC_CMD_PUTS_OUT msgresponse */
#define MC_CMD_PUTS_OUT_LEN 0
@@ -1947,12 +1948,12 @@
/* MC_CMD_NVRAM_READ_OUT msgresponse */
#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
-#define MC_CMD_NVRAM_READ_OUT_LENMAX 255
+#define MC_CMD_NVRAM_READ_OUT_LENMAX 252
#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
-#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 255
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252
/***********************************/
@@ -1963,7 +1964,7 @@
/* MC_CMD_NVRAM_WRITE_IN msgrequest */
#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13
-#define MC_CMD_NVRAM_WRITE_IN_LENMAX 255
+#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
/* Enum values, see field(s): */
@@ -1973,7 +1974,7 @@
#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
-#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 243
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240
/* MC_CMD_NVRAM_WRITE_OUT msgresponse */
#define MC_CMD_NVRAM_WRITE_OUT_LEN 0
@@ -2305,13 +2306,13 @@
/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
-#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 255
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
-#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 251
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248
/***********************************/
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index 758148379b0e..08f825b71ac8 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -585,6 +585,7 @@ static const struct siena_nvram_type_info siena_nvram_types[] = {
[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
[MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
[MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
+ [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" },
};
static int siena_mtd_probe_partition(struct efx_nic *efx,
@@ -598,7 +599,8 @@ static int siena_mtd_probe_partition(struct efx_nic *efx,
bool protected;
int rc;
- if (type >= ARRAY_SIZE(siena_nvram_types))
+ if (type >= ARRAY_SIZE(siena_nvram_types) ||
+ siena_nvram_types[type].name == NULL)
return -ENODEV;
info = &siena_nvram_types[type];
@@ -627,7 +629,8 @@ static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
struct efx_mtd *efx_mtd)
{
struct efx_mtd_partition *part;
- uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM];
+ uint16_t fw_subtype_list[
+ MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
int rc;
rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index cd9c0a989692..c1a010cda89b 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -37,7 +37,7 @@
*
**************************************************************************/
-#define EFX_DRIVER_VERSION "3.1"
+#define EFX_DRIVER_VERSION "3.2"
#ifdef DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -56,7 +56,8 @@
#define EFX_MAX_CHANNELS 32U
#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
#define EFX_EXTRA_CHANNEL_IOV 0
-#define EFX_MAX_EXTRA_CHANNELS 1U
+#define EFX_EXTRA_CHANNEL_PTP 1
+#define EFX_MAX_EXTRA_CHANNELS 2U
/* Checksum generation is a per-queue option in hardware, so each
* queue visible to the networking core is backed by two hardware TX
@@ -68,6 +69,9 @@
#define EFX_TXQ_TYPES 4
#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
+/* Forward declare Precision Time Protocol (PTP) support structure. */
+struct efx_ptp_data;
+
struct efx_self_tests;
/**
@@ -91,29 +95,31 @@ struct efx_special_buffer {
};
/**
- * struct efx_tx_buffer - An Efx TX buffer
- * @skb: The associated socket buffer.
- * Set only on the final fragment of a packet; %NULL for all other
- * fragments. When this fragment completes, then we can free this
- * skb.
- * @tsoh: The associated TSO header structure, or %NULL if this
- * buffer is not a TSO header.
+ * struct efx_tx_buffer - buffer state for a TX descriptor
+ * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
+ * freed when descriptor completes
+ * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
+ * freed when descriptor completes.
* @dma_addr: DMA address of the fragment.
+ * @flags: Flags for allocation and DMA mapping type
* @len: Length of this fragment.
* This field is zero when the queue slot is empty.
- * @continuation: True if this fragment is not the end of a packet.
- * @unmap_single: True if dma_unmap_single should be used.
* @unmap_len: Length of this fragment to unmap
*/
struct efx_tx_buffer {
- const struct sk_buff *skb;
- struct efx_tso_header *tsoh;
+ union {
+ const struct sk_buff *skb;
+ void *heap_buf;
+ };
dma_addr_t dma_addr;
+ unsigned short flags;
unsigned short len;
- bool continuation;
- bool unmap_single;
unsigned short unmap_len;
};
+#define EFX_TX_BUF_CONT 1 /* not last descriptor of packet */
+#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
+#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */
+#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
/**
* struct efx_tx_queue - An Efx TX queue
@@ -133,6 +139,7 @@ struct efx_tx_buffer {
* @channel: The associated channel
* @core_txq: The networking core TX queue structure
* @buffer: The software buffer ring
+ * @tsoh_page: Array of pages of TSO header buffers
* @txd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1.
* @initialised: Has hardware queue been initialised?
@@ -156,9 +163,6 @@ struct efx_tx_buffer {
* variable indicates that the queue is full. This is to
* avoid cache-line ping-pong between the xmit path and the
* completion path.
- * @tso_headers_free: A list of TSO headers allocated for this TX queue
- * that are not in use, and so available for new TSO sends. The list
- * is protected by the TX queue lock.
* @tso_bursts: Number of times TSO xmit invoked by kernel
* @tso_long_headers: Number of packets with headers too long for standard
* blocks
@@ -175,6 +179,7 @@ struct efx_tx_queue {
struct efx_channel *channel;
struct netdev_queue *core_txq;
struct efx_tx_buffer *buffer;
+ struct efx_buffer *tsoh_page;
struct efx_special_buffer txd;
unsigned int ptr_mask;
bool initialised;
@@ -187,7 +192,6 @@ struct efx_tx_queue {
unsigned int insert_count ____cacheline_aligned_in_smp;
unsigned int write_count;
unsigned int old_read_count;
- struct efx_tso_header *tso_headers_free;
unsigned int tso_bursts;
unsigned int tso_long_headers;
unsigned int tso_packets;
@@ -242,6 +246,8 @@ struct efx_rx_page_state {
/**
* struct efx_rx_queue - An Efx RX queue
* @efx: The associated Efx NIC
+ * @core_index: Index of network core RX queue. Will be >= 0 iff this
+ * is associated with a real RX queue.
* @buffer: The software buffer ring
* @rxd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1.
@@ -263,6 +269,7 @@ struct efx_rx_page_state {
*/
struct efx_rx_queue {
struct efx_nic *efx;
+ int core_index;
struct efx_rx_buffer *buffer;
struct efx_special_buffer rxd;
unsigned int ptr_mask;
@@ -390,14 +397,17 @@ struct efx_channel {
* @get_name: Generate the channel's name (used for its IRQ handler)
* @copy: Copy the channel state prior to reallocation. May be %NULL if
* reallocation is not supported.
+ * @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
* @keep_eventq: Flag for whether event queue should be kept initialised
* while the device is stopped
*/
struct efx_channel_type {
void (*handle_no_channel)(struct efx_nic *);
int (*pre_probe)(struct efx_channel *);
+ void (*post_remove)(struct efx_channel *);
void (*get_name)(struct efx_channel *, char *buf, size_t len);
struct efx_channel *(*copy)(const struct efx_channel *);
+ void (*receive_skb)(struct efx_channel *, struct sk_buff *);
bool keep_eventq;
};
@@ -430,11 +440,9 @@ enum efx_int_mode {
#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
enum nic_state {
- STATE_INIT = 0,
- STATE_RUNNING = 1,
- STATE_FINI = 2,
- STATE_DISABLED = 3,
- STATE_MAX,
+ STATE_UNINIT = 0, /* device being probed/removed or is frozen */
+ STATE_READY = 1, /* hardware ready and netdev registered */
+ STATE_DISABLED = 2, /* device disabled due to hardware errors */
};
/*
@@ -654,7 +662,7 @@ struct vfdi_status;
* @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
* @irq_rx_moderation: IRQ moderation time for RX event queues
* @msg_enable: Log message enable flags
- * @state: Device state flag. Serialised by the rtnl_lock.
+ * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
* @reset_pending: Bitmask for pending resets
* @tx_queue: TX DMA queues
* @rx_queue: RX DMA queues
@@ -664,6 +672,8 @@ struct vfdi_status;
* should be allocated for this NIC
* @rxq_entries: Size of receive queues requested by user.
* @txq_entries: Size of transmit queues requested by user.
+ * @txq_stop_thresh: TX queue fill level at or above which we stop it.
+ * @txq_wake_thresh: TX queue fill level at or below which we wake it.
* @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
* @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
* @sram_lim_qw: Qword address limit of SRAM
@@ -730,6 +740,7 @@ struct vfdi_status;
* %local_addr_list. Protected by %local_lock.
* @local_lock: Mutex protecting %local_addr_list and %local_page_list.
* @peer_work: Work item to broadcast peer addresses to VMs.
+ * @ptp_data: PTP state data
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -774,6 +785,9 @@ struct efx_nic {
unsigned rxq_entries;
unsigned txq_entries;
+ unsigned int txq_stop_thresh;
+ unsigned int txq_wake_thresh;
+
unsigned tx_dc_base;
unsigned rx_dc_base;
unsigned sram_lim_qw;
@@ -854,6 +868,10 @@ struct efx_nic {
struct work_struct peer_work;
#endif
+#ifdef CONFIG_SFC_PTP
+ struct efx_ptp_data *ptp_data;
+#endif
+
/* The following fields may be written more often */
struct delayed_work monitor_work ____cacheline_aligned_in_smp;
@@ -1044,7 +1062,7 @@ static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
{
- return channel->channel < channel->efx->n_rx_channels;
+ return channel->rx_queue.core_index >= 0;
}
static inline struct efx_rx_queue *
@@ -1116,5 +1134,13 @@ static inline void clear_bit_le(unsigned nr, unsigned char *addr)
#define EFX_MAX_FRAME_LEN(mtu) \
((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16)
+static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
+}
+static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
+{
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+}
#endif /* EFX_NET_DRIVER_H */
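The efx_tx_buffer rework above replaces the @continuation/@unmap_single booleans and the per-queue TSO header free list with a @flags word and a @skb/@heap_buf union. A hedged sketch of how a completion path would consume the new flags (the real logic lives in tx.c, outside this excerpt; the DMA arguments are illustrative):

static void efx_dequeue_buffer_sketch(struct efx_tx_queue *tx_queue,
                                      struct efx_tx_buffer *buffer)
{
        if (buffer->unmap_len) {
                struct device *dev = &tx_queue->efx->pci_dev->dev;

                if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dev, buffer->dma_addr,
                                         buffer->unmap_len, DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, buffer->dma_addr,
                                       buffer->unmap_len, DMA_TO_DEVICE);
                buffer->unmap_len = 0;
        }

        if (buffer->flags & EFX_TX_BUF_SKB)
                dev_kfree_skb_any((struct sk_buff *)buffer->skb);
        else if (buffer->flags & EFX_TX_BUF_HEAP)
                kfree(buffer->heap_buf);

        buffer->len = 0;
        buffer->flags = 0;
}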
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 326d799762d6..cdff40b65729 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -298,7 +298,7 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
/**************************************************************************
*
* Generic buffer handling
- * These buffers are used for interrupt status and MAC stats
+ * These buffers are used for interrupt status, MAC stats, etc.
*
**************************************************************************/
@@ -401,8 +401,10 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
++tx_queue->write_count;
/* Create TX descriptor ring entry */
+ BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
EFX_POPULATE_QWORD_4(*txd,
- FSF_AZ_TX_KER_CONT, buffer->continuation,
+ FSF_AZ_TX_KER_CONT,
+ buffer->flags & EFX_TX_BUF_CONT,
FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
FSF_AZ_TX_KER_BUF_REGION, 0,
FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index bab5cd9f5740..438cef11f727 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -11,6 +11,7 @@
#ifndef EFX_NIC_H
#define EFX_NIC_H
+#include <linux/net_tstamp.h>
#include <linux/i2c-algo-bit.h>
#include "net_driver.h"
#include "efx.h"
@@ -250,6 +251,41 @@ extern int efx_sriov_get_vf_config(struct net_device *dev, int vf,
extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
bool spoofchk);
+struct ethtool_ts_info;
+#ifdef CONFIG_SFC_PTP
+extern void efx_ptp_probe(struct efx_nic *efx);
+extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
+extern int efx_ptp_get_ts_info(struct net_device *net_dev,
+ struct ethtool_ts_info *ts_info);
+extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+#else
+static inline void efx_ptp_probe(struct efx_nic *efx) {}
+static inline int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
+{
+ return -EOPNOTSUPP;
+}
+static inline int efx_ptp_get_ts_info(struct net_device *net_dev,
+ struct ethtool_ts_info *ts_info)
+{
+ ts_info->so_timestamping = (SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE);
+ ts_info->phc_index = -1;
+
+ return 0;
+}
+static inline bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+ return false;
+}
+static inline int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+ return NETDEV_TX_OK;
+}
+static inline void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) {}
+#endif
+
extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
extern const struct efx_nic_type siena_a0_nic_type;
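The new ptp.c below synchronises host and MC time through a single 32-bit word: the low 30 bits carry nanoseconds (10^9 < 2^30) and the top 2 bits carry partial seconds. A standalone sketch of the packing/unpacking arithmetic (illustrative values only):

#include <stdio.h>

#define MC_NANOSECOND_BITS 30
#define MC_NANOSECOND_MASK ((1u << MC_NANOSECOND_BITS) - 1)

int main(void)
{
        unsigned long tv_sec = 1349378401;      /* example host time */
        unsigned long tv_nsec = 123456789;
        unsigned int host_time =
                (unsigned int)(tv_sec << MC_NANOSECOND_BITS | tv_nsec);

        printf("packed=0x%08x sec%%4=%u nsec=%u\n", host_time,
               host_time >> MC_NANOSECOND_BITS,
               host_time & MC_NANOSECOND_MASK);
        return 0;
}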
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
new file mode 100644
index 000000000000..5b3dd028ce85
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -0,0 +1,1484 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/* Theory of operation:
+ *
+ * PTP support is assisted by firmware running on the MC, which provides
+ * the hardware timestamping capabilities. Both transmitted and received
+ * PTP event packets are queued onto internal queues for subsequent processing;
+ * this is because the MC operations are relatively long and would block
+ * NAPI/interrupt operation.
+ *
+ * Receive event processing:
+ * The event contains the packet's UUID and sequence number, together
+ * with the hardware timestamp. The PTP receive packet queue is searched
+ * for this UUID/sequence number and, if found, put on a pending queue.
+ * Packets not matching are delivered without timestamps (MCDI events will
+ * always arrive after the actual packet).
+ * It is important for the operation of the PTP protocol that the ordering
+ * of packets between the event and general port is maintained.
+ *
+ * Work queue processing:
+ * If work waiting, synchronise host/hardware time
+ *
+ * Transmit: send packet through MC, which returns the transmission time
+ * that is converted to an appropriate timestamp.
+ *
+ * Receive: the packet's reception time is converted to an appropriate
+ * timestamp.
+ */
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/time.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/pps_kernel.h>
+#include <linux/ptp_clock_kernel.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "io.h"
+#include "regs.h"
+#include "nic.h"
+
+/* Maximum number of events expected to make up a PTP event */
+#define MAX_EVENT_FRAGS 3
+
+/* Maximum delay, ms, to begin synchronisation */
+#define MAX_SYNCHRONISE_WAIT_MS 2
+
+/* How long, at most, to spend synchronising */
+#define SYNCHRONISE_PERIOD_NS 250000
+
+/* How often to update the shared memory time */
+#define SYNCHRONISATION_GRANULARITY_NS 200
+
+/* Minimum permitted length of a (corrected) synchronisation time */
+#define MIN_SYNCHRONISATION_NS 120
+
+/* Maximum permitted length of a (corrected) synchronisation time */
+#define MAX_SYNCHRONISATION_NS 1000
+
+/* How many (MC) receive events can be queued */
+#define MAX_RECEIVE_EVENTS 8
+
+/* Length of (modified) moving average. */
+#define AVERAGE_LENGTH 16
+
+/* How long an unmatched event or packet can be held */
+#define PKT_EVENT_LIFETIME_MS 10
+
+/* Offsets into PTP packet for identification. These offsets are from the
+ * start of the IP header, not the MAC header. Note that neither PTP V1 nor
+ * PTP V2 permit the use of IPV4 options.
+ */
+#define PTP_DPORT_OFFSET 22
+
+#define PTP_V1_VERSION_LENGTH 2
+#define PTP_V1_VERSION_OFFSET 28
+
+#define PTP_V1_UUID_LENGTH 6
+#define PTP_V1_UUID_OFFSET 50
+
+#define PTP_V1_SEQUENCE_LENGTH 2
+#define PTP_V1_SEQUENCE_OFFSET 58
+
+/* The minimum length of a PTP V1 packet for offsets, etc. to be valid:
+ * includes IP header.
+ */
+#define PTP_V1_MIN_LENGTH 64
+
+#define PTP_V2_VERSION_LENGTH 1
+#define PTP_V2_VERSION_OFFSET 29
+
+/* Although PTP V2 UUIDs comprise a ClockIdentity (8) and PortNumber (2),
+ * the MC only captures the last six bytes of the clock identity. These values
+ * reflect those, not the ones used in the standard. The standard permits
+ * mapping of V1 UUIDs to V2 UUIDs with these same values.
+ */
+#define PTP_V2_MC_UUID_LENGTH 6
+#define PTP_V2_MC_UUID_OFFSET 50
+
+#define PTP_V2_SEQUENCE_LENGTH 2
+#define PTP_V2_SEQUENCE_OFFSET 58
+
+/* The minimum length of a PTP V2 packet for offsets, etc. to be valid:
+ * includes IP header.
+ */
+#define PTP_V2_MIN_LENGTH 63
+
+#define PTP_MIN_LENGTH 63
+
+#define PTP_ADDRESS 0xe0000181 /* 224.0.1.129 */
+#define PTP_EVENT_PORT 319
+#define PTP_GENERAL_PORT 320
+
+/* Annoyingly, the format of the version numbers is different between
+ * versions 1 and 2, so it isn't possible to simply look for 1 or 2.
+ */
+#define PTP_VERSION_V1 1
+
+#define PTP_VERSION_V2 2
+#define PTP_VERSION_V2_MASK 0x0f
+
+enum ptp_packet_state {
+ PTP_PACKET_STATE_UNMATCHED = 0,
+ PTP_PACKET_STATE_MATCHED,
+ PTP_PACKET_STATE_TIMED_OUT,
+ PTP_PACKET_STATE_MATCH_UNWANTED
+};
+
+/* NIC synchronised with single word of time only comprising
+ * partial seconds and full nanoseconds: 10^9 ~ 2^30 so 2 bits for seconds.
+ */
+#define MC_NANOSECOND_BITS 30
+#define MC_NANOSECOND_MASK ((1 << MC_NANOSECOND_BITS) - 1)
+#define MC_SECOND_MASK ((1 << (32 - MC_NANOSECOND_BITS)) - 1)
+
+/* Maximum parts-per-billion adjustment that is acceptable */
+#define MAX_PPB 1000000
+
+/* Number of bits required to hold the above */
+#define MAX_PPB_BITS 20
+
+/* Number of extra bits allowed when calculating fractional ns.
+ * EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS + MAX_PPB_BITS should
+ * be less than 63.
+ */
+#define PPB_EXTRA_BITS 2
+
+/* Precalculate scale word to avoid long long division at runtime */
+#define PPB_SCALE_WORD ((1LL << (PPB_EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS +\
+ MAX_PPB_BITS)) / 1000000000LL)
+
+#define PTP_SYNC_ATTEMPTS 4
+
+/**
+ * struct efx_ptp_match - Matching structure, stored in sk_buff's cb area.
+ * @words: UUID and (partial) sequence number
+ * @expiry: Time after which the packet should be delivered irrespective of
+ * event arrival.
+ * @state: The state of the packet - whether it is ready for processing or
+ * whether that is of no interest.
+ */
+struct efx_ptp_match {
+ u32 words[DIV_ROUND_UP(PTP_V1_UUID_LENGTH, 4)];
+ unsigned long expiry;
+ enum ptp_packet_state state;
+};
+
+/**
+ * struct efx_ptp_event_rx - A PTP receive event (from MC)
+ * @seq0: First part of (PTP) UUID
+ * @seq1: Second part of (PTP) UUID and sequence number
+ * @hwtimestamp: Event timestamp
+ */
+struct efx_ptp_event_rx {
+ struct list_head link;
+ u32 seq0;
+ u32 seq1;
+ ktime_t hwtimestamp;
+ unsigned long expiry;
+};
+
+/**
+ * struct efx_ptp_timeset - Synchronisation between host and MC
+ * @host_start: Host time immediately before hardware timestamp taken
+ * @seconds: Hardware timestamp, seconds
+ * @nanoseconds: Hardware timestamp, nanoseconds
+ * @host_end: Host time immediately after hardware timestamp taken
+ * @waitns: Number of nanoseconds between hardware timestamp being read and
+ * host end time being seen
+ * @window: Difference of host_end and host_start
+ * @valid: Whether this timeset is valid
+ */
+struct efx_ptp_timeset {
+ u32 host_start;
+ u32 seconds;
+ u32 nanoseconds;
+ u32 host_end;
+ u32 waitns;
+ u32 window; /* Derived: end - start, allowing for wrap */
+};
+
+/**
+ * struct efx_ptp_data - Precision Time Protocol (PTP) state
+ * @channel: The PTP channel
+ * @rxq: Receive queue (awaiting timestamps)
+ * @txq: Transmit queue
+ * @evt_list: List of MC receive events awaiting packets
+ * @evt_free_list: List of free events
+ * @evt_lock: Lock for manipulating evt_list and evt_free_list
+ * @rx_evts: Instantiated events (on evt_list and evt_free_list)
+ * @workwq: Work queue for processing pending PTP operations
+ * @work: Work task
+ * @reset_required: A serious error has occurred and the PTP task needs to be
+ * reset (disable, enable).
+ * @rxfilter_event: Receive filter for the PTP event port (319) when operating
+ * @rxfilter_general: Receive filter for the PTP general port (320) when operating
+ * @config: Current timestamp configuration
+ * @enabled: PTP operation enabled
+ * @mode: Mode in which PTP operating (PTP version)
+ * @evt_frags: Partly assembled PTP events
+ * @evt_frag_idx: Current fragment number
+ * @evt_code: Last event code
+ * @start: Address at which MC indicates ready for synchronisation
+ * @host_time_pps: Host time at last PPS
+ * @last_sync_ns: Last number of nanoseconds between readings when synchronising
+ * @base_sync_ns: Number of nanoseconds for last synchronisation.
+ * @base_sync_valid: Whether base_sync_time is valid.
+ * @current_adjfreq: Current ppb adjustment.
+ * @phc_clock: Pointer to registered phc device
+ * @phc_clock_info: Registration structure for phc device
+ * @pps_work: pps work task for handling pps events
+ * @pps_workwq: pps work queue
+ * @nic_ts_enabled: Flag indicating if NIC generated TS events are handled
+ * @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids
+ * allocations in main data path).
+ * @debug_ptp_dir: PTP debugfs directory
+ * @missed_rx_sync: Number of packets received without synchronisation.
+ * @good_syncs: Number of successful synchronisations.
+ * @no_time_syncs: Number of synchronisations with no good times.
+ * @bad_sync_durations: Number of synchronisations with bad durations.
+ * @bad_syncs: Number of failed synchronisations.
+ * @last_sync_time: Number of nanoseconds for last synchronisation.
+ * @sync_timeouts: Number of synchronisation timeouts
+ * @fast_syncs: Number of synchronisations requiring short delay
+ * @min_sync_delta: Minimum time between event and synchronisation
+ * @max_sync_delta: Maximum time between event and synchronisation
+ * @average_sync_delta: Average time between event and synchronisation.
+ * Modified moving average.
+ * @last_sync_delta: Last time between event and synchronisation
+ * @mc_stats: Context value for MC statistics
+ * @timeset: Last set of synchronisation statistics.
+ */
+struct efx_ptp_data {
+ struct efx_channel *channel;
+ struct sk_buff_head rxq;
+ struct sk_buff_head txq;
+ struct list_head evt_list;
+ struct list_head evt_free_list;
+ spinlock_t evt_lock;
+ struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
+ struct workqueue_struct *workwq;
+ struct work_struct work;
+ bool reset_required;
+ u32 rxfilter_event;
+ u32 rxfilter_general;
+ bool rxfilter_installed;
+ struct hwtstamp_config config;
+ bool enabled;
+ unsigned int mode;
+ efx_qword_t evt_frags[MAX_EVENT_FRAGS];
+ int evt_frag_idx;
+ int evt_code;
+ struct efx_buffer start;
+ struct pps_event_time host_time_pps;
+ unsigned last_sync_ns;
+ unsigned base_sync_ns;
+ bool base_sync_valid;
+ s64 current_adjfreq;
+ struct ptp_clock *phc_clock;
+ struct ptp_clock_info phc_clock_info;
+ struct work_struct pps_work;
+ struct workqueue_struct *pps_workwq;
+ bool nic_ts_enabled;
+ u8 txbuf[ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(
+ MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM), 4)];
+ struct efx_ptp_timeset
+ timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
+};
+
+static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta);
+static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta);
+static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts);
+static int efx_phc_settime(struct ptp_clock_info *ptp,
+ const struct timespec *e_ts);
+static int efx_phc_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *request, int on);
+
+/* Enable MCDI PTP support. */
+static int efx_ptp_enable(struct efx_nic *efx)
+{
+ u8 inbuf[MC_CMD_PTP_IN_ENABLE_LEN];
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
+ efx->ptp_data->channel->channel);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
+
+ return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+/* Disable MCDI PTP support.
+ *
+ * Note that this function should never rely on the presence of ptp_data -
+ * it may be called before that exists.
+ */
+static int efx_ptp_disable(struct efx_nic *efx)
+{
+ u8 inbuf[MC_CMD_PTP_IN_DISABLE_LEN];
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
+ return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(q))) {
+ local_bh_disable();
+ netif_receive_skb(skb);
+ local_bh_enable();
+ }
+}
+
+static void efx_ptp_handle_no_channel(struct efx_nic *efx)
+{
+ netif_err(efx, drv, efx->net_dev,
+ "ERROR: PTP requires MSI-X and 1 additional interrupt"
+ "vector. PTP disabled\n");
+}
+
+/* Repeatedly send the host time to the MC which will capture the hardware
+ * time.
+ */
+static void efx_ptp_send_times(struct efx_nic *efx,
+ struct pps_event_time *last_time)
+{
+ struct pps_event_time now;
+ struct timespec limit;
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ struct timespec start;
+ int *mc_running = ptp->start.addr;
+
+ pps_get_ts(&now);
+ start = now.ts_real;
+ limit = now.ts_real;
+ timespec_add_ns(&limit, SYNCHRONISE_PERIOD_NS);
+
+ /* Write host time for specified period or until MC is done */
+ while ((timespec_compare(&now.ts_real, &limit) < 0) &&
+ ACCESS_ONCE(*mc_running)) {
+ struct timespec update_time;
+ unsigned int host_time;
+
+ /* Don't update continuously to avoid saturating the PCIe bus */
+ update_time = now.ts_real;
+ timespec_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS);
+ do {
+ pps_get_ts(&now);
+ } while ((timespec_compare(&now.ts_real, &update_time) < 0) &&
+ ACCESS_ONCE(*mc_running));
+
+ /* Synchronise NIC with single word of time only */
+ host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
+ now.ts_real.tv_nsec);
+ /* Update host time in NIC memory */
+ _efx_writed(efx, cpu_to_le32(host_time),
+ FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
+ }
+ *last_time = now;
+}
+
+/* Read a timeset from the MC's results and partial process. */
+static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset)
+{
+ unsigned start_ns, end_ns;
+
+ timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
+ timeset->seconds = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_SECONDS);
+ timeset->nanoseconds = MCDI_DWORD(data,
+ PTP_OUT_SYNCHRONIZE_NANOSECONDS);
+ timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND);
+ timeset->waitns = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
+
+ /* Ignore seconds */
+ start_ns = timeset->host_start & MC_NANOSECOND_MASK;
+ end_ns = timeset->host_end & MC_NANOSECOND_MASK;
+ /* Allow for rollover */
+ if (end_ns < start_ns)
+ end_ns += NSEC_PER_SEC;
+ /* Determine duration of operation */
+ timeset->window = end_ns - start_ns;
+}
+
+/* Process times received from MC.
+ *
+ * Extract times from returned results, and establish the minimum value
+ * seen. The minimum value represents the "best" possible time and readings
+ * much greater than this are rejected - the machine is, perhaps, too
+ * busy. A number of readings are taken so that, hopefully, at least one good
+ * synchronisation will be seen in the results.
+ */
+static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
+ size_t response_length,
+ const struct pps_event_time *last_time)
+{
+ unsigned number_readings = (response_length /
+ MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN);
+ unsigned i;
+ unsigned min;
+ unsigned min_set = 0;
+ unsigned total;
+ unsigned ngood = 0;
+ unsigned last_good = 0;
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ bool min_valid = false;
+ u32 last_sec;
+ u32 start_sec;
+ struct timespec delta;
+
+ if (number_readings == 0)
+ return -EAGAIN;
+
+ /* Find minimum value in this set of results, discarding clearly
+ * erroneous results.
+ */
+ for (i = 0; i < number_readings; i++) {
+ efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]);
+ synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN;
+ if (ptp->timeset[i].window > SYNCHRONISATION_GRANULARITY_NS) {
+ if (min_valid) {
+ if (ptp->timeset[i].window < min_set)
+ min_set = ptp->timeset[i].window;
+ } else {
+ min_valid = true;
+ min_set = ptp->timeset[i].window;
+ }
+ }
+ }
+
+ if (min_valid) {
+ if (ptp->base_sync_valid && (min_set > ptp->base_sync_ns))
+ min = ptp->base_sync_ns;
+ else
+ min = min_set;
+ } else {
+ min = SYNCHRONISATION_GRANULARITY_NS;
+ }
+
+ /* Discard excessively long synchronise durations. The MC times
+ * when it finishes reading the host time so the corrected window
+ * time should be fairly constant for a given platform.
+ */
+ total = 0;
+ for (i = 0; i < number_readings; i++)
+ if (ptp->timeset[i].window > ptp->timeset[i].waitns) {
+ unsigned win;
+
+ win = ptp->timeset[i].window - ptp->timeset[i].waitns;
+ if (win >= MIN_SYNCHRONISATION_NS &&
+ win < MAX_SYNCHRONISATION_NS) {
+ total += ptp->timeset[i].window;
+ ngood++;
+ last_good = i;
+ }
+ }
+
+ if (ngood == 0) {
+ netif_warn(efx, drv, efx->net_dev,
+ "PTP no suitable synchronisations %dns %dns\n",
+ ptp->base_sync_ns, min_set);
+ return -EAGAIN;
+ }
+
+	/* Average the accepted synchronisation windows for this attempt */
+ ptp->last_sync_ns = DIV_ROUND_UP(total, ngood);
+ if (!ptp->base_sync_valid || (ptp->last_sync_ns < ptp->base_sync_ns)) {
+ ptp->base_sync_valid = true;
+ ptp->base_sync_ns = ptp->last_sync_ns;
+ }
+
+ /* Calculate delay from actual PPS to last_time */
+ delta.tv_nsec =
+ ptp->timeset[last_good].nanoseconds +
+ last_time->ts_real.tv_nsec -
+ (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
+
+ /* It is possible that the seconds rolled over between taking
+ * the start reading and the last value written by the host. The
+ * timescales are such that a gap of more than one second is never
+ * expected.
+ */
+ start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS;
+ last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK;
+ if (start_sec != last_sec) {
+ if (((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
+ netif_warn(efx, hw, efx->net_dev,
+ "PTP bad synchronisation seconds\n");
+ return -EAGAIN;
+ } else {
+ delta.tv_sec = 1;
+ }
+ } else {
+ delta.tv_sec = 0;
+ }
+
+ ptp->host_time_pps = *last_time;
+ pps_sub_ts(&ptp->host_time_pps, delta);
+
+ return 0;
+}
+
+/* Synchronize times between the host and the MC */
+static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ u8 synch_buf[MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX];
+ size_t response_length;
+ int rc;
+ unsigned long timeout;
+ struct pps_event_time last_time = {};
+ unsigned int loops = 0;
+ int *start = ptp->start.addr;
+
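+	/* The MC signals via the 'start' DMA buffer when it is ready for the
+	 * host to begin writing its times into NIC memory; the matched
+	 * timesets are then returned in the MCDI response.
+	 */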
+ MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE);
+ MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS,
+ num_readings);
+ MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_LO,
+ (u32)ptp->start.dma_addr);
+ MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_HI,
+ (u32)((u64)ptp->start.dma_addr >> 32));
+
+ /* Clear flag that signals MC ready */
+ ACCESS_ONCE(*start) = 0;
+ efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
+ MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
+
+ /* Wait for start from MCDI (or timeout) */
+ timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
+ while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) {
+		udelay(20);	/* The MC usually starts MCDI execution quickly */
+ loops++;
+ }
+
+ if (ACCESS_ONCE(*start))
+ efx_ptp_send_times(efx, &last_time);
+
+ /* Collect results */
+ rc = efx_mcdi_rpc_finish(efx, MC_CMD_PTP,
+ MC_CMD_PTP_IN_SYNCHRONIZE_LEN,
+ synch_buf, sizeof(synch_buf),
+ &response_length);
+ if (rc == 0)
+ rc = efx_ptp_process_times(efx, synch_buf, response_length,
+ &last_time);
+
+ return rc;
+}
+
+/* Transmit a PTP packet, via the MCDI interface, to the wire. */
+static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
+{
+ u8 *txbuf = efx->ptp_data->txbuf;
+ struct skb_shared_hwtstamps timestamps;
+ int rc = -EIO;
+ /* MCDI driver requires word aligned lengths */
+ size_t len = ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), 4);
+ u8 txtime[MC_CMD_PTP_OUT_TRANSMIT_LEN];
+
+ MCDI_SET_DWORD(txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
+ MCDI_SET_DWORD(txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
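+	/* The MC transmits the packet from a copy held in the MCDI request,
+	 * so the skb data must be linear and any pending checksum must be
+	 * completed in software here.
+	 */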
+ if (skb_shinfo(skb)->nr_frags != 0) {
+ rc = skb_linearize(skb);
+ if (rc != 0)
+ goto fail;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ rc = skb_checksum_help(skb);
+ if (rc != 0)
+ goto fail;
+ }
+ skb_copy_from_linear_data(skb,
+ &txbuf[MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST],
+ len);
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, txbuf, len, txtime,
+ sizeof(txtime), &len);
+ if (rc != 0)
+ goto fail;
+
+ memset(&timestamps, 0, sizeof(timestamps));
+ timestamps.hwtstamp = ktime_set(
+ MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_SECONDS),
+ MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_NANOSECONDS));
+
+ skb_tstamp_tx(skb, &timestamps);
+
+ rc = 0;
+
+fail:
+ dev_kfree_skb(skb);
+
+ return rc;
+}
+
+static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ struct list_head *cursor;
+ struct list_head *next;
+
+ /* Drop time-expired events */
+ spin_lock_bh(&ptp->evt_lock);
+ if (!list_empty(&ptp->evt_list)) {
+ list_for_each_safe(cursor, next, &ptp->evt_list) {
+ struct efx_ptp_event_rx *evt;
+
+ evt = list_entry(cursor, struct efx_ptp_event_rx,
+ link);
+ if (time_after(jiffies, evt->expiry)) {
+ list_del(&evt->link);
+ list_add(&evt->link, &ptp->evt_free_list);
+ netif_warn(efx, hw, efx->net_dev,
+ "PTP rx event dropped\n");
+ }
+ }
+ }
+ spin_unlock_bh(&ptp->evt_lock);
+}
+
+static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
+ struct sk_buff *skb)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ bool evts_waiting;
+ struct list_head *cursor;
+ struct list_head *next;
+ struct efx_ptp_match *match;
+ enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED;
+
+ spin_lock_bh(&ptp->evt_lock);
+ evts_waiting = !list_empty(&ptp->evt_list);
+ spin_unlock_bh(&ptp->evt_lock);
+
+ if (!evts_waiting)
+ return PTP_PACKET_STATE_UNMATCHED;
+
+ match = (struct efx_ptp_match *)skb->cb;
+ /* Look for a matching timestamp in the event queue */
+ spin_lock_bh(&ptp->evt_lock);
+ list_for_each_safe(cursor, next, &ptp->evt_list) {
+ struct efx_ptp_event_rx *evt;
+
+ evt = list_entry(cursor, struct efx_ptp_event_rx, link);
+ if ((evt->seq0 == match->words[0]) &&
+ (evt->seq1 == match->words[1])) {
+ struct skb_shared_hwtstamps *timestamps;
+
+ /* Match - add in hardware timestamp */
+ timestamps = skb_hwtstamps(skb);
+ timestamps->hwtstamp = evt->hwtimestamp;
+
+ match->state = PTP_PACKET_STATE_MATCHED;
+ rc = PTP_PACKET_STATE_MATCHED;
+ list_del(&evt->link);
+ list_add(&evt->link, &ptp->evt_free_list);
+ break;
+ }
+ }
+ spin_unlock_bh(&ptp->evt_lock);
+
+ return rc;
+}
+
+/* Process any queued receive events and corresponding packets
+ *
+ * q is returned with all the packets that are ready for delivery.
+ * true is returned if at least one of those packets requires
+ * synchronisation.
+ */
+static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ bool rc = false;
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&ptp->rxq))) {
+ struct efx_ptp_match *match;
+
+ match = (struct efx_ptp_match *)skb->cb;
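+		/* Deliver the packet if it doesn't need a timestamp or if its
+		 * timestamp event has arrived; otherwise time it out or
+		 * requeue it to wait for its event.
+		 */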
+ if (match->state == PTP_PACKET_STATE_MATCH_UNWANTED) {
+ __skb_queue_tail(q, skb);
+ } else if (efx_ptp_match_rx(efx, skb) ==
+ PTP_PACKET_STATE_MATCHED) {
+ rc = true;
+ __skb_queue_tail(q, skb);
+ } else if (time_after(jiffies, match->expiry)) {
+ match->state = PTP_PACKET_STATE_TIMED_OUT;
+ netif_warn(efx, rx_err, efx->net_dev,
+ "PTP packet - no timestamp seen\n");
+ __skb_queue_tail(q, skb);
+ } else {
+ /* Replace unprocessed entry and stop */
+ skb_queue_head(&ptp->rxq, skb);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/* Complete processing of a received packet */
+static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb)
+{
+ local_bh_disable();
+ netif_receive_skb(skb);
+ local_bh_enable();
+}
+
+static int efx_ptp_start(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ struct efx_filter_spec rxfilter;
+ int rc;
+
+ ptp->reset_required = false;
+
+ /* Must filter on both event and general ports to ensure
+ * that there is no packet re-ordering.
+ */
+ efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
+ efx_rx_queue_index(
+ efx_channel_get_rx_queue(ptp->channel)));
+ rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
+ htonl(PTP_ADDRESS),
+ htons(PTP_EVENT_PORT));
+ if (rc != 0)
+ return rc;
+
+ rc = efx_filter_insert_filter(efx, &rxfilter, true);
+ if (rc < 0)
+ return rc;
+ ptp->rxfilter_event = rc;
+
+ efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
+ efx_rx_queue_index(
+ efx_channel_get_rx_queue(ptp->channel)));
+ rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
+ htonl(PTP_ADDRESS),
+ htons(PTP_GENERAL_PORT));
+ if (rc != 0)
+ goto fail;
+
+ rc = efx_filter_insert_filter(efx, &rxfilter, true);
+ if (rc < 0)
+ goto fail;
+ ptp->rxfilter_general = rc;
+
+ rc = efx_ptp_enable(efx);
+ if (rc != 0)
+ goto fail2;
+
+ ptp->evt_frag_idx = 0;
+ ptp->current_adjfreq = 0;
+ ptp->rxfilter_installed = true;
+
+ return 0;
+
+fail2:
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_general);
+fail:
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_event);
+
+ return rc;
+}
+
+static int efx_ptp_stop(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int rc = efx_ptp_disable(efx);
+ struct list_head *cursor;
+ struct list_head *next;
+
+ if (ptp->rxfilter_installed) {
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_general);
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_event);
+ ptp->rxfilter_installed = false;
+ }
+
+ /* Make sure RX packets are really delivered */
+ efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq);
+ skb_queue_purge(&efx->ptp_data->txq);
+
+ /* Drop any pending receive events */
+ spin_lock_bh(&efx->ptp_data->evt_lock);
+ list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
+ list_del(cursor);
+ list_add(cursor, &efx->ptp_data->evt_free_list);
+ }
+ spin_unlock_bh(&efx->ptp_data->evt_lock);
+
+ return rc;
+}
+
+static void efx_ptp_pps_worker(struct work_struct *work)
+{
+ struct efx_ptp_data *ptp =
+ container_of(work, struct efx_ptp_data, pps_work);
+ struct efx_nic *efx = ptp->channel->efx;
+ struct ptp_clock_event ptp_evt;
+
+ if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS))
+ return;
+
+ ptp_evt.type = PTP_CLOCK_PPSUSR;
+ ptp_evt.pps_times = ptp->host_time_pps;
+ ptp_clock_event(ptp->phc_clock, &ptp_evt);
+}
+
+/* Process any pending transmissions and timestamp any received packets.
+ */
+static void efx_ptp_worker(struct work_struct *work)
+{
+ struct efx_ptp_data *ptp_data =
+ container_of(work, struct efx_ptp_data, work);
+ struct efx_nic *efx = ptp_data->channel->efx;
+ struct sk_buff *skb;
+ struct sk_buff_head tempq;
+
+ if (ptp_data->reset_required) {
+ efx_ptp_stop(efx);
+ efx_ptp_start(efx);
+ return;
+ }
+
+ efx_ptp_drop_time_expired_events(efx);
+
+ __skb_queue_head_init(&tempq);
+ if (efx_ptp_process_events(efx, &tempq) ||
+ !skb_queue_empty(&ptp_data->txq)) {
+
+ while ((skb = skb_dequeue(&ptp_data->txq)))
+ efx_ptp_xmit_skb(efx, skb);
+ }
+
+ while ((skb = __skb_dequeue(&tempq)))
+ efx_ptp_process_rx(efx, skb);
+}
+
+/* Initialise PTP channel and state.
+ *
+ * Setting core_index to zero causes the queue to be initialised; it doesn't
+ * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
+ */
+static int efx_ptp_probe_channel(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_ptp_data *ptp;
+ int rc = 0;
+ unsigned int pos;
+
+ channel->irq_moderation = 0;
+ channel->rx_queue.core_index = 0;
+
+ ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
+ efx->ptp_data = ptp;
+ if (!efx->ptp_data)
+ return -ENOMEM;
+
+ rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int));
+ if (rc != 0)
+ goto fail1;
+
+ ptp->channel = channel;
+ skb_queue_head_init(&ptp->rxq);
+ skb_queue_head_init(&ptp->txq);
+ ptp->workwq = create_singlethread_workqueue("sfc_ptp");
+ if (!ptp->workwq) {
+ rc = -ENOMEM;
+ goto fail2;
+ }
+
+ INIT_WORK(&ptp->work, efx_ptp_worker);
+ ptp->config.flags = 0;
+ ptp->config.tx_type = HWTSTAMP_TX_OFF;
+ ptp->config.rx_filter = HWTSTAMP_FILTER_NONE;
+ INIT_LIST_HEAD(&ptp->evt_list);
+ INIT_LIST_HEAD(&ptp->evt_free_list);
+ spin_lock_init(&ptp->evt_lock);
+ for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
+ list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
+
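+	/* Register a PHC device named after the port's permanent MAC
+	 * address, advertising PPS support.
+	 */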
+ ptp->phc_clock_info.owner = THIS_MODULE;
+ snprintf(ptp->phc_clock_info.name,
+ sizeof(ptp->phc_clock_info.name),
+ "%pm", efx->net_dev->perm_addr);
+ ptp->phc_clock_info.max_adj = MAX_PPB;
+ ptp->phc_clock_info.n_alarm = 0;
+ ptp->phc_clock_info.n_ext_ts = 0;
+ ptp->phc_clock_info.n_per_out = 0;
+ ptp->phc_clock_info.pps = 1;
+ ptp->phc_clock_info.adjfreq = efx_phc_adjfreq;
+ ptp->phc_clock_info.adjtime = efx_phc_adjtime;
+ ptp->phc_clock_info.gettime = efx_phc_gettime;
+ ptp->phc_clock_info.settime = efx_phc_settime;
+ ptp->phc_clock_info.enable = efx_phc_enable;
+
+ ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
+ &efx->pci_dev->dev);
+	if (IS_ERR(ptp->phc_clock)) {
+		rc = PTR_ERR(ptp->phc_clock);
+		goto fail3;
+	}
+
+ INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
+ ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
+ if (!ptp->pps_workwq) {
+ rc = -ENOMEM;
+ goto fail4;
+ }
+ ptp->nic_ts_enabled = false;
+
+ return 0;
+fail4:
+ ptp_clock_unregister(efx->ptp_data->phc_clock);
+
+fail3:
+ destroy_workqueue(efx->ptp_data->workwq);
+
+fail2:
+ efx_nic_free_buffer(efx, &ptp->start);
+
+fail1:
+ kfree(efx->ptp_data);
+ efx->ptp_data = NULL;
+
+ return rc;
+}
+
+static void efx_ptp_remove_channel(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+
+ if (!efx->ptp_data)
+ return;
+
+ (void)efx_ptp_disable(channel->efx);
+
+ cancel_work_sync(&efx->ptp_data->work);
+ cancel_work_sync(&efx->ptp_data->pps_work);
+
+ skb_queue_purge(&efx->ptp_data->rxq);
+ skb_queue_purge(&efx->ptp_data->txq);
+
+ ptp_clock_unregister(efx->ptp_data->phc_clock);
+
+ destroy_workqueue(efx->ptp_data->workwq);
+ destroy_workqueue(efx->ptp_data->pps_workwq);
+
+ efx_nic_free_buffer(efx, &efx->ptp_data->start);
+ kfree(efx->ptp_data);
+}
+
+static void efx_ptp_get_channel_name(struct efx_channel *channel,
+ char *buf, size_t len)
+{
+ snprintf(buf, len, "%s-ptp", channel->efx->name);
+}
+
+/* Determine whether this packet should be processed by the PTP module
+ * or transmitted conventionally.
+ */
+bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+ return efx->ptp_data &&
+ efx->ptp_data->enabled &&
+ skb->len >= PTP_MIN_LENGTH &&
+ skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM &&
+ likely(skb->protocol == htons(ETH_P_IP)) &&
+ ip_hdr(skb)->protocol == IPPROTO_UDP &&
+ udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
+}
+
+/* Receive a PTP packet. Packets are queued until the arrival of
+ * the receive timestamp from the MC - this will probably arrive after the
+ * packet itself because of the processing time in the MC.
+ */
+static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
+ u8 *data;
+ unsigned int version;
+
+ match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
+
+ /* Correct version? */
+ if (ptp->mode == MC_CMD_PTP_MODE_V1) {
+ if (skb->len < PTP_V1_MIN_LENGTH) {
+ netif_receive_skb(skb);
+ return;
+ }
+ version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
+ if (version != PTP_VERSION_V1) {
+ netif_receive_skb(skb);
+ return;
+ }
+ } else {
+ if (skb->len < PTP_V2_MIN_LENGTH) {
+ netif_receive_skb(skb);
+ return;
+ }
+ version = skb->data[PTP_V2_VERSION_OFFSET];
+
+ BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2);
+ BUILD_BUG_ON(PTP_V1_UUID_OFFSET != PTP_V2_MC_UUID_OFFSET);
+ BUILD_BUG_ON(PTP_V1_UUID_LENGTH != PTP_V2_MC_UUID_LENGTH);
+ BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
+ BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);
+
+ if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
+ netif_receive_skb(skb);
+ return;
+ }
+ }
+
+ /* Does this packet require timestamping? */
+ if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
+ struct skb_shared_hwtstamps *timestamps;
+
+ match->state = PTP_PACKET_STATE_UNMATCHED;
+
+ /* Clear all timestamps held: filled in later */
+ timestamps = skb_hwtstamps(skb);
+ memset(timestamps, 0, sizeof(*timestamps));
+
+ /* Extract UUID/Sequence information */
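+		/* words[] must be packed exactly as seq0/seq1 are built in
+		 * ptp_event_rx() so that efx_ptp_match_rx() can compare them.
+		 */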
+ data = skb->data + PTP_V1_UUID_OFFSET;
+ match->words[0] = (data[0] |
+ (data[1] << 8) |
+ (data[2] << 16) |
+ (data[3] << 24));
+ match->words[1] = (data[4] |
+ (data[5] << 8) |
+ (skb->data[PTP_V1_SEQUENCE_OFFSET +
+ PTP_V1_SEQUENCE_LENGTH - 1] <<
+ 16));
+ } else {
+ match->state = PTP_PACKET_STATE_MATCH_UNWANTED;
+ }
+
+ skb_queue_tail(&ptp->rxq, skb);
+ queue_work(ptp->workwq, &ptp->work);
+}
+
+/* Transmit a PTP packet. This has to be transmitted by the MC
+ * itself, through an MCDI call. MCDI calls aren't permitted
+ * in the transmit path, so defer the actual transmission to a suitable worker.
+ */
+int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+
+ skb_queue_tail(&ptp->txq, skb);
+
+ if ((udp_hdr(skb)->dest == htons(PTP_EVENT_PORT)) &&
+ (skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM))
+ efx_xmit_hwtstamp_pending(skb);
+ queue_work(ptp->workwq, &ptp->work);
+
+ return NETDEV_TX_OK;
+}
+
+static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+ unsigned int new_mode)
+{
+ if ((enable_wanted != efx->ptp_data->enabled) ||
+ (enable_wanted && (efx->ptp_data->mode != new_mode))) {
+ int rc;
+
+ if (enable_wanted) {
+ /* Change of mode requires disable */
+ if (efx->ptp_data->enabled &&
+ (efx->ptp_data->mode != new_mode)) {
+ efx->ptp_data->enabled = false;
+ rc = efx_ptp_stop(efx);
+ if (rc != 0)
+ return rc;
+ }
+
+ /* Set new operating mode and establish
+ * baseline synchronisation, which must
+ * succeed.
+ */
+ efx->ptp_data->mode = new_mode;
+ rc = efx_ptp_start(efx);
+ if (rc == 0) {
+ rc = efx_ptp_synchronize(efx,
+ PTP_SYNC_ATTEMPTS * 2);
+ if (rc != 0)
+ efx_ptp_stop(efx);
+ }
+ } else {
+ rc = efx_ptp_stop(efx);
+ }
+
+ if (rc != 0)
+ return rc;
+
+ efx->ptp_data->enabled = enable_wanted;
+ }
+
+ return 0;
+}
+
+static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
+{
+ bool enable_wanted = false;
+ unsigned int new_mode;
+ int rc;
+
+ if (init->flags)
+ return -EINVAL;
+
+ if ((init->tx_type != HWTSTAMP_TX_OFF) &&
+ (init->tx_type != HWTSTAMP_TX_ON))
+ return -ERANGE;
+
+ new_mode = efx->ptp_data->mode;
+ /* Determine whether any PTP HW operations are required */
+ switch (init->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ new_mode = MC_CMD_PTP_MODE_V1;
+ enable_wanted = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+		/* Although these three filters are accepted, only IPv4
+		 * packets will be timestamped.
+		 */
+ init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ new_mode = MC_CMD_PTP_MODE_V2;
+ enable_wanted = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ /* Non-IP + IPv6 timestamping not supported */
+ return -ERANGE;
+ default:
+ return -ERANGE;
+ }
+
+ if (init->tx_type != HWTSTAMP_TX_OFF)
+ enable_wanted = true;
+
+ rc = efx_ptp_change_mode(efx, enable_wanted, new_mode);
+ if (rc != 0)
+ return rc;
+
+ efx->ptp_data->config = *init;
+
+ return 0;
+}
+
+int
+efx_ptp_get_ts_info(struct net_device *net_dev, struct ethtool_ts_info *ts_info)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_ptp_data *ptp = efx->ptp_data;
+
+ if (!ptp)
+ return -EOPNOTSUPP;
+
+ ts_info->so_timestamping = (SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE);
+ ts_info->phc_index = ptp_clock_index(ptp->phc_clock);
+ ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON;
+ ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+ return 0;
+}
+
+int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config config;
+ int rc;
+
+ /* Not a PTP enabled port */
+ if (!efx->ptp_data)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ rc = efx_ptp_ts_init(efx, &config);
+ if (rc != 0)
+ return rc;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config))
+ ? -EFAULT : 0;
+}
+
+static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+
+ netif_err(efx, hw, efx->net_dev,
+ "PTP unexpected event length: got %d expected %d\n",
+ ptp->evt_frag_idx, expected_frag_len);
+ ptp->reset_required = true;
+ queue_work(ptp->workwq, &ptp->work);
+}
+
+/* Process a completed receive event.  Put it on the event queue and
+ * start the worker thread.  This is required because events and their
+ * corresponding packets may arrive in either order.
+ */
+static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+ struct efx_ptp_event_rx *evt = NULL;
+
+ if (ptp->evt_frag_idx != 3) {
+ ptp_event_failure(efx, 3);
+ return;
+ }
+
+ spin_lock_bh(&ptp->evt_lock);
+ if (!list_empty(&ptp->evt_free_list)) {
+ evt = list_first_entry(&ptp->evt_free_list,
+ struct efx_ptp_event_rx, link);
+ list_del(&evt->link);
+
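+		/* Rebuild the sequence/UUID match words and the hardware
+		 * timestamp from the three event fragments.
+		 */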
+ evt->seq0 = EFX_QWORD_FIELD(ptp->evt_frags[2], MCDI_EVENT_DATA);
+ evt->seq1 = (EFX_QWORD_FIELD(ptp->evt_frags[2],
+ MCDI_EVENT_SRC) |
+ (EFX_QWORD_FIELD(ptp->evt_frags[1],
+ MCDI_EVENT_SRC) << 8) |
+ (EFX_QWORD_FIELD(ptp->evt_frags[0],
+ MCDI_EVENT_SRC) << 16));
+ evt->hwtimestamp = ktime_set(
+ EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
+ EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA));
+ evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
+ list_add_tail(&evt->link, &ptp->evt_list);
+
+ queue_work(ptp->workwq, &ptp->work);
+ } else {
+		netif_err(efx, rx_err, efx->net_dev, "No free PTP event\n");
+ }
+ spin_unlock_bh(&ptp->evt_lock);
+}
+
+static void ptp_event_fault(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+	int code = EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA);
+
+	if (ptp->evt_frag_idx != 1) {
+ ptp_event_failure(efx, 1);
+ return;
+ }
+
+ netif_err(efx, hw, efx->net_dev, "PTP error %d\n", code);
+}
+
+static void ptp_event_pps(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+ if (ptp->nic_ts_enabled)
+ queue_work(ptp->pps_workwq, &ptp->pps_work);
+}
+
+void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
+
+ if (!ptp->enabled)
+ return;
+
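+	/* MCDI PTP events may span several fragments; accumulate them until
+	 * one arrives with the CONT flag clear.
+	 */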
+ if (ptp->evt_frag_idx == 0) {
+ ptp->evt_code = code;
+ } else if (ptp->evt_code != code) {
+ netif_err(efx, hw, efx->net_dev,
+ "PTP out of sequence event %d\n", code);
+ ptp->evt_frag_idx = 0;
+ }
+
+ ptp->evt_frags[ptp->evt_frag_idx++] = *ev;
+ if (!MCDI_EVENT_FIELD(*ev, CONT)) {
+ /* Process resulting event */
+ switch (code) {
+ case MCDI_EVENT_CODE_PTP_RX:
+ ptp_event_rx(efx, ptp);
+ break;
+ case MCDI_EVENT_CODE_PTP_FAULT:
+ ptp_event_fault(efx, ptp);
+ break;
+ case MCDI_EVENT_CODE_PTP_PPS:
+ ptp_event_pps(efx, ptp);
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "PTP unknown event %d\n", code);
+ break;
+ }
+ ptp->evt_frag_idx = 0;
+ } else if (MAX_EVENT_FRAGS == ptp->evt_frag_idx) {
+ netif_err(efx, hw, efx->net_dev,
+ "PTP too many event fragments\n");
+ ptp->evt_frag_idx = 0;
+ }
+}
+
+static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+ struct efx_ptp_data *ptp_data = container_of(ptp,
+ struct efx_ptp_data,
+ phc_clock_info);
+ struct efx_nic *efx = ptp_data->channel->efx;
+ u8 inadj[MC_CMD_PTP_IN_ADJUST_LEN];
+ s64 adjustment_ns;
+ int rc;
+
+ if (delta > MAX_PPB)
+ delta = MAX_PPB;
+ else if (delta < -MAX_PPB)
+ delta = -MAX_PPB;
+
+ /* Convert ppb to fixed point ns. */
+ adjustment_ns = (((s64)delta * PPB_SCALE_WORD) >>
+ (PPB_EXTRA_BITS + MAX_PPB_BITS));
+
+ MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
+ MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_LO, (u32)adjustment_ns);
+ MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_HI,
+ (u32)(adjustment_ns >> 32));
+ MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0);
+ MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj),
+ NULL, 0, NULL);
+ if (rc != 0)
+ return rc;
+
+ ptp_data->current_adjfreq = delta;
+ return 0;
+}
+
+static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct efx_ptp_data *ptp_data = container_of(ptp,
+ struct efx_ptp_data,
+ phc_clock_info);
+ struct efx_nic *efx = ptp_data->channel->efx;
+ struct timespec delta_ts = ns_to_timespec(delta);
+ u8 inbuf[MC_CMD_PTP_IN_ADJUST_LEN];
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_LO, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_HI, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
+ return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct efx_ptp_data *ptp_data = container_of(ptp,
+ struct efx_ptp_data,
+ phc_clock_info);
+ struct efx_nic *efx = ptp_data->channel->efx;
+ u8 inbuf[MC_CMD_PTP_IN_READ_NIC_TIME_LEN];
+ u8 outbuf[MC_CMD_PTP_OUT_READ_NIC_TIME_LEN];
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc != 0)
+ return rc;
+
+ ts->tv_sec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_SECONDS);
+ ts->tv_nsec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_NANOSECONDS);
+ return 0;
+}
+
+static int efx_phc_settime(struct ptp_clock_info *ptp,
+ const struct timespec *e_ts)
+{
+	/* Get the current NIC time with efx_phc_gettime(), subtract it from
+	 * the desired time to get the offset, then apply the offset with
+	 * efx_phc_adjtime().
+	 */
+ int rc;
+ struct timespec time_now;
+ struct timespec delta;
+
+ rc = efx_phc_gettime(ptp, &time_now);
+ if (rc != 0)
+ return rc;
+
+ delta = timespec_sub(*e_ts, time_now);
+
+	rc = efx_phc_adjtime(ptp, timespec_to_ns(&delta));
+	if (rc != 0)
+		return rc;
+
+ return 0;
+}
+
+static int efx_phc_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *request,
+ int enable)
+{
+ struct efx_ptp_data *ptp_data = container_of(ptp,
+ struct efx_ptp_data,
+ phc_clock_info);
+ if (request->type != PTP_CLK_REQ_PPS)
+ return -EOPNOTSUPP;
+
+ ptp_data->nic_ts_enabled = !!enable;
+ return 0;
+}
+
+static const struct efx_channel_type efx_ptp_channel_type = {
+ .handle_no_channel = efx_ptp_handle_no_channel,
+ .pre_probe = efx_ptp_probe_channel,
+ .post_remove = efx_ptp_remove_channel,
+ .get_name = efx_ptp_get_channel_name,
+ /* no copy operation; there is no need to reallocate this channel */
+ .receive_skb = efx_ptp_rx,
+ .keep_eventq = false,
+};
+
+void efx_ptp_probe(struct efx_nic *efx)
+{
+ /* Check whether PTP is implemented on this NIC. The DISABLE
+ * operation will succeed if and only if it is implemented.
+ */
+ if (efx_ptp_disable(efx) == 0)
+ efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] =
+ &efx_ptp_channel_type;
+}
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 719319b89d7a..9e0ad1b75c33 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -479,7 +479,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
- skb_record_rx_queue(skb, channel->channel);
+ skb_record_rx_queue(skb, channel->rx_queue.core_index);
gro_result = napi_gro_frags(napi);
} else {
@@ -571,8 +571,14 @@ static void efx_rx_deliver(struct efx_channel *channel,
/* Set the SKB flags */
skb_checksum_none_assert(skb);
+ /* Record the rx_queue */
+ skb_record_rx_queue(skb, channel->rx_queue.core_index);
+
/* Pass the packet up */
- netif_receive_skb(skb);
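+	/* Channel types that provide a receive_skb handler (such as the PTP
+	 * channel) consume the packet themselves.
+	 */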
+ if (channel->type->receive_skb)
+ channel->type->receive_skb(channel, skb);
+ else
+ netif_receive_skb(skb);
/* Update allocation strategy method */
channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
@@ -608,13 +614,14 @@ void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
* at the ethernet header */
skb->protocol = eth_type_trans(skb, efx->net_dev);
- skb_record_rx_queue(skb, channel->channel);
+ skb_record_rx_queue(skb, channel->rx_queue.core_index);
}
if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
- if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)))
+ if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) &&
+ !channel->type->receive_skb)
efx_rx_packet_gro(channel, rx_buf, eh);
else
efx_rx_deliver(channel, rx_buf);
@@ -624,6 +631,11 @@ void efx_rx_strategy(struct efx_channel *channel)
{
enum efx_rx_alloc_method method = rx_alloc_method;
+ if (channel->type->receive_skb) {
+ channel->rx_alloc_push_pages = false;
+ return;
+ }
+
/* Only makes sense to use page based allocation if GRO is enabled */
if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
method = RX_ALLOC_METHOD_SKB;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 96068d15b601..ce72ae4f399f 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -614,7 +614,8 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
{
enum efx_loopback_mode mode;
struct efx_loopback_state *state;
- struct efx_channel *channel = efx_get_channel(efx, 0);
+ struct efx_channel *channel =
+ efx_get_channel(efx, efx->tx_channel_offset);
struct efx_tx_queue *tx_queue;
int rc = 0;
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 6bafd216e55e..84b41bf08a38 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -335,6 +335,7 @@ static int siena_probe_nic(struct efx_nic *efx)
goto fail5;
efx_sriov_probe(efx);
+ efx_ptp_probe(efx);
return 0;
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 9cb3b84ecae9..d49b53dc2a50 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -21,6 +21,9 @@
/* Number of longs required to track all the VIs in a VF */
#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)
+/* Maximum number of RX queues supported */
+#define VF_MAX_RX_QUEUES 63
+
/**
* enum efx_vf_tx_filter_mode - TX MAC filtering behaviour
* @VF_TX_FILTER_OFF: Disabled
@@ -578,6 +581,7 @@ static int efx_vfdi_init_rxq(struct efx_vf *vf)
efx_oword_t reg;
if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
+ vf_rxq >= VF_MAX_RX_QUEUES ||
bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
if (net_ratelimit())
netif_err(efx, hw, efx->net_dev,
@@ -683,6 +687,9 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
__le32 *rxqs;
int rc;
+ BUILD_BUG_ON(VF_MAX_RX_QUEUES >
+ MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
+
rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
if (rxqs == NULL)
return VFDI_RC_ENOMEM;
@@ -1028,6 +1035,7 @@ efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
static const struct efx_channel_type efx_sriov_channel_type = {
.handle_no_channel = efx_sriov_handle_no_channel,
.pre_probe = efx_sriov_probe_channel,
+ .post_remove = efx_channel_dummy_op_void,
.get_name = efx_sriov_get_channel_name,
/* no copy operation; channel must not be reallocated */
.keep_eventq = true,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 18713436b443..5e090e54298e 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -22,14 +22,6 @@
#include "nic.h"
#include "workarounds.h"
-/*
- * TX descriptor ring full threshold
- *
- * The tx_queue descriptor ring fill-level must fall below this value
- * before we restart the netif queue
- */
-#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
-
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
unsigned int *pkts_compl,
@@ -39,67 +31,32 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
buffer->unmap_len);
- if (buffer->unmap_single)
+ if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
DMA_TO_DEVICE);
else
dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
DMA_TO_DEVICE);
buffer->unmap_len = 0;
- buffer->unmap_single = false;
}
- if (buffer->skb) {
+ if (buffer->flags & EFX_TX_BUF_SKB) {
(*pkts_compl)++;
(*bytes_compl) += buffer->skb->len;
dev_kfree_skb_any((struct sk_buff *) buffer->skb);
- buffer->skb = NULL;
netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
"TX queue %d transmission id %x complete\n",
tx_queue->queue, tx_queue->read_count);
+ } else if (buffer->flags & EFX_TX_BUF_HEAP) {
+ kfree(buffer->heap_buf);
}
-}
-/**
- * struct efx_tso_header - a DMA mapped buffer for packet headers
- * @next: Linked list of free ones.
- * The list is protected by the TX queue lock.
- * @dma_unmap_len: Length to unmap for an oversize buffer, or 0.
- * @dma_addr: The DMA address of the header below.
- *
- * This controls the memory used for a TSO header. Use TSOH_DATA()
- * to find the packet header data. Use TSOH_SIZE() to calculate the
- * total size required for a given packet header length. TSO headers
- * in the free list are exactly %TSOH_STD_SIZE bytes in size.
- */
-struct efx_tso_header {
- union {
- struct efx_tso_header *next;
- size_t unmap_len;
- };
- dma_addr_t dma_addr;
-};
+ buffer->len = 0;
+ buffer->flags = 0;
+}
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
struct sk_buff *skb);
-static void efx_fini_tso(struct efx_tx_queue *tx_queue);
-static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
- struct efx_tso_header *tsoh);
-
-static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer)
-{
- if (buffer->tsoh) {
- if (likely(!buffer->tsoh->unmap_len)) {
- buffer->tsoh->next = tx_queue->tso_headers_free;
- tx_queue->tso_headers_free = buffer->tsoh;
- } else {
- efx_tsoh_heap_free(tx_queue, buffer->tsoh);
- }
- buffer->tsoh = NULL;
- }
-}
-
static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
@@ -138,6 +95,56 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
return max_descs;
}
+/* Get partner of a TX queue, seen as part of the same net core queue */
+static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
+{
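+	/* The partner queue differs only in the EFX_TXQ_TYPE_OFFLOAD bit of
+	 * its index.
+	 */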
+ if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
+ return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
+ else
+ return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
+}
+
+static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
+{
+ /* We need to consider both queues that the net core sees as one */
+ struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
+ struct efx_nic *efx = txq1->efx;
+ unsigned int fill_level;
+
+ fill_level = max(txq1->insert_count - txq1->old_read_count,
+ txq2->insert_count - txq2->old_read_count);
+ if (likely(fill_level < efx->txq_stop_thresh))
+ return;
+
+ /* We used the stale old_read_count above, which gives us a
+ * pessimistic estimate of the fill level (which may even
+ * validly be >= efx->txq_entries). Now try again using
+ * read_count (more likely to be a cache miss).
+ *
+ * If we read read_count and then conditionally stop the
+ * queue, it is possible for the completion path to race with
+ * us and complete all outstanding descriptors in the middle,
+ * after which there will be no more completions to wake it.
+ * Therefore we stop the queue first, then read read_count
+ * (with a memory barrier to ensure the ordering), then
+ * restart the queue if the fill level turns out to be low
+ * enough.
+ */
+ netif_tx_stop_queue(txq1->core_txq);
+ smp_mb();
+ txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
+ txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+
+ fill_level = max(txq1->insert_count - txq1->old_read_count,
+ txq2->insert_count - txq2->old_read_count);
+ EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
+ if (likely(fill_level < efx->txq_stop_thresh)) {
+ smp_mb();
+ if (likely(!efx->loopback_selftest))
+ netif_tx_start_queue(txq1->core_txq);
+ }
+}
+
/*
* Add a socket buffer to a TX queue
*
@@ -151,7 +158,7 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
* This function is split out from efx_hard_start_xmit to allow the
* loopback test to direct packets via specific TX queues.
*
- * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
+ * Returns NETDEV_TX_OK.
* You must hold netif_tx_lock() to call this function.
*/
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
@@ -160,12 +167,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
struct device *dma_dev = &efx->pci_dev->dev;
struct efx_tx_buffer *buffer;
skb_frag_t *fragment;
- unsigned int len, unmap_len = 0, fill_level, insert_ptr;
+ unsigned int len, unmap_len = 0, insert_ptr;
dma_addr_t dma_addr, unmap_addr = 0;
unsigned int dma_len;
- bool unmap_single;
- int q_space, i = 0;
- netdev_tx_t rc = NETDEV_TX_OK;
+ unsigned short dma_flags;
+ int i = 0;
EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
@@ -183,14 +189,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
return NETDEV_TX_OK;
}
- fill_level = tx_queue->insert_count - tx_queue->old_read_count;
- q_space = efx->txq_entries - 1 - fill_level;
-
/* Map for DMA. Use dma_map_single rather than dma_map_page
* since this is more efficient on machines with sparse
* memory.
*/
- unmap_single = true;
+ dma_flags = EFX_TX_BUF_MAP_SINGLE;
dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);
/* Process all fragments */
@@ -205,39 +208,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Add to TX queue, splitting across DMA boundaries */
do {
- if (unlikely(q_space-- <= 0)) {
- /* It might be that completions have
- * happened since the xmit path last
- * checked. Update the xmit path's
- * copy of read_count.
- */
- netif_tx_stop_queue(tx_queue->core_txq);
- /* This memory barrier protects the
- * change of queue state from the access
- * of read_count. */
- smp_mb();
- tx_queue->old_read_count =
- ACCESS_ONCE(tx_queue->read_count);
- fill_level = (tx_queue->insert_count
- - tx_queue->old_read_count);
- q_space = efx->txq_entries - 1 - fill_level;
- if (unlikely(q_space-- <= 0)) {
- rc = NETDEV_TX_BUSY;
- goto unwind;
- }
- smp_mb();
- if (likely(!efx->loopback_selftest))
- netif_tx_start_queue(
- tx_queue->core_txq);
- }
-
insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[insert_ptr];
- efx_tsoh_free(tx_queue, buffer);
- EFX_BUG_ON_PARANOID(buffer->tsoh);
- EFX_BUG_ON_PARANOID(buffer->skb);
+ EFX_BUG_ON_PARANOID(buffer->flags);
EFX_BUG_ON_PARANOID(buffer->len);
- EFX_BUG_ON_PARANOID(!buffer->continuation);
EFX_BUG_ON_PARANOID(buffer->unmap_len);
dma_len = efx_max_tx_len(efx, dma_addr);
@@ -247,13 +221,14 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Fill out per descriptor fields */
buffer->len = dma_len;
buffer->dma_addr = dma_addr;
+ buffer->flags = EFX_TX_BUF_CONT;
len -= dma_len;
dma_addr += dma_len;
++tx_queue->insert_count;
} while (len);
/* Transfer ownership of the unmapping to the final buffer */
- buffer->unmap_single = unmap_single;
+ buffer->flags = EFX_TX_BUF_CONT | dma_flags;
buffer->unmap_len = unmap_len;
unmap_len = 0;
@@ -264,20 +239,22 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
len = skb_frag_size(fragment);
i++;
/* Map for DMA */
- unmap_single = false;
+ dma_flags = 0;
dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
DMA_TO_DEVICE);
}
/* Transfer ownership of the skb to the final buffer */
buffer->skb = skb;
- buffer->continuation = false;
+ buffer->flags = EFX_TX_BUF_SKB | dma_flags;
netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
/* Pass off to hardware */
efx_nic_push_buffers(tx_queue);
+ efx_tx_maybe_stop_queue(tx_queue);
+
return NETDEV_TX_OK;
dma_err:
@@ -289,7 +266,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Mark the packet as transmitted, and free the SKB ourselves */
dev_kfree_skb_any(skb);
- unwind:
/* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != tx_queue->write_count) {
unsigned int pkts_compl = 0, bytes_compl = 0;
@@ -297,12 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[insert_ptr];
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
- buffer->len = 0;
}
/* Free the fragment we were mid-way through pushing */
if (unmap_len) {
- if (unmap_single)
+ if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
dma_unmap_single(dma_dev, unmap_addr, unmap_len,
DMA_TO_DEVICE);
else
@@ -310,7 +285,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
DMA_TO_DEVICE);
}
- return rc;
+ return NETDEV_TX_OK;
}
/* Remove packets from the TX queue
@@ -340,8 +315,6 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
}
efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
- buffer->continuation = true;
- buffer->len = 0;
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -366,6 +339,12 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
+ /* PTP "event" packet */
+ if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
+ unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
+ return efx_ptp_tx(efx, skb);
+ }
+
index = skb_get_queue_mapping(skb);
type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
if (index >= efx->n_tx_channels) {
@@ -450,6 +429,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned fill_level;
struct efx_nic *efx = tx_queue->efx;
+ struct efx_tx_queue *txq2;
unsigned int pkts_compl = 0, bytes_compl = 0;
EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
@@ -457,15 +437,18 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
- /* See if we need to restart the netif queue. This barrier
- * separates the update of read_count from the test of the
- * queue state. */
+ /* See if we need to restart the netif queue. This memory
+ * barrier ensures that we write read_count (inside
+ * efx_dequeue_buffers()) before reading the queue status.
+ */
smp_mb();
if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
likely(efx->port_enabled) &&
likely(netif_device_present(efx->net_dev))) {
- fill_level = tx_queue->insert_count - tx_queue->read_count;
- if (fill_level < EFX_TXQ_THRESHOLD(efx))
+ txq2 = efx_tx_queue_partner(tx_queue);
+ fill_level = max(tx_queue->insert_count - tx_queue->read_count,
+ txq2->insert_count - txq2->read_count);
+ if (fill_level <= efx->txq_wake_thresh)
netif_tx_wake_queue(tx_queue->core_txq);
}
@@ -480,11 +463,26 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
}
}
+/* Size of page-based TSO header buffers. Larger blocks must be
+ * allocated from the heap.
+ */
+#define TSOH_STD_SIZE 128
+#define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE)
+
+/* At most half the descriptors in the queue at any time will refer to
+ * a TSO header buffer, since they must always be followed by a
+ * payload descriptor referring to an skb.
+ */
+static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
+{
+ return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
+}
+
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
unsigned int entries;
- int i, rc;
+ int rc;
/* Create the smallest power-of-two aligned ring */
entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
@@ -500,17 +498,28 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
GFP_KERNEL);
if (!tx_queue->buffer)
return -ENOMEM;
- for (i = 0; i <= tx_queue->ptr_mask; ++i)
- tx_queue->buffer[i].continuation = true;
+
+ if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
+ tx_queue->tsoh_page =
+ kcalloc(efx_tsoh_page_count(tx_queue),
+ sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
+ if (!tx_queue->tsoh_page) {
+ rc = -ENOMEM;
+ goto fail1;
+ }
+ }
/* Allocate hardware ring */
rc = efx_nic_probe_tx(tx_queue);
if (rc)
- goto fail;
+ goto fail2;
return 0;
- fail:
+fail2:
+ kfree(tx_queue->tsoh_page);
+ tx_queue->tsoh_page = NULL;
+fail1:
kfree(tx_queue->buffer);
tx_queue->buffer = NULL;
return rc;
@@ -546,8 +555,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
unsigned int pkts_compl = 0, bytes_compl = 0;
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
- buffer->continuation = true;
- buffer->len = 0;
++tx_queue->read_count;
}
@@ -568,13 +575,12 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
efx_nic_fini_tx(tx_queue);
efx_release_tx_buffers(tx_queue);
-
- /* Free up TSO header cache */
- efx_fini_tso(tx_queue);
}
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
+ int i;
+
if (!tx_queue->buffer)
return;
@@ -582,6 +588,14 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
"destroying TX queue %d\n", tx_queue->queue);
efx_nic_remove_tx(tx_queue);
+ if (tx_queue->tsoh_page) {
+ for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
+ efx_nic_free_buffer(tx_queue->efx,
+ &tx_queue->tsoh_page[i]);
+ kfree(tx_queue->tsoh_page);
+ tx_queue->tsoh_page = NULL;
+ }
+
kfree(tx_queue->buffer);
tx_queue->buffer = NULL;
}
@@ -604,22 +618,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
#define TSOH_OFFSET NET_IP_ALIGN
#endif
-#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET)
-
-/* Total size of struct efx_tso_header, buffer and padding */
-#define TSOH_SIZE(hdr_len) \
- (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)
-
-/* Size of blocks on free list. Larger blocks must be allocated from
- * the heap.
- */
-#define TSOH_STD_SIZE 128
-
#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
-#define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data)
-#define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data)
-#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
-#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
/**
* struct tso_state - TSO state for an SKB
@@ -631,10 +630,12 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
* @in_len: Remaining length in current SKB fragment
* @unmap_len: Length of SKB fragment
* @unmap_addr: DMA address of SKB fragment
- * @unmap_single: DMA single vs page mapping flag
+ * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
* @protocol: Network protocol (after any VLAN header)
+ * @ip_off: Offset of IP header
+ * @tcp_off: Offset of TCP header
* @header_len: Number of bytes of header
- * @full_packet_size: Number of bytes to put in each outgoing segment
+ * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
*
* The state used during segmentation. It is put into this data structure
* just to make it easy to pass into inline functions.
@@ -651,11 +652,13 @@ struct tso_state {
unsigned in_len;
unsigned unmap_len;
dma_addr_t unmap_addr;
- bool unmap_single;
+ unsigned short dma_flags;
__be16 protocol;
+ unsigned int ip_off;
+ unsigned int tcp_off;
unsigned header_len;
- int full_packet_size;
+ unsigned int ip_base_len;
};
@@ -687,91 +690,43 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
return protocol;
}
-
-/*
- * Allocate a page worth of efx_tso_header structures, and string them
- * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
- */
-static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
+static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer, unsigned int len)
{
- struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
- struct efx_tso_header *tsoh;
- dma_addr_t dma_addr;
- u8 *base_kva, *kva;
+ u8 *result;
- base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
- if (base_kva == NULL) {
- netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
- "Unable to allocate page for TSO headers\n");
- return -ENOMEM;
- }
-
- /* dma_alloc_coherent() allocates pages. */
- EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
-
- for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
- tsoh = (struct efx_tso_header *)kva;
- tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
- tsoh->next = tx_queue->tso_headers_free;
- tx_queue->tso_headers_free = tsoh;
- }
-
- return 0;
-}
-
-
-/* Free up a TSO header, and all others in the same page. */
-static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
- struct efx_tso_header *tsoh,
- struct device *dma_dev)
-{
- struct efx_tso_header **p;
- unsigned long base_kva;
- dma_addr_t base_dma;
-
- base_kva = (unsigned long)tsoh & PAGE_MASK;
- base_dma = tsoh->dma_addr & PAGE_MASK;
-
- p = &tx_queue->tso_headers_free;
- while (*p != NULL) {
- if (((unsigned long)*p & PAGE_MASK) == base_kva)
- *p = (*p)->next;
- else
- p = &(*p)->next;
- }
+ EFX_BUG_ON_PARANOID(buffer->len);
+ EFX_BUG_ON_PARANOID(buffer->flags);
+ EFX_BUG_ON_PARANOID(buffer->unmap_len);
- dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
-}
+ if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
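+		/* Each page-based header slot is shared by a pair of
+		 * descriptors, since at most every other descriptor can be a
+		 * TSO header (see efx_tsoh_page_count()).
+		 */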
+ unsigned index =
+ (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
+ struct efx_buffer *page_buf =
+ &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
+ unsigned offset =
+ TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
+
+ if (unlikely(!page_buf->addr) &&
+ efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE))
+ return NULL;
+
+ result = (u8 *)page_buf->addr + offset;
+ buffer->dma_addr = page_buf->dma_addr + offset;
+ buffer->flags = EFX_TX_BUF_CONT;
+ } else {
+ tx_queue->tso_long_headers++;
-static struct efx_tso_header *
-efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
-{
- struct efx_tso_header *tsoh;
-
- tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
- if (unlikely(!tsoh))
- return NULL;
-
- tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
- TSOH_BUFFER(tsoh), header_len,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
- tsoh->dma_addr))) {
- kfree(tsoh);
- return NULL;
+ buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
+ if (unlikely(!buffer->heap_buf))
+ return NULL;
+ result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
+ buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
}
- tsoh->unmap_len = header_len;
- return tsoh;
-}
+ buffer->len = len;
-static void
-efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
-{
- dma_unmap_single(&tx_queue->efx->pci_dev->dev,
- tsoh->dma_addr, tsoh->unmap_len,
- DMA_TO_DEVICE);
- kfree(tsoh);
+ return result;
}
/**
@@ -781,47 +736,19 @@ efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
* @len: Length of fragment
* @final_buffer: The final buffer inserted into the queue
*
- * Push descriptors onto the TX queue. Return 0 on success or 1 if
- * @tx_queue full.
+ * Push descriptors onto the TX queue.
*/
-static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
- dma_addr_t dma_addr, unsigned len,
- struct efx_tx_buffer **final_buffer)
+static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, unsigned len,
+ struct efx_tx_buffer **final_buffer)
{
struct efx_tx_buffer *buffer;
struct efx_nic *efx = tx_queue->efx;
- unsigned dma_len, fill_level, insert_ptr;
- int q_space;
+ unsigned dma_len, insert_ptr;
EFX_BUG_ON_PARANOID(len <= 0);
- fill_level = tx_queue->insert_count - tx_queue->old_read_count;
- /* -1 as there is no way to represent all descriptors used */
- q_space = efx->txq_entries - 1 - fill_level;
-
while (1) {
- if (unlikely(q_space-- <= 0)) {
- /* It might be that completions have happened
- * since the xmit path last checked. Update
- * the xmit path's copy of read_count.
- */
- netif_tx_stop_queue(tx_queue->core_txq);
- /* This memory barrier protects the change of
- * queue state from the access of read_count. */
- smp_mb();
- tx_queue->old_read_count =
- ACCESS_ONCE(tx_queue->read_count);
- fill_level = (tx_queue->insert_count
- - tx_queue->old_read_count);
- q_space = efx->txq_entries - 1 - fill_level;
- if (unlikely(q_space-- <= 0)) {
- *final_buffer = NULL;
- return 1;
- }
- smp_mb();
- netif_tx_start_queue(tx_queue->core_txq);
- }
-
insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[insert_ptr];
++tx_queue->insert_count;
@@ -830,12 +757,9 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
tx_queue->read_count >=
efx->txq_entries);
- efx_tsoh_free(tx_queue, buffer);
EFX_BUG_ON_PARANOID(buffer->len);
EFX_BUG_ON_PARANOID(buffer->unmap_len);
- EFX_BUG_ON_PARANOID(buffer->skb);
- EFX_BUG_ON_PARANOID(!buffer->continuation);
- EFX_BUG_ON_PARANOID(buffer->tsoh);
+ EFX_BUG_ON_PARANOID(buffer->flags);
buffer->dma_addr = dma_addr;
@@ -845,7 +769,8 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
if (dma_len >= len)
break;
- buffer->len = dma_len; /* Don't set the other members */
+ buffer->len = dma_len;
+ buffer->flags = EFX_TX_BUF_CONT;
dma_addr += dma_len;
len -= dma_len;
}
@@ -853,7 +778,6 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
EFX_BUG_ON_PARANOID(!len);
buffer->len = len;
*final_buffer = buffer;
- return 0;
}
@@ -864,54 +788,42 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
* a single fragment, and we know it doesn't cross a page boundary. It
* also allows us to not worry about end-of-packet etc.
*/
-static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
- struct efx_tso_header *tsoh, unsigned len)
+static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer, u8 *header)
{
- struct efx_tx_buffer *buffer;
-
- buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
- efx_tsoh_free(tx_queue, buffer);
- EFX_BUG_ON_PARANOID(buffer->len);
- EFX_BUG_ON_PARANOID(buffer->unmap_len);
- EFX_BUG_ON_PARANOID(buffer->skb);
- EFX_BUG_ON_PARANOID(!buffer->continuation);
- EFX_BUG_ON_PARANOID(buffer->tsoh);
- buffer->len = len;
- buffer->dma_addr = tsoh->dma_addr;
- buffer->tsoh = tsoh;
+ if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
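+		/* Heap-allocated headers were not DMA-mapped when allocated
+		 * in efx_tsoh_get_buffer(), so map them here and record the
+		 * length to unmap on completion.
+		 */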
+ buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
+ header, buffer->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
+ buffer->dma_addr))) {
+ kfree(buffer->heap_buf);
+ buffer->len = 0;
+ buffer->flags = 0;
+ return -ENOMEM;
+ }
+ buffer->unmap_len = buffer->len;
+ buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
+ }
++tx_queue->insert_count;
+ return 0;
}
-/* Remove descriptors put into a tx_queue. */
+/* Remove buffers put into a tx_queue.  None of the buffers may have
+ * an skb attached.
+ */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
struct efx_tx_buffer *buffer;
- dma_addr_t unmap_addr;
/* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != tx_queue->write_count) {
--tx_queue->insert_count;
buffer = &tx_queue->buffer[tx_queue->insert_count &
tx_queue->ptr_mask];
- efx_tsoh_free(tx_queue, buffer);
- EFX_BUG_ON_PARANOID(buffer->skb);
- if (buffer->unmap_len) {
- unmap_addr = (buffer->dma_addr + buffer->len -
- buffer->unmap_len);
- if (buffer->unmap_single)
- dma_unmap_single(&tx_queue->efx->pci_dev->dev,
- unmap_addr, buffer->unmap_len,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(&tx_queue->efx->pci_dev->dev,
- unmap_addr, buffer->unmap_len,
- DMA_TO_DEVICE);
- buffer->unmap_len = 0;
- }
- buffer->len = 0;
- buffer->continuation = true;
+ efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
}
}
@@ -919,17 +831,16 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
- /* All ethernet/IP/TCP headers combined size is TCP header size
- * plus offset of TCP header relative to start of packet.
- */
- st->header_len = ((tcp_hdr(skb)->doff << 2u)
- + PTR_DIFF(tcp_hdr(skb), skb->data));
- st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
-
- if (st->protocol == htons(ETH_P_IP))
+ st->ip_off = skb_network_header(skb) - skb->data;
+ st->tcp_off = skb_transport_header(skb) - skb->data;
+ st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+ if (st->protocol == htons(ETH_P_IP)) {
+ st->ip_base_len = st->header_len - st->ip_off;
st->ipv4_id = ntohs(ip_hdr(skb)->id);
- else
+ } else {
+ st->ip_base_len = st->header_len - st->tcp_off;
st->ipv4_id = 0;
+ }
st->seqnum = ntohl(tcp_hdr(skb)->seq);
EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
@@ -938,7 +849,7 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
st->out_len = skb->len - st->header_len;
st->unmap_len = 0;
- st->unmap_single = false;
+ st->dma_flags = 0;
}
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
@@ -947,7 +858,7 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
- st->unmap_single = false;
+ st->dma_flags = 0;
st->unmap_len = skb_frag_size(frag);
st->in_len = skb_frag_size(frag);
st->dma_addr = st->unmap_addr;
@@ -965,7 +876,7 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
len, DMA_TO_DEVICE);
if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
- st->unmap_single = true;
+ st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
st->unmap_len = len;
st->in_len = len;
st->dma_addr = st->unmap_addr;
@@ -982,20 +893,19 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
* @st: TSO state
*
* Form descriptors for the current fragment, until we reach the end
- * of fragment or end-of-packet. Return 0 on success, 1 if not enough
- * space in @tx_queue.
+ * of fragment or end-of-packet.
*/
-static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
- const struct sk_buff *skb,
- struct tso_state *st)
+static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb,
+ struct tso_state *st)
{
struct efx_tx_buffer *buffer;
- int n, end_of_packet, rc;
+ int n;
if (st->in_len == 0)
- return 0;
+ return;
if (st->packet_space == 0)
- return 0;
+ return;
EFX_BUG_ON_PARANOID(st->in_len <= 0);
EFX_BUG_ON_PARANOID(st->packet_space <= 0);
@@ -1006,25 +916,24 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
st->out_len -= n;
st->in_len -= n;
- rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
- if (likely(rc == 0)) {
- if (st->out_len == 0)
- /* Transfer ownership of the skb */
- buffer->skb = skb;
+ efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
- end_of_packet = st->out_len == 0 || st->packet_space == 0;
- buffer->continuation = !end_of_packet;
+ if (st->out_len == 0) {
+ /* Transfer ownership of the skb */
+ buffer->skb = skb;
+ buffer->flags = EFX_TX_BUF_SKB;
+ } else if (st->packet_space != 0) {
+ buffer->flags = EFX_TX_BUF_CONT;
+ }
- if (st->in_len == 0) {
- /* Transfer ownership of the DMA mapping */
- buffer->unmap_len = st->unmap_len;
- buffer->unmap_single = st->unmap_single;
- st->unmap_len = 0;
- }
+ if (st->in_len == 0) {
+ /* Transfer ownership of the DMA mapping */
+ buffer->unmap_len = st->unmap_len;
+ buffer->flags |= st->dma_flags;
+ st->unmap_len = 0;
}
st->dma_addr += n;
- return rc;
}
@@ -1035,36 +944,25 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
* @st: TSO state
*
* Generate a new header and prepare for the new packet. Return 0 on
- * success, or -1 if failed to alloc header.
+ * success, or -%ENOMEM if failed to alloc header.
*/
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
const struct sk_buff *skb,
struct tso_state *st)
{
- struct efx_tso_header *tsoh;
+ struct efx_tx_buffer *buffer =
+ &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
struct tcphdr *tsoh_th;
unsigned ip_length;
u8 *header;
+ int rc;
- /* Allocate a DMA-mapped header buffer. */
- if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
- if (tx_queue->tso_headers_free == NULL) {
- if (efx_tsoh_block_alloc(tx_queue))
- return -1;
- }
- EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
- tsoh = tx_queue->tso_headers_free;
- tx_queue->tso_headers_free = tsoh->next;
- tsoh->unmap_len = 0;
- } else {
- tx_queue->tso_long_headers++;
- tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
- if (unlikely(!tsoh))
- return -1;
- }
+ /* Allocate and insert a DMA-mapped header buffer. */
+ header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
+ if (!header)
+ return -ENOMEM;
- header = TSOH_BUFFER(tsoh);
- tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
+ tsoh_th = (struct tcphdr *)(header + st->tcp_off);
/* Copy and update the headers. */
memcpy(header, skb->data, st->header_len);
@@ -1073,19 +971,19 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
st->seqnum += skb_shinfo(skb)->gso_size;
if (st->out_len > skb_shinfo(skb)->gso_size) {
/* This packet will not finish the TSO burst. */
- ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
+ st->packet_space = skb_shinfo(skb)->gso_size;
tsoh_th->fin = 0;
tsoh_th->psh = 0;
} else {
/* This packet will be the last in the TSO burst. */
- ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
+ st->packet_space = st->out_len;
tsoh_th->fin = tcp_hdr(skb)->fin;
tsoh_th->psh = tcp_hdr(skb)->psh;
}
+ ip_length = st->ip_base_len + st->packet_space;
if (st->protocol == htons(ETH_P_IP)) {
- struct iphdr *tsoh_iph =
- (struct iphdr *)(header + SKB_IPV4_OFF(skb));
+ struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);
tsoh_iph->tot_len = htons(ip_length);
@@ -1094,16 +992,16 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
st->ipv4_id++;
} else {
struct ipv6hdr *tsoh_iph =
- (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));
+ (struct ipv6hdr *)(header + st->ip_off);
- tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
+ tsoh_iph->payload_len = htons(ip_length);
}
- st->packet_space = skb_shinfo(skb)->gso_size;
- ++tx_queue->tso_packets;
+ rc = efx_tso_put_header(tx_queue, buffer, header);
+ if (unlikely(rc))
+ return rc;
- /* Form a descriptor for this header. */
- efx_tso_put_header(tx_queue, tsoh, st->header_len);
+ ++tx_queue->tso_packets;
return 0;
}
@@ -1118,13 +1016,13 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
*
* Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
* @skb was not enqueued. In all cases @skb is consumed. Return
- * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
+ * %NETDEV_TX_OK.
*/
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
struct sk_buff *skb)
{
struct efx_nic *efx = tx_queue->efx;
- int frag_i, rc, rc2 = NETDEV_TX_OK;
+ int frag_i, rc;
struct tso_state state;
/* Find the packet protocol and sanity-check it */
@@ -1156,11 +1054,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
goto mem_err;
while (1) {
- rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
- if (unlikely(rc)) {
- rc2 = NETDEV_TX_BUSY;
- goto unwind;
- }
+ tso_fill_packet_with_fragment(tx_queue, skb, &state);
/* Move onto the next fragment? */
if (state.in_len == 0) {
@@ -1184,6 +1078,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
/* Pass off to hardware */
efx_nic_push_buffers(tx_queue);
+ efx_tx_maybe_stop_queue(tx_queue);
+
tx_queue->tso_bursts++;
return NETDEV_TX_OK;
@@ -1192,10 +1088,9 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
"Out of memory for TSO headers, or DMA mapping error\n");
dev_kfree_skb_any(skb);
- unwind:
/* Free the DMA mapping we were in the process of writing out */
if (state.unmap_len) {
- if (state.unmap_single)
+ if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
state.unmap_len, DMA_TO_DEVICE);
else
@@ -1204,25 +1099,5 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
}
efx_enqueue_unwind(tx_queue);
- return rc2;
-}
-
-
-/*
- * Free up all TSO datastructures associated with tx_queue. This
- * routine should be called only once the tx_queue is both empty and
- * will no longer be used.
- */
-static void efx_fini_tso(struct efx_tx_queue *tx_queue)
-{
- unsigned i;
-
- if (tx_queue->buffer) {
- for (i = 0; i <= tx_queue->ptr_mask; ++i)
- efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
- }
-
- while (tx_queue->tso_headers_free != NULL)
- efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
- &tx_queue->efx->pci_dev->dev);
+ return NETDEV_TX_OK;
}
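The sfc TSO rework above replaces the per-buffer continuation/skb/tsoh/unmap_single fields with a single flags word (EFX_TX_BUF_CONT, EFX_TX_BUF_SKB, EFX_TX_BUF_MAP_SINGLE, EFX_TX_BUF_HEAP), so one dequeue path can decide how to release each buffer. The standalone sketch below shows the same bookkeeping idea only; the BUF_* constants, struct tx_buf and dequeue_buf() are invented for illustration and are not the driver's definitions.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative flag bits; the driver's real EFX_TX_BUF_* values differ. */
#define BUF_CONT        0x1     /* descriptor continues the packet */
#define BUF_SKB         0x2     /* buffer owns an skb to free on completion */
#define BUF_MAP_SINGLE  0x4     /* mapping was dma_map_single(), not a page */
#define BUF_HEAP        0x8     /* header was heap-allocated, free it */

struct tx_buf {
        unsigned int flags;
        void *skb;              /* stand-in for struct sk_buff * */
        void *heap_buf;         /* stand-in for a heap-allocated TSO header */
};

/* One dequeue routine handles every buffer type by testing the flags. */
static void dequeue_buf(struct tx_buf *b)
{
        if (b->flags & BUF_SKB)
                free(b->skb);           /* dev_kfree_skb_any() in the driver */
        else if (b->flags & BUF_HEAP)
                free(b->heap_buf);      /* kfree() in the driver */
        /* BUF_MAP_SINGLE vs. a page mapping would pick the unmap call here. */
        b->flags = 0;
}

int main(void)
{
        struct tx_buf b = { .flags = BUF_SKB | BUF_MAP_SINGLE, .skb = malloc(16) };

        dequeue_buf(&b);
        printf("flags after cleanup: %#x\n", b.flags);
        return 0;
}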
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index b5ba3084c7fc..3e5519a0acc7 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1147,15 +1147,17 @@ static void __devinit ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
{
#define COSMISC_CONSTANT 6
- struct uart_port port = {
- .irq = 0,
- .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
- .iotype = UPIO_MEM,
- .regshift = 0,
- .uartclk = (22000000 << 1) / COSMISC_CONSTANT,
-
- .membase = (unsigned char __iomem *) uart,
- .mapbase = (unsigned long) uart,
+ struct uart_8250_port port = {
+ .port = {
+ .irq = 0,
+ .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
+ .iotype = UPIO_MEM,
+ .regshift = 0,
+ .uartclk = (22000000 << 1) / COSMISC_CONSTANT,
+
+ .membase = (unsigned char __iomem *) uart,
+ .mapbase = (unsigned long) uart,
+ }
};
unsigned char lcr;
@@ -1164,7 +1166,7 @@ static void __devinit ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
uart->iu_scr = COSMISC_CONSTANT,
uart->iu_lcr = lcr;
uart->iu_lcr;
- serial8250_register_port(&port);
+ serial8250_register_8250_port(&port);
}
static void __devinit ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index ade108232048..0376a5e6b2bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -177,7 +177,7 @@ int stmmac_mdio_register(struct net_device *ndev)
new_bus->write = &stmmac_mdio_write;
new_bus->reset = &stmmac_mdio_reset;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- new_bus->name, mdio_bus_data->bus_id);
+ new_bus->name, priv->plat->bus_id);
new_bus->priv = ndev;
new_bus->irq = irqlist;
new_bus->phy_mask = mdio_bus_data->phy_mask;
@@ -213,12 +213,10 @@ int stmmac_mdio_register(struct net_device *ndev)
* and no PHY number was provided to the MAC,
* use the one probed here.
*/
- if ((priv->plat->bus_id == mdio_bus_data->bus_id) &&
- (priv->plat->phy_addr == -1))
+ if (priv->plat->phy_addr == -1)
priv->plat->phy_addr = addr;
- act = (priv->plat->bus_id == mdio_bus_data->bus_id) &&
- (priv->plat->phy_addr == addr);
+ act = (priv->plat->phy_addr == addr);
switch (phydev->irq) {
case PHY_POLL:
irq_str = "POLL";
@@ -258,6 +256,9 @@ int stmmac_mdio_unregister(struct net_device *ndev)
{
struct stmmac_priv *priv = netdev_priv(ndev);
+ if (!priv->mii)
+ return 0;
+
mdiobus_unregister(priv->mii);
priv->mii->priv = NULL;
mdiobus_free(priv->mii);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 13afb8edfadc..1f069b0f6af5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -40,7 +40,6 @@ static void stmmac_default_data(void)
plat_dat.has_gmac = 1;
plat_dat.force_sf_dma_mode = 1;
- mdio_data.bus_id = 1;
mdio_data.phy_reset = NULL;
mdio_data.phy_mask = 0;
plat_dat.mdio_bus_data = &mdio_data;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index b93245c11995..ed112b55ae7f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -78,6 +78,7 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
{
int ret = 0;
struct resource *res;
+ struct device *dev = &pdev->dev;
void __iomem *addr = NULL;
struct stmmac_priv *priv = NULL;
struct plat_stmmacenet_data *plat_dat = NULL;
@@ -87,18 +88,10 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- pr_err("%s: ERROR: memory allocation failed"
- "cannot get the I/O addr 0x%x\n",
- __func__, (unsigned int)res->start);
- return -EBUSY;
- }
-
- addr = ioremap(res->start, resource_size(res));
+ addr = devm_request_and_ioremap(dev, res);
if (!addr) {
pr_err("%s: ERROR: memory mapping failed", __func__);
- ret = -ENOMEM;
- goto out_release_region;
+ return -ENOMEM;
}
if (pdev->dev.of_node) {
@@ -107,14 +100,13 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!plat_dat) {
pr_err("%s: ERROR: no memory", __func__);
- ret = -ENOMEM;
- goto out_unmap;
+ return -ENOMEM;
}
ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
if (ret) {
pr_err("%s: main dt probe failed", __func__);
- goto out_unmap;
+ return ret;
}
} else {
plat_dat = pdev->dev.platform_data;
@@ -124,13 +116,13 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
if (plat_dat->init) {
ret = plat_dat->init(pdev);
if (unlikely(ret))
- goto out_unmap;
+ return ret;
}
priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
if (!priv) {
pr_err("%s: main driver probe failed", __func__);
- goto out_unmap;
+ return -ENODEV;
}
/* Get MAC address if available (DT) */
@@ -142,8 +134,7 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
if (priv->dev->irq == -ENXIO) {
pr_err("%s: ERROR: MAC IRQ configuration "
"information not found\n", __func__);
- ret = -ENXIO;
- goto out_unmap;
+ return -ENXIO;
}
/*
@@ -165,15 +156,6 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
pr_debug("STMMAC platform driver registration completed");
return 0;
-
-out_unmap:
- iounmap(addr);
- platform_set_drvdata(pdev, NULL);
-
-out_release_region:
- release_mem_region(res->start, resource_size(res));
-
- return ret;
}
/**
@@ -186,7 +168,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
- struct resource *res;
int ret = stmmac_dvr_remove(ndev);
if (priv->plat->exit)
@@ -194,10 +175,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- iounmap((void __force __iomem *)priv->ioaddr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
return ret;
}
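The stmmac platform probe above drops the request_mem_region()/ioremap() pair and its goto-based unwinding in favour of devm_request_and_ioremap(), whose resources are released automatically when the device goes away. The toy sketch below illustrates that managed-resource idea with an invented devres list; struct devres, devm_alloc() and devm_release_all() are stand-ins, not the kernel's devres implementation.

#include <stdio.h>
#include <stdlib.h>

/* Toy "devres" list: resources registered here are released in one sweep,
 * so a probe error path can simply return instead of unwinding by hand. */
struct devres {
        void (*release)(void *res);
        void *res;
        struct devres *next;
};

struct toy_device {
        struct devres *res_list;
};

static void *devm_alloc(struct toy_device *dev, size_t size,
                        void (*release)(void *))
{
        struct devres *dr = malloc(sizeof(*dr));

        if (!dr)
                return NULL;
        dr->res = malloc(size);
        if (!dr->res) {
                free(dr);
                return NULL;
        }
        dr->release = release;
        dr->next = dev->res_list;
        dev->res_list = dr;
        return dr->res;
}

static void devm_release_all(struct toy_device *dev)
{
        struct devres *dr = dev->res_list;

        while (dr) {
                struct devres *next = dr->next;

                dr->release(dr->res);
                free(dr);
                dr = next;
        }
        dev->res_list = NULL;
}

static void plain_free(void *res) { free(res); }

int main(void)
{
        struct toy_device dev = { .res_list = NULL };
        void *regs = devm_alloc(&dev, 64, plain_free);  /* "ioremap" stand-in */
        void *priv = devm_alloc(&dev, 128, plain_free); /* "kzalloc" stand-in */

        if (!regs || !priv)
                return 1;               /* no goto-unwind needed */
        devm_release_all(&dev);         /* the driver core does this on detach */
        printf("released all managed resources\n");
        return 0;
}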
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index ce4df61b4b56..c8251be104d6 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -3890,7 +3890,7 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
schedule_work(&cp->reset_task);
#endif
- flush_work_sync(&cp->reset_task);
+ flush_work(&cp->reset_task);
return 0;
}
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index c2a0fe393267..8419bf385e08 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9762,9 +9762,8 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
union niu_parent_id parent_id;
struct net_device *dev;
struct niu *np;
- int err, pos;
+ int err;
u64 dma_mask;
- u16 val16;
niu_driver_version();
@@ -9787,8 +9786,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
goto err_out_disable_pdev;
}
- pos = pci_pcie_cap(pdev);
- if (pos <= 0) {
+ if (!pci_is_pcie(pdev)) {
dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
goto err_out_free_res;
}
@@ -9813,14 +9811,11 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
goto err_out_free_dev;
}
- pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
- val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
- val16 |= (PCI_EXP_DEVCTL_CERE |
- PCI_EXP_DEVCTL_NFERE |
- PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_URRE |
- PCI_EXP_DEVCTL_RELAX_EN);
- pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
+ pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_NOSNOOP_EN,
+ PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE |
+ PCI_EXP_DEVCTL_RELAX_EN);
dma_mask = DMA_BIT_MASK(44);
err = pci_set_dma_mask(pdev, dma_mask);
@@ -9932,7 +9927,7 @@ static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
if (!netif_running(dev))
return 0;
- flush_work_sync(&np->reset_task);
+ flush_work(&np->reset_task);
niu_netif_stop(np);
del_timer_sync(&np->timer);
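The niu change above replaces an open-coded read/modify/write of the PCIe Device Control register with pcie_capability_clear_and_set_word(). The helper's core operation is simply "clear these bits, then set those bits" on a 16-bit value, as the standalone sketch below shows; the EXAMPLE_* constants are made-up bits, not the real PCI_EXP_DEVCTL_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Generic read-modify-write on a 16-bit register value: clear the bits in
 * 'clear', then set the bits in 'set'. */
static uint16_t clear_and_set16(uint16_t val, uint16_t clear, uint16_t set)
{
        return (uint16_t)((val & ~clear) | set);
}

#define EXAMPLE_NOSNOOP 0x0800
#define EXAMPLE_CERE    0x0001
#define EXAMPLE_RELAX   0x0010

int main(void)
{
        uint16_t devctl = 0x2810;       /* pretend value read from config space */

        devctl = clear_and_set16(devctl, EXAMPLE_NOSNOOP,
                                 EXAMPLE_CERE | EXAMPLE_RELAX);
        printf("devctl = %#06x\n", (unsigned int)devctl);
        return 0;
}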
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 967fe8cb476e..c9c977bf02ac 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -212,7 +212,6 @@ static void bigmac_clean_rings(struct bigmac *bp)
static void bigmac_init_rings(struct bigmac *bp, int from_irq)
{
struct bmac_init_block *bb = bp->bmac_block;
- struct net_device *dev = bp->dev;
int i;
gfp_t gfp_flags = GFP_KERNEL;
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 1b173a6145d6..b26cbda5efa9 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -32,7 +32,7 @@ config TI_DAVINCI_EMAC
config TI_DAVINCI_MDIO
tristate "TI DaVinci MDIO Support"
- depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX )
select PHYLIB
---help---
This driver supports TI's DaVinci MDIO module.
@@ -42,7 +42,7 @@ config TI_DAVINCI_MDIO
config TI_DAVINCI_CPDMA
tristate "TI DaVinci CPDMA Support"
- depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX )
---help---
This driver supports TI's DaVinci CPDMA dma engine.
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 1e5d85b06e71..df55e2403746 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -28,6 +28,9 @@
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
#include <linux/platform_data/cpsw.h>
@@ -383,6 +386,11 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
mac_control |= BIT(7); /* GIGABITEN */
if (phy->duplex)
mac_control |= BIT(0); /* FULLDUPLEXEN */
+
+ /* set speed_in input in case RMII mode is used in 100Mbps */
+ if (phy->speed == 100)
+ mac_control |= BIT(15);
+
*link = true;
} else {
mac_control = 0;
@@ -709,6 +717,158 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->sliver = regs + data->sliver_reg_ofs;
}
+static int cpsw_probe_dt(struct cpsw_platform_data *data,
+ struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *slave_node;
+ int i = 0, ret;
+ u32 prop;
+
+ if (!node)
+ return -EINVAL;
+
+ if (of_property_read_u32(node, "slaves", &prop)) {
+ pr_err("Missing slaves property in the DT.\n");
+ return -EINVAL;
+ }
+ data->slaves = prop;
+
+ data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
+ data->slaves, GFP_KERNEL);
+ if (!data->slave_data) {
+ pr_err("Could not allocate slave memory.\n");
+ return -EINVAL;
+ }
+
+ data->no_bd_ram = of_property_read_bool(node, "no_bd_ram");
+
+ if (of_property_read_u32(node, "cpdma_channels", &prop)) {
+ pr_err("Missing cpdma_channels property in the DT.\n");
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ data->channels = prop;
+
+ if (of_property_read_u32(node, "host_port_no", &prop)) {
+ pr_err("Missing host_port_no property in the DT.\n");
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ data->host_port_num = prop;
+
+ if (of_property_read_u32(node, "cpdma_reg_ofs", &prop)) {
+ pr_err("Missing cpdma_reg_ofs property in the DT.\n");
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ data->cpdma_reg_ofs = prop;
+
+ if (of_property_read_u32(node, "cpdma_sram_ofs", &prop)) {
+ pr_err("Missing cpdma_sram_ofs property in the DT.\n");
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ data->cpdma_sram_ofs = prop;
+
+ if (of_property_read_u32(node, "ale_reg_ofs", &prop)) {
+ pr_err("Missing ale_reg_ofs property in the DT.\n");
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ data->ale_reg_ofs = prop;
+
+ if (of_property_read_u32(node, "ale_entries", &prop)) {
+ pr_err("Missing ale_entries property in the DT.\n");
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ data->ale_entries = prop;
+
+ if (of_property_read_u32(node, "host_port_reg_ofs", &prop)) {
+ pr_err("Missing host_port_reg_ofs property in the DT.\n");
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ data->host_port_reg_ofs = prop;
+
+ if (of_property_read_u32(node, "hw_stats_reg_ofs", &prop)) {
+ pr_err("Missing hw_stats_reg_ofs property in the DT.\n");
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ data->hw_stats_reg_ofs = prop;
+
+ if (of_property_read_u32(node, "bd_ram_ofs", &prop)) {
+ pr_err("Missing bd_ram_ofs property in the DT.\n");
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ data->bd_ram_ofs = prop;
+
+ if (of_property_read_u32(node, "bd_ram_size", &prop)) {
+ pr_err("Missing bd_ram_size property in the DT.\n");
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ data->bd_ram_size = prop;
+
+ if (of_property_read_u32(node, "rx_descs", &prop)) {
+ pr_err("Missing rx_descs property in the DT.\n");
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ data->rx_descs = prop;
+
+ if (of_property_read_u32(node, "mac_control", &prop)) {
+ pr_err("Missing mac_control property in the DT.\n");
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ data->mac_control = prop;
+
+ for_each_child_of_node(node, slave_node) {
+ struct cpsw_slave_data *slave_data = data->slave_data + i;
+ const char *phy_id = NULL;
+ const void *mac_addr = NULL;
+
+ if (of_property_read_string(slave_node, "phy_id", &phy_id)) {
+ pr_err("Missing slave[%d] phy_id property\n", i);
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ slave_data->phy_id = phy_id;
+
+ if (of_property_read_u32(slave_node, "slave_reg_ofs", &prop)) {
+ pr_err("Missing slave[%d] slave_reg_ofs property\n", i);
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ slave_data->slave_reg_ofs = prop;
+
+ if (of_property_read_u32(slave_node, "sliver_reg_ofs",
+ &prop)) {
+ pr_err("Missing slave[%d] sliver_reg_ofs property\n",
+ i);
+ ret = -EINVAL;
+ goto error_ret;
+ }
+ slave_data->sliver_reg_ofs = prop;
+
+ mac_addr = of_get_mac_address(slave_node);
+ if (mac_addr)
+ memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
+
+ i++;
+ }
+
+ return 0;
+
+error_ret:
+ kfree(data->slave_data);
+ return ret;
+}
+
static int __devinit cpsw_probe(struct platform_device *pdev)
{
struct cpsw_platform_data *data = pdev->dev.platform_data;
@@ -720,11 +880,6 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
struct resource *res;
int ret = 0, i, k = 0;
- if (!data) {
- pr_err("platform data missing\n");
- return -ENODEV;
- }
-
ndev = alloc_etherdev(sizeof(struct cpsw_priv));
if (!ndev) {
pr_err("error allocating net_device\n");
@@ -734,13 +889,19 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
priv = netdev_priv(ndev);
spin_lock_init(&priv->lock);
- priv->data = *data;
priv->pdev = pdev;
priv->ndev = ndev;
priv->dev = &ndev->dev;
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
priv->rx_packet_max = max(rx_packet_max, 128);
+ if (cpsw_probe_dt(&priv->data, pdev)) {
+ pr_err("cpsw: platform data missing\n");
+ ret = -ENODEV;
+ goto clean_ndev_ret;
+ }
+ data = &priv->data;
+
if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
pr_info("Detected MACID = %pM", priv->mac_addr);
@@ -996,11 +1157,17 @@ static const struct dev_pm_ops cpsw_pm_ops = {
.resume = cpsw_resume,
};
+static const struct of_device_id cpsw_of_mtable[] = {
+ { .compatible = "ti,cpsw", },
+ { /* sentinel */ },
+};
+
static struct platform_driver cpsw_driver = {
.driver = {
.name = "cpsw",
.owner = THIS_MODULE,
.pm = &cpsw_pm_ops,
+ .of_match_table = of_match_ptr(cpsw_of_mtable),
},
.probe = cpsw_probe,
.remove = __devexit_p(cpsw_remove),
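cpsw_probe_dt() above treats every listed property as mandatory: a missing u32 fails the probe and frees the partially filled slave array. The standalone sketch below shows the same required-property pattern against a toy name/value table; struct toy_prop, read_u32() and the table contents are invented for illustration, while the property names are the ones used in the binding above.

#include <stdio.h>
#include <string.h>

/* Toy stand-in for a device-tree node: a fixed name/value table. */
struct toy_prop { const char *name; unsigned int value; };

static const struct toy_prop props[] = {
        { "slaves", 2 },
        { "cpdma_channels", 8 },
        { "ale_entries", 1024 },
};

/* Mimics the of_property_read_u32() contract: 0 on success, -1 if missing. */
static int read_u32(const char *name, unsigned int *out)
{
        for (size_t i = 0; i < sizeof(props) / sizeof(props[0]); i++) {
                if (!strcmp(props[i].name, name)) {
                        *out = props[i].value;
                        return 0;
                }
        }
        return -1;
}

int main(void)
{
        unsigned int slaves, channels;

        /* Required properties: bail out (probe failure) if either is absent. */
        if (read_u32("slaves", &slaves) ||
            read_u32("cpdma_channels", &channels)) {
                fprintf(stderr, "missing mandatory property\n");
                return 1;
        }
        printf("slaves=%u channels=%u\n", slaves, channels);
        return 0;
}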
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index a9ca4a03d31b..51a96dbee9ac 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -36,6 +36,8 @@
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/davinci_emac.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
/*
* This timeout definition is a worst-case ultra defensive measure against
@@ -289,6 +291,25 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
return 0;
}
+static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
+ struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ u32 prop;
+
+ if (!node)
+ return -EINVAL;
+
+ if (of_property_read_u32(node, "bus_freq", &prop)) {
+ pr_err("Missing bus_freq property in the DT.\n");
+ return -EINVAL;
+ }
+ data->bus_freq = prop;
+
+ return 0;
+}
+
+
static int __devinit davinci_mdio_probe(struct platform_device *pdev)
{
struct mdio_platform_data *pdata = pdev->dev.platform_data;
@@ -304,8 +325,6 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
return -ENOMEM;
}
- data->pdata = pdata ? (*pdata) : default_pdata;
-
data->bus = mdiobus_alloc();
if (!data->bus) {
dev_err(dev, "failed to alloc mii bus\n");
@@ -313,14 +332,22 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
goto bail_out;
}
+ if (dev->of_node) {
+ if (davinci_mdio_probe_dt(&data->pdata, pdev))
+ data->pdata = default_pdata;
+ snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+ } else {
+ data->pdata = pdata ? (*pdata) : default_pdata;
+ snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
+ }
+
data->bus->name = dev_name(dev);
data->bus->read = davinci_mdio_read,
data->bus->write = davinci_mdio_write,
data->bus->reset = davinci_mdio_reset,
data->bus->parent = dev;
data->bus->priv = data;
- snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
- pdev->name, pdev->id);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
@@ -456,11 +483,17 @@ static const struct dev_pm_ops davinci_mdio_pm_ops = {
.resume = davinci_mdio_resume,
};
+static const struct of_device_id davinci_mdio_of_mtable[] = {
+ { .compatible = "ti,davinci_mdio", },
+ { /* sentinel */ },
+};
+
static struct platform_driver davinci_mdio_driver = {
.driver = {
.name = "davinci_mdio",
.owner = THIS_MODULE,
.pm = &davinci_mdio_pm_ops,
+ .of_match_table = of_match_ptr(davinci_mdio_of_mtable),
},
.probe = davinci_mdio_probe,
.remove = __devexit_p(davinci_mdio_remove),
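The davinci_mdio probe above now picks its configuration from the device tree when a node is present, falls back to board platform data otherwise, and uses compiled-in defaults as a last resort; non-DT devices keep a "<name>-<id>" bus id. A minimal sketch of that fallback chain follows, with an invented struct mdio_cfg and example numbers that are not the driver's defaults.

#include <stdio.h>

struct mdio_cfg { unsigned long bus_freq; };

static const struct mdio_cfg default_cfg = { .bus_freq = 2200000 };

/* Prefer device-tree values, then platform data, then built-in defaults. */
static struct mdio_cfg pick_cfg(const struct mdio_cfg *dt,
                                const struct mdio_cfg *pdata)
{
        if (dt)
                return *dt;
        if (pdata)
                return *pdata;
        return default_cfg;
}

int main(void)
{
        struct mdio_cfg board = { .bus_freq = 1000000 };
        struct mdio_cfg cfg = pick_cfg(NULL, &board);   /* no DT node here */
        char bus_id[32];

        /* Non-DT devices get a "<name>-<id>" bus id, as in the probe above. */
        snprintf(bus_id, sizeof(bus_id), "%s-%x", "davinci_mdio", 0u);
        printf("bus %s at %lu Hz\n", bus_id, cfg.bus_freq);
        return 0;
}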
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 277c93e9ff4d..8fa947a2d929 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1359,7 +1359,6 @@ static int tsi108_open(struct net_device *dev)
}
data->rxskbs[i] = skb;
- data->rxskbs[i] = skb;
data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
}
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index a5826a3111a6..2c08bf6e7bf3 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -637,8 +637,7 @@ static int __devinit w5100_hw_probe(struct platform_device *pdev)
if (data && is_valid_ether_addr(data->mac_addr)) {
memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
} else {
- eth_random_addr(ndev->dev_addr);
- ndev->addr_assign_type |= NET_ADDR_RANDOM;
+ eth_hw_addr_random(ndev);
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index bdd8891c215a..88943d90c765 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -557,8 +557,7 @@ static int __devinit w5300_hw_probe(struct platform_device *pdev)
if (data && is_valid_ether_addr(data->mac_addr)) {
memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
} else {
- eth_random_addr(ndev->dev_addr);
- ndev->addr_assign_type |= NET_ADDR_RANDOM;
+ eth_hw_addr_random(ndev);
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
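Both wiznet drivers above collapse eth_random_addr() plus the NET_ADDR_RANDOM flag into the single eth_hw_addr_random() call. A randomly generated MAC of this kind is conventionally made unicast and locally administered by clearing bit 0 and setting bit 1 of the first octet; the standalone sketch below shows that convention (gen_random_mac() is an invented helper, not the kernel function).

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ETH_ALEN 6

/* Fill 'addr' with a random unicast, locally administered MAC address:
 * bit 0 of the first octet (multicast) cleared, bit 1 (local) set. */
static void gen_random_mac(unsigned char addr[ETH_ALEN])
{
        for (int i = 0; i < ETH_ALEN; i++)
                addr[i] = (unsigned char)(rand() & 0xff);
        addr[0] &= 0xfe;        /* clear multicast bit */
        addr[0] |= 0x02;        /* set locally administered bit */
}

int main(void)
{
        unsigned char mac[ETH_ALEN];

        srand((unsigned int)time(NULL));
        gen_random_mac(mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}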
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 95ceb3593043..5fd6f4674326 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -35,6 +35,7 @@ struct hv_netvsc_packet;
/* Represent the xfer page packet which contains 1 or more netvsc packet */
struct xferpage_packet {
struct list_head list_ent;
+ u32 status;
/* # of netvsc packets this xfer packet contains */
u32 count;
@@ -47,6 +48,7 @@ struct xferpage_packet {
struct hv_netvsc_packet {
/* Bookkeeping stuff */
struct list_head list_ent;
+ u32 status;
struct hv_device *device;
bool is_data_pkt;
@@ -465,8 +467,6 @@ struct nvsp_message {
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
-#define NETVSC_RECEIVE_SG_COUNT 1
-
/* Preallocated receive packets */
#define NETVSC_RECEIVE_PACKETLIST_COUNT 256
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 4a1a5f58fa73..1cd77483da50 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -558,7 +558,7 @@ int netvsc_send(struct hv_device *device,
}
static void netvsc_send_recv_completion(struct hv_device *device,
- u64 transaction_id)
+ u64 transaction_id, u32 status)
{
struct nvsp_message recvcompMessage;
int retries = 0;
@@ -571,9 +571,7 @@ static void netvsc_send_recv_completion(struct hv_device *device,
recvcompMessage.hdr.msg_type =
NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
- /* FIXME: Pass in the status */
- recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status =
- NVSP_STAT_SUCCESS;
+ recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;
retry_send_cmplt:
/* Send the completion */
@@ -613,6 +611,7 @@ static void netvsc_receive_completion(void *context)
bool fsend_receive_comp = false;
unsigned long flags;
struct net_device *ndev;
+ u32 status = NVSP_STAT_NONE;
/*
* Even though it seems logical to do a GetOutboundNetDevice() here to
@@ -627,6 +626,9 @@ static void netvsc_receive_completion(void *context)
/* Overloading use of the lock. */
spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
+ if (packet->status != NVSP_STAT_SUCCESS)
+ packet->xfer_page_pkt->status = NVSP_STAT_FAIL;
+
packet->xfer_page_pkt->count--;
/*
@@ -636,6 +638,7 @@ static void netvsc_receive_completion(void *context)
if (packet->xfer_page_pkt->count == 0) {
fsend_receive_comp = true;
transaction_id = packet->completion.recv.recv_completion_tid;
+ status = packet->xfer_page_pkt->status;
list_add_tail(&packet->xfer_page_pkt->list_ent,
&net_device->recv_pkt_list);
@@ -647,7 +650,7 @@ static void netvsc_receive_completion(void *context)
/* Send a receive completion for the xfer page packet */
if (fsend_receive_comp)
- netvsc_send_recv_completion(device, transaction_id);
+ netvsc_send_recv_completion(device, transaction_id, status);
}
@@ -736,7 +739,8 @@ static void netvsc_receive(struct hv_device *device,
flags);
netvsc_send_recv_completion(device,
- vmxferpage_packet->d.trans_id);
+ vmxferpage_packet->d.trans_id,
+ NVSP_STAT_FAIL);
return;
}
@@ -744,6 +748,7 @@ static void netvsc_receive(struct hv_device *device,
/* Remove the 1st packet to represent the xfer page packet itself */
xferpage_packet = (struct xferpage_packet *)listHead.next;
list_del(&xferpage_packet->list_ent);
+ xferpage_packet->status = NVSP_STAT_SUCCESS;
/* This is how much we can satisfy */
xferpage_packet->count = count - 1;
@@ -760,6 +765,7 @@ static void netvsc_receive(struct hv_device *device,
list_del(&netvsc_packet->list_ent);
/* Initialize the netvsc packet */
+ netvsc_packet->status = NVSP_STAT_SUCCESS;
netvsc_packet->xfer_page_pkt = xferpage_packet;
netvsc_packet->completion.recv.recv_completion =
netvsc_receive_completion;
@@ -904,9 +910,7 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
INIT_LIST_HEAD(&net_device->recv_pkt_list);
for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
- packet = kzalloc(sizeof(struct hv_netvsc_packet) +
- (NETVSC_RECEIVE_SG_COUNT *
- sizeof(struct hv_page_buffer)), GFP_KERNEL);
+ packet = kzalloc(sizeof(struct hv_netvsc_packet), GFP_KERNEL);
if (!packet)
break;
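The netvsc changes above attach a status to every packet and to the enclosing transfer-page packet, so the single receive completion can report failure if any contained sub-packet failed. A minimal sketch of that aggregation follows, with invented toy_status values standing in for the NVSP_STAT_* codes.

#include <stdio.h>

enum toy_status { TOY_STAT_SUCCESS = 1, TOY_STAT_FAIL = 2 };

/* The completion for the whole transfer-page packet reports failure if any
 * contained sub-packet failed; otherwise it reports success. */
static enum toy_status aggregate_status(const enum toy_status *pkts, int count)
{
        for (int i = 0; i < count; i++)
                if (pkts[i] != TOY_STAT_SUCCESS)
                        return TOY_STAT_FAIL;
        return TOY_STAT_SUCCESS;
}

int main(void)
{
        enum toy_status batch[3] = {
                TOY_STAT_SUCCESS, TOY_STAT_FAIL, TOY_STAT_SUCCESS
        };

        printf("completion status: %s\n",
               aggregate_status(batch, 3) == TOY_STAT_SUCCESS ?
               "success" : "fail");
        return 0;
}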
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 8c5a1c43c81d..f825a629a699 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -265,6 +265,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
if (!net) {
netdev_err(net, "got receive callback but net device"
" not initialized yet\n");
+ packet->status = NVSP_STAT_FAIL;
return 0;
}
@@ -272,6 +273,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
if (unlikely(!skb)) {
++net->stats.rx_dropped;
+ packet->status = NVSP_STAT_FAIL;
return 0;
}
@@ -400,7 +402,7 @@ static void netvsc_send_garp(struct work_struct *w)
ndev_ctx = container_of(w, struct net_device_context, dwork.work);
net_device = hv_get_drvdata(ndev_ctx->device_ctx);
net = net_device->ndev;
- netif_notify_peers(net);
+ netdev_notify_peers(net);
}
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 1e88a1095934..928148cc3220 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -32,23 +32,31 @@
#include "hyperv_net.h"
+#define RNDIS_EXT_LEN 100
struct rndis_request {
struct list_head list_ent;
struct completion wait_event;
+ struct rndis_message response_msg;
/*
- * FIXME: We assumed a fixed size response here. If we do ever need to
- * handle a bigger response, we can either define a max response
- * message or add a response buffer variable above this field
+ * The buffer for extended info after the RNDIS response message. It's
+ * referenced based on the data offset in the RNDIS message. Its size
+ * is enough for current needs, and should be sufficient for the near
+ * future.
*/
- struct rndis_message response_msg;
+ u8 response_ext[RNDIS_EXT_LEN];
/* Simplify allocation by having a netvsc packet inline */
struct hv_netvsc_packet pkt;
- struct hv_page_buffer buf;
- /* FIXME: We assumed a fixed size request here. */
+ /* Set 2 pages for rndis requests crossing page boundary */
+ struct hv_page_buffer buf[2];
+
struct rndis_message request_msg;
- u8 ext[100];
+ /*
+ * The buffer for the extended info after the RNDIS request message.
+ * It is referenced and sized in a similar way as response_ext.
+ */
+ u8 request_ext[RNDIS_EXT_LEN];
};
static void rndis_filter_send_completion(void *ctx);
@@ -221,6 +229,18 @@ static int rndis_filter_send_request(struct rndis_device *dev,
packet->page_buf[0].offset =
(unsigned long)&req->request_msg & (PAGE_SIZE - 1);
+ /* Add one page_buf when request_msg crossing page boundary */
+ if (packet->page_buf[0].offset + packet->page_buf[0].len > PAGE_SIZE) {
+ packet->page_buf_cnt++;
+ packet->page_buf[0].len = PAGE_SIZE -
+ packet->page_buf[0].offset;
+ packet->page_buf[1].pfn = virt_to_phys((void *)&req->request_msg
+ + packet->page_buf[0].len) >> PAGE_SHIFT;
+ packet->page_buf[1].offset = 0;
+ packet->page_buf[1].len = req->request_msg.msg_len -
+ packet->page_buf[0].len;
+ }
+
packet->completion.send.send_completion_ctx = req;/* packet; */
packet->completion.send.send_completion =
rndis_filter_send_request_completion;
@@ -255,7 +275,8 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
spin_unlock_irqrestore(&dev->request_lock, flags);
if (found) {
- if (resp->msg_len <= sizeof(struct rndis_message)) {
+ if (resp->msg_len <=
+ sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
memcpy(&request->response_msg, resp,
resp->msg_len);
} else {
@@ -392,9 +413,12 @@ int rndis_filter_receive(struct hv_device *dev,
struct rndis_device *rndis_dev;
struct rndis_message *rndis_msg;
struct net_device *ndev;
+ int ret = 0;
- if (!net_dev)
- return -EINVAL;
+ if (!net_dev) {
+ ret = -EINVAL;
+ goto exit;
+ }
ndev = net_dev->ndev;
@@ -402,14 +426,16 @@ int rndis_filter_receive(struct hv_device *dev,
if (!net_dev->extension) {
netdev_err(ndev, "got rndis message but no rndis device - "
"dropping this message!\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto exit;
}
rndis_dev = (struct rndis_device *)net_dev->extension;
if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) {
netdev_err(ndev, "got rndis message but rndis device "
"uninitialized...dropping this message!\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto exit;
}
rndis_msg = pkt->data;
@@ -441,7 +467,11 @@ int rndis_filter_receive(struct hv_device *dev,
break;
}
- return 0;
+exit:
+ if (ret != 0)
+ pkt->status = NVSP_STAT_FAIL;
+
+ return ret;
}
static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
@@ -641,6 +671,7 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
if (t == 0) {
netdev_err(ndev,
"timeout before we got a set response...\n");
+ ret = -ETIMEDOUT;
/*
* We can't deallocate the request since we may still receive a
* send completion for it.
@@ -678,8 +709,7 @@ static int rndis_filter_init_device(struct rndis_device *dev)
init = &request->request_msg.msg.init_req;
init->major_ver = RNDIS_MAJOR_VERSION;
init->minor_ver = RNDIS_MINOR_VERSION;
- /* FIXME: Use 1536 - rounded ethernet frame size */
- init->max_xfer_size = 2048;
+ init->max_xfer_size = 0x4000;
dev->state = RNDIS_DEV_INITIALIZING;
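rndis_filter_send_request() above adds a second page buffer when the request message straddles a page boundary: the first buffer is trimmed to the end of its page and the second starts at offset 0 on the next page. The standalone sketch below reproduces that arithmetic; TOY_PAGE_SIZE and struct toy_page_buf are invented, and a plain number stands in for the physical address the driver gets from virt_to_phys().

#include <stdio.h>

#define TOY_PAGE_SIZE 4096u

struct toy_page_buf { unsigned long pfn; unsigned int offset, len; };

/* Describe a virtually contiguous buffer with one or two page buffers,
 * splitting at the page boundary exactly as the hunk above does. */
static int fill_page_bufs(unsigned long addr, unsigned int len,
                          struct toy_page_buf pb[2])
{
        pb[0].pfn = addr / TOY_PAGE_SIZE;
        pb[0].offset = addr % TOY_PAGE_SIZE;
        pb[0].len = len;

        if (pb[0].offset + len <= TOY_PAGE_SIZE)
                return 1;                       /* fits in one page */

        pb[0].len = TOY_PAGE_SIZE - pb[0].offset;
        pb[1].pfn = (addr + pb[0].len) / TOY_PAGE_SIZE;
        pb[1].offset = 0;
        pb[1].len = len - pb[0].len;
        return 2;
}

int main(void)
{
        struct toy_page_buf pb[2];
        int n = fill_page_bufs(0x12f80, 300, pb); /* 128 bytes before a page end */

        for (int i = 0; i < n; i++)
                printf("buf%d: pfn=%lu offset=%u len=%u\n",
                       i, pb[i].pfn, pb[i].offset, pb[i].len);
        return 0;
}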
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
new file mode 100644
index 000000000000..08ae4655423a
--- /dev/null
+++ b/drivers/net/ieee802154/Kconfig
@@ -0,0 +1,47 @@
+menuconfig IEEE802154_DRIVERS
+ tristate "IEEE 802.15.4 drivers"
+ depends on NETDEVICES && IEEE802154
+ default y
+ ---help---
+ Say Y here to get to see options for IEEE 802.15.4 Low-Rate
+ Wireless Personal Area Network device drivers. This option alone
+ does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and
+ disabled.
+
+config IEEE802154_FAKEHARD
+ tristate "Fake LR-WPAN driver with several interconnected devices"
+ depends on IEEE802154_DRIVERS
+ ---help---
+ Say Y here to enable the fake driver that serves as an example
+ of a HardMAC device driver.
+
+ This driver can also be built as a module. To do so say M here.
+ The module will be called 'fakehard'.
+
+config IEEE802154_FAKELB
+ depends on IEEE802154_DRIVERS && MAC802154
+ tristate "IEEE 802.15.4 loopback driver"
+ ---help---
+ Say Y here to enable the fake driver that can emulate a net
+ of several interconnected radio devices.
+
+ This driver can also be built as a module. To do so say M here.
+ The module will be called 'fakelb'.
+
+config IEEE802154_AT86RF230
+ depends on IEEE802154_DRIVERS && MAC802154
+ tristate "AT86RF230/231 transceiver driver"
+ depends on SPI
+
+config IEEE802154_MRF24J40
+ tristate "Microchip MRF24J40 transceiver driver"
+ depends on IEEE802154_DRIVERS && MAC802154
+ depends on SPI
+ ---help---
+ Say Y here to enable the MRF24J40 SPI 802.15.4 wireless
+ controller.
+
+ This driver can also be built as a module. To do so, say M here.
+ The module will be called 'mrf24j40'.
diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile
new file mode 100644
index 000000000000..abb0c08decb0
--- /dev/null
+++ b/drivers/net/ieee802154/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
+obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
+obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
+obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
new file mode 100644
index 000000000000..ba753d87a32f
--- /dev/null
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -0,0 +1,958 @@
+/*
+ * AT86RF230/RF231 driver
+ *
+ * Copyright (C) 2009-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/at86rf230.h>
+#include <linux/skbuff.h>
+
+#include <net/mac802154.h>
+#include <net/wpan-phy.h>
+
+struct at86rf230_local {
+ struct spi_device *spi;
+ int rstn, slp_tr, dig2;
+
+ u8 part;
+ u8 vers;
+
+ u8 buf[2];
+ struct mutex bmux;
+
+ struct work_struct irqwork;
+ struct completion tx_complete;
+
+ struct ieee802154_dev *dev;
+
+ spinlock_t lock;
+ bool irq_disabled;
+ bool is_tx;
+};
+
+#define RG_TRX_STATUS (0x01)
+#define SR_TRX_STATUS 0x01, 0x1f, 0
+#define SR_RESERVED_01_3 0x01, 0x20, 5
+#define SR_CCA_STATUS 0x01, 0x40, 6
+#define SR_CCA_DONE 0x01, 0x80, 7
+#define RG_TRX_STATE (0x02)
+#define SR_TRX_CMD 0x02, 0x1f, 0
+#define SR_TRAC_STATUS 0x02, 0xe0, 5
+#define RG_TRX_CTRL_0 (0x03)
+#define SR_CLKM_CTRL 0x03, 0x07, 0
+#define SR_CLKM_SHA_SEL 0x03, 0x08, 3
+#define SR_PAD_IO_CLKM 0x03, 0x30, 4
+#define SR_PAD_IO 0x03, 0xc0, 6
+#define RG_TRX_CTRL_1 (0x04)
+#define SR_IRQ_POLARITY 0x04, 0x01, 0
+#define SR_IRQ_MASK_MODE 0x04, 0x02, 1
+#define SR_SPI_CMD_MODE 0x04, 0x0c, 2
+#define SR_RX_BL_CTRL 0x04, 0x10, 4
+#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5
+#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6
+#define SR_PA_EXT_EN 0x04, 0x80, 7
+#define RG_PHY_TX_PWR (0x05)
+#define SR_TX_PWR 0x05, 0x0f, 0
+#define SR_PA_LT 0x05, 0x30, 4
+#define SR_PA_BUF_LT 0x05, 0xc0, 6
+#define RG_PHY_RSSI (0x06)
+#define SR_RSSI 0x06, 0x1f, 0
+#define SR_RND_VALUE 0x06, 0x60, 5
+#define SR_RX_CRC_VALID 0x06, 0x80, 7
+#define RG_PHY_ED_LEVEL (0x07)
+#define SR_ED_LEVEL 0x07, 0xff, 0
+#define RG_PHY_CC_CCA (0x08)
+#define SR_CHANNEL 0x08, 0x1f, 0
+#define SR_CCA_MODE 0x08, 0x60, 5
+#define SR_CCA_REQUEST 0x08, 0x80, 7
+#define RG_CCA_THRES (0x09)
+#define SR_CCA_ED_THRES 0x09, 0x0f, 0
+#define SR_RESERVED_09_1 0x09, 0xf0, 4
+#define RG_RX_CTRL (0x0a)
+#define SR_PDT_THRES 0x0a, 0x0f, 0
+#define SR_RESERVED_0a_1 0x0a, 0xf0, 4
+#define RG_SFD_VALUE (0x0b)
+#define SR_SFD_VALUE 0x0b, 0xff, 0
+#define RG_TRX_CTRL_2 (0x0c)
+#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
+#define SR_RESERVED_0c_2 0x0c, 0x7c, 2
+#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
+#define RG_ANT_DIV (0x0d)
+#define SR_ANT_CTRL 0x0d, 0x03, 0
+#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2
+#define SR_ANT_DIV_EN 0x0d, 0x08, 3
+#define SR_RESERVED_0d_2 0x0d, 0x70, 4
+#define SR_ANT_SEL 0x0d, 0x80, 7
+#define RG_IRQ_MASK (0x0e)
+#define SR_IRQ_MASK 0x0e, 0xff, 0
+#define RG_IRQ_STATUS (0x0f)
+#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0
+#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1
+#define SR_IRQ_2_RX_START 0x0f, 0x04, 2
+#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3
+#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4
+#define SR_IRQ_5_AMI 0x0f, 0x20, 5
+#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6
+#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7
+#define RG_VREG_CTRL (0x10)
+#define SR_RESERVED_10_6 0x10, 0x03, 0
+#define SR_DVDD_OK 0x10, 0x04, 2
+#define SR_DVREG_EXT 0x10, 0x08, 3
+#define SR_RESERVED_10_3 0x10, 0x30, 4
+#define SR_AVDD_OK 0x10, 0x40, 6
+#define SR_AVREG_EXT 0x10, 0x80, 7
+#define RG_BATMON (0x11)
+#define SR_BATMON_VTH 0x11, 0x0f, 0
+#define SR_BATMON_HR 0x11, 0x10, 4
+#define SR_BATMON_OK 0x11, 0x20, 5
+#define SR_RESERVED_11_1 0x11, 0xc0, 6
+#define RG_XOSC_CTRL (0x12)
+#define SR_XTAL_TRIM 0x12, 0x0f, 0
+#define SR_XTAL_MODE 0x12, 0xf0, 4
+#define RG_RX_SYN (0x15)
+#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0
+#define SR_RESERVED_15_2 0x15, 0x70, 4
+#define SR_RX_PDT_DIS 0x15, 0x80, 7
+#define RG_XAH_CTRL_1 (0x17)
+#define SR_RESERVED_17_8 0x17, 0x01, 0
+#define SR_AACK_PROM_MODE 0x17, 0x02, 1
+#define SR_AACK_ACK_TIME 0x17, 0x04, 2
+#define SR_RESERVED_17_5 0x17, 0x08, 3
+#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
+#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
+#define SR_RESERVED_17_2 0x17, 0x40, 6
+#define SR_RESERVED_17_1 0x17, 0x80, 7
+#define RG_FTN_CTRL (0x18)
+#define SR_RESERVED_18_2 0x18, 0x7f, 0
+#define SR_FTN_START 0x18, 0x80, 7
+#define RG_PLL_CF (0x1a)
+#define SR_RESERVED_1a_2 0x1a, 0x7f, 0
+#define SR_PLL_CF_START 0x1a, 0x80, 7
+#define RG_PLL_DCU (0x1b)
+#define SR_RESERVED_1b_3 0x1b, 0x3f, 0
+#define SR_RESERVED_1b_2 0x1b, 0x40, 6
+#define SR_PLL_DCU_START 0x1b, 0x80, 7
+#define RG_PART_NUM (0x1c)
+#define SR_PART_NUM 0x1c, 0xff, 0
+#define RG_VERSION_NUM (0x1d)
+#define SR_VERSION_NUM 0x1d, 0xff, 0
+#define RG_MAN_ID_0 (0x1e)
+#define SR_MAN_ID_0 0x1e, 0xff, 0
+#define RG_MAN_ID_1 (0x1f)
+#define SR_MAN_ID_1 0x1f, 0xff, 0
+#define RG_SHORT_ADDR_0 (0x20)
+#define SR_SHORT_ADDR_0 0x20, 0xff, 0
+#define RG_SHORT_ADDR_1 (0x21)
+#define SR_SHORT_ADDR_1 0x21, 0xff, 0
+#define RG_PAN_ID_0 (0x22)
+#define SR_PAN_ID_0 0x22, 0xff, 0
+#define RG_PAN_ID_1 (0x23)
+#define SR_PAN_ID_1 0x23, 0xff, 0
+#define RG_IEEE_ADDR_0 (0x24)
+#define SR_IEEE_ADDR_0 0x24, 0xff, 0
+#define RG_IEEE_ADDR_1 (0x25)
+#define SR_IEEE_ADDR_1 0x25, 0xff, 0
+#define RG_IEEE_ADDR_2 (0x26)
+#define SR_IEEE_ADDR_2 0x26, 0xff, 0
+#define RG_IEEE_ADDR_3 (0x27)
+#define SR_IEEE_ADDR_3 0x27, 0xff, 0
+#define RG_IEEE_ADDR_4 (0x28)
+#define SR_IEEE_ADDR_4 0x28, 0xff, 0
+#define RG_IEEE_ADDR_5 (0x29)
+#define SR_IEEE_ADDR_5 0x29, 0xff, 0
+#define RG_IEEE_ADDR_6 (0x2a)
+#define SR_IEEE_ADDR_6 0x2a, 0xff, 0
+#define RG_IEEE_ADDR_7 (0x2b)
+#define SR_IEEE_ADDR_7 0x2b, 0xff, 0
+#define RG_XAH_CTRL_0 (0x2c)
+#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0
+#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1
+#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4
+#define RG_CSMA_SEED_0 (0x2d)
+#define SR_CSMA_SEED_0 0x2d, 0xff, 0
+#define RG_CSMA_SEED_1 (0x2e)
+#define SR_CSMA_SEED_1 0x2e, 0x07, 0
+#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3
+#define SR_AACK_DIS_ACK 0x2e, 0x10, 4
+#define SR_AACK_SET_PD 0x2e, 0x20, 5
+#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6
+#define RG_CSMA_BE (0x2f)
+#define SR_MIN_BE 0x2f, 0x0f, 0
+#define SR_MAX_BE 0x2f, 0xf0, 4
+
+#define CMD_REG 0x80
+#define CMD_REG_MASK 0x3f
+#define CMD_WRITE 0x40
+#define CMD_FB 0x20
+
+#define IRQ_BAT_LOW (1 << 7)
+#define IRQ_TRX_UR (1 << 6)
+#define IRQ_AMI (1 << 5)
+#define IRQ_CCA_ED (1 << 4)
+#define IRQ_TRX_END (1 << 3)
+#define IRQ_RX_START (1 << 2)
+#define IRQ_PLL_UNL (1 << 1)
+#define IRQ_PLL_LOCK (1 << 0)
+
+#define STATE_P_ON 0x00 /* BUSY */
+#define STATE_BUSY_RX 0x01
+#define STATE_BUSY_TX 0x02
+#define STATE_FORCE_TRX_OFF 0x03
+#define STATE_FORCE_TX_ON 0x04 /* IDLE */
+/* 0x05 */ /* INVALID_PARAMETER */
+#define STATE_RX_ON 0x06
+/* 0x07 */ /* SUCCESS */
+#define STATE_TRX_OFF 0x08
+#define STATE_TX_ON 0x09
+/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */
+#define STATE_SLEEP 0x0F
+#define STATE_BUSY_RX_AACK 0x11
+#define STATE_BUSY_TX_ARET 0x12
+#define STATE_BUSY_RX_AACK_ON 0x16
+#define STATE_BUSY_TX_ARET_ON 0x19
+#define STATE_RX_ON_NOCLK 0x1C
+#define STATE_RX_AACK_ON_NOCLK 0x1D
+#define STATE_BUSY_RX_AACK_NOCLK 0x1E
+#define STATE_TRANSITION_IN_PROGRESS 0x1F
+
+static int
+__at86rf230_write(struct at86rf230_local *lp, u8 addr, u8 data)
+{
+ u8 *buf = lp->buf;
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = buf,
+ };
+
+ buf[0] = (addr & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
+ buf[1] = data;
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ return status;
+}
+
+static int
+__at86rf230_read_subreg(struct at86rf230_local *lp,
+ u8 addr, u8 mask, int shift, u8 *data)
+{
+ u8 *buf = lp->buf;
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = buf,
+ .rx_buf = buf,
+ };
+
+ buf[0] = (addr & CMD_REG_MASK) | CMD_REG;
+ buf[1] = 0xff;
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ if (status == 0)
+ *data = buf[1];
+
+ return status;
+}
+
+static int
+at86rf230_read_subreg(struct at86rf230_local *lp,
+ u8 addr, u8 mask, int shift, u8 *data)
+{
+ int status;
+
+ mutex_lock(&lp->bmux);
+ status = __at86rf230_read_subreg(lp, addr, mask, shift, data);
+ mutex_unlock(&lp->bmux);
+
+ return status;
+}
+
+static int
+at86rf230_write_subreg(struct at86rf230_local *lp,
+ u8 addr, u8 mask, int shift, u8 data)
+{
+ int status;
+ u8 val;
+
+ mutex_lock(&lp->bmux);
+ status = __at86rf230_read_subreg(lp, addr, 0xff, 0, &val);
+ if (status)
+ goto out;
+
+ val &= ~mask;
+ val |= (data << shift) & mask;
+
+ status = __at86rf230_write(lp, addr, val);
+out:
+ mutex_unlock(&lp->bmux);
+
+ return status;
+}
+
+static int
+at86rf230_write_fbuf(struct at86rf230_local *lp, u8 *data, u8 len)
+{
+ u8 *buf = lp->buf;
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer_head = {
+ .len = 2,
+ .tx_buf = buf,
+
+ };
+ struct spi_transfer xfer_buf = {
+ .len = len,
+ .tx_buf = data,
+ };
+
+ mutex_lock(&lp->bmux);
+ buf[0] = CMD_WRITE | CMD_FB;
+ buf[1] = len + 2; /* 2 bytes for CRC that isn't written */
+
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer_head, &msg);
+ spi_message_add_tail(&xfer_buf, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ mutex_unlock(&lp->bmux);
+ return status;
+}
+
+static int
+at86rf230_read_fbuf(struct at86rf230_local *lp, u8 *data, u8 *len, u8 *lqi)
+{
+ u8 *buf = lp->buf;
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer_head = {
+ .len = 2,
+ .tx_buf = buf,
+ .rx_buf = buf,
+ };
+ struct spi_transfer xfer_head1 = {
+ .len = 2,
+ .tx_buf = buf,
+ .rx_buf = buf,
+ };
+ struct spi_transfer xfer_buf = {
+ .len = 0,
+ .rx_buf = data,
+ };
+
+ mutex_lock(&lp->bmux);
+
+ buf[0] = CMD_FB;
+ buf[1] = 0x00;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer_head, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+
+ xfer_buf.len = *(buf + 1) + 1;
+ *len = buf[1];
+
+ buf[0] = CMD_FB;
+ buf[1] = 0x00;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer_head1, &msg);
+ spi_message_add_tail(&xfer_buf, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ if (status) {
+ if (lqi && (*len > lp->buf[1]))
+ *lqi = data[lp->buf[1]];
+ }
+ mutex_unlock(&lp->bmux);
+
+ return status;
+}
+
+static int
+at86rf230_ed(struct ieee802154_dev *dev, u8 *level)
+{
+ might_sleep();
+ BUG_ON(!level);
+ *level = 0xbe;
+ return 0;
+}
+
+static int
+at86rf230_state(struct ieee802154_dev *dev, int state)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc;
+ u8 val;
+ u8 desired_status;
+
+ might_sleep();
+
+ if (state == STATE_FORCE_TX_ON)
+ desired_status = STATE_TX_ON;
+ else if (state == STATE_FORCE_TRX_OFF)
+ desired_status = STATE_TRX_OFF;
+ else
+ desired_status = state;
+
+ do {
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
+ if (rc)
+ goto err;
+ } while (val == STATE_TRANSITION_IN_PROGRESS);
+
+ if (val == desired_status)
+ return 0;
+
+ /* state is equal to phy states */
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, state);
+ if (rc)
+ goto err;
+
+ do {
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
+ if (rc)
+ goto err;
+ } while (val == STATE_TRANSITION_IN_PROGRESS);
+
+
+ if (val == desired_status)
+ return 0;
+
+ pr_err("unexpected state change: %d, asked for %d\n", val, state);
+ return -EBUSY;
+
+err:
+ pr_err("error: %d\n", rc);
+ return rc;
+}
+
+static int
+at86rf230_start(struct ieee802154_dev *dev)
+{
+ struct at86rf230_local *lp = dev->priv;
+ u8 rc;
+
+ rc = at86rf230_write_subreg(lp, SR_RX_SAFE_MODE, 1);
+ if (rc)
+ return rc;
+
+ return at86rf230_state(dev, STATE_RX_ON);
+}
+
+static void
+at86rf230_stop(struct ieee802154_dev *dev)
+{
+ at86rf230_state(dev, STATE_FORCE_TRX_OFF);
+}
+
+static int
+at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc;
+
+ might_sleep();
+
+ if (page != 0 || channel < 11 || channel > 26) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ rc = at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+ msleep(1); /* Wait for PLL */
+ dev->phy->current_channel = channel;
+
+ return 0;
+}
+
+static int
+at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc;
+ unsigned long flags;
+
+ spin_lock(&lp->lock);
+ if (lp->irq_disabled) {
+ spin_unlock(&lp->lock);
+ return -EBUSY;
+ }
+ spin_unlock(&lp->lock);
+
+ might_sleep();
+
+ rc = at86rf230_state(dev, STATE_FORCE_TX_ON);
+ if (rc)
+ goto err;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->is_tx = 1;
+ INIT_COMPLETION(lp->tx_complete);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ rc = at86rf230_write_fbuf(lp, skb->data, skb->len);
+ if (rc)
+ goto err_rx;
+
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_BUSY_TX);
+ if (rc)
+ goto err_rx;
+
+ rc = wait_for_completion_interruptible(&lp->tx_complete);
+ if (rc < 0)
+ goto err_rx;
+
+ rc = at86rf230_start(dev);
+
+ return rc;
+
+err_rx:
+ at86rf230_start(dev);
+err:
+ pr_err("error: %d\n", rc);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->is_tx = 0;
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return rc;
+}
+
+static int at86rf230_rx(struct at86rf230_local *lp)
+{
+ u8 len = 128, lqi = 0;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+
+ if (!skb)
+ return -ENOMEM;
+
+ if (at86rf230_read_fbuf(lp, skb_put(skb, len), &len, &lqi))
+ goto err;
+
+ if (len < 2)
+ goto err;
+
+ skb_trim(skb, len - 2); /* We do not put CRC into the frame */
+
+ ieee802154_rx_irqsafe(lp->dev, skb, lqi);
+
+ dev_dbg(&lp->spi->dev, "READ_FBUF: %d %x\n", len, lqi);
+
+ return 0;
+err:
+ pr_debug("received frame is too small\n");
+
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+static struct ieee802154_ops at86rf230_ops = {
+ .owner = THIS_MODULE,
+ .xmit = at86rf230_xmit,
+ .ed = at86rf230_ed,
+ .set_channel = at86rf230_channel,
+ .start = at86rf230_start,
+ .stop = at86rf230_stop,
+};
+
+static void at86rf230_irqwork(struct work_struct *work)
+{
+ struct at86rf230_local *lp =
+ container_of(work, struct at86rf230_local, irqwork);
+ u8 status = 0, val;
+ int rc;
+ unsigned long flags;
+
+ rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &val);
+ status |= val;
+
+ status &= ~IRQ_PLL_LOCK; /* ignore */
+ status &= ~IRQ_RX_START; /* ignore */
+ status &= ~IRQ_AMI; /* ignore */
+ status &= ~IRQ_TRX_UR; /* FIXME: possibly handle ???*/
+
+ if (status & IRQ_TRX_END) {
+ spin_lock_irqsave(&lp->lock, flags);
+ status &= ~IRQ_TRX_END;
+ if (lp->is_tx) {
+ lp->is_tx = 0;
+ spin_unlock_irqrestore(&lp->lock, flags);
+ complete(&lp->tx_complete);
+ } else {
+ spin_unlock_irqrestore(&lp->lock, flags);
+ at86rf230_rx(lp);
+ }
+ }
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->irq_disabled = 0;
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ enable_irq(lp->spi->irq);
+}
+
+static irqreturn_t at86rf230_isr(int irq, void *data)
+{
+ struct at86rf230_local *lp = data;
+
+ disable_irq_nosync(irq);
+
+ spin_lock(&lp->lock);
+ lp->irq_disabled = 1;
+ spin_unlock(&lp->lock);
+
+ schedule_work(&lp->irqwork);
+
+ return IRQ_HANDLED;
+}
+
+
+static int at86rf230_hw_init(struct at86rf230_local *lp)
+{
+ u8 status;
+ int rc;
+
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
+ if (rc)
+ return rc;
+
+ dev_info(&lp->spi->dev, "Status: %02x\n", status);
+ if (status == STATE_P_ON) {
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TRX_OFF);
+ if (rc)
+ return rc;
+ msleep(1);
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
+ if (rc)
+ return rc;
+ dev_info(&lp->spi->dev, "Status: %02x\n", status);
+ }
+
+ rc = at86rf230_write_subreg(lp, SR_IRQ_MASK, 0xff); /* IRQ_TRX_UR |
+ * IRQ_CCA_ED |
+ * IRQ_TRX_END |
+ * IRQ_PLL_UNL |
+ * IRQ_PLL_LOCK
+ */
+ if (rc)
+ return rc;
+
+ /* CLKM changes are applied immediately */
+ rc = at86rf230_write_subreg(lp, SR_CLKM_SHA_SEL, 0x00);
+ if (rc)
+ return rc;
+
+ /* Turn CLKM Off */
+ rc = at86rf230_write_subreg(lp, SR_CLKM_CTRL, 0x00);
+ if (rc)
+ return rc;
+ /* Wait the next SLEEP cycle */
+ msleep(100);
+
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ON);
+ if (rc)
+ return rc;
+ msleep(1);
+
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
+ if (rc)
+ return rc;
+ dev_info(&lp->spi->dev, "Status: %02x\n", status);
+
+ rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status);
+ if (rc)
+ return rc;
+ if (!status) {
+ dev_err(&lp->spi->dev, "DVDD error\n");
+ return -EINVAL;
+ }
+
+ rc = at86rf230_read_subreg(lp, SR_AVDD_OK, &status);
+ if (rc)
+ return rc;
+ if (!status) {
+ dev_err(&lp->spi->dev, "AVDD error\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int at86rf230_suspend(struct spi_device *spi, pm_message_t message)
+{
+ return 0;
+}
+
+static int at86rf230_resume(struct spi_device *spi)
+{
+ return 0;
+}
+
+static int at86rf230_fill_data(struct spi_device *spi)
+{
+ struct at86rf230_local *lp = spi_get_drvdata(spi);
+ struct at86rf230_platform_data *pdata = spi->dev.platform_data;
+
+ if (!pdata) {
+ dev_err(&spi->dev, "no platform_data\n");
+ return -EINVAL;
+ }
+
+ lp->rstn = pdata->rstn;
+ lp->slp_tr = pdata->slp_tr;
+ lp->dig2 = pdata->dig2;
+
+ return 0;
+}
+
+static int __devinit at86rf230_probe(struct spi_device *spi)
+{
+ struct ieee802154_dev *dev;
+ struct at86rf230_local *lp;
+ u8 man_id_0, man_id_1;
+ int rc;
+ const char *chip;
+ int supported = 0;
+
+ if (!spi->irq) {
+ dev_err(&spi->dev, "no IRQ specified\n");
+ return -EINVAL;
+ }
+
+ dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops);
+ if (!dev)
+ return -ENOMEM;
+
+ lp = dev->priv;
+ lp->dev = dev;
+
+ lp->spi = spi;
+
+ dev->priv = lp;
+ dev->parent = &spi->dev;
+ dev->extra_tx_headroom = 0;
+ /* We only support the 2.4 GHz band: channels 11-26 on page 0 */
+ dev->phy->channels_supported[0] = 0x7FFF800;
+ dev->flags = IEEE802154_HW_OMIT_CKSUM;
+
+ mutex_init(&lp->bmux);
+ INIT_WORK(&lp->irqwork, at86rf230_irqwork);
+ spin_lock_init(&lp->lock);
+ init_completion(&lp->tx_complete);
+
+ spi_set_drvdata(spi, lp);
+
+ rc = at86rf230_fill_data(spi);
+ if (rc)
+ goto err_fill;
+
+ rc = gpio_request(lp->rstn, "rstn");
+ if (rc)
+ goto err_rstn;
+
+ if (gpio_is_valid(lp->slp_tr)) {
+ rc = gpio_request(lp->slp_tr, "slp_tr");
+ if (rc)
+ goto err_slp_tr;
+ }
+
+ rc = gpio_direction_output(lp->rstn, 1);
+ if (rc)
+ goto err_gpio_dir;
+
+ if (gpio_is_valid(lp->slp_tr)) {
+ rc = gpio_direction_output(lp->slp_tr, 0);
+ if (rc)
+ goto err_gpio_dir;
+ }
+
+ /* Reset */
+ msleep(1);
+ gpio_set_value(lp->rstn, 0);
+ msleep(1);
+ gpio_set_value(lp->rstn, 1);
+ msleep(1);
+
+ rc = at86rf230_read_subreg(lp, SR_MAN_ID_0, &man_id_0);
+ if (rc)
+ goto err_gpio_dir;
+ rc = at86rf230_read_subreg(lp, SR_MAN_ID_1, &man_id_1);
+ if (rc)
+ goto err_gpio_dir;
+
+ if (man_id_1 != 0x00 || man_id_0 != 0x1f) {
+ dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
+ man_id_1, man_id_0);
+ rc = -EINVAL;
+ goto err_gpio_dir;
+ }
+
+ rc = at86rf230_read_subreg(lp, SR_PART_NUM, &lp->part);
+ if (rc)
+ goto err_gpio_dir;
+
+ rc = at86rf230_read_subreg(lp, SR_VERSION_NUM, &lp->vers);
+ if (rc)
+ goto err_gpio_dir;
+
+ switch (lp->part) {
+ case 2:
+ chip = "at86rf230";
+ /* supported = 1; FIXME: should be easy to support; */
+ break;
+ case 3:
+ chip = "at86rf231";
+ supported = 1;
+ break;
+ default:
+ chip = "UNKNOWN";
+ break;
+ }
+
+ dev_info(&spi->dev, "Detected %s chip version %d\n", chip, lp->vers);
+ if (!supported) {
+ rc = -ENOTSUPP;
+ goto err_gpio_dir;
+ }
+
+ rc = at86rf230_hw_init(lp);
+ if (rc)
+ goto err_gpio_dir;
+
+ rc = request_irq(spi->irq, at86rf230_isr, IRQF_SHARED,
+ dev_name(&spi->dev), lp);
+ if (rc)
+ goto err_gpio_dir;
+
+ rc = ieee802154_register_device(lp->dev);
+ if (rc)
+ goto err_irq;
+
+ return rc;
+
+ ieee802154_unregister_device(lp->dev);
+err_irq:
+ free_irq(spi->irq, lp);
+ flush_work(&lp->irqwork);
+err_gpio_dir:
+ if (gpio_is_valid(lp->slp_tr))
+ gpio_free(lp->slp_tr);
+err_slp_tr:
+ gpio_free(lp->rstn);
+err_rstn:
+err_fill:
+ spi_set_drvdata(spi, NULL);
+ mutex_destroy(&lp->bmux);
+ ieee802154_free_device(lp->dev);
+ return rc;
+}
+
+static int __devexit at86rf230_remove(struct spi_device *spi)
+{
+ struct at86rf230_local *lp = spi_get_drvdata(spi);
+
+ ieee802154_unregister_device(lp->dev);
+
+ free_irq(spi->irq, lp);
+ flush_work(&lp->irqwork);
+
+ if (gpio_is_valid(lp->slp_tr))
+ gpio_free(lp->slp_tr);
+ gpio_free(lp->rstn);
+
+ spi_set_drvdata(spi, NULL);
+ mutex_destroy(&lp->bmux);
+ ieee802154_free_device(lp->dev);
+
+ dev_dbg(&spi->dev, "unregistered at86rf230\n");
+ return 0;
+}
+
+static struct spi_driver at86rf230_driver = {
+ .driver = {
+ .name = "at86rf230",
+ .owner = THIS_MODULE,
+ },
+ .probe = at86rf230_probe,
+ .remove = __devexit_p(at86rf230_remove),
+ .suspend = at86rf230_suspend,
+ .resume = at86rf230_resume,
+};
+
+module_spi_driver(at86rf230_driver);
+
+MODULE_DESCRIPTION("AT86RF230 Transceiver Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
new file mode 100644
index 000000000000..7d39add7d467
--- /dev/null
+++ b/drivers/net/ieee802154/fakehard.c
@@ -0,0 +1,448 @@
+/*
+ * Sample driver for HardMAC IEEE 802.15.4 devices
+ *
+ * Copyright (C) 2009 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+
+#include <net/af_ieee802154.h>
+#include <net/ieee802154_netdev.h>
+#include <net/ieee802154.h>
+#include <net/nl802154.h>
+#include <net/wpan-phy.h>
+
+struct fakehard_priv {
+ struct wpan_phy *phy;
+};
+
+static struct wpan_phy *fake_to_phy(const struct net_device *dev)
+{
+ struct fakehard_priv *priv = netdev_priv(dev);
+ return priv->phy;
+}
+
+/**
+ * fake_get_phy - Return a phy corresponding to this device.
+ * @dev: The network device for which to return the wpan-phy object
+ *
+ * This function returns a wpan-phy object corresponding to the passed
+ * network device. The reference counter of the wpan-phy object is
+ * incremented, so when the wpan-phy is no longer needed, you should drop
+ * the reference via a @wpan_phy_put() call.
+ */
+static struct wpan_phy *fake_get_phy(const struct net_device *dev)
+{
+ struct wpan_phy *phy = fake_to_phy(dev);
+ return to_phy(get_device(&phy->dev));
+}
+
+/**
+ * fake_get_pan_id - Retrieve the PAN ID of the device.
+ * @dev: The network device to retrieve the PAN of.
+ *
+ * Return the ID of the PAN from the PIB.
+ */
+static u16 fake_get_pan_id(const struct net_device *dev)
+{
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ return 0xeba1;
+}
+
+/**
+ * fake_get_short_addr - Retrieve the short address of the device.
+ * @dev: The network device to retrieve the short address of.
+ *
+ * Returns the IEEE 802.15.4 short-form address cached for this
+ * device. If the device has not yet had a short address assigned
+ * then this should return 0xFFFF to indicate a lack of association.
+ */
+static u16 fake_get_short_addr(const struct net_device *dev)
+{
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ return 0x1;
+}
+
+/**
+ * fake_get_dsn - Retrieve the DSN of the device.
+ * @dev: The network device to retrieve the DSN for.
+ *
+ * Returns the IEEE 802.15.4 DSN for the network device.
+ * The DSN is the sequence number which will be added to each
+ * packet or MAC command frame by the MAC during transmission.
+ *
+ * DSN means 'Data Sequence Number'.
+ *
+ * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006
+ * document.
+ */
+static u8 fake_get_dsn(const struct net_device *dev)
+{
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ return 0x00; /* The DSN is maintained in hardware, so just return 0 */
+}
+
+/**
+ * fake_get_bsn - Retrieve the BSN of the device.
+ * @dev: The network device to retrieve the BSN for.
+ *
+ * Returns the IEEE 802.15.4 BSN for the network device.
+ * The BSN is the sequence number which will be added to each
+ * beacon frame sent by the MAC.
+ *
+ * BSN means 'Beacon Sequence Number'.
+ *
+ * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006
+ * document.
+ */
+static u8 fake_get_bsn(const struct net_device *dev)
+{
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ return 0x00; /* The BSN is maintained in hardware, so just return 0 */
+}
+
+/**
+ * fake_assoc_req - Make an association request to the HW.
+ * @dev: The network device which we are associating to a network.
+ * @addr: The coordinator with which we wish to associate.
+ * @channel: The channel on which to associate.
+ * @cap: The capability information field to use in the association.
+ *
+ * Start an association with a coordinator. The coordinator's address
+ * and PAN ID can be found in @addr.
+ *
+ * Note: This is in section 7.3.1 and 7.5.3.1 of the IEEE
+ * 802.15.4-2006 document.
+ */
+static int fake_assoc_req(struct net_device *dev,
+ struct ieee802154_addr *addr, u8 channel, u8 page, u8 cap)
+{
+ struct wpan_phy *phy = fake_to_phy(dev);
+
+ mutex_lock(&phy->pib_lock);
+ phy->current_channel = channel;
+ phy->current_page = page;
+ mutex_unlock(&phy->pib_lock);
+
+ /* We simply emulate it here */
+ return ieee802154_nl_assoc_confirm(dev, fake_get_short_addr(dev),
+ IEEE802154_SUCCESS);
+}
+
+/**
+ * fake_assoc_resp - Send an association response to a device.
+ * @dev: The network device on which to send the response.
+ * @addr: The address of the device to respond to.
+ * @short_addr: The assigned short address for the device (if any).
+ * @status: The result of the association request.
+ *
+ * Queue the association response of the coordinator to another
+ * device's attempt to associate with the network which we
+ * coordinate. This is then added to the indirect-send queue to be
+ * transmitted to the end device when it polls for data.
+ *
+ * Note: This is in section 7.3.2 and 7.5.3.1 of the IEEE
+ * 802.15.4-2006 document.
+ */
+static int fake_assoc_resp(struct net_device *dev,
+ struct ieee802154_addr *addr, u16 short_addr, u8 status)
+{
+ return 0;
+}
+
+/**
+ * fake_disassoc_req - Disassociate a device from a network.
+ * @dev: The network device on which we're disassociating a device.
+ * @addr: The device to disassociate from the network.
+ * @reason: The reason to give to the device for being disassociated.
+ *
+ * This sends a disassociation notification to the device being
+ * disassociated from the network.
+ *
+ * Note: This is in section 7.5.3.2 of the IEEE 802.15.4-2006
+ * document, with the reason described in 7.3.3.2.
+ */
+static int fake_disassoc_req(struct net_device *dev,
+ struct ieee802154_addr *addr, u8 reason)
+{
+ return ieee802154_nl_disassoc_confirm(dev, IEEE802154_SUCCESS);
+}
+
+/**
+ * fake_start_req - Start an IEEE 802.15.4 PAN.
+ * @dev: The network device on which to start the PAN.
+ * @addr: The coordinator address to use when starting the PAN.
+ * @channel: The channel on which to start the PAN.
+ * @bcn_ord: Beacon order.
+ * @sf_ord: Superframe order.
+ * @pan_coord: Whether we are starting the PAN as its coordinator or
+ *             merely requesting a coordinator realignment.
+ * @blx: Battery Life Extension feature bitfield.
+ * @coord_realign: Whether a coordinator realignment command should be
+ *             transmitted before applying the new configuration.
+ *
+ * If pan_coord is non-zero then this starts a network with the
+ * provided parameters, otherwise it attempts a coordinator
+ * realignment of the stated network instead.
+ *
+ * Note: This is in section 7.5.2.3 of the IEEE 802.15.4-2006
+ * document, with 7.3.8 describing coordinator realignment.
+ */
+static int fake_start_req(struct net_device *dev, struct ieee802154_addr *addr,
+ u8 channel, u8 page,
+ u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx,
+ u8 coord_realign)
+{
+ struct wpan_phy *phy = fake_to_phy(dev);
+
+ mutex_lock(&phy->pib_lock);
+ phy->current_channel = channel;
+ phy->current_page = page;
+ mutex_unlock(&phy->pib_lock);
+
+ /* We don't emulate beacons here at all, so START should fail */
+ ieee802154_nl_start_confirm(dev, IEEE802154_INVALID_PARAMETER);
+ return 0;
+}
+
+/**
+ * fake_scan_req - Start a channel scan.
+ * @dev: The network device on which to perform a channel scan.
+ * @type: The type of scan to perform.
+ * @channels: The channel bitmask to scan.
+ * @duration: How long to spend on each channel.
+ *
+ * This starts either a passive (energy) scan or an active (PAN) scan
+ * on the channels indicated in the @channels bitmask. The duration of
+ * the scan is measured in terms of superframe duration. Specifically,
+ * the scan will spend aBaseSuperFrameDuration * ((2^n) + 1) on each
+ * channel.
+ *
+ * Note: This is in section 7.5.2.1 of the IEEE 802.15.4-2006 document.
+ */
+static int fake_scan_req(struct net_device *dev, u8 type, u32 channels,
+ u8 page, u8 duration)
+{
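+ /* Dummy energy-detect levels, one entry per channel 0-26 */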
+ u8 edl[27] = {};
+ return ieee802154_nl_scan_confirm(dev, IEEE802154_SUCCESS, type,
+ channels, page,
+ type == IEEE802154_MAC_SCAN_ED ? edl : NULL);
+}
+
+static struct ieee802154_mlme_ops fake_mlme = {
+ .assoc_req = fake_assoc_req,
+ .assoc_resp = fake_assoc_resp,
+ .disassoc_req = fake_disassoc_req,
+ .start_req = fake_start_req,
+ .scan_req = fake_scan_req,
+
+ .get_phy = fake_get_phy,
+
+ .get_pan_id = fake_get_pan_id,
+ .get_short_addr = fake_get_short_addr,
+ .get_dsn = fake_get_dsn,
+ .get_bsn = fake_get_bsn,
+};
+
+static int ieee802154_fake_open(struct net_device *dev)
+{
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int ieee802154_fake_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static netdev_tx_t ieee802154_fake_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ /* FIXME: do hardware work here ... */
+
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+
+static int ieee802154_fake_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ struct sockaddr_ieee802154 *sa =
+ (struct sockaddr_ieee802154 *)&ifr->ifr_addr;
+ u16 pan_id, short_addr;
+
+ switch (cmd) {
+ case SIOCGIFADDR:
+ /* FIXME: fixed here, get from device IRL */
+ pan_id = fake_get_pan_id(dev);
+ short_addr = fake_get_short_addr(dev);
+ if (pan_id == IEEE802154_PANID_BROADCAST ||
+ short_addr == IEEE802154_ADDR_BROADCAST)
+ return -EADDRNOTAVAIL;
+
+ sa->family = AF_IEEE802154;
+ sa->addr.addr_type = IEEE802154_ADDR_SHORT;
+ sa->addr.pan_id = pan_id;
+ sa->addr.short_addr = short_addr;
+ return 0;
+ }
+ return -ENOIOCTLCMD;
+}
+
+static int ieee802154_fake_mac_addr(struct net_device *dev, void *p)
+{
+ return -EBUSY; /* HW address is built into the device */
+}
+
+static const struct net_device_ops fake_ops = {
+ .ndo_open = ieee802154_fake_open,
+ .ndo_stop = ieee802154_fake_close,
+ .ndo_start_xmit = ieee802154_fake_xmit,
+ .ndo_do_ioctl = ieee802154_fake_ioctl,
+ .ndo_set_mac_address = ieee802154_fake_mac_addr,
+};
+
+static void ieee802154_fake_destruct(struct net_device *dev)
+{
+ struct wpan_phy *phy = fake_to_phy(dev);
+
+ wpan_phy_unregister(phy);
+ free_netdev(dev);
+ wpan_phy_free(phy);
+}
+
+static void ieee802154_fake_setup(struct net_device *dev)
+{
+ dev->addr_len = IEEE802154_ADDR_LEN;
+ memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
+ dev->features = NETIF_F_HW_CSUM;
+ dev->needed_tailroom = 2; /* FCS */
+ dev->mtu = 127;
+ dev->tx_queue_len = 10;
+ dev->type = ARPHRD_IEEE802154;
+ dev->flags = IFF_NOARP | IFF_BROADCAST;
+ dev->watchdog_timeo = 0;
+ dev->destructor = ieee802154_fake_destruct;
+}
+
+
+static int __devinit ieee802154fake_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct fakehard_priv *priv;
+ struct wpan_phy *phy = wpan_phy_alloc(0);
+ int err;
+
+ if (!phy)
+ return -ENOMEM;
+
+ dev = alloc_netdev(sizeof(struct fakehard_priv), "hardwpan%d", ieee802154_fake_setup);
+ if (!dev) {
+ wpan_phy_free(phy);
+ return -ENOMEM;
+ }
+
+ memcpy(dev->dev_addr, "\xba\xbe\xca\xfe\xde\xad\xbe\xef",
+ dev->addr_len);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ /*
+ * For now we'd like to emulate a 2.4 GHz-only device,
+ * supporting both O-QPSK and CSS
+ */
+ /* 2.4 GHz O-QPSK 802.15.4-2003 */
+ phy->channels_supported[0] |= 0x7FFF800;
+ /* 2.4 GHz CSS 802.15.4a-2007 */
+ phy->channels_supported[3] |= 0x3fff;
+
+ phy->transmit_power = 0xbf;
+
+ dev->netdev_ops = &fake_ops;
+ dev->ml_priv = &fake_mlme;
+
+ priv = netdev_priv(dev);
+ priv->phy = phy;
+
+ wpan_phy_set_dev(phy, &pdev->dev);
+ SET_NETDEV_DEV(dev, &phy->dev);
+
+ platform_set_drvdata(pdev, dev);
+
+ err = wpan_phy_register(phy);
+ if (err)
+ goto out;
+
+ err = register_netdev(dev);
+ if (err < 0)
+ goto out;
+
+ dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n");
+ return 0;
+
+out:
+ unregister_netdev(dev);
+ return err;
+}
+
+static int __devexit ieee802154fake_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ unregister_netdev(dev);
+ return 0;
+}
+
+static struct platform_device *ieee802154fake_dev;
+
+static struct platform_driver ieee802154fake_driver = {
+ .probe = ieee802154fake_probe,
+ .remove = __devexit_p(ieee802154fake_remove),
+ .driver = {
+ .name = "ieee802154hardmac",
+ .owner = THIS_MODULE,
+ },
+};
+
+static __init int fake_init(void)
+{
+ ieee802154fake_dev = platform_device_register_simple(
+ "ieee802154hardmac", -1, NULL, 0);
+ return platform_driver_register(&ieee802154fake_driver);
+}
+
+static __exit void fake_exit(void)
+{
+ platform_driver_unregister(&ieee802154fake_driver);
+ platform_device_unregister(ieee802154fake_dev);
+}
+
+module_init(fake_init);
+module_exit(fake_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
new file mode 100644
index 000000000000..e7456fcd0913
--- /dev/null
+++ b/drivers/net/ieee802154/fakelb.c
@@ -0,0 +1,294 @@
+/*
+ * Loopback IEEE 802.15.4 interface
+ *
+ * Copyright 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Sergey Lapin <slapin@ossfans.org>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <net/mac802154.h>
+#include <net/wpan-phy.h>
+
+static int numlbs = 1;
+
+struct fakelb_dev_priv {
+ struct ieee802154_dev *dev;
+
+ struct list_head list;
+ struct fakelb_priv *fake;
+
+ spinlock_t lock;
+ bool working;
+};
+
+struct fakelb_priv {
+ struct list_head list;
+ rwlock_t lock;
+};
+
+static int
+fakelb_hw_ed(struct ieee802154_dev *dev, u8 *level)
+{
+ might_sleep();
+ BUG_ON(!level);
+ *level = 0xbe;
+
+ return 0;
+}
+
+static int
+fakelb_hw_channel(struct ieee802154_dev *dev, int page, int channel)
+{
+ pr_debug("set channel to %d\n", channel);
+
+ might_sleep();
+ dev->phy->current_page = page;
+ dev->phy->current_channel = channel;
+
+ return 0;
+}
+
+static void
+fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb)
+{
+ struct sk_buff *newskb;
+
+ spin_lock(&priv->lock);
+ if (priv->working) {
+ newskb = pskb_copy(skb, GFP_ATOMIC);
+ ieee802154_rx_irqsafe(priv->dev, newskb, 0xcc);
+ }
+ spin_unlock(&priv->lock);
+}
+
+static int
+fakelb_hw_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
+{
+ struct fakelb_dev_priv *priv = dev->priv;
+ struct fakelb_priv *fake = priv->fake;
+
+ might_sleep();
+
+ read_lock_bh(&fake->lock);
+ if (priv->list.next == priv->list.prev) {
+ /* we are the only device; loop the frame back to ourselves */
+ fakelb_hw_deliver(priv, skb);
+ } else {
+ struct fakelb_dev_priv *dp;
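+ /* deliver to every other device tuned to the same channel */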
+ list_for_each_entry(dp, &priv->fake->list, list) {
+ if (dp != priv &&
+ (dp->dev->phy->current_channel ==
+ priv->dev->phy->current_channel))
+ fakelb_hw_deliver(dp, skb);
+ }
+ }
+ read_unlock_bh(&fake->lock);
+
+ return 0;
+}
+
+static int
+fakelb_hw_start(struct ieee802154_dev *dev) {
+ struct fakelb_dev_priv *priv = dev->priv;
+ int ret = 0;
+
+ spin_lock(&priv->lock);
+ if (priv->working)
+ ret = -EBUSY;
+ else
+ priv->working = 1;
+ spin_unlock(&priv->lock);
+
+ return ret;
+}
+
+static void
+fakelb_hw_stop(struct ieee802154_dev *dev) {
+ struct fakelb_dev_priv *priv = dev->priv;
+
+ spin_lock(&priv->lock);
+ priv->working = 0;
+ spin_unlock(&priv->lock);
+}
+
+static struct ieee802154_ops fakelb_ops = {
+ .owner = THIS_MODULE,
+ .xmit = fakelb_hw_xmit,
+ .ed = fakelb_hw_ed,
+ .set_channel = fakelb_hw_channel,
+ .start = fakelb_hw_start,
+ .stop = fakelb_hw_stop,
+};
+
+/* Number of dummy devices to be set up by this module. */
+module_param(numlbs, int, 0);
+MODULE_PARM_DESC(numlbs, "number of pseudo devices");
+
+static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
+{
+ struct fakelb_dev_priv *priv;
+ int err;
+ struct ieee802154_dev *ieee;
+
+ ieee = ieee802154_alloc_device(sizeof(*priv), &fakelb_ops);
+ if (!ieee)
+ return -ENOMEM;
+
+ priv = ieee->priv;
+ priv->dev = ieee;
+
+ /* 868 MHz BPSK 802.15.4-2003 */
+ ieee->phy->channels_supported[0] |= 1;
+ /* 915 MHz BPSK 802.15.4-2003 */
+ ieee->phy->channels_supported[0] |= 0x7fe;
+ /* 2.4 GHz O-QPSK 802.15.4-2003 */
+ ieee->phy->channels_supported[0] |= 0x7FFF800;
+ /* 868 MHz ASK 802.15.4-2006 */
+ ieee->phy->channels_supported[1] |= 1;
+ /* 915 MHz ASK 802.15.4-2006 */
+ ieee->phy->channels_supported[1] |= 0x7fe;
+ /* 868 MHz O-QPSK 802.15.4-2006 */
+ ieee->phy->channels_supported[2] |= 1;
+ /* 915 MHz O-QPSK 802.15.4-2006 */
+ ieee->phy->channels_supported[2] |= 0x7fe;
+ /* 2.4 GHz CSS 802.15.4a-2007 */
+ ieee->phy->channels_supported[3] |= 0x3fff;
+ /* UWB Sub-gigahertz 802.15.4a-2007 */
+ ieee->phy->channels_supported[4] |= 1;
+ /* UWB Low band 802.15.4a-2007 */
+ ieee->phy->channels_supported[4] |= 0x1e;
+ /* UWB High band 802.15.4a-2007 */
+ ieee->phy->channels_supported[4] |= 0xffe0;
+ /* 750 MHz O-QPSK 802.15.4c-2009 */
+ ieee->phy->channels_supported[5] |= 0xf;
+ /* 750 MHz MPSK 802.15.4c-2009 */
+ ieee->phy->channels_supported[5] |= 0xf0;
+ /* 950 MHz BPSK 802.15.4d-2009 */
+ ieee->phy->channels_supported[6] |= 0x3ff;
+ /* 950 MHz GFSK 802.15.4d-2009 */
+ ieee->phy->channels_supported[6] |= 0x3ffc00;
+
+ INIT_LIST_HEAD(&priv->list);
+ priv->fake = fake;
+
+ spin_lock_init(&priv->lock);
+
+ ieee->parent = dev;
+
+ err = ieee802154_register_device(ieee);
+ if (err)
+ goto err_reg;
+
+ write_lock_bh(&fake->lock);
+ list_add_tail(&priv->list, &fake->list);
+ write_unlock_bh(&fake->lock);
+
+ return 0;
+
+err_reg:
+ ieee802154_free_device(priv->dev);
+ return err;
+}
+
+static void fakelb_del(struct fakelb_dev_priv *priv)
+{
+ write_lock_bh(&priv->fake->lock);
+ list_del(&priv->list);
+ write_unlock_bh(&priv->fake->lock);
+
+ ieee802154_unregister_device(priv->dev);
+ ieee802154_free_device(priv->dev);
+}
+
+static int __devinit fakelb_probe(struct platform_device *pdev)
+{
+ struct fakelb_priv *priv;
+ struct fakelb_dev_priv *dp;
+ int err = -ENOMEM;
+ int i;
+
+ priv = kzalloc(sizeof(struct fakelb_priv), GFP_KERNEL);
+ if (!priv)
+ goto err_alloc;
+
+ INIT_LIST_HEAD(&priv->list);
+ rwlock_init(&priv->lock);
+
+ for (i = 0; i < numlbs; i++) {
+ err = fakelb_add_one(&pdev->dev, priv);
+ if (err < 0)
+ goto err_slave;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ dev_info(&pdev->dev, "added ieee802154 hardware\n");
+ return 0;
+
+err_slave:
+ list_for_each_entry(dp, &priv->list, list)
+ fakelb_del(dp);
+ kfree(priv);
+err_alloc:
+ return err;
+}
+
+static int __devexit fakelb_remove(struct platform_device *pdev)
+{
+ struct fakelb_priv *priv = platform_get_drvdata(pdev);
+ struct fakelb_dev_priv *dp, *temp;
+
+ list_for_each_entry_safe(dp, temp, &priv->list, list)
+ fakelb_del(dp);
+ kfree(priv);
+
+ return 0;
+}
+
+static struct platform_device *ieee802154fake_dev;
+
+static struct platform_driver ieee802154fake_driver = {
+ .probe = fakelb_probe,
+ .remove = __devexit_p(fakelb_remove),
+ .driver = {
+ .name = "ieee802154fakelb",
+ .owner = THIS_MODULE,
+ },
+};
+
+static __init int fakelb_init_module(void)
+{
+ ieee802154fake_dev = platform_device_register_simple(
+ "ieee802154fakelb", -1, NULL, 0);
+ return platform_driver_register(&ieee802154fake_driver);
+}
+
+static __exit void fake_remove_module(void)
+{
+ platform_driver_unregister(&ieee802154fake_driver);
+ platform_device_unregister(ieee802154fake_dev);
+}
+
+module_init(fakelb_init_module);
+module_exit(fake_remove_module);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
new file mode 100644
index 000000000000..ed7521693980
--- /dev/null
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -0,0 +1,767 @@
+/*
+ * Driver for Microchip MRF24J40 802.15.4 Wireless-PAN Networking controller
+ *
+ * Copyright (C) 2012 Alan Ott <alan@signal11.us>
+ * Signal 11 Software
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <net/wpan-phy.h>
+#include <net/mac802154.h>
+
+/* MRF24J40 Short Address Registers */
+#define REG_RXMCR 0x00 /* Receive MAC control */
+#define REG_PANIDL 0x01 /* PAN ID (low) */
+#define REG_PANIDH 0x02 /* PAN ID (high) */
+#define REG_SADRL 0x03 /* Short address (low) */
+#define REG_SADRH 0x04 /* Short address (high) */
+#define REG_EADR0 0x05 /* Long address (low) (high is EADR7) */
+#define REG_TXMCR 0x11 /* Transmit MAC control */
+#define REG_PACON0 0x16 /* Power Amplifier Control */
+#define REG_PACON1 0x17 /* Power Amplifier Control */
+#define REG_PACON2 0x18 /* Power Amplifier Control */
+#define REG_TXNCON 0x1B /* Transmit Normal FIFO Control */
+#define REG_TXSTAT 0x24 /* TX MAC Status Register */
+#define REG_SOFTRST 0x2A /* Soft Reset */
+#define REG_TXSTBL 0x2E /* TX Stabilization */
+#define REG_INTSTAT 0x31 /* Interrupt Status */
+#define REG_INTCON 0x32 /* Interrupt Control */
+#define REG_RFCTL 0x36 /* RF Control Mode Register */
+#define REG_BBREG1 0x39 /* Baseband Registers */
+#define REG_BBREG2 0x3A /* */
+#define REG_BBREG6 0x3E /* */
+#define REG_CCAEDTH 0x3F /* Energy Detection Threshold */
+
+/* MRF24J40 Long Address Registers */
+#define REG_RFCON0 0x200 /* RF Control Registers */
+#define REG_RFCON1 0x201
+#define REG_RFCON2 0x202
+#define REG_RFCON3 0x203
+#define REG_RFCON5 0x205
+#define REG_RFCON6 0x206
+#define REG_RFCON7 0x207
+#define REG_RFCON8 0x208
+#define REG_RSSI 0x210
+#define REG_SLPCON0 0x211 /* Sleep Clock Control Registers */
+#define REG_SLPCON1 0x220
+#define REG_WAKETIMEL 0x222 /* Wake-up Time Match Value Low */
+#define REG_WAKETIMEH 0x223 /* Wake-up Time Match Value High */
+#define REG_RX_FIFO 0x300 /* Receive FIFO */
+
+/* Device configuration: Only channels 11-26 on page 0 are supported. */
+#define MRF24J40_CHAN_MIN 11
+#define MRF24J40_CHAN_MAX 26
+#define CHANNEL_MASK (((u32)1 << (MRF24J40_CHAN_MAX + 1)) \
+ - ((u32)1 << MRF24J40_CHAN_MIN))
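+/* For channels 11-26 this works out to 0x07FFF800 (bits 11-26 set) */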
+
+#define TX_FIFO_SIZE 128 /* From datasheet */
+#define RX_FIFO_SIZE 144 /* From datasheet */
+#define SET_CHANNEL_DELAY_US 192 /* From datasheet */
+
+/* Device Private Data */
+struct mrf24j40 {
+ struct spi_device *spi;
+ struct ieee802154_dev *dev;
+
+ struct mutex buffer_mutex; /* only used to protect buf */
+ struct completion tx_complete;
+ struct work_struct irqwork;
+ u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */
+};
+
+/* Read/Write SPI Commands for Short and Long Address registers. */
+#define MRF24J40_READSHORT(reg) ((reg) << 1)
+#define MRF24J40_WRITESHORT(reg) ((reg) << 1 | 1)
+#define MRF24J40_READLONG(reg) (1 << 15 | (reg) << 5)
+#define MRF24J40_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4)
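+/* Short commands: 6-bit address in bits 6-1, R/W flag in bit 0.
+ * Long commands: bit 15 set, 10-bit address in bits 14-5, R/W flag in bit 4
+ * (e.g. MRF24J40_WRITELONG(0x200) == 0xc010). */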
+
+/* Maximum speed to run the device at. TODO: Get the real max value from
+ * someone at Microchip since it isn't in the datasheet. */
+#define MAX_SPI_SPEED_HZ 1000000
+
+#define printdev(X) (&X->spi->dev)
+
+static int write_short_reg(struct mrf24j40 *devrec, u8 reg, u8 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = devrec->buf,
+ .rx_buf = devrec->buf,
+ };
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ mutex_lock(&devrec->buffer_mutex);
+ devrec->buf[0] = MRF24J40_WRITESHORT(reg);
+ devrec->buf[1] = value;
+
+ ret = spi_sync(devrec->spi, &msg);
+ if (ret)
+ dev_err(printdev(devrec),
+ "SPI write Failed for short register 0x%hhx\n", reg);
+
+ mutex_unlock(&devrec->buffer_mutex);
+ return ret;
+}
+
+static int read_short_reg(struct mrf24j40 *devrec, u8 reg, u8 *val)
+{
+ int ret = -1;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = devrec->buf,
+ .rx_buf = devrec->buf,
+ };
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ mutex_lock(&devrec->buffer_mutex);
+ devrec->buf[0] = MRF24J40_READSHORT(reg);
+ devrec->buf[1] = 0;
+
+ ret = spi_sync(devrec->spi, &msg);
+ if (ret)
+ dev_err(printdev(devrec),
+ "SPI read Failed for short register 0x%hhx\n", reg);
+ else
+ *val = devrec->buf[1];
+
+ mutex_unlock(&devrec->buffer_mutex);
+ return ret;
+}
+
+static int read_long_reg(struct mrf24j40 *devrec, u16 reg, u8 *value)
+{
+ int ret;
+ u16 cmd;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = 3,
+ .tx_buf = devrec->buf,
+ .rx_buf = devrec->buf,
+ };
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ cmd = MRF24J40_READLONG(reg);
+ mutex_lock(&devrec->buffer_mutex);
+ devrec->buf[0] = cmd >> 8 & 0xff;
+ devrec->buf[1] = cmd & 0xff;
+ devrec->buf[2] = 0;
+
+ ret = spi_sync(devrec->spi, &msg);
+ if (ret)
+ dev_err(printdev(devrec),
+ "SPI read Failed for long register 0x%hx\n", reg);
+ else
+ *value = devrec->buf[2];
+
+ mutex_unlock(&devrec->buffer_mutex);
+ return ret;
+}
+
+static int write_long_reg(struct mrf24j40 *devrec, u16 reg, u8 val)
+{
+ int ret;
+ u16 cmd;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = 3,
+ .tx_buf = devrec->buf,
+ .rx_buf = devrec->buf,
+ };
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ cmd = MRF24J40_WRITELONG(reg);
+ mutex_lock(&devrec->buffer_mutex);
+ devrec->buf[0] = cmd >> 8 & 0xff;
+ devrec->buf[1] = cmd & 0xff;
+ devrec->buf[2] = val;
+
+ ret = spi_sync(devrec->spi, &msg);
+ if (ret)
+ dev_err(printdev(devrec),
+ "SPI write Failed for long register 0x%hx\n", reg);
+
+ mutex_unlock(&devrec->buffer_mutex);
+ return ret;
+}
+
+/* This function relies on an undocumented write method. Once a write command
+ and address are set, as many bytes of data as desired can be clocked into
+ the device. The datasheet only shows setting one byte at a time. */
+static int write_tx_buf(struct mrf24j40 *devrec, u16 reg,
+ const u8 *data, size_t length)
+{
+ int ret;
+ u16 cmd;
+ u8 lengths[2];
+ struct spi_message msg;
+ struct spi_transfer addr_xfer = {
+ .len = 2,
+ .tx_buf = devrec->buf,
+ };
+ struct spi_transfer lengths_xfer = {
+ .len = 2,
+ .tx_buf = &lengths, /* TODO: Is DMA really required for SPI? */
+ };
+ struct spi_transfer data_xfer = {
+ .len = length,
+ .tx_buf = data,
+ };
+
+ /* Range check the length. 2 bytes are used for the length fields.*/
+ if (length > TX_FIFO_SIZE-2) {
+ dev_err(printdev(devrec), "write_tx_buf() was passed too large a buffer. Performing short write.\n");
+ length = TX_FIFO_SIZE-2;
+ }
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&addr_xfer, &msg);
+ spi_message_add_tail(&lengths_xfer, &msg);
+ spi_message_add_tail(&data_xfer, &msg);
+
+ cmd = MRF24J40_WRITELONG(reg);
+ mutex_lock(&devrec->buffer_mutex);
+ devrec->buf[0] = cmd >> 8 & 0xff;
+ devrec->buf[1] = cmd & 0xff;
+ lengths[0] = 0x0; /* Header Length. Set to 0 for now. TODO */
+ lengths[1] = length; /* Total length */
+
+ ret = spi_sync(devrec->spi, &msg);
+ if (ret)
+ dev_err(printdev(devrec), "SPI write Failed for TX buf\n");
+
+ mutex_unlock(&devrec->buffer_mutex);
+ return ret;
+}
+
+static int mrf24j40_read_rx_buf(struct mrf24j40 *devrec,
+ u8 *data, u8 *len, u8 *lqi)
+{
+ u8 rx_len;
+ u8 addr[2];
+ u8 lqi_rssi[2];
+ u16 cmd;
+ int ret;
+ struct spi_message msg;
+ struct spi_transfer addr_xfer = {
+ .len = 2,
+ .tx_buf = &addr,
+ };
+ struct spi_transfer data_xfer = {
+ .len = 0x0, /* set below */
+ .rx_buf = data,
+ };
+ struct spi_transfer status_xfer = {
+ .len = 2,
+ .rx_buf = &lqi_rssi,
+ };
+
+ /* Get the length of the data in the RX FIFO. The length in this
+ * register excludes the 1-byte length field at the beginning. */
+ ret = read_long_reg(devrec, REG_RX_FIFO, &rx_len);
+ if (ret)
+ goto out;
+
+ /* Range check the RX FIFO length, accounting for the one-byte
+ * length field at the beginning. */
+ if (rx_len > RX_FIFO_SIZE-1) {
+ dev_err(printdev(devrec), "Invalid length read from device. Performing short read.\n");
+ rx_len = RX_FIFO_SIZE-1;
+ }
+
+ if (rx_len > *len) {
+ /* Passed in buffer wasn't big enough. Should never happen. */
+ dev_err(printdev(devrec), "Buffer not big enough. Performing short read\n");
+ rx_len = *len;
+ }
+
+ /* Set up the commands to read the data. */
+ cmd = MRF24J40_READLONG(REG_RX_FIFO+1);
+ addr[0] = cmd >> 8 & 0xff;
+ addr[1] = cmd & 0xff;
+ data_xfer.len = rx_len;
+
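+ /* One SPI message: 2-byte read command, rx_len data bytes, then the
+ * 2 trailing LQI and RSSI bytes */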
+ spi_message_init(&msg);
+ spi_message_add_tail(&addr_xfer, &msg);
+ spi_message_add_tail(&data_xfer, &msg);
+ spi_message_add_tail(&status_xfer, &msg);
+
+ ret = spi_sync(devrec->spi, &msg);
+ if (ret) {
+ dev_err(printdev(devrec), "SPI RX Buffer Read Failed.\n");
+ goto out;
+ }
+
+ *lqi = lqi_rssi[0];
+ *len = rx_len;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ",
+ DUMP_PREFIX_OFFSET, 16, 1, data, *len, 0);
+ printk(KERN_DEBUG "mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n",
+ lqi_rssi[0], lqi_rssi[1]);
+#endif
+
+out:
+ return ret;
+}
+
+static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
+{
+ struct mrf24j40 *devrec = dev->priv;
+ u8 val;
+ int ret = 0;
+
+ dev_dbg(printdev(devrec), "tx packet of %d bytes\n", skb->len);
+
+ ret = write_tx_buf(devrec, 0x000, skb->data, skb->len);
+ if (ret)
+ goto err;
+
+ /* Set TXNTRIG bit of TXNCON to send packet */
+ ret = read_short_reg(devrec, REG_TXNCON, &val);
+ if (ret)
+ goto err;
+ val |= 0x1;
+ val &= ~0x4;
+ write_short_reg(devrec, REG_TXNCON, val);
+
+ INIT_COMPLETION(devrec->tx_complete);
+
+ /* Wait for the device to send the TX complete interrupt. */
+ ret = wait_for_completion_interruptible_timeout(
+ &devrec->tx_complete,
+ 5 * HZ);
+ if (ret == -ERESTARTSYS)
+ goto err;
+ if (ret == 0) {
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ /* Check for send error from the device. */
+ ret = read_short_reg(devrec, REG_TXSTAT, &val);
+ if (ret)
+ goto err;
+ if (val & 0x1) {
+ dev_err(printdev(devrec), "Error Sending. Retry count exceeded\n");
+ ret = -ECOMM; /* TODO: Better error code ? */
+ } else
+ dev_dbg(printdev(devrec), "Packet Sent\n");
+
+err:
+
+ return ret;
+}
+
+static int mrf24j40_ed(struct ieee802154_dev *dev, u8 *level)
+{
+ /* TODO: */
+ printk(KERN_WARNING "mrf24j40: ed not implemented\n");
+ *level = 0;
+ return 0;
+}
+
+static int mrf24j40_start(struct ieee802154_dev *dev)
+{
+ struct mrf24j40 *devrec = dev->priv;
+ u8 val;
+ int ret;
+
+ dev_dbg(printdev(devrec), "start\n");
+
+ ret = read_short_reg(devrec, REG_INTCON, &val);
+ if (ret)
+ return ret;
+ val &= ~(0x1|0x8); /* Clear TXNIE and RXIE. Enable interrupts */
+ write_short_reg(devrec, REG_INTCON, val);
+
+ return 0;
+}
+
+static void mrf24j40_stop(struct ieee802154_dev *dev)
+{
+ struct mrf24j40 *devrec = dev->priv;
+ u8 val;
+ int ret;
+ dev_dbg(printdev(devrec), "stop\n");
+
+ ret = read_short_reg(devrec, REG_INTCON, &val);
+ if (ret)
+ return;
+ val |= 0x1|0x8; /* Set TXNIE and RXIE. Disable Interrupts */
+ write_short_reg(devrec, REG_INTCON, val);
+
+ return;
+}
+
+static int mrf24j40_set_channel(struct ieee802154_dev *dev,
+ int page, int channel)
+{
+ struct mrf24j40 *devrec = dev->priv;
+ u8 val;
+ int ret;
+
+ dev_dbg(printdev(devrec), "Set Channel %d\n", channel);
+
+ WARN_ON(page != 0);
+ WARN_ON(channel < MRF24J40_CHAN_MIN);
+ WARN_ON(channel > MRF24J40_CHAN_MAX);
+
+ /* Set Channel TODO */
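+ /* RFCON0: channel select (channel - 11) in the high nibble, RFOPT = 0x03
+ * in the low nibble, per the datasheet */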
+ val = (channel-11) << 4 | 0x03;
+ write_long_reg(devrec, REG_RFCON0, val);
+
+ /* RF Reset */
+ ret = read_short_reg(devrec, REG_RFCTL, &val);
+ if (ret)
+ return ret;
+ val |= 0x04;
+ write_short_reg(devrec, REG_RFCTL, val);
+ val &= ~0x04;
+ write_short_reg(devrec, REG_RFCTL, val);
+
+ udelay(SET_CHANNEL_DELAY_US); /* per datasheet */
+
+ return 0;
+}
+
+static int mrf24j40_filter(struct ieee802154_dev *dev,
+ struct ieee802154_hw_addr_filt *filt,
+ unsigned long changed)
+{
+ struct mrf24j40 *devrec = dev->priv;
+
+ dev_dbg(printdev(devrec), "filter\n");
+
+ if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
+ /* Short Addr */
+ u8 addrh, addrl;
+ addrh = filt->short_addr >> 8 & 0xff;
+ addrl = filt->short_addr & 0xff;
+
+ write_short_reg(devrec, REG_SADRH, addrh);
+ write_short_reg(devrec, REG_SADRL, addrl);
+ dev_dbg(printdev(devrec),
+ "Set short addr to %04hx\n", filt->short_addr);
+ }
+
+ if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
+ /* Device Address */
+ int i;
+ for (i = 0; i < 8; i++)
+ write_short_reg(devrec, REG_EADR0+i,
+ filt->ieee_addr[i]);
+
+#ifdef DEBUG
+ printk(KERN_DEBUG "Set long addr to: ");
+ for (i = 0; i < 8; i++)
+ printk("%02hhx ", filt->ieee_addr[i]);
+ printk(KERN_DEBUG "\n");
+#endif
+ }
+
+ if (changed & IEEE802515_AFILT_PANID_CHANGED) {
+ /* PAN ID */
+ u8 panidl, panidh;
+ panidh = filt->pan_id >> 8 & 0xff;
+ panidl = filt->pan_id & 0xff;
+ write_short_reg(devrec, REG_PANIDH, panidh);
+ write_short_reg(devrec, REG_PANIDL, panidl);
+
+ dev_dbg(printdev(devrec), "Set PANID to %04hx\n", filt->pan_id);
+ }
+
+ if (changed & IEEE802515_AFILT_PANC_CHANGED) {
+ /* Pan Coordinator */
+ u8 val;
+ int ret;
+
+ ret = read_short_reg(devrec, REG_RXMCR, &val);
+ if (ret)
+ return ret;
+ if (filt->pan_coord)
+ val |= 0x8;
+ else
+ val &= ~0x8;
+ write_short_reg(devrec, REG_RXMCR, val);
+
+ /* REG_SLOTTED is maintained as default (unslotted/CSMA-CA).
+ * REG_ORDER is maintained as default (no beacon/superframe).
+ */
+
+ dev_dbg(printdev(devrec), "Set Pan Coord to %s\n",
+ filt->pan_coord ? "on" : "off");
+ }
+
+ return 0;
+}
+
+static int mrf24j40_handle_rx(struct mrf24j40 *devrec)
+{
+ u8 len = RX_FIFO_SIZE;
+ u8 lqi = 0;
+ u8 val;
+ int ret = 0;
+ struct sk_buff *skb;
+
+ /* Turn off reception of packets from the air. This prevents the
+ * device from overwriting the buffer while we're reading it. */
+ ret = read_short_reg(devrec, REG_BBREG1, &val);
+ if (ret)
+ goto out;
+ val |= 4; /* SET RXDECINV */
+ write_short_reg(devrec, REG_BBREG1, val);
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = mrf24j40_read_rx_buf(devrec, skb_put(skb, len), &len, &lqi);
+ if (ret < 0) {
+ dev_err(printdev(devrec), "Failure reading RX FIFO\n");
+ kfree_skb(skb);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Cut off the checksum */
+ skb_trim(skb, len-2);
+
+ /* TODO: Other drivers call ieee802154_rx_irqsafe() here (e.g. cc2040,
+ * also from a workqueue). I think irqsafe is not necessary here.
+ * Can someone confirm? */
+ ieee802154_rx_irqsafe(devrec->dev, skb, lqi);
+
+ dev_dbg(printdev(devrec), "RX Handled\n");
+
+out:
+ /* Turn reception of packets from the air back on. */
+ ret = read_short_reg(devrec, REG_BBREG1, &val);
+ if (ret)
+ return ret;
+ val &= ~0x4; /* Clear RXDECINV */
+ write_short_reg(devrec, REG_BBREG1, val);
+
+ return ret;
+}
+
+static struct ieee802154_ops mrf24j40_ops = {
+ .owner = THIS_MODULE,
+ .xmit = mrf24j40_tx,
+ .ed = mrf24j40_ed,
+ .start = mrf24j40_start,
+ .stop = mrf24j40_stop,
+ .set_channel = mrf24j40_set_channel,
+ .set_hw_addr_filt = mrf24j40_filter,
+};
+
+static irqreturn_t mrf24j40_isr(int irq, void *data)
+{
+ struct mrf24j40 *devrec = data;
+
+ disable_irq_nosync(irq);
+
+ schedule_work(&devrec->irqwork);
+
+ return IRQ_HANDLED;
+}
+
+static void mrf24j40_isrwork(struct work_struct *work)
+{
+ struct mrf24j40 *devrec = container_of(work, struct mrf24j40, irqwork);
+ u8 intstat;
+ int ret;
+
+ /* Read the interrupt status */
+ ret = read_short_reg(devrec, REG_INTSTAT, &intstat);
+ if (ret)
+ goto out;
+
+ /* Check for TX complete */
+ if (intstat & 0x1)
+ complete(&devrec->tx_complete);
+
+ /* Check for Rx */
+ if (intstat & 0x8)
+ mrf24j40_handle_rx(devrec);
+
+out:
+ enable_irq(devrec->spi->irq);
+}
+
+static int __devinit mrf24j40_probe(struct spi_device *spi)
+{
+ int ret = -ENOMEM;
+ u8 val;
+ struct mrf24j40 *devrec;
+
+ printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq);
+
+ devrec = kzalloc(sizeof(struct mrf24j40), GFP_KERNEL);
+ if (!devrec)
+ goto err_devrec;
+ devrec->buf = kzalloc(3, GFP_KERNEL);
+ if (!devrec->buf)
+ goto err_buf;
+
+ spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */
+ if (spi->max_speed_hz > MAX_SPI_SPEED_HZ)
+ spi->max_speed_hz = MAX_SPI_SPEED_HZ;
+
+ mutex_init(&devrec->buffer_mutex);
+ init_completion(&devrec->tx_complete);
+ INIT_WORK(&devrec->irqwork, mrf24j40_isrwork);
+ devrec->spi = spi;
+ dev_set_drvdata(&spi->dev, devrec);
+
+ /* Register with the 802154 subsystem */
+
+ devrec->dev = ieee802154_alloc_device(0, &mrf24j40_ops);
+ if (!devrec->dev)
+ goto err_alloc_dev;
+
+ devrec->dev->priv = devrec;
+ devrec->dev->parent = &devrec->spi->dev;
+ devrec->dev->phy->channels_supported[0] = CHANNEL_MASK;
+ devrec->dev->flags = IEEE802154_HW_OMIT_CKSUM|IEEE802154_HW_AACK;
+
+ dev_dbg(printdev(devrec), "registered mrf24j40\n");
+ ret = ieee802154_register_device(devrec->dev);
+ if (ret)
+ goto err_register_device;
+
+ /* Initialize the device.
+ From datasheet section 3.2: Initialization. */
+ write_short_reg(devrec, REG_SOFTRST, 0x07);
+ write_short_reg(devrec, REG_PACON2, 0x98);
+ write_short_reg(devrec, REG_TXSTBL, 0x95);
+ write_long_reg(devrec, REG_RFCON0, 0x03);
+ write_long_reg(devrec, REG_RFCON1, 0x01);
+ write_long_reg(devrec, REG_RFCON2, 0x80);
+ write_long_reg(devrec, REG_RFCON6, 0x90);
+ write_long_reg(devrec, REG_RFCON7, 0x80);
+ write_long_reg(devrec, REG_RFCON8, 0x10);
+ write_long_reg(devrec, REG_SLPCON1, 0x21);
+ write_short_reg(devrec, REG_BBREG2, 0x80);
+ write_short_reg(devrec, REG_CCAEDTH, 0x60);
+ write_short_reg(devrec, REG_BBREG6, 0x40);
+ write_short_reg(devrec, REG_RFCTL, 0x04);
+ write_short_reg(devrec, REG_RFCTL, 0x0);
+ udelay(192);
+
+ /* Set RX Mode. RXMCR<1:0>: 0x0 normal, 0x1 promisc, 0x2 error */
+ ret = read_short_reg(devrec, REG_RXMCR, &val);
+ if (ret)
+ goto err_read_reg;
+ val &= ~0x3; /* Clear RX mode (normal) */
+ write_short_reg(devrec, REG_RXMCR, val);
+
+ ret = request_irq(spi->irq,
+ mrf24j40_isr,
+ IRQF_TRIGGER_FALLING,
+ dev_name(&spi->dev),
+ devrec);
+
+ if (ret) {
+ dev_err(printdev(devrec), "Unable to get IRQ");
+ goto err_irq;
+ }
+
+ return 0;
+
+err_irq:
+err_read_reg:
+ ieee802154_unregister_device(devrec->dev);
+err_register_device:
+ ieee802154_free_device(devrec->dev);
+err_alloc_dev:
+ kfree(devrec->buf);
+err_buf:
+ kfree(devrec);
+err_devrec:
+ return ret;
+}
+
+static int __devexit mrf24j40_remove(struct spi_device *spi)
+{
+ struct mrf24j40 *devrec = dev_get_drvdata(&spi->dev);
+
+ dev_dbg(printdev(devrec), "remove\n");
+
+ free_irq(spi->irq, devrec);
+ flush_work(&devrec->irqwork); /* TODO: Is this the right call? */
+ ieee802154_unregister_device(devrec->dev);
+ ieee802154_free_device(devrec->dev);
+ /* TODO: Will ieee802154_free_device() wait until ->xmit() is
+ * complete? */
+
+ /* Clean up the SPI stuff. */
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(devrec->buf);
+ kfree(devrec);
+ return 0;
+}
+
+static const struct spi_device_id mrf24j40_ids[] = {
+ { "mrf24j40", 0 },
+ { "mrf24j40ma", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, mrf24j40_ids);
+
+static struct spi_driver mrf24j40_driver = {
+ .driver = {
+ .name = "mrf24j40",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .id_table = mrf24j40_ids,
+ .probe = mrf24j40_probe,
+ .remove = __devexit_p(mrf24j40_remove),
+};
+
+static int __init mrf24j40_init(void)
+{
+ return spi_register_driver(&mrf24j40_driver);
+}
+
+static void __exit mrf24j40_exit(void)
+{
+ spi_unregister_driver(&mrf24j40_driver);
+}
+
+module_init(mrf24j40_init);
+module_exit(mrf24j40_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alan Ott");
+MODULE_DESCRIPTION("MRF24J40 SPI 802.15.4 Controller Driver");
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 3352b2443e58..30087ca23a0f 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -124,8 +124,8 @@ static int irtty_change_speed(struct sir_dev *dev, unsigned speed)
tty = priv->tty;
mutex_lock(&tty->termios_mutex);
- old_termios = *(tty->termios);
- cflag = tty->termios->c_cflag;
+ old_termios = tty->termios;
+ cflag = tty->termios.c_cflag;
tty_encode_baud_rate(tty, speed, speed);
if (tty->ops->set_termios)
tty->ops->set_termios(tty, &old_termios);
@@ -281,15 +281,15 @@ static inline void irtty_stop_receiver(struct tty_struct *tty, int stop)
int cflag;
mutex_lock(&tty->termios_mutex);
- old_termios = *(tty->termios);
- cflag = tty->termios->c_cflag;
+ old_termios = tty->termios;
+ cflag = tty->termios.c_cflag;
if (stop)
cflag &= ~CREAD;
else
cflag |= CREAD;
- tty->termios->c_cflag = cflag;
+ tty->termios.c_cflag = cflag;
if (tty->ops->set_termios)
tty->ops->set_termios(tty, &old_termios);
mutex_unlock(&tty->termios_mutex);
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 8d5476707912..002a442bf73f 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -28,9 +28,9 @@
#include <net/irda/irda_device.h>
#include <mach/dma.h>
-#include <mach/irda.h>
-#include <mach/regs-uart.h>
+#include <linux/platform_data/irda-pxaficp.h>
#include <mach/regs-ost.h>
+#include <mach/regs-uart.h>
#define FICP __REG(0x40800000) /* Start of FICP area */
#define ICCR0 __REG(0x40800000) /* ICP Control Register 0 */
@@ -112,6 +112,9 @@ struct pxa_irda {
int txdma;
int rxdma;
+ int uart_irq;
+ int icp_irq;
+
struct irlap_cb *irlap;
struct qos_info qos;
@@ -672,19 +675,19 @@ static int pxa_irda_start(struct net_device *dev)
si->speed = 9600;
- err = request_irq(IRQ_STUART, pxa_irda_sir_irq, 0, dev->name, dev);
+ err = request_irq(si->uart_irq, pxa_irda_sir_irq, 0, dev->name, dev);
if (err)
goto err_irq1;
- err = request_irq(IRQ_ICP, pxa_irda_fir_irq, 0, dev->name, dev);
+ err = request_irq(si->icp_irq, pxa_irda_fir_irq, 0, dev->name, dev);
if (err)
goto err_irq2;
/*
* The interrupt must remain disabled for now.
*/
- disable_irq(IRQ_STUART);
- disable_irq(IRQ_ICP);
+ disable_irq(si->uart_irq);
+ disable_irq(si->icp_irq);
err = -EBUSY;
si->rxdma = pxa_request_dma("FICP_RX",DMA_PRIO_LOW, pxa_irda_fir_dma_rx_irq, dev);
@@ -720,8 +723,8 @@ static int pxa_irda_start(struct net_device *dev)
/*
* Now enable the interrupt and start the queue
*/
- enable_irq(IRQ_STUART);
- enable_irq(IRQ_ICP);
+ enable_irq(si->uart_irq);
+ enable_irq(si->icp_irq);
netif_start_queue(dev);
printk(KERN_DEBUG "pxa_ir: irda driver opened\n");
@@ -738,9 +741,9 @@ err_dma_rx_buff:
err_tx_dma:
pxa_free_dma(si->rxdma);
err_rx_dma:
- free_irq(IRQ_ICP, dev);
+ free_irq(si->icp_irq, dev);
err_irq2:
- free_irq(IRQ_STUART, dev);
+ free_irq(si->uart_irq, dev);
err_irq1:
return err;
@@ -760,8 +763,8 @@ static int pxa_irda_stop(struct net_device *dev)
si->irlap = NULL;
}
- free_irq(IRQ_STUART, dev);
- free_irq(IRQ_ICP, dev);
+ free_irq(si->uart_irq, dev);
+ free_irq(si->icp_irq, dev);
pxa_free_dma(si->rxdma);
pxa_free_dma(si->txdma);
@@ -851,6 +854,9 @@ static int pxa_irda_probe(struct platform_device *pdev)
si->dev = &pdev->dev;
si->pdata = pdev->dev.platform_data;
+ si->uart_irq = platform_get_irq(pdev, 0);
+ si->icp_irq = platform_get_irq(pdev, 1);
+
si->sir_clk = clk_get(&pdev->dev, "UARTCLK");
si->fir_clk = clk_get(&pdev->dev, "FICPCLK");
if (IS_ERR(si->sir_clk) || IS_ERR(si->fir_clk)) {
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index e2a06fd996d5..81f8f9e31db5 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -157,7 +157,7 @@ static const struct net_device_ops loopback_ops = {
*/
static void loopback_setup(struct net_device *dev)
{
- dev->mtu = (16 * 1024) + 20 + 20 + 12;
+ dev->mtu = 64 * 1024;
dev->hard_header_len = ETH_HLEN; /* 14 */
dev->addr_len = ETH_ALEN; /* 6 */
dev->tx_queue_len = 0;
@@ -197,6 +197,7 @@ static __net_init int loopback_net_init(struct net *net)
if (err)
goto out_free_netdev;
+ BUG_ON(dev->ifindex != LOOPBACK_IFINDEX);
net->loopback_dev = dev;
return 0;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 66a9bfe7b1c8..68a43fe602e7 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -546,9 +546,9 @@ static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
return 0;
}
-static int macvlan_fdb_add(struct ndmsg *ndm,
+static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
- unsigned char *addr,
+ const unsigned char *addr,
u16 flags)
{
struct macvlan_dev *vlan = netdev_priv(dev);
@@ -567,7 +567,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm,
static int macvlan_fdb_del(struct ndmsg *ndm,
struct net_device *dev,
- unsigned char *addr)
+ const unsigned char *addr)
{
struct macvlan_dev *vlan = netdev_priv(dev);
int err = -EINVAL;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 3090dc65a6f1..983bbf4d5ef6 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -159,6 +159,19 @@ config MDIO_BUS_MUX_GPIO
several child MDIO busses to a parent bus. Child bus
selection is under the control of GPIO lines.
+config MDIO_BUS_MUX_MMIOREG
+ tristate "Support for MMIO device-controlled MDIO bus multiplexers"
+ depends on OF_MDIO
+ select MDIO_BUS_MUX
+ help
+ This module provides a driver for MDIO bus multiplexers that
+ are controlled via a simple memory-mapped device, like an FPGA.
+ The multiplexer connects one of several child MDIO busses to a
+ parent bus. Child bus selection is under the control of one of
+ the FPGA's registers.
+
+ Currently, only 8-bit registers are supported.
+
endif # PHYLIB
config MICREL_KS8995MA
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 6d2dc6c94f2e..426674debae4 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_AMD_PHY) += amd.o
obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
+obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index b0da0226661f..24e05c43bff8 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -980,7 +980,7 @@ static int dp83640_probe(struct phy_device *phydev)
if (choose_this_phy(clock, phydev)) {
clock->chosen = dp83640;
- clock->ptp_clock = ptp_clock_register(&clock->caps);
+ clock->ptp_clock = ptp_clock_register(&clock->caps, &phydev->dev);
if (IS_ERR(clock->ptp_clock)) {
err = PTR_ERR(clock->ptp_clock);
goto no_register;
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 6d1e3fcc43e2..ec40ba882f61 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -122,6 +122,123 @@ static int lxt971_config_intr(struct phy_device *phydev)
return err;
}
+/*
+ * The A2 version of the LXT973 chip has an erratum: it can randomly return
+ * the contents of the previous even register when you read an odd register
+ */
+
+static int lxt973a2_update_link(struct phy_device *phydev)
+{
+ int status;
+ int control;
+ int retry = 8; /* we try 8 times */
+
+ /* Do a fake read */
+ status = phy_read(phydev, MII_BMSR);
+
+ if (status < 0)
+ return status;
+
+ control = phy_read(phydev, MII_BMCR);
+ if (control < 0)
+ return control;
+
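+ /* Per the erratum, the BMSR (odd) read may return the BMCR (even)
+ * value just read above, so retry while the two match */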
+ do {
+ /* Read link and autonegotiation status */
+ status = phy_read(phydev, MII_BMSR);
+ } while (status >= 0 && retry-- && status == control);
+
+ if (status < 0)
+ return status;
+
+ if ((status & BMSR_LSTATUS) == 0)
+ phydev->link = 0;
+ else
+ phydev->link = 1;
+
+ return 0;
+}
+
+int lxt973a2_read_status(struct phy_device *phydev)
+{
+ int adv;
+ int err;
+ int lpa;
+ int lpagb = 0;
+
+ /* Update the link, but return if there was an error */
+ err = lxt973a2_update_link(phydev);
+ if (err)
+ return err;
+
+ if (AUTONEG_ENABLE == phydev->autoneg) {
+ int retry = 1;
+
+ adv = phy_read(phydev, MII_ADVERTISE);
+
+ if (adv < 0)
+ return adv;
+
+ do {
+ lpa = phy_read(phydev, MII_LPA);
+
+ if (lpa < 0)
+ return lpa;
+
+ /* If both registers are equal, the read is suspect but
+ * not impossible, hence a new try
+ */
+ } while (lpa == adv && retry--);
+
+ lpa &= adv;
+
+ phydev->speed = SPEED_10;
+ phydev->duplex = DUPLEX_HALF;
+ phydev->pause = phydev->asym_pause = 0;
+
+ if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
+ phydev->speed = SPEED_1000;
+
+ if (lpagb & LPA_1000FULL)
+ phydev->duplex = DUPLEX_FULL;
+ } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
+ phydev->speed = SPEED_100;
+
+ if (lpa & LPA_100FULL)
+ phydev->duplex = DUPLEX_FULL;
+ } else {
+ if (lpa & LPA_10FULL)
+ phydev->duplex = DUPLEX_FULL;
+ }
+
+ if (phydev->duplex == DUPLEX_FULL) {
+ phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
+ phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
+ }
+ } else {
+ int bmcr = phy_read(phydev, MII_BMCR);
+
+ if (bmcr < 0)
+ return bmcr;
+
+ if (bmcr & BMCR_FULLDPLX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ if (bmcr & BMCR_SPEED1000)
+ phydev->speed = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ phydev->speed = SPEED_100;
+ else
+ phydev->speed = SPEED_10;
+
+ phydev->pause = phydev->asym_pause = 0;
+ }
+
+ return 0;
+}
+
static int lxt973_probe(struct phy_device *phydev)
{
int val = phy_read(phydev, MII_LXT973_PCR);
@@ -175,6 +292,16 @@ static struct phy_driver lxt97x_driver[] = {
.driver = { .owner = THIS_MODULE,},
}, {
.phy_id = 0x00137a10,
+ .name = "LXT973-A2",
+ .phy_id_mask = 0xffffffff,
+ .features = PHY_BASIC_FEATURES,
+ .flags = 0,
+ .probe = lxt973_probe,
+ .config_aneg = lxt973_config_aneg,
+ .read_status = lxt973a2_read_status,
+ .driver = { .owner = THIS_MODULE,},
+}, {
+ .phy_id = 0x00137a10,
.name = "LXT973",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 7189adf54bd1..899274f2f9b1 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -28,17 +28,38 @@
#include <linux/gpio.h>
#include <linux/mdio-gpio.h>
-#ifdef CONFIG_OF_GPIO
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
-#include <linux/of_platform.h>
-#endif
struct mdio_gpio_info {
struct mdiobb_ctrl ctrl;
int mdc, mdio;
};
+static void *mdio_gpio_of_get_data(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mdio_gpio_platform_data *pdata;
+ int ret;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ ret = of_get_gpio(np, 0);
+ if (ret < 0)
+ return NULL;
+
+ pdata->mdc = ret;
+
+ ret = of_get_gpio(np, 1);
+ if (ret < 0)
+ return NULL;
+ pdata->mdio = ret;
+
+ return pdata;
+}
+
static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
{
struct mdio_gpio_info *bitbang =
@@ -162,10 +183,15 @@ static void __devexit mdio_gpio_bus_destroy(struct device *dev)
static int __devinit mdio_gpio_probe(struct platform_device *pdev)
{
- struct mdio_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct mdio_gpio_platform_data *pdata;
struct mii_bus *new_bus;
int ret;
+ if (pdev->dev.of_node)
+ pdata = mdio_gpio_of_get_data(pdev);
+ else
+ pdata = pdev->dev.platform_data;
+
if (!pdata)
return -ENODEV;
@@ -173,7 +199,11 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev)
if (!new_bus)
return -ENODEV;
- ret = mdiobus_register(new_bus);
+ if (pdev->dev.of_node)
+ ret = of_mdiobus_register(new_bus, pdev->dev.of_node);
+ else
+ ret = mdiobus_register(new_bus);
+
if (ret)
mdio_gpio_bus_deinit(&pdev->dev);
@@ -187,112 +217,30 @@ static int __devexit mdio_gpio_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF_GPIO
-
-static int __devinit mdio_ofgpio_probe(struct platform_device *ofdev)
-{
- struct mdio_gpio_platform_data *pdata;
- struct mii_bus *new_bus;
- int ret;
-
- pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- ret = of_get_gpio(ofdev->dev.of_node, 0);
- if (ret < 0)
- goto out_free;
- pdata->mdc = ret;
-
- ret = of_get_gpio(ofdev->dev.of_node, 1);
- if (ret < 0)
- goto out_free;
- pdata->mdio = ret;
-
- new_bus = mdio_gpio_bus_init(&ofdev->dev, pdata, pdata->mdc);
- if (!new_bus)
- goto out_free;
-
- ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
- if (ret)
- mdio_gpio_bus_deinit(&ofdev->dev);
-
- return ret;
-
-out_free:
- kfree(pdata);
- return -ENODEV;
-}
-
-static int __devexit mdio_ofgpio_remove(struct platform_device *ofdev)
-{
- mdio_gpio_bus_destroy(&ofdev->dev);
- kfree(ofdev->dev.platform_data);
-
- return 0;
-}
-
-static struct of_device_id mdio_ofgpio_match[] = {
- {
- .compatible = "virtual,mdio-gpio",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
-
-static struct platform_driver mdio_ofgpio_driver = {
- .driver = {
- .name = "mdio-ofgpio",
- .owner = THIS_MODULE,
- .of_match_table = mdio_ofgpio_match,
- },
- .probe = mdio_ofgpio_probe,
- .remove = __devexit_p(mdio_ofgpio_remove),
+static struct of_device_id mdio_gpio_of_match[] = {
+ { .compatible = "virtual,mdio-gpio", },
+ { /* sentinel */ }
};
-static inline int __init mdio_ofgpio_init(void)
-{
- return platform_driver_register(&mdio_ofgpio_driver);
-}
-
-static inline void mdio_ofgpio_exit(void)
-{
- platform_driver_unregister(&mdio_ofgpio_driver);
-}
-#else
-static inline int __init mdio_ofgpio_init(void) { return 0; }
-static inline void mdio_ofgpio_exit(void) { }
-#endif /* CONFIG_OF_GPIO */
-
static struct platform_driver mdio_gpio_driver = {
.probe = mdio_gpio_probe,
.remove = __devexit_p(mdio_gpio_remove),
.driver = {
.name = "mdio-gpio",
.owner = THIS_MODULE,
+ .of_match_table = mdio_gpio_of_match,
},
};
static int __init mdio_gpio_init(void)
{
- int ret;
-
- ret = mdio_ofgpio_init();
- if (ret)
- return ret;
-
- ret = platform_driver_register(&mdio_gpio_driver);
- if (ret)
- mdio_ofgpio_exit();
-
- return ret;
+ return platform_driver_register(&mdio_gpio_driver);
}
module_init(mdio_gpio_init);
static void __exit mdio_gpio_exit(void)
{
platform_driver_unregister(&mdio_gpio_driver);
- mdio_ofgpio_exit();
}
module_exit(mdio_gpio_exit);
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
new file mode 100644
index 000000000000..9061ba622ac4
--- /dev/null
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -0,0 +1,171 @@
+/*
+ * Simple memory-mapped device MDIO MUX driver
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/phy.h>
+#include <linux/mdio-mux.h>
+
+struct mdio_mux_mmioreg_state {
+ void *mux_handle;
+ phys_addr_t phys;
+ uint8_t mask;
+};
+
+/*
+ * MDIO multiplexing switch function
+ *
+ * This function is called by the mdio-mux layer when it thinks the mdio bus
+ * multiplexer needs to switch.
+ *
+ * 'current_child' is the current value of the mux register (masked via
+ * s->mask).
+ *
+ * 'desired_child' is the value of the 'reg' property of the target child MDIO
+ * node.
+ *
+ * The first time this function is called, current_child == -1.
+ *
+ * If current_child == desired_child, then the mux is already set to the
+ * correct bus.
+ */
+static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child,
+ void *data)
+{
+ struct mdio_mux_mmioreg_state *s = data;
+
+ if (current_child ^ desired_child) {
+ void *p = ioremap(s->phys, 1);
+ uint8_t x, y;
+
+ if (!p)
+ return -ENOMEM;
+
+ x = ioread8(p);
+ y = (x & ~s->mask) | desired_child;
+ if (x != y) {
+ iowrite8((x & ~s->mask) | desired_child, p);
+ pr_debug("%s: %02x -> %02x\n", __func__, x, y);
+ }
+
+ iounmap(p);
+ }
+
+ return 0;
+}
+
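
Editor's note: the switch function above is a plain read-modify-write under a mask. A compilable sketch of just the register arithmetic (plain C, no MMIO; the register value and mask are made up for illustration):

#include <stdio.h>
#include <stdint.h>

/* Clear the masked field, then or-in the desired child value -- the same
 * computation mdio_mux_mmioreg_switch_fn() applies via ioread8/iowrite8. */
static uint8_t mux_select(uint8_t regval, uint8_t mask, uint8_t desired_child)
{
	return (uint8_t)((regval & ~mask) | desired_child);
}

int main(void)
{
	uint8_t reg = 0xa5;   /* current register contents         */
	uint8_t mask = 0x07;  /* "mux-mask": bits owned by the mux */

	printf("0x%02x -> 0x%02x\n", reg, mux_select(reg, mask, 0x03));
	return 0;
}
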
+static int __devinit mdio_mux_mmioreg_probe(struct platform_device *pdev)
+{
+ struct device_node *np2, *np = pdev->dev.of_node;
+ struct mdio_mux_mmioreg_state *s;
+ struct resource res;
+ const __be32 *iprop;
+ int len, ret;
+
+ dev_dbg(&pdev->dev, "probing node %s\n", np->full_name);
+
+ s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
+ dev_err(&pdev->dev, "could not obtain memory map for node %s\n",
+ np->full_name);
+ return ret;
+ }
+ s->phys = res.start;
+
+ if (resource_size(&res) != sizeof(uint8_t)) {
+ dev_err(&pdev->dev, "only 8-bit registers are supported\n");
+ return -EINVAL;
+ }
+
+ iprop = of_get_property(np, "mux-mask", &len);
+ if (!iprop || len != sizeof(uint32_t)) {
+ dev_err(&pdev->dev, "missing or invalid mux-mask property\n");
+ return -ENODEV;
+ }
+ if (be32_to_cpup(iprop) > 255) {
+ dev_err(&pdev->dev, "only 8-bit registers are supported\n");
+ return -EINVAL;
+ }
+ s->mask = be32_to_cpup(iprop);
+
+ /*
+ * Verify that the 'reg' property of each child MDIO bus does not
+ * set any bits outside of the 'mask'.
+ */
+ for_each_available_child_of_node(np, np2) {
+ iprop = of_get_property(np2, "reg", &len);
+ if (!iprop || len != sizeof(uint32_t)) {
+ dev_err(&pdev->dev, "mdio-mux child node %s is "
+ "missing a 'reg' property\n", np2->full_name);
+ return -ENODEV;
+ }
+ if (be32_to_cpup(iprop) & ~s->mask) {
+ dev_err(&pdev->dev, "mdio-mux child node %s has "
+ "a 'reg' value with unmasked bits\n",
+ np2->full_name);
+ return -ENODEV;
+ }
+ }
+
+ ret = mdio_mux_init(&pdev->dev, mdio_mux_mmioreg_switch_fn,
+ &s->mux_handle, s);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register mdio-mux bus %s\n",
+ np->full_name);
+ return ret;
+ }
+
+ pdev->dev.platform_data = s;
+
+ return 0;
+}
+
+static int __devexit mdio_mux_mmioreg_remove(struct platform_device *pdev)
+{
+ struct mdio_mux_mmioreg_state *s = dev_get_platdata(&pdev->dev);
+
+ mdio_mux_uninit(s->mux_handle);
+
+ return 0;
+}
+
+static struct of_device_id mdio_mux_mmioreg_match[] = {
+ {
+ .compatible = "mdio-mux-mmioreg",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mdio_mux_mmioreg_match);
+
+static struct platform_driver mdio_mux_mmioreg_driver = {
+ .driver = {
+ .name = "mdio-mux-mmioreg",
+ .owner = THIS_MODULE,
+ .of_match_table = mdio_mux_mmioreg_match,
+ },
+ .probe = mdio_mux_mmioreg_probe,
+ .remove = __devexit_p(mdio_mux_mmioreg_remove),
+};
+
+module_platform_driver(mdio_mux_mmioreg_driver);
+
+MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
+MODULE_DESCRIPTION("Memory-mapped device MDIO MUX driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7ca2ff97c368..ef9ea9248223 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1035,66 +1035,6 @@ static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
bus->write(bus, addr, MII_MMD_DATA, data);
}
-static u32 phy_eee_to_adv(u16 eee_adv)
-{
- u32 adv = 0;
-
- if (eee_adv & MDIO_EEE_100TX)
- adv |= ADVERTISED_100baseT_Full;
- if (eee_adv & MDIO_EEE_1000T)
- adv |= ADVERTISED_1000baseT_Full;
- if (eee_adv & MDIO_EEE_10GT)
- adv |= ADVERTISED_10000baseT_Full;
- if (eee_adv & MDIO_EEE_1000KX)
- adv |= ADVERTISED_1000baseKX_Full;
- if (eee_adv & MDIO_EEE_10GKX4)
- adv |= ADVERTISED_10000baseKX4_Full;
- if (eee_adv & MDIO_EEE_10GKR)
- adv |= ADVERTISED_10000baseKR_Full;
-
- return adv;
-}
-
-static u32 phy_eee_to_supported(u16 eee_caported)
-{
- u32 supported = 0;
-
- if (eee_caported & MDIO_EEE_100TX)
- supported |= SUPPORTED_100baseT_Full;
- if (eee_caported & MDIO_EEE_1000T)
- supported |= SUPPORTED_1000baseT_Full;
- if (eee_caported & MDIO_EEE_10GT)
- supported |= SUPPORTED_10000baseT_Full;
- if (eee_caported & MDIO_EEE_1000KX)
- supported |= SUPPORTED_1000baseKX_Full;
- if (eee_caported & MDIO_EEE_10GKX4)
- supported |= SUPPORTED_10000baseKX4_Full;
- if (eee_caported & MDIO_EEE_10GKR)
- supported |= SUPPORTED_10000baseKR_Full;
-
- return supported;
-}
-
-static u16 phy_adv_to_eee(u32 adv)
-{
- u16 reg = 0;
-
- if (adv & ADVERTISED_100baseT_Full)
- reg |= MDIO_EEE_100TX;
- if (adv & ADVERTISED_1000baseT_Full)
- reg |= MDIO_EEE_1000T;
- if (adv & ADVERTISED_10000baseT_Full)
- reg |= MDIO_EEE_10GT;
- if (adv & ADVERTISED_1000baseKX_Full)
- reg |= MDIO_EEE_1000KX;
- if (adv & ADVERTISED_10000baseKX4_Full)
- reg |= MDIO_EEE_10GKX4;
- if (adv & ADVERTISED_10000baseKR_Full)
- reg |= MDIO_EEE_10GKR;
-
- return reg;
-}
-
/**
* phy_init_eee - init and check the EEE feature
* @phydev: target phy_device struct
@@ -1132,7 +1072,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
if (eee_cap < 0)
return eee_cap;
- cap = phy_eee_to_supported(eee_cap);
+ cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
if (!cap)
goto eee_exit;
@@ -1149,8 +1089,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
if (eee_adv < 0)
return eee_adv;
- adv = phy_eee_to_adv(eee_adv);
- lp = phy_eee_to_adv(eee_lp);
+ adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
+ lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
idx = phy_find_setting(phydev->speed, phydev->duplex);
if ((lp & adv & settings[idx].setting))
goto eee_exit;
@@ -1210,21 +1150,21 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
MDIO_MMD_PCS, phydev->addr);
if (val < 0)
return val;
- data->supported = phy_eee_to_supported(val);
+ data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
/* Get advertisement EEE */
val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
MDIO_MMD_AN, phydev->addr);
if (val < 0)
return val;
- data->advertised = phy_eee_to_adv(val);
+ data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
/* Get LP advertisement EEE */
val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
MDIO_MMD_AN, phydev->addr);
if (val < 0)
return val;
- data->lp_advertised = phy_eee_to_adv(val);
+ data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
return 0;
}
@@ -1241,7 +1181,7 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
int val;
- val = phy_adv_to_eee(data->advertised);
+ val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
phydev->addr, val);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 5c0557222f20..eb3f5cefeba3 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -94,6 +94,18 @@ struct ppp_file {
#define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel)
/*
+ * Data structure holding the primary network stats, for which
+ * we want 64-bit storage. Other network stats are kept in
+ * dev->stats of the ppp structure.
+ */
+struct ppp_link_stats {
+ u64 rx_packets;
+ u64 tx_packets;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
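
Editor's note: dev->stats counters are unsigned long, which is 32 bits wide on 32-bit machines and wraps after about 4 GB of traffic; the dedicated u64 fields above avoid that. A small stand-alone demonstration of the wrap, using uint32_t to stand in for a 32-bit unsigned long counter:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bytes32 = 0;  /* stands in for a 32-bit unsigned long counter */
	uint64_t bytes64 = 0;  /* stands in for ppp_link_stats::rx_bytes       */
	uint64_t i;

	/* Account roughly five billion bytes in 1500-byte frames. */
	for (i = 0; i < 5000000000ULL; i += 1500) {
		bytes32 += 1500;
		bytes64 += 1500;
	}

	printf("32-bit counter: %u (wrapped)\n", bytes32);
	printf("64-bit counter: %llu\n", (unsigned long long)bytes64);
	return 0;
}
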
+/*
* Data structure describing one ppp unit.
* A ppp unit corresponds to a ppp network interface device
* and represents a multilink bundle.
@@ -136,6 +148,7 @@ struct ppp {
unsigned pass_len, active_len;
#endif /* CONFIG_PPP_FILTER */
struct net *ppp_net; /* the net we belong to */
+ struct ppp_link_stats stats64; /* 64 bit network stats */
};
/*
@@ -1021,9 +1034,34 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
}
+struct rtnl_link_stats64*
+ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
+{
+ struct ppp *ppp = netdev_priv(dev);
+
+ ppp_recv_lock(ppp);
+ stats64->rx_packets = ppp->stats64.rx_packets;
+ stats64->rx_bytes = ppp->stats64.rx_bytes;
+ ppp_recv_unlock(ppp);
+
+ ppp_xmit_lock(ppp);
+ stats64->tx_packets = ppp->stats64.tx_packets;
+ stats64->tx_bytes = ppp->stats64.tx_bytes;
+ ppp_xmit_unlock(ppp);
+
+ stats64->rx_errors = dev->stats.rx_errors;
+ stats64->tx_errors = dev->stats.tx_errors;
+ stats64->rx_dropped = dev->stats.rx_dropped;
+ stats64->tx_dropped = dev->stats.tx_dropped;
+ stats64->rx_length_errors = dev->stats.rx_length_errors;
+
+ return stats64;
+}
+
static const struct net_device_ops ppp_netdev_ops = {
- .ndo_start_xmit = ppp_start_xmit,
- .ndo_do_ioctl = ppp_net_ioctl,
+ .ndo_start_xmit = ppp_start_xmit,
+ .ndo_do_ioctl = ppp_net_ioctl,
+ .ndo_get_stats64 = ppp_get_stats64,
};
static void ppp_setup(struct net_device *dev)
@@ -1157,8 +1195,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
#endif /* CONFIG_PPP_FILTER */
}
- ++ppp->dev->stats.tx_packets;
- ppp->dev->stats.tx_bytes += skb->len - 2;
+ ++ppp->stats64.tx_packets;
+ ppp->stats64.tx_bytes += skb->len - 2;
switch (proto) {
case PPP_IP:
@@ -1745,8 +1783,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
break;
}
- ++ppp->dev->stats.rx_packets;
- ppp->dev->stats.rx_bytes += skb->len - 2;
+ ++ppp->stats64.rx_packets;
+ ppp->stats64.rx_bytes += skb->len - 2;
npi = proto_to_npindex(proto);
if (npi < 0) {
@@ -2570,12 +2608,12 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
struct slcompress *vj = ppp->vj;
memset(st, 0, sizeof(*st));
- st->p.ppp_ipackets = ppp->dev->stats.rx_packets;
+ st->p.ppp_ipackets = ppp->stats64.rx_packets;
st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
- st->p.ppp_ibytes = ppp->dev->stats.rx_bytes;
- st->p.ppp_opackets = ppp->dev->stats.tx_packets;
+ st->p.ppp_ibytes = ppp->stats64.rx_bytes;
+ st->p.ppp_opackets = ppp->stats64.tx_packets;
st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
- st->p.ppp_obytes = ppp->dev->stats.tx_bytes;
+ st->p.ppp_obytes = ppp->stats64.tx_bytes;
if (!vj)
return;
st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
index 6a7260b03a1e..6b08bd419fba 100644
--- a/drivers/net/team/Kconfig
+++ b/drivers/net/team/Kconfig
@@ -21,7 +21,7 @@ config NET_TEAM_MODE_BROADCAST
---help---
Basic mode where packets are transmitted always by all suitable ports.
- All added ports are setup to have team's mac address.
+ All added ports are set up to have the team's device address.
To compile this team mode as a module, choose M here: the module
will be called team_mode_broadcast.
@@ -33,7 +33,7 @@ config NET_TEAM_MODE_ROUNDROBIN
Basic mode where port used for transmitting packets is selected in
round-robin fashion using packet counter.
- All added ports are setup to have team's mac address.
+ All added ports are set up to have the team's device address.
To compile this team mode as a module, choose M here: the module
will be called team_mode_roundrobin.
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index f8cd61f449a4..5c7547c4f802 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -54,29 +54,29 @@ static struct team_port *team_port_get_rtnl(const struct net_device *dev)
}
/*
- * Since the ability to change mac address for open port device is tested in
+ * Since the ability to change the device address of an open port device is tested in
* team_port_add, this function can be called without control of return value
*/
-static int __set_port_mac(struct net_device *port_dev,
- const unsigned char *dev_addr)
+static int __set_port_dev_addr(struct net_device *port_dev,
+ const unsigned char *dev_addr)
{
struct sockaddr addr;
- memcpy(addr.sa_data, dev_addr, ETH_ALEN);
- addr.sa_family = ARPHRD_ETHER;
+ memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
+ addr.sa_family = port_dev->type;
return dev_set_mac_address(port_dev, &addr);
}
-static int team_port_set_orig_mac(struct team_port *port)
+static int team_port_set_orig_dev_addr(struct team_port *port)
{
- return __set_port_mac(port->dev, port->orig.dev_addr);
+ return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}
-int team_port_set_team_mac(struct team_port *port)
+int team_port_set_team_dev_addr(struct team_port *port)
{
- return __set_port_mac(port->dev, port->team->dev->dev_addr);
+ return __set_port_dev_addr(port->dev, port->team->dev->dev_addr);
}
-EXPORT_SYMBOL(team_port_set_team_mac);
+EXPORT_SYMBOL(team_port_set_team_dev_addr);
static void team_refresh_port_linkup(struct team_port *port)
{
@@ -658,6 +658,122 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
}
+/*************************************
+ * Multiqueue Tx port select override
+ *************************************/
+
+static int team_queue_override_init(struct team *team)
+{
+ struct list_head *listarr;
+ unsigned int queue_cnt = team->dev->num_tx_queues - 1;
+ unsigned int i;
+
+ if (!queue_cnt)
+ return 0;
+ listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL);
+ if (!listarr)
+ return -ENOMEM;
+ team->qom_lists = listarr;
+ for (i = 0; i < queue_cnt; i++)
+ INIT_LIST_HEAD(listarr++);
+ return 0;
+}
+
+static void team_queue_override_fini(struct team *team)
+{
+ kfree(team->qom_lists);
+}
+
+static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
+{
+ return &team->qom_lists[queue_id - 1];
+}
+
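
Editor's note: queue 0 is never overridden, so team_queue_override_init() allocates only num_tx_queues - 1 list heads and queue_id N maps to qom_lists[N - 1]. A tiny sketch of that indexing (stand-alone; the queue count is hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int num_tx_queues = 4;             /* hypothetical device        */
	unsigned int queue_cnt = num_tx_queues - 1; /* lists allocated by _init() */
	unsigned int queue_id;

	/* queue_id 0 bypasses the override; ids 1..num_tx_queues-1 get a list. */
	for (queue_id = 1; queue_id < num_tx_queues; queue_id++)
		printf("queue_id %u -> qom_lists[%u] (of %u)\n",
		       queue_id, queue_id - 1, queue_cnt);
	return 0;
}
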
+/*
+ * Note: already called with rcu_read_lock held.
+ */
+static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
+{
+ struct list_head *qom_list;
+ struct team_port *port;
+
+ if (!team->queue_override_enabled || !skb->queue_mapping)
+ return false;
+ qom_list = __team_get_qom_list(team, skb->queue_mapping);
+ list_for_each_entry_rcu(port, qom_list, qom_list) {
+ if (!team_dev_queue_xmit(team, port, skb))
+ return true;
+ }
+ return false;
+}
+
+static void __team_queue_override_port_del(struct team *team,
+ struct team_port *port)
+{
+ list_del_rcu(&port->qom_list);
+ synchronize_rcu();
+ INIT_LIST_HEAD(&port->qom_list);
+}
+
+static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
+ struct team_port *cur)
+{
+ if (port->priority < cur->priority)
+ return true;
+ if (port->priority > cur->priority)
+ return false;
+ if (port->index < cur->index)
+ return true;
+ return false;
+}
+
+static void __team_queue_override_port_add(struct team *team,
+ struct team_port *port)
+{
+ struct team_port *cur;
+ struct list_head *qom_list;
+ struct list_head *node;
+
+ if (!port->queue_id || !team_port_enabled(port))
+ return;
+
+ qom_list = __team_get_qom_list(team, port->queue_id);
+ node = qom_list;
+ list_for_each_entry(cur, qom_list, qom_list) {
+ if (team_queue_override_port_has_gt_prio_than(port, cur))
+ break;
+ node = &cur->qom_list;
+ }
+ list_add_tail_rcu(&port->qom_list, node);
+}
+
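
Editor's note: as written, the helper above treats a numerically smaller priority value as the higher-precedence one, with the lower port index breaking ties, and the insertion loop keeps each qom list sorted that way. A stand-alone sketch of the same ordering using qsort() over made-up ports:

#include <stdio.h>
#include <stdlib.h>

struct port {
	const char *name;
	int priority;   /* per-port "priority" option (s32) */
	int index;      /* position in the team's port list */
};

/* Numerically smaller priority first; ties broken by lower index --
 * mirrors team_queue_override_port_has_gt_prio_than() above. */
static int cmp(const void *a, const void *b)
{
	const struct port *pa = a, *pb = b;

	if (pa->priority != pb->priority)
		return pa->priority - pb->priority;
	return pa->index - pb->index;
}

int main(void)
{
	struct port ports[] = {
		{ "eth1", 0, 1 }, { "eth2", 10, 2 }, { "eth3", 10, 0 },
	};
	size_t i, n = sizeof(ports) / sizeof(ports[0]);

	qsort(ports, n, sizeof(ports[0]), cmp);
	for (i = 0; i < n; i++)
		printf("%s (prio %d, index %d)\n",
		       ports[i].name, ports[i].priority, ports[i].index);
	return 0;
}
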
+static void __team_queue_override_enabled_check(struct team *team)
+{
+ struct team_port *port;
+ bool enabled = false;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ if (!list_empty(&port->qom_list)) {
+ enabled = true;
+ break;
+ }
+ }
+ if (enabled == team->queue_override_enabled)
+ return;
+ netdev_dbg(team->dev, "%s queue override\n",
+ enabled ? "Enabling" : "Disabling");
+ team->queue_override_enabled = enabled;
+}
+
+static void team_queue_override_port_refresh(struct team *team,
+ struct team_port *port)
+{
+ __team_queue_override_port_del(team, port);
+ __team_queue_override_port_add(team, port);
+ __team_queue_override_enabled_check(team);
+}
+
+
/****************
* Port handling
****************/
@@ -688,6 +804,7 @@ static void team_port_enable(struct team *team,
hlist_add_head_rcu(&port->hlist,
team_port_index_hash(team, port->index));
team_adjust_ops(team);
+ team_queue_override_port_refresh(team, port);
if (team->ops.port_enabled)
team->ops.port_enabled(team, port);
}
@@ -716,6 +833,7 @@ static void team_port_disable(struct team *team,
hlist_del_rcu(&port->hlist);
__reconstruct_port_hlist(team, port->index);
port->index = -1;
+ team_queue_override_port_refresh(team, port);
__team_adjust_ops(team, team->en_port_count - 1);
/*
* Wait until readers see adjusted ops. This ensures that
@@ -849,6 +967,8 @@ static struct netpoll_info *team_netpoll_info(struct team *team)
#endif
static void __team_port_change_port_added(struct team_port *port, bool linkup);
+static int team_dev_type_check_change(struct net_device *dev,
+ struct net_device *port_dev);
static int team_port_add(struct team *team, struct net_device *port_dev)
{
@@ -857,9 +977,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
char *portname = port_dev->name;
int err;
- if (port_dev->flags & IFF_LOOPBACK ||
- port_dev->type != ARPHRD_ETHER) {
- netdev_err(dev, "Device %s is of an unsupported type\n",
+ if (port_dev->flags & IFF_LOOPBACK) {
+ netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
portname);
return -EINVAL;
}
@@ -870,6 +989,17 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
return -EBUSY;
}
+ if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
+ vlan_uses_dev(dev)) {
+ netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
+ portname);
+ return -EPERM;
+ }
+
+ err = team_dev_type_check_change(dev, port_dev);
+ if (err)
+ return err;
+
if (port_dev->flags & IFF_UP) {
netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
portname);
@@ -883,6 +1013,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
port->dev = port_dev;
port->team = team;
+ INIT_LIST_HEAD(&port->qom_list);
port->orig.mtu = port_dev->mtu;
err = dev_set_mtu(port_dev, dev->mtu);
@@ -891,7 +1022,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_set_mtu;
}
- memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);
+ memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
err = team_port_enter(team, port);
if (err) {
@@ -972,7 +1103,7 @@ err_vids_add:
err_dev_open:
team_port_leave(team, port);
- team_port_set_orig_mac(port);
+ team_port_set_orig_dev_addr(port);
err_port_enter:
dev_set_mtu(port_dev, port->orig.mtu);
@@ -1010,7 +1141,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
vlan_vids_del_by_dev(port_dev, dev);
dev_close(port_dev);
team_port_leave(team, port);
- team_port_set_orig_mac(port);
+ team_port_set_orig_dev_addr(port);
dev_set_mtu(port_dev, port->orig.mtu);
synchronize_rcu();
kfree(port);
@@ -1095,6 +1226,49 @@ static int team_user_linkup_en_option_set(struct team *team,
return 0;
}
+static int team_priority_option_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct team_port *port = ctx->info->port;
+
+ ctx->data.s32_val = port->priority;
+ return 0;
+}
+
+static int team_priority_option_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct team_port *port = ctx->info->port;
+
+ port->priority = ctx->data.s32_val;
+ team_queue_override_port_refresh(team, port);
+ return 0;
+}
+
+static int team_queue_id_option_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct team_port *port = ctx->info->port;
+
+ ctx->data.u32_val = port->queue_id;
+ return 0;
+}
+
+static int team_queue_id_option_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct team_port *port = ctx->info->port;
+
+ if (port->queue_id == ctx->data.u32_val)
+ return 0;
+ if (ctx->data.u32_val >= team->dev->real_num_tx_queues)
+ return -EINVAL;
+ port->queue_id = ctx->data.u32_val;
+ team_queue_override_port_refresh(team, port);
+ return 0;
+}
+
+
static const struct team_option team_options[] = {
{
.name = "mode",
@@ -1123,6 +1297,20 @@ static const struct team_option team_options[] = {
.getter = team_user_linkup_en_option_get,
.setter = team_user_linkup_en_option_set,
},
+ {
+ .name = "priority",
+ .type = TEAM_OPTION_TYPE_S32,
+ .per_port = true,
+ .getter = team_priority_option_get,
+ .setter = team_priority_option_set,
+ },
+ {
+ .name = "queue_id",
+ .type = TEAM_OPTION_TYPE_U32,
+ .per_port = true,
+ .getter = team_queue_id_option_get,
+ .setter = team_queue_id_option_set,
+ },
};
static struct lock_class_key team_netdev_xmit_lock_key;
@@ -1158,6 +1346,9 @@ static int team_init(struct net_device *dev)
for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
INIT_HLIST_HEAD(&team->en_port_hlist[i]);
INIT_LIST_HEAD(&team->port_list);
+ err = team_queue_override_init(team);
+ if (err)
+ goto err_team_queue_override_init;
team_adjust_ops(team);
@@ -1173,6 +1364,8 @@ static int team_init(struct net_device *dev)
return 0;
err_options_register:
+ team_queue_override_fini(team);
+err_team_queue_override_init:
free_percpu(team->pcpu_stats);
return err;
@@ -1190,6 +1383,7 @@ static void team_uninit(struct net_device *dev)
__team_change_mode(team, NULL); /* cleanup */
__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+ team_queue_override_fini(team);
mutex_unlock(&team->lock);
}
@@ -1219,10 +1413,12 @@ static int team_close(struct net_device *dev)
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct team *team = netdev_priv(dev);
- bool tx_success = false;
+ bool tx_success;
unsigned int len = skb->len;
- tx_success = team->ops.transmit(team, skb);
+ tx_success = team_queue_override_transmit(team, skb);
+ if (!tx_success)
+ tx_success = team->ops.transmit(team, skb);
if (tx_success) {
struct team_pcpu_stats *pcpu_stats;
@@ -1296,17 +1492,18 @@ static void team_set_rx_mode(struct net_device *dev)
static int team_set_mac_address(struct net_device *dev, void *p)
{
+ struct sockaddr *addr = p;
struct team *team = netdev_priv(dev);
struct team_port *port;
- int err;
- err = eth_mac_addr(dev, p);
- if (err)
- return err;
+ if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
rcu_read_lock();
list_for_each_entry_rcu(port, &team->port_list, list)
- if (team->ops.port_change_mac)
- team->ops.port_change_mac(team, port);
+ if (team->ops.port_change_dev_addr)
+ team->ops.port_change_dev_addr(team, port);
rcu_read_unlock();
return 0;
}
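
Editor's note: for ARPHRD_ETHER teams the rewritten handler accepts only addresses that is_valid_ether_addr() would accept, i.e. unicast (multicast/broadcast bit clear) and not all-zero. A stand-alone sketch of that check (an illustration, not the kernel helper itself):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define ETH_ALEN 6

/* Unicast: the least significant bit of the first octet must be clear.
 * Non-zero: at least one octet must be set. These are the conditions the
 * kernel's is_valid_ether_addr() checks. */
static bool valid_ether_addr(const uint8_t *addr)
{
	bool all_zero = true;
	int i;

	if (addr[0] & 0x01)
		return false;
	for (i = 0; i < ETH_ALEN; i++)
		if (addr[i])
			all_zero = false;
	return !all_zero;
}

int main(void)
{
	uint8_t good[ETH_ALEN]  = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("good:  %d\n", valid_ether_addr(good));
	printf("mcast: %d\n", valid_ether_addr(mcast));
	return 0;
}
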
@@ -1537,6 +1734,45 @@ static const struct net_device_ops team_netdev_ops = {
* rt netlink interface
***********************/
+static void team_setup_by_port(struct net_device *dev,
+ struct net_device *port_dev)
+{
+ dev->header_ops = port_dev->header_ops;
+ dev->type = port_dev->type;
+ dev->hard_header_len = port_dev->hard_header_len;
+ dev->addr_len = port_dev->addr_len;
+ dev->mtu = port_dev->mtu;
+ memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
+ memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len);
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
+}
+
+static int team_dev_type_check_change(struct net_device *dev,
+ struct net_device *port_dev)
+{
+ struct team *team = netdev_priv(dev);
+ char *portname = port_dev->name;
+ int err;
+
+ if (dev->type == port_dev->type)
+ return 0;
+ if (!list_empty(&team->port_list)) {
+ netdev_err(dev, "Device %s is of different type\n", portname);
+ return -EBUSY;
+ }
+ err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
+ err = notifier_to_errno(err);
+ if (err) {
+ netdev_err(dev, "Refused to change device type\n");
+ return err;
+ }
+ dev_uc_flush(dev);
+ dev_mc_flush(dev);
+ team_setup_by_port(dev, port_dev);
+ call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
+ return 0;
+}
+
static void team_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -1651,7 +1887,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
if (!msg)
return -ENOMEM;
- hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
+ hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
&team_nl_family, 0, TEAM_CMD_NOOP);
if (!hdr) {
err = -EMSGSIZE;
@@ -1660,7 +1896,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
genlmsg_end(msg, hdr);
- return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+ return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
err_msg_put:
nlmsg_free(msg);
@@ -1717,7 +1953,7 @@ static int team_nl_send_generic(struct genl_info *info, struct team *team,
if (err < 0)
goto err_fill;
- err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
+ err = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
return err;
err_fill:
@@ -1726,11 +1962,11 @@ err_fill:
}
typedef int team_nl_send_func_t(struct sk_buff *skb,
- struct team *team, u32 pid);
+ struct team *team, u32 portid);
-static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 pid)
+static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
{
- return genlmsg_unicast(dev_net(team->dev), skb, pid);
+ return genlmsg_unicast(dev_net(team->dev), skb, portid);
}
static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
@@ -1790,6 +2026,12 @@ static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
goto nest_cancel;
break;
+ case TEAM_OPTION_TYPE_S32:
+ if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
+ goto nest_cancel;
+ if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
+ goto nest_cancel;
+ break;
default:
BUG();
}
@@ -1809,13 +2051,13 @@ nest_cancel:
}
static int __send_and_alloc_skb(struct sk_buff **pskb,
- struct team *team, u32 pid,
+ struct team *team, u32 portid,
team_nl_send_func_t *send_func)
{
int err;
if (*pskb) {
- err = send_func(*pskb, team, pid);
+ err = send_func(*pskb, team, portid);
if (err)
return err;
}
@@ -1825,7 +2067,7 @@ static int __send_and_alloc_skb(struct sk_buff **pskb,
return 0;
}
-static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq,
+static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
int flags, team_nl_send_func_t *send_func,
struct list_head *sel_opt_inst_list)
{
@@ -1842,11 +2084,11 @@ static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq,
struct team_option_inst, tmp_list);
start_again:
- err = __send_and_alloc_skb(&skb, team, pid, send_func);
+ err = __send_and_alloc_skb(&skb, team, portid, send_func);
if (err)
return err;
- hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI,
+ hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
TEAM_CMD_OPTIONS_GET);
if (!hdr)
return -EMSGSIZE;
@@ -1879,15 +2121,15 @@ start_again:
goto start_again;
send_done:
- nlh = nlmsg_put(skb, pid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
+ nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
if (!nlh) {
- err = __send_and_alloc_skb(&skb, team, pid, send_func);
+ err = __send_and_alloc_skb(&skb, team, portid, send_func);
if (err)
goto errout;
goto send_done;
}
- return send_func(skb, team, pid);
+ return send_func(skb, team, portid);
nla_put_failure:
err = -EMSGSIZE;
@@ -1910,7 +2152,7 @@ static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
list_for_each_entry(opt_inst, &team->option_inst_list, list)
list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
- err = team_nl_send_options_get(team, info->snd_pid, info->snd_seq,
+ err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
NLM_F_ACK, team_nl_send_unicast,
&sel_opt_inst_list);
@@ -1978,6 +2220,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
case NLA_FLAG:
opt_type = TEAM_OPTION_TYPE_BOOL;
break;
+ case NLA_S32:
+ opt_type = TEAM_OPTION_TYPE_S32;
+ break;
default:
goto team_put;
}
@@ -2034,6 +2279,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
case TEAM_OPTION_TYPE_BOOL:
ctx.data.bool_val = attr_data ? true : false;
break;
+ case TEAM_OPTION_TYPE_S32:
+ ctx.data.s32_val = nla_get_s32(attr_data);
+ break;
default:
BUG();
}
@@ -2058,7 +2306,7 @@ team_put:
}
static int team_nl_fill_port_list_get(struct sk_buff *skb,
- u32 pid, u32 seq, int flags,
+ u32 portid, u32 seq, int flags,
struct team *team,
bool fillall)
{
@@ -2066,7 +2314,7 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
void *hdr;
struct team_port *port;
- hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+ hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags,
TEAM_CMD_PORT_LIST_GET);
if (!hdr)
return -EMSGSIZE;
@@ -2115,7 +2363,7 @@ static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
struct genl_info *info, int flags,
struct team *team)
{
- return team_nl_fill_port_list_get(skb, info->snd_pid,
+ return team_nl_fill_port_list_get(skb, info->snd_portid,
info->snd_seq, NLM_F_ACK,
team, true);
}
@@ -2168,7 +2416,7 @@ static struct genl_multicast_group team_change_event_mcgrp = {
};
static int team_nl_send_multicast(struct sk_buff *skb,
- struct team *team, u32 pid)
+ struct team *team, u32 portid)
{
return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
team_change_event_mcgrp.id, GFP_KERNEL);
@@ -2246,7 +2494,7 @@ static void __team_options_change_check(struct team *team)
list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
}
err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
- if (err)
+ if (err && err != -ESRCH)
netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
err);
}
@@ -2275,9 +2523,9 @@ static void __team_port_change_send(struct team_port *port, bool linkup)
send_event:
err = team_nl_send_event_port_list_get(port->team);
- if (err)
- netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
- port->dev->name);
+ if (err && err != -ESRCH)
+ netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
+ port->dev->name, err);
}
diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c
index c96e4d2967f0..9db0171e9366 100644
--- a/drivers/net/team/team_mode_broadcast.c
+++ b/drivers/net/team/team_mode_broadcast.c
@@ -48,18 +48,18 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb)
static int bc_port_enter(struct team *team, struct team_port *port)
{
- return team_port_set_team_mac(port);
+ return team_port_set_team_dev_addr(port);
}
-static void bc_port_change_mac(struct team *team, struct team_port *port)
+static void bc_port_change_dev_addr(struct team *team, struct team_port *port)
{
- team_port_set_team_mac(port);
+ team_port_set_team_dev_addr(port);
}
static const struct team_mode_ops bc_mode_ops = {
.transmit = bc_transmit,
.port_enter = bc_port_enter,
- .port_change_mac = bc_port_change_mac,
+ .port_change_dev_addr = bc_port_change_dev_addr,
};
static const struct team_mode bc_mode = {
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index ad7ed0ec544c..105135aa8f05 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -66,18 +66,18 @@ drop:
static int rr_port_enter(struct team *team, struct team_port *port)
{
- return team_port_set_team_mac(port);
+ return team_port_set_team_dev_addr(port);
}
-static void rr_port_change_mac(struct team *team, struct team_port *port)
+static void rr_port_change_dev_addr(struct team *team, struct team_port *port)
{
- team_port_set_team_mac(port);
+ team_port_set_team_dev_addr(port);
}
static const struct team_mode_ops rr_mode_ops = {
.transmit = rr_transmit,
.port_enter = rr_port_enter,
- .port_change_mac = rr_port_change_mac,
+ .port_change_dev_addr = rr_port_change_dev_addr,
};
static const struct team_mode rr_mode = {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 3a16d4fdaa05..0873cdcf39be 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -68,6 +68,7 @@
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
+#include <net/cls_cgroup.h>
#include <asm/uaccess.h>
@@ -120,8 +121,8 @@ struct tun_sock;
struct tun_struct {
struct tun_file *tfile;
unsigned int flags;
- uid_t owner;
- gid_t group;
+ kuid_t owner;
+ kgid_t group;
struct net_device *dev;
netdev_features_t set_features;
@@ -1031,8 +1032,8 @@ static void tun_setup(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
- tun->owner = -1;
- tun->group = -1;
+ tun->owner = INVALID_UID;
+ tun->group = INVALID_GID;
dev->ethtool_ops = &tun_ethtool_ops;
dev->destructor = tun_free_netdev;
@@ -1155,14 +1156,20 @@ static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tun_struct *tun = netdev_priv(to_net_dev(dev));
- return sprintf(buf, "%d\n", tun->owner);
+ return uid_valid(tun->owner) ?
+ sprintf(buf, "%u\n",
+ from_kuid_munged(current_user_ns(), tun->owner)) :
+ sprintf(buf, "-1\n");
}
static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tun_struct *tun = netdev_priv(to_net_dev(dev));
- return sprintf(buf, "%d\n", tun->group);
+ return gid_valid(tun->group) ?
+ sprintf(buf, "%u\n",
+ from_kgid_munged(current_user_ns(), tun->group)):
+ sprintf(buf, "-1\n");
}
static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
@@ -1189,8 +1196,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
else
return -EINVAL;
- if (((tun->owner != -1 && cred->euid != tun->owner) ||
- (tun->group != -1 && !in_egroup_p(tun->group))) &&
+ if (((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
+ (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
!capable(CAP_NET_ADMIN))
return -EPERM;
err = security_tun_dev_attach(tun->socket.sk);
@@ -1374,6 +1381,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
void __user* argp = (void __user*)arg;
struct sock_fprog fprog;
struct ifreq ifr;
+ kuid_t owner;
+ kgid_t group;
int sndbuf;
int vnet_hdr_sz;
int ret;
@@ -1447,16 +1456,26 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
case TUNSETOWNER:
/* Set owner of the device */
- tun->owner = (uid_t) arg;
-
- tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner);
+ owner = make_kuid(current_user_ns(), arg);
+ if (!uid_valid(owner)) {
+ ret = -EINVAL;
+ break;
+ }
+ tun->owner = owner;
+ tun_debug(KERN_INFO, tun, "owner set to %d\n",
+ from_kuid(&init_user_ns, tun->owner));
break;
case TUNSETGROUP:
/* Set group of the device */
- tun->group= (gid_t) arg;
-
- tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group);
+ group = make_kgid(current_user_ns(), arg);
+ if (!gid_valid(group)) {
+ ret = -EINVAL;
+ break;
+ }
+ tun->group = group;
+ tun_debug(KERN_INFO, tun, "group set to %d\n",
+ from_kgid(&init_user_ns, tun->group));
break;
case TUNSETLINK:
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 32e31c5c5dc6..33ab824773c5 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -221,7 +221,8 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
/* Get the MAC address */
ret = asix_read_cmd(dev, AX88172_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
if (ret < 0) {
- dbg("read AX_CMD_READ_NODE_ID failed: %d", ret);
+ netdev_dbg(dev->net, "read AX_CMD_READ_NODE_ID failed: %d\n",
+ ret);
goto out;
}
memcpy(dev->net->dev_addr, buf, ETH_ALEN);
@@ -303,7 +304,7 @@ static int ax88772_reset(struct usbnet *dev)
ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
if (ret < 0) {
- dbg("Select PHY #1 failed: %d", ret);
+ netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
goto out;
}
@@ -331,13 +332,13 @@ static int ax88772_reset(struct usbnet *dev)
msleep(150);
rx_ctl = asix_read_rx_ctl(dev);
- dbg("RX_CTL is 0x%04x after software reset", rx_ctl);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
ret = asix_write_rx_ctl(dev, 0x0000);
if (ret < 0)
goto out;
rx_ctl = asix_read_rx_ctl(dev);
- dbg("RX_CTL is 0x%04x setting to 0x0000", rx_ctl);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
ret = asix_sw_reset(dev, AX_SWRESET_PRL);
if (ret < 0)
@@ -364,7 +365,7 @@ static int ax88772_reset(struct usbnet *dev)
AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT,
AX88772_IPG2_DEFAULT, 0, NULL);
if (ret < 0) {
- dbg("Write IPG,IPG1,IPG2 failed: %d", ret);
+ netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
goto out;
}
@@ -381,10 +382,13 @@ static int ax88772_reset(struct usbnet *dev)
goto out;
rx_ctl = asix_read_rx_ctl(dev);
- dbg("RX_CTL is 0x%04x after all initializations", rx_ctl);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
+ rx_ctl);
rx_ctl = asix_read_medium_status(dev);
- dbg("Medium Status is 0x%04x after all initializations", rx_ctl);
+ netdev_dbg(dev->net,
+ "Medium Status is 0x%04x after all initializations\n",
+ rx_ctl);
return 0;
@@ -416,7 +420,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
/* Get the MAC address */
ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
if (ret < 0) {
- dbg("Failed to read MAC address: %d", ret);
+ netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
return ret;
}
memcpy(dev->net->dev_addr, buf, ETH_ALEN);
@@ -439,7 +443,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
/* Reset the PHY to normal operation mode */
ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
if (ret < 0) {
- dbg("Select PHY #1 failed: %d", ret);
+ netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
return ret;
}
@@ -459,7 +463,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
/* Read PHYID register *AFTER* the PHY was reset properly */
phyid = asix_get_phyid(dev);
- dbg("PHYID=0x%08x", phyid);
+ netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
if (dev->driver_info->flags & FLAG_FRAMING_AX) {
@@ -575,13 +579,13 @@ static int ax88178_reset(struct usbnet *dev)
u32 phyid;
asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status);
- dbg("GPIO Status: 0x%04x", status);
+ netdev_dbg(dev->net, "GPIO Status: 0x%04x\n", status);
asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL);
asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom);
asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL);
- dbg("EEPROM index 0x17 is 0x%04x", eeprom);
+ netdev_dbg(dev->net, "EEPROM index 0x17 is 0x%04x\n", eeprom);
if (eeprom == cpu_to_le16(0xffff)) {
data->phymode = PHY_MODE_MARVELL;
@@ -592,7 +596,7 @@ static int ax88178_reset(struct usbnet *dev)
data->ledmode = le16_to_cpu(eeprom) >> 8;
gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1;
}
- dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode);
+ netdev_dbg(dev->net, "GPIO0: %d, PhyMode: %d\n", gpio0, data->phymode);
/* Power up external GigaPHY through AX88178 GPIO pin */
asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40);
@@ -601,14 +605,14 @@ static int ax88178_reset(struct usbnet *dev)
asix_write_gpio(dev, 0x001c, 300);
asix_write_gpio(dev, 0x003c, 30);
} else {
- dbg("gpio phymode == 1 path");
+ netdev_dbg(dev->net, "gpio phymode == 1 path\n");
asix_write_gpio(dev, AX_GPIO_GPO1EN, 30);
asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30);
}
/* Read PHYID register *AFTER* powering up PHY */
phyid = asix_get_phyid(dev);
- dbg("PHYID=0x%08x", phyid);
+ netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
/* Set AX88178 to enable MII/GMII/RGMII interface for external PHY */
asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL);
@@ -770,7 +774,7 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
/* Get the MAC address */
ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
if (ret < 0) {
- dbg("Failed to read MAC address: %d", ret);
+ netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
return ret;
}
memcpy(dev->net->dev_addr, buf, ETH_ALEN);
@@ -930,6 +934,10 @@ static const struct usb_device_id products [] = {
USB_DEVICE (0x04f1, 0x3008),
.driver_info = (unsigned long) &ax8817x_info,
}, {
+ // Lenovo U2L100P 10/100
+ USB_DEVICE (0x17ef, 0x7203),
+ .driver_info = (unsigned long) &ax88772_info,
+}, {
// ASIX AX88772B 10/100
USB_DEVICE (0x0b95, 0x772b),
.driver_info = (unsigned long) &ax88772_info,
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 26c5bebd9eca..18d9579123ea 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -236,7 +236,8 @@ static void catc_rx_done(struct urb *urb)
}
if (status) {
- dbg("rx_done, status %d, length %d", status, urb->actual_length);
+ dev_dbg(&urb->dev->dev, "rx_done, status %d, length %d\n",
+ status, urb->actual_length);
return;
}
@@ -275,10 +276,11 @@ static void catc_rx_done(struct urb *urb)
if (atomic_read(&catc->recq_sz)) {
int state;
atomic_dec(&catc->recq_sz);
- dbg("getting extra packet");
+ netdev_dbg(catc->netdev, "getting extra packet\n");
urb->dev = catc->usbdev;
if ((state = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
- dbg("submit(rx_urb) status %d", state);
+ netdev_dbg(catc->netdev,
+ "submit(rx_urb) status %d\n", state);
}
} else {
clear_bit(RX_RUNNING, &catc->flags);
@@ -317,18 +319,20 @@ static void catc_irq_done(struct urb *urb)
return;
/* -EPIPE: should clear the halt */
default: /* error */
- dbg("irq_done, status %d, data %02x %02x.", status, data[0], data[1]);
+ dev_dbg(&urb->dev->dev,
+ "irq_done, status %d, data %02x %02x.\n",
+ status, data[0], data[1]);
goto resubmit;
}
if (linksts == LinkGood) {
netif_carrier_on(catc->netdev);
- dbg("link ok");
+ netdev_dbg(catc->netdev, "link ok\n");
}
if (linksts == LinkBad) {
netif_carrier_off(catc->netdev);
- dbg("link bad");
+ netdev_dbg(catc->netdev, "link bad\n");
}
if (hasdata) {
@@ -385,7 +389,7 @@ static void catc_tx_done(struct urb *urb)
int r, status = urb->status;
if (status == -ECONNRESET) {
- dbg("Tx Reset.");
+ dev_dbg(&urb->dev->dev, "Tx Reset.\n");
urb->status = 0;
catc->netdev->trans_start = jiffies;
catc->netdev->stats.tx_errors++;
@@ -395,7 +399,8 @@ static void catc_tx_done(struct urb *urb)
}
if (status) {
- dbg("tx_done, status %d, length %d", status, urb->actual_length);
+ dev_dbg(&urb->dev->dev, "tx_done, status %d, length %d\n",
+ status, urb->actual_length);
return;
}
@@ -511,7 +516,8 @@ static void catc_ctrl_done(struct urb *urb)
int status = urb->status;
if (status)
- dbg("ctrl_done, status %d, len %d.", status, urb->actual_length);
+ dev_dbg(&urb->dev->dev, "ctrl_done, status %d, len %d.\n",
+ status, urb->actual_length);
spin_lock_irqsave(&catc->ctrl_lock, flags);
@@ -667,7 +673,9 @@ static void catc_set_multicast_list(struct net_device *netdev)
f5u011_mchash_async(catc, catc->multicast);
if (catc->rxmode[0] != rx) {
catc->rxmode[0] = rx;
- dbg("Setting RX mode to %2.2X %2.2X", catc->rxmode[0], catc->rxmode[1]);
+ netdev_dbg(catc->netdev,
+ "Setting RX mode to %2.2X %2.2X\n",
+ catc->rxmode[0], catc->rxmode[1]);
f5u011_rxmode_async(catc, catc->rxmode);
}
}
@@ -766,6 +774,7 @@ static const struct net_device_ops catc_netdev_ops = {
static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
+ struct device *dev = &intf->dev;
struct usb_device *usbdev = interface_to_usbdev(intf);
struct net_device *netdev;
struct catc *catc;
@@ -774,7 +783,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
if (usb_set_interface(usbdev,
intf->altsetting->desc.bInterfaceNumber, 1)) {
- dev_err(&intf->dev, "Can't set altsetting 1.\n");
+ dev_err(dev, "Can't set altsetting 1.\n");
return -EIO;
}
@@ -817,7 +826,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 &&
le16_to_cpu(usbdev->descriptor.idProduct) == 0xa &&
le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) {
- dbg("Testing for f5u011");
+ dev_dbg(dev, "Testing for f5u011\n");
catc->is_f5u011 = 1;
atomic_set(&catc->recq_sz, 0);
pktsz = RX_PKT_SZ;
@@ -838,7 +847,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
catc->irq_buf, 2, catc_irq_done, catc, 1);
if (!catc->is_f5u011) {
- dbg("Checking memory size\n");
+ dev_dbg(dev, "Checking memory size\n");
i = 0x12345678;
catc_write_mem(catc, 0x7a80, &i, 4);
@@ -850,7 +859,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
case 0x12345678:
catc_set_reg(catc, TxBufCount, 8);
catc_set_reg(catc, RxBufCount, 32);
- dbg("64k Memory\n");
+ dev_dbg(dev, "64k Memory\n");
break;
default:
dev_warn(&intf->dev,
@@ -858,49 +867,49 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
case 0x87654321:
catc_set_reg(catc, TxBufCount, 4);
catc_set_reg(catc, RxBufCount, 16);
- dbg("32k Memory\n");
+ dev_dbg(dev, "32k Memory\n");
break;
}
- dbg("Getting MAC from SEEROM.");
+ dev_dbg(dev, "Getting MAC from SEEROM.\n");
catc_get_mac(catc, netdev->dev_addr);
- dbg("Setting MAC into registers.");
+ dev_dbg(dev, "Setting MAC into registers.\n");
for (i = 0; i < 6; i++)
catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]);
- dbg("Filling the multicast list.");
+ dev_dbg(dev, "Filling the multicast list.\n");
memset(broadcast, 0xff, 6);
catc_multicast(broadcast, catc->multicast);
catc_multicast(netdev->dev_addr, catc->multicast);
catc_write_mem(catc, 0xfa80, catc->multicast, 64);
- dbg("Clearing error counters.");
+ dev_dbg(dev, "Clearing error counters.\n");
for (i = 0; i < 8; i++)
catc_set_reg(catc, EthStats + i, 0);
catc->last_stats = jiffies;
- dbg("Enabling.");
+ dev_dbg(dev, "Enabling.\n");
catc_set_reg(catc, MaxBurst, RX_MAX_BURST);
catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits);
catc_set_reg(catc, LEDCtrl, LEDLink);
catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast);
} else {
- dbg("Performing reset\n");
+ dev_dbg(dev, "Performing reset\n");
catc_reset(catc);
catc_get_mac(catc, netdev->dev_addr);
- dbg("Setting RX Mode");
+ dev_dbg(dev, "Setting RX Mode\n");
catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast;
catc->rxmode[1] = 0;
f5u011_rxmode(catc, catc->rxmode);
}
- dbg("Init done.");
+ dev_dbg(dev, "Init done.\n");
printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n",
netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate",
usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr);
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 49ab45e17fe8..1e207f086b75 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -302,18 +302,9 @@ static const struct driver_info cx82310_info = {
.tx_fixup = cx82310_tx_fixup,
};
-#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
- USB_DEVICE_ID_MATCH_DEV_INFO, \
- .idVendor = (vend), \
- .idProduct = (prod), \
- .bDeviceClass = (cl), \
- .bDeviceSubClass = (sc), \
- .bDeviceProtocol = (pr)
-
static const struct usb_device_id products[] = {
{
- USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
+ USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0),
.driver_info = (unsigned long) &cx82310_info
},
{ },
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index db3c8021f2a3..a7e3f4e55bf3 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -91,7 +91,9 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
// get the packet count of the received skb
count = le32_to_cpu(header->packet_count);
if (count > GL_MAX_TRANSMIT_PACKETS) {
- dbg("genelink: invalid received packet count %u", count);
+ netdev_dbg(dev->net,
+ "genelink: invalid received packet count %u\n",
+ count);
return 0;
}
@@ -107,7 +109,8 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
// this may be a broken packet
if (size > GL_MAX_PACKET_LEN) {
- dbg("genelink: invalid rx length %d", size);
+ netdev_dbg(dev->net, "genelink: invalid rx length %d\n",
+ size);
return 0;
}
@@ -133,7 +136,8 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_pull(skb, 4);
if (skb->len > GL_MAX_PACKET_LEN) {
- dbg("genelink: invalid rx length %d", skb->len);
+ netdev_dbg(dev->net, "genelink: invalid rx length %d\n",
+ skb->len);
return 0;
}
return 1;
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 62f30b46fa42..605a4baa9b7b 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1107,7 +1107,6 @@ static void _hso_serial_set_termios(struct tty_struct *tty,
struct ktermios *old)
{
struct hso_serial *serial = tty->driver_data;
- struct ktermios *termios;
if (!serial) {
printk(KERN_ERR "%s: no tty structures", __func__);
@@ -1119,16 +1118,15 @@ static void _hso_serial_set_termios(struct tty_struct *tty,
/*
* Fix up unsupported bits
*/
- termios = tty->termios;
- termios->c_iflag &= ~IXON; /* disable enable XON/XOFF flow control */
+ tty->termios.c_iflag &= ~IXON; /* disable XON/XOFF flow control */
- termios->c_cflag &=
+ tty->termios.c_cflag &=
~(CSIZE /* no size */
| PARENB /* disable parity bit */
| CBAUD /* clear current baud rate */
| CBAUDEX); /* clear current baud rate */
- termios->c_cflag |= CS8; /* character size 8 bits */
+ tty->termios.c_cflag |= CS8; /* character size 8 bits */
/* baud rate 115200 */
tty_encode_baud_rate(tty, 115200, 115200);
@@ -1425,14 +1423,14 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
if (old)
D5("Termios called with: cflags new[%d] - old[%d]",
- tty->termios->c_cflag, old->c_cflag);
+ tty->termios.c_cflag, old->c_cflag);
/* the actual setup */
spin_lock_irqsave(&serial->serial_lock, flags);
if (serial->port.count)
_hso_serial_set_termios(tty, old);
else
- tty->termios = old;
+ tty->termios = *old;
spin_unlock_irqrestore(&serial->serial_lock, flags);
/* done */
@@ -2289,9 +2287,11 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
if (minor < 0)
goto exit;
+ tty_port_init(&serial->port);
+
/* register our minor number */
- serial->parent->dev = tty_register_device(tty_drv, minor,
- &serial->parent->interface->dev);
+ serial->parent->dev = tty_port_register_device(&serial->port, tty_drv,
+ minor, &serial->parent->interface->dev);
dev = serial->parent->dev;
dev_set_drvdata(dev, serial->parent);
i = device_create_file(dev, &dev_attr_hsotype);
@@ -2300,7 +2300,6 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
serial->minor = minor;
serial->magic = HSO_SERIAL_MAGIC;
spin_lock_init(&serial->serial_lock);
- tty_port_init(&serial->port);
serial->num_rx_urbs = num_urbs;
/* RX, allocate urb and initialize */
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index c3d03490c97d..c75e11e1b385 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -267,19 +267,16 @@ static int kaweth_control(struct kaweth_device *kaweth,
struct usb_ctrlrequest *dr;
int retval;
- dbg("kaweth_control()");
+ netdev_dbg(kaweth->net, "kaweth_control()\n");
if(in_interrupt()) {
- dbg("in_interrupt()");
+ netdev_dbg(kaweth->net, "in_interrupt()\n");
return -EBUSY;
}
dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
-
- if (!dr) {
- dbg("kmalloc() failed");
+ if (!dr)
return -ENOMEM;
- }
dr->bRequestType = requesttype;
dr->bRequest = request;
@@ -305,7 +302,7 @@ static int kaweth_read_configuration(struct kaweth_device *kaweth)
{
int retval;
- dbg("Reading kaweth configuration");
+ netdev_dbg(kaweth->net, "Reading kaweth configuration\n");
retval = kaweth_control(kaweth,
usb_rcvctrlpipe(kaweth->dev, 0),
@@ -327,7 +324,7 @@ static int kaweth_set_urb_size(struct kaweth_device *kaweth, __u16 urb_size)
{
int retval;
- dbg("Setting URB size to %d", (unsigned)urb_size);
+ netdev_dbg(kaweth->net, "Setting URB size to %d\n", (unsigned)urb_size);
retval = kaweth_control(kaweth,
usb_sndctrlpipe(kaweth->dev, 0),
@@ -349,7 +346,7 @@ static int kaweth_set_sofs_wait(struct kaweth_device *kaweth, __u16 sofs_wait)
{
int retval;
- dbg("Set SOFS wait to %d", (unsigned)sofs_wait);
+ netdev_dbg(kaweth->net, "Set SOFS wait to %d\n", (unsigned)sofs_wait);
retval = kaweth_control(kaweth,
usb_sndctrlpipe(kaweth->dev, 0),
@@ -372,7 +369,8 @@ static int kaweth_set_receive_filter(struct kaweth_device *kaweth,
{
int retval;
- dbg("Set receive filter to %d", (unsigned)receive_filter);
+ netdev_dbg(kaweth->net, "Set receive filter to %d\n",
+ (unsigned)receive_filter);
retval = kaweth_control(kaweth,
usb_sndctrlpipe(kaweth->dev, 0),
@@ -421,12 +419,13 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth,
kaweth->firmware_buf[4] = type;
kaweth->firmware_buf[5] = interrupt;
- dbg("High: %i, Low:%i", kaweth->firmware_buf[3],
+ netdev_dbg(kaweth->net, "High: %i, Low:%i\n", kaweth->firmware_buf[3],
kaweth->firmware_buf[2]);
- dbg("Downloading firmware at %p to kaweth device at %p",
- fw->data, kaweth);
- dbg("Firmware length: %d", data_len);
+ netdev_dbg(kaweth->net,
+ "Downloading firmware at %p to kaweth device at %p\n",
+ fw->data, kaweth);
+ netdev_dbg(kaweth->net, "Firmware length: %d\n", data_len);
return kaweth_control(kaweth,
usb_sndctrlpipe(kaweth->dev, 0),
@@ -454,7 +453,7 @@ static int kaweth_trigger_firmware(struct kaweth_device *kaweth,
kaweth->firmware_buf[6] = 0x00;
kaweth->firmware_buf[7] = 0x00;
- dbg("Triggering firmware");
+ netdev_dbg(kaweth->net, "Triggering firmware\n");
return kaweth_control(kaweth,
usb_sndctrlpipe(kaweth->dev, 0),
@@ -474,11 +473,11 @@ static int kaweth_reset(struct kaweth_device *kaweth)
{
int result;
- dbg("kaweth_reset(%p)", kaweth);
+ netdev_dbg(kaweth->net, "kaweth_reset(%p)\n", kaweth);
result = usb_reset_configuration(kaweth->dev);
mdelay(10);
- dbg("kaweth_reset() returns %d.",result);
+ netdev_dbg(kaweth->net, "kaweth_reset() returns %d.\n", result);
return result;
}
@@ -595,6 +594,7 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth);
****************************************************************/
static void kaweth_usb_receive(struct urb *urb)
{
+ struct device *dev = &urb->dev->dev;
struct kaweth_device *kaweth = urb->context;
struct net_device *net = kaweth->net;
int status = urb->status;
@@ -610,25 +610,25 @@ static void kaweth_usb_receive(struct urb *urb)
kaweth->stats.rx_errors++;
kaweth->end = 1;
wake_up(&kaweth->term_wait);
- dbg("Status was -EPIPE.");
+ dev_dbg(dev, "Status was -EPIPE.\n");
return;
}
if (unlikely(status == -ECONNRESET || status == -ESHUTDOWN)) {
/* we are killed - set a flag and wake the disconnect handler */
kaweth->end = 1;
wake_up(&kaweth->term_wait);
- dbg("Status was -ECONNRESET or -ESHUTDOWN.");
+ dev_dbg(dev, "Status was -ECONNRESET or -ESHUTDOWN.\n");
return;
}
if (unlikely(status == -EPROTO || status == -ETIME ||
status == -EILSEQ)) {
kaweth->stats.rx_errors++;
- dbg("Status was -EPROTO, -ETIME, or -EILSEQ.");
+ dev_dbg(dev, "Status was -EPROTO, -ETIME, or -EILSEQ.\n");
return;
}
if (unlikely(status == -EOVERFLOW)) {
kaweth->stats.rx_errors++;
- dbg("Status was -EOVERFLOW.");
+ dev_dbg(dev, "Status was -EOVERFLOW.\n");
}
spin_lock(&kaweth->device_lock);
if (IS_BLOCKED(kaweth->status)) {
@@ -687,7 +687,7 @@ static int kaweth_open(struct net_device *net)
struct kaweth_device *kaweth = netdev_priv(net);
int res;
- dbg("Opening network device.");
+ netdev_dbg(kaweth->net, "Opening network device.\n");
res = usb_autopm_get_interface(kaweth->intf);
if (res) {
@@ -787,7 +787,8 @@ static void kaweth_usb_transmit_complete(struct urb *urb)
if (unlikely(status != 0))
if (status != -ENOENT)
- dbg("%s: TX status %d.", kaweth->net->name, status);
+ dev_dbg(&urb->dev->dev, "%s: TX status %d.\n",
+ kaweth->net->name, status);
netif_wake_queue(kaweth->net);
dev_kfree_skb_irq(skb);
@@ -871,7 +872,7 @@ static void kaweth_set_rx_mode(struct net_device *net)
KAWETH_PACKET_FILTER_BROADCAST |
KAWETH_PACKET_FILTER_MULTICAST;
- dbg("Setting Rx mode to %d", packet_filter_bitmap);
+ netdev_dbg(net, "Setting Rx mode to %d\n", packet_filter_bitmap);
netif_stop_queue(net);
@@ -916,7 +917,8 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth)
result);
}
else {
- dbg("Set Rx mode to %d", packet_filter_bitmap);
+ netdev_dbg(kaweth->net, "Set Rx mode to %d\n",
+ packet_filter_bitmap);
}
}
@@ -951,7 +953,7 @@ static int kaweth_suspend(struct usb_interface *intf, pm_message_t message)
struct kaweth_device *kaweth = usb_get_intfdata(intf);
unsigned long flags;
- dbg("Suspending device");
+ dev_dbg(&intf->dev, "Suspending device\n");
spin_lock_irqsave(&kaweth->device_lock, flags);
kaweth->status |= KAWETH_STATUS_SUSPENDING;
spin_unlock_irqrestore(&kaweth->device_lock, flags);
@@ -968,7 +970,7 @@ static int kaweth_resume(struct usb_interface *intf)
struct kaweth_device *kaweth = usb_get_intfdata(intf);
unsigned long flags;
- dbg("Resuming device");
+ dev_dbg(&intf->dev, "Resuming device\n");
spin_lock_irqsave(&kaweth->device_lock, flags);
kaweth->status &= ~KAWETH_STATUS_SUSPENDING;
spin_unlock_irqrestore(&kaweth->device_lock, flags);
@@ -1003,36 +1005,37 @@ static int kaweth_probe(
const struct usb_device_id *id /* from id_table */
)
{
- struct usb_device *dev = interface_to_usbdev(intf);
+ struct device *dev = &intf->dev;
+ struct usb_device *udev = interface_to_usbdev(intf);
struct kaweth_device *kaweth;
struct net_device *netdev;
const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
int result = 0;
- dbg("Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x",
- dev->devnum,
- le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct),
- le16_to_cpu(dev->descriptor.bcdDevice));
+ dev_dbg(dev,
+ "Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n",
+ udev->devnum, le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct),
+ le16_to_cpu(udev->descriptor.bcdDevice));
- dbg("Device at %p", dev);
+ dev_dbg(dev, "Device at %p\n", udev);
- dbg("Descriptor length: %x type: %x",
- (int)dev->descriptor.bLength,
- (int)dev->descriptor.bDescriptorType);
+ dev_dbg(dev, "Descriptor length: %x type: %x\n",
+ (int)udev->descriptor.bLength,
+ (int)udev->descriptor.bDescriptorType);
netdev = alloc_etherdev(sizeof(*kaweth));
if (!netdev)
return -ENOMEM;
kaweth = netdev_priv(netdev);
- kaweth->dev = dev;
+ kaweth->dev = udev;
kaweth->net = netdev;
spin_lock_init(&kaweth->device_lock);
init_waitqueue_head(&kaweth->term_wait);
- dbg("Resetting.");
+ dev_dbg(dev, "Resetting.\n");
kaweth_reset(kaweth);
@@ -1041,17 +1044,17 @@ static int kaweth_probe(
* downloaded. Don't try to do it again, or we'll hang the device.
*/
- if (le16_to_cpu(dev->descriptor.bcdDevice) >> 8) {
- dev_info(&intf->dev, "Firmware present in device.\n");
+ if (le16_to_cpu(udev->descriptor.bcdDevice) >> 8) {
+ dev_info(dev, "Firmware present in device.\n");
} else {
/* Download the firmware */
- dev_info(&intf->dev, "Downloading firmware...\n");
+ dev_info(dev, "Downloading firmware...\n");
kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL);
if ((result = kaweth_download_firmware(kaweth,
"kaweth/new_code.bin",
100,
2)) < 0) {
- dev_err(&intf->dev, "Error downloading firmware (%d)\n",
+ dev_err(dev, "Error downloading firmware (%d)\n",
result);
goto err_fw;
}
@@ -1060,8 +1063,7 @@ static int kaweth_probe(
"kaweth/new_code_fix.bin",
100,
3)) < 0) {
- dev_err(&intf->dev,
- "Error downloading firmware fix (%d)\n",
+ dev_err(dev, "Error downloading firmware fix (%d)\n",
result);
goto err_fw;
}
@@ -1070,8 +1072,7 @@ static int kaweth_probe(
"kaweth/trigger_code.bin",
126,
2)) < 0) {
- dev_err(&intf->dev,
- "Error downloading trigger code (%d)\n",
+ dev_err(dev, "Error downloading trigger code (%d)\n",
result);
goto err_fw;
@@ -1081,19 +1082,18 @@ static int kaweth_probe(
"kaweth/trigger_code_fix.bin",
126,
3)) < 0) {
- dev_err(&intf->dev, "Error downloading trigger code fix (%d)\n", result);
+ dev_err(dev, "Error downloading trigger code fix (%d)\n", result);
goto err_fw;
}
if ((result = kaweth_trigger_firmware(kaweth, 126)) < 0) {
- dev_err(&intf->dev, "Error triggering firmware (%d)\n",
- result);
+ dev_err(dev, "Error triggering firmware (%d)\n", result);
goto err_fw;
}
/* Device will now disappear for a moment... */
- dev_info(&intf->dev, "Firmware loaded. I'll be back...\n");
+ dev_info(dev, "Firmware loaded. I'll be back...\n");
err_fw:
free_page((unsigned long)kaweth->firmware_buf);
free_netdev(netdev);
@@ -1103,29 +1103,29 @@ err_fw:
result = kaweth_read_configuration(kaweth);
if(result < 0) {
- dev_err(&intf->dev, "Error reading configuration (%d), no net device created\n", result);
+ dev_err(dev, "Error reading configuration (%d), no net device created\n", result);
goto err_free_netdev;
}
- dev_info(&intf->dev, "Statistics collection: %x\n", kaweth->configuration.statistics_mask);
- dev_info(&intf->dev, "Multicast filter limit: %x\n", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1));
- dev_info(&intf->dev, "MTU: %d\n", le16_to_cpu(kaweth->configuration.segment_size));
- dev_info(&intf->dev, "Read MAC address %pM\n", kaweth->configuration.hw_addr);
+ dev_info(dev, "Statistics collection: %x\n", kaweth->configuration.statistics_mask);
+ dev_info(dev, "Multicast filter limit: %x\n", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1));
+ dev_info(dev, "MTU: %d\n", le16_to_cpu(kaweth->configuration.segment_size));
+ dev_info(dev, "Read MAC address %pM\n", kaweth->configuration.hw_addr);
if(!memcmp(&kaweth->configuration.hw_addr,
&bcast_addr,
sizeof(bcast_addr))) {
- dev_err(&intf->dev, "Firmware not functioning properly, no net device created\n");
+ dev_err(dev, "Firmware not functioning properly, no net device created\n");
goto err_free_netdev;
}
if(kaweth_set_urb_size(kaweth, KAWETH_BUF_SIZE) < 0) {
- dbg("Error setting URB size");
+ dev_dbg(dev, "Error setting URB size\n");
goto err_free_netdev;
}
if(kaweth_set_sofs_wait(kaweth, KAWETH_SOFS_TO_WAIT) < 0) {
- dev_err(&intf->dev, "Error setting SOFS wait\n");
+ dev_err(dev, "Error setting SOFS wait\n");
goto err_free_netdev;
}
@@ -1135,11 +1135,11 @@ err_fw:
KAWETH_PACKET_FILTER_MULTICAST);
if(result < 0) {
- dev_err(&intf->dev, "Error setting receive filter\n");
+ dev_err(dev, "Error setting receive filter\n");
goto err_free_netdev;
}
- dbg("Initializing net device.");
+ dev_dbg(dev, "Initializing net device.\n");
kaweth->intf = intf;
@@ -1181,20 +1181,20 @@ err_fw:
#if 0
// dma_supported() is deeply broken on almost all architectures
- if (dma_supported (&intf->dev, 0xffffffffffffffffULL))
+ if (dma_supported (dev, 0xffffffffffffffffULL))
kaweth->net->features |= NETIF_F_HIGHDMA;
#endif
- SET_NETDEV_DEV(netdev, &intf->dev);
+ SET_NETDEV_DEV(netdev, dev);
if (register_netdev(netdev) != 0) {
- dev_err(&intf->dev, "Error registering netdev.\n");
+ dev_err(dev, "Error registering netdev.\n");
goto err_intfdata;
}
- dev_info(&intf->dev, "kaweth interface created at %s\n",
+ dev_info(dev, "kaweth interface created at %s\n",
kaweth->net->name);
- dbg("Kaweth probe returning.");
+ dev_dbg(dev, "Kaweth probe returning.\n");
return 0;
@@ -1232,7 +1232,7 @@ static void kaweth_disconnect(struct usb_interface *intf)
}
netdev = kaweth->net;
- dbg("Unregistering net device");
+ netdev_dbg(kaweth->net, "Unregistering net device\n");
unregister_netdev(netdev);
usb_free_urb(kaweth->rx_urb);
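
A recurring idiom in the kaweth conversion above is caching the interface's struct device once and routing all messages through dev_dbg()/dev_info()/dev_err(), so every line is tagged with the USB interface rather than printed anonymously. Sketch of the idiom with an illustrative helper (example_announce is not from the driver):

static void example_announce(struct usb_interface *intf)
{
        struct device *dev = &intf->dev;        /* one handle for all messages */
        struct usb_device *udev = interface_to_usbdev(intf);

        dev_info(dev, "device number %d, id %04x:%04x\n", udev->devnum,
                 le16_to_cpu(udev->descriptor.idVendor),
                 le16_to_cpu(udev->descriptor.idProduct));
}
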
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 28c4d513ba85..c062a3e8295c 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -155,12 +155,10 @@ static void nc_dump_registers(struct usbnet *dev)
u8 reg;
u16 *vp = kmalloc(sizeof (u16));
- if (!vp) {
- dbg("no memory?");
+ if (!vp)
return;
- }
- dbg("%s registers:", dev->net->name);
+ netdev_dbg(dev->net, "registers:\n");
for (reg = 0; reg < 0x20; reg++) {
int retval;
@@ -172,11 +170,10 @@ static void nc_dump_registers(struct usbnet *dev)
retval = nc_register_read(dev, reg, vp);
if (retval < 0)
- dbg("%s reg [0x%x] ==> error %d",
- dev->net->name, reg, retval);
+ netdev_dbg(dev->net, "reg [0x%x] ==> error %d\n",
+ reg, retval);
else
- dbg("%s reg [0x%x] = 0x%x",
- dev->net->name, reg, *vp);
+ netdev_dbg(dev->net, "reg [0x%x] = 0x%x\n", reg, *vp);
}
kfree(vp);
}
@@ -300,15 +297,15 @@ static int net1080_reset(struct usbnet *dev)
// nc_dump_registers(dev);
if ((retval = nc_register_read(dev, REG_STATUS, vp)) < 0) {
- dbg("can't read %s-%s status: %d",
- dev->udev->bus->bus_name, dev->udev->devpath, retval);
+ netdev_dbg(dev->net, "can't read %s-%s status: %d\n",
+ dev->udev->bus->bus_name, dev->udev->devpath, retval);
goto done;
}
status = *vp;
nc_dump_status(dev, status);
if ((retval = nc_register_read(dev, REG_USBCTL, vp)) < 0) {
- dbg("can't read USBCTL, %d", retval);
+ netdev_dbg(dev->net, "can't read USBCTL, %d\n", retval);
goto done;
}
usbctl = *vp;
@@ -318,7 +315,7 @@ static int net1080_reset(struct usbnet *dev)
USBCTL_FLUSH_THIS | USBCTL_FLUSH_OTHER);
if ((retval = nc_register_read(dev, REG_TTL, vp)) < 0) {
- dbg("can't read TTL, %d", retval);
+ netdev_dbg(dev->net, "can't read TTL, %d\n", retval);
goto done;
}
ttl = *vp;
@@ -326,7 +323,7 @@ static int net1080_reset(struct usbnet *dev)
nc_register_write(dev, REG_TTL,
MK_TTL(NC_READ_TTL_MS, TTL_OTHER(ttl)) );
- dbg("%s: assigned TTL, %d ms", dev->net->name, NC_READ_TTL_MS);
+ netdev_dbg(dev->net, "assigned TTL, %d ms\n", NC_READ_TTL_MS);
netif_info(dev, link, dev->net, "port %c, peer %sconnected\n",
(status & STATUS_PORT_A) ? 'A' : 'B',
@@ -350,7 +347,7 @@ static int net1080_check_connect(struct usbnet *dev)
status = *vp;
kfree(vp);
if (retval != 0) {
- dbg("%s net1080_check_conn read - %d", dev->net->name, retval);
+ netdev_dbg(dev->net, "net1080_check_conn read - %d\n", retval);
return retval;
}
if ((status & STATUS_CONN_OTHER) != STATUS_CONN_OTHER)
@@ -420,11 +417,9 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
u16 hdr_len, packet_len;
if (!(skb->len & 0x01)) {
-#ifdef DEBUG
- struct net_device *net = dev->net;
- dbg("rx framesize %d range %d..%d mtu %d", skb->len,
- net->hard_header_len, dev->hard_mtu, net->mtu);
-#endif
+ netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n",
+ skb->len, dev->net->hard_header_len, dev->hard_mtu,
+ dev->net->mtu);
dev->net->stats.rx_frame_errors++;
nc_ensure_sync(dev);
return 0;
@@ -435,17 +430,17 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
packet_len = le16_to_cpup(&header->packet_len);
if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) {
dev->net->stats.rx_frame_errors++;
- dbg("packet too big, %d", packet_len);
+ netdev_dbg(dev->net, "packet too big, %d\n", packet_len);
nc_ensure_sync(dev);
return 0;
} else if (hdr_len < MIN_HEADER) {
dev->net->stats.rx_frame_errors++;
- dbg("header too short, %d", hdr_len);
+ netdev_dbg(dev->net, "header too short, %d\n", hdr_len);
nc_ensure_sync(dev);
return 0;
} else if (hdr_len > MIN_HEADER) {
// out of band data for us?
- dbg("header OOB, %d bytes", hdr_len - MIN_HEADER);
+ netdev_dbg(dev->net, "header OOB, %d bytes\n", hdr_len - MIN_HEADER);
nc_ensure_sync(dev);
// switch (vendor/product ids) { ... }
}
@@ -458,23 +453,23 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if ((packet_len & 0x01) == 0) {
if (skb->data [packet_len] != PAD_BYTE) {
dev->net->stats.rx_frame_errors++;
- dbg("bad pad");
+ netdev_dbg(dev->net, "bad pad\n");
return 0;
}
skb_trim(skb, skb->len - 1);
}
if (skb->len != packet_len) {
dev->net->stats.rx_frame_errors++;
- dbg("bad packet len %d (expected %d)",
- skb->len, packet_len);
+ netdev_dbg(dev->net, "bad packet len %d (expected %d)\n",
+ skb->len, packet_len);
nc_ensure_sync(dev);
return 0;
}
if (header->packet_id != get_unaligned(&trailer->packet_id)) {
dev->net->stats.rx_fifo_errors++;
- dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x",
- le16_to_cpu(header->packet_id),
- le16_to_cpu(trailer->packet_id));
+ netdev_dbg(dev->net, "(2+ dropped) rx packet_id mismatch 0x%x 0x%x\n",
+ le16_to_cpu(header->packet_id),
+ le16_to_cpu(trailer->packet_id));
return 0;
}
#if 0
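
The removed #ifdef DEBUG block above is a side benefit of the same conversion: netdev_dbg() compiles down to a no-op (or a dynamic-debug call site) when debugging is disabled, so diagnostic-only statements no longer need a preprocessor guard. Illustrative shape only (example_check_frame is hypothetical):

static int example_check_frame(struct usbnet *dev, struct sk_buff *skb)
{
        if (!(skb->len & 0x01)) {
                /* no #ifdef needed: this line costs nothing when debug
                 * output is compiled out */
                netdev_dbg(dev->net, "rx framesize %d mtu %d\n",
                           skb->len, dev->net->mtu);
                dev->net->stats.rx_frame_errors++;
                return 0;
        }
        return 1;
}
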
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3543c9e57824..6883c371c59f 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -108,7 +108,7 @@ static int qmi_wwan_register_subdriver(struct usbnet *dev)
atomic_set(&info->pmcount, 0);
/* register subdriver */
- subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power);
+ subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 4096, &qmi_wwan_cdc_wdm_manage_power);
if (IS_ERR(subdriver)) {
dev_err(&info->control->dev, "subdriver registration failed\n");
rv = PTR_ERR(subdriver);
@@ -139,10 +139,18 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
- /* require a single interrupt status endpoint for subdriver */
+ /* control and data is shared? */
+ if (intf->cur_altsetting->desc.bNumEndpoints == 3) {
+ info->control = intf;
+ info->data = intf;
+ goto shared;
+ }
+
+ /* else require a single interrupt status endpoint on control intf */
if (intf->cur_altsetting->desc.bNumEndpoints != 1)
goto err;
+ /* and a number of CDC descriptors */
while (len > 3) {
struct usb_descriptor_header *h = (void *)buf;
@@ -231,8 +239,9 @@ next_desc:
if (status < 0)
goto err;
+shared:
status = qmi_wwan_register_subdriver(dev);
- if (status < 0) {
+ if (status < 0 && info->control != info->data) {
usb_set_intfdata(info->data, NULL);
usb_driver_release_interface(driver, info->data);
}
@@ -241,20 +250,6 @@ err:
return status;
}
-/* Some devices combine the "control" and "data" functions into a
- * single interface with all three endpoints: interrupt + bulk in and
- * out
- */
-static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
-{
- struct qmi_wwan_state *info = (void *)&dev->data;
-
- /* control and data is shared */
- info->control = intf;
- info->data = intf;
- return qmi_wwan_register_subdriver(dev);
-}
-
static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct qmi_wwan_state *info = (void *)&dev->data;
@@ -331,20 +326,12 @@ static const struct driver_info qmi_wwan_info = {
.manage_power = qmi_wwan_manage_power,
};
-static const struct driver_info qmi_wwan_shared = {
- .description = "WWAN/QMI device",
- .flags = FLAG_WWAN,
- .bind = qmi_wwan_bind_shared,
- .unbind = qmi_wwan_unbind,
- .manage_power = qmi_wwan_manage_power,
-};
-
#define HUAWEI_VENDOR_ID 0x12D1
/* map QMI/wwan function by a fixed interface number */
#define QMI_FIXED_INTF(vend, prod, num) \
USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
- .driver_info = (unsigned long)&qmi_wwan_shared
+ .driver_info = (unsigned long)&qmi_wwan_info
/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
#define QMI_GOBI1K_DEVICE(vend, prod) \
@@ -372,15 +359,15 @@ static const struct usb_device_id products[] = {
},
{ /* Huawei E392, E398 and possibly others in "Windows mode" */
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
- .driver_info = (unsigned long)&qmi_wwan_shared,
+ .driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* Pantech UML290, P4200 and more */
USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
- .driver_info = (unsigned long)&qmi_wwan_shared,
+ .driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* Pantech UML290 - newer firmware */
USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff),
- .driver_info = (unsigned long)&qmi_wwan_shared,
+ .driver_info = (unsigned long)&qmi_wwan_info,
},
/* 3. Combined interface devices matching on interface number */
@@ -467,7 +454,7 @@ static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id
*/
if (!id->driver_info) {
dev_dbg(&intf->dev, "setting defaults for dynamic device id\n");
- id->driver_info = (unsigned long)&qmi_wwan_shared;
+ id->driver_info = (unsigned long)&qmi_wwan_info;
}
return usbnet_probe(intf, id);
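
Two qmi_wwan changes meet here: the cdc-wdm buffer passed to usb_cdc_wdm_register() grows from 512 to 4096 bytes, and the separate qmi_wwan_shared/qmi_wwan_bind_shared path is folded into the normal bind by detecting a three-endpoint interface. Condensed sketch of the detection step, using the state fields and helper from the patch; example_bind_shared_check itself is illustrative, and the non-shared path is only indicated by a comment:

static int example_bind_shared_check(struct usbnet *dev,
                                     struct usb_interface *intf)
{
        struct qmi_wwan_state *info = (void *)&dev->data;

        /* a single interface with interrupt + bulk-in + bulk-out carries
         * both the control and the data function */
        if (intf->cur_altsetting->desc.bNumEndpoints == 3) {
                info->control = intf;
                info->data = intf;
                return qmi_wwan_register_subdriver(dev);
        }

        /* otherwise fall back to the two-interface CDC layout: parse the
         * descriptors, claim the data interface, then register the
         * subdriver as in the full bind function above */
        return -EINVAL;         /* placeholder: the real path continues here */
}
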
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 0e2c92e0e532..5f39a3b225ef 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -275,7 +275,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
return -EBUSY;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- dbg("%s: Setting MAC address to %pM\n", netdev->name, netdev->dev_addr);
+ netdev_dbg(netdev, "Setting MAC address to %pM\n", netdev->dev_addr);
/* Set the IDR registers. */
set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
#ifdef EEPROM_WRITE
@@ -503,12 +503,12 @@ static void intr_callback(struct urb *urb)
if ((d[INT_MSR] & MSR_LINK) == 0) {
if (netif_carrier_ok(dev->netdev)) {
netif_carrier_off(dev->netdev);
- dbg("%s: LINK LOST\n", __func__);
+ netdev_dbg(dev->netdev, "%s: LINK LOST\n", __func__);
}
} else {
if (!netif_carrier_ok(dev->netdev)) {
netif_carrier_on(dev->netdev);
- dbg("%s: LINK CAME BACK\n", __func__);
+ netdev_dbg(dev->netdev, "%s: LINK CAME BACK\n", __func__);
}
}
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 8e22417fa6c1..c27d27701aee 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -68,9 +68,8 @@ static atomic_t iface_counter = ATOMIC_INIT(0);
*/
#define SIERRA_NET_USBCTL_BUF_LEN 1024
-struct sierra_net_info_data {
- u16 rx_urb_size;
-};
+/* Overriding the default usbnet rx_urb_size */
+#define SIERRA_NET_RX_URB_SIZE (8 * 1024)
/* Private data structure */
struct sierra_net_data {
@@ -560,7 +559,7 @@ static void sierra_net_defer_kevent(struct usbnet *dev, int work)
/*
* Sync Retransmit Timer Handler. On expiry, kick the work queue
*/
-void sierra_sync_timer(unsigned long syncdata)
+static void sierra_sync_timer(unsigned long syncdata)
{
struct usbnet *dev = (struct usbnet *)syncdata;
@@ -678,9 +677,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = {
0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00};
- struct sierra_net_info_data *data =
- (struct sierra_net_info_data *)dev->driver_info->data;
-
dev_dbg(&dev->udev->dev, "%s", __func__);
ifacenum = intf->cur_altsetting->desc.bInterfaceNumber;
@@ -725,9 +721,9 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
sierra_net_set_ctx_index(priv, 0);
/* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */
- dev->rx_urb_size = data->rx_urb_size;
+ dev->rx_urb_size = SIERRA_NET_RX_URB_SIZE;
if (dev->udev->speed != USB_SPEED_HIGH)
- dev->rx_urb_size = min_t(size_t, 4096, data->rx_urb_size);
+ dev->rx_urb_size = min_t(size_t, 4096, SIERRA_NET_RX_URB_SIZE);
dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
@@ -842,7 +838,7 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
netdev_err(dev->net, "HIP/ETH: Invalid pkt\n");
dev->net->stats.rx_frame_errors++;
- /* dev->net->stats.rx_errors incremented by caller */;
+ /* dev->net->stats.rx_errors incremented by caller */
return 0;
}
@@ -866,8 +862,8 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}
/* ---------------------------- Transmit data path ----------------------*/
-struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
- gfp_t flags)
+static struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev,
+ struct sk_buff *skb, gfp_t flags)
{
struct sierra_net_data *priv = sierra_net_get_private(dev);
u16 len;
@@ -918,10 +914,6 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
return NULL;
}
-static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
- .rx_urb_size = 8 * 1024,
-};
-
static const struct driver_info sierra_net_info_direct_ip = {
.description = "Sierra Wireless USB-to-WWAN Modem",
.flags = FLAG_WWAN | FLAG_SEND_ZLP,
@@ -930,7 +922,6 @@ static const struct driver_info sierra_net_info_direct_ip = {
.status = sierra_net_status,
.rx_fixup = sierra_net_rx_fixup,
.tx_fixup = sierra_net_tx_fixup,
- .data = (unsigned long)&sierra_net_info_data_direct_ip,
};
#define DIRECT_IP_DEVICE(vend, prod) \
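
With only one user left, sierra_net drops the driver_info->data indirection and uses the SIERRA_NET_RX_URB_SIZE constant directly, clamped to 4 KiB on anything slower than high-speed USB. The logic isolated for readability (example_clamp_rx_urb is an illustrative wrapper around lines already in the patch):

static void example_clamp_rx_urb(struct usbnet *dev)
{
        /* 8 KiB bulk-in URBs normally; 4 KiB on full-speed links */
        dev->rx_urb_size = SIERRA_NET_RX_URB_SIZE;
        if (dev->udev->speed != USB_SPEED_HIGH)
                dev->rx_urb_size = min_t(size_t, 4096,
                                         SIERRA_NET_RX_URB_SIZE);
}
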
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 376143e8a1aa..b77ae76f4aa8 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -52,6 +52,7 @@
#define USB_PRODUCT_ID_LAN7500 (0x7500)
#define USB_PRODUCT_ID_LAN7505 (0x7505)
#define RXW_PADDING 2
+#define SUPPORTED_WAKE (WAKE_MAGIC)
#define check_warn(ret, fmt, args...) \
({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
@@ -65,6 +66,7 @@
struct smsc75xx_priv {
struct usbnet *dev;
u32 rfe_ctl;
+ u32 wolopts;
u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN];
struct mutex dataport_mutex;
spinlock_t rfe_ctl_lock;
@@ -135,6 +137,30 @@ static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index,
return ret;
}
+static int smsc75xx_set_feature(struct usbnet *dev, u32 feature)
+{
+ if (WARN_ON_ONCE(!dev))
+ return -EINVAL;
+
+ cpu_to_le32s(&feature);
+
+ return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+}
+
+static int smsc75xx_clear_feature(struct usbnet *dev, u32 feature)
+{
+ if (WARN_ON_ONCE(!dev))
+ return -EINVAL;
+
+ cpu_to_le32s(&feature);
+
+ return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+}
+
/* Loop until the read is completed with timeout
* called with phy_mutex held */
static int smsc75xx_phy_wait_not_busy(struct usbnet *dev)
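
smsc75xx_set_feature() and smsc75xx_clear_feature() above are thin wrappers around the standard SET_FEATURE/CLEAR_FEATURE device requests; later in the file they arm and disarm USB remote wakeup around suspend. Minimal usage sketch (example_arm_remote_wakeup is illustrative):

static int example_arm_remote_wakeup(struct usbnet *dev, bool arm)
{
        if (arm)
                return smsc75xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
        return smsc75xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
}
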
@@ -578,6 +604,26 @@ static int smsc75xx_ethtool_set_eeprom(struct net_device *netdev,
return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data);
}
+static void smsc75xx_ethtool_get_wol(struct net_device *net,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+
+ wolinfo->supported = SUPPORTED_WAKE;
+ wolinfo->wolopts = pdata->wolopts;
+}
+
+static int smsc75xx_ethtool_set_wol(struct net_device *net,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+
+ pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
+ return 0;
+}
+
static const struct ethtool_ops smsc75xx_ethtool_ops = {
.get_link = usbnet_get_link,
.nway_reset = usbnet_nway_reset,
@@ -589,6 +635,8 @@ static const struct ethtool_ops smsc75xx_ethtool_ops = {
.get_eeprom_len = smsc75xx_ethtool_get_eeprom_len,
.get_eeprom = smsc75xx_ethtool_get_eeprom,
.set_eeprom = smsc75xx_ethtool_set_eeprom,
+ .get_wol = smsc75xx_ethtool_get_wol,
+ .set_wol = smsc75xx_ethtool_set_wol,
};
static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -756,6 +804,26 @@ static int smsc75xx_set_features(struct net_device *netdev,
return 0;
}
+static int smsc75xx_wait_ready(struct usbnet *dev)
+{
+ int timeout = 0;
+
+ do {
+ u32 buf;
+ int ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+ check_warn_return(ret, "Failed to read PMT_CTL: %d", ret);
+
+ if (buf & PMT_CTL_DEV_RDY)
+ return 0;
+
+ msleep(10);
+ timeout++;
+ } while (timeout < 100);
+
+ netdev_warn(dev->net, "timeout waiting for device ready");
+ return -EIO;
+}
+
static int smsc75xx_reset(struct usbnet *dev)
{
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
@@ -764,6 +832,9 @@ static int smsc75xx_reset(struct usbnet *dev)
netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset");
+ ret = smsc75xx_wait_ready(dev);
+ check_warn_return(ret, "device not ready in smsc75xx_reset");
+
ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
check_warn_return(ret, "Failed to read HW_CFG: %d", ret);
@@ -1083,6 +1154,169 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
}
}
+static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+ struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+ int ret;
+ u32 val;
+
+ ret = usbnet_suspend(intf, message);
+ check_warn_return(ret, "usbnet_suspend error");
+
+ /* if no wol options set, enter lowest power SUSPEND2 mode */
+ if (!(pdata->wolopts & SUPPORTED_WAKE)) {
+ netdev_info(dev->net, "entering SUSPEND2 mode");
+
+ /* disable energy detect (link up) & wake up events */
+ ret = smsc75xx_read_reg(dev, WUCSR, &val);
+ check_warn_return(ret, "Error reading WUCSR");
+
+ val &= ~(WUCSR_MPEN | WUCSR_WUEN);
+
+ ret = smsc75xx_write_reg(dev, WUCSR, val);
+ check_warn_return(ret, "Error writing WUCSR");
+
+ ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+ check_warn_return(ret, "Error reading PMT_CTL");
+
+ val &= ~(PMT_CTL_ED_EN | PMT_CTL_WOL_EN);
+
+ ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+ check_warn_return(ret, "Error writing PMT_CTL");
+
+ /* enter suspend2 mode */
+ ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+ check_warn_return(ret, "Error reading PMT_CTL");
+
+ val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
+ val |= PMT_CTL_SUS_MODE_2;
+
+ ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+ check_warn_return(ret, "Error writing PMT_CTL");
+
+ return 0;
+ }
+
+ if (pdata->wolopts & WAKE_MAGIC) {
+ /* clear any pending magic packet status */
+ ret = smsc75xx_read_reg(dev, WUCSR, &val);
+ check_warn_return(ret, "Error reading WUCSR");
+
+ val |= WUCSR_MPR;
+
+ ret = smsc75xx_write_reg(dev, WUCSR, val);
+ check_warn_return(ret, "Error writing WUCSR");
+ }
+
+ /* enable/disable magic packup wake */
+ ret = smsc75xx_read_reg(dev, WUCSR, &val);
+ check_warn_return(ret, "Error reading WUCSR");
+
+ if (pdata->wolopts & WAKE_MAGIC) {
+ netdev_info(dev->net, "enabling magic packet wakeup");
+ val |= WUCSR_MPEN;
+ } else {
+ netdev_info(dev->net, "disabling magic packet wakeup");
+ val &= ~WUCSR_MPEN;
+ }
+
+ ret = smsc75xx_write_reg(dev, WUCSR, val);
+ check_warn_return(ret, "Error writing WUCSR");
+
+ /* enable wol wakeup source */
+ ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+ check_warn_return(ret, "Error reading PMT_CTL");
+
+ val |= PMT_CTL_WOL_EN;
+
+ ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+ check_warn_return(ret, "Error writing PMT_CTL");
+
+ /* enable receiver */
+ ret = smsc75xx_read_reg(dev, MAC_RX, &val);
+ check_warn_return(ret, "Failed to read MAC_RX: %d", ret);
+
+ val |= MAC_RX_RXEN;
+
+ ret = smsc75xx_write_reg(dev, MAC_RX, val);
+ check_warn_return(ret, "Failed to write MAC_RX: %d", ret);
+
+ /* some wol options are enabled, so enter SUSPEND0 */
+ netdev_info(dev->net, "entering SUSPEND0 mode");
+
+ ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+ check_warn_return(ret, "Error reading PMT_CTL");
+
+ val &= (~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST));
+ val |= PMT_CTL_SUS_MODE_0;
+
+ ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+ check_warn_return(ret, "Error writing PMT_CTL");
+
+ /* clear wol status */
+ val &= ~PMT_CTL_WUPS;
+ val |= PMT_CTL_WUPS_WOL;
+ ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+ check_warn_return(ret, "Error writing PMT_CTL");
+
+ /* read back PMT_CTL */
+ ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+ check_warn_return(ret, "Error reading PMT_CTL");
+
+ smsc75xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
+
+ return 0;
+}
+
+static int smsc75xx_resume(struct usb_interface *intf)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+ struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+ int ret;
+ u32 val;
+
+ if (pdata->wolopts & WAKE_MAGIC) {
+ netdev_info(dev->net, "resuming from SUSPEND0");
+
+ smsc75xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
+
+ /* Disable magic packup wake */
+ ret = smsc75xx_read_reg(dev, WUCSR, &val);
+ check_warn_return(ret, "Error reading WUCSR");
+
+ val &= ~WUCSR_MPEN;
+
+ ret = smsc75xx_write_reg(dev, WUCSR, val);
+ check_warn_return(ret, "Error writing WUCSR");
+
+ /* clear wake-up status */
+ ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+ check_warn_return(ret, "Error reading PMT_CTL");
+
+ val &= ~PMT_CTL_WOL_EN;
+ val |= PMT_CTL_WUPS;
+
+ ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+ check_warn_return(ret, "Error writing PMT_CTL");
+ } else {
+ netdev_info(dev->net, "resuming from SUSPEND2");
+
+ ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
+ check_warn_return(ret, "Error reading PMT_CTL");
+
+ val |= PMT_CTL_PHY_PWRUP;
+
+ ret = smsc75xx_write_reg(dev, PMT_CTL, val);
+ check_warn_return(ret, "Error writing PMT_CTL");
+ }
+
+ ret = smsc75xx_wait_ready(dev);
+ check_warn_return(ret, "device not ready in smsc75xx_resume");
+
+ return usbnet_resume(intf);
+}
+
static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
u32 rx_cmd_a, u32 rx_cmd_b)
{
@@ -1251,9 +1485,9 @@ static struct usb_driver smsc75xx_driver = {
.name = SMSC_CHIPNAME,
.id_table = products,
.probe = usbnet_probe,
- .suspend = usbnet_suspend,
- .resume = usbnet_resume,
- .reset_resume = usbnet_resume,
+ .suspend = smsc75xx_suspend,
+ .resume = smsc75xx_resume,
+ .reset_resume = smsc75xx_resume,
.disconnect = usbnet_disconnect,
.disable_hub_initiated_lpm = 1,
};
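
The new suspend/resume callbacks above implement a two-level policy: with no Wake-on-LAN options the chip enters SUSPEND2 with all wakeup sources off, while WAKE_MAGIC arms WUCSR_MPEN, keeps the receiver running, enters SUSPEND0 and enables USB remote wakeup. A condensed restatement using the registers and helpers from the patch; it omits the wakeup-source clearing in the SUSPEND2 branch and the receiver enable, MPR acknowledgement and WUPS status handling in the SUSPEND0 branch that the full version performs:

static int example_suspend_policy(struct usbnet *dev)
{
        struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
        u32 val;
        int ret;

        if (!(pdata->wolopts & WAKE_MAGIC)) {
                /* deepest state, no wakeup sources */
                ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
                check_warn_return(ret, "Error reading PMT_CTL");
                val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
                val |= PMT_CTL_SUS_MODE_2;
                return smsc75xx_write_reg(dev, PMT_CTL, val);
        }

        /* magic-packet wake: arm the MAC wakeup logic, then SUSPEND0 */
        ret = smsc75xx_read_reg(dev, WUCSR, &val);
        check_warn_return(ret, "Error reading WUCSR");
        ret = smsc75xx_write_reg(dev, WUCSR, val | WUCSR_MPEN);
        check_warn_return(ret, "Error writing WUCSR");

        ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
        check_warn_return(ret, "Error reading PMT_CTL");
        val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
        val |= PMT_CTL_SUS_MODE_0 | PMT_CTL_WOL_EN;
        ret = smsc75xx_write_reg(dev, PMT_CTL, val);
        check_warn_return(ret, "Error writing PMT_CTL");

        /* let the host wake us when a magic packet arrives */
        return smsc75xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
}
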
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index d45e539a84b7..7479a5761d0d 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -46,11 +46,22 @@
#define SMSC95XX_INTERNAL_PHY_ID (1)
#define SMSC95XX_TX_OVERHEAD (8)
#define SMSC95XX_TX_OVERHEAD_CSUM (12)
+#define SUPPORTED_WAKE (WAKE_MAGIC)
+
+#define check_warn(ret, fmt, args...) \
+ ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
+
+#define check_warn_return(ret, fmt, args...) \
+ ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); return ret; } })
+
+#define check_warn_goto_done(ret, fmt, args...) \
+ ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); goto done; } })
struct smsc95xx_priv {
u32 mac_cr;
u32 hash_hi;
u32 hash_lo;
+ u32 wolopts;
spinlock_t mac_cr_lock;
};
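
The check_warn*() macros introduced above shrink the repetitive "if (ret < 0) { netdev_warn(...); return/goto; }" blocks that follow every register access; together with the __must_check annotations added to smsc95xx_read_reg()/smsc95xx_write_reg() they force callers to handle USB I/O failures. Note that the macros expand in the caller, so 'dev' must be in scope and check_warn_return() only fits functions returning int. Usage sketch (example_read_id is illustrative; ID_REV is a register the driver already reads):

static int example_read_id(struct usbnet *dev, u32 *id)
{
        int ret = smsc95xx_read_reg(dev, ID_REV, id);

        /* warns via netdev_warn() and returns ret if it is negative */
        check_warn_return(ret, "Failed to read ID_REV: %d\n", ret);
        return 0;
}
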
@@ -63,7 +74,8 @@ static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
-static int smsc95xx_read_reg(struct usbnet *dev, u32 index, u32 *data)
+static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
+ u32 *data)
{
u32 *buf = kmalloc(4, GFP_KERNEL);
int ret;
@@ -88,7 +100,8 @@ static int smsc95xx_read_reg(struct usbnet *dev, u32 index, u32 *data)
return ret;
}
-static int smsc95xx_write_reg(struct usbnet *dev, u32 index, u32 data)
+static int __must_check smsc95xx_write_reg(struct usbnet *dev, u32 index,
+ u32 data)
{
u32 *buf = kmalloc(4, GFP_KERNEL);
int ret;
@@ -114,15 +127,41 @@ static int smsc95xx_write_reg(struct usbnet *dev, u32 index, u32 data)
return ret;
}
+static int smsc95xx_set_feature(struct usbnet *dev, u32 feature)
+{
+ if (WARN_ON_ONCE(!dev))
+ return -EINVAL;
+
+ cpu_to_le32s(&feature);
+
+ return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+}
+
+static int smsc95xx_clear_feature(struct usbnet *dev, u32 feature)
+{
+ if (WARN_ON_ONCE(!dev))
+ return -EINVAL;
+
+ cpu_to_le32s(&feature);
+
+ return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+}
+
/* Loop until the read is completed with timeout
* called with phy_mutex held */
-static int smsc95xx_phy_wait_not_busy(struct usbnet *dev)
+static int __must_check smsc95xx_phy_wait_not_busy(struct usbnet *dev)
{
unsigned long start_time = jiffies;
u32 val;
+ int ret;
do {
- smsc95xx_read_reg(dev, MII_ADDR, &val);
+ ret = smsc95xx_read_reg(dev, MII_ADDR, &val);
+ check_warn_return(ret, "Error reading MII_ACCESS");
if (!(val & MII_BUSY_))
return 0;
} while (!time_after(jiffies, start_time + HZ));
@@ -134,33 +173,32 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
{
struct usbnet *dev = netdev_priv(netdev);
u32 val, addr;
+ int ret;
mutex_lock(&dev->phy_mutex);
/* confirm MII not busy */
- if (smsc95xx_phy_wait_not_busy(dev)) {
- netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_read\n");
- mutex_unlock(&dev->phy_mutex);
- return -EIO;
- }
+ ret = smsc95xx_phy_wait_not_busy(dev);
+ check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_read");
/* set the address, index & direction (read from PHY) */
phy_id &= dev->mii.phy_id_mask;
idx &= dev->mii.reg_num_mask;
addr = (phy_id << 11) | (idx << 6) | MII_READ_;
- smsc95xx_write_reg(dev, MII_ADDR, addr);
+ ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
+ check_warn_goto_done(ret, "Error writing MII_ADDR");
- if (smsc95xx_phy_wait_not_busy(dev)) {
- netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx);
- mutex_unlock(&dev->phy_mutex);
- return -EIO;
- }
+ ret = smsc95xx_phy_wait_not_busy(dev);
+ check_warn_goto_done(ret, "Timed out reading MII reg %02X", idx);
- smsc95xx_read_reg(dev, MII_DATA, &val);
+ ret = smsc95xx_read_reg(dev, MII_DATA, &val);
+ check_warn_goto_done(ret, "Error reading MII_DATA");
- mutex_unlock(&dev->phy_mutex);
+ ret = (u16)(val & 0xFFFF);
- return (u16)(val & 0xFFFF);
+done:
+ mutex_unlock(&dev->phy_mutex);
+ return ret;
}
static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
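
The mdio_read rework above replaces three separate unlock-and-return exits with a single done: label, so phy_mutex is released on every path, including the new error returns from the register helpers. The same shape, reduced to a single read (example_locked_mii_read is illustrative):

static int example_locked_mii_read(struct usbnet *dev, u32 *val)
{
        int ret;

        mutex_lock(&dev->phy_mutex);

        ret = smsc95xx_read_reg(dev, MII_DATA, val);
        check_warn_goto_done(ret, "Error reading MII_DATA");

        ret = 0;
done:
        /* mutex is dropped on success and on every error exit */
        mutex_unlock(&dev->phy_mutex);
        return ret;
}
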
@@ -168,38 +206,41 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
{
struct usbnet *dev = netdev_priv(netdev);
u32 val, addr;
+ int ret;
mutex_lock(&dev->phy_mutex);
/* confirm MII not busy */
- if (smsc95xx_phy_wait_not_busy(dev)) {
- netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_write\n");
- mutex_unlock(&dev->phy_mutex);
- return;
- }
+ ret = smsc95xx_phy_wait_not_busy(dev);
+ check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_write");
val = regval;
- smsc95xx_write_reg(dev, MII_DATA, val);
+ ret = smsc95xx_write_reg(dev, MII_DATA, val);
+ check_warn_goto_done(ret, "Error writing MII_DATA");
/* set the address, index & direction (write to PHY) */
phy_id &= dev->mii.phy_id_mask;
idx &= dev->mii.reg_num_mask;
addr = (phy_id << 11) | (idx << 6) | MII_WRITE_;
- smsc95xx_write_reg(dev, MII_ADDR, addr);
+ ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
+ check_warn_goto_done(ret, "Error writing MII_ADDR");
- if (smsc95xx_phy_wait_not_busy(dev))
- netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx);
+ ret = smsc95xx_phy_wait_not_busy(dev);
+ check_warn_goto_done(ret, "Timed out writing MII reg %02X", idx);
+done:
mutex_unlock(&dev->phy_mutex);
}
-static int smsc95xx_wait_eeprom(struct usbnet *dev)
+static int __must_check smsc95xx_wait_eeprom(struct usbnet *dev)
{
unsigned long start_time = jiffies;
u32 val;
+ int ret;
do {
- smsc95xx_read_reg(dev, E2P_CMD, &val);
+ ret = smsc95xx_read_reg(dev, E2P_CMD, &val);
+ check_warn_return(ret, "Error reading E2P_CMD");
if (!(val & E2P_CMD_BUSY_) || (val & E2P_CMD_TIMEOUT_))
break;
udelay(40);
@@ -213,13 +254,15 @@ static int smsc95xx_wait_eeprom(struct usbnet *dev)
return 0;
}
-static int smsc95xx_eeprom_confirm_not_busy(struct usbnet *dev)
+static int __must_check smsc95xx_eeprom_confirm_not_busy(struct usbnet *dev)
{
unsigned long start_time = jiffies;
u32 val;
+ int ret;
do {
- smsc95xx_read_reg(dev, E2P_CMD, &val);
+ ret = smsc95xx_read_reg(dev, E2P_CMD, &val);
+ check_warn_return(ret, "Error reading E2P_CMD");
if (!(val & E2P_CMD_BUSY_))
return 0;
@@ -246,13 +289,15 @@ static int smsc95xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length,
for (i = 0; i < length; i++) {
val = E2P_CMD_BUSY_ | E2P_CMD_READ_ | (offset & E2P_CMD_ADDR_);
- smsc95xx_write_reg(dev, E2P_CMD, val);
+ ret = smsc95xx_write_reg(dev, E2P_CMD, val);
+ check_warn_return(ret, "Error writing E2P_CMD");
ret = smsc95xx_wait_eeprom(dev);
if (ret < 0)
return ret;
- smsc95xx_read_reg(dev, E2P_DATA, &val);
+ ret = smsc95xx_read_reg(dev, E2P_DATA, &val);
+ check_warn_return(ret, "Error reading E2P_DATA");
data[i] = val & 0xFF;
offset++;
@@ -276,7 +321,8 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
/* Issue write/erase enable command */
val = E2P_CMD_BUSY_ | E2P_CMD_EWEN_;
- smsc95xx_write_reg(dev, E2P_CMD, val);
+ ret = smsc95xx_write_reg(dev, E2P_CMD, val);
+ check_warn_return(ret, "Error writing E2P_DATA");
ret = smsc95xx_wait_eeprom(dev);
if (ret < 0)
@@ -286,11 +332,13 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
/* Fill data register */
val = data[i];
- smsc95xx_write_reg(dev, E2P_DATA, val);
+ ret = smsc95xx_write_reg(dev, E2P_DATA, val);
+ check_warn_return(ret, "Error writing E2P_DATA");
/* Send "write" command */
val = E2P_CMD_BUSY_ | E2P_CMD_WRITE_ | (offset & E2P_CMD_ADDR_);
- smsc95xx_write_reg(dev, E2P_CMD, val);
+ ret = smsc95xx_write_reg(dev, E2P_CMD, val);
+ check_warn_return(ret, "Error writing E2P_CMD");
ret = smsc95xx_wait_eeprom(dev);
if (ret < 0)
@@ -308,14 +356,14 @@ static void smsc95xx_async_cmd_callback(struct urb *urb)
struct usbnet *dev = usb_context->dev;
int status = urb->status;
- if (status < 0)
- netdev_warn(dev->net, "async callback failed with %d\n", status);
+ check_warn(status, "async callback failed with %d\n", status);
kfree(usb_context);
usb_free_urb(urb);
}
-static int smsc95xx_write_reg_async(struct usbnet *dev, u16 index, u32 *data)
+static int __must_check smsc95xx_write_reg_async(struct usbnet *dev, u16 index,
+ u32 *data)
{
struct usb_context *usb_context;
int status;
@@ -371,6 +419,7 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
struct usbnet *dev = netdev_priv(netdev);
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
unsigned long flags;
+ int ret;
pdata->hash_hi = 0;
pdata->hash_lo = 0;
@@ -411,21 +460,23 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
/* Initiate async writes, as we can't wait for completion here */
- smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi);
- smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo);
- smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr);
+ ret = smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi);
+ check_warn(ret, "failed to initiate async write to HASHH");
+
+ ret = smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo);
+ check_warn(ret, "failed to initiate async write to HASHL");
+
+ ret = smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr);
+ check_warn(ret, "failed to initiate async write to MAC_CR");
}
-static void smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
- u16 lcladv, u16 rmtadv)
+static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
+ u16 lcladv, u16 rmtadv)
{
u32 flow, afc_cfg = 0;
int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg);
- if (ret < 0) {
- netdev_warn(dev->net, "error reading AFC_CFG\n");
- return;
- }
+ check_warn_return(ret, "Error reading AFC_CFG");
if (duplex == DUPLEX_FULL) {
u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
@@ -449,8 +500,13 @@ static void smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
afc_cfg |= 0xF;
}
- smsc95xx_write_reg(dev, FLOW, flow);
- smsc95xx_write_reg(dev, AFC_CFG, afc_cfg);
+ ret = smsc95xx_write_reg(dev, FLOW, flow);
+ check_warn_return(ret, "Error writing FLOW");
+
+ ret = smsc95xx_write_reg(dev, AFC_CFG, afc_cfg);
+ check_warn_return(ret, "Error writing AFC_CFG");
+
+ return 0;
}
static int smsc95xx_link_reset(struct usbnet *dev)
@@ -460,12 +516,14 @@ static int smsc95xx_link_reset(struct usbnet *dev)
struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
unsigned long flags;
u16 lcladv, rmtadv;
- u32 intdata;
+ int ret;
/* clear interrupt status */
- smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
- intdata = 0xFFFFFFFF;
- smsc95xx_write_reg(dev, INT_STS, intdata);
+ ret = smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
+ check_warn_return(ret, "Error reading PHY_INT_SRC");
+
+ ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
+ check_warn_return(ret, "Error writing INT_STS");
mii_check_media(mii, 1, 1);
mii_ethtool_gset(&dev->mii, &ecmd);
@@ -486,9 +544,11 @@ static int smsc95xx_link_reset(struct usbnet *dev)
}
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
- smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+ ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+ check_warn_return(ret, "Error writing MAC_CR");
- smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
+ ret = smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
+ check_warn_return(ret, "Error updating PHY flow control");
return 0;
}
@@ -524,10 +584,7 @@ static int smsc95xx_set_features(struct net_device *netdev,
int ret;
ret = smsc95xx_read_reg(dev, COE_CR, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret);
- return ret;
- }
+ check_warn_return(ret, "Failed to read COE_CR: %d\n", ret);
if (features & NETIF_F_HW_CSUM)
read_buf |= Tx_COE_EN_;
@@ -540,10 +597,7 @@ static int smsc95xx_set_features(struct net_device *netdev,
read_buf &= ~Rx_COE_EN_;
ret = smsc95xx_write_reg(dev, COE_CR, read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write COE_CR: %d\n", ret);
- return ret;
- }
+ check_warn_return(ret, "Failed to write COE_CR: %d\n", ret);
netif_dbg(dev, hw, dev->net, "COE_CR = 0x%08x\n", read_buf);
return 0;
@@ -608,6 +662,26 @@ smsc95xx_ethtool_getregs(struct net_device *netdev, struct ethtool_regs *regs,
}
}
+static void smsc95xx_ethtool_get_wol(struct net_device *net,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
+ wolinfo->supported = SUPPORTED_WAKE;
+ wolinfo->wolopts = pdata->wolopts;
+}
+
+static int smsc95xx_ethtool_set_wol(struct net_device *net,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
+ pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
+ return 0;
+}
+
static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_link = usbnet_get_link,
.nway_reset = usbnet_nway_reset,
@@ -621,6 +695,8 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
.set_eeprom = smsc95xx_ethtool_set_eeprom,
.get_regs_len = smsc95xx_ethtool_getregslen,
.get_regs = smsc95xx_ethtool_getregs,
+ .get_wol = smsc95xx_ethtool_get_wol,
+ .set_wol = smsc95xx_ethtool_set_wol,
};
static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -658,55 +734,56 @@ static int smsc95xx_set_mac_address(struct usbnet *dev)
int ret;
ret = smsc95xx_write_reg(dev, ADDRL, addr_lo);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write ADDRL: %d\n", ret);
- return ret;
- }
+ check_warn_return(ret, "Failed to write ADDRL: %d\n", ret);
ret = smsc95xx_write_reg(dev, ADDRH, addr_hi);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write ADDRH: %d\n", ret);
- return ret;
- }
+ check_warn_return(ret, "Failed to write ADDRH: %d\n", ret);
return 0;
}
/* starts the TX path */
-static void smsc95xx_start_tx_path(struct usbnet *dev)
+static int smsc95xx_start_tx_path(struct usbnet *dev)
{
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
unsigned long flags;
- u32 reg_val;
+ int ret;
/* Enable Tx at MAC */
spin_lock_irqsave(&pdata->mac_cr_lock, flags);
pdata->mac_cr |= MAC_CR_TXEN_;
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
- smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+ ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+ check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret);
/* Enable Tx at SCSRs */
- reg_val = TX_CFG_ON_;
- smsc95xx_write_reg(dev, TX_CFG, reg_val);
+ ret = smsc95xx_write_reg(dev, TX_CFG, TX_CFG_ON_);
+ check_warn_return(ret, "Failed to write TX_CFG: %d\n", ret);
+
+ return 0;
}
/* Starts the Receive path */
-static void smsc95xx_start_rx_path(struct usbnet *dev)
+static int smsc95xx_start_rx_path(struct usbnet *dev)
{
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
unsigned long flags;
+ int ret;
spin_lock_irqsave(&pdata->mac_cr_lock, flags);
pdata->mac_cr |= MAC_CR_RXEN_;
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
- smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+ ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
+ check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret);
+
+ return 0;
}
static int smsc95xx_phy_initialize(struct usbnet *dev)
{
- int bmcr, timeout = 0;
+ int bmcr, ret, timeout = 0;
/* Initialize MII structure */
dev->mii.dev = dev->net;
@@ -735,7 +812,8 @@ static int smsc95xx_phy_initialize(struct usbnet *dev)
ADVERTISE_PAUSE_ASYM);
/* read to clear */
- smsc95xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
+ ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
+ check_warn_return(ret, "Failed to read PHY_INT_SRC during init");
smsc95xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
PHY_INT_MASK_DEFAULT_);
@@ -753,22 +831,14 @@ static int smsc95xx_reset(struct usbnet *dev)
netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
- write_buf = HW_CFG_LRST_;
- ret = smsc95xx_write_reg(dev, HW_CFG, write_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write HW_CFG_LRST_ bit in HW_CFG register, ret = %d\n",
- ret);
- return ret;
- }
+ ret = smsc95xx_write_reg(dev, HW_CFG, HW_CFG_LRST_);
+ check_warn_return(ret, "Failed to write HW_CFG_LRST_ bit in HW_CFG\n");
timeout = 0;
do {
- ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
- return ret;
- }
msleep(10);
+ ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
+ check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
timeout++;
} while ((read_buf & HW_CFG_LRST_) && (timeout < 100));
@@ -777,21 +847,14 @@ static int smsc95xx_reset(struct usbnet *dev)
return ret;
}
- write_buf = PM_CTL_PHY_RST_;
- ret = smsc95xx_write_reg(dev, PM_CTRL, write_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write PM_CTRL: %d\n", ret);
- return ret;
- }
+ ret = smsc95xx_write_reg(dev, PM_CTRL, PM_CTL_PHY_RST_);
+ check_warn_return(ret, "Failed to write PM_CTRL: %d\n", ret);
timeout = 0;
do {
- ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read PM_CTRL: %d\n", ret);
- return ret;
- }
msleep(10);
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf);
+ check_warn_return(ret, "Failed to read PM_CTRL: %d\n", ret);
timeout++;
} while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100));
@@ -808,10 +871,7 @@ static int smsc95xx_reset(struct usbnet *dev)
"MAC Address: %pM\n", dev->net->dev_addr);
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
- return ret;
- }
+ check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
netif_dbg(dev, ifup, dev->net,
"Read Value from HW_CFG : 0x%08x\n", read_buf);
@@ -819,17 +879,10 @@ static int smsc95xx_reset(struct usbnet *dev)
read_buf |= HW_CFG_BIR_;
ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write HW_CFG_BIR_ bit in HW_CFG register, ret = %d\n",
- ret);
- return ret;
- }
+ check_warn_return(ret, "Failed to write HW_CFG_BIR_ bit in HW_CFG\n");
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
- return ret;
- }
+ check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
netif_dbg(dev, ifup, dev->net,
"Read Value from HW_CFG after writing HW_CFG_BIR_: 0x%08x\n",
read_buf);
@@ -849,41 +902,28 @@ static int smsc95xx_reset(struct usbnet *dev)
"rx_urb_size=%ld\n", (ulong)dev->rx_urb_size);
ret = smsc95xx_write_reg(dev, BURST_CAP, burst_cap);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret);
- return ret;
- }
+ check_warn_return(ret, "Failed to write BURST_CAP: %d\n", ret);
ret = smsc95xx_read_reg(dev, BURST_CAP, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret);
- return ret;
- }
+ check_warn_return(ret, "Failed to read BURST_CAP: %d\n", ret);
+
netif_dbg(dev, ifup, dev->net,
"Read Value from BURST_CAP after writing: 0x%08x\n",
read_buf);
- read_buf = DEFAULT_BULK_IN_DELAY;
- ret = smsc95xx_write_reg(dev, BULK_IN_DLY, read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "ret = %d\n", ret);
- return ret;
- }
+ ret = smsc95xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
+ check_warn_return(ret, "Failed to write BULK_IN_DLY: %d\n", ret);
ret = smsc95xx_read_reg(dev, BULK_IN_DLY, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret);
- return ret;
- }
+ check_warn_return(ret, "Failed to read BULK_IN_DLY: %d\n", ret);
+
netif_dbg(dev, ifup, dev->net,
"Read Value from BULK_IN_DLY after writing: 0x%08x\n",
read_buf);
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
- return ret;
- }
+ check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
+
netif_dbg(dev, ifup, dev->net,
"Read Value from HW_CFG: 0x%08x\n", read_buf);
@@ -896,101 +936,66 @@ static int smsc95xx_reset(struct usbnet *dev)
read_buf |= NET_IP_ALIGN << 9;
ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write HW_CFG register, ret=%d\n",
- ret);
- return ret;
- }
+ check_warn_return(ret, "Failed to write HW_CFG: %d\n", ret);
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
- return ret;
- }
+ check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret);
+
netif_dbg(dev, ifup, dev->net,
"Read Value from HW_CFG after writing: 0x%08x\n", read_buf);
- write_buf = 0xFFFFFFFF;
- ret = smsc95xx_write_reg(dev, INT_STS, write_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write INT_STS register, ret=%d\n",
- ret);
- return ret;
- }
+ ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
+ check_warn_return(ret, "Failed to write INT_STS: %d\n", ret);
ret = smsc95xx_read_reg(dev, ID_REV, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret);
- return ret;
- }
+ check_warn_return(ret, "Failed to read ID_REV: %d\n", ret);
netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
/* Configure GPIO pins as LED outputs */
write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
LED_GPIO_CFG_FDX_LED;
ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write LED_GPIO_CFG register, ret=%d\n",
- ret);
- return ret;
- }
+ check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d\n", ret);
/* Init Tx */
- write_buf = 0;
- ret = smsc95xx_write_reg(dev, FLOW, write_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret);
- return ret;
- }
+ ret = smsc95xx_write_reg(dev, FLOW, 0);
+ check_warn_return(ret, "Failed to write FLOW: %d\n", ret);
- read_buf = AFC_CFG_DEFAULT;
- ret = smsc95xx_write_reg(dev, AFC_CFG, read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write AFC_CFG: %d\n", ret);
- return ret;
- }
+ ret = smsc95xx_write_reg(dev, AFC_CFG, AFC_CFG_DEFAULT);
+ check_warn_return(ret, "Failed to write AFC_CFG: %d\n", ret);
/* Don't need mac_cr_lock during initialisation */
ret = smsc95xx_read_reg(dev, MAC_CR, &pdata->mac_cr);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret);
- return ret;
- }
+ check_warn_return(ret, "Failed to read MAC_CR: %d\n", ret);
/* Init Rx */
/* Set Vlan */
- write_buf = (u32)ETH_P_8021Q;
- ret = smsc95xx_write_reg(dev, VLAN1, write_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write VAN1: %d\n", ret);
- return ret;
- }
+ ret = smsc95xx_write_reg(dev, VLAN1, (u32)ETH_P_8021Q);
+ check_warn_return(ret, "Failed to write VLAN1: %d\n", ret);
/* Enable or disable checksum offload engines */
- smsc95xx_set_features(dev->net, dev->net->features);
+ ret = smsc95xx_set_features(dev->net, dev->net->features);
+ check_warn_return(ret, "Failed to set checksum offload features");
smsc95xx_set_multicast(dev->net);
- if (smsc95xx_phy_initialize(dev) < 0)
- return -EIO;
+ ret = smsc95xx_phy_initialize(dev);
+ check_warn_return(ret, "Failed to init PHY");
ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret);
- return ret;
- }
+ check_warn_return(ret, "Failed to read INT_EP_CTL: %d\n", ret);
/* enable PHY interrupts */
read_buf |= INT_EP_CTL_PHY_INT_;
ret = smsc95xx_write_reg(dev, INT_EP_CTL, read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret);
- return ret;
- }
+ check_warn_return(ret, "Failed to write INT_EP_CTL: %d\n", ret);
- smsc95xx_start_tx_path(dev);
- smsc95xx_start_rx_path(dev);
+ ret = smsc95xx_start_tx_path(dev);
+ check_warn_return(ret, "Failed to start TX path");
+
+ ret = smsc95xx_start_rx_path(dev);
+ check_warn_return(ret, "Failed to start RX path");
netif_dbg(dev, ifup, dev->net, "smsc95xx_reset, return 0\n");
return 0;
@@ -1017,10 +1022,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n");
ret = usbnet_get_endpoints(dev, intf);
- if (ret < 0) {
- netdev_warn(dev->net, "usbnet_get_endpoints failed: %d\n", ret);
- return ret;
- }
+ check_warn_return(ret, "usbnet_get_endpoints failed: %d\n", ret);
dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc95xx_priv),
GFP_KERNEL);
@@ -1064,6 +1066,153 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
}
}
+static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ int ret;
+ u32 val;
+
+ ret = usbnet_suspend(intf, message);
+ check_warn_return(ret, "usbnet_suspend error");
+
+ /* if no wol options set, enter lowest power SUSPEND2 mode */
+ if (!(pdata->wolopts & SUPPORTED_WAKE)) {
+ netdev_info(dev->net, "entering SUSPEND2 mode");
+
+ /* disable energy detect (link up) & wake up events */
+ ret = smsc95xx_read_reg(dev, WUCSR, &val);
+ check_warn_return(ret, "Error reading WUCSR");
+
+ val &= ~(WUCSR_MPEN_ | WUCSR_WAKE_EN_);
+
+ ret = smsc95xx_write_reg(dev, WUCSR, val);
+ check_warn_return(ret, "Error writing WUCSR");
+
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+ check_warn_return(ret, "Error reading PM_CTRL");
+
+ val &= ~(PM_CTL_ED_EN_ | PM_CTL_WOL_EN_);
+
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+ check_warn_return(ret, "Error writing PM_CTRL");
+
+ /* enter suspend2 mode */
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+ check_warn_return(ret, "Error reading PM_CTRL");
+
+ val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
+ val |= PM_CTL_SUS_MODE_2;
+
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+ check_warn_return(ret, "Error writing PM_CTRL");
+
+ return 0;
+ }
+
+ if (pdata->wolopts & WAKE_MAGIC) {
+ /* clear any pending magic packet status */
+ ret = smsc95xx_read_reg(dev, WUCSR, &val);
+ check_warn_return(ret, "Error reading WUCSR");
+
+ val |= WUCSR_MPR_;
+
+ ret = smsc95xx_write_reg(dev, WUCSR, val);
+ check_warn_return(ret, "Error writing WUCSR");
+ }
+
+ /* enable/disable magic packet wake */
+ ret = smsc95xx_read_reg(dev, WUCSR, &val);
+ check_warn_return(ret, "Error reading WUCSR");
+
+ if (pdata->wolopts & WAKE_MAGIC) {
+ netdev_info(dev->net, "enabling magic packet wakeup");
+ val |= WUCSR_MPEN_;
+ } else {
+ netdev_info(dev->net, "disabling magic packet wakeup");
+ val &= ~WUCSR_MPEN_;
+ }
+
+ ret = smsc95xx_write_reg(dev, WUCSR, val);
+ check_warn_return(ret, "Error writing WUCSR");
+
+ /* enable wol wakeup source */
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+ check_warn_return(ret, "Error reading PM_CTRL");
+
+ val |= PM_CTL_WOL_EN_;
+
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+ check_warn_return(ret, "Error writing PM_CTRL");
+
+ /* enable receiver */
+ smsc95xx_start_rx_path(dev);
+
+ /* some wol options are enabled, so enter SUSPEND0 */
+ netdev_info(dev->net, "entering SUSPEND0 mode");
+
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+ check_warn_return(ret, "Error reading PM_CTRL");
+
+ val &= (~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_));
+ val |= PM_CTL_SUS_MODE_0;
+
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+ check_warn_return(ret, "Error writing PM_CTRL");
+
+ /* clear wol status */
+ val &= ~PM_CTL_WUPS_;
+ val |= PM_CTL_WUPS_WOL_;
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+ check_warn_return(ret, "Error writing PM_CTRL");
+
+ /* read back PM_CTRL */
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+ check_warn_return(ret, "Error reading PM_CTRL");
+
+ smsc95xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
+
+ return 0;
+}
+
+static int smsc95xx_resume(struct usb_interface *intf)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ int ret;
+ u32 val;
+
+ BUG_ON(!dev);
+
+ if (pdata->wolopts & WAKE_MAGIC) {
+ smsc95xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP);
+
+ /* Disable magic packet wake */
+ ret = smsc95xx_read_reg(dev, WUCSR, &val);
+ check_warn_return(ret, "Error reading WUCSR");
+
+ val &= ~WUCSR_MPEN_;
+
+ ret = smsc95xx_write_reg(dev, WUCSR, val);
+ check_warn_return(ret, "Error writing WUCSR");
+
+ /* clear wake-up status */
+ ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+ check_warn_return(ret, "Error reading PM_CTRL");
+
+ val &= ~PM_CTL_WOL_EN_;
+ val |= PM_CTL_WUPS_;
+
+ ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+ check_warn_return(ret, "Error writing PM_CTRL");
+ }
+
+ ret = usbnet_resume(intf);
+ check_warn_return(ret, "usbnet_resume error");
+
+ return 0;
+}
+
static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
{
skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
@@ -1326,8 +1475,9 @@ static struct usb_driver smsc95xx_driver = {
.name = "smsc95xx",
.id_table = products,
.probe = usbnet_probe,
- .suspend = usbnet_suspend,
- .resume = usbnet_resume,
+ .suspend = smsc95xx_suspend,
+ .resume = smsc95xx_resume,
+ .reset_resume = smsc95xx_resume,
.disconnect = usbnet_disconnect,
.disable_hub_initiated_lpm = 1,
};
diff --git a/drivers/net/usb/smsc95xx.h b/drivers/net/usb/smsc95xx.h
index 86bc44977fbd..2ff9815aa27c 100644
--- a/drivers/net/usb/smsc95xx.h
+++ b/drivers/net/usb/smsc95xx.h
@@ -63,6 +63,7 @@
#define INT_STS_TDFO_ (0x00001000)
#define INT_STS_RXDF_ (0x00000800)
#define INT_STS_GPIOS_ (0x000007FF)
+#define INT_STS_CLEAR_ALL_ (0xFFFFFFFF)
#define RX_CFG (0x0C)
#define RX_FIFO_FLUSH_ (0x00000001)
@@ -83,12 +84,16 @@
#define HW_CFG_BCE_ (0x00000002)
#define HW_CFG_SRST_ (0x00000001)
+#define RX_FIFO_INF (0x18)
+
#define PM_CTRL (0x20)
+#define PM_CTL_RES_CLR_WKP_STS (0x00000200)
#define PM_CTL_DEV_RDY_ (0x00000080)
#define PM_CTL_SUS_MODE_ (0x00000060)
#define PM_CTL_SUS_MODE_0 (0x00000000)
#define PM_CTL_SUS_MODE_1 (0x00000020)
-#define PM_CTL_SUS_MODE_2 (0x00000060)
+#define PM_CTL_SUS_MODE_2 (0x00000040)
+#define PM_CTL_SUS_MODE_3 (0x00000060)
#define PM_CTL_PHY_RST_ (0x00000010)
#define PM_CTL_WOL_EN_ (0x00000008)
#define PM_CTL_ED_EN_ (0x00000004)
@@ -200,6 +205,11 @@
#define WUFF (0x128)
#define WUCSR (0x12C)
+#define WUCSR_GUE_ (0x00000200)
+#define WUCSR_WUFR_ (0x00000040)
+#define WUCSR_MPR_ (0x00000020)
+#define WUCSR_WAKE_EN_ (0x00000004)
+#define WUCSR_MPEN_ (0x00000002)
#define COE_CR (0x130)
#define Tx_COE_EN_ (0x00010000)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 5852361032c4..e522ff70444c 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -348,6 +348,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
if (tbp[IFLA_ADDRESS] == NULL)
eth_hw_addr_random(peer);
+ if (ifmp && (dev->ifindex != 0))
+ peer->ifindex = ifmp->ifi_index;
+
err = register_netdevice(peer);
put_net(net);
net = NULL;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 83d2b0c34c5e..cbf8b0625352 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -521,7 +521,7 @@ static void refill_work(struct work_struct *work)
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again. */
if (still_empty)
- queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
+ schedule_delayed_work(&vi->refill, HZ/2);
}
static int virtnet_poll(struct napi_struct *napi, int budget)
@@ -540,7 +540,7 @@ again:
if (vi->num < vi->max / 2) {
if (!try_fill_recv(vi, GFP_ATOMIC))
- queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+ schedule_delayed_work(&vi->refill, 0);
}
/* Out of packets? */
@@ -745,7 +745,7 @@ static int virtnet_open(struct net_device *dev)
/* Make sure we have some buffers: if oom use wq. */
if (!try_fill_recv(vi, GFP_KERNEL))
- queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+ schedule_delayed_work(&vi->refill, 0);
virtnet_napi_enable(vi);
return 0;
@@ -993,7 +993,7 @@ static void virtnet_config_changed_work(struct work_struct *work)
goto done;
if (v & VIRTIO_NET_S_ANNOUNCE) {
- netif_notify_peers(vi->dev);
+ netdev_notify_peers(vi->dev);
virtnet_ack_link_announce(vi);
}
@@ -1020,7 +1020,7 @@ static void virtnet_config_changed(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- queue_work(system_nrt_wq, &vi->config_work);
+ schedule_work(&vi->config_work);
}
static int init_vqs(struct virtnet_info *vi)
@@ -1152,7 +1152,7 @@ static int virtnet_probe(struct virtio_device *vdev)
otherwise get link status from config. */
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
netif_carrier_off(dev);
- queue_work(system_nrt_wq, &vi->config_work);
+ schedule_work(&vi->config_work);
} else {
vi->status = VIRTIO_NET_S_LINK_UP;
netif_carrier_on(dev);
@@ -1264,7 +1264,7 @@ static int virtnet_restore(struct virtio_device *vdev)
netif_device_attach(vi->dev);
if (!try_fill_recv(vi, GFP_KERNEL))
- queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+ schedule_delayed_work(&vi->refill, 0);
mutex_lock(&vi->config_lock);
vi->config_enable = true;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
new file mode 100644
index 000000000000..51de9edb55f5
--- /dev/null
+++ b/drivers/net/vxlan.c
@@ -0,0 +1,1219 @@
+/*
+ * VXLAN: Virtual eXtensible Local Area Network
+ *
+ * Copyright (c) 2012 Vyatta Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * TODO
+ * - use IANA UDP port number (when defined)
+ * - IPv6 (not in RFC)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/rculist.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/igmp.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/version.h>
+#include <linux/hash.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/rtnetlink.h>
+#include <net/route.h>
+#include <net/dsfield.h>
+#include <net/inet_ecn.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+#define VXLAN_VERSION "0.1"
+
+#define VNI_HASH_BITS 10
+#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
+#define FDB_HASH_BITS 8
+#define FDB_HASH_SIZE (1<<FDB_HASH_BITS)
+#define FDB_AGE_DEFAULT 300 /* 5 min */
+#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
+
+#define VXLAN_N_VID (1u << 24)
+#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
+/* VLAN + IP header + UDP + VXLAN */
+#define VXLAN_HEADROOM (4 + 20 + 8 + 8)
+
+#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
+
+/* VXLAN protocol header */
+struct vxlanhdr {
+ __be32 vx_flags;
+ __be32 vx_vni;
+};
+
+/* UDP port for VXLAN traffic. */
+static unsigned int vxlan_port __read_mostly = 8472;
+module_param_named(udp_port, vxlan_port, uint, 0444);
+MODULE_PARM_DESC(udp_port, "Destination UDP port");
+
+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
+/* per-net private data for this module */
+static unsigned int vxlan_net_id;
+struct vxlan_net {
+ struct socket *sock; /* UDP encap socket */
+ struct hlist_head vni_list[VNI_HASH_SIZE];
+};
+
+/* Forwarding table entry */
+struct vxlan_fdb {
+ struct hlist_node hlist; /* linked list of entries */
+ struct rcu_head rcu;
+ unsigned long updated; /* jiffies */
+ unsigned long used;
+ __be32 remote_ip;
+ u16 state; /* see ndm_state */
+ u8 eth_addr[ETH_ALEN];
+};
+
+/* Per-cpu network traffic stats */
+struct vxlan_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+};
+
+/* Pseudo network device */
+struct vxlan_dev {
+ struct hlist_node hlist;
+ struct net_device *dev;
+ struct vxlan_stats __percpu *stats;
+ __u32 vni; /* virtual network id */
+ __be32 gaddr; /* multicast group */
+ __be32 saddr; /* source address */
+ unsigned int link; /* link to multicast over */
+ __u8 tos; /* TOS override */
+ __u8 ttl;
+ bool learn;
+
+ unsigned long age_interval;
+ struct timer_list age_timer;
+ spinlock_t hash_lock;
+ unsigned int addrcnt;
+ unsigned int addrmax;
+ unsigned int addrexceeded;
+
+ struct hlist_head fdb_head[FDB_HASH_SIZE];
+};
+
+/* salt for hash table */
+static u32 vxlan_salt __read_mostly;
+
+static inline struct hlist_head *vni_head(struct net *net, u32 id)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
+ return &vn->vni_list[hash_32(id, VNI_HASH_BITS)];
+}
+
+/* Look up VNI in a per net namespace table */
+static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
+{
+ struct vxlan_dev *vxlan;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_rcu(vxlan, node, vni_head(net, id), hlist) {
+ if (vxlan->vni == id)
+ return vxlan;
+ }
+
+ return NULL;
+}
+
+/* Fill in neighbour message in skbuff. */
+static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
+ const struct vxlan_fdb *fdb,
+ u32 portid, u32 seq, int type, unsigned int flags)
+{
+ unsigned long now = jiffies;
+ struct nda_cacheinfo ci;
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ memset(ndm, 0, sizeof(*ndm));
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_state = fdb->state;
+ ndm->ndm_ifindex = vxlan->dev->ifindex;
+ ndm->ndm_flags = NTF_SELF;
+ ndm->ndm_type = NDA_DST;
+
+ if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
+ goto nla_put_failure;
+
+ if (nla_put_be32(skb, NDA_DST, fdb->remote_ip))
+ goto nla_put_failure;
+
+ ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
+ ci.ndm_confirmed = 0;
+ ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
+ ci.ndm_refcnt = 0;
+
+ if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
+ goto nla_put_failure;
+
+ return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static inline size_t vxlan_nlmsg_size(void)
+{
+ return NLMSG_ALIGN(sizeof(struct ndmsg))
+ + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
+ + nla_total_size(sizeof(__be32)) /* NDA_DST */
+ + nla_total_size(sizeof(struct nda_cacheinfo));
+}
+
+static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
+ const struct vxlan_fdb *fdb, int type)
+{
+ struct net *net = dev_net(vxlan->dev);
+ struct sk_buff *skb;
+ int err = -ENOBUFS;
+
+ skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
+ if (skb == NULL)
+ goto errout;
+
+ err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0);
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
+
+ rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+ return;
+errout:
+ if (err < 0)
+ rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+}
+
+/* Hash Ethernet address */
+static u32 eth_hash(const unsigned char *addr)
+{
+ u64 value = get_unaligned((u64 *)addr);
+
+ /* only want 6 bytes */
+#ifdef __BIG_ENDIAN
+ value <<= 16;
+#else
+ value >>= 16;
+#endif
+ return hash_64(value, FDB_HASH_BITS);
+}
+
+/* Hash chain to use given mac address */
+static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
+ const u8 *mac)
+{
+ return &vxlan->fdb_head[eth_hash(mac)];
+}
+
+/* Look up Ethernet address in forwarding table */
+static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
+ const u8 *mac)
+{
+ struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
+ struct vxlan_fdb *f;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_rcu(f, node, head, hlist) {
+ if (compare_ether_addr(mac, f->eth_addr) == 0)
+ return f;
+ }
+
+ return NULL;
+}
+
+/* Add new entry to forwarding table -- assumes lock held */
+static int vxlan_fdb_create(struct vxlan_dev *vxlan,
+ const u8 *mac, __be32 ip,
+ __u16 state, __u16 flags)
+{
+ struct vxlan_fdb *f;
+ int notify = 0;
+
+ f = vxlan_find_mac(vxlan, mac);
+ if (f) {
+ if (flags & NLM_F_EXCL) {
+ netdev_dbg(vxlan->dev,
+ "lost race to create %pM\n", mac);
+ return -EEXIST;
+ }
+ if (f->state != state) {
+ f->state = state;
+ f->updated = jiffies;
+ notify = 1;
+ }
+ } else {
+ if (!(flags & NLM_F_CREATE))
+ return -ENOENT;
+
+ if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
+ return -ENOSPC;
+
+ netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
+ f = kmalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ return -ENOMEM;
+
+ notify = 1;
+ f->remote_ip = ip;
+ f->state = state;
+ f->updated = f->used = jiffies;
+ memcpy(f->eth_addr, mac, ETH_ALEN);
+
+ ++vxlan->addrcnt;
+ hlist_add_head_rcu(&f->hlist,
+ vxlan_fdb_head(vxlan, mac));
+ }
+
+ if (notify)
+ vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
+
+ return 0;
+}
+
+static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
+{
+ netdev_dbg(vxlan->dev,
+ "delete %pM\n", f->eth_addr);
+
+ --vxlan->addrcnt;
+ vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);
+
+ hlist_del_rcu(&f->hlist);
+ kfree_rcu(f, rcu);
+}
+
+/* Add static entry (via netlink) */
+static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 flags)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ __be32 ip;
+ int err;
+
+ if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
+ pr_info("RTM_NEWNEIGH with invalid state %#x\n",
+ ndm->ndm_state);
+ return -EINVAL;
+ }
+
+ if (tb[NDA_DST] == NULL)
+ return -EINVAL;
+
+ if (nla_len(tb[NDA_DST]) != sizeof(__be32))
+ return -EAFNOSUPPORT;
+
+ ip = nla_get_be32(tb[NDA_DST]);
+
+ spin_lock_bh(&vxlan->hash_lock);
+ err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags);
+ spin_unlock_bh(&vxlan->hash_lock);
+
+ return err;
+}
+
+/* Delete entry (via netlink) */
+static int vxlan_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
+ const unsigned char *addr)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_fdb *f;
+ int err = -ENOENT;
+
+ spin_lock_bh(&vxlan->hash_lock);
+ f = vxlan_find_mac(vxlan, addr);
+ if (f) {
+ vxlan_fdb_destroy(vxlan, f);
+ err = 0;
+ }
+ spin_unlock_bh(&vxlan->hash_lock);
+
+ return err;
+}
+
+/* Dump forwarding table */
+static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *dev, int idx)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ unsigned int h;
+
+ for (h = 0; h < FDB_HASH_SIZE; ++h) {
+ struct vxlan_fdb *f;
+ struct hlist_node *n;
+ int err;
+
+ hlist_for_each_entry_rcu(f, n, &vxlan->fdb_head[h], hlist) {
+ if (idx < cb->args[0])
+ goto skip;
+
+ err = vxlan_fdb_info(skb, vxlan, f,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH,
+ NLM_F_MULTI);
+ if (err < 0)
+ break;
+skip:
+ ++idx;
+ }
+ }
+
+ return idx;
+}
+
+/* Watch incoming packets to learn mapping between Ethernet address
+ * and Tunnel endpoint.
+ */
+static void vxlan_snoop(struct net_device *dev,
+ __be32 src_ip, const u8 *src_mac)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_fdb *f;
+ int err;
+
+ f = vxlan_find_mac(vxlan, src_mac);
+ if (likely(f)) {
+ f->used = jiffies;
+ if (likely(f->remote_ip == src_ip))
+ return;
+
+ if (net_ratelimit())
+ netdev_info(dev,
+ "%pM migrated from %pI4 to %pI4\n",
+ src_mac, &f->remote_ip, &src_ip);
+
+ f->remote_ip = src_ip;
+ f->updated = jiffies;
+ } else {
+ /* learned new entry */
+ spin_lock(&vxlan->hash_lock);
+ err = vxlan_fdb_create(vxlan, src_mac, src_ip,
+ NUD_REACHABLE,
+ NLM_F_EXCL|NLM_F_CREATE);
+ spin_unlock(&vxlan->hash_lock);
+ }
+}
+
+
+/* See if multicast group is already in use by another ID */
+static bool vxlan_group_used(struct vxlan_net *vn,
+ const struct vxlan_dev *this)
+{
+ const struct vxlan_dev *vxlan;
+ struct hlist_node *node;
+ unsigned h;
+
+ for (h = 0; h < VNI_HASH_SIZE; ++h)
+ hlist_for_each_entry(vxlan, node, &vn->vni_list[h], hlist) {
+ if (vxlan == this)
+ continue;
+
+ if (!netif_running(vxlan->dev))
+ continue;
+
+ if (vxlan->gaddr == this->gaddr)
+ return true;
+ }
+
+ return false;
+}
+
+/* kernel equivalent to IP_ADD_MEMBERSHIP */
+static int vxlan_join_group(struct net_device *dev)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
+ struct sock *sk = vn->sock->sk;
+ struct ip_mreqn mreq = {
+ .imr_multiaddr.s_addr = vxlan->gaddr,
+ };
+ int err;
+
+ /* Already a member of group */
+ if (vxlan_group_used(vn, vxlan))
+ return 0;
+
+ /* Need to drop RTNL to call multicast join */
+ rtnl_unlock();
+ lock_sock(sk);
+ err = ip_mc_join_group(sk, &mreq);
+ release_sock(sk);
+ rtnl_lock();
+
+ return err;
+}
+
+
+/* kernel equivalent to IP_DROP_MEMBERSHIP */
+static int vxlan_leave_group(struct net_device *dev)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
+ int err = 0;
+ struct sock *sk = vn->sock->sk;
+ struct ip_mreqn mreq = {
+ .imr_multiaddr.s_addr = vxlan->gaddr,
+ };
+
+ /* Only leave group when last vxlan is done. */
+ if (vxlan_group_used(vn, vxlan))
+ return 0;
+
+ /* Need to drop RTNL to call multicast leave */
+ rtnl_unlock();
+ lock_sock(sk);
+ err = ip_mc_leave_group(sk, &mreq);
+ release_sock(sk);
+ rtnl_lock();
+
+ return err;
+}
+
+/* Callback from net/ipv4/udp.c to receive packets */
+static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct iphdr *oip;
+ struct vxlanhdr *vxh;
+ struct vxlan_dev *vxlan;
+ struct vxlan_stats *stats;
+ __u32 vni;
+ int err;
+
+ /* pop off outer UDP header */
+ __skb_pull(skb, sizeof(struct udphdr));
+
+ /* Need Vxlan and inner Ethernet header to be present */
+ if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
+ goto error;
+
+ /* Drop packets with reserved bits set */
+ vxh = (struct vxlanhdr *) skb->data;
+ if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
+ (vxh->vx_vni & htonl(0xff))) {
+ netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
+ ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
+ goto error;
+ }
+
+ __skb_pull(skb, sizeof(struct vxlanhdr));
+ skb_postpull_rcsum(skb, eth_hdr(skb), sizeof(struct vxlanhdr));
+
+ /* Is this VNI defined? */
+ vni = ntohl(vxh->vx_vni) >> 8;
+ vxlan = vxlan_find_vni(sock_net(sk), vni);
+ if (!vxlan) {
+ netdev_dbg(skb->dev, "unknown vni %d\n", vni);
+ goto drop;
+ }
+
+ if (!pskb_may_pull(skb, ETH_HLEN)) {
+ vxlan->dev->stats.rx_length_errors++;
+ vxlan->dev->stats.rx_errors++;
+ goto drop;
+ }
+
+ /* Re-examine inner Ethernet packet */
+ oip = ip_hdr(skb);
+ skb->protocol = eth_type_trans(skb, vxlan->dev);
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+
+ /* Ignore packet loops (and multicast echo) */
+ if (compare_ether_addr(eth_hdr(skb)->h_source,
+ vxlan->dev->dev_addr) == 0)
+ goto drop;
+
+ if (vxlan->learn)
+ vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);
+
+ __skb_tunnel_rx(skb, vxlan->dev);
+ skb_reset_network_header(skb);
+
+ err = IP_ECN_decapsulate(oip, skb);
+ if (unlikely(err)) {
+ if (log_ecn_error)
+ net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+ &oip->saddr, oip->tos);
+ if (err > 1) {
+ ++vxlan->dev->stats.rx_frame_errors;
+ ++vxlan->dev->stats.rx_errors;
+ goto drop;
+ }
+ }
+
+ stats = this_cpu_ptr(vxlan->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+
+ netif_rx(skb);
+
+ return 0;
+error:
+ /* Put UDP header back */
+ __skb_push(skb, sizeof(struct udphdr));
+
+ return 1;
+drop:
+ /* Consume bad packet */
+ kfree_skb(skb);
+ return 0;
+}
+
+/* Extract dsfield from inner protocol */
+static inline u8 vxlan_get_dsfield(const struct iphdr *iph,
+ const struct sk_buff *skb)
+{
+ if (skb->protocol == htons(ETH_P_IP))
+ return iph->tos;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ return ipv6_get_dsfield((const struct ipv6hdr *)iph);
+ else
+ return 0;
+}
+
+/* Propagate ECN bits out */
+static inline u8 vxlan_ecn_encap(u8 tos,
+ const struct iphdr *iph,
+ const struct sk_buff *skb)
+{
+ u8 inner = vxlan_get_dsfield(iph, skb);
+
+ return INET_ECN_encapsulate(tos, inner);
+}
+
+/* Transmit local packets over VXLAN
+ *
+ * Outer IP header inherits ECN and DF from inner header.
+ * Outer UDP destination is the VXLAN-assigned port.
+ * The source port is based on the flow hash if available,
+ * otherwise a random value is used.
+ */
+static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct rtable *rt;
+ const struct ethhdr *eth;
+ const struct iphdr *old_iph;
+ struct iphdr *iph;
+ struct vxlanhdr *vxh;
+ struct udphdr *uh;
+ struct flowi4 fl4;
+ struct vxlan_fdb *f;
+ unsigned int pkt_len = skb->len;
+ u32 hash;
+ __be32 dst;
+ __be16 df = 0;
+ __u8 tos, ttl;
+ int err;
+
+ /* Need space for new headers (invalidates iph ptr) */
+ if (skb_cow_head(skb, VXLAN_HEADROOM))
+ goto drop;
+
+ eth = (void *)skb->data;
+ old_iph = ip_hdr(skb);
+
+ if (!is_multicast_ether_addr(eth->h_dest) &&
+ (f = vxlan_find_mac(vxlan, eth->h_dest)))
+ dst = f->remote_ip;
+ else if (vxlan->gaddr) {
+ dst = vxlan->gaddr;
+ } else
+ goto drop;
+
+ ttl = vxlan->ttl;
+ if (!ttl && IN_MULTICAST(ntohl(dst)))
+ ttl = 1;
+
+ tos = vxlan->tos;
+ if (tos == 1)
+ tos = vxlan_get_dsfield(old_iph, skb);
+
+ hash = skb_get_rxhash(skb);
+
+ rt = ip_route_output_gre(dev_net(dev), &fl4, dst,
+ vxlan->saddr, vxlan->vni,
+ RT_TOS(tos), vxlan->link);
+ if (IS_ERR(rt)) {
+ netdev_dbg(dev, "no route to %pI4\n", &dst);
+ dev->stats.tx_carrier_errors++;
+ goto tx_error;
+ }
+
+ if (rt->dst.dev == dev) {
+ netdev_dbg(dev, "circular route to %pI4\n", &dst);
+ ip_rt_put(rt);
+ dev->stats.collisions++;
+ goto tx_error;
+ }
+
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+ IPSKB_REROUTED);
+ skb_dst_drop(skb);
+ skb_dst_set(skb, &rt->dst);
+
+ vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+ vxh->vx_flags = htonl(VXLAN_FLAGS);
+ vxh->vx_vni = htonl(vxlan->vni << 8);
+
+ __skb_push(skb, sizeof(*uh));
+ skb_reset_transport_header(skb);
+ uh = udp_hdr(skb);
+
+ uh->dest = htons(vxlan_port);
+ uh->source = hash ? :random32();
+
+ uh->len = htons(skb->len);
+ uh->check = 0;
+
+ __skb_push(skb, sizeof(*iph));
+ skb_reset_network_header(skb);
+ iph = ip_hdr(skb);
+ iph->version = 4;
+ iph->ihl = sizeof(struct iphdr) >> 2;
+ iph->frag_off = df;
+ iph->protocol = IPPROTO_UDP;
+ iph->tos = vxlan_ecn_encap(tos, old_iph, skb);
+ iph->daddr = fl4.daddr;
+ iph->saddr = fl4.saddr;
+ iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+
+ /* See __IPTUNNEL_XMIT */
+ skb->ip_summed = CHECKSUM_NONE;
+ ip_select_ident(iph, &rt->dst, NULL);
+
+ err = ip_local_out(skb);
+ if (likely(net_xmit_eval(err) == 0)) {
+ struct vxlan_stats *stats = this_cpu_ptr(vxlan->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += pkt_len;
+ u64_stats_update_end(&stats->syncp);
+ } else {
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
+ }
+ return NETDEV_TX_OK;
+
+drop:
+ dev->stats.tx_dropped++;
+ goto tx_free;
+
+tx_error:
+ dev->stats.tx_errors++;
+tx_free:
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+/* Walk the forwarding table and purge stale entries */
+static void vxlan_cleanup(unsigned long arg)
+{
+ struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
+ unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
+ unsigned int h;
+
+ if (!netif_running(vxlan->dev))
+ return;
+
+ spin_lock_bh(&vxlan->hash_lock);
+ for (h = 0; h < FDB_HASH_SIZE; ++h) {
+ struct hlist_node *p, *n;
+ hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
+ struct vxlan_fdb *f
+ = container_of(p, struct vxlan_fdb, hlist);
+ unsigned long timeout;
+
+ if (f->state == NUD_PERMANENT)
+ continue;
+
+ timeout = f->used + vxlan->age_interval * HZ;
+ if (time_before_eq(timeout, jiffies)) {
+ netdev_dbg(vxlan->dev,
+ "garbage collect %pM\n",
+ f->eth_addr);
+ f->state = NUD_STALE;
+ vxlan_fdb_destroy(vxlan, f);
+ } else if (time_before(timeout, next_timer))
+ next_timer = timeout;
+ }
+ }
+ spin_unlock_bh(&vxlan->hash_lock);
+
+ mod_timer(&vxlan->age_timer, next_timer);
+}
+
+/* Setup stats when device is created */
+static int vxlan_init(struct net_device *dev)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+
+ vxlan->stats = alloc_percpu(struct vxlan_stats);
+ if (!vxlan->stats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/* Start ageing timer and join group when device is brought up */
+static int vxlan_open(struct net_device *dev)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ int err;
+
+ if (vxlan->gaddr) {
+ err = vxlan_join_group(dev);
+ if (err)
+ return err;
+ }
+
+ if (vxlan->age_interval)
+ mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
+
+ return 0;
+}
+
+/* Purge the forwarding table */
+static void vxlan_flush(struct vxlan_dev *vxlan)
+{
+ unsigned h;
+
+ spin_lock_bh(&vxlan->hash_lock);
+ for (h = 0; h < FDB_HASH_SIZE; ++h) {
+ struct hlist_node *p, *n;
+ hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
+ struct vxlan_fdb *f
+ = container_of(p, struct vxlan_fdb, hlist);
+ vxlan_fdb_destroy(vxlan, f);
+ }
+ }
+ spin_unlock_bh(&vxlan->hash_lock);
+}
+
+/* Cleanup timer and forwarding table on shutdown */
+static int vxlan_stop(struct net_device *dev)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+
+ if (vxlan->gaddr)
+ vxlan_leave_group(dev);
+
+ del_timer_sync(&vxlan->age_timer);
+
+ vxlan_flush(vxlan);
+
+ return 0;
+}
+
+/* Merge per-cpu statistics */
+static struct rtnl_link_stats64 *vxlan_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_stats tmp, sum = { 0 };
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ unsigned int start;
+ const struct vxlan_stats *stats
+ = per_cpu_ptr(vxlan->stats, cpu);
+
+ do {
+ start = u64_stats_fetch_begin_bh(&stats->syncp);
+ memcpy(&tmp, stats, sizeof(tmp));
+ } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
+
+ sum.tx_bytes += tmp.tx_bytes;
+ sum.tx_packets += tmp.tx_packets;
+ sum.rx_bytes += tmp.rx_bytes;
+ sum.rx_packets += tmp.rx_packets;
+ }
+
+ stats->tx_bytes = sum.tx_bytes;
+ stats->tx_packets = sum.tx_packets;
+ stats->rx_bytes = sum.rx_bytes;
+ stats->rx_packets = sum.rx_packets;
+
+ stats->multicast = dev->stats.multicast;
+ stats->rx_length_errors = dev->stats.rx_length_errors;
+ stats->rx_frame_errors = dev->stats.rx_frame_errors;
+ stats->rx_errors = dev->stats.rx_errors;
+
+ stats->tx_dropped = dev->stats.tx_dropped;
+ stats->tx_carrier_errors = dev->stats.tx_carrier_errors;
+ stats->tx_aborted_errors = dev->stats.tx_aborted_errors;
+ stats->collisions = dev->stats.collisions;
+ stats->tx_errors = dev->stats.tx_errors;
+
+ return stats;
+}
+
+/* Stub, nothing needs to be done. */
+static void vxlan_set_multicast_list(struct net_device *dev)
+{
+}
+
+static const struct net_device_ops vxlan_netdev_ops = {
+ .ndo_init = vxlan_init,
+ .ndo_open = vxlan_open,
+ .ndo_stop = vxlan_stop,
+ .ndo_start_xmit = vxlan_xmit,
+ .ndo_get_stats64 = vxlan_stats64,
+ .ndo_set_rx_mode = vxlan_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_fdb_add = vxlan_fdb_add,
+ .ndo_fdb_del = vxlan_fdb_delete,
+ .ndo_fdb_dump = vxlan_fdb_dump,
+};
+
+/* Info for udev: this is a virtual tunnel endpoint */
+static struct device_type vxlan_type = {
+ .name = "vxlan",
+};
+
+static void vxlan_free(struct net_device *dev)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+
+ free_percpu(vxlan->stats);
+ free_netdev(dev);
+}
+
+/* Initialize the device structure. */
+static void vxlan_setup(struct net_device *dev)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ unsigned h;
+
+ eth_hw_addr_random(dev);
+ ether_setup(dev);
+
+ dev->netdev_ops = &vxlan_netdev_ops;
+ dev->destructor = vxlan_free;
+ SET_NETDEV_DEVTYPE(dev, &vxlan_type);
+
+ dev->tx_queue_len = 0;
+ dev->features |= NETIF_F_LLTX;
+ dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+
+ spin_lock_init(&vxlan->hash_lock);
+
+ init_timer_deferrable(&vxlan->age_timer);
+ vxlan->age_timer.function = vxlan_cleanup;
+ vxlan->age_timer.data = (unsigned long) vxlan;
+
+ vxlan->dev = dev;
+
+ for (h = 0; h < FDB_HASH_SIZE; ++h)
+ INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
+}
+
+static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
+ [IFLA_VXLAN_ID] = { .type = NLA_U32 },
+ [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+ [IFLA_VXLAN_LINK] = { .type = NLA_U32 },
+ [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
+ [IFLA_VXLAN_TOS] = { .type = NLA_U8 },
+ [IFLA_VXLAN_TTL] = { .type = NLA_U8 },
+ [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
+ [IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
+ [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
+};
+
+static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
+ pr_debug("invalid link address (not ethernet)\n");
+ return -EINVAL;
+ }
+
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
+ pr_debug("invalid all zero ethernet address\n");
+ return -EADDRNOTAVAIL;
+ }
+ }
+
+ if (!data)
+ return -EINVAL;
+
+ if (data[IFLA_VXLAN_ID]) {
+ __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
+ if (id >= VXLAN_VID_MASK)
+ return -ERANGE;
+ }
+
+ if (data[IFLA_VXLAN_GROUP]) {
+ __be32 gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
+ if (!IN_MULTICAST(ntohl(gaddr))) {
+ pr_debug("group address is not IPv4 multicast\n");
+ return -EADDRNOTAVAIL;
+ }
+ }
+ return 0;
+}
+
+static int vxlan_newlink(struct net *net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ __u32 vni;
+ int err;
+
+ if (!data[IFLA_VXLAN_ID])
+ return -EINVAL;
+
+ vni = nla_get_u32(data[IFLA_VXLAN_ID]);
+ if (vxlan_find_vni(net, vni)) {
+ pr_info("duplicate VNI %u\n", vni);
+ return -EEXIST;
+ }
+ vxlan->vni = vni;
+
+ if (data[IFLA_VXLAN_GROUP])
+ vxlan->gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
+
+ if (data[IFLA_VXLAN_LOCAL])
+ vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
+
+ if (data[IFLA_VXLAN_LINK]) {
+ vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]);
+
+ if (!tb[IFLA_MTU]) {
+ struct net_device *lowerdev;
+ lowerdev = __dev_get_by_index(net, vxlan->link);
+ dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
+ }
+ }
+
+ if (data[IFLA_VXLAN_TOS])
+ vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
+
+ if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
+ vxlan->learn = true;
+
+ if (data[IFLA_VXLAN_AGEING])
+ vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
+ else
+ vxlan->age_interval = FDB_AGE_DEFAULT;
+
+ if (data[IFLA_VXLAN_LIMIT])
+ vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
+
+ err = register_netdevice(dev);
+ if (!err)
+ hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni));
+
+ return err;
+}
+
+static void vxlan_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+
+ hlist_del_rcu(&vxlan->hlist);
+
+ unregister_netdevice_queue(dev, head);
+}
+
+static size_t vxlan_get_size(const struct net_device *dev)
+{
+ return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */
+ nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
+ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
+ nla_total_size(sizeof(__be32))+ /* IFLA_VXLAN_LOCAL */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
+ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
+ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
+ 0;
+}
+
+static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ const struct vxlan_dev *vxlan = netdev_priv(dev);
+
+ if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni))
+ goto nla_put_failure;
+
+ if (vxlan->gaddr && nla_put_u32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr))
+ goto nla_put_failure;
+
+ if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link))
+ goto nla_put_failure;
+
+ if (vxlan->saddr && nla_put_u32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
+ nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
+ nla_put_u8(skb, IFLA_VXLAN_LEARNING, vxlan->learn) ||
+ nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
+ nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
+ .kind = "vxlan",
+ .maxtype = IFLA_VXLAN_MAX,
+ .policy = vxlan_policy,
+ .priv_size = sizeof(struct vxlan_dev),
+ .setup = vxlan_setup,
+ .validate = vxlan_validate,
+ .newlink = vxlan_newlink,
+ .dellink = vxlan_dellink,
+ .get_size = vxlan_get_size,
+ .fill_info = vxlan_fill_info,
+};
+
+static __net_init int vxlan_init_net(struct net *net)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct sock *sk;
+ struct sockaddr_in vxlan_addr = {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = htonl(INADDR_ANY),
+ };
+ int rc;
+ unsigned h;
+
+ /* Create UDP socket for encapsulation receive. */
+ rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vn->sock);
+ if (rc < 0) {
+ pr_debug("UDP socket create failed\n");
+ return rc;
+ }
+ /* Put in proper namespace */
+ sk = vn->sock->sk;
+ sk_change_net(sk, net);
+
+ vxlan_addr.sin_port = htons(vxlan_port);
+
+ rc = kernel_bind(vn->sock, (struct sockaddr *) &vxlan_addr,
+ sizeof(vxlan_addr));
+ if (rc < 0) {
+ pr_debug("bind for UDP socket %pI4:%u (%d)\n",
+ &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
+ sk_release_kernel(sk);
+ vn->sock = NULL;
+ return rc;
+ }
+
+ /* Disable multicast loopback */
+ inet_sk(sk)->mc_loop = 0;
+
+ /* Mark socket as an encapsulation socket. */
+ udp_sk(sk)->encap_type = 1;
+ udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
+ udp_encap_enable();
+
+ for (h = 0; h < VNI_HASH_SIZE; ++h)
+ INIT_HLIST_HEAD(&vn->vni_list[h]);
+
+ return 0;
+}
+
+static __net_exit void vxlan_exit_net(struct net *net)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
+ if (vn->sock) {
+ sk_release_kernel(vn->sock->sk);
+ vn->sock = NULL;
+ }
+}
+
+static struct pernet_operations vxlan_net_ops = {
+ .init = vxlan_init_net,
+ .exit = vxlan_exit_net,
+ .id = &vxlan_net_id,
+ .size = sizeof(struct vxlan_net),
+};
+
+static int __init vxlan_init_module(void)
+{
+ int rc;
+
+ get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
+
+ rc = register_pernet_device(&vxlan_net_ops);
+ if (rc)
+ goto out1;
+
+ rc = rtnl_link_register(&vxlan_link_ops);
+ if (rc)
+ goto out2;
+
+ return 0;
+
+out2:
+ unregister_pernet_device(&vxlan_net_ops);
+out1:
+ return rc;
+}
+module_init(vxlan_init_module);
+
+static void __exit vxlan_cleanup_module(void)
+{
+ rtnl_link_unregister(&vxlan_link_ops);
+ unregister_pernet_device(&vxlan_net_ops);
+}
+module_exit(vxlan_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VXLAN_VERSION);
+MODULE_AUTHOR("Stephen Hemminger <shemminger@vyatta.com>");
+MODULE_ALIAS_RTNL_LINK("vxlan");
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 025426132754..9c34d2fccfac 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -222,7 +222,6 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
struct sk_buff *skb;
const struct i2400m_tlv_detailed_device_info *ddi;
struct net_device *net_dev = i2400m->wimax_dev.net_dev;
- const unsigned char zeromac[ETH_ALEN] = { 0 };
d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
skb = i2400m_get_device_info(i2400m);
@@ -244,7 +243,7 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
"to that of boot mode's\n");
dev_warn(dev, "device reports %pM\n", ddi->mac_address);
dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr);
- if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac)))
+ if (is_zero_ether_addr(ddi->mac_address))
dev_err(dev, "device reports an invalid MAC address, "
"not updating\n");
else {
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 689a71c1af71..154a4965be4f 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1661,7 +1661,9 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
}
/* Put adm8211_tx_hdr on skb and transmit */
-static void adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void adm8211_tx(struct ieee80211_hw *dev,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct adm8211_tx_hdr *txhdr;
size_t payload_len, hdrlen;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index f9f15bb3f03a..3cd05a7173f6 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -87,7 +87,6 @@ static struct pci_driver airo_driver = {
/* Include Wireless Extension definition and check version - Jean II */
#include <linux/wireless.h>
#define WIRELESS_SPY /* enable iwspy support */
-#include <net/iw_handler.h> /* New driver API */
#define CISCO_EXT /* enable Cisco extensions */
#ifdef CISCO_EXT
@@ -232,8 +231,10 @@ static int adhoc;
static int probe = 1;
+static kuid_t proc_kuid;
static int proc_uid /* = 0 */;
+static kgid_t proc_kgid;
static int proc_gid /* = 0 */;
static int airo_perm = 0555;
@@ -4499,78 +4500,79 @@ struct proc_data {
static int setup_proc_entry( struct net_device *dev,
struct airo_info *apriv ) {
struct proc_dir_entry *entry;
+
/* First setup the device directory */
strcpy(apriv->proc_name,dev->name);
apriv->proc_entry = proc_mkdir_mode(apriv->proc_name, airo_perm,
airo_entry);
if (!apriv->proc_entry)
goto fail;
- apriv->proc_entry->uid = proc_uid;
- apriv->proc_entry->gid = proc_gid;
+ apriv->proc_entry->uid = proc_kuid;
+ apriv->proc_entry->gid = proc_kgid;
/* Setup the StatsDelta */
entry = proc_create_data("StatsDelta", S_IRUGO & proc_perm,
apriv->proc_entry, &proc_statsdelta_ops, dev);
if (!entry)
goto fail_stats_delta;
- entry->uid = proc_uid;
- entry->gid = proc_gid;
+ entry->uid = proc_kuid;
+ entry->gid = proc_kgid;
/* Setup the Stats */
entry = proc_create_data("Stats", S_IRUGO & proc_perm,
apriv->proc_entry, &proc_stats_ops, dev);
if (!entry)
goto fail_stats;
- entry->uid = proc_uid;
- entry->gid = proc_gid;
+ entry->uid = proc_kuid;
+ entry->gid = proc_kgid;
/* Setup the Status */
entry = proc_create_data("Status", S_IRUGO & proc_perm,
apriv->proc_entry, &proc_status_ops, dev);
if (!entry)
goto fail_status;
- entry->uid = proc_uid;
- entry->gid = proc_gid;
+ entry->uid = proc_kuid;
+ entry->gid = proc_kgid;
/* Setup the Config */
entry = proc_create_data("Config", proc_perm,
apriv->proc_entry, &proc_config_ops, dev);
if (!entry)
goto fail_config;
- entry->uid = proc_uid;
- entry->gid = proc_gid;
+ entry->uid = proc_kuid;
+ entry->gid = proc_kgid;
/* Setup the SSID */
entry = proc_create_data("SSID", proc_perm,
apriv->proc_entry, &proc_SSID_ops, dev);
if (!entry)
goto fail_ssid;
- entry->uid = proc_uid;
- entry->gid = proc_gid;
+ entry->uid = proc_kuid;
+ entry->gid = proc_kgid;
/* Setup the APList */
entry = proc_create_data("APList", proc_perm,
apriv->proc_entry, &proc_APList_ops, dev);
if (!entry)
goto fail_aplist;
- entry->uid = proc_uid;
- entry->gid = proc_gid;
+ entry->uid = proc_kuid;
+ entry->gid = proc_kgid;
/* Setup the BSSList */
entry = proc_create_data("BSSList", proc_perm,
apriv->proc_entry, &proc_BSSList_ops, dev);
if (!entry)
goto fail_bsslist;
- entry->uid = proc_uid;
- entry->gid = proc_gid;
+ entry->uid = proc_kuid;
+ entry->gid = proc_kgid;
/* Setup the WepKey */
entry = proc_create_data("WepKey", proc_perm,
apriv->proc_entry, &proc_wepkey_ops, dev);
if (!entry)
goto fail_wepkey;
- entry->uid = proc_uid;
- entry->gid = proc_gid;
+ entry->uid = proc_kuid;
+ entry->gid = proc_kgid;
return 0;
@@ -5697,11 +5699,16 @@ static int __init airo_init_module( void )
{
int i;
+ proc_kuid = make_kuid(&init_user_ns, proc_uid);
+ proc_kgid = make_kgid(&init_user_ns, proc_gid);
+ if (!uid_valid(proc_kuid) || !gid_valid(proc_kgid))
+ return -EINVAL;
+
airo_entry = proc_mkdir_mode("driver/aironet", airo_perm, NULL);
if (airo_entry) {
- airo_entry->uid = proc_uid;
- airo_entry->gid = proc_gid;
+ airo_entry->uid = proc_kuid;
+ airo_entry->gid = proc_kgid;
}
for (i = 0; i < 4 && io[i] && irq[i]; i++) {
@@ -5976,13 +5983,11 @@ static int airo_set_wap(struct net_device *dev,
Cmd cmd;
Resp rsp;
APListRid APList_rid;
- static const u8 any[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
- static const u8 off[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
if (awrq->sa_family != ARPHRD_ETHER)
return -EINVAL;
- else if (!memcmp(any, awrq->sa_data, ETH_ALEN) ||
- !memcmp(off, awrq->sa_data, ETH_ALEN)) {
+ else if (is_broadcast_ether_addr(awrq->sa_data) ||
+ is_zero_ether_addr(awrq->sa_data)) {
memset(&cmd, 0, sizeof(cmd));
cmd.cmd=CMD_LOSE_SYNC;
if (down_interruptible(&local->sem))
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 88b8d64c90f1..99b9ddf21273 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -498,36 +498,6 @@ exit:
return ret;
}
-#define HEX2STR_BUFFERS 4
-#define HEX2STR_MAX_LEN 64
-
-/* Convert binary data into hex string */
-static char *hex2str(void *buf, size_t len)
-{
- static atomic_t a = ATOMIC_INIT(0);
- static char bufs[HEX2STR_BUFFERS][3 * HEX2STR_MAX_LEN + 1];
- char *ret = bufs[atomic_inc_return(&a) & (HEX2STR_BUFFERS - 1)];
- char *obuf = ret;
- u8 *ibuf = buf;
-
- if (len > HEX2STR_MAX_LEN)
- len = HEX2STR_MAX_LEN;
-
- if (len == 0)
- goto exit;
-
- while (len--) {
- obuf = hex_byte_pack(obuf, *ibuf++);
- *obuf++ = '-';
- }
- obuf--;
-
-exit:
- *obuf = '\0';
-
- return ret;
-}
-
/* LED trigger */
static int tx_activity;
static void at76_ledtrig_tx_timerfunc(unsigned long data);
@@ -1004,9 +974,9 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv)
WEP_SMALL_KEY_LEN : WEP_LARGE_KEY_LEN;
for (i = 0; i < WEP_KEYS; i++)
- at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: key %d: %s",
+ at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: key %d: %*phD",
wiphy_name(priv->hw->wiphy), i,
- hex2str(m->wep_default_keyvalue[i], key_len));
+ key_len, m->wep_default_keyvalue[i]);
exit:
kfree(m);
}
@@ -1031,7 +1001,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
at76_dbg(DBG_MIB, "%s: MIB MAC_MGMT: beacon_period %d CFP_max_duration "
"%d medium_occupancy_limit %d station_id 0x%x ATIM_window %d "
"CFP_mode %d privacy_opt_impl %d DTIM_period %d CFP_period %d "
- "current_bssid %pM current_essid %s current_bss_type %d "
+ "current_bssid %pM current_essid %*phD current_bss_type %d "
"pm_mode %d ibss_change %d res %d "
"multi_domain_capability_implemented %d "
"international_roaming %d country_string %.3s",
@@ -1041,7 +1011,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
le16_to_cpu(m->station_id), le16_to_cpu(m->ATIM_window),
m->CFP_mode, m->privacy_option_implemented, m->DTIM_period,
m->CFP_period, m->current_bssid,
- hex2str(m->current_essid, IW_ESSID_MAX_SIZE),
+ IW_ESSID_MAX_SIZE, m->current_essid,
m->current_bss_type, m->power_mgmt_mode, m->ibss_change,
m->res, m->multi_domain_capability_implemented,
m->multi_domain_capability_enabled, m->country_string);
@@ -1069,7 +1039,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
"cwmin %d cwmax %d short_retry_time %d long_retry_time %d "
"scan_type %d scan_channel %d probe_delay %u "
"min_channel_time %d max_channel_time %d listen_int %d "
- "desired_ssid %s desired_bssid %pM desired_bsstype %d",
+ "desired_ssid %*phD desired_bssid %pM desired_bsstype %d",
wiphy_name(priv->hw->wiphy),
le32_to_cpu(m->max_tx_msdu_lifetime),
le32_to_cpu(m->max_rx_lifetime),
@@ -1080,7 +1050,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
le16_to_cpu(m->min_channel_time),
le16_to_cpu(m->max_channel_time),
le16_to_cpu(m->listen_interval),
- hex2str(m->desired_ssid, IW_ESSID_MAX_SIZE),
+ IW_ESSID_MAX_SIZE, m->desired_ssid,
m->desired_bssid, m->desired_bsstype);
exit:
kfree(m);
@@ -1160,13 +1130,13 @@ static void at76_dump_mib_mdomain(struct at76_priv *priv)
goto exit;
}
- at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: channel_list %s",
+ at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: channel_list %*phD",
wiphy_name(priv->hw->wiphy),
- hex2str(m->channel_list, sizeof(m->channel_list)));
+ (int)sizeof(m->channel_list), m->channel_list);
- at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: tx_powerlevel %s",
+ at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: tx_powerlevel %*phD",
wiphy_name(priv->hw->wiphy),
- hex2str(m->tx_powerlevel, sizeof(m->tx_powerlevel)));
+ (int)sizeof(m->tx_powerlevel), m->tx_powerlevel);
exit:
kfree(m);
}
@@ -1369,9 +1339,9 @@ static int at76_startup_device(struct at76_priv *priv)
int ret;
at76_dbg(DBG_PARAMS,
- "%s param: ssid %.*s (%s) mode %s ch %d wep %s key %d "
+ "%s param: ssid %.*s (%*phD) mode %s ch %d wep %s key %d "
"keylen %d", wiphy_name(priv->hw->wiphy), priv->essid_size,
- priv->essid, hex2str(priv->essid, IW_ESSID_MAX_SIZE),
+ priv->essid, IW_ESSID_MAX_SIZE, priv->essid,
priv->iw_mode == IW_MODE_ADHOC ? "adhoc" : "infra",
priv->channel, priv->wep_enabled ? "enabled" : "disabled",
priv->wep_key_id, priv->wep_keys_len[priv->wep_key_id]);
@@ -1726,7 +1696,9 @@ static void at76_mac80211_tx_callback(struct urb *urb)
ieee80211_wake_queues(priv->hw);
}
-static void at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void at76_mac80211_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct at76_priv *priv = hw->priv;
struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 6169fbd23ed1..4521342c62cc 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -159,6 +159,7 @@ struct ath_common {
bool btcoex_enabled;
bool disable_ani;
+ bool antenna_diversity;
};
struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 64a453a6dfe4..3150def17193 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1331,7 +1331,6 @@ struct ath5k_hw {
unsigned int nexttbtt; /* next beacon time in TU */
struct ath5k_txq *cabq; /* content after beacon */
- int power_level; /* Requested tx power in dBm */
bool assoc; /* associate state */
bool enable_beacon; /* true if beacons are on */
@@ -1425,6 +1424,7 @@ struct ath5k_hw {
/* Value in dB units */
s16 txp_cck_ofdm_pwr_delta;
bool txp_setup;
+ int txp_requested; /* Requested tx power in dBm */
} ah_txpower;
struct ath5k_nfcal_hist ah_nfcal_hist;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 2aab20ee9f38..9fd6d9a9942e 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -723,7 +723,7 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
ieee80211_get_hdrlen_from_skb(skb), padsize,
get_hw_packet_type(skb),
- (ah->power_level * 2),
+ (ah->ah_txpower.txp_requested * 2),
hw_rate,
info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
cts_rate, duration);
@@ -1778,7 +1778,8 @@ ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
ds->ds_data = bf->skbaddr;
ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
ieee80211_get_hdrlen_from_skb(skb), padsize,
- AR5K_PKT_TYPE_BEACON, (ah->power_level * 2),
+ AR5K_PKT_TYPE_BEACON,
+ (ah->ah_txpower.txp_requested * 2),
ieee80211_get_tx_rate(ah->hw, info)->hw_value,
1, AR5K_TXKEYIX_INVALID,
antenna, flags, 0, 0);
@@ -2445,6 +2446,7 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_REPORTS_TX_ACK_STATUS;
hw->wiphy->interface_modes =
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index d56453e43d7e..7a28538e6e05 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -55,7 +55,8 @@
\********************/
static void
-ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct ath5k_hw *ah = hw->priv;
u16 qnum = skb_get_queue_mapping(skb);
@@ -207,8 +208,8 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
}
if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
- (ah->power_level != conf->power_level)) {
- ah->power_level = conf->power_level;
+ (ah->ah_txpower.txp_requested != conf->power_level)) {
+ ah->ah_txpower.txp_requested = conf->power_level;
/* Half dB steps */
ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
@@ -488,6 +489,9 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (ath5k_modparam_nohwcrypt)
return -EOPNOTSUPP;
+ if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT)
+ return -EOPNOTSUPP;
+
if (vif->type == NL80211_IFTYPE_ADHOC &&
(key->cipher == WLAN_CIPHER_SUITE_TKIP ||
key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
@@ -522,7 +526,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
ret = 0;
}
break;
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 8b71a2d947e0..ab363f34b4df 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1975,11 +1975,13 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
spur_delta_phase = (spur_offset << 18) / 25;
spur_freq_sigma_delta = (spur_delta_phase >> 10);
symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 2;
+ break;
case AR5K_BWMODE_5MHZ:
/* Both sample_freq and chip_freq are 10MHz (?) */
spur_delta_phase = (spur_offset << 19) / 25;
spur_freq_sigma_delta = (spur_delta_phase >> 10);
symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4;
+ break;
default:
if (channel->band == IEEE80211_BAND_5GHZ) {
/* Both sample_freq and chip_freq are 40MHz */
@@ -3516,6 +3518,7 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
{
unsigned int i;
u16 *rates;
+ s16 rate_idx_scaled = 0;
/* max_pwr is power level we got from driver/user in 0.5dB
* units, switch to 0.25dB units so we can compare */
@@ -3562,20 +3565,32 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
for (i = 8; i <= 15; i++)
rates[i] -= ah->ah_txpower.txp_cck_ofdm_gainf_delta;
+ /* Save min/max and current tx power for this channel
+ * in 0.25dB units.
+ *
+ * Note: We use rates[0] for current tx power because
+ * it covers most of the rates, in most cases. It's our
+ * tx power limit and what the user expects to see. */
+ ah->ah_txpower.txp_min_pwr = 2 * rates[7];
+ ah->ah_txpower.txp_cur_pwr = 2 * rates[0];
+
+ /* Set max txpower for correct OFDM operation on all rates
+ * -that is the txpower for 54Mbit-, it's used for the PAPD
+ * gain probe and it's in 0.5dB units */
+ ah->ah_txpower.txp_ofdm = rates[7];
+
/* Now that we have all rates setup use table offset to
* match the power range set by user with the power indices
* on PCDAC/PDADC table */
for (i = 0; i < 16; i++) {
- rates[i] += ah->ah_txpower.txp_offset;
+ rate_idx_scaled = rates[i] + ah->ah_txpower.txp_offset;
/* Don't get out of bounds */
- if (rates[i] > 63)
- rates[i] = 63;
+ if (rate_idx_scaled > 63)
+ rate_idx_scaled = 63;
+ if (rate_idx_scaled < 0)
+ rate_idx_scaled = 0;
+ rates[i] = rate_idx_scaled;
}
-
- /* Min/max in 0.25dB units */
- ah->ah_txpower.txp_min_pwr = 2 * rates[7];
- ah->ah_txpower.txp_cur_pwr = 2 * rates[0];
- ah->ah_txpower.txp_ofdm = rates[7];
}
@@ -3639,10 +3654,17 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
if (!ah->ah_txpower.txp_setup ||
(channel->hw_value != curr_channel->hw_value) ||
(channel->center_freq != curr_channel->center_freq)) {
- /* Reset TX power values */
+ /* Reset TX power values but preserve requested
+ * tx power from above */
+ int requested_txpower = ah->ah_txpower.txp_requested;
+
memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
+
+ /* Restore TPC setting and requested tx power */
ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
+ ah->ah_txpower.txp_requested = requested_txpower;
+
/* Calculate the powertable */
ret = ath5k_setup_channel_powertable(ah, channel,
ee_mode, type);
@@ -3789,8 +3811,9 @@ ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
* RF buffer settings on 5211/5212+ so that we
* properly set curve indices.
*/
- ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_cur_pwr ?
- ah->ah_txpower.txp_cur_pwr / 2 : AR5K_TUNE_MAX_TXPOWER);
+ ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_requested ?
+ ah->ah_txpower.txp_requested * 2 :
+ AR5K_TUNE_MAX_TXPOWER);
if (ret)
return ret;
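
The phy.c changes above add the missing break statements in the bandwidth switch, keep the user-requested tx power across the table reset, and clamp each scaled rate index into the 0..63 PCDAC/PDADC range through a signed temporary so a negative offset cannot wrap the unsigned table entry. A small sketch of that clamp, with illustrative names and limits:

#include <stdint.h>
#include <stdio.h>

#define PCDAC_MAX_INDEX 63

static uint16_t scale_power_index(uint16_t rate_pwr, int16_t offset)
{
	int32_t idx = (int32_t)rate_pwr + offset;   /* may go negative */

	if (idx > PCDAC_MAX_INDEX)
		idx = PCDAC_MAX_INDEX;
	if (idx < 0)
		idx = 0;

	return (uint16_t)idx;
}

int main(void)
{
	/* With plain unsigned arithmetic, 3 + (-10) would wrap to 65529. */
	printf("%d\n", scale_power_index(3, -10));   /* 0  */
	printf("%d\n", scale_power_index(60, 10));   /* 63 */
	return 0;
}
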
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 86aeef4b9d7e..7089f8160ad5 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1488,7 +1488,7 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
}
static struct wireless_dev *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
- char *name,
+ const char *name,
enum nl80211_iftype type,
u32 *flags,
struct vif_params *params)
@@ -3477,7 +3477,7 @@ void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
ar->num_vif--;
}
-struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, char *name,
+struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
enum nl80211_iftype type,
u8 fw_vif_idx, u8 nw_type)
{
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index 56b1ebe79812..780f77775a91 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -25,7 +25,7 @@ enum ath6kl_cfg_suspend_mode {
ATH6KL_CFG_SUSPEND_SCHED_SCAN,
};
-struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, char *name,
+struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
enum nl80211_iftype type,
u8 fw_vif_idx, u8 nw_type);
void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index ff007f500feb..e09ec40ce71a 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -237,7 +237,7 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
entry_cck->fir_step_level);
/* Skip MRC CCK for pre AR9003 families */
- if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah))
+ if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
return;
if (aniState->mrcCCK != entry_cck->mrc_cck_on)
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index bbcfeb3b2a60..664844c5d3d5 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -311,6 +311,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
struct ath_ant_comb *antcomb,
int alt_ratio)
{
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+
if (ant_conf->div_group == 0) {
/* Adjust the fast_div_bias based on main and alt lna conf */
switch ((ant_conf->main_lna_conf << 4) |
@@ -360,18 +363,12 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->alt_lna_conf) {
case 0x01: /* A-B LNA2 */
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x02: /* A-B LNA1 */
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x03: /* A-B A+B */
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x10: /* LNA2 A-B */
if (!(antcomb->scan) &&
@@ -379,13 +376,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x3f;
else
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x12: /* LNA2 LNA1 */
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x13: /* LNA2 A+B */
if (!(antcomb->scan) &&
@@ -393,8 +386,6 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x3f;
else
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x20: /* LNA1 A-B */
if (!(antcomb->scan) &&
@@ -402,13 +393,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x3f;
else
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x21: /* LNA1 LNA2 */
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x23: /* LNA1 A+B */
if (!(antcomb->scan) &&
@@ -416,23 +403,15 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x3f;
else
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x30: /* A+B A-B */
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x31: /* A+B LNA2 */
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x32: /* A+B LNA1 */
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
default:
break;
@@ -443,18 +422,12 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->alt_lna_conf) {
case 0x01: /* A-B LNA2 */
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x02: /* A-B LNA1 */
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x03: /* A-B A+B */
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x10: /* LNA2 A-B */
if (!(antcomb->scan) &&
@@ -462,13 +435,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x1;
else
ant_conf->fast_div_bias = 0x2;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x12: /* LNA2 LNA1 */
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x13: /* LNA2 A+B */
if (!(antcomb->scan) &&
@@ -476,8 +445,6 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x1;
else
ant_conf->fast_div_bias = 0x2;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x20: /* LNA1 A-B */
if (!(antcomb->scan) &&
@@ -485,13 +452,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x1;
else
ant_conf->fast_div_bias = 0x2;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x21: /* LNA1 LNA2 */
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x23: /* LNA1 A+B */
if (!(antcomb->scan) &&
@@ -499,23 +462,77 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x1;
else
ant_conf->fast_div_bias = 0x2;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x30: /* A+B A-B */
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x31: /* A+B LNA2 */
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
break;
case 0x32: /* A+B LNA1 */
ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
+ break;
+ default:
+ break;
+ }
+ } else if (ant_conf->div_group == 3) {
+ switch ((ant_conf->main_lna_conf << 4) |
+ ant_conf->alt_lna_conf) {
+ case 0x01: /* A-B LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x02: /* A-B LNA1 */
+ ant_conf->fast_div_bias = 0x39;
+ break;
+ case 0x03: /* A-B A+B */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x10: /* LNA2 A-B */
+ if ((antcomb->scan == 0) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
+ ant_conf->fast_div_bias = 0x3f;
+ } else {
+ ant_conf->fast_div_bias = 0x1;
+ }
+ break;
+ case 0x12: /* LNA2 LNA1 */
+ ant_conf->fast_div_bias = 0x39;
+ break;
+ case 0x13: /* LNA2 A+B */
+ if ((antcomb->scan == 0) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
+ ant_conf->fast_div_bias = 0x3f;
+ } else {
+ ant_conf->fast_div_bias = 0x1;
+ }
+ break;
+ case 0x20: /* LNA1 A-B */
+ if ((antcomb->scan == 0) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
+ ant_conf->fast_div_bias = 0x3f;
+ } else {
+ ant_conf->fast_div_bias = 0x4;
+ }
+ break;
+ case 0x21: /* LNA1 LNA2 */
+ ant_conf->fast_div_bias = 0x6;
+ break;
+ case 0x23: /* LNA1 A+B */
+ if ((antcomb->scan == 0) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
+ ant_conf->fast_div_bias = 0x3f;
+ } else {
+ ant_conf->fast_div_bias = 0x6;
+ }
+ break;
+ case 0x30: /* A+B A-B */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x31: /* A+B LNA2 */
+ ant_conf->fast_div_bias = 0x6;
+ break;
+ case 0x32: /* A+B LNA1 */
+ ant_conf->fast_div_bias = 0x1;
break;
default:
break;
@@ -759,6 +776,7 @@ div_comb_done:
void ath_ant_comb_update(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath_hw_antcomb_conf div_ant_conf;
u8 lna_conf;
@@ -773,4 +791,7 @@ void ath_ant_comb_update(struct ath_softc *sc)
div_ant_conf.alt_lna_conf = lna_conf;
ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf);
+
+ if (common->antenna_diversity)
+ ath9k_hw_antctrl_shared_chain_lnadiv(ah, true);
}
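
The antenna.c diff above hoists the main_gaintb/alt_gaintb resets to the top of ath_ant_div_conf_fast_divbias() instead of repeating them in every case, and adds a div_group 3 table for AR9565. The same hoisting pattern in a self-contained form, with placeholder types and bias values:

#include <stdio.h>

struct ant_conf {
	unsigned int fast_div_bias;
	unsigned int main_gaintb;
	unsigned int alt_gaintb;
};

static void configure(struct ant_conf *c, unsigned int lna_combo)
{
	/* defaults shared by every case go before the switch */
	c->main_gaintb = 0;
	c->alt_gaintb = 0;

	switch (lna_combo) {
	case 0x02: /* A-B LNA1 */
		c->fast_div_bias = 0x39;
		break;
	case 0x21: /* LNA1 LNA2 */
		c->fast_div_bias = 0x6;
		break;
	default:
		c->fast_div_bias = 0x1;
		break;
	}
}

int main(void)
{
	struct ant_conf c;

	configure(&c, 0x21);
	printf("bias=%#x main=%u alt=%u\n", c.fast_div_bias, c.main_gaintb, c.alt_gaintb);
	return 0;
}
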
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index d066f2516e47..5bbe5057ba18 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -138,7 +138,8 @@ static const struct ar9300_eeprom ar9300_default = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0},
+ .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
@@ -713,7 +714,8 @@ static const struct ar9300_eeprom ar9300_x113 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0},
+ .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
@@ -1289,7 +1291,8 @@ static const struct ar9300_eeprom ar9300_h112 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0},
+ .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
@@ -1865,7 +1868,8 @@ static const struct ar9300_eeprom ar9300_x112 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0},
+ .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
@@ -2440,7 +2444,8 @@ static const struct ar9300_eeprom ar9300_h116 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0},
+ .tempslopextension = {0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
@@ -3524,7 +3529,7 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
- else if (AR_SREV_9462(ah) || AR_SREV_9550(ah))
+ else if (AR_SREV_9462(ah) || AR_SREV_9550(ah) || AR_SREV_9565(ah))
REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
else {
REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
@@ -3561,9 +3566,9 @@ static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah, int chain,
static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
{
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
int chain;
u32 regval;
- u32 ant_div_ctl1;
static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = {
AR_PHY_SWITCH_CHAIN_0,
AR_PHY_SWITCH_CHAIN_1,
@@ -3572,7 +3577,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
- if (AR_SREV_9462(ah)) {
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
AR_SWITCH_TABLE_COM_AR9462_ALL, value);
} else if (AR_SREV_9550(ah)) {
@@ -3616,7 +3621,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
}
}
- if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
+ if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
value = ath9k_hw_ar9300_get_eeprom(ah, EEP_ANT_DIV_CTL1);
/*
* main_lnaconf, alt_lnaconf, main_tb, alt_tb
@@ -3626,41 +3631,44 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
regval &= (~AR_ANT_DIV_CTRL_ALL);
regval |= (value & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
/* enable_lnadiv */
- regval &= (~AR_PHY_9485_ANT_DIV_LNADIV);
- regval |= ((value >> 6) & 0x1) <<
- AR_PHY_9485_ANT_DIV_LNADIV_S;
+ regval &= (~AR_PHY_ANT_DIV_LNADIV);
+ regval |= ((value >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
+
+ if (AR_SREV_9565(ah)) {
+ if (ah->shared_chain_lnadiv) {
+ regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S);
+ } else {
+ regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S);
+ regval &= ~(1 << AR_PHY_ANT_SW_RX_PROT_S);
+ }
+ }
+
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
/*enable fast_div */
regval = REG_READ(ah, AR_PHY_CCK_DETECT);
regval &= (~AR_FAST_DIV_ENABLE);
- regval |= ((value >> 7) & 0x1) <<
- AR_FAST_DIV_ENABLE_S;
+ regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
- ant_div_ctl1 =
- ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
- /* check whether antenna diversity is enabled */
- if ((ant_div_ctl1 >> 0x6) == 0x3) {
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
/*
* clear bits 25-30 main_lnaconf, alt_lnaconf,
* main_tb, alt_tb
*/
- regval &= (~(AR_PHY_9485_ANT_DIV_MAIN_LNACONF |
- AR_PHY_9485_ANT_DIV_ALT_LNACONF |
- AR_PHY_9485_ANT_DIV_ALT_GAINTB |
- AR_PHY_9485_ANT_DIV_MAIN_GAINTB));
+ regval &= (~(AR_PHY_ANT_DIV_MAIN_LNACONF |
+ AR_PHY_ANT_DIV_ALT_LNACONF |
+ AR_PHY_ANT_DIV_ALT_GAINTB |
+ AR_PHY_ANT_DIV_MAIN_GAINTB));
/* by default use LNA1 for the main antenna */
- regval |= (AR_PHY_9485_ANT_DIV_LNA1 <<
- AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S);
- regval |= (AR_PHY_9485_ANT_DIV_LNA2 <<
- AR_PHY_9485_ANT_DIV_ALT_LNACONF_S);
+ regval |= (AR_PHY_ANT_DIV_LNA1 <<
+ AR_PHY_ANT_DIV_MAIN_LNACONF_S);
+ regval |= (AR_PHY_ANT_DIV_LNA2 <<
+ AR_PHY_ANT_DIV_ALT_LNACONF_S);
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
}
-
-
}
-
}
static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
@@ -3847,7 +3855,7 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
return;
- } else if (AR_SREV_9462(ah)) {
+ } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
reg_val = le32_to_cpu(pBase->swreg);
REG_WRITE(ah, AR_PHY_PMU1, reg_val);
} else {
@@ -3878,7 +3886,7 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
while (!REG_READ_FIELD(ah, AR_PHY_PMU2,
AR_PHY_PMU2_PGM))
udelay(10);
- } else if (AR_SREV_9462(ah))
+ } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1);
else {
reg_val = REG_READ(ah, AR_RTC_SLEEP_CLK) |
@@ -3981,6 +3989,62 @@ static void ar9003_hw_xlna_bias_strength_apply(struct ath_hw *ah, bool is2ghz)
bias & 0x3);
}
+static int ar9003_hw_get_thermometer(struct ath_hw *ah)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
+ int thermometer = (pBase->miscConfiguration >> 1) & 0x3;
+
+ return --thermometer;
+}
+
+static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
+{
+ int thermometer = ar9003_hw_get_thermometer(ah);
+ u8 therm_on = (thermometer < 0) ? 0 : 1;
+
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
+ AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
+ if (ah->caps.tx_chainmask & BIT(1))
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
+ AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
+ if (ah->caps.tx_chainmask & BIT(2))
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
+ AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
+
+ therm_on = (thermometer < 0) ? 0 : (thermometer == 0);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
+ AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
+ if (ah->caps.tx_chainmask & BIT(1)) {
+ therm_on = (thermometer < 0) ? 0 : (thermometer == 1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
+ AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
+ }
+ if (ah->caps.tx_chainmask & BIT(2)) {
+ therm_on = (thermometer < 0) ? 0 : (thermometer == 2);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
+ AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
+ }
+}
+
+static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
+{
+ u32 data, ko, kg;
+
+ if (!AR_SREV_9462_20(ah))
+ return;
+ ar9300_otp_read_word(ah, 1, &data);
+ ko = data & 0xff;
+ kg = (data >> 8) & 0xff;
+ if (ko || kg) {
+ REG_RMW_FIELD(ah, AR_PHY_BB_THERM_ADC_3,
+ AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET, ko);
+ REG_RMW_FIELD(ah, AR_PHY_BB_THERM_ADC_3,
+ AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN,
+ kg + 256);
+ }
+}
+
static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -3996,6 +4060,8 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
ar9003_hw_internal_regulator_apply(ah);
ar9003_hw_apply_tuning_caps(ah);
ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz);
+ ar9003_hw_thermometer_apply(ah);
+ ar9003_hw_thermo_cal_apply(ah);
}
static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
@@ -4532,7 +4598,7 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah,
{
int tempSlope = 0;
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- int f[3], t[3];
+ int f[8], t[8], i;
REG_RMW(ah, AR_PHY_TPC_11_B0,
(correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
@@ -4565,7 +4631,14 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah,
*/
if (frequency < 4000)
tempSlope = eep->modalHeader2G.tempSlope;
- else if (eep->base_ext2.tempSlopeLow != 0) {
+ else if ((eep->baseEepHeader.miscConfiguration & 0x20) != 0) {
+ for (i = 0; i < 8; i++) {
+ t[i] = eep->base_ext1.tempslopextension[i];
+ f[i] = FBIN2FREQ(eep->calFreqPier5G[i], 0);
+ }
+ tempSlope = ar9003_hw_power_interpolate((s32) frequency,
+ f, t, 8);
+ } else if (eep->base_ext2.tempSlopeLow != 0) {
t[0] = eep->base_ext2.tempSlopeLow;
f[0] = 5180;
t[1] = eep->modalHeader5G.tempSlope;
@@ -4905,90 +4978,79 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i],
chan->channel);
- /*
- * compare test group from regulatory
- * channel list with test mode from pCtlMode
- * list
- */
- if ((((cfgCtl & ~CTL_MODE_M) |
- (pCtlMode[ctlMode] & CTL_MODE_M)) ==
- ctlIndex[i]) ||
- (((cfgCtl & ~CTL_MODE_M) |
- (pCtlMode[ctlMode] & CTL_MODE_M)) ==
- ((ctlIndex[i] & CTL_MODE_M) |
- SD_NO_CTL))) {
- twiceMinEdgePower =
- ar9003_hw_get_max_edge_power(pEepData,
- freq, i,
- is2ghz);
-
- if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL)
- /*
- * Find the minimum of all CTL
- * edge powers that apply to
- * this channel
- */
- twiceMaxEdgePower =
- min(twiceMaxEdgePower,
- twiceMinEdgePower);
- else {
- /* specific */
- twiceMaxEdgePower =
- twiceMinEdgePower;
- break;
- }
+ /*
+ * compare test group from regulatory
+ * channel list with test mode from pCtlMode
+ * list
+ */
+ if ((((cfgCtl & ~CTL_MODE_M) |
+ (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+ ctlIndex[i]) ||
+ (((cfgCtl & ~CTL_MODE_M) |
+ (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+ ((ctlIndex[i] & CTL_MODE_M) |
+ SD_NO_CTL))) {
+ twiceMinEdgePower =
+ ar9003_hw_get_max_edge_power(pEepData,
+ freq, i,
+ is2ghz);
+
+ if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL)
+ /*
+ * Find the minimum of all CTL
+ * edge powers that apply to
+ * this channel
+ */
+ twiceMaxEdgePower =
+ min(twiceMaxEdgePower,
+ twiceMinEdgePower);
+ else {
+ /* specific */
+ twiceMaxEdgePower = twiceMinEdgePower;
+ break;
}
}
+ }
- minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
+ minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
- ath_dbg(common, REGULATORY,
- "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n",
- ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
- scaledPower, minCtlPower);
-
- /* Apply ctl mode to correct target power set */
- switch (pCtlMode[ctlMode]) {
- case CTL_11B:
- for (i = ALL_TARGET_LEGACY_1L_5L;
- i <= ALL_TARGET_LEGACY_11S; i++)
- pPwrArray[i] =
- (u8)min((u16)pPwrArray[i],
- minCtlPower);
- break;
- case CTL_11A:
- case CTL_11G:
- for (i = ALL_TARGET_LEGACY_6_24;
- i <= ALL_TARGET_LEGACY_54; i++)
- pPwrArray[i] =
- (u8)min((u16)pPwrArray[i],
- minCtlPower);
- break;
- case CTL_5GHT20:
- case CTL_2GHT20:
- for (i = ALL_TARGET_HT20_0_8_16;
- i <= ALL_TARGET_HT20_21; i++)
- pPwrArray[i] =
- (u8)min((u16)pPwrArray[i],
- minCtlPower);
- pPwrArray[ALL_TARGET_HT20_22] =
- (u8)min((u16)pPwrArray[ALL_TARGET_HT20_22],
- minCtlPower);
- pPwrArray[ALL_TARGET_HT20_23] =
- (u8)min((u16)pPwrArray[ALL_TARGET_HT20_23],
- minCtlPower);
- break;
- case CTL_5GHT40:
- case CTL_2GHT40:
- for (i = ALL_TARGET_HT40_0_8_16;
- i <= ALL_TARGET_HT40_23; i++)
- pPwrArray[i] =
- (u8)min((u16)pPwrArray[i],
- minCtlPower);
- break;
- default:
- break;
- }
+ ath_dbg(common, REGULATORY,
+ "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n",
+ ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
+ scaledPower, minCtlPower);
+
+ /* Apply ctl mode to correct target power set */
+ switch (pCtlMode[ctlMode]) {
+ case CTL_11B:
+ for (i = ALL_TARGET_LEGACY_1L_5L;
+ i <= ALL_TARGET_LEGACY_11S; i++)
+ pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+ minCtlPower);
+ break;
+ case CTL_11A:
+ case CTL_11G:
+ for (i = ALL_TARGET_LEGACY_6_24;
+ i <= ALL_TARGET_LEGACY_54; i++)
+ pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+ minCtlPower);
+ break;
+ case CTL_5GHT20:
+ case CTL_2GHT20:
+ for (i = ALL_TARGET_HT20_0_8_16;
+ i <= ALL_TARGET_HT20_23; i++)
+ pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+ minCtlPower);
+ break;
+ case CTL_5GHT40:
+ case CTL_2GHT40:
+ for (i = ALL_TARGET_HT40_0_8_16;
+ i <= ALL_TARGET_HT40_23; i++)
+ pPwrArray[i] = (u8)min((u16)pPwrArray[i],
+ minCtlPower);
+ break;
+ default:
+ break;
+ }
} /* end ctl mode checking */
}
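
Among the ar9003_eeprom.c changes above, the 5 GHz temperature slope is now interpolated over the eight tempslopextension entries, one per calibration frequency pier, when the relevant miscConfiguration bit is set. A compact piecewise-linear interpolation in that spirit; the helper name and table values are illustrative, not the driver's:

#include <stdio.h>

static int interp_linear(int x, const int *xs, const int *ys, int n)
{
	int i;

	if (x <= xs[0])
		return ys[0];
	if (x >= xs[n - 1])
		return ys[n - 1];

	for (i = 0; i < n - 1; i++)
		if (x <= xs[i + 1])
			return ys[i] + (ys[i + 1] - ys[i]) *
			       (x - xs[i]) / (xs[i + 1] - xs[i]);

	return ys[n - 1];   /* not reached */
}

int main(void)
{
	/* 5 GHz calibration piers (MHz) and per-pier temperature slopes */
	const int freq[8]  = {5180, 5260, 5320, 5500, 5600, 5700, 5745, 5825};
	const int slope[8] = {  30,   31,   32,   34,   35,   36,   36,   37};

	printf("slope @ 5550 MHz: %d\n", interp_linear(5550, freq, slope, 8));
	return 0;
}
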
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 3a1ff55bceb9..41b1a75e6bec 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -267,7 +267,8 @@ struct cal_ctl_data_5g {
struct ar9300_BaseExtension_1 {
u8 ant_div_control;
- u8 future[11];
+ u8 future[3];
+ u8 tempslopextension[8];
int8_t quick_drop_low;
int8_t quick_drop_high;
} __packed;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 1e8a4da5952f..1a36fa262639 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -24,6 +24,7 @@
#include "ar955x_1p0_initvals.h"
#include "ar9580_1p0_initvals.h"
#include "ar9462_2p0_initvals.h"
+#include "ar9565_1p0_initvals.h"
/* General hardware code for the AR9003 hardware family */
@@ -34,14 +35,12 @@
*/
static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
{
-#define PCIE_PLL_ON_CREQ_DIS_L1_2P0 \
- ar9462_pciephy_pll_on_clkreq_disable_L1_2p0
-
#define AR9462_BB_CTX_COEFJ(x) \
ar9462_##x##_baseband_core_txfir_coeff_japan_2484
#define AR9462_BBC_TXIFR_COEFFJ \
ar9462_2p0_baseband_core_txfir_coeff_japan_2484
+
if (AR_SREV_9330_11(ah)) {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -220,10 +219,10 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
/* Awake -> Sleep Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdes,
- PCIE_PLL_ON_CREQ_DIS_L1_2P0);
+ ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
/* Sleep -> Awake Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- PCIE_PLL_ON_CREQ_DIS_L1_2P0);
+ ar9462_pciephy_pll_on_clkreq_disable_L1_2p0);
/* Fast clock modal settings */
INIT_INI_ARRAY(&ah->iniModesFastClock,
@@ -302,6 +301,39 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniModesFastClock,
ar9580_1p0_modes_fast_clock);
+ } else if (AR_SREV_9565(ah)) {
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9565_1p0_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9565_1p0_mac_postamble);
+
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9565_1p0_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9565_1p0_baseband_postamble);
+
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9565_1p0_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9565_1p0_radio_postamble);
+
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9565_1p0_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9565_1p0_soc_postamble);
+
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9565_1p0_Common_rx_gain_table);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p0_Modes_lowest_ob_db_tx_gain_table);
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9565_1p0_pciephy_pll_on_clkreq_disable_L1);
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9565_1p0_pciephy_pll_on_clkreq_disable_L1);
+
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ ar9565_1p0_modes_fast_clock);
} else {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -374,6 +406,9 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9462_modes_low_ob_db_tx_gain_table_2p0);
+ else if (AR_SREV_9565(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p0_modes_low_ob_db_tx_gain_table);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9300Modes_lowest_ob_db_tx_gain_table_2p2);
@@ -402,6 +437,9 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9462_modes_high_ob_db_tx_gain_table_2p0);
+ else if (AR_SREV_9565(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p0_modes_high_ob_db_tx_gain_table);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9300Modes_high_ob_db_tx_gain_table_2p2);
@@ -424,6 +462,9 @@ static void ar9003_tx_gain_table_mode2(struct ath_hw *ah)
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_low_ob_db_tx_gain_table);
+ else if (AR_SREV_9565(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p0_modes_low_ob_db_tx_gain_table);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9300Modes_low_ob_db_tx_gain_table_2p2);
@@ -446,6 +487,9 @@ static void ar9003_tx_gain_table_mode3(struct ath_hw *ah)
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_high_power_tx_gain_table);
+ else if (AR_SREV_9565(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9565_1p0_modes_high_power_tx_gain_table);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9300Modes_high_power_tx_gain_table_2p2);
@@ -538,6 +582,9 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
} else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9580_1p0_wo_xlna_rx_gain_table);
+ else if (AR_SREV_9565(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9565_1p0_common_wo_xlna_rx_gain_table);
else
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9300Common_wo_xlna_rx_gain_table_2p2);
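
The ar9003_hw.c block above registers the AR9565 1.0 INI tables in the same per-revision dispatch used for the other AR9003-family chips. A stripped-down version of that dispatch pattern, with hypothetical revisions and placeholder register/value pairs:

#include <stddef.h>
#include <stdio.h>

enum chip_rev { REV_9462, REV_9565, REV_DEFAULT };

static const unsigned int ini_9462[][2]    = { {0x0030, 0x0001} };
static const unsigned int ini_9565[][2]    = { {0x0030, 0x0002} };
static const unsigned int ini_default[][2] = { {0x0030, 0x0000} };

struct ini_table {
	const unsigned int (*regs)[2];
	size_t nregs;
};

static struct ini_table pick_mode_regs(enum chip_rev rev)
{
	switch (rev) {
	case REV_9462:
		return (struct ini_table){ ini_9462, 1 };
	case REV_9565:
		return (struct ini_table){ ini_9565, 1 };
	default:
		return (struct ini_table){ ini_default, 1 };
	}
}

int main(void)
{
	struct ini_table t = pick_mode_regs(REV_9565);

	printf("%#06x -> %#06x (%zu regs)\n", t.regs[0][0], t.regs[0][1], t.nregs);
	return 0;
}
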
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 78816b8b2173..301bf72c53bf 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -31,7 +31,7 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
u32 val, ctl12, ctl17;
u8 desc_len;
- desc_len = (AR_SREV_9462(ah) ? 0x18 : 0x17);
+ desc_len = ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x18 : 0x17);
val = (ATHEROS_VENDOR_ID << AR_DescId_S) |
(1 << AR_TxRxDesc_S) |
@@ -182,6 +182,7 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
u32 sync_cause = 0, async_cause, async_mask = AR_INTR_MAC_IRQ;
+ bool fatal_int;
if (ath9k_hw_mci_is_enabled(ah))
async_mask |= AR_INTR_ASYNC_MASK_MCI;
@@ -310,6 +311,22 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
if (sync_cause) {
ath9k_debug_sync_cause(common, sync_cause);
+ fatal_int =
+ (sync_cause &
+ (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
+ ? true : false;
+
+ if (fatal_int) {
+ if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
+ ath_dbg(common, ANY,
+ "received PCI FATAL interrupt\n");
+ }
+ if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
+ ath_dbg(common, ANY,
+ "received PCI PERR interrupt\n");
+ }
+ *masked |= ATH9K_INT_FATAL;
+ }
if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
@@ -531,7 +548,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_status |= ATH9K_RXERR_PHY;
rxs->rs_phyerr = phyerr;
}
- };
+ }
}
if (rxsp->status11 & AR_KeyMiss)
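
The ar9003_mac.c hunk above inspects the sync interrupt cause for the two PCI error bits and escalates either one to a fatal interrupt so the caller can reset the chip. A standalone model of that classification; the bit positions and flag values below are made up for the example:

#include <stdint.h>
#include <stdio.h>

#define SYNC_HOST1_FATAL (1u << 0)
#define SYNC_HOST1_PERR  (1u << 1)
#define INT_FATAL        (1u << 30)

static void classify_sync_cause(uint32_t sync_cause, uint32_t *masked)
{
	if (!(sync_cause & (SYNC_HOST1_FATAL | SYNC_HOST1_PERR)))
		return;

	if (sync_cause & SYNC_HOST1_FATAL)
		printf("received PCI FATAL interrupt\n");
	if (sync_cause & SYNC_HOST1_PERR)
		printf("received PCI PERR interrupt\n");

	*masked |= INT_FATAL;   /* force the fatal-reset path in the caller */
}

int main(void)
{
	uint32_t masked = 0;

	classify_sync_cause(SYNC_HOST1_PERR, &masked);
	printf("masked=%#x\n", (unsigned)masked);
	return 0;
}
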
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 9a34fcaae3ff..44c202ce6c66 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -714,6 +714,7 @@ bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan)
return true;
}
+EXPORT_SYMBOL(ar9003_mci_start_reset);
int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
struct ath9k_hw_cal_data *caldata)
@@ -812,8 +813,8 @@ static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable)
AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
}
-void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
- bool is_full_sleep)
+int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+ bool is_full_sleep)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
@@ -823,14 +824,13 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
is_full_sleep, is_2g);
if (!mci->gpm_addr && !mci->sched_addr) {
- ath_dbg(common, MCI,
- "MCI GPM and schedule buffers are not allocated\n");
- return;
+ ath_err(common, "MCI GPM and schedule buffers are not allocated\n");
+ return -ENOMEM;
}
if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
- ath_dbg(common, MCI, "BTCOEX control register is dead\n");
- return;
+ ath_err(common, "BTCOEX control register is dead\n");
+ return -EINVAL;
}
/* Program MCI DMA related registers */
@@ -912,6 +912,8 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
if (en_int)
ar9003_mci_enable_interrupt(ah);
+
+ return 0;
}
void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep)
@@ -1026,6 +1028,7 @@ void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
ar9003_mci_osla_setup(ah, true);
+ REG_WRITE(ah, AR_SELFGEN_MASK, 0x02);
} else {
ar9003_mci_send_lna_take(ah, true);
udelay(5);
@@ -1142,8 +1145,8 @@ void ar9003_mci_init_cal_done(struct ath_hw *ah)
ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
}
-void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
- u16 len, u32 sched_addr)
+int ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+ u16 len, u32 sched_addr)
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
@@ -1152,7 +1155,7 @@ void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
mci->gpm_len = len;
mci->sched_addr = sched_addr;
- ar9003_mci_reset(ah, true, true, true);
+ return ar9003_mci_reset(ah, true, true, true);
}
EXPORT_SYMBOL(ar9003_mci_setup);
@@ -1201,12 +1204,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
ar9003_mci_2g5g_switch(ah, false);
break;
- case MCI_STATE_SET_BT_CAL_START:
- mci->bt_state = MCI_BT_CAL_START;
- break;
- case MCI_STATE_SET_BT_CAL:
- mci->bt_state = MCI_BT_CAL;
- break;
case MCI_STATE_RESET_REQ_WAKE:
ar9003_mci_reset_req_wakeup(ah);
mci->update_2g5g = true;
@@ -1240,6 +1237,10 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
case MCI_STATE_NEED_FTP_STOMP:
value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
break;
+ case MCI_STATE_NEED_FLUSH_BT_INFO:
+ value = (!mci->unhalt_bt_gpm && mci->need_flush_btinfo) ? 1 : 0;
+ mci->need_flush_btinfo = false;
+ break;
default:
break;
}
@@ -1289,7 +1290,7 @@ void ar9003_mci_set_power_awake(struct ath_hw *ah)
}
REG_WRITE(ah, AR_DIAG_SW, (diag_sw | BIT(27) | BIT(19) | BIT(18)));
lna_ctrl = REG_READ(ah, AR_OBS_BUS_CTRL) & 0x3;
- bt_sleep = REG_READ(ah, AR_MCI_RX_STATUS) & AR_MCI_RX_REMOTE_SLEEP;
+ bt_sleep = MS(REG_READ(ah, AR_MCI_RX_STATUS), AR_MCI_RX_REMOTE_SLEEP);
REG_WRITE(ah, AR_BTCOEX_CTRL2, btcoex_ctrl2);
REG_WRITE(ah, AR_DIAG_SW, diag_sw);
@@ -1327,6 +1328,10 @@ u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
if (first) {
gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+
+ if (gpm_ptr >= mci->gpm_len)
+ gpm_ptr = 0;
+
mci->gpm_idx = gpm_ptr;
return gpm_ptr;
}
@@ -1371,6 +1376,10 @@ u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
more_gpm = MCI_GPM_NOMORE;
temp_index = mci->gpm_idx;
+
+ if (temp_index >= mci->gpm_len)
+ temp_index = 0;
+
mci->gpm_idx++;
if (mci->gpm_idx >= mci->gpm_len)
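
The ar9003_mci.c changes above turn the MCI reset/setup paths into functions that return an error code instead of silently bailing out, and bounds-check the GPM read index against the buffer length before using it. A small model of both ideas; the structure layout and error values are illustrative:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct mci_state {
	uint32_t gpm_addr;
	uint32_t sched_addr;
	uint32_t gpm_len;
	uint32_t gpm_idx;
};

static int mci_reset(struct mci_state *mci)
{
	if (!mci->gpm_addr && !mci->sched_addr)
		return -ENOMEM;              /* buffers were never allocated */
	return 0;
}

static uint32_t mci_next_gpm_offset(struct mci_state *mci, uint32_t hw_ptr)
{
	if (hw_ptr >= mci->gpm_len)          /* never trust a raw hardware pointer */
		hw_ptr = 0;
	mci->gpm_idx = hw_ptr;
	return hw_ptr;
}

int main(void)
{
	struct mci_state mci = { .gpm_len = 16 };

	printf("reset: %d\n", mci_reset(&mci));                          /* -ENOMEM */
	printf("offset: %u\n", (unsigned)mci_next_gpm_offset(&mci, 64)); /* clamped */
	return 0;
}
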
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
index d33b8e128855..2a2d01889613 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -190,8 +190,6 @@ enum mci_bt_state {
enum mci_state_type {
MCI_STATE_ENABLE,
MCI_STATE_SET_BT_AWAKE,
- MCI_STATE_SET_BT_CAL_START,
- MCI_STATE_SET_BT_CAL,
MCI_STATE_LAST_SCHD_MSG_OFFSET,
MCI_STATE_REMOTE_SLEEP,
MCI_STATE_RESET_REQ_WAKE,
@@ -202,6 +200,7 @@ enum mci_state_type {
MCI_STATE_RECOVER_RX,
MCI_STATE_NEED_FTP_STOMP,
MCI_STATE_DEBUG,
+ MCI_STATE_NEED_FLUSH_BT_INFO,
MCI_STATE_MAX
};
@@ -213,7 +212,8 @@ enum mci_gpm_coex_opcode {
MCI_GPM_COEX_WLAN_CHANNELS,
MCI_GPM_COEX_BT_PROFILE_INFO,
MCI_GPM_COEX_BT_STATUS_UPDATE,
- MCI_GPM_COEX_BT_UPDATE_FLAGS
+ MCI_GPM_COEX_BT_UPDATE_FLAGS,
+ MCI_GPM_COEX_NOOP,
};
#define MCI_GPM_NOMORE 0
@@ -249,8 +249,8 @@ bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
u32 *payload, u8 len, bool wait_done,
bool check_bt);
u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type);
-void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
- u16 len, u32 sched_addr);
+int ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+ u16 len, u32 sched_addr);
void ar9003_mci_cleanup(struct ath_hw *ah);
void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
u32 *rx_msg_intr);
@@ -272,8 +272,8 @@ void ar9003_mci_check_bt(struct ath_hw *ah);
bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan);
int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
struct ath9k_hw_cal_data *caldata);
-void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
- bool is_full_sleep);
+int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+ bool is_full_sleep);
void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked);
void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah);
void ar9003_mci_set_power_awake(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index e476f9f92ce3..759f5f5a7154 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -88,7 +88,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
channelSel = (freq * 4) / div;
chan_frac = (((freq * 4) % div) * 0x20000) / div;
channelSel = (channelSel << 17) | chan_frac;
- } else if (AR_SREV_9485(ah)) {
+ } else if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
u32 chan_frac;
/*
@@ -206,6 +206,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
for (i = 0; i < max_spur_cnts; i++) {
if (AR_SREV_9462(ah) && (i == 0 || i == 3))
continue;
+
negative = 0;
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
AR_SREV_9550(ah))
@@ -301,7 +302,9 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
int freq_offset,
int spur_freq_sd,
int spur_delta_phase,
- int spur_subchannel_sd)
+ int spur_subchannel_sd,
+ int range,
+ int synth_freq)
{
int mask_index = 0;
@@ -316,8 +319,11 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, spur_subchannel_sd);
REG_RMW_FIELD(ah, AR_PHY_TIMING11,
AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0x1);
- REG_RMW_FIELD(ah, AR_PHY_TIMING11,
- AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0x1);
+
+ if (!(AR_SREV_9565(ah) && range == 10 && synth_freq == 2437))
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0x1);
+
REG_RMW_FIELD(ah, AR_PHY_TIMING4,
AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0x1);
REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
@@ -358,9 +364,44 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0xff);
}
+static void ar9003_hw_spur_ofdm_9565(struct ath_hw *ah,
+ int freq_offset)
+{
+ int mask_index = 0;
+
+ mask_index = (freq_offset << 4) / 5;
+ if (mask_index < 0)
+ mask_index = mask_index - 1;
+
+ mask_index = mask_index & 0x7f;
+
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B,
+ mask_index);
+
+ /* A == B */
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A,
+ mask_index);
+
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B,
+ mask_index);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_B, 0xe);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B, 0xe);
+
+ /* A == B */
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0);
+}
+
static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
struct ath9k_channel *chan,
- int freq_offset)
+ int freq_offset,
+ int range,
+ int synth_freq)
{
int spur_freq_sd = 0;
int spur_subchannel_sd = 0;
@@ -402,7 +443,8 @@ static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
freq_offset,
spur_freq_sd,
spur_delta_phase,
- spur_subchannel_sd);
+ spur_subchannel_sd,
+ range, synth_freq);
}
/* Spur mitigation for OFDM */
@@ -447,7 +489,17 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i], mode);
freq_offset -= synth_freq;
if (abs(freq_offset) < range) {
- ar9003_hw_spur_ofdm_work(ah, chan, freq_offset);
+ ar9003_hw_spur_ofdm_work(ah, chan, freq_offset,
+ range, synth_freq);
+
+ if (AR_SREV_9565(ah) && (i < 4)) {
+ freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i + 1],
+ mode);
+ freq_offset -= synth_freq;
+ if (abs(freq_offset) < range)
+ ar9003_hw_spur_ofdm_9565(ah, freq_offset);
+ }
+
break;
}
}
@@ -456,7 +508,8 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
static void ar9003_hw_spur_mitigate(struct ath_hw *ah,
struct ath9k_channel *chan)
{
- ar9003_hw_spur_mitigate_mrc_cck(ah, chan);
+ if (!AR_SREV_9565(ah))
+ ar9003_hw_spur_mitigate_mrc_cck(ah, chan);
ar9003_hw_spur_mitigate_ofdm(ah, chan);
}
@@ -552,9 +605,6 @@ static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7))
REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
- else if (AR_SREV_9462(ah))
- /* xxx only when MCI support is enabled */
- REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
else
REG_WRITE(ah, AR_SELFGEN_MASK, tx);
@@ -736,7 +786,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
if (chan->channel == 2484)
ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1);
- if (AR_SREV_9462(ah))
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
@@ -746,9 +796,9 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
ath9k_hw_apply_txpower(ah, chan, false);
- if (AR_SREV_9462(ah)) {
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
- AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
ah->enabled_cals |= TX_IQ_CAL;
else
ah->enabled_cals &= ~TX_IQ_CAL;
@@ -1111,7 +1161,7 @@ static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
if (AR_SREV_9330(ah))
ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9330_2GHZ;
- if (AR_SREV_9462(ah)) {
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ;
ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9462_2GHZ;
ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ;
@@ -1223,17 +1273,17 @@ static void ar9003_hw_set_radar_conf(struct ath_hw *ah)
}
static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
- struct ath_hw_antcomb_conf *antconf)
+ struct ath_hw_antcomb_conf *antconf)
{
u32 regval;
regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
- antconf->main_lna_conf = (regval & AR_PHY_9485_ANT_DIV_MAIN_LNACONF) >>
- AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S;
- antconf->alt_lna_conf = (regval & AR_PHY_9485_ANT_DIV_ALT_LNACONF) >>
- AR_PHY_9485_ANT_DIV_ALT_LNACONF_S;
- antconf->fast_div_bias = (regval & AR_PHY_9485_ANT_FAST_DIV_BIAS) >>
- AR_PHY_9485_ANT_FAST_DIV_BIAS_S;
+ antconf->main_lna_conf = (regval & AR_PHY_ANT_DIV_MAIN_LNACONF) >>
+ AR_PHY_ANT_DIV_MAIN_LNACONF_S;
+ antconf->alt_lna_conf = (regval & AR_PHY_ANT_DIV_ALT_LNACONF) >>
+ AR_PHY_ANT_DIV_ALT_LNACONF_S;
+ antconf->fast_div_bias = (regval & AR_PHY_ANT_FAST_DIV_BIAS) >>
+ AR_PHY_ANT_FAST_DIV_BIAS_S;
if (AR_SREV_9330_11(ah)) {
antconf->lna1_lna2_delta = -9;
@@ -1241,6 +1291,9 @@ static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
} else if (AR_SREV_9485(ah)) {
antconf->lna1_lna2_delta = -9;
antconf->div_group = 2;
+ } else if (AR_SREV_9565(ah)) {
+ antconf->lna1_lna2_delta = -3;
+ antconf->div_group = 3;
} else {
antconf->lna1_lna2_delta = -3;
antconf->div_group = 0;
@@ -1253,26 +1306,84 @@ static void ar9003_hw_antdiv_comb_conf_set(struct ath_hw *ah,
u32 regval;
regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
- regval &= ~(AR_PHY_9485_ANT_DIV_MAIN_LNACONF |
- AR_PHY_9485_ANT_DIV_ALT_LNACONF |
- AR_PHY_9485_ANT_FAST_DIV_BIAS |
- AR_PHY_9485_ANT_DIV_MAIN_GAINTB |
- AR_PHY_9485_ANT_DIV_ALT_GAINTB);
- regval |= ((antconf->main_lna_conf <<
- AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S)
- & AR_PHY_9485_ANT_DIV_MAIN_LNACONF);
- regval |= ((antconf->alt_lna_conf << AR_PHY_9485_ANT_DIV_ALT_LNACONF_S)
- & AR_PHY_9485_ANT_DIV_ALT_LNACONF);
- regval |= ((antconf->fast_div_bias << AR_PHY_9485_ANT_FAST_DIV_BIAS_S)
- & AR_PHY_9485_ANT_FAST_DIV_BIAS);
- regval |= ((antconf->main_gaintb << AR_PHY_9485_ANT_DIV_MAIN_GAINTB_S)
- & AR_PHY_9485_ANT_DIV_MAIN_GAINTB);
- regval |= ((antconf->alt_gaintb << AR_PHY_9485_ANT_DIV_ALT_GAINTB_S)
- & AR_PHY_9485_ANT_DIV_ALT_GAINTB);
+ regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
+ AR_PHY_ANT_DIV_ALT_LNACONF |
+ AR_PHY_ANT_FAST_DIV_BIAS |
+ AR_PHY_ANT_DIV_MAIN_GAINTB |
+ AR_PHY_ANT_DIV_ALT_GAINTB);
+ regval |= ((antconf->main_lna_conf << AR_PHY_ANT_DIV_MAIN_LNACONF_S)
+ & AR_PHY_ANT_DIV_MAIN_LNACONF);
+ regval |= ((antconf->alt_lna_conf << AR_PHY_ANT_DIV_ALT_LNACONF_S)
+ & AR_PHY_ANT_DIV_ALT_LNACONF);
+ regval |= ((antconf->fast_div_bias << AR_PHY_ANT_FAST_DIV_BIAS_S)
+ & AR_PHY_ANT_FAST_DIV_BIAS);
+ regval |= ((antconf->main_gaintb << AR_PHY_ANT_DIV_MAIN_GAINTB_S)
+ & AR_PHY_ANT_DIV_MAIN_GAINTB);
+ regval |= ((antconf->alt_gaintb << AR_PHY_ANT_DIV_ALT_GAINTB_S)
+ & AR_PHY_ANT_DIV_ALT_GAINTB);
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
}
+static void ar9003_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah,
+ bool enable)
+{
+ u8 ant_div_ctl1;
+ u32 regval;
+
+ if (!AR_SREV_9565(ah))
+ return;
+
+ ah->shared_chain_lnadiv = enable;
+ ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
+
+ regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+ regval &= (~AR_ANT_DIV_CTRL_ALL);
+ regval |= (ant_div_ctl1 & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
+ regval &= ~AR_PHY_ANT_DIV_LNADIV;
+ regval |= ((ant_div_ctl1 >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
+
+ if (enable)
+ regval |= AR_ANT_DIV_ENABLE;
+
+ REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+
+ regval = REG_READ(ah, AR_PHY_CCK_DETECT);
+ regval &= ~AR_FAST_DIV_ENABLE;
+ regval |= ((ant_div_ctl1 >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
+
+ if (enable)
+ regval |= AR_FAST_DIV_ENABLE;
+
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
+
+ if (enable) {
+ REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ (1 << AR_PHY_ANT_SW_RX_PROT_S));
+ if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
+ REG_SET_BIT(ah, AR_PHY_RESTART,
+ AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+ REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
+ AR_BTCOEX_WL_LNADIV_FORCE_ON);
+ } else {
+ REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ (1 << AR_PHY_ANT_SW_RX_PROT_S));
+ REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
+ AR_BTCOEX_WL_LNADIV_FORCE_ON);
+
+ regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+ regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
+ AR_PHY_ANT_DIV_ALT_LNACONF |
+ AR_PHY_ANT_DIV_MAIN_GAINTB |
+ AR_PHY_ANT_DIV_ALT_GAINTB);
+ regval |= (AR_PHY_ANT_DIV_LNA1 << AR_PHY_ANT_DIV_MAIN_LNACONF_S);
+ regval |= (AR_PHY_ANT_DIV_LNA2 << AR_PHY_ANT_DIV_ALT_LNACONF_S);
+ REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+ }
+}
+
static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
struct ath9k_channel *chan,
u8 *ini_reloaded)
@@ -1312,10 +1423,10 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
ar9003_hw_prog_ini(ah, &ah->iniMac[ATH_INI_POST], modesIndex);
ar9003_hw_prog_ini(ah, &ah->iniBB[ATH_INI_POST], modesIndex);
ar9003_hw_prog_ini(ah, &ah->iniRadio[ATH_INI_POST], modesIndex);
+
if (AR_SREV_9462_20(ah))
- ar9003_hw_prog_ini(ah,
- &ah->ini_radio_post_sys2ant,
- modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->ini_radio_post_sys2ant,
+ modesIndex);
REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
@@ -1326,6 +1437,9 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
REG_WRITE_ARRAY(&ah->iniModesFastClock, modesIndex, regWrites);
+ if (AR_SREV_9565(ah))
+ REG_WRITE_ARRAY(&ah->iniModesFastClock, 1, regWrites);
+
REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
ah->modes_index = modesIndex;
@@ -1368,6 +1482,7 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get;
ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set;
+ ops->antctrl_shared_chain_lnadiv = ar9003_hw_antctrl_shared_chain_lnadiv;
ar9003_hw_set_nf_limits(ah);
ar9003_hw_set_radar_conf(ah);
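
Most of the ar9003_phy.c changes above are read-modify-write updates of bit fields in AR_PHY_MC_GAIN_CTRL: clear the field with its mask, then OR in the shifted value. The same pattern on a plain variable instead of a memory-mapped register; the field names and positions are copied from the diff only for flavor:

#include <stdint.h>
#include <stdio.h>

#define FIELD_MAIN_LNACONF   0x18000000u
#define FIELD_MAIN_LNACONF_S 27
#define FIELD_ALT_LNACONF    0x06000000u
#define FIELD_ALT_LNACONF_S  25

static uint32_t field_set(uint32_t reg, uint32_t mask, int shift, uint32_t val)
{
	reg &= ~mask;                      /* clear the old field */
	reg |= (val << shift) & mask;      /* insert the new value */
	return reg;
}

int main(void)
{
	uint32_t reg = 0xffffffffu;

	reg = field_set(reg, FIELD_MAIN_LNACONF, FIELD_MAIN_LNACONF_S, 0x2); /* LNA1 */
	reg = field_set(reg, FIELD_ALT_LNACONF,  FIELD_ALT_LNACONF_S,  0x1); /* LNA2 */
	printf("%#010x\n", (unsigned)reg);
	return 0;
}
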
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 84d3d4956861..9a48e3d2f231 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -223,15 +223,24 @@
#define AR_PHY_ML_CNTL_2 (AR_MRC_BASE + 0x1c)
#define AR_PHY_TST_ADC (AR_MRC_BASE + 0x20)
-#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A 0x00000FE0
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A 0x00000FE0
#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A_S 5
-#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A 0x1F
-#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A_S 0
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A 0x1F
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A_S 0
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B 0x00FE0000
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_B_S 17
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_B 0x0001F000
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_B_S 12
#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A 0x00000FE0
#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A_S 5
#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A 0x1F
#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A_S 0
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B 0x00FE0000
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_B_S 17
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B 0x0001F000
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B_S 12
+
/*
* MRC Field Definitions
@@ -271,23 +280,25 @@
#define AR_ANT_DIV_ENABLE_S 24
-#define AR_PHY_9485_ANT_FAST_DIV_BIAS 0x00007e00
-#define AR_PHY_9485_ANT_FAST_DIV_BIAS_S 9
-#define AR_PHY_9485_ANT_DIV_LNADIV 0x01000000
-#define AR_PHY_9485_ANT_DIV_LNADIV_S 24
-#define AR_PHY_9485_ANT_DIV_ALT_LNACONF 0x06000000
-#define AR_PHY_9485_ANT_DIV_ALT_LNACONF_S 25
-#define AR_PHY_9485_ANT_DIV_MAIN_LNACONF 0x18000000
-#define AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S 27
-#define AR_PHY_9485_ANT_DIV_ALT_GAINTB 0x20000000
-#define AR_PHY_9485_ANT_DIV_ALT_GAINTB_S 29
-#define AR_PHY_9485_ANT_DIV_MAIN_GAINTB 0x40000000
-#define AR_PHY_9485_ANT_DIV_MAIN_GAINTB_S 30
-
-#define AR_PHY_9485_ANT_DIV_LNA1_MINUS_LNA2 0x0
-#define AR_PHY_9485_ANT_DIV_LNA2 0x1
-#define AR_PHY_9485_ANT_DIV_LNA1 0x2
-#define AR_PHY_9485_ANT_DIV_LNA1_PLUS_LNA2 0x3
+#define AR_PHY_ANT_FAST_DIV_BIAS 0x00007e00
+#define AR_PHY_ANT_FAST_DIV_BIAS_S 9
+#define AR_PHY_ANT_SW_RX_PROT 0x00800000
+#define AR_PHY_ANT_SW_RX_PROT_S 23
+#define AR_PHY_ANT_DIV_LNADIV 0x01000000
+#define AR_PHY_ANT_DIV_LNADIV_S 24
+#define AR_PHY_ANT_DIV_ALT_LNACONF 0x06000000
+#define AR_PHY_ANT_DIV_ALT_LNACONF_S 25
+#define AR_PHY_ANT_DIV_MAIN_LNACONF 0x18000000
+#define AR_PHY_ANT_DIV_MAIN_LNACONF_S 27
+#define AR_PHY_ANT_DIV_ALT_GAINTB 0x20000000
+#define AR_PHY_ANT_DIV_ALT_GAINTB_S 29
+#define AR_PHY_ANT_DIV_MAIN_GAINTB 0x40000000
+#define AR_PHY_ANT_DIV_MAIN_GAINTB_S 30
+
+#define AR_PHY_ANT_DIV_LNA1_MINUS_LNA2 0x0
+#define AR_PHY_ANT_DIV_LNA2 0x1
+#define AR_PHY_ANT_DIV_LNA1 0x2
+#define AR_PHY_ANT_DIV_LNA1_PLUS_LNA2 0x3
#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c)
#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30)
@@ -413,6 +424,8 @@
#define AR_PHY_FIND_SIG_RELSTEP 0x1f
#define AR_PHY_FIND_SIG_RELSTEP_S 0
#define AR_PHY_FIND_SIG_RELSTEP_SIGN_BIT 5
+#define AR_PHY_RESTART_ENABLE_DIV_M2FLAG 0x00200000
+#define AR_PHY_RESTART_ENABLE_DIV_M2FLAG_S 21
#define AR_PHY_RESTART_DIV_GC 0x001C0000
#define AR_PHY_RESTART_DIV_GC_S 18
#define AR_PHY_RESTART_ENA 0x01
@@ -609,6 +622,12 @@
#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff
#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0
+#define AR_PHY_BB_THERM_ADC_3 (AR_SM_BASE + 0x250)
+#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN 0x0001ff00
+#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_SCALE_GAIN_S 8
+#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET 0x000000ff
+#define AR_PHY_BB_THERM_ADC_3_THERM_ADC_OFFSET_S 0
+
#define AR_PHY_BB_THERM_ADC_4 (AR_SM_BASE + 0x254)
#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE 0x000000ff
#define AR_PHY_BB_THERM_ADC_4_LATEST_THERM_VALUE_S 0
@@ -630,8 +649,8 @@
#define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G_S 1
#define AR_PHY_65NM_CH0_SYNTH4 0x1608c
-#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT (AR_SREV_9462(ah) ? 0x00000001 : 0x00000002)
-#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S (AR_SREV_9462(ah) ? 0 : 1)
+#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x00000001 : 0x00000002)
+#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0 : 1)
#define AR_PHY_65NM_CH0_SYNTH7 0x16098
#define AR_PHY_65NM_CH0_BIAS1 0x160c0
#define AR_PHY_65NM_CH0_BIAS2 0x160c4
@@ -641,7 +660,7 @@
#define AR_PHY_65NM_CH2_RXTX4 0x1690c
#define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \
- ((AR_SREV_9462(ah) ? 0x1628c : 0x16280)))
+ (((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x1628c : 0x16280)))
#define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300)
#define AR_CH0_TOP_XPABIASLVL_S (AR_SREV_9550(ah) ? 6 : 8)
@@ -669,7 +688,7 @@
#define AR_SWITCH_TABLE_ALL_S (0)
#define AR_PHY_65NM_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 :\
- (AR_SREV_9462(ah) ? 0x16294 : 0x1628c))
+ ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16294 : 0x1628c))
#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000
#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31
@@ -691,17 +710,17 @@
#define AR_CH0_TOP2_XPABIASLVL_S 12
#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \
- (AR_SREV_9462(ah) ? 0x16298 : 0x16290))
+ ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16298 : 0x16290))
#define AR_CH0_XTAL_CAPINDAC 0x7f000000
#define AR_CH0_XTAL_CAPINDAC_S 24
#define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000
#define AR_CH0_XTAL_CAPOUTDAC_S 17
-#define AR_PHY_PMU1 (AR_SREV_9462(ah) ? 0x16340 : 0x16c40)
+#define AR_PHY_PMU1 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16340 : 0x16c40)
#define AR_PHY_PMU1_PWD 0x1
#define AR_PHY_PMU1_PWD_S 0
-#define AR_PHY_PMU2 (AR_SREV_9462(ah) ? 0x16344 : 0x16c44)
+#define AR_PHY_PMU2 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16344 : 0x16c44)
#define AR_PHY_PMU2_PGM 0x00200000
#define AR_PHY_PMU2_PGM_S 21
@@ -881,6 +900,8 @@
#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000
#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28
+#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR 0x20000000
+#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR_S 29
#define AR_PHY_65NM_RXTX4_XLNA_BIAS 0xC0000000
#define AR_PHY_65NM_RXTX4_XLNA_BIAS_S 30
@@ -1244,4 +1265,24 @@
#define AR_PHY_CL_TAB_CL_GAIN_MOD 0x1f
#define AR_PHY_CL_TAB_CL_GAIN_MOD_S 0
+#define AR_BTCOEX_WL_LNADIV 0x1a64
+#define AR_BTCOEX_WL_LNADIV_PREDICTED_PERIOD 0x00003FFF
+#define AR_BTCOEX_WL_LNADIV_PREDICTED_PERIOD_S 0
+#define AR_BTCOEX_WL_LNADIV_DPDT_IGNORE_PRIORITY 0x00004000
+#define AR_BTCOEX_WL_LNADIV_DPDT_IGNORE_PRIORITY_S 14
+#define AR_BTCOEX_WL_LNADIV_FORCE_ON 0x00008000
+#define AR_BTCOEX_WL_LNADIV_FORCE_ON_S 15
+#define AR_BTCOEX_WL_LNADIV_MODE_OPTION 0x00030000
+#define AR_BTCOEX_WL_LNADIV_MODE_OPTION_S 16
+#define AR_BTCOEX_WL_LNADIV_MODE 0x007c0000
+#define AR_BTCOEX_WL_LNADIV_MODE_S 18
+#define AR_BTCOEX_WL_LNADIV_ALLOWED_TX_ANTDIV_WL_TX_REQ 0x00800000
+#define AR_BTCOEX_WL_LNADIV_ALLOWED_TX_ANTDIV_WL_TX_REQ_S 23
+#define AR_BTCOEX_WL_LNADIV_DISABLE_TX_ANTDIV_ENABLE 0x01000000
+#define AR_BTCOEX_WL_LNADIV_DISABLE_TX_ANTDIV_ENABLE_S 24
+#define AR_BTCOEX_WL_LNADIV_CONTINUOUS_BT_ACTIVE_PROTECT 0x02000000
+#define AR_BTCOEX_WL_LNADIV_CONTINUOUS_BT_ACTIVE_PROTECT_S 25
+#define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD 0xFC000000
+#define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD_S 26
+
#endif /* AR9003_PHY_H */
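
Every field added to ar9003_phy.h above comes as a mask plus a matching _S shift, which is what lets the driver build extract/insert helpers by token pasting (ath9k's MS()/SM() macros follow this shape). The macros below mirror that idea but are re-declared locally so the example stands alone:

#include <stdint.h>
#include <stdio.h>

#define WL_LNADIV_MODE   0x007c0000u
#define WL_LNADIV_MODE_S 18

#define FIELD_GET(v, f) (((v) & f) >> f##_S)            /* extract, like MS() */
#define FIELD_PUT(v, f) (((uint32_t)(v) << f##_S) & f)  /* insert,  like SM() */

int main(void)
{
	uint32_t reg = FIELD_PUT(0x1f, WL_LNADIV_MODE);

	printf("reg=%#010x mode=%#x\n", (unsigned)reg,
	       (unsigned)FIELD_GET(reg, WL_LNADIV_MODE));
	return 0;
}
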
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 4ef7dcccaa2f..58f30f65c6b6 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -58,7 +58,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+ {0x00009e3c, 0xcf946222, 0xcf946222, 0xcfd5c782, 0xcfd5c282},
{0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
new file mode 100644
index 000000000000..843e79f67ff2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -0,0 +1,1231 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9565_1P0_H
+#define INITVALS_9565_1P0_H
+
+/* AR9565 1.0 */
+
+static const u32 ar9565_1p0_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x000a0085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00001810, 0x0f000003},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c20},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00b00005},
+ {0x000080d8, 0x00400002},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486200},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c4, 0x33332210},
+ {0x000081c8, 0x00000000},
+ {0x000081cc, 0x00000000},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f424},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e848},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9d400010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x0000001f},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0xffff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x800301ff},
+};
+
+static const u32 ar9565_1p0_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9565_1p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a8f6b},
+ {0x0000980c, 0x04800000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x6400a290},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x0d000600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098e4, 0x01ffffff},
+ {0x000098e8, 0x01ffffff},
+ {0x000098ec, 0x01ffffff},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009bf0, 0x80000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x1883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c00400},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0078230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x126f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009e54, 0xe4c355c7},
+ {0x00009e5c, 0xe9198724},
+ {0x00009fc0, 0x823e4fc8},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000006},
+ {0x0000a3f8, 0x0c9bd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00000000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x05000096},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a454, 0x03000000},
+ {0x0000a458, 0x00000000},
+ {0x0000a644, 0xbfad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a6b4, 0x00512c01},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000001},
+ {0x0000a7f0, 0x80000000},
+};
+
+static const u32 ar9565_1p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a800d},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x09143c81},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x07318fc0, 0x07318fc4, 0x07318fc4, 0x07318fc0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000a288, 0x00100510, 0x00100510, 0x00100510, 0x00100510},
+ {0x0000a28c, 0x00021551, 0x00021551, 0x00021551, 0x00021551},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
+ {0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9565_1p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016010, 0x6d823601},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x1c99e04f},
+ {0x00016050, 0x6db6db6c},
+ {0x00016058, 0x6c200000},
+ {0x00016080, 0x000c0000},
+ {0x00016084, 0x9a68048c},
+ {0x00016088, 0x54214514},
+ {0x0001608c, 0x1203040b},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd28b3330},
+ {0x000160a0, 0x0a108ffe},
+ {0x000160a4, 0x812fc491},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92000000},
+ {0x000160b8, 0x0285dddc},
+ {0x000160bc, 0x02908888},
+ {0x000160c0, 0x006db6d0},
+ {0x000160c4, 0x6dd6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x6de6c1b0},
+ {0x00016100, 0x3fffbe04},
+ {0x00016104, 0xfff80000},
+ {0x00016108, 0x00200400},
+ {0x00016110, 0x00000000},
+ {0x00016144, 0x02084080},
+ {0x00016148, 0x000080c0},
+ {0x00016280, 0x050a0001},
+ {0x00016284, 0x3d841440},
+ {0x00016288, 0x00000000},
+ {0x0001628c, 0xe3000000},
+ {0x00016290, 0xa1004080},
+ {0x00016294, 0x40000028},
+ {0x00016298, 0x55aa2900},
+ {0x00016340, 0x131c827a},
+ {0x00016344, 0x00300000},
+};
+
+static const u32 ar9565_1p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
+ {0x000160ac, 0xa4646c08, 0xa4646c08, 0xa4646c08, 0xa4646c08},
+ {0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
+ {0x0001610c, 0x40000000, 0x40000000, 0x40000000, 0x40000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
+
+static const u32 ar9565_1p0_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x00004078, 0x00000002},
+ {0x000040a4, 0x00a0c9c9},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+};
+
+static const u32 ar9565_1p0_soc_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00007010, 0x00002233, 0x00002233, 0x00002233, 0x00002233},
+};
+
+static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
+ {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
+ {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
+ {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
+ {0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9565_1p0_pciephy_pll_on_clkreq_disable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x18212ede},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0003780c},
+};
+
+static const u32 ar9565_1p0_modes_fast_clock[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00001030, 0x00000268, 0x000004d0},
+ {0x00001070, 0x0000018c, 0x00000318},
+ {0x000010b0, 0x00000fd0, 0x00001fa0},
+ {0x00008014, 0x044c044c, 0x08980898},
+ {0x0000801c, 0x148ec02b, 0x148ec057},
+ {0x00008318, 0x000044c0, 0x00008980},
+ {0x00009e00, 0x03721821, 0x03721821},
+ {0x0000a230, 0x0000400b, 0x00004016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9565_1p0_common_wo_xlna_rx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x00bf00a0},
+ {0x0000a0c4, 0x11a011a1},
+ {0x0000a0c8, 0x11be11bf},
+ {0x0000a0cc, 0x11bc11bd},
+ {0x0000a0d0, 0x22632264},
+ {0x0000a0d4, 0x22612262},
+ {0x0000a0d8, 0x227f2260},
+ {0x0000a0dc, 0x4322227e},
+ {0x0000a0e0, 0x43204321},
+ {0x0000a0e4, 0x433e433f},
+ {0x0000a0e8, 0x4462433d},
+ {0x0000a0ec, 0x44604461},
+ {0x0000a0f0, 0x447e447f},
+ {0x0000a0f4, 0x5582447d},
+ {0x0000a0f8, 0x55805581},
+ {0x0000a0fc, 0x559e559f},
+ {0x0000a100, 0x66816682},
+ {0x0000a104, 0x669f6680},
+ {0x0000a108, 0x669d669e},
+ {0x0000a10c, 0x77627763},
+ {0x0000a110, 0x77607761},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x00bf00a0},
+ {0x0000a144, 0x11a011a1},
+ {0x0000a148, 0x11be11bf},
+ {0x0000a14c, 0x11bc11bd},
+ {0x0000a150, 0x22632264},
+ {0x0000a154, 0x22612262},
+ {0x0000a158, 0x227f2260},
+ {0x0000a15c, 0x4322227e},
+ {0x0000a160, 0x43204321},
+ {0x0000a164, 0x433e433f},
+ {0x0000a168, 0x4462433d},
+ {0x0000a16c, 0x44604461},
+ {0x0000a170, 0x447e447f},
+ {0x0000a174, 0x5582447d},
+ {0x0000a178, 0x55805581},
+ {0x0000a17c, 0x559e559f},
+ {0x0000a180, 0x66816682},
+ {0x0000a184, 0x669f6680},
+ {0x0000a188, 0x669d669e},
+ {0x0000a18c, 0x77627763},
+ {0x0000a190, 0x77607761},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9565_1p0_modes_low_ob_db_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
+ {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
+ {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
+ {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
+ {0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
+ {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
+ {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
+ {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0b022220, 0x0b022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10022223, 0x10022223, 0x0c000200, 0x0c000200},
+ {0x0000a510, 0x15022620, 0x15022620, 0x10000202, 0x10000202},
+ {0x0000a514, 0x19022622, 0x19022622, 0x13000400, 0x13000400},
+ {0x0000a518, 0x1c022822, 0x1c022822, 0x17000402, 0x17000402},
+ {0x0000a51c, 0x21022842, 0x21022842, 0x1b000404, 0x1b000404},
+ {0x0000a520, 0x24022c41, 0x24022c41, 0x1e000603, 0x1e000603},
+ {0x0000a524, 0x29023042, 0x29023042, 0x23000a02, 0x23000a02},
+ {0x0000a528, 0x2d023044, 0x2d023044, 0x27000a04, 0x27000a04},
+ {0x0000a52c, 0x31023644, 0x31023644, 0x2a000a20, 0x2a000a20},
+ {0x0000a530, 0x36025643, 0x36025643, 0x2e000e20, 0x2e000e20},
+ {0x0000a534, 0x3a025a44, 0x3a025a44, 0x32000e22, 0x32000e22},
+ {0x0000a538, 0x3d025e45, 0x3d025e45, 0x36000e24, 0x36000e24},
+ {0x0000a53c, 0x43025e4a, 0x43025e4a, 0x3a001640, 0x3a001640},
+ {0x0000a540, 0x4a025e6c, 0x4a025e6c, 0x3e001660, 0x3e001660},
+ {0x0000a544, 0x50025e8e, 0x50025e8e, 0x41001861, 0x41001861},
+ {0x0000a548, 0x56025eb2, 0x56025eb2, 0x45001a81, 0x45001a81},
+ {0x0000a54c, 0x5c025eb5, 0x5c025eb5, 0x49001a83, 0x49001a83},
+ {0x0000a550, 0x62025ef6, 0x62025ef6, 0x4c001c84, 0x4c001c84},
+ {0x0000a554, 0x65025f56, 0x65025f56, 0x4f001ce3, 0x4f001ce3},
+ {0x0000a558, 0x69027f56, 0x69027f56, 0x53001ce5, 0x53001ce5},
+ {0x0000a55c, 0x6d029f56, 0x6d029f56, 0x57001ce9, 0x57001ce9},
+ {0x0000a560, 0x73049f56, 0x73049f56, 0x5b001ceb, 0x5b001ceb},
+ {0x0000a564, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804201, 0x00804201, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x00804201, 0x00804201, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008201, 0x02008201, 0x02008501, 0x02008501},
+ {0x0000a620, 0x02c10a03, 0x02c10a03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04815205, 0x04815205, 0x02c10b04, 0x02c10b04},
+ {0x0000a628, 0x0581d406, 0x0581d406, 0x03814b04, 0x03814b04},
+ {0x0000a62c, 0x0581d607, 0x0581d607, 0x05018e05, 0x05018e05},
+ {0x0000a630, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
+ {0x0000a634, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
+ {0x0000a638, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
+ {0x0000a63c, 0x0581d607, 0x0581d607, 0x05019406, 0x05019406},
+ {0x00016044, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
+ {0x00016048, 0x8db49060, 0x8db49060, 0x8db49060, 0x8db49060},
+ {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+};
+
+static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0xfc0a9380, 0xfc0a9380, 0xfdab5b52, 0xfdab5b52},
+ {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
+ {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
+ {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+ {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
+ {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a618, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a61c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a620, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a624, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a628, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a62c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a630, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a634, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a638, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a63c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x056d82e6, 0x056d82e6, 0x056d82e6, 0x056d82e6},
+ {0x00016048, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+#endif /* INITVALS_9565_1P0_H */
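For reference when reading the large tables added above: each row pairs a register address with either one value ("allmodes") or one value per mode column (5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20), and the driver programs whichever column matches the current operating mode. A minimal standalone sketch of that application loop, using a two-row excerpt copied from the postamble table above and a printf stub in place of a real register write (apply_ini and reg_write are illustrative names, not ath9k's own helpers):

#include <stdio.h>
#include <stdint.h>

/* Column layout mirrors the [][5] tables above: addr, then one value per mode. */
enum { COL_ADDR, COL_5G_HT20, COL_5G_HT40, COL_2G_HT40, COL_2G_HT20, NUM_COLS };

/* Tiny excerpt of the kind of table this header defines. */
static const uint32_t sample_postamble[][NUM_COLS] = {
	{0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
	{0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
};

/* Stand-in for a hardware register write. */
static void reg_write(uint32_t addr, uint32_t val)
{
	printf("REG 0x%08x <= 0x%08x\n", addr, val);
}

/* Write one mode's column of an initvals table to the (mock) hardware. */
static void apply_ini(const uint32_t (*tbl)[NUM_COLS], size_t rows, int mode_col)
{
	for (size_t i = 0; i < rows; i++)
		reg_write(tbl[i][COL_ADDR], tbl[i][mode_col]);
}

int main(void)
{
	apply_ini(sample_postamble, 2, COL_2G_HT20);
	return 0;
}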
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index b09285c36c4a..dfe6a4707fd2 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -173,6 +173,8 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
+#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
+
#define ATH_TX_COMPLETE_POLL_INT 1000
enum ATH_AGGR_STATUS {
@@ -280,6 +282,7 @@ struct ath_tx_control {
struct ath_txq *txq;
struct ath_node *an;
u8 paprd;
+ struct ieee80211_sta *sta;
};
#define ATH_TX_ERROR 0x01
@@ -422,7 +425,6 @@ void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_set_beacon(struct ath_softc *sc);
-void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
/*******************/
/* Link Monitoring */
@@ -472,7 +474,7 @@ struct ath_btcoex {
unsigned long op_flags;
int bt_stomp_type; /* Types of BT stomping */
u32 btcoex_no_stomp; /* in usec */
- u32 btcoex_period; /* in usec */
+ u32 btcoex_period; /* in msec */
u32 btscan_no_stomp; /* in usec */
u32 duty_cycle;
u32 bt_wait_time;
@@ -537,6 +539,7 @@ struct ath9k_wow_pattern {
#ifdef CONFIG_MAC80211_LEDS
void ath_init_leds(struct ath_softc *sc);
void ath_deinit_leds(struct ath_softc *sc);
+void ath_fill_led_pin(struct ath_softc *sc);
#else
static inline void ath_init_leds(struct ath_softc *sc)
{
@@ -545,6 +548,9 @@ static inline void ath_init_leds(struct ath_softc *sc)
static inline void ath_deinit_leds(struct ath_softc *sc)
{
}
+static inline void ath_fill_led_pin(struct ath_softc *sc)
+{
+}
#endif
/*******************************/
@@ -596,8 +602,6 @@ struct ath_ant_comb {
int main_conf;
enum ath9k_ant_div_comb_lna_conf first_quick_scan_conf;
enum ath9k_ant_div_comb_lna_conf second_quick_scan_conf;
- int first_bias;
- int second_bias;
bool first_ratio;
bool second_ratio;
unsigned long scan_start_time;
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index acd437384fe4..419e9a3f2fed 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -43,8 +43,8 @@ static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX]
{ 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* STOMP_NONE */
};
-static const u32 ar9462_wlan_weights[ATH_BTCOEX_STOMP_MAX]
- [AR9300_NUM_WLAN_WEIGHTS] = {
+static const u32 mci_wlan_weights[ATH_BTCOEX_STOMP_MAX]
+ [AR9300_NUM_WLAN_WEIGHTS] = {
{ 0x01017d01, 0x41414101, 0x41414101, 0x41414141 }, /* STOMP_ALL */
{ 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */
{ 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */
@@ -208,14 +208,37 @@ static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah)
AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
}
+/*
+ * For AR9002, bt_weight/wlan_weight are used.
+ * For AR9003 and above, stomp_type is used.
+ */
void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
u32 bt_weight,
- u32 wlan_weight)
+ u32 wlan_weight,
+ enum ath_stomp_type stomp_type)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
- btcoex_hw->bt_coex_weights = SM(bt_weight, AR_BTCOEX_BT_WGHT) |
- SM(wlan_weight, AR_BTCOEX_WL_WGHT);
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ const u32 *weight = ar9003_wlan_weights[stomp_type];
+ int i;
+
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ if ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
+ btcoex_hw->mci.stomp_ftp)
+ stomp_type = ATH_BTCOEX_STOMP_LOW_FTP;
+ weight = mci_wlan_weights[stomp_type];
+ }
+
+ for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
+ btcoex_hw->bt_weight[i] = AR9300_BT_WGHT;
+ btcoex_hw->wlan_weight[i] = weight[i];
+ }
+ } else {
+ btcoex_hw->bt_coex_weights =
+ SM(bt_weight, AR_BTCOEX_BT_WGHT) |
+ SM(wlan_weight, AR_BTCOEX_WL_WGHT);
+ }
}
EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
@@ -282,7 +305,7 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
ath9k_hw_btcoex_enable_2wire(ah);
break;
case ATH_BTCOEX_CFG_3WIRE:
- if (AR_SREV_9462(ah)) {
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
ath9k_hw_btcoex_enable_mci(ah);
return;
}
@@ -304,7 +327,7 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
int i;
btcoex_hw->enabled = false;
- if (AR_SREV_9462(ah)) {
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
@@ -332,26 +355,6 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_btcoex_disable);
-static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
- enum ath_stomp_type stomp_type)
-{
- struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
- const u32 *weight = ar9003_wlan_weights[stomp_type];
- int i;
-
- if (AR_SREV_9462(ah)) {
- if ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
- btcoex->mci.stomp_ftp)
- stomp_type = ATH_BTCOEX_STOMP_LOW_FTP;
- weight = ar9462_wlan_weights[stomp_type];
- }
-
- for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
- btcoex->bt_weight[i] = AR9300_BT_WGHT;
- btcoex->wlan_weight[i] = weight[i];
- }
-}
-
/*
* Configures appropriate weight based on stomp type.
*/
@@ -359,22 +362,22 @@ void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
enum ath_stomp_type stomp_type)
{
if (AR_SREV_9300_20_OR_LATER(ah)) {
- ar9003_btcoex_bt_stomp(ah, stomp_type);
+ ath9k_hw_btcoex_set_weight(ah, 0, 0, stomp_type);
return;
}
switch (stomp_type) {
case ATH_BTCOEX_STOMP_ALL:
ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_ALL_WLAN_WGHT);
+ AR_STOMP_ALL_WLAN_WGHT, 0);
break;
case ATH_BTCOEX_STOMP_LOW:
ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_LOW_WLAN_WGHT);
+ AR_STOMP_LOW_WLAN_WGHT, 0);
break;
case ATH_BTCOEX_STOMP_NONE:
ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_NONE_WLAN_WGHT);
+ AR_STOMP_NONE_WLAN_WGHT, 0);
break;
default:
ath_dbg(ath9k_hw_common(ah), BTCOEX, "Invalid Stomptype\n");
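The comment in the hunk above describes the reworked ath9k_hw_btcoex_set_weight() contract: pre-AR9003 callers keep passing the legacy bt/wlan weight pair (with 0 for the stomp type), while AR9003-and-later callers pass zero weights and let the stomp type select a weight table, as the call sites changed later in this patch show. A minimal standalone mock of that dual calling convention (the stub, chip check, and weight values below are illustrative only, not driver code):

#include <stdio.h>

enum ath_stomp_type {
	ATH_BTCOEX_STOMP_ALL,
	ATH_BTCOEX_STOMP_LOW,
	ATH_BTCOEX_STOMP_NONE,
};

/* Mock of the unified helper: newer chips select a weight table by stomp
 * type, older ones program the two weight values directly. */
static void set_weight(int ar9003_or_later, unsigned int bt_weight,
		       unsigned int wlan_weight, enum ath_stomp_type stomp)
{
	if (ar9003_or_later)
		printf("select wlan weight table for stomp type %d\n", stomp);
	else
		printf("program weights bt=0x%x wlan=0x%x\n", bt_weight, wlan_weight);
}

int main(void)
{
	/* Legacy (AR9002-style) call: weights are meaningful, stomp ignored. */
	set_weight(0, 0x1234, 0x5678, 0);	/* placeholder weight values */
	/* AR9003+/MCI-style call: weights ignored, stomp type drives the table. */
	set_weight(1, 0, 0, ATH_BTCOEX_STOMP_NONE);
	return 0;
}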
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 20092f98658f..385197ad79b0 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -107,7 +107,8 @@ void ath9k_hw_btcoex_init_mci(struct ath_hw *ah);
void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum);
void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
u32 bt_weight,
- u32 wlan_weight);
+ u32 wlan_weight,
+ enum ath_stomp_type stomp_type);
void ath9k_hw_btcoex_disable(struct ath_hw *ah);
void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
enum ath_stomp_type stomp_type);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index c8ef30127adb..6727b566d294 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -222,6 +222,57 @@ static const struct file_operations fops_disable_ani = {
.llseek = default_llseek,
};
+static ssize_t read_file_ant_diversity(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", common->antenna_diversity);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_ant_diversity(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ unsigned long antenna_diversity;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ if (!AR_SREV_9565(sc->sc_ah))
+ goto exit;
+
+ buf[len] = '\0';
+ if (strict_strtoul(buf, 0, &antenna_diversity))
+ return -EINVAL;
+
+ common->antenna_diversity = !!antenna_diversity;
+ ath9k_ps_wakeup(sc);
+ ath_ant_comb_update(sc);
+ ath_dbg(common, CONFIG, "Antenna diversity: %d\n",
+ common->antenna_diversity);
+ ath9k_ps_restore(sc);
+exit:
+ return count;
+}
+
+static const struct file_operations fops_ant_diversity = {
+ .read = read_file_ant_diversity,
+ .write = write_file_ant_diversity,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
static ssize_t read_file_dma(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -373,6 +424,8 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
sc->debug.stats.istats.tsfoor++;
if (status & ATH9K_INT_MCI)
sc->debug.stats.istats.mci++;
+ if (status & ATH9K_INT_GENTIMER)
+ sc->debug.stats.istats.gen_timer++;
}
static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
@@ -418,6 +471,7 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
PR_IS("DTIM", dtim);
PR_IS("TSFOOR", tsfoor);
PR_IS("MCI", mci);
+ PR_IS("GENTIMER", gen_timer);
PR_IS("TOTAL", total);
len += snprintf(buf + len, mxlen - len,
@@ -1598,12 +1652,12 @@ int ath9k_init_debug(struct ath_hw *ah)
debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_samps);
#endif
-
debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
-
debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
+ debugfs_create_file("diversity", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_ant_diversity);
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 8b9d080d89da..2ed9785a38fa 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -41,7 +41,6 @@ enum ath_reset_type {
RESET_TYPE_PLL_HANG,
RESET_TYPE_MAC_HANG,
RESET_TYPE_BEACON_STUCK,
- RESET_TYPE_MCI,
__RESET_TYPE_MAX
};
@@ -74,6 +73,8 @@ enum ath_reset_type {
* from a beacon differs from the PCU's internal TSF by more than a
* (programmable) threshold
* @local_timeout: Internal bus timeout.
+ * @mci: MCI interrupt, specific to MCI based BTCOEX chipsets
+ * @gen_timer: Generic hardware timer interrupt
*/
struct ath_interrupt_stats {
u32 total;
@@ -100,6 +101,7 @@ struct ath_interrupt_stats {
u32 bb_watchdog;
u32 tsfoor;
u32 mci;
+ u32 gen_timer;
/* Sync-cause stats */
u32 sync_cause_all;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 484b31305906..319c651fa6c5 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -96,6 +96,7 @@
#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
+#define FBIN2FREQ(x, y) ((y) ? (2300 + x) : (4800 + 5 * x))
#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM))
#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK)
@@ -108,7 +109,7 @@
#define EEP_RFSILENT_ENABLED_S 0
#define EEP_RFSILENT_POLARITY 0x0002
#define EEP_RFSILENT_POLARITY_S 1
-#define EEP_RFSILENT_GPIO_SEL (AR_SREV_9462(ah) ? 0x00fc : 0x001c)
+#define EEP_RFSILENT_GPIO_SEL ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x00fc : 0x001c)
#define EEP_RFSILENT_GPIO_SEL_S 2
#define AR5416_OPFLAGS_11A 0x01
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 9f83f71742a5..d9ed141a053e 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -44,25 +44,6 @@ void ath_init_leds(struct ath_softc *sc)
if (AR_SREV_9100(sc->sc_ah))
return;
- if (sc->sc_ah->led_pin < 0) {
- if (AR_SREV_9287(sc->sc_ah))
- sc->sc_ah->led_pin = ATH_LED_PIN_9287;
- else if (AR_SREV_9485(sc->sc_ah))
- sc->sc_ah->led_pin = ATH_LED_PIN_9485;
- else if (AR_SREV_9300(sc->sc_ah))
- sc->sc_ah->led_pin = ATH_LED_PIN_9300;
- else if (AR_SREV_9462(sc->sc_ah))
- sc->sc_ah->led_pin = ATH_LED_PIN_9462;
- else
- sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
- }
-
- /* Configure gpio 1 for output */
- ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- /* LED off, active low */
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-
if (!led_blink)
sc->led_cdev.default_trigger =
ieee80211_get_radio_led_name(sc->hw);
@@ -78,6 +59,31 @@ void ath_init_leds(struct ath_softc *sc)
sc->led_registered = true;
}
+
+void ath_fill_led_pin(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (AR_SREV_9100(ah) || (ah->led_pin >= 0))
+ return;
+
+ if (AR_SREV_9287(ah))
+ ah->led_pin = ATH_LED_PIN_9287;
+ else if (AR_SREV_9485(sc->sc_ah))
+ ah->led_pin = ATH_LED_PIN_9485;
+ else if (AR_SREV_9300(sc->sc_ah))
+ ah->led_pin = ATH_LED_PIN_9300;
+ else if (AR_SREV_9462(sc->sc_ah) || AR_SREV_9565(sc->sc_ah))
+ ah->led_pin = ATH_LED_PIN_9462;
+ else
+ ah->led_pin = ATH_LED_PIN_DEF;
+
+ /* Configure gpio 1 for output */
+ ath9k_hw_cfg_output(ah, ah->led_pin, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+
+ /* LED off, active low */
+ ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+}
#endif
/*******************/
@@ -228,7 +234,12 @@ static void ath_btcoex_period_timer(unsigned long data)
ath9k_hw_btcoex_enable(ah);
spin_unlock_bh(&btcoex->btcoex_lock);
- if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
+ /*
+ * btcoex_period is in msec while (btcoex/btscan_)no_stomp are in usec,
+ * ensure that we properly convert btcoex_period to usec
+ * for any comparison with (btcoex/btscan_)no_stomp.
+ */
+ if (btcoex->btcoex_period * 1000 != btcoex->btcoex_no_stomp) {
if (btcoex->hw_timer_enabled)
ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
@@ -309,8 +320,10 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n");
/* make sure duty cycle timer is also stopped when resuming */
- if (btcoex->hw_timer_enabled)
+ if (btcoex->hw_timer_enabled) {
ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
+ btcoex->hw_timer_enabled = false;
+ }
btcoex->bt_priority_cnt = 0;
btcoex->bt_priority_time = jiffies;
@@ -331,18 +344,20 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc)
del_timer_sync(&btcoex->period_timer);
- if (btcoex->hw_timer_enabled)
+ if (btcoex->hw_timer_enabled) {
ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
-
- btcoex->hw_timer_enabled = false;
+ btcoex->hw_timer_enabled = false;
+ }
}
void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
- if (btcoex->hw_timer_enabled)
+ if (btcoex->hw_timer_enabled) {
ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
+ btcoex->hw_timer_enabled = false;
+ }
}
u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
@@ -380,7 +395,10 @@ void ath9k_start_btcoex(struct ath_softc *sc)
!ah->btcoex_hw.enabled) {
if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_LOW_WLAN_WGHT);
+ AR_STOMP_LOW_WLAN_WGHT, 0);
+ else
+ ath9k_hw_btcoex_set_weight(ah, 0, 0,
+ ATH_BTCOEX_STOMP_NONE);
ath9k_hw_btcoex_enable(ah);
if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
@@ -397,7 +415,7 @@ void ath9k_stop_btcoex(struct ath_softc *sc)
if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
ath9k_btcoex_timer_pause(sc);
ath9k_hw_btcoex_disable(ah);
- if (AR_SREV_9462(ah))
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
ath_mci_flush_profile(&sc->btcoex.mci);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index aa327adcc3d8..924c4616c3d9 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -973,8 +973,8 @@ static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
{
int transfer, err;
- const void *data = hif_dev->firmware->data;
- size_t len = hif_dev->firmware->size;
+ const void *data = hif_dev->fw_data;
+ size_t len = hif_dev->fw_size;
u32 addr = AR9271_FIRMWARE;
u8 *buf = kzalloc(4096, GFP_KERNEL);
u32 firm_offset;
@@ -1017,7 +1017,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
return -EIO;
dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %ld\n",
- hif_dev->fw_name, (unsigned long) hif_dev->firmware->size);
+ hif_dev->fw_name, (unsigned long) hif_dev->fw_size);
return 0;
}
@@ -1072,14 +1072,15 @@ static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
*/
static void ath9k_hif_usb_firmware_fail(struct hif_device_usb *hif_dev)
{
- struct device *parent = hif_dev->udev->dev.parent;
+ struct device *dev = &hif_dev->udev->dev;
+ struct device *parent = dev->parent;
complete(&hif_dev->fw_done);
if (parent)
device_lock(parent);
- device_release_driver(&hif_dev->udev->dev);
+ device_release_driver(dev);
if (parent)
device_unlock(parent);
@@ -1099,11 +1100,11 @@ static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
hif_dev->htc_handle = ath9k_htc_hw_alloc(hif_dev, &hif_usb,
&hif_dev->udev->dev);
- if (hif_dev->htc_handle == NULL) {
- goto err_fw;
- }
+ if (hif_dev->htc_handle == NULL)
+ goto err_dev_alloc;
- hif_dev->firmware = fw;
+ hif_dev->fw_data = fw->data;
+ hif_dev->fw_size = fw->size;
/* Proceed with initialization */
@@ -1121,6 +1122,8 @@ static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
goto err_htc_hw_init;
}
+ release_firmware(fw);
+ hif_dev->flags |= HIF_USB_READY;
complete(&hif_dev->fw_done);
return;
@@ -1129,8 +1132,8 @@ err_htc_hw_init:
ath9k_hif_usb_dev_deinit(hif_dev);
err_dev_init:
ath9k_htc_hw_free(hif_dev->htc_handle);
+err_dev_alloc:
release_firmware(fw);
- hif_dev->firmware = NULL;
err_fw:
ath9k_hif_usb_firmware_fail(hif_dev);
}
@@ -1277,11 +1280,10 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
wait_for_completion(&hif_dev->fw_done);
- if (hif_dev->firmware) {
+ if (hif_dev->flags & HIF_USB_READY) {
ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
ath9k_htc_hw_free(hif_dev->htc_handle);
ath9k_hif_usb_dev_deinit(hif_dev);
- release_firmware(hif_dev->firmware);
}
usb_set_intfdata(interface, NULL);
@@ -1317,13 +1319,23 @@ static int ath9k_hif_usb_resume(struct usb_interface *interface)
struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
struct htc_target *htc_handle = hif_dev->htc_handle;
int ret;
+ const struct firmware *fw;
ret = ath9k_hif_usb_alloc_urbs(hif_dev);
if (ret)
return ret;
- if (hif_dev->firmware) {
+ if (hif_dev->flags & HIF_USB_READY) {
+ /* request cached firmware during suspend/resume cycle */
+ ret = request_firmware(&fw, hif_dev->fw_name,
+ &hif_dev->udev->dev);
+ if (ret)
+ goto fail_resume;
+
+ hif_dev->fw_data = fw->data;
+ hif_dev->fw_size = fw->size;
ret = ath9k_hif_usb_download_fw(hif_dev);
+ release_firmware(fw);
if (ret)
goto fail_resume;
} else {
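
The resume path above re-requests the firmware image by name instead of keeping a struct firmware pinned for the device's whole lifetime. A minimal sketch of that pattern, assuming kernel context; my_dev and my_dev_reload_fw() are made-up names, and only request_firmware()/release_firmware() plus the borrow-then-release flow come from the hunk:

#include <linux/firmware.h>

struct my_dev {
	struct device *dev;
	const char *fw_name;
	const void *fw_data;
	size_t fw_size;
};

static int my_dev_reload_fw(struct my_dev *md, int (*download)(struct my_dev *))
{
	const struct firmware *fw;
	int ret;

	/* Re-request by name; on resume this is typically served from the fw cache. */
	ret = request_firmware(&fw, md->fw_name, md->dev);
	if (ret)
		return ret;

	/* Borrow the image only for the duration of the download. */
	md->fw_data = fw->data;
	md->fw_size = fw->size;
	ret = download(md);

	/* Drop the borrowed pointers before releasing the image. */
	md->fw_data = NULL;
	md->fw_size = 0;
	release_firmware(fw);

	return ret;
}
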
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
index 487ff658b4c1..51496e74b83e 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.h
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -85,12 +85,14 @@ struct cmd_buf {
};
#define HIF_USB_START BIT(0)
+#define HIF_USB_READY BIT(1)
struct hif_device_usb {
struct usb_device *udev;
struct usb_interface *interface;
const struct usb_device_id *usb_device_id;
- const struct firmware *firmware;
+ const void *fw_data;
+ size_t fw_size;
struct completion fw_done;
struct htc_target *htc_handle;
struct hif_usb_tx tx;
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 936e920fb88e..b30596fcf73a 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -542,6 +542,7 @@ void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv);
int ath9k_tx_init(struct ath9k_htc_priv *priv);
int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
+ struct ieee80211_sta *sta,
struct sk_buff *skb, u8 slot, bool is_cab);
void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 77d541feb910..f42d2eb6af99 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -326,7 +326,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
goto next;
}
- ret = ath9k_htc_tx_start(priv, skb, tx_slot, true);
+ ret = ath9k_htc_tx_start(priv, NULL, skb, tx_slot, true);
if (ret != 0) {
ath9k_htc_tx_clear_slot(priv, tx_slot);
dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 07df279c8d46..0eacfc13c915 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -161,7 +161,7 @@ void ath9k_htc_start_btcoex(struct ath9k_htc_priv *priv)
if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) {
ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_LOW_WLAN_WGHT);
+ AR_STOMP_LOW_WLAN_WGHT, 0);
ath9k_hw_btcoex_enable(ah);
ath_htc_resume_btcoex_work(priv);
}
@@ -173,17 +173,26 @@ void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
if (ah->btcoex_hw.enabled &&
ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
- ath9k_hw_btcoex_disable(ah);
if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
ath_htc_cancel_btcoex_work(priv);
+ ath9k_hw_btcoex_disable(ah);
}
}
void ath9k_htc_init_btcoex(struct ath9k_htc_priv *priv, char *product)
{
struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
int qnum;
+ /*
+ * Check if BTCOEX is globally disabled.
+ */
+ if (!common->btcoex_enabled) {
+ ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_NONE;
+ return;
+ }
+
if (product && strncmp(product, ATH_HTC_BTCOEX_PRODUCT_ID, 5) == 0) {
ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_3WIRE;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index a035a380d669..d98255eb1b9a 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -30,6 +30,10 @@ int htc_modparam_nohwcrypt;
module_param_named(nohwcrypt, htc_modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
+static int ath9k_htc_btcoex_enable;
+module_param_named(btcoex_enable, ath9k_htc_btcoex_enable, int, 0444);
+MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
+
#define CHAN2G(_freq, _idx) { \
.center_freq = (_freq), \
.hw_value = (_idx), \
@@ -635,6 +639,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
common->hw = priv->hw;
common->priv = priv;
common->debug_mask = ath9k_debug;
+ common->btcoex_enabled = ath9k_htc_btcoex_enable == 1;
spin_lock_init(&priv->beacon_lock);
spin_lock_init(&priv->tx.tx_lock);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index c785129692ff..ca78e33ca23e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -489,24 +489,20 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
ista = (struct ath9k_htc_sta *) sta->drv_priv;
memcpy(&tsta.macaddr, sta->addr, ETH_ALEN);
memcpy(&tsta.bssid, common->curbssid, ETH_ALEN);
- tsta.is_vif_sta = 0;
ista->index = sta_idx;
+ tsta.is_vif_sta = 0;
+ maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ sta->ht_cap.ampdu_factor);
+ tsta.maxampdu = cpu_to_be16(maxampdu);
} else {
memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
tsta.is_vif_sta = 1;
+ tsta.maxampdu = cpu_to_be16(0xffff);
}
tsta.sta_index = sta_idx;
tsta.vif_index = avp->index;
- if (!sta) {
- tsta.maxampdu = cpu_to_be16(0xffff);
- } else {
- maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
- sta->ht_cap.ampdu_factor);
- tsta.maxampdu = cpu_to_be16(maxampdu);
- }
-
WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
if (ret) {
if (sta)
@@ -856,7 +852,9 @@ set_timer:
/* mac80211 Callbacks */
/**********************/
-static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void ath9k_htc_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
struct ath9k_htc_priv *priv = hw->priv;
@@ -883,7 +881,7 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
goto fail_tx;
}
- ret = ath9k_htc_tx_start(priv, skb, slot, false);
+ ret = ath9k_htc_tx_start(priv, control->sta, skb, slot, false);
if (ret != 0) {
ath_dbg(common, XMIT, "Tx failed\n");
goto clear_slot;
@@ -1331,6 +1329,34 @@ static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
return ret;
}
+static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u32 changed)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_rate trate;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
+ ath9k_htc_setup_rate(priv, sta, &trate);
+ if (!ath9k_htc_send_rate_cmd(priv, &trate))
+ ath_dbg(common, CONFIG,
+ "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
+ sta->addr, be32_to_cpu(trate.capflags));
+ else
+ ath_dbg(common, CONFIG,
+ "Unable to update supported rates for sta: %pM\n",
+ sta->addr);
+ }
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
@@ -1419,7 +1445,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
if (priv->ah->sw_mgmt_crypto &&
key->cipher == WLAN_CIPHER_SUITE_CCMP)
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
ret = 0;
}
break;
@@ -1758,6 +1784,7 @@ struct ieee80211_ops ath9k_htc_ops = {
.sta_add = ath9k_htc_sta_add,
.sta_remove = ath9k_htc_sta_remove,
.conf_tx = ath9k_htc_conf_tx,
+ .sta_rc_update = ath9k_htc_sta_rc_update,
.bss_info_changed = ath9k_htc_bss_info_changed,
.set_key = ath9k_htc_set_key,
.get_tsf = ath9k_htc_get_tsf,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 47e61d0da33b..06cdcb772d78 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -333,12 +333,12 @@ static void ath9k_htc_tx_data(struct ath9k_htc_priv *priv,
}
int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
+ struct ieee80211_sta *sta,
struct sk_buff *skb,
u8 slot, bool is_cab)
{
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_sta *sta = tx_info->control.sta;
struct ieee80211_vif *vif = tx_info->control.vif;
struct ath9k_htc_sta *ista;
struct ath9k_htc_vif *avp = NULL;
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 265bf77598a2..0f2b97f6b739 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -78,6 +78,13 @@ static inline void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf);
}
+static inline void ath9k_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah,
+ bool enable)
+{
+ if (ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv)
+ ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv(ah, enable);
+}
+
/* Private hardware call ops */
/* PHY ops */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 4faf0a395876..f9a6ec5cf470 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -24,6 +24,7 @@
#include "rc.h"
#include "ar9003_mac.h"
#include "ar9003_mci.h"
+#include "ar9003_phy.h"
#include "debug.h"
#include "ath9k.h"
@@ -355,7 +356,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
(val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
- if (AR_SREV_9462(ah))
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
ah->is_pciexpress = true;
else
ah->is_pciexpress = (val &
@@ -602,6 +603,11 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (AR_SREV_9462(ah))
ah->WARegVal &= ~AR_WA_D3_L1_DISABLE;
+ if (AR_SREV_9565(ah)) {
+ ah->WARegVal |= AR_WA_BIT22;
+ REG_WRITE(ah, AR_WA, ah->WARegVal);
+ }
+
ath9k_hw_init_defaults(ah);
ath9k_hw_init_config(ah);
@@ -647,6 +653,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
case AR_SREV_VERSION_9340:
case AR_SREV_VERSION_9462:
case AR_SREV_VERSION_9550:
+ case AR_SREV_VERSION_9565:
break;
default:
ath_err(common,
@@ -708,7 +715,7 @@ int ath9k_hw_init(struct ath_hw *ah)
int ret;
struct ath_common *common = ath9k_hw_common(ah);
- /* These are all the AR5008/AR9001/AR9002 hardware family of chipsets */
+ /* These are all the AR5008/AR9001/AR9002/AR9003 hardware family of chipsets */
switch (ah->hw_version.devid) {
case AR5416_DEVID_PCI:
case AR5416_DEVID_PCIE:
@@ -728,6 +735,7 @@ int ath9k_hw_init(struct ath_hw *ah)
case AR9300_DEVID_AR9580:
case AR9300_DEVID_AR9462:
case AR9485_DEVID_AR1111:
+ case AR9300_DEVID_AR9565:
break;
default:
if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -800,8 +808,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
{
u32 pll;
- if (AR_SREV_9485(ah)) {
-
+ if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
/* program BB PLL ki and kd value, ki=0x4, kd=0x40 */
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
AR_CH0_BB_DPLL2_PLL_PWD, 0x1);
@@ -912,7 +919,8 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
}
pll = ath9k_hw_compute_pll_control(ah, chan);
-
+ if (AR_SREV_9565(ah))
+ pll |= 0x40000;
REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
@@ -1726,12 +1734,12 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
if (!ret)
goto fail;
- ath9k_hw_loadnf(ah, ah->curchan);
- ath9k_hw_start_nfcal(ah, true);
-
if (ath9k_hw_mci_is_enabled(ah))
ar9003_mci_2g5g_switch(ah, false);
+ ath9k_hw_loadnf(ah, ah->curchan);
+ ath9k_hw_start_nfcal(ah, true);
+
if (AR_SREV_9271(ah))
ar9002_hw_load_ani_reg(ah, chan);
@@ -2018,6 +2026,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_apply_gpio_override(ah);
+ if (AR_SREV_9565(ah) && ah->shared_chain_lnadiv)
+ REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV, AR_BTCOEX_WL_LNADIV_FORCE_ON);
+
return 0;
}
EXPORT_SYMBOL(ath9k_hw_reset);
@@ -2034,7 +2045,7 @@ static void ath9k_set_power_sleep(struct ath_hw *ah)
{
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
- if (AR_SREV_9462(ah)) {
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
REG_CLR_BIT(ah, AR_TIMER_MODE, 0xff);
REG_CLR_BIT(ah, AR_NDP2_TIMER_MODE, 0xff);
REG_CLR_BIT(ah, AR_SLP32_INC, 0xfffff);
@@ -2401,7 +2412,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (eeval & AR5416_OPFLAGS_11G)
pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;
- if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah))
+ if (AR_SREV_9485(ah) ||
+ AR_SREV_9285(ah) ||
+ AR_SREV_9330(ah) ||
+ AR_SREV_9565(ah))
chip_chainmask = 1;
else if (AR_SREV_9462(ah))
chip_chainmask = 3;
@@ -2489,7 +2503,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9300_20_OR_LATER(ah)) {
pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK;
- if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah))
+ if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah) && !AR_SREV_9565(ah))
pCap->hw_caps |= ATH9K_HW_CAP_LDPC;
pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
@@ -2525,7 +2539,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
}
- if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
+ if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
/*
* enable the diversity-combining algorithm only when
@@ -2568,14 +2582,12 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
}
- if (AR_SREV_9462(ah)) {
-
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE))
pCap->hw_caps |= ATH9K_HW_CAP_MCI;
if (AR_SREV_9462_20(ah))
pCap->hw_caps |= ATH9K_HW_CAP_RTT;
-
}
@@ -2741,7 +2753,7 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
ENABLE_REGWRITE_BUFFER(ah);
- if (AR_SREV_9462(ah))
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
bits |= ATH9K_RX_FILTER_CONTROL_WRAPPER;
REG_WRITE(ah, AR_RX_FILTER, bits);
@@ -3038,7 +3050,7 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
REG_SET_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
gen_tmr_configuration[timer->index].mode_mask);
- if (AR_SREV_9462(ah)) {
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
/*
* Starting from AR9462, each generic timer can select which tsf
* to use. But we still follow the old rule, 0 - 7 use tsf and
@@ -3072,6 +3084,16 @@ void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
gen_tmr_configuration[timer->index].mode_mask);
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ /*
+ * Need to switch back to TSF if it was using TSF2.
+ */
+ if ((timer->index >= AR_GEN_TIMER_BANK_1_LEN)) {
+ REG_CLR_BIT(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL,
+ (1 << timer->index));
+ }
+ }
+
/* Disable both trigger and thresh interrupt masks */
REG_CLR_BIT(ah, AR_IMR_S5,
(SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
@@ -3153,6 +3175,7 @@ static struct {
{ AR_SREV_VERSION_9485, "9485" },
{ AR_SREV_VERSION_9462, "9462" },
{ AR_SREV_VERSION_9550, "9550" },
+ { AR_SREV_VERSION_9565, "9565" },
};
/* For devices with external radios */
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index de6968fc64f4..566a4ce4f156 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -50,6 +50,7 @@
#define AR9300_DEVID_AR9330 0x0035
#define AR9300_DEVID_QCA955X 0x0038
#define AR9485_DEVID_AR1111 0x0037
+#define AR9300_DEVID_AR9565 0x0036
#define AR5416_AR9100_DEVID 0x000b
@@ -685,7 +686,7 @@ struct ath_hw_ops {
struct ath_hw_antcomb_conf *antconf);
void (*antdiv_comb_conf_set)(struct ath_hw *ah,
struct ath_hw_antcomb_conf *antconf);
-
+ void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
};
struct ath_nf_limits {
@@ -729,6 +730,7 @@ struct ath_hw {
bool aspm_enabled;
bool is_monitoring;
bool need_an_top2_fixup;
+ bool shared_chain_lnadiv;
u16 tx_trig_level;
u32 nf_regs[6];
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index f33712140fa5..fad3ccd5cd91 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -46,6 +46,10 @@ static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
+static int ath9k_enable_diversity;
+module_param_named(enable_diversity, ath9k_enable_diversity, int, 0444);
+MODULE_PARM_DESC(enable_diversity, "Enable Antenna diversity for AR9565");
+
bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */
@@ -258,7 +262,7 @@ static void setup_ht_cap(struct ath_softc *sc,
ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
- if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
+ if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
max_streams = 1;
else if (AR_SREV_9462(ah))
max_streams = 2;
@@ -546,6 +550,14 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
common->debug_mask = ath9k_debug;
common->btcoex_enabled = ath9k_btcoex_enable == 1;
common->disable_ani = false;
+
+ /*
+	 * Enable antenna diversity only when BTCOEX is disabled
+ * and the user manually requests the feature.
+ */
+ if (!common->btcoex_enabled && ath9k_enable_diversity)
+ common->antenna_diversity = 1;
+
spin_lock_init(&common->cc_lock);
spin_lock_init(&sc->sc_serial_rw);
@@ -597,6 +609,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ath9k_cmn_init_crypto(sc->sc_ah);
ath9k_init_misc(sc);
+ ath_fill_led_pin(sc);
if (common->bus_ops->aspm_init)
common->bus_ops->aspm_init(common);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a22df749b8db..31ab82e3ba85 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -696,7 +696,9 @@ mutex_unlock:
return r;
}
-static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void ath9k_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -756,6 +758,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
memset(&txctl, 0, sizeof(struct ath_tx_control));
txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
+ txctl.sta = control->sta;
ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb);
@@ -983,47 +986,21 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- int ret = 0;
- ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_WDS:
- case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_MESH_POINT:
- break;
- default:
- ath_err(common, "Interface type %d not yet supported\n",
- vif->type);
- ret = -EOPNOTSUPP;
- goto out;
- }
-
- if (ath9k_uses_beacons(vif->type)) {
- if (sc->nbcnvifs >= ATH_BCBUF) {
- ath_err(common, "Not enough beacon buffers when adding"
- " new interface of type: %i\n",
- vif->type);
- ret = -ENOBUFS;
- goto out;
- }
- }
-
ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
-
sc->nvifs++;
+ ath9k_ps_wakeup(sc);
ath9k_calculate_summary_state(hw, vif);
+ ath9k_ps_restore(sc);
+
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_assign_slot(sc, vif);
-out:
mutex_unlock(&sc->mutex);
- ath9k_ps_restore(sc);
- return ret;
+ return 0;
}
static int ath9k_change_interface(struct ieee80211_hw *hw,
@@ -1033,21 +1010,9 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- int ret = 0;
ath_dbg(common, CONFIG, "Change Interface\n");
-
mutex_lock(&sc->mutex);
- ath9k_ps_wakeup(sc);
-
- if (ath9k_uses_beacons(new_type) &&
- !ath9k_uses_beacons(vif->type)) {
- if (sc->nbcnvifs >= ATH_BCBUF) {
- ath_err(common, "No beacon slot available\n");
- ret = -ENOBUFS;
- goto out;
- }
- }
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_remove_slot(sc, vif);
@@ -1055,14 +1020,15 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
vif->type = new_type;
vif->p2p = p2p;
+ ath9k_ps_wakeup(sc);
ath9k_calculate_summary_state(hw, vif);
+ ath9k_ps_restore(sc);
+
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_assign_slot(sc, vif);
-out:
- ath9k_ps_restore(sc);
mutex_unlock(&sc->mutex);
- return ret;
+ return 0;
}
static void ath9k_remove_interface(struct ieee80211_hw *hw,
@@ -1073,7 +1039,6 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
ath_dbg(common, CONFIG, "Detach Interface\n");
- ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
sc->nvifs--;
@@ -1081,10 +1046,11 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_remove_slot(sc, vif);
+ ath9k_ps_wakeup(sc);
ath9k_calculate_summary_state(hw, NULL);
+ ath9k_ps_restore(sc);
mutex_unlock(&sc->mutex);
- ath9k_ps_restore(sc);
}
static void ath9k_enable_ps(struct ath_softc *sc)
@@ -1440,7 +1406,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
if (sc->sc_ah->sw_mgmt_crypto &&
key->cipher == WLAN_CIPHER_SUITE_CCMP)
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
ret = 0;
}
break;
@@ -2257,7 +2223,7 @@ static int ath9k_suspend(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
ath_cancel_work(sc);
- del_timer_sync(&common->ani.timer);
+ ath_stop_ani(sc);
del_timer_sync(&sc->rx_poll_timer);
if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index fb536e7e661b..ec2d7c807567 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -80,6 +80,7 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci)
struct ath_mci_profile_info *info, *tinfo;
mci->aggr_limit = 0;
+ mci->num_mgmt = 0;
if (list_empty(&mci->info))
return;
@@ -120,7 +121,14 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING)
goto skip_tuning;
+ mci->aggr_limit = 0;
btcoex->duty_cycle = ath_mci_duty_cycle[num_profile];
+ btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
+ if (NUM_PROF(mci))
+ btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+ else
+ btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
+ ATH_BTCOEX_STOMP_LOW;
if (num_profile == 1) {
info = list_first_entry(&mci->info,
@@ -132,7 +140,8 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
else if (info->T == 6) {
mci->aggr_limit = 6;
btcoex->duty_cycle = 30;
- }
+ } else
+ mci->aggr_limit = 6;
ath_dbg(common, MCI,
"Single SCO, aggregation limit %d 1/4 ms\n",
mci->aggr_limit);
@@ -191,6 +200,23 @@ skip_tuning:
ath9k_btcoex_timer_resume(sc);
}
+static void ath_mci_wait_btcal_done(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ /* Stop tx & rx */
+ ieee80211_stop_queues(sc->hw);
+ ath_stoprecv(sc);
+ ath_drain_all_txq(sc, false);
+
+ /* Wait for cal done */
+ ar9003_mci_start_reset(ah, ah->curchan);
+
+ /* Resume tx & rx */
+ ath_startrecv(sc);
+ ieee80211_wake_queues(sc->hw);
+}
+
static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
{
struct ath_hw *ah = sc->sc_ah;
@@ -201,8 +227,8 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
switch (opcode) {
case MCI_GPM_BT_CAL_REQ:
if (mci_hw->bt_state == MCI_BT_AWAKE) {
- ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START);
- ath9k_queue_reset(sc, RESET_TYPE_MCI);
+ mci_hw->bt_state = MCI_BT_CAL_START;
+ ath_mci_wait_btcal_done(sc);
}
ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
break;
@@ -224,8 +250,8 @@ static void ath9k_mci_work(struct work_struct *work)
ath_mci_update_scheme(sc);
}
-static void ath_mci_process_profile(struct ath_softc *sc,
- struct ath_mci_profile_info *info)
+static u8 ath_mci_process_profile(struct ath_softc *sc,
+ struct ath_mci_profile_info *info)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_btcoex *btcoex = &sc->btcoex;
@@ -251,25 +277,15 @@ static void ath_mci_process_profile(struct ath_softc *sc,
if (info->start) {
if (!entry && !ath_mci_add_profile(common, mci, info))
- return;
+ return 0;
} else
ath_mci_del_profile(common, mci, entry);
- btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
- mci->aggr_limit = mci->num_sco ? 6 : 0;
-
- btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
- if (NUM_PROF(mci))
- btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
- else
- btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
- ATH_BTCOEX_STOMP_LOW;
-
- ieee80211_queue_work(sc->hw, &sc->mci_work);
+ return 1;
}
-static void ath_mci_process_status(struct ath_softc *sc,
- struct ath_mci_profile_status *status)
+static u8 ath_mci_process_status(struct ath_softc *sc,
+ struct ath_mci_profile_status *status)
{
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_mci_profile *mci = &btcoex->mci;
@@ -278,14 +294,14 @@ static void ath_mci_process_status(struct ath_softc *sc,
	/* Link status types are not handled */
if (status->is_link)
- return;
+ return 0;
info.conn_handle = status->conn_handle;
if (ath_mci_find_profile(mci, &info))
- return;
+ return 0;
if (status->conn_handle >= ATH_MCI_MAX_PROFILE)
- return;
+ return 0;
if (status->is_critical)
__set_bit(status->conn_handle, mci->status);
@@ -299,7 +315,9 @@ static void ath_mci_process_status(struct ath_softc *sc,
} while (++i < ATH_MCI_MAX_PROFILE);
if (old_num_mgmt != mci->num_mgmt)
- ieee80211_queue_work(sc->hw, &sc->mci_work);
+ return 1;
+
+ return 0;
}
static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
@@ -308,9 +326,16 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
struct ath_mci_profile_info profile_info;
struct ath_mci_profile_status profile_status;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- u8 major, minor;
+ u8 major, minor, update_scheme = 0;
u32 seq_num;
+ if (ar9003_mci_state(ah, MCI_STATE_NEED_FLUSH_BT_INFO) &&
+ ar9003_mci_state(ah, MCI_STATE_ENABLE)) {
+ ath_dbg(common, MCI, "(MCI) Need to flush BT profiles\n");
+ ath_mci_flush_profile(&sc->btcoex.mci);
+ ar9003_mci_state(ah, MCI_STATE_SEND_STATUS_QUERY);
+ }
+
switch (opcode) {
case MCI_GPM_COEX_VERSION_QUERY:
ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION);
@@ -336,7 +361,7 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
break;
}
- ath_mci_process_profile(sc, &profile_info);
+ update_scheme += ath_mci_process_profile(sc, &profile_info);
break;
case MCI_GPM_COEX_BT_STATUS_UPDATE:
profile_status.is_link = *(rx_payload +
@@ -352,12 +377,14 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
profile_status.is_link, profile_status.conn_handle,
profile_status.is_critical, seq_num);
- ath_mci_process_status(sc, &profile_status);
+ update_scheme += ath_mci_process_status(sc, &profile_status);
break;
default:
ath_dbg(common, MCI, "Unknown GPM COEX message = 0x%02x\n", opcode);
break;
}
+ if (update_scheme)
+ ieee80211_queue_work(sc->hw, &sc->mci_work);
}
int ath_mci_setup(struct ath_softc *sc)
@@ -365,6 +392,7 @@ int ath_mci_setup(struct ath_softc *sc)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_mci_coex *mci = &sc->mci_coex;
struct ath_mci_buf *buf = &mci->sched_buf;
+ int ret;
buf->bf_addr = dma_alloc_coherent(sc->dev,
ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE,
@@ -384,9 +412,13 @@ int ath_mci_setup(struct ath_softc *sc)
mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr + mci->sched_buf.bf_len;
mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;
- ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
- mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
- mci->sched_buf.bf_paddr);
+ ret = ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
+ mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
+ mci->sched_buf.bf_paddr);
+ if (ret) {
+ ath_err(common, "Failed to initialize MCI\n");
+ return ret;
+ }
INIT_WORK(&sc->mci_work, ath9k_mci_work);
ath_dbg(common, MCI, "MCI Initialized\n");
@@ -551,9 +583,11 @@ void ath_mci_intr(struct ath_softc *sc)
}
if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
- (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT))
+ (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
+ ath_mci_msg(sc, MCI_GPM_COEX_NOOP, NULL);
+ }
}
void ath_mci_enable(struct ath_softc *sc)
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index a978984d78a5..0e630a99b68b 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -38,6 +38,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
{ PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
{ PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
+ { PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E AR9565 */
{ 0 }
};
@@ -113,41 +114,33 @@ static void ath_pci_aspm_init(struct ath_common *common)
struct ath_hw *ah = sc->sc_ah;
struct pci_dev *pdev = to_pci_dev(sc->dev);
struct pci_dev *parent;
- int pos;
- u8 aspm;
+ u16 aspm;
if (!ah->is_pciexpress)
return;
- pos = pci_pcie_cap(pdev);
- if (!pos)
- return;
-
parent = pdev->bus->self;
if (!parent)
return;
- if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
+ if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) &&
+ (AR_SREV_9285(ah))) {
		/* Bluetooth coexistence requires disabling ASPM. */
- pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &aspm);
- aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
- pci_write_config_byte(pdev, pos + PCI_EXP_LNKCTL, aspm);
+ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
+ PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
/*
* Both upstream and downstream PCIe components should
* have the same ASPM settings.
*/
- pos = pci_pcie_cap(parent);
- pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &aspm);
- aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
- pci_write_config_byte(parent, pos + PCI_EXP_LNKCTL, aspm);
+ pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
+ PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
ath_info(common, "Disabling ASPM since BTCOEX is enabled\n");
return;
}
- pos = pci_pcie_cap(parent);
- pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &aspm);
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &aspm);
if (aspm & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) {
ah->aspm_enabled = true;
/* Initialize PCIe PM and SERDES registers. */
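
The hunk above replaces the open-coded pci_pcie_cap()/pci_read_config_byte() sequence with the pcie_capability_*() accessors, which locate the PCIe capability themselves and operate on the full 16-bit Link Control register. A minimal sketch of the accessor pattern, assuming kernel context; sketch_disable_aspm() and aspm_bits are made-up names, only the two helpers and PCI_EXP_LNKCTL come from the hunk:

#include <linux/pci.h>

static void sketch_disable_aspm(struct pci_dev *pdev, u16 aspm_bits)
{
	u16 lnkctl;

	/* Clear the requested ASPM control bits in Link Control. */
	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_bits);

	/* Read back to confirm the bits are gone. */
	if (!pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl) &&
	    !(lnkctl & aspm_bits))
		dev_info(&pdev->dev, "ASPM bits 0x%x cleared\n", aspm_bits);
}
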
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index e034add9cd5a..27ed80b54881 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -25,141 +25,141 @@ static const struct ath_rate_table ar5416_11na_ratetable = {
8, /* MCS start */
{
[0] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000,
- 5400, 0, 12, 0, 0, 0, 0 }, /* 6 Mb */
+ 5400, 0, 12 }, /* 6 Mb */
[1] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000,
- 7800, 1, 18, 0, 1, 1, 1 }, /* 9 Mb */
+ 7800, 1, 18 }, /* 9 Mb */
[2] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
- 10000, 2, 24, 2, 2, 2, 2 }, /* 12 Mb */
+ 10000, 2, 24 }, /* 12 Mb */
[3] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
- 13900, 3, 36, 2, 3, 3, 3 }, /* 18 Mb */
+ 13900, 3, 36 }, /* 18 Mb */
[4] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
- 17300, 4, 48, 4, 4, 4, 4 }, /* 24 Mb */
+ 17300, 4, 48 }, /* 24 Mb */
[5] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
- 23000, 5, 72, 4, 5, 5, 5 }, /* 36 Mb */
+ 23000, 5, 72 }, /* 36 Mb */
[6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
- 27400, 6, 96, 4, 6, 6, 6 }, /* 48 Mb */
+ 27400, 6, 96 }, /* 48 Mb */
[7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
- 29300, 7, 108, 4, 7, 7, 7 }, /* 54 Mb */
+ 29300, 7, 108 }, /* 54 Mb */
[8] = { RC_HT_SDT_2040, WLAN_RC_PHY_HT_20_SS, 6500,
- 6400, 0, 0, 0, 38, 8, 38 }, /* 6.5 Mb */
+ 6400, 0, 0 }, /* 6.5 Mb */
[9] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
- 12700, 1, 1, 2, 39, 9, 39 }, /* 13 Mb */
+ 12700, 1, 1 }, /* 13 Mb */
[10] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
- 18800, 2, 2, 2, 40, 10, 40 }, /* 19.5 Mb */
+ 18800, 2, 2 }, /* 19.5 Mb */
[11] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
- 25000, 3, 3, 4, 41, 11, 41 }, /* 26 Mb */
+ 25000, 3, 3 }, /* 26 Mb */
[12] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
- 36700, 4, 4, 4, 42, 12, 42 }, /* 39 Mb */
+ 36700, 4, 4 }, /* 39 Mb */
[13] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
- 48100, 5, 5, 4, 43, 13, 43 }, /* 52 Mb */
+ 48100, 5, 5 }, /* 52 Mb */
[14] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
- 53500, 6, 6, 4, 44, 14, 44 }, /* 58.5 Mb */
+ 53500, 6, 6 }, /* 58.5 Mb */
[15] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
- 59000, 7, 7, 4, 45, 16, 46 }, /* 65 Mb */
+ 59000, 7, 7 }, /* 65 Mb */
[16] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
- 65400, 7, 7, 4, 45, 16, 46 }, /* 75 Mb */
+ 65400, 7, 7 }, /* 75 Mb */
[17] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
- 12700, 8, 8, 0, 47, 17, 47 }, /* 13 Mb */
+ 12700, 8, 8 }, /* 13 Mb */
[18] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
- 24800, 9, 9, 2, 48, 18, 48 }, /* 26 Mb */
+ 24800, 9, 9 }, /* 26 Mb */
[19] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
- 36600, 10, 10, 2, 49, 19, 49 }, /* 39 Mb */
+ 36600, 10, 10 }, /* 39 Mb */
[20] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
- 48100, 11, 11, 4, 50, 20, 50 }, /* 52 Mb */
+ 48100, 11, 11 }, /* 52 Mb */
[21] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
- 69500, 12, 12, 4, 51, 21, 51 }, /* 78 Mb */
+ 69500, 12, 12 }, /* 78 Mb */
[22] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
- 89500, 13, 13, 4, 52, 22, 52 }, /* 104 Mb */
+ 89500, 13, 13 }, /* 104 Mb */
[23] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
- 98900, 14, 14, 4, 53, 23, 53 }, /* 117 Mb */
+ 98900, 14, 14 }, /* 117 Mb */
[24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
- 108300, 15, 15, 4, 54, 25, 55 }, /* 130 Mb */
+ 108300, 15, 15 }, /* 130 Mb */
[25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
- 120000, 15, 15, 4, 54, 25, 55 }, /* 144.4 Mb */
+ 120000, 15, 15 }, /* 144.4 Mb */
[26] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
- 17400, 16, 16, 0, 56, 26, 56 }, /* 19.5 Mb */
+ 17400, 16, 16 }, /* 19.5 Mb */
[27] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
- 35100, 17, 17, 2, 57, 27, 57 }, /* 39 Mb */
+ 35100, 17, 17 }, /* 39 Mb */
[28] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
- 52600, 18, 18, 2, 58, 28, 58 }, /* 58.5 Mb */
+ 52600, 18, 18 }, /* 58.5 Mb */
[29] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
- 70400, 19, 19, 4, 59, 29, 59 }, /* 78 Mb */
+ 70400, 19, 19 }, /* 78 Mb */
[30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
- 104900, 20, 20, 4, 60, 31, 61 }, /* 117 Mb */
+ 104900, 20, 20 }, /* 117 Mb */
[31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
- 115800, 20, 20, 4, 60, 31, 61 }, /* 130 Mb*/
+ 115800, 20, 20 }, /* 130 Mb*/
[32] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
- 137200, 21, 21, 4, 62, 33, 63 }, /* 156 Mb */
+ 137200, 21, 21 }, /* 156 Mb */
[33] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
- 151100, 21, 21, 4, 62, 33, 63 }, /* 173.3 Mb */
+ 151100, 21, 21 }, /* 173.3 Mb */
[34] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
- 152800, 22, 22, 4, 64, 35, 65 }, /* 175.5 Mb */
+ 152800, 22, 22 }, /* 175.5 Mb */
[35] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
- 168400, 22, 22, 4, 64, 35, 65 }, /* 195 Mb*/
+ 168400, 22, 22 }, /* 195 Mb*/
[36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
- 168400, 23, 23, 4, 66, 37, 67 }, /* 195 Mb */
+ 168400, 23, 23 }, /* 195 Mb */
[37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
- 185000, 23, 23, 4, 66, 37, 67 }, /* 216.7 Mb */
+ 185000, 23, 23 }, /* 216.7 Mb */
[38] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
- 13200, 0, 0, 0, 38, 38, 38 }, /* 13.5 Mb*/
+ 13200, 0, 0 }, /* 13.5 Mb*/
[39] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
- 25900, 1, 1, 2, 39, 39, 39 }, /* 27.0 Mb*/
+ 25900, 1, 1 }, /* 27.0 Mb*/
[40] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
- 38600, 2, 2, 2, 40, 40, 40 }, /* 40.5 Mb*/
+ 38600, 2, 2 }, /* 40.5 Mb*/
[41] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
- 49800, 3, 3, 4, 41, 41, 41 }, /* 54 Mb */
+ 49800, 3, 3 }, /* 54 Mb */
[42] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
- 72200, 4, 4, 4, 42, 42, 42 }, /* 81 Mb */
+ 72200, 4, 4 }, /* 81 Mb */
[43] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 108000,
- 92900, 5, 5, 4, 43, 43, 43 }, /* 108 Mb */
+ 92900, 5, 5 }, /* 108 Mb */
[44] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
- 102700, 6, 6, 4, 44, 44, 44 }, /* 121.5 Mb*/
+ 102700, 6, 6 }, /* 121.5 Mb*/
[45] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
- 112000, 7, 7, 4, 45, 46, 46 }, /* 135 Mb */
+ 112000, 7, 7 }, /* 135 Mb */
[46] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
- 122000, 7, 7, 4, 45, 46, 46 }, /* 150 Mb */
+ 122000, 7, 7 }, /* 150 Mb */
[47] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
- 25800, 8, 8, 0, 47, 47, 47 }, /* 27 Mb */
+ 25800, 8, 8 }, /* 27 Mb */
[48] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
- 49800, 9, 9, 2, 48, 48, 48 }, /* 54 Mb */
+ 49800, 9, 9 }, /* 54 Mb */
[49] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
- 71900, 10, 10, 2, 49, 49, 49 }, /* 81 Mb */
+ 71900, 10, 10 }, /* 81 Mb */
[50] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
- 92500, 11, 11, 4, 50, 50, 50 }, /* 108 Mb */
+ 92500, 11, 11 }, /* 108 Mb */
[51] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
- 130300, 12, 12, 4, 51, 51, 51 }, /* 162 Mb */
+ 130300, 12, 12 }, /* 162 Mb */
[52] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
- 162800, 13, 13, 4, 52, 52, 52 }, /* 216 Mb */
+ 162800, 13, 13 }, /* 216 Mb */
[53] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
- 178200, 14, 14, 4, 53, 53, 53 }, /* 243 Mb */
+ 178200, 14, 14 }, /* 243 Mb */
[54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
- 192100, 15, 15, 4, 54, 55, 55 }, /* 270 Mb */
+ 192100, 15, 15 }, /* 270 Mb */
[55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
- 207000, 15, 15, 4, 54, 55, 55 }, /* 300 Mb */
+ 207000, 15, 15 }, /* 300 Mb */
[56] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
- 36100, 16, 16, 0, 56, 56, 56 }, /* 40.5 Mb */
+ 36100, 16, 16 }, /* 40.5 Mb */
[57] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
- 72900, 17, 17, 2, 57, 57, 57 }, /* 81 Mb */
+ 72900, 17, 17 }, /* 81 Mb */
[58] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
- 108300, 18, 18, 2, 58, 58, 58 }, /* 121.5 Mb */
+ 108300, 18, 18 }, /* 121.5 Mb */
[59] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
- 142000, 19, 19, 4, 59, 59, 59 }, /* 162 Mb */
+ 142000, 19, 19 }, /* 162 Mb */
[60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
- 205100, 20, 20, 4, 60, 61, 61 }, /* 243 Mb */
+ 205100, 20, 20 }, /* 243 Mb */
[61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
- 224700, 20, 20, 4, 60, 61, 61 }, /* 270 Mb */
+ 224700, 20, 20 }, /* 270 Mb */
[62] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
- 263100, 21, 21, 4, 62, 63, 63 }, /* 324 Mb */
+ 263100, 21, 21 }, /* 324 Mb */
[63] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
- 288000, 21, 21, 4, 62, 63, 63 }, /* 360 Mb */
+ 288000, 21, 21 }, /* 360 Mb */
[64] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
- 290700, 22, 22, 4, 64, 65, 65 }, /* 364.5 Mb */
+ 290700, 22, 22 }, /* 364.5 Mb */
[65] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
- 317200, 22, 22, 4, 64, 65, 65 }, /* 405 Mb */
+ 317200, 22, 22 }, /* 405 Mb */
[66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
- 317200, 23, 23, 4, 66, 67, 67 }, /* 405 Mb */
+ 317200, 23, 23 }, /* 405 Mb */
[67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
- 346400, 23, 23, 4, 66, 67, 67 }, /* 450 Mb */
+ 346400, 23, 23 }, /* 450 Mb */
},
50, /* probe interval */
WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
@@ -173,149 +173,149 @@ static const struct ath_rate_table ar5416_11ng_ratetable = {
12, /* MCS start */
{
[0] = { RC_ALL, WLAN_RC_PHY_CCK, 1000,
- 900, 0, 2, 0, 0, 0, 0 }, /* 1 Mb */
+ 900, 0, 2 }, /* 1 Mb */
[1] = { RC_ALL, WLAN_RC_PHY_CCK, 2000,
- 1900, 1, 4, 1, 1, 1, 1 }, /* 2 Mb */
+ 1900, 1, 4 }, /* 2 Mb */
[2] = { RC_ALL, WLAN_RC_PHY_CCK, 5500,
- 4900, 2, 11, 2, 2, 2, 2 }, /* 5.5 Mb */
+ 4900, 2, 11 }, /* 5.5 Mb */
[3] = { RC_ALL, WLAN_RC_PHY_CCK, 11000,
- 8100, 3, 22, 3, 3, 3, 3 }, /* 11 Mb */
+ 8100, 3, 22 }, /* 11 Mb */
[4] = { RC_INVALID, WLAN_RC_PHY_OFDM, 6000,
- 5400, 4, 12, 4, 4, 4, 4 }, /* 6 Mb */
+ 5400, 4, 12 }, /* 6 Mb */
[5] = { RC_INVALID, WLAN_RC_PHY_OFDM, 9000,
- 7800, 5, 18, 4, 5, 5, 5 }, /* 9 Mb */
+ 7800, 5, 18 }, /* 9 Mb */
[6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
- 10100, 6, 24, 6, 6, 6, 6 }, /* 12 Mb */
+ 10100, 6, 24 }, /* 12 Mb */
[7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
- 14100, 7, 36, 6, 7, 7, 7 }, /* 18 Mb */
+ 14100, 7, 36 }, /* 18 Mb */
[8] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
- 17700, 8, 48, 8, 8, 8, 8 }, /* 24 Mb */
+ 17700, 8, 48 }, /* 24 Mb */
[9] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
- 23700, 9, 72, 8, 9, 9, 9 }, /* 36 Mb */
+ 23700, 9, 72 }, /* 36 Mb */
[10] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
- 27400, 10, 96, 8, 10, 10, 10 }, /* 48 Mb */
+ 27400, 10, 96 }, /* 48 Mb */
[11] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
- 30900, 11, 108, 8, 11, 11, 11 }, /* 54 Mb */
+ 30900, 11, 108 }, /* 54 Mb */
[12] = { RC_INVALID, WLAN_RC_PHY_HT_20_SS, 6500,
- 6400, 0, 0, 4, 42, 12, 42 }, /* 6.5 Mb */
+ 6400, 0, 0 }, /* 6.5 Mb */
[13] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
- 12700, 1, 1, 6, 43, 13, 43 }, /* 13 Mb */
+ 12700, 1, 1 }, /* 13 Mb */
[14] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
- 18800, 2, 2, 6, 44, 14, 44 }, /* 19.5 Mb*/
+ 18800, 2, 2 }, /* 19.5 Mb*/
[15] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
- 25000, 3, 3, 8, 45, 15, 45 }, /* 26 Mb */
+ 25000, 3, 3 }, /* 26 Mb */
[16] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
- 36700, 4, 4, 8, 46, 16, 46 }, /* 39 Mb */
+ 36700, 4, 4 }, /* 39 Mb */
[17] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
- 48100, 5, 5, 8, 47, 17, 47 }, /* 52 Mb */
+ 48100, 5, 5 }, /* 52 Mb */
[18] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
- 53500, 6, 6, 8, 48, 18, 48 }, /* 58.5 Mb */
+ 53500, 6, 6 }, /* 58.5 Mb */
[19] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
- 59000, 7, 7, 8, 49, 20, 50 }, /* 65 Mb */
+ 59000, 7, 7 }, /* 65 Mb */
[20] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
- 65400, 7, 7, 8, 49, 20, 50 }, /* 65 Mb*/
+ 65400, 7, 7 }, /* 65 Mb*/
[21] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
- 12700, 8, 8, 4, 51, 21, 51 }, /* 13 Mb */
+ 12700, 8, 8 }, /* 13 Mb */
[22] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
- 24800, 9, 9, 6, 52, 22, 52 }, /* 26 Mb */
+ 24800, 9, 9 }, /* 26 Mb */
[23] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
- 36600, 10, 10, 6, 53, 23, 53 }, /* 39 Mb */
+ 36600, 10, 10 }, /* 39 Mb */
[24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
- 48100, 11, 11, 8, 54, 24, 54 }, /* 52 Mb */
+ 48100, 11, 11 }, /* 52 Mb */
[25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
- 69500, 12, 12, 8, 55, 25, 55 }, /* 78 Mb */
+ 69500, 12, 12 }, /* 78 Mb */
[26] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
- 89500, 13, 13, 8, 56, 26, 56 }, /* 104 Mb */
+ 89500, 13, 13 }, /* 104 Mb */
[27] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
- 98900, 14, 14, 8, 57, 27, 57 }, /* 117 Mb */
+ 98900, 14, 14 }, /* 117 Mb */
[28] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
- 108300, 15, 15, 8, 58, 29, 59 }, /* 130 Mb */
+ 108300, 15, 15 }, /* 130 Mb */
[29] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
- 120000, 15, 15, 8, 58, 29, 59 }, /* 144.4 Mb */
+ 120000, 15, 15 }, /* 144.4 Mb */
[30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
- 17400, 16, 16, 4, 60, 30, 60 }, /* 19.5 Mb */
+ 17400, 16, 16 }, /* 19.5 Mb */
[31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
- 35100, 17, 17, 6, 61, 31, 61 }, /* 39 Mb */
+ 35100, 17, 17 }, /* 39 Mb */
[32] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
- 52600, 18, 18, 6, 62, 32, 62 }, /* 58.5 Mb */
+ 52600, 18, 18 }, /* 58.5 Mb */
[33] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
- 70400, 19, 19, 8, 63, 33, 63 }, /* 78 Mb */
+ 70400, 19, 19 }, /* 78 Mb */
[34] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
- 104900, 20, 20, 8, 64, 35, 65 }, /* 117 Mb */
+ 104900, 20, 20 }, /* 117 Mb */
[35] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
- 115800, 20, 20, 8, 64, 35, 65 }, /* 130 Mb */
+ 115800, 20, 20 }, /* 130 Mb */
[36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
- 137200, 21, 21, 8, 66, 37, 67 }, /* 156 Mb */
+ 137200, 21, 21 }, /* 156 Mb */
[37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
- 151100, 21, 21, 8, 66, 37, 67 }, /* 173.3 Mb */
+ 151100, 21, 21 }, /* 173.3 Mb */
[38] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
- 152800, 22, 22, 8, 68, 39, 69 }, /* 175.5 Mb */
+ 152800, 22, 22 }, /* 175.5 Mb */
[39] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
- 168400, 22, 22, 8, 68, 39, 69 }, /* 195 Mb */
+ 168400, 22, 22 }, /* 195 Mb */
[40] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
- 168400, 23, 23, 8, 70, 41, 71 }, /* 195 Mb */
+ 168400, 23, 23 }, /* 195 Mb */
[41] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
- 185000, 23, 23, 8, 70, 41, 71 }, /* 216.7 Mb */
+ 185000, 23, 23 }, /* 216.7 Mb */
[42] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
- 13200, 0, 0, 8, 42, 42, 42 }, /* 13.5 Mb */
+ 13200, 0, 0 }, /* 13.5 Mb */
[43] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
- 25900, 1, 1, 8, 43, 43, 43 }, /* 27.0 Mb */
+ 25900, 1, 1 }, /* 27.0 Mb */
[44] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
- 38600, 2, 2, 8, 44, 44, 44 }, /* 40.5 Mb */
+ 38600, 2, 2 }, /* 40.5 Mb */
[45] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
- 49800, 3, 3, 8, 45, 45, 45 }, /* 54 Mb */
+ 49800, 3, 3 }, /* 54 Mb */
[46] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
- 72200, 4, 4, 8, 46, 46, 46 }, /* 81 Mb */
+ 72200, 4, 4 }, /* 81 Mb */
[47] = { RC_HT_S_40 , WLAN_RC_PHY_HT_40_SS, 108000,
- 92900, 5, 5, 8, 47, 47, 47 }, /* 108 Mb */
+ 92900, 5, 5 }, /* 108 Mb */
[48] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
- 102700, 6, 6, 8, 48, 48, 48 }, /* 121.5 Mb */
+ 102700, 6, 6 }, /* 121.5 Mb */
[49] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
- 112000, 7, 7, 8, 49, 50, 50 }, /* 135 Mb */
+ 112000, 7, 7 }, /* 135 Mb */
[50] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
- 122000, 7, 7, 8, 49, 50, 50 }, /* 150 Mb */
+ 122000, 7, 7 }, /* 150 Mb */
[51] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
- 25800, 8, 8, 8, 51, 51, 51 }, /* 27 Mb */
+ 25800, 8, 8 }, /* 27 Mb */
[52] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
- 49800, 9, 9, 8, 52, 52, 52 }, /* 54 Mb */
+ 49800, 9, 9 }, /* 54 Mb */
[53] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
- 71900, 10, 10, 8, 53, 53, 53 }, /* 81 Mb */
+ 71900, 10, 10 }, /* 81 Mb */
[54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
- 92500, 11, 11, 8, 54, 54, 54 }, /* 108 Mb */
+ 92500, 11, 11 }, /* 108 Mb */
[55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
- 130300, 12, 12, 8, 55, 55, 55 }, /* 162 Mb */
+ 130300, 12, 12 }, /* 162 Mb */
[56] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
- 162800, 13, 13, 8, 56, 56, 56 }, /* 216 Mb */
+ 162800, 13, 13 }, /* 216 Mb */
[57] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
- 178200, 14, 14, 8, 57, 57, 57 }, /* 243 Mb */
+ 178200, 14, 14 }, /* 243 Mb */
[58] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
- 192100, 15, 15, 8, 58, 59, 59 }, /* 270 Mb */
+ 192100, 15, 15 }, /* 270 Mb */
[59] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
- 207000, 15, 15, 8, 58, 59, 59 }, /* 300 Mb */
+ 207000, 15, 15 }, /* 300 Mb */
[60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
- 36100, 16, 16, 8, 60, 60, 60 }, /* 40.5 Mb */
+ 36100, 16, 16 }, /* 40.5 Mb */
[61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
- 72900, 17, 17, 8, 61, 61, 61 }, /* 81 Mb */
+ 72900, 17, 17 }, /* 81 Mb */
[62] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
- 108300, 18, 18, 8, 62, 62, 62 }, /* 121.5 Mb */
+ 108300, 18, 18 }, /* 121.5 Mb */
[63] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
- 142000, 19, 19, 8, 63, 63, 63 }, /* 162 Mb */
+ 142000, 19, 19 }, /* 162 Mb */
[64] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
- 205100, 20, 20, 8, 64, 65, 65 }, /* 243 Mb */
+ 205100, 20, 20 }, /* 243 Mb */
[65] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
- 224700, 20, 20, 8, 64, 65, 65 }, /* 270 Mb */
+ 224700, 20, 20 }, /* 270 Mb */
[66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
- 263100, 21, 21, 8, 66, 67, 67 }, /* 324 Mb */
+ 263100, 21, 21 }, /* 324 Mb */
[67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
- 288000, 21, 21, 8, 66, 67, 67 }, /* 360 Mb */
+ 288000, 21, 21 }, /* 360 Mb */
[68] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
- 290700, 22, 22, 8, 68, 69, 69 }, /* 364.5 Mb */
+ 290700, 22, 22 }, /* 364.5 Mb */
[69] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
- 317200, 22, 22, 8, 68, 69, 69 }, /* 405 Mb */
+ 317200, 22, 22 }, /* 405 Mb */
[70] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
- 317200, 23, 23, 8, 70, 71, 71 }, /* 405 Mb */
+ 317200, 23, 23 }, /* 405 Mb */
[71] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
- 346400, 23, 23, 8, 70, 71, 71 }, /* 450 Mb */
+ 346400, 23, 23 }, /* 450 Mb */
},
50, /* probe interval */
WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
@@ -326,21 +326,21 @@ static const struct ath_rate_table ar5416_11a_ratetable = {
0,
{
{ RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
- 5400, 0, 12, 0},
+ 5400, 0, 12},
{ RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
- 7800, 1, 18, 0},
+ 7800, 1, 18},
{ RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
- 10000, 2, 24, 2},
+ 10000, 2, 24},
{ RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
- 13900, 3, 36, 2},
+ 13900, 3, 36},
{ RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
- 17300, 4, 48, 4},
+ 17300, 4, 48},
{ RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
- 23000, 5, 72, 4},
+ 23000, 5, 72},
{ RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
- 27400, 6, 96, 4},
+ 27400, 6, 96},
{ RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
- 29300, 7, 108, 4},
+ 29300, 7, 108},
},
50, /* probe interval */
0, /* Phy rates allowed initially */
@@ -351,63 +351,62 @@ static const struct ath_rate_table ar5416_11g_ratetable = {
0,
{
{ RC_L_SDT, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
- 900, 0, 2, 0},
+ 900, 0, 2},
{ RC_L_SDT, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */
- 1900, 1, 4, 1},
+ 1900, 1, 4},
{ RC_L_SDT, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */
- 4900, 2, 11, 2},
+ 4900, 2, 11},
{ RC_L_SDT, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */
- 8100, 3, 22, 3},
+ 8100, 3, 22},
{ RC_INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
- 5400, 4, 12, 4},
+ 5400, 4, 12},
{ RC_INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
- 7800, 5, 18, 4},
+ 7800, 5, 18},
{ RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
- 10000, 6, 24, 6},
+ 10000, 6, 24},
{ RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
- 13900, 7, 36, 6},
+ 13900, 7, 36},
{ RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
- 17300, 8, 48, 8},
+ 17300, 8, 48},
{ RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
- 23000, 9, 72, 8},
+ 23000, 9, 72},
{ RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
- 27400, 10, 96, 8},
+ 27400, 10, 96},
{ RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
- 29300, 11, 108, 8},
+ 29300, 11, 108},
},
50, /* probe interval */
0, /* Phy rates allowed initially */
};
-static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
+static int ath_rc_get_rateindex(struct ath_rate_priv *ath_rc_priv,
struct ieee80211_tx_rate *rate)
{
- int rix = 0, i = 0;
- static const int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 };
+ const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
+ int rix, i, idx = 0;
if (!(rate->flags & IEEE80211_TX_RC_MCS))
return rate->idx;
- while (i < ARRAY_SIZE(mcs_rix_off) && rate->idx > mcs_rix_off[i]) {
- rix++; i++;
+ for (i = 0; i < ath_rc_priv->max_valid_rate; i++) {
+ idx = ath_rc_priv->valid_rate_index[i];
+
+ if (WLAN_RC_PHY_HT(rate_table->info[idx].phy) &&
+ rate_table->info[idx].ratecode == rate->idx)
+ break;
}
- rix += rate->idx + rate_table->mcs_start;
+ rix = idx;
- if ((rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
- (rate->flags & IEEE80211_TX_RC_SHORT_GI))
- rix = rate_table->info[rix].ht_index;
- else if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- rix = rate_table->info[rix].sgi_index;
- else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- rix = rate_table->info[rix].cw40index;
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ rix++;
return rix;
}
-static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
- struct ath_rate_priv *ath_rc_priv)
+static void ath_rc_sort_validrates(struct ath_rate_priv *ath_rc_priv)
{
+ const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
u8 i, j, idx, idx_next;
for (i = ath_rc_priv->max_valid_rate - 1; i > 0; i--) {
@@ -424,21 +423,6 @@ static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
}
}
-static void ath_rc_init_valid_rate_idx(struct ath_rate_priv *ath_rc_priv)
-{
- u8 i;
-
- for (i = 0; i < ath_rc_priv->rate_table_size; i++)
- ath_rc_priv->valid_rate_index[i] = 0;
-}
-
-static inline void ath_rc_set_valid_rate_idx(struct ath_rate_priv *ath_rc_priv,
- u8 index, int valid_tx_rate)
-{
- BUG_ON(index > ath_rc_priv->rate_table_size);
- ath_rc_priv->valid_rate_index[index] = !!valid_tx_rate;
-}
-
static inline
int ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table,
struct ath_rate_priv *ath_rc_priv,
@@ -479,8 +463,7 @@ static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
}
static inline int
-ath_rc_get_lower_rix(const struct ath_rate_table *rate_table,
- struct ath_rate_priv *ath_rc_priv,
+ath_rc_get_lower_rix(struct ath_rate_priv *ath_rc_priv,
u8 cur_valid_txrate, u8 *next_idx)
{
int8_t i;
@@ -495,10 +478,9 @@ ath_rc_get_lower_rix(const struct ath_rate_table *rate_table,
return 0;
}
-static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
- const struct ath_rate_table *rate_table,
- u32 capflag)
+static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv)
{
+ const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
u8 i, hi = 0;
for (i = 0; i < rate_table->rate_cnt; i++) {
@@ -506,14 +488,14 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
u32 phy = rate_table->info[i].phy;
u8 valid_rate_count = 0;
- if (!ath_rc_valid_phyrate(phy, capflag, 0))
+ if (!ath_rc_valid_phyrate(phy, ath_rc_priv->ht_cap, 0))
continue;
valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i;
ath_rc_priv->valid_phy_ratecnt[phy] += 1;
- ath_rc_set_valid_rate_idx(ath_rc_priv, i, 1);
+ ath_rc_priv->valid_rate_index[i] = true;
hi = i;
}
}
@@ -521,76 +503,73 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
return hi;
}
-static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
- const struct ath_rate_table *rate_table,
- struct ath_rateset *rateset,
- u32 capflag)
+static inline bool ath_rc_check_legacy(u8 rate, u8 dot11rate, u16 rate_flags,
+ u32 phy, u32 capflag)
{
- u8 i, j, hi = 0;
+ if (rate != dot11rate || WLAN_RC_PHY_HT(phy))
+ return false;
- /* Use intersection of working rates and valid rates */
- for (i = 0; i < rateset->rs_nrates; i++) {
- for (j = 0; j < rate_table->rate_cnt; j++) {
- u32 phy = rate_table->info[j].phy;
- u16 rate_flags = rate_table->info[j].rate_flags;
- u8 rate = rateset->rs_rates[i];
- u8 dot11rate = rate_table->info[j].dot11rate;
-
- /* We allow a rate only if its valid and the
- * capflag matches one of the validity
- * (VALID/VALID_20/VALID_40) flags */
-
- if ((rate == dot11rate) &&
- (rate_flags & WLAN_RC_CAP_MODE(capflag)) ==
- WLAN_RC_CAP_MODE(capflag) &&
- (rate_flags & WLAN_RC_CAP_STREAM(capflag)) &&
- !WLAN_RC_PHY_HT(phy)) {
- u8 valid_rate_count = 0;
-
- if (!ath_rc_valid_phyrate(phy, capflag, 0))
- continue;
-
- valid_rate_count =
- ath_rc_priv->valid_phy_ratecnt[phy];
-
- ath_rc_priv->valid_phy_rateidx[phy]
- [valid_rate_count] = j;
- ath_rc_priv->valid_phy_ratecnt[phy] += 1;
- ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1);
- hi = max(hi, j);
- }
- }
- }
+ if ((rate_flags & WLAN_RC_CAP_MODE(capflag)) != WLAN_RC_CAP_MODE(capflag))
+ return false;
- return hi;
+ if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
+ return false;
+
+ return true;
}
-static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
- const struct ath_rate_table *rate_table,
- struct ath_rateset *rateset, u32 capflag)
+static inline bool ath_rc_check_ht(u8 rate, u8 dot11rate, u16 rate_flags,
+ u32 phy, u32 capflag)
{
- u8 i, j, hi = 0;
+ if (rate != dot11rate || !WLAN_RC_PHY_HT(phy))
+ return false;
+
+ if (!WLAN_RC_PHY_HT_VALID(rate_flags, capflag))
+ return false;
+
+ if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
+ return false;
+
+ return true;
+}
+
+static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, bool legacy)
+{
+ const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
+ struct ath_rateset *rateset;
+ u32 phy, capflag = ath_rc_priv->ht_cap;
+ u16 rate_flags;
+ u8 i, j, hi = 0, rate, dot11rate, valid_rate_count;
+
+ if (legacy)
+ rateset = &ath_rc_priv->neg_rates;
+ else
+ rateset = &ath_rc_priv->neg_ht_rates;
- /* Use intersection of working rates and valid rates */
for (i = 0; i < rateset->rs_nrates; i++) {
for (j = 0; j < rate_table->rate_cnt; j++) {
- u32 phy = rate_table->info[j].phy;
- u16 rate_flags = rate_table->info[j].rate_flags;
- u8 rate = rateset->rs_rates[i];
- u8 dot11rate = rate_table->info[j].dot11rate;
-
- if ((rate != dot11rate) || !WLAN_RC_PHY_HT(phy) ||
- !(rate_flags & WLAN_RC_CAP_STREAM(capflag)) ||
- !WLAN_RC_PHY_HT_VALID(rate_flags, capflag))
+ phy = rate_table->info[j].phy;
+ rate_flags = rate_table->info[j].rate_flags;
+ rate = rateset->rs_rates[i];
+ dot11rate = rate_table->info[j].dot11rate;
+
+ if (legacy &&
+ !ath_rc_check_legacy(rate, dot11rate,
+ rate_flags, phy, capflag))
+ continue;
+
+ if (!legacy &&
+ !ath_rc_check_ht(rate, dot11rate,
+ rate_flags, phy, capflag))
continue;
if (!ath_rc_valid_phyrate(phy, capflag, 0))
continue;
- ath_rc_priv->valid_phy_rateidx[phy]
- [ath_rc_priv->valid_phy_ratecnt[phy]] = j;
+ valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
+ ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = j;
ath_rc_priv->valid_phy_ratecnt[phy] += 1;
- ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1);
+ ath_rc_priv->valid_rate_index[j] = true;
hi = max(hi, j);
}
}
@@ -598,13 +577,10 @@ static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
return hi;
}
-/* Finds the highest rate index we can use */
-static u8 ath_rc_get_highest_rix(struct ath_softc *sc,
- struct ath_rate_priv *ath_rc_priv,
- const struct ath_rate_table *rate_table,
- int *is_probing,
- bool legacy)
+static u8 ath_rc_get_highest_rix(struct ath_rate_priv *ath_rc_priv,
+ int *is_probing)
{
+ const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
u32 best_thruput, this_thruput, now_msec;
u8 rate, next_rate, best_rate, maxindex, minindex;
int8_t index = 0;
@@ -624,8 +600,6 @@ static u8 ath_rc_get_highest_rix(struct ath_softc *sc,
u8 per_thres;
rate = ath_rc_priv->valid_rate_index[index];
- if (legacy && !(rate_table->info[rate].rate_flags & RC_LEGACY))
- continue;
if (rate > ath_rc_priv->rate_max_phy)
continue;
@@ -707,8 +681,6 @@ static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
rate->count = tries;
rate->idx = rate_table->info[rix].ratecode;
- if (txrc->short_preamble)
- rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
if (txrc->rts || rtsctsenable)
rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
@@ -726,37 +698,25 @@ static void ath_rc_rate_set_rtscts(struct ath_softc *sc,
const struct ath_rate_table *rate_table,
struct ieee80211_tx_info *tx_info)
{
- struct ieee80211_tx_rate *rates = tx_info->control.rates;
- int i = 0, rix = 0, cix, enable_g_protection = 0;
+ struct ieee80211_bss_conf *bss_conf;
- /* get the cix for the lowest valid rix */
- for (i = 3; i >= 0; i--) {
- if (rates[i].count && (rates[i].idx >= 0)) {
- rix = ath_rc_get_rateindex(rate_table, &rates[i]);
- break;
- }
- }
- cix = rate_table->info[rix].ctrl_rate;
+ if (!tx_info->control.vif)
+ return;
+ /*
+ * For legacy frames, mac80211 takes care of CTS protection.
+ */
+ if (!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS))
+ return;
- /* All protection frames are transmited at 2Mb/s for 802.11g,
- * otherwise we transmit them at 1Mb/s */
- if (sc->hw->conf.channel->band == IEEE80211_BAND_2GHZ &&
- !conf_is_ht(&sc->hw->conf))
- enable_g_protection = 1;
+ bss_conf = &tx_info->control.vif->bss_conf;
+
+ if (!bss_conf->basic_rates)
+ return;
/*
- * If 802.11g protection is enabled, determine whether to use RTS/CTS or
- * just CTS. Note that this is only done for OFDM/HT unicast frames.
+ * For now, use the lowest allowed basic rate for HT frames.
*/
- if ((tx_info->control.vif &&
- tx_info->control.vif->bss_conf.use_cts_prot) &&
- (rate_table->info[rix].phy == WLAN_RC_PHY_OFDM ||
- WLAN_RC_PHY_HT(rate_table->info[rix].phy))) {
- rates[0].flags |= IEEE80211_TX_RC_USE_CTS_PROTECT;
- cix = rate_table->info[enable_g_protection].ctrl_rate;
- }
-
- tx_info->control.rts_cts_rate_idx = cix;
+ tx_info->control.rts_cts_rate_idx = __ffs(bss_conf->basic_rates);
}
static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
@@ -789,14 +749,8 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
try_per_rate = 4;
rate_table = ath_rc_priv->rate_table;
- rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
- &is_probe, false);
+ rix = ath_rc_get_highest_rix(ath_rc_priv, &is_probe);
- /*
- * If we're in HT mode and both us and our peer supports LDPC.
- * We don't need to check our own device's capabilities as our own
- * ht capabilities would have already been intersected with our peer's.
- */
if (conf_is_ht(&sc->hw->conf) &&
(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
tx_info->flags |= IEEE80211_TX_CTL_LDPC;
@@ -806,52 +760,45 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT);
if (is_probe) {
- /* set one try for probe rates. For the
- * probes don't enable rts */
+ /*
+ * Set one try for probe rates. Don't
+ * enable RTS for the probe attempt.
+ */
ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
1, rix, 0);
-
- /* Get the next tried/allowed rate. No RTS for the next series
- * after the probe rate
+ /*
+ * Get the next tried/allowed rate.
+ * No RTS for the next series after the probe rate.
*/
- ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
+ ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
try_per_rate, rix, 0);
tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
} else {
- /* Set the chosen rate. No RTS for first series entry. */
+ /*
+ * Set the chosen rate. No RTS for first series entry.
+ */
ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
try_per_rate, rix, 0);
}
- /* Fill in the other rates for multirate retry */
- for ( ; i < 3; i++) {
+ for ( ; i < 4; i++) {
+ /*
+ * Use twice the number of tries for the last MRR segment.
+ */
+ if (i + 1 == 4)
+ try_per_rate = 8;
+
+ ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
- ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
- /* All other rates in the series have RTS enabled */
+ /*
+ * All other rates in the series have RTS enabled.
+ */
ath_rc_rate_set_series(rate_table, &rates[i], txrc,
try_per_rate, rix, 1);
}
- /* Use twice the number of tries for the last MRR segment. */
- try_per_rate = 8;
-
- /*
- * If the last rate in the rate series is MCS and has
- * more than 80% of per thresh, then use a legacy rate
- * as last retry to ensure that the frame is tried in both
- * MCS and legacy rate.
- */
- ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
- if (WLAN_RC_PHY_HT(rate_table->info[rix].phy) &&
- (ath_rc_priv->per[rix] > 45))
- rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
- &is_probe, true);
-
- /* All other rates in the series have RTS enabled */
- ath_rc_rate_set_series(rate_table, &rates[i], txrc,
- try_per_rate, rix, 1);
/*
* NB:Change rate series to enable aggregation when operating
* at lower MCS rates. When first rate in series is MCS2
@@ -893,7 +840,6 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
rates[0].count = ATH_TXMAXTRY;
}
- /* Setup RTS/CTS */
ath_rc_rate_set_rtscts(sc, rate_table, tx_info);
}
@@ -1046,9 +992,6 @@ static void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
stats->per = per;
}
-/* Update PER, RSSI and whatever else that the code thinks it is doing.
- If you can make sense of all this, you really need to go out more. */
-
static void ath_rc_update_ht(struct ath_softc *sc,
struct ath_rate_priv *ath_rc_priv,
struct ieee80211_tx_info *tx_info,
@@ -1077,8 +1020,8 @@ static void ath_rc_update_ht(struct ath_softc *sc,
if (ath_rc_priv->per[tx_rate] >= 55 && tx_rate > 0 &&
rate_table->info[tx_rate].ratekbps <=
rate_table->info[ath_rc_priv->rate_max_phy].ratekbps) {
- ath_rc_get_lower_rix(rate_table, ath_rc_priv,
- (u8)tx_rate, &ath_rc_priv->rate_max_phy);
+ ath_rc_get_lower_rix(ath_rc_priv, (u8)tx_rate,
+ &ath_rc_priv->rate_max_phy);
/* Don't probe for a little while. */
ath_rc_priv->probe_time = now_msec;
@@ -1122,25 +1065,42 @@ static void ath_rc_update_ht(struct ath_softc *sc,
}
+static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
+{
+ struct ath_rc_stats *stats;
+
+ stats = &rc->rcstats[final_rate];
+ stats->success++;
+}
static void ath_rc_tx_status(struct ath_softc *sc,
struct ath_rate_priv *ath_rc_priv,
- struct ieee80211_tx_info *tx_info,
- int final_ts_idx, int xretries, int long_retry)
+ struct sk_buff *skb)
{
- const struct ath_rate_table *rate_table;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *rates = tx_info->status.rates;
+ struct ieee80211_tx_rate *rate;
+ int final_ts_idx = 0, xretries = 0, long_retry = 0;
u8 flags;
u32 i = 0, rix;
- rate_table = ath_rc_priv->rate_table;
+ for (i = 0; i < sc->hw->max_rates; i++) {
+ rate = &tx_info->status.rates[i];
+ if (rate->idx < 0 || !rate->count)
+ break;
+
+ final_ts_idx = i;
+ long_retry = rate->count - 1;
+ }
+
+ if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
+ xretries = 1;
/*
* If the first rate is not the final index, there
* are intermediate rate failures to be processed.
*/
if (final_ts_idx != 0) {
- /* Process intermediate rates that failed.*/
for (i = 0; i < final_ts_idx ; i++) {
if (rates[i].count != 0 && (rates[i].idx >= 0)) {
flags = rates[i].flags;
@@ -1152,32 +1112,24 @@ static void ath_rc_tx_status(struct ath_softc *sc,
!(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
return;
- rix = ath_rc_get_rateindex(rate_table, &rates[i]);
+ rix = ath_rc_get_rateindex(ath_rc_priv, &rates[i]);
ath_rc_update_ht(sc, ath_rc_priv, tx_info,
- rix, xretries ? 1 : 2,
- rates[i].count);
+ rix, xretries ? 1 : 2,
+ rates[i].count);
}
}
- } else {
- /*
- * Handle the special case of MIMO PS burst, where the second
- * aggregate is sent out with only one rate and one try.
- * Treating it as an excessive retry penalizes the rate
- * inordinately.
- */
- if (rates[0].count == 1 && xretries == 1)
- xretries = 2;
}
- flags = rates[i].flags;
+ flags = rates[final_ts_idx].flags;
/* If HT40 and we have switched mode from 40 to 20 => don't update */
if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
!(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
return;
- rix = ath_rc_get_rateindex(rate_table, &rates[i]);
+ rix = ath_rc_get_rateindex(ath_rc_priv, &rates[final_ts_idx]);
ath_rc_update_ht(sc, ath_rc_priv, tx_info, rix, xretries, long_retry);
+ ath_debug_stat_rc(ath_rc_priv, rix);
}
static const
@@ -1185,8 +1137,6 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
enum ieee80211_band band,
bool is_ht)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-
switch(band) {
case IEEE80211_BAND_2GHZ:
if (is_ht)
@@ -1197,34 +1147,25 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
return &ar5416_11na_ratetable;
return &ar5416_11a_ratetable;
default:
- ath_dbg(common, CONFIG, "Invalid band\n");
return NULL;
}
}
static void ath_rc_init(struct ath_softc *sc,
- struct ath_rate_priv *ath_rc_priv,
- struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta,
- const struct ath_rate_table *rate_table)
+ struct ath_rate_priv *ath_rc_priv)
{
+ const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
struct ath_rateset *rateset = &ath_rc_priv->neg_rates;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_rateset *ht_mcs = &ath_rc_priv->neg_ht_rates;
u8 i, j, k, hi = 0, hthi = 0;
- /* Initial rate table size. Will change depending
- * on the working rate set */
ath_rc_priv->rate_table_size = RATE_TABLE_SIZE;
- /* Initialize thresholds according to the global rate table */
for (i = 0 ; i < ath_rc_priv->rate_table_size; i++) {
ath_rc_priv->per[i] = 0;
+ ath_rc_priv->valid_rate_index[i] = 0;
}
- /* Determine the valid rates */
- ath_rc_init_valid_rate_idx(ath_rc_priv);
-
for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
for (j = 0; j < RATE_TABLE_SIZE; j++)
ath_rc_priv->valid_phy_rateidx[i][j] = 0;
@@ -1232,25 +1173,19 @@ static void ath_rc_init(struct ath_softc *sc,
}
if (!rateset->rs_nrates) {
- /* No working rate, just initialize valid rates */
- hi = ath_rc_init_validrates(ath_rc_priv, rate_table,
- ath_rc_priv->ht_cap);
+ hi = ath_rc_init_validrates(ath_rc_priv);
} else {
- /* Use intersection of working rates and valid rates */
- hi = ath_rc_setvalid_rates(ath_rc_priv, rate_table,
- rateset, ath_rc_priv->ht_cap);
- if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG) {
- hthi = ath_rc_setvalid_htrates(ath_rc_priv,
- rate_table,
- ht_mcs,
- ath_rc_priv->ht_cap);
- }
+ hi = ath_rc_setvalid_rates(ath_rc_priv, true);
+
+ if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG)
+ hthi = ath_rc_setvalid_rates(ath_rc_priv, false);
+
hi = max(hi, hthi);
}
ath_rc_priv->rate_table_size = hi + 1;
ath_rc_priv->rate_max_phy = 0;
- BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
+ WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) {
@@ -1258,28 +1193,26 @@ static void ath_rc_init(struct ath_softc *sc,
ath_rc_priv->valid_phy_rateidx[i][j];
}
- if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1)
- || !ath_rc_priv->valid_phy_ratecnt[i])
+ if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1) ||
+ !ath_rc_priv->valid_phy_ratecnt[i])
continue;
ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1];
}
- BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
- BUG_ON(k > RATE_TABLE_SIZE);
+ WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
+ WARN_ON(k > RATE_TABLE_SIZE);
ath_rc_priv->max_valid_rate = k;
- ath_rc_sort_validrates(rate_table, ath_rc_priv);
+ ath_rc_sort_validrates(ath_rc_priv);
ath_rc_priv->rate_max_phy = (k > 4) ?
- ath_rc_priv->valid_rate_index[k-4] :
- ath_rc_priv->valid_rate_index[k-1];
- ath_rc_priv->rate_table = rate_table;
+ ath_rc_priv->valid_rate_index[k-4] :
+ ath_rc_priv->valid_rate_index[k-1];
ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n",
ath_rc_priv->ht_cap);
}
-static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
- bool is_cw40, bool is_sgi)
+static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta)
{
u8 caps = 0;
@@ -1289,10 +1222,14 @@ static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG;
else if (sta->ht_cap.mcs.rx_mask[1])
caps |= WLAN_RC_DS_FLAG;
- if (is_cw40)
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
caps |= WLAN_RC_40_FLAG;
- if (is_sgi)
- caps |= WLAN_RC_SGI_FLAG;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ caps |= WLAN_RC_SGI_FLAG;
+ } else {
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ caps |= WLAN_RC_SGI_FLAG;
+ }
}
return caps;
@@ -1319,15 +1256,6 @@ static bool ath_tx_aggr_check(struct ath_softc *sc, struct ieee80211_sta *sta,
/* mac80211 Rate Control callbacks */
/***********************************/
-static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
-{
- struct ath_rc_stats *stats;
-
- stats = &rc->rcstats[final_rate];
- stats->success++;
-}
-
-
static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta,
struct sk_buff *skb)
@@ -1335,22 +1263,8 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
struct ath_softc *sc = priv;
struct ath_rate_priv *ath_rc_priv = priv_sta;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr;
- int final_ts_idx = 0, tx_status = 0;
- int long_retry = 0;
- __le16 fc;
- int i;
-
- hdr = (struct ieee80211_hdr *)skb->data;
- fc = hdr->frame_control;
- for (i = 0; i < sc->hw->max_rates; i++) {
- struct ieee80211_tx_rate *rate = &tx_info->status.rates[i];
- if (rate->idx < 0 || !rate->count)
- break;
-
- final_ts_idx = i;
- long_retry = rate->count - 1;
- }
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ __le16 fc = hdr->frame_control;
if (!priv_sta || !ieee80211_is_data(fc))
return;
@@ -1363,11 +1277,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
return;
- if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
- tx_status = 1;
-
- ath_rc_tx_status(sc, ath_rc_priv, tx_info, final_ts_idx, tx_status,
- long_retry);
+ ath_rc_tx_status(sc, ath_rc_priv, skb);
/* Check if aggregation has to be enabled for this tid */
if (conf_is_ht(&sc->hw->conf) &&
@@ -1383,19 +1293,14 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
ieee80211_start_tx_ba_session(sta, tid, 0);
}
}
-
- ath_debug_stat_rc(ath_rc_priv,
- ath_rc_get_rateindex(ath_rc_priv->rate_table,
- &tx_info->status.rates[final_ts_idx]));
}
static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta)
{
struct ath_softc *sc = priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_rate_priv *ath_rc_priv = priv_sta;
- const struct ath_rate_table *rate_table;
- bool is_cw40, is_sgi = false;
int i, j = 0;
for (i = 0; i < sband->n_bitrates; i++) {
@@ -1417,20 +1322,15 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
ath_rc_priv->neg_ht_rates.rs_nrates = j;
}
- is_cw40 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40);
-
- if (is_cw40)
- is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
- else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
- is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
-
- /* Choose rate table first */
-
- rate_table = ath_choose_rate_table(sc, sband->band,
- sta->ht_cap.ht_supported);
+ ath_rc_priv->rate_table = ath_choose_rate_table(sc, sband->band,
+ sta->ht_cap.ht_supported);
+ if (!ath_rc_priv->rate_table) {
+ ath_err(common, "No rate table chosen\n");
+ return;
+ }
- ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi);
- ath_rc_init(sc, priv_sta, sband, sta, rate_table);
+ ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
+ ath_rc_init(sc, priv_sta);
}
static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
@@ -1439,40 +1339,14 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
{
struct ath_softc *sc = priv;
struct ath_rate_priv *ath_rc_priv = priv_sta;
- const struct ath_rate_table *rate_table = NULL;
- bool oper_cw40 = false, oper_sgi;
- bool local_cw40 = !!(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG);
- bool local_sgi = !!(ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG);
-
- /* FIXME: Handle AP mode later when we support CWM */
if (changed & IEEE80211_RC_BW_CHANGED) {
- if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
- return;
-
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
- oper_cw40 = true;
-
- if (oper_cw40)
- oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
- true : false;
- else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
- oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
- true : false;
- else
- oper_sgi = false;
-
- if ((local_cw40 != oper_cw40) || (local_sgi != oper_sgi)) {
- rate_table = ath_choose_rate_table(sc, sband->band,
- sta->ht_cap.ht_supported);
- ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta,
- oper_cw40, oper_sgi);
- ath_rc_init(sc, priv_sta, sband, sta, rate_table);
-
- ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
- "Operating HT Bandwidth changed to: %d\n",
- sc->hw->conf.channel_type);
- }
+ ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
+ ath_rc_init(sc, priv_sta);
+
+ ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
+ "Operating HT Bandwidth changed to: %d\n",
+ sc->hw->conf.channel_type);
}
}
@@ -1484,7 +1358,7 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
struct ath_rate_priv *rc = file->private_data;
char *buf;
unsigned int len = 0, max;
- int i = 0;
+ int rix;
ssize_t retval;
if (rc->rate_table == NULL)
@@ -1500,7 +1374,8 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
"HT", "MCS", "Rate",
"Success", "Retries", "XRetries", "PER");
- for (i = 0; i < rc->rate_table_size; i++) {
+ for (rix = 0; rix < rc->max_valid_rate; rix++) {
+ u8 i = rc->valid_rate_index[rix];
u32 ratekbps = rc->rate_table->info[i].ratekbps;
struct ath_rc_stats *stats = &rc->rcstats[i];
char mcs[5];
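
The reworked ath_rc_get_rateindex() above no longer uses the removed sgi_index/cw40index/ht_index columns; it scans the station's valid-rate list for the HT entry whose hardware ratecode matches the reported MCS and then bumps the index by one for the short-GI variant, relying on the table ordering. Below is a minimal userspace sketch of that lookup; struct rate_info, lookup_rix() and the sample table are simplified stand-ins, not driver code.

/* Sketch of the valid-rate-list lookup (illustrative only). */
#include <stdbool.h>
#include <stdio.h>

struct rate_info { bool ht; unsigned char ratecode; };

static int lookup_rix(const struct rate_info *tbl,
		      const unsigned char *valid, int nvalid,
		      unsigned char mcs, bool short_gi)
{
	int i, idx = 0;

	for (i = 0; i < nvalid; i++) {
		idx = valid[i];
		if (tbl[idx].ht && tbl[idx].ratecode == mcs)
			break;	/* first matching HT entry wins */
	}
	return short_gi ? idx + 1 : idx;	/* SGI entry sits next to it */
}

int main(void)
{
	const struct rate_info tbl[] = {
		{ false, 12 },	/* 6 Mb/s OFDM */
		{ true,  0 },	/* MCS0, long GI */
		{ true,  0 },	/* MCS0, short GI */
		{ true,  1 },	/* MCS1, long GI */
		{ true,  1 },	/* MCS1, short GI */
	};
	const unsigned char valid[] = { 0, 1, 3 };

	printf("MCS1+SGI -> index %d\n", lookup_rix(tbl, valid, 3, 1, true));
	return 0;
}
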
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 75f8e9b06b28..268e67dc5fb2 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -160,10 +160,6 @@ struct ath_rate_table {
u32 user_ratekbps;
u8 ratecode;
u8 dot11rate;
- u8 ctrl_rate;
- u8 cw40index;
- u8 sgi_index;
- u8 ht_index;
} info[RATE_TABLE_SIZE];
u32 probe_interval;
u8 initial_ratemax;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 4480c0cc655f..83d16e7ed272 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -424,8 +424,8 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
rfilt |= ATH9K_RX_FILTER_COMP_BAR;
if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
- /* The following may also be needed for other older chips */
- if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
+ /* This is needed for older chips */
+ if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
rfilt |= ATH9K_RX_FILTER_PROM;
rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
}
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 87cac8eb7834..4e6760f8596d 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -801,6 +801,8 @@
#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */
#define AR_SREV_VERSION_9462 0x280
#define AR_SREV_REVISION_9462_20 2
+#define AR_SREV_VERSION_9565 0x2C0
+#define AR_SREV_REVISION_9565_10 0
#define AR_SREV_VERSION_9550 0x400
#define AR_SREV_5416(_ah) \
@@ -909,6 +911,13 @@
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
+#define AR_SREV_9565(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
+
+#define AR_SREV_9565_10(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9565_10))
+
#define AR_SREV_9550(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9550))
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index 44a08eb53c62..a483d518758c 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -497,7 +497,7 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr);
- if (AR_SREV_9462(ah)) {
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
/*
* this is needed to prevent the chip waking up
* the host within 3-4 seconds with certain
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 0d4155aec48d..36618e3a5e60 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -568,7 +568,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (!an->sleeping) {
ath_tx_queue_tid(txq, tid);
- if (ts->ts_status & ATH9K_TXERR_FILT)
+ if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
tid->ac->clear_ps_filter = true;
}
}
@@ -1773,11 +1773,12 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
TX_STAT_INC(txq->axq_qnum, queued);
}
-static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
+static void setup_frame_info(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
int framelen)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_sta *sta = tx_info->control.sta;
struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
const struct ieee80211_rate *rate;
@@ -1819,10 +1820,14 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
struct ath_hw *ah = sc->sc_ah;
struct ath9k_channel *curchan = ah->curchan;
+
if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
(curchan->channelFlags & CHANNEL_5GHZ) &&
(chainmask == 0x7) && (rate < 0x90))
return 0x3;
+ else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
+ IS_CCK_RATE(rate))
+ return 0x2;
else
return chainmask;
}
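
ath_txchainmask_reduction() now covers two cases: APM hardware on a 5 GHz channel drops a 3-chain mask to two chains for low rates, and AR9462 with Bluetooth coexistence active transmits CCK frames on a reduced mask. A standalone sketch of that decision follows, with the hardware checks collapsed into booleans; the helper and return values are illustrative stand-ins, not the ath9k definitions.

/* Sketch of the chainmask-reduction decision (illustrative only). */
#include <stdbool.h>
#include <stdio.h>

static unsigned char reduce_chainmask(bool apm_5ghz_low_rate,
				      bool ar9462_btcoex_cck,
				      unsigned char chainmask)
{
	if (apm_5ghz_low_rate && chainmask == 0x7)
		return 0x3;	/* drop from three chains to two */
	if (ar9462_btcoex_cck)
		return 0x2;	/* CCK under BT coex: single chain */
	return chainmask;	/* otherwise leave the mask untouched */
}

int main(void)
{
	printf("0x%x\n", reduce_chainmask(false, true, 0x7));
	return 0;
}
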
@@ -1935,7 +1940,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_sta *sta = info->control.sta;
+ struct ieee80211_sta *sta = txctl->sta;
struct ieee80211_vif *vif = info->control.vif;
struct ath_softc *sc = hw->priv;
struct ath_txq *txq = txctl->txq;
@@ -1979,7 +1984,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
!ieee80211_is_data(hdr->frame_control))
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
- setup_frame_info(hw, skb, frmlen);
+ setup_frame_info(hw, sta, skb, frmlen);
/*
* At this point, the vif, hw_key and sta pointers in the tx control
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 376be11161c0..2aa4a59c72c8 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -425,6 +425,7 @@ struct ar9170 {
bool rx_has_plcp;
struct sk_buff *rx_failover;
int rx_failover_missing;
+ u32 ampdu_ref;
/* FIFO for collecting outstanding BlockAckRequest */
struct list_head bar_list[__AR9170_NUM_TXQ];
@@ -577,7 +578,9 @@ void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len);
void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
/* TX */
-void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void carl9170_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
void carl9170_tx_janitor(struct work_struct *work);
void carl9170_tx_process_status(struct ar9170 *ar,
const struct carl9170_rsp *cmd);
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index c5ca6f1f5836..24ac2876a733 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -341,6 +341,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
if (SUPP(CARL9170FW_WLANTX_CAB)) {
if_comb_types |=
BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_P2P_GO);
}
}
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index 53415bfd8bef..e3b1b6e87760 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -304,7 +304,8 @@ int carl9170_set_operating_mode(struct ar9170 *ar)
struct ath_common *common = &ar->common;
u8 *mac_addr, *bssid;
u32 cam_mode = AR9170_MAC_CAM_DEFAULTS;
- u32 enc_mode = AR9170_MAC_ENCRYPTION_DEFAULTS;
+ u32 enc_mode = AR9170_MAC_ENCRYPTION_DEFAULTS |
+ AR9170_MAC_ENCRYPTION_MGMT_RX_SOFTWARE;
u32 rx_ctrl = AR9170_MAC_RX_CTRL_DEAGG |
AR9170_MAC_RX_CTRL_SHORT_FILTER;
u32 sniffer = AR9170_MAC_SNIFFER_DEFAULTS;
@@ -318,10 +319,10 @@ int carl9170_set_operating_mode(struct ar9170 *ar)
bssid = common->curbssid;
switch (vif->type) {
- case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_ADHOC:
cam_mode |= AR9170_MAC_CAM_IBSS;
break;
+ case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
cam_mode |= AR9170_MAC_CAM_AP;
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 858e58dfc4dc..67997b39aba7 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -616,10 +616,12 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
goto unlock;
+ case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
if ((vif->type == NL80211_IFTYPE_STATION) ||
(vif->type == NL80211_IFTYPE_WDS) ||
- (vif->type == NL80211_IFTYPE_AP))
+ (vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_MESH_POINT))
break;
err = -EBUSY;
@@ -1147,6 +1149,7 @@ static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
break;
case WLAN_CIPHER_SUITE_CCMP:
ktype = AR9170_ENC_ALG_AESCCMP;
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
break;
default:
return -EOPNOTSUPP;
@@ -1778,6 +1781,7 @@ void *carl9170_alloc(size_t priv_size)
hw->wiphy->interface_modes = 0;
hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 6f6a34155667..a0b723078547 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -206,6 +206,7 @@ void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
carl9170_update_beacon(ar, true);
break;
@@ -623,7 +624,8 @@ static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
#undef TID_CHECK
}
-static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms)
+static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms,
+ struct ieee80211_rx_status *rx_status)
{
__le16 fc;
@@ -636,6 +638,9 @@ static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms)
return true;
}
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
+ rx_status->ampdu_reference = ar->ampdu_ref;
+
/*
* "802.11n - 7.4a.3 A-MPDU contents" describes in which contexts
* certain frame types can be part of an aMPDU.
@@ -684,12 +689,15 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
if (unlikely(len < sizeof(*mac)))
goto drop;
+ memset(&status, 0, sizeof(status));
+
mpdu_len = len - sizeof(*mac);
mac = (void *)(buf + mpdu_len);
mac_status = mac->status;
switch (mac_status & AR9170_RX_STATUS_MPDU) {
case AR9170_RX_STATUS_MPDU_FIRST:
+ ar->ampdu_ref++;
/* Aggregated MPDUs start with an PLCP header */
if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
head = (void *) buf;
@@ -720,12 +728,13 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
break;
case AR9170_RX_STATUS_MPDU_LAST:
+ status.flag |= RX_FLAG_AMPDU_IS_LAST;
+
/*
* The last frame of an A-MPDU has an extra tail
* which does contain the phy status of the whole
* aggregate.
*/
-
if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
mpdu_len -= sizeof(struct ar9170_rx_phystatus);
phy = (void *)(buf + mpdu_len);
@@ -773,11 +782,10 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
if (unlikely(mpdu_len < (2 + 2 + ETH_ALEN + FCS_LEN)))
goto drop;
- memset(&status, 0, sizeof(status));
if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status)))
goto drop;
- if (!carl9170_ampdu_check(ar, buf, mac_status))
+ if (!carl9170_ampdu_check(ar, buf, mac_status, &status))
goto drop;
if (phy)
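
The rx.c hunks above tag received subframes with an A-MPDU reference: ar->ampdu_ref is bumped when the first MPDU of an aggregate arrives, every subframe's rx status carries that reference, and the last subframe additionally gets RX_FLAG_AMPDU_IS_LAST. A reduced userspace sketch of the same bookkeeping is below; the enum, struct and tag_subframe() are simplified stand-ins, and non-aggregated frames bypass this path in the driver.

/* Sketch of A-MPDU reference tagging (illustrative only). */
#include <stdbool.h>
#include <stdio.h>

enum mpdu_pos { MPDU_FIRST, MPDU_MIDDLE, MPDU_LAST };

struct rx_status { unsigned int ampdu_ref; bool ampdu_last; };

static unsigned int ampdu_ref;	/* per-device counter in the driver */

static void tag_subframe(enum mpdu_pos pos, struct rx_status *status)
{
	if (pos == MPDU_FIRST)
		ampdu_ref++;			/* a new aggregate starts here */

	status->ampdu_ref = ampdu_ref;		/* same ref for all subframes */
	status->ampdu_last = (pos == MPDU_LAST);
}

int main(void)
{
	enum mpdu_pos frames[] = { MPDU_FIRST, MPDU_MIDDLE, MPDU_LAST };
	struct rx_status s;
	unsigned int i;

	for (i = 0; i < 3; i++) {
		tag_subframe(frames[i], &s);
		printf("ref=%u last=%d\n", s.ampdu_ref, s.ampdu_last);
	}
	return 0;
}
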
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 6a8681407a1d..84377cf580e0 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -867,14 +867,15 @@ static bool carl9170_tx_cts_check(struct ar9170 *ar,
return false;
}
-static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
+static int carl9170_tx_prepare(struct ar9170 *ar,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
struct _carl9170_tx_superframe *txc;
struct carl9170_vif_info *cvif;
struct ieee80211_tx_info *info;
struct ieee80211_tx_rate *txrate;
- struct ieee80211_sta *sta;
struct carl9170_tx_info *arinfo;
unsigned int hw_queue;
int i;
@@ -910,8 +911,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
else
cvif = NULL;
- sta = info->control.sta;
-
txc = (void *)skb_push(skb, sizeof(*txc));
memset(txc, 0, sizeof(*txc));
@@ -1457,20 +1456,21 @@ err_unlock_rcu:
return false;
}
-void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void carl9170_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct ar9170 *ar = hw->priv;
struct ieee80211_tx_info *info;
- struct ieee80211_sta *sta;
+ struct ieee80211_sta *sta = control->sta;
bool run;
if (unlikely(!IS_STARTED(ar)))
goto err_free;
info = IEEE80211_SKB_CB(skb);
- sta = info->control.sta;
- if (unlikely(carl9170_tx_prepare(ar, skb)))
+ if (unlikely(carl9170_tx_prepare(ar, sta, skb)))
goto err_free;
carl9170_tx_accounting(ar, skb);
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 3876c7ea54f4..7a28d21ac389 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -34,8 +34,8 @@ config B43_BCMA
config B43_BCMA_EXTRA
bool "Hardware support that overlaps with the brcmsmac driver"
depends on B43_BCMA
- default n if BRCMSMAC || BRCMSMAC_MODULE
- default y
+ default n if BRCMSMAC
+ default y
config B43_SSB
bool
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 4648bbf76abc..098fe9ee7096 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -4,6 +4,7 @@ b43-y += tables.o
b43-$(CONFIG_B43_PHY_N) += tables_nphy.o
b43-$(CONFIG_B43_PHY_N) += radio_2055.o
b43-$(CONFIG_B43_PHY_N) += radio_2056.o
+b43-$(CONFIG_B43_PHY_N) += radio_2057.o
b43-y += phy_common.o
b43-y += phy_g.o
b43-y += phy_a.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 7c899fc7ddd0..b298e5d68be2 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -241,16 +241,18 @@ enum {
#define B43_SHM_SH_PHYVER 0x0050 /* PHY version */
#define B43_SHM_SH_PHYTYPE 0x0052 /* PHY type */
#define B43_SHM_SH_ANTSWAP 0x005C /* Antenna swap threshold */
-#define B43_SHM_SH_HOSTFLO 0x005E /* Hostflags for ucode options (low) */
-#define B43_SHM_SH_HOSTFMI 0x0060 /* Hostflags for ucode options (middle) */
-#define B43_SHM_SH_HOSTFHI 0x0062 /* Hostflags for ucode options (high) */
+#define B43_SHM_SH_HOSTF1 0x005E /* Hostflags 1 for ucode options */
+#define B43_SHM_SH_HOSTF2 0x0060 /* Hostflags 2 for ucode options */
+#define B43_SHM_SH_HOSTF3 0x0062 /* Hostflags 3 for ucode options */
#define B43_SHM_SH_RFATT 0x0064 /* Current radio attenuation value */
#define B43_SHM_SH_RADAR 0x0066 /* Radar register */
#define B43_SHM_SH_PHYTXNOI 0x006E /* PHY noise directly after TX (lower 8bit only) */
#define B43_SHM_SH_RFRXSP1 0x0072 /* RF RX SP Register 1 */
+#define B43_SHM_SH_HOSTF4 0x0078 /* Hostflags 4 for ucode options */
#define B43_SHM_SH_CHAN 0x00A0 /* Current channel (low 8bit only) */
#define B43_SHM_SH_CHAN_5GHZ 0x0100 /* Bit set, if 5 Ghz channel */
#define B43_SHM_SH_CHAN_40MHZ 0x0200 /* Bit set, if 40 Mhz channel width */
+#define B43_SHM_SH_HOSTF5 0x00D4 /* Hostflags 5 for ucode options */
#define B43_SHM_SH_BCMCFIFOID 0x0108 /* Last posted cookie to the bcast/mcast FIFO */
/* TSSI information */
#define B43_SHM_SH_TSSI_CCK 0x0058 /* TSSI for last 4 CCK frames (32bit) */
@@ -415,6 +417,8 @@ enum {
#define B43_PHYTYPE_HT 0x07
#define B43_PHYTYPE_LCN 0x08
#define B43_PHYTYPE_LCNXN 0x09
+#define B43_PHYTYPE_LCN40 0x0a
+#define B43_PHYTYPE_AC 0x0b
/* PHYRegisters */
#define B43_PHY_ILT_A_CTRL 0x0072
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index a140165dfee0..73730e94e0ac 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -533,11 +533,11 @@ u64 b43_hf_read(struct b43_wldev *dev)
{
u64 ret;
- ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI);
+ ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF3);
ret <<= 16;
- ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI);
+ ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF2);
ret <<= 16;
- ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO);
+ ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1);
return ret;
}
@@ -550,9 +550,9 @@ void b43_hf_write(struct b43_wldev *dev, u64 value)
lo = (value & 0x00000000FFFFULL);
mi = (value & 0x0000FFFF0000ULL) >> 16;
hi = (value & 0xFFFF00000000ULL) >> 32;
- b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO, lo);
- b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI, mi);
- b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI, hi);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1, lo);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF2, mi);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF3, hi);
}
/* Read the firmware capabilities bitmask (Opensource firmware only) */
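
The renamed HOSTF1..HOSTF3 offsets still carry the 48-bit host-flags value as three 16-bit shared-memory words, low word first, exactly as b43_hf_read()/b43_hf_write() show above. The arithmetic, reduced to a standalone sketch with an array standing in for the SHM accessors (hf_read/hf_write here are sketches, not the driver functions):

/* Sketch of the host-flags word split (illustrative only). */
#include <stdint.h>
#include <stdio.h>

static uint16_t shm[3];		/* stands in for HOSTF1, HOSTF2, HOSTF3 */

static void hf_write(uint64_t value)
{
	shm[0] = value & 0xFFFF;		/* low word */
	shm[1] = (value >> 16) & 0xFFFF;	/* middle word */
	shm[2] = (value >> 32) & 0xFFFF;	/* high word */
}

static uint64_t hf_read(void)
{
	uint64_t ret = shm[2];

	ret = (ret << 16) | shm[1];
	ret = (ret << 16) | shm[0];
	return ret;
}

int main(void)
{
	hf_write(0x123456789ABCULL);
	printf("0x%llx\n", (unsigned long long)hf_read());
	return 0;
}
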
@@ -3412,7 +3412,8 @@ static void b43_tx_work(struct work_struct *work)
}
static void b43_op_tx(struct ieee80211_hw *hw,
- struct sk_buff *skb)
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
@@ -4282,6 +4283,35 @@ out:
return err;
}
+static char *b43_phy_name(struct b43_wldev *dev, u8 phy_type)
+{
+ switch (phy_type) {
+ case B43_PHYTYPE_A:
+ return "A";
+ case B43_PHYTYPE_B:
+ return "B";
+ case B43_PHYTYPE_G:
+ return "G";
+ case B43_PHYTYPE_N:
+ return "N";
+ case B43_PHYTYPE_LP:
+ return "LP";
+ case B43_PHYTYPE_SSLPN:
+ return "SSLPN";
+ case B43_PHYTYPE_HT:
+ return "HT";
+ case B43_PHYTYPE_LCN:
+ return "LCN";
+ case B43_PHYTYPE_LCNXN:
+ return "LCNXN";
+ case B43_PHYTYPE_LCN40:
+ return "LCN40";
+ case B43_PHYTYPE_AC:
+ return "AC";
+ }
+ return "UNKNOWN";
+}
+
/* Get PHY and RADIO versioning numbers */
static int b43_phy_versioning(struct b43_wldev *dev)
{
@@ -4342,13 +4372,13 @@ static int b43_phy_versioning(struct b43_wldev *dev)
unsupported = 1;
}
if (unsupported) {
- b43err(dev->wl, "FOUND UNSUPPORTED PHY "
- "(Analog %u, Type %u, Revision %u)\n",
- analog_type, phy_type, phy_rev);
+ b43err(dev->wl, "FOUND UNSUPPORTED PHY (Analog %u, Type %d (%s), Revision %u)\n",
+ analog_type, phy_type, b43_phy_name(dev, phy_type),
+ phy_rev);
return -EOPNOTSUPP;
}
- b43dbg(dev->wl, "Found PHY: Analog %u, Type %u, Revision %u\n",
- analog_type, phy_type, phy_rev);
+ b43info(dev->wl, "Found PHY: Analog %u, Type %d (%s), Revision %u\n",
+ analog_type, phy_type, b43_phy_name(dev, phy_type), phy_rev);
/* Get RADIO versioning */
if (dev->dev->core_rev >= 24) {
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 3f8883b14d9c..f01676ac481b 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -240,6 +240,21 @@ void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
(b43_radio_read16(dev, offset) & mask) | set);
}
+bool b43_radio_wait_value(struct b43_wldev *dev, u16 offset, u16 mask,
+ u16 value, int delay, int timeout)
+{
+ u16 val;
+ int i;
+
+ for (i = 0; i < timeout; i += delay) {
+ val = b43_radio_read(dev, offset);
+ if ((val & mask) == value)
+ return true;
+ udelay(delay);
+ }
+ return false;
+}
+
u16 b43_phy_read(struct b43_wldev *dev, u16 reg)
{
assert_mac_suspended(dev);
@@ -428,7 +443,7 @@ int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset)
average = (a + b + c + d + 2) / 4;
if (is_ofdm) {
/* Adjust for CCK-boost */
- if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO)
+ if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1)
& B43_HF_CCKBOOST)
average = (average >= 13) ? (average - 13) : 0;
}
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index 9233b13fc16d..f1b999349876 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -365,6 +365,12 @@ void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set);
void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set);
/**
+ * b43_radio_wait_value - Waits for a given value in masked register read
+ */
+bool b43_radio_wait_value(struct b43_wldev *dev, u16 offset, u16 mask,
+ u16 value, int delay, int timeout);
+
+/**
* b43_radio_lock - Lock firmware radio register access
*/
void b43_radio_lock(struct b43_wldev *dev);
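
b43_radio_wait_value() declared above is a plain poll-until-match loop: read the register, mask it, compare against the expected value, and give up once the accumulated delay reaches the timeout. A standalone sketch of the pattern follows, with a callback and usleep() standing in for b43_radio_read() and udelay(); fake_read() and the constants are illustrative only.

/* Sketch of the poll-with-timeout helper (illustrative only). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

typedef uint16_t (*read_fn)(uint16_t offset);

static bool wait_value(read_fn read_reg, uint16_t offset, uint16_t mask,
		       uint16_t value, int delay_us, int timeout_us)
{
	int elapsed;

	for (elapsed = 0; elapsed < timeout_us; elapsed += delay_us) {
		if ((read_reg(offset) & mask) == value)
			return true;	/* matched before the timeout */
		usleep(delay_us);
	}
	return false;			/* gave up: caller logs a timeout */
}

static uint16_t fake_read(uint16_t offset)
{
	(void)offset;
	return 0x80;	/* pretend the calibration-done bit is already set */
}

int main(void)
{
	printf("done=%d\n", wait_value(fake_read, 0x1ca, 0x80, 0x80, 10, 2000));
	return 0;
}
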
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index b92bb9c92ad1..3c35382ee6c2 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -32,6 +32,7 @@
#include "tables_nphy.h"
#include "radio_2055.h"
#include "radio_2056.h"
+#include "radio_2057.h"
#include "main.h"
struct nphy_txgains {
@@ -126,6 +127,46 @@ ok:
b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */
+static void b43_nphy_rf_control_override_rev7(struct b43_wldev *dev, u16 field,
+ u16 value, u8 core, bool off,
+ u8 override)
+{
+ const struct nphy_rf_control_override_rev7 *e;
+ u16 en_addrs[3][2] = {
+ { 0x0E7, 0x0EC }, { 0x342, 0x343 }, { 0x346, 0x347 }
+ };
+ u16 en_addr;
+ u16 en_mask = field;
+ u16 val_addr;
+ u8 i;
+
+ /* Remember: we can get NULL! */
+ e = b43_nphy_get_rf_ctl_over_rev7(dev, field, override);
+
+ for (i = 0; i < 2; i++) {
+ if (override >= ARRAY_SIZE(en_addrs)) {
+ b43err(dev->wl, "Invalid override value %d\n", override);
+ return;
+ }
+ en_addr = en_addrs[override][i];
+
+ /* e may be NULL; only read value registers from a valid entry */
+ val_addr = e ? ((i == 0) ? e->val_addr_core0 : e->val_addr_core1) : 0;
+
+ if (off) {
+ b43_phy_mask(dev, en_addr, ~en_mask);
+ if (e) /* be safer than wl: skip value regs without a table entry */
+ b43_phy_mask(dev, val_addr, ~e->val_mask);
+ } else {
+ if (!core || (core & (1 << i))) {
+ b43_phy_set(dev, en_addr, en_mask);
+ if (e)
+ b43_phy_maskset(dev, val_addr, ~e->val_mask, (value << e->val_shift));
+ }
+ }
+ }
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
u16 value, u8 core, bool off)
@@ -459,6 +500,137 @@ static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
}
/**************************************************
+ * Radio 0x2057
+ **************************************************/
+
+/* http://bcm-v4.sipsolutions.net/PHY/radio2057_rcal */
+static u8 b43_radio_2057_rcal(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+ u16 tmp;
+
+ if (phy->radio_rev == 5) {
+ b43_phy_mask(dev, 0x342, ~0x2);
+ udelay(10);
+ b43_radio_set(dev, R2057_IQTEST_SEL_PU, 0x1);
+ b43_radio_maskset(dev, 0x1ca, ~0x2, 0x1);
+ }
+
+ b43_radio_set(dev, R2057_RCAL_CONFIG, 0x1);
+ udelay(10);
+ b43_radio_set(dev, R2057_RCAL_CONFIG, 0x3);
+ if (!b43_radio_wait_value(dev, R2057_RCCAL_N1_1, 1, 1, 100, 1000000)) {
+ b43err(dev->wl, "Radio 0x2057 rcal timeout\n");
+ return 0;
+ }
+ b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x2);
+ tmp = b43_radio_read(dev, R2057_RCAL_STATUS) & 0x3E;
+ b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x1);
+
+ if (phy->radio_rev == 5) {
+ b43_radio_mask(dev, R2057_IPA2G_CASCONV_CORE0, ~0x1);
+ b43_radio_mask(dev, 0x1ca, ~0x2);
+ }
+ if (phy->radio_rev <= 4 || phy->radio_rev == 6) {
+ b43_radio_maskset(dev, R2057_TEMPSENSE_CONFIG, ~0x3C, tmp);
+ b43_radio_maskset(dev, R2057_BANDGAP_RCAL_TRIM, ~0xF0,
+ tmp << 2);
+ }
+
+ return tmp & 0x3e;
+}
+
+/* http://bcm-v4.sipsolutions.net/PHY/radio2057_rccal */
+static u16 b43_radio_2057_rccal(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+ bool special = (phy->radio_rev == 3 || phy->radio_rev == 4 ||
+ phy->radio_rev == 6);
+ u16 tmp;
+
+ if (special) {
+ b43_radio_write(dev, R2057_RCCAL_MASTER, 0x61);
+ b43_radio_write(dev, R2057_RCCAL_TRC0, 0xC0);
+ } else {
+ b43_radio_write(dev, 0x1AE, 0x61);
+ b43_radio_write(dev, R2057_RCCAL_TRC0, 0xE1);
+ }
+ b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
+ b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
+ if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
+ 5000000))
+ b43dbg(dev->wl, "Radio 0x2057 rccal timeout\n");
+ b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
+ if (special) {
+ b43_radio_write(dev, R2057_RCCAL_MASTER, 0x69);
+ b43_radio_write(dev, R2057_RCCAL_TRC0, 0xB0);
+ } else {
+ b43_radio_write(dev, 0x1AE, 0x69);
+ b43_radio_write(dev, R2057_RCCAL_TRC0, 0xD5);
+ }
+ b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
+ b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
+ if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
+ 5000000))
+ b43dbg(dev->wl, "Radio 0x2057 rccal timeout\n");
+ b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
+ if (special) {
+ b43_radio_write(dev, R2057_RCCAL_MASTER, 0x73);
+ b43_radio_write(dev, R2057_RCCAL_X1, 0x28);
+ b43_radio_write(dev, R2057_RCCAL_TRC0, 0xB0);
+ } else {
+ b43_radio_write(dev, 0x1AE, 0x73);
+ b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
+ b43_radio_write(dev, R2057_RCCAL_TRC0, 0x99);
+ }
+ b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
+ if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
+ 5000000)) {
+ b43err(dev->wl, "Radio 0x2057 rcal timeout\n");
+ return 0;
+ }
+ tmp = b43_radio_read(dev, R2057_RCCAL_DONE_OSCCAP);
+ b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
+ return tmp;
+}
+
+static void b43_radio_2057_init_pre(struct b43_wldev *dev)
+{
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_CHIP0PU);
+ /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_OEPORFORCE);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_OEPORFORCE);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_CHIP0PU);
+}
+
+static void b43_radio_2057_init_post(struct b43_wldev *dev)
+{
+ b43_radio_set(dev, R2057_XTALPUOVR_PINCTRL, 0x1);
+
+ b43_radio_set(dev, R2057_RFPLL_MISC_CAL_RESETN, 0x78);
+ b43_radio_set(dev, R2057_XTAL_CONFIG2, 0x80);
+ mdelay(2);
+ b43_radio_mask(dev, R2057_RFPLL_MISC_CAL_RESETN, ~0x78);
+ b43_radio_mask(dev, R2057_XTAL_CONFIG2, ~0x80);
+
+ if (dev->phy.n->init_por) {
+ b43_radio_2057_rcal(dev);
+ b43_radio_2057_rccal(dev);
+ }
+ b43_radio_mask(dev, R2057_RFPLL_MASTER, ~0x8);
+
+ dev->phy.n->init_por = false;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */
+static void b43_radio_2057_init(struct b43_wldev *dev)
+{
+ b43_radio_2057_init_pre(dev);
+ r2057_upload_inittabs(dev);
+ b43_radio_2057_init_post(dev);
+}
+
+/**************************************************
* Radio 0x2056
**************************************************/
@@ -545,7 +717,9 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
enum ieee80211_band band = b43_current_band(dev->wl);
u16 offset;
u8 i;
- u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost;
+ u16 bias, cbias;
+ u16 pag_boost, padg_boost, pgag_boost, mixg_boost;
+ u16 paa_boost, pada_boost, pgaa_boost, mixa_boost;
B43_WARN_ON(dev->phy.rev < 3);
@@ -630,7 +804,56 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
}
} else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
- /* TODO */
+ u16 freq = dev->phy.channel_freq;
+ if (freq < 5100) {
+ paa_boost = 0xA;
+ pada_boost = 0x77;
+ pgaa_boost = 0xF;
+ mixa_boost = 0xF;
+ } else if (freq < 5340) {
+ paa_boost = 0x8;
+ pada_boost = 0x77;
+ pgaa_boost = 0xFB;
+ mixa_boost = 0xF;
+ } else if (freq < 5650) {
+ paa_boost = 0x0;
+ pada_boost = 0x77;
+ pgaa_boost = 0xB;
+ mixa_boost = 0xF;
+ } else {
+ paa_boost = 0x0;
+ pada_boost = 0x77;
+ if (freq != 5825)
+ pgaa_boost = -(freq - 18) / 36 + 168;
+ else
+ pgaa_boost = 6;
+ mixa_boost = 0xF;
+ }
+
+ for (i = 0; i < 2; i++) {
+ offset = i ? B2056_TX1 : B2056_TX0;
+
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAA_BOOST_TUNE, paa_boost);
+ b43_radio_write(dev,
+ offset | B2056_TX_PADA_BOOST_TUNE, pada_boost);
+ b43_radio_write(dev,
+ offset | B2056_TX_PGAA_BOOST_TUNE, pgaa_boost);
+ b43_radio_write(dev,
+ offset | B2056_TX_MIXA_BOOST_TUNE, mixa_boost);
+ b43_radio_write(dev,
+ offset | B2056_TX_TXSPARE1, 0x30);
+ b43_radio_write(dev,
+ offset | B2056_TX_PA_SPARE2, 0xee);
+ b43_radio_write(dev,
+ offset | B2056_TX_PADA_CASCBIAS, 0x03);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAA_IAUX_STAT, 0x50);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAA_IMAIN_STAT, 0x50);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAA_CASCBIAS, 0x30);
+ }
}
udelay(50);
@@ -643,6 +866,37 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
udelay(300);
}
+static u8 b43_radio_2056_rcal(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+ u16 mast2, tmp;
+
+ if (phy->rev != 3)
+ return 0;
+
+ mast2 = b43_radio_read(dev, B2056_SYN_PLL_MAST2);
+ b43_radio_write(dev, B2056_SYN_PLL_MAST2, mast2 | 0x7);
+
+ udelay(10);
+ b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x01);
+ udelay(10);
+ b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x09);
+
+ if (!b43_radio_wait_value(dev, B2056_SYN_RCAL_CODE_OUT, 0x80, 0x80, 100,
+ 1000000)) {
+ b43err(dev->wl, "Radio recalibration timeout\n");
+ return 0;
+ }
+
+ b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x01);
+ tmp = b43_radio_read(dev, B2056_SYN_RCAL_CODE_OUT);
+ b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x00);
+
+ b43_radio_write(dev, B2056_SYN_PLL_MAST2, mast2);
+
+ return tmp & 0x1f;
+}
+
static void b43_radio_init2056_pre(struct b43_wldev *dev)
{
b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
@@ -665,10 +919,8 @@ static void b43_radio_init2056_post(struct b43_wldev *dev)
b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
- /*
- if (nphy->init_por)
- Call Radio 2056 Recalibrate
- */
+ if (dev->phy.n->init_por)
+ b43_radio_2056_rcal(dev);
}
/*
@@ -680,6 +932,8 @@ static void b43_radio_init2056(struct b43_wldev *dev)
b43_radio_init2056_pre(dev);
b2056_upload_inittabs(dev, 0, 0);
b43_radio_init2056_post(dev);
+
+ dev->phy.n->init_por = false;
}
/**************************************************
@@ -753,8 +1007,6 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
struct ssb_sprom *sprom = dev->dev->bus_sprom;
- int i;
- u16 val;
bool workaround = false;
if (sprom->revision < 4)
@@ -777,15 +1029,7 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
b43_radio_set(dev, B2055_CAL_MISC, 0x1);
msleep(1);
b43_radio_set(dev, B2055_CAL_MISC, 0x40);
- for (i = 0; i < 200; i++) {
- val = b43_radio_read(dev, B2055_CAL_COUT2);
- if (val & 0x80) {
- i = 0;
- break;
- }
- udelay(10);
- }
- if (i)
+ if (!b43_radio_wait_value(dev, B2055_CAL_COUT2, 0x80, 0x80, 10, 2000))
b43err(dev->wl, "radio post init timeout\n");
b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F);
b43_switch_channel(dev, dev->phy.channel);
@@ -1860,12 +2104,334 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev)
{
- if (dev->phy.rev >= 3)
+ if (dev->phy.rev >= 7)
+ ; /* TODO */
+ else if (dev->phy.rev >= 3)
b43_nphy_gain_ctl_workarounds_rev3plus(dev);
else
b43_nphy_gain_ctl_workarounds_rev1_2(dev);
}
+/* http://bcm-v4.sipsolutions.net/PHY/N/Read_Lpf_Bw_Ctl */
+static u16 b43_nphy_read_lpf_ctl(struct b43_wldev *dev, u16 offset)
+{
+ if (!offset)
+ offset = (dev->phy.is_40mhz) ? 0x159 : 0x154;
+ return b43_ntab_read(dev, B43_NTAB16(7, offset)) & 0x7;
+}
+
+static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
+{
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+ struct b43_phy *phy = &dev->phy;
+
+ u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
+ 0x1F };
+ u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+
+ u16 ntab7_15e_16e[] = { 0x10f, 0x10f };
+ u8 ntab7_138_146[] = { 0x11, 0x11 };
+ u8 ntab7_133[] = { 0x77, 0x11, 0x11 };
+
+ u16 lpf_20, lpf_40, lpf_11b;
+ u16 bcap_val, bcap_val_11b, bcap_val_11n_20, bcap_val_11n_40;
+ u16 scap_val, scap_val_11b, scap_val_11n_20, scap_val_11n_40;
+ bool rccal_ovrd = false;
+
+ u16 rx2tx_lut_20_11b, rx2tx_lut_20_11n, rx2tx_lut_40_11n;
+ u16 bias, conv, filt;
+
+ u32 tmp32;
+ u8 core;
+
+ if (phy->rev == 7) {
+ b43_phy_set(dev, B43_NPHY_FINERX2_CGC, 0x10);
+ b43_phy_maskset(dev, B43_NPHY_FREQGAIN0, 0xFF80, 0x0020);
+ b43_phy_maskset(dev, B43_NPHY_FREQGAIN0, 0x80FF, 0x2700);
+ b43_phy_maskset(dev, B43_NPHY_FREQGAIN1, 0xFF80, 0x002E);
+ b43_phy_maskset(dev, B43_NPHY_FREQGAIN1, 0x80FF, 0x3300);
+ b43_phy_maskset(dev, B43_NPHY_FREQGAIN2, 0xFF80, 0x0037);
+ b43_phy_maskset(dev, B43_NPHY_FREQGAIN2, 0x80FF, 0x3A00);
+ b43_phy_maskset(dev, B43_NPHY_FREQGAIN3, 0xFF80, 0x003C);
+ b43_phy_maskset(dev, B43_NPHY_FREQGAIN3, 0x80FF, 0x3E00);
+ b43_phy_maskset(dev, B43_NPHY_FREQGAIN4, 0xFF80, 0x003E);
+ b43_phy_maskset(dev, B43_NPHY_FREQGAIN4, 0x80FF, 0x3F00);
+ b43_phy_maskset(dev, B43_NPHY_FREQGAIN5, 0xFF80, 0x0040);
+ b43_phy_maskset(dev, B43_NPHY_FREQGAIN5, 0x80FF, 0x4000);
+ b43_phy_maskset(dev, B43_NPHY_FREQGAIN6, 0xFF80, 0x0040);
+ b43_phy_maskset(dev, B43_NPHY_FREQGAIN6, 0x80FF, 0x4000);
+ b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0xFF80, 0x0040);
+ b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0x80FF, 0x4000);
+ }
+ if (phy->rev <= 8) {
+ b43_phy_write(dev, 0x23F, 0x1B0);
+ b43_phy_write(dev, 0x240, 0x1B0);
+ }
+ if (phy->rev >= 8)
+ b43_phy_maskset(dev, B43_NPHY_TXTAILCNT, ~0xFF, 0x72);
+
+ b43_ntab_write(dev, B43_NTAB16(8, 0x00), 2);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x10), 2);
+ tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
+ tmp32 &= 0xffffff;
+ b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x15e), 2, ntab7_15e_16e);
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x16e), 2, ntab7_15e_16e);
+
+ if (b43_nphy_ipa(dev))
+ b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa,
+ rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa));
+
+ b43_phy_maskset(dev, 0x299, 0x3FFF, 0x4000);
+ b43_phy_maskset(dev, 0x29D, 0x3FFF, 0x4000);
+
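+ /* Read the current LPF bandwidth codes for 20 MHz, 40 MHz and legacy 11b from N-table 7 */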
+ lpf_20 = b43_nphy_read_lpf_ctl(dev, 0x154);
+ lpf_40 = b43_nphy_read_lpf_ctl(dev, 0x159);
+ lpf_11b = b43_nphy_read_lpf_ctl(dev, 0x152);
+ if (b43_nphy_ipa(dev)) {
+ if ((phy->radio_rev == 5 && phy->is_40mhz) ||
+ phy->radio_rev == 7 || phy->radio_rev == 8) {
+ bcap_val = b43_radio_read(dev, 0x16b);
+ scap_val = b43_radio_read(dev, 0x16a);
+ scap_val_11b = scap_val;
+ bcap_val_11b = bcap_val;
+ if (phy->radio_rev == 5 && phy->is_40mhz) {
+ scap_val_11n_20 = scap_val;
+ bcap_val_11n_20 = bcap_val;
+ scap_val_11n_40 = bcap_val_11n_40 = 0xc;
+ rccal_ovrd = true;
+ } else { /* Rev 7/8 */
+ lpf_20 = 4;
+ lpf_11b = 1;
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ scap_val_11n_20 = 0xc;
+ bcap_val_11n_20 = 0xc;
+ scap_val_11n_40 = 0xa;
+ bcap_val_11n_40 = 0xa;
+ } else {
+ scap_val_11n_20 = 0x14;
+ bcap_val_11n_20 = 0x14;
+ scap_val_11n_40 = 0xf;
+ bcap_val_11n_40 = 0xf;
+ }
+ rccal_ovrd = true;
+ }
+ }
+ } else {
+ if (phy->radio_rev == 5) {
+ lpf_20 = 1;
+ lpf_40 = 3;
+ bcap_val = b43_radio_read(dev, 0x16b);
+ scap_val = b43_radio_read(dev, 0x16a);
+ scap_val_11b = scap_val;
+ bcap_val_11b = bcap_val;
+ scap_val_11n_20 = 0x11;
+ scap_val_11n_40 = 0x11;
+ bcap_val_11n_20 = 0x13;
+ bcap_val_11n_40 = 0x13;
+ rccal_ovrd = true;
+ }
+ }
+ if (rccal_ovrd) {
+ rx2tx_lut_20_11b = (bcap_val_11b << 8) |
+ (scap_val_11b << 3) |
+ lpf_11b;
+ rx2tx_lut_20_11n = (bcap_val_11n_20 << 8) |
+ (scap_val_11n_20 << 3) |
+ lpf_20;
+ rx2tx_lut_40_11n = (bcap_val_11n_40 << 8) |
+ (scap_val_11n_40 << 3) |
+ lpf_40;
+ for (core = 0; core < 2; core++) {
+ b43_ntab_write(dev, B43_NTAB16(7, 0x152 + core * 16),
+ rx2tx_lut_20_11b);
+ b43_ntab_write(dev, B43_NTAB16(7, 0x153 + core * 16),
+ rx2tx_lut_20_11n);
+ b43_ntab_write(dev, B43_NTAB16(7, 0x154 + core * 16),
+ rx2tx_lut_20_11n);
+ b43_ntab_write(dev, B43_NTAB16(7, 0x155 + core * 16),
+ rx2tx_lut_40_11n);
+ b43_ntab_write(dev, B43_NTAB16(7, 0x156 + core * 16),
+ rx2tx_lut_40_11n);
+ b43_ntab_write(dev, B43_NTAB16(7, 0x157 + core * 16),
+ rx2tx_lut_40_11n);
+ b43_ntab_write(dev, B43_NTAB16(7, 0x158 + core * 16),
+ rx2tx_lut_40_11n);
+ b43_ntab_write(dev, B43_NTAB16(7, 0x159 + core * 16),
+ rx2tx_lut_40_11n);
+ }
+ b43_nphy_rf_control_override_rev7(dev, 16, 1, 3, false, 2);
+ }
+ b43_phy_write(dev, 0x32F, 0x3);
+ if (phy->radio_rev == 4 || phy->radio_rev == 6)
+ b43_nphy_rf_control_override_rev7(dev, 4, 1, 3, false, 0);
+
+ if (phy->radio_rev == 3 || phy->radio_rev == 4 || phy->radio_rev == 6) {
+ if (sprom->revision &&
+ sprom->boardflags2_hi & B43_BFH2_IPALVLSHIFT_3P3) {
+ b43_radio_write(dev, 0x5, 0x05);
+ b43_radio_write(dev, 0x6, 0x30);
+ b43_radio_write(dev, 0x7, 0x00);
+ b43_radio_set(dev, 0x4f, 0x1);
+ b43_radio_set(dev, 0xd4, 0x1);
+ bias = 0x1f;
+ conv = 0x6f;
+ filt = 0xaa;
+ } else {
+ bias = 0x2b;
+ conv = 0x7f;
+ filt = 0xee;
+ }
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ for (core = 0; core < 2; core++) {
+ if (core == 0) {
+ b43_radio_write(dev, 0x5F, bias);
+ b43_radio_write(dev, 0x64, conv);
+ b43_radio_write(dev, 0x66, filt);
+ } else {
+ b43_radio_write(dev, 0xE8, bias);
+ b43_radio_write(dev, 0xE9, conv);
+ b43_radio_write(dev, 0xEB, filt);
+ }
+ }
+ }
+ }
+
+ if (b43_nphy_ipa(dev)) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (phy->radio_rev == 3 || phy->radio_rev == 4 ||
+ phy->radio_rev == 6) {
+ for (core = 0; core < 2; core++) {
+ if (core == 0)
+ b43_radio_write(dev, 0x51,
+ 0x7f);
+ else
+ b43_radio_write(dev, 0xd6,
+ 0x7f);
+ }
+ }
+ if (phy->radio_rev == 3) {
+ for (core = 0; core < 2; core++) {
+ if (core == 0) {
+ b43_radio_write(dev, 0x64,
+ 0x13);
+ b43_radio_write(dev, 0x5F,
+ 0x1F);
+ b43_radio_write(dev, 0x66,
+ 0xEE);
+ b43_radio_write(dev, 0x59,
+ 0x8A);
+ b43_radio_write(dev, 0x80,
+ 0x3E);
+ } else {
+ b43_radio_write(dev, 0x69,
+ 0x13);
+ b43_radio_write(dev, 0xE8,
+ 0x1F);
+ b43_radio_write(dev, 0xEB,
+ 0xEE);
+ b43_radio_write(dev, 0xDE,
+ 0x8A);
+ b43_radio_write(dev, 0x105,
+ 0x3E);
+ }
+ }
+ } else if (phy->radio_rev == 7 || phy->radio_rev == 8) {
+ if (!phy->is_40mhz) {
+ b43_radio_write(dev, 0x5F, 0x14);
+ b43_radio_write(dev, 0xE8, 0x12);
+ } else {
+ b43_radio_write(dev, 0x5F, 0x16);
+ b43_radio_write(dev, 0xE8, 0x16);
+ }
+ }
+ } else {
+ u16 freq = phy->channel_freq;
+ if ((freq >= 5180 && freq <= 5230) ||
+ (freq >= 5745 && freq <= 5805)) {
+ b43_radio_write(dev, 0x7D, 0xFF);
+ b43_radio_write(dev, 0xFE, 0xFF);
+ }
+ }
+ } else {
+ if (phy->radio_rev != 5) {
+ for (core = 0; core < 2; core++) {
+ if (core == 0) {
+ b43_radio_write(dev, 0x5c, 0x61);
+ b43_radio_write(dev, 0x51, 0x70);
+ } else {
+ b43_radio_write(dev, 0xe1, 0x61);
+ b43_radio_write(dev, 0xd6, 0x70);
+ }
+ }
+ }
+ }
+
+ if (phy->radio_rev == 4) {
+ b43_ntab_write(dev, B43_NTAB16(8, 0x05), 0x20);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x15), 0x20);
+ for (core = 0; core < 2; core++) {
+ if (core == 0) {
+ b43_radio_write(dev, 0x1a1, 0x00);
+ b43_radio_write(dev, 0x1a2, 0x3f);
+ b43_radio_write(dev, 0x1a6, 0x3f);
+ } else {
+ b43_radio_write(dev, 0x1a7, 0x00);
+ b43_radio_write(dev, 0x1ab, 0x3f);
+ b43_radio_write(dev, 0x1ac, 0x3f);
+ }
+ }
+ } else {
+ b43_phy_set(dev, B43_NPHY_AFECTL_C1, 0x4);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x4);
+ b43_phy_set(dev, B43_NPHY_AFECTL_C2, 0x4);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4);
+
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x1);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x1);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x1);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x1);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x05), 0x20);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x15), 0x20);
+
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x4);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x4);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x4);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4);
+ }
+
+ b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, 0x2);
+
+ b43_ntab_write(dev, B43_NTAB32(16, 0x100), 20);
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x138), 2, ntab7_138_146);
+ b43_ntab_write(dev, B43_NTAB16(7, 0x141), 0x77);
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x133), 3, ntab7_133);
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x146), 2, ntab7_138_146);
+ b43_ntab_write(dev, B43_NTAB16(7, 0x123), 0x77);
+ b43_ntab_write(dev, B43_NTAB16(7, 0x12A), 0x77);
+
+ if (!phy->is_40mhz) {
+ b43_ntab_write(dev, B43_NTAB32(16, 0x03), 0x18D);
+ b43_ntab_write(dev, B43_NTAB32(16, 0x7F), 0x18D);
+ } else {
+ b43_ntab_write(dev, B43_NTAB32(16, 0x03), 0x14D);
+ b43_ntab_write(dev, B43_NTAB32(16, 0x7F), 0x14D);
+ }
+
+ b43_nphy_gain_ctl_workarounds(dev);
+
+ /* TODO
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4,
+ aux_adc_vmid_rev7_core0);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4,
+ aux_adc_vmid_rev7_core1);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0C), 4,
+ aux_adc_gain_rev7);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1C), 4,
+ aux_adc_gain_rev7);
+ */
+}
+
static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
@@ -1916,7 +2482,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
rx2tx_delays[6] = 1;
rx2tx_events[7] = 0x1F;
}
- b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays,
+ b43_nphy_set_rf_sequence(dev, 0, rx2tx_events, rx2tx_delays,
ARRAY_SIZE(rx2tx_events));
}
@@ -1926,8 +2492,13 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
- b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
- b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
+ if (!dev->phy.is_40mhz) {
+ b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
+ b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
+ } else {
+ b43_ntab_write(dev, B43_NTAB32(16, 3), 0x14D);
+ b43_ntab_write(dev, B43_NTAB32(16, 127), 0x14D);
+ }
b43_nphy_gain_ctl_workarounds(dev);
@@ -1963,13 +2534,14 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
if (dev->phy.rev == 4 &&
- b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
0x70);
b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
0x70);
}
+ /* Dropped probably-always-true condition */
b43_phy_write(dev, 0x224, 0x03eb);
b43_phy_write(dev, 0x225, 0x03eb);
b43_phy_write(dev, 0x226, 0x0341);
@@ -1982,6 +2554,9 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
b43_phy_write(dev, 0x22d, 0x042b);
b43_phy_write(dev, 0x22e, 0x0381);
b43_phy_write(dev, 0x22f, 0x0381);
+
+ if (dev->phy.rev >= 6 && sprom->boardflags2_lo & B43_BFL2_SINGLEANT_CCK)
+ ; /* TODO: 0x0080000000000000 HF */
}
static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
@@ -1996,6 +2571,12 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
+ if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD ||
+ dev->dev->board_type == 0x8B) {
+ delays1[0] = 0x1;
+ delays1[5] = 0x14;
+ }
+
if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
nphy->band5g_pwrgain) {
b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
@@ -2007,8 +2588,10 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
- b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
+ if (dev->phy.rev < 3) {
+ b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
+ }
if (dev->phy.rev < 2) {
b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
@@ -2024,11 +2607,6 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
- if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD &&
- dev->dev->board_type == 0x8B) {
- delays1[0] = 0x1;
- delays1[5] = 0x14;
- }
b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
@@ -2055,11 +2633,13 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
- b43_phy_mask(dev, B43_NPHY_PIL_DW1,
- ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
- b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
- b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
- b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+ if (dev->phy.rev < 3) {
+ b43_phy_mask(dev, B43_NPHY_PIL_DW1,
+ ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+ }
if (dev->phy.rev == 2)
b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
@@ -2083,7 +2663,9 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
b43_phy_set(dev, B43_NPHY_IQFLIP,
B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
- if (dev->phy.rev >= 3)
+ if (dev->phy.rev >= 7)
+ b43_nphy_workarounds_rev7plus(dev);
+ else if (dev->phy.rev >= 3)
b43_nphy_workarounds_rev3plus(dev);
else
b43_nphy_workarounds_rev1_2(dev);
@@ -2542,7 +3124,7 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
b43_nphy_ipa_internal_tssi_setup(dev);
if (phy->rev >= 7)
- ; /* TODO: Override Rev7 with 0x2000, 0, 3, 0, 0 as arguments */
+ b43_nphy_rf_control_override_rev7(dev, 0x2000, 0, 3, false, 0);
else if (phy->rev >= 3)
b43_nphy_rf_control_override(dev, 0x2000, 0, 3, false);
@@ -2554,7 +3136,7 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
b43_nphy_rssi_select(dev, 0, 0);
if (phy->rev >= 7)
- ; /* TODO: Override Rev7 with 0x2000, 0, 3, 1, 0 as arguments */
+ b43_nphy_rf_control_override_rev7(dev, 0x2000, 0, 3, true, 0);
else if (phy->rev >= 3)
b43_nphy_rf_control_override(dev, 0x2000, 0, 3, true);
@@ -4761,6 +5343,7 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
nphy->spur_avoid = (phy->rev >= 3) ?
B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE;
+ nphy->init_por = true;
nphy->gain_boost = true; /* this way we follow wl, assume it is true */
nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
@@ -4801,6 +5384,8 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2;
nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2;
}
+
+ nphy->init_por = true;
}
static void b43_nphy_op_free(struct b43_wldev *dev)
@@ -4887,7 +5472,9 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
if (blocked) {
b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
~B43_NPHY_RFCTL_CMD_CHIP0PU);
- if (dev->phy.rev >= 3) {
+ if (dev->phy.rev >= 7) {
+ /* TODO */
+ } else if (dev->phy.rev >= 3) {
b43_radio_mask(dev, 0x09, ~0x2);
b43_radio_write(dev, 0x204D, 0);
@@ -4905,7 +5492,10 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
b43_radio_write(dev, 0x3064, 0);
}
} else {
- if (dev->phy.rev >= 3) {
+ if (dev->phy.rev >= 7) {
+ b43_radio_2057_init(dev);
+ b43_switch_channel(dev, dev->phy.channel);
+ } else if (dev->phy.rev >= 3) {
b43_radio_init2056(dev);
b43_switch_channel(dev, dev->phy.channel);
} else {
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index fd12b386fea1..092c0140c249 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -785,6 +785,7 @@ struct b43_phy_n {
u16 papd_epsilon_offset[2];
s32 preamble_override;
u32 bb_mult_save;
+ bool init_por;
bool gain_boost;
bool elna_gain_config;
diff --git a/drivers/net/wireless/b43/radio_2057.c b/drivers/net/wireless/b43/radio_2057.c
new file mode 100644
index 000000000000..d61d6830c5c7
--- /dev/null
+++ b/drivers/net/wireless/b43/radio_2057.c
@@ -0,0 +1,141 @@
+/*
+
+ Broadcom B43 wireless driver
+ IEEE 802.11n 2057 radio device data tables
+
+ Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA.
+
+*/
+
+#include "b43.h"
+#include "radio_2057.h"
+#include "phy_common.h"
+
+static u16 r2057_rev4_init[42][2] = {
+ { 0x0E, 0x20 }, { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 },
+ { 0x35, 0x26 }, { 0x3C, 0xff }, { 0x3D, 0xff }, { 0x3E, 0xff },
+ { 0x3F, 0xff }, { 0x62, 0x33 }, { 0x8A, 0xf0 }, { 0x8B, 0x10 },
+ { 0x8C, 0xf0 }, { 0x91, 0x3f }, { 0x92, 0x36 }, { 0xA4, 0x8c },
+ { 0xA8, 0x55 }, { 0xAF, 0x01 }, { 0x10F, 0xf0 }, { 0x110, 0x10 },
+ { 0x111, 0xf0 }, { 0x116, 0x3f }, { 0x117, 0x36 }, { 0x129, 0x8c },
+ { 0x12D, 0x55 }, { 0x134, 0x01 }, { 0x15E, 0x00 }, { 0x15F, 0x00 },
+ { 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 },
+ { 0x169, 0x02 }, { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 },
+ { 0x1A4, 0x00 }, { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 },
+ { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
+};
+
+static u16 r2057_rev5_init[44][2] = {
+ { 0x00, 0x00 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x23, 0x6 },
+ { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 },
+ { 0x59, 0x88 }, { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f },
+ { 0x64, 0x0f }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
+ { 0xA1, 0x20 }, { 0xD6, 0x70 }, { 0xDE, 0x88 }, { 0xE1, 0x20 },
+ { 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0x106, 0x01 }, { 0x116, 0x3f },
+ { 0x117, 0x36 }, { 0x126, 0x20 }, { 0x15E, 0x00 }, { 0x15F, 0x00 },
+ { 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 },
+ { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 },
+ { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 },
+ { 0x1AC, 0x00 }, { 0x1B7, 0x0c }, { 0x1C1, 0x01 }, { 0x1C2, 0x80 },
+};
+
+static u16 r2057_rev5a_init[45][2] = {
+ { 0x00, 0x15 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x23, 0x6 },
+ { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 },
+ { 0x59, 0x88 }, { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f },
+ { 0x64, 0x0f }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
+ { 0xC9, 0x01 }, { 0xD6, 0x70 }, { 0xDE, 0x88 }, { 0xE1, 0x20 },
+ { 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0x106, 0x01 }, { 0x116, 0x3f },
+ { 0x117, 0x36 }, { 0x126, 0x20 }, { 0x14E, 0x01 }, { 0x15E, 0x00 },
+ { 0x15F, 0x00 }, { 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 },
+ { 0x163, 0x00 }, { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 },
+ { 0x1A4, 0x00 }, { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 },
+ { 0x1AB, 0x00 }, { 0x1AC, 0x00 }, { 0x1B7, 0x0c }, { 0x1C1, 0x01 },
+ { 0x1C2, 0x80 },
+};
+
+static u16 r2057_rev7_init[54][2] = {
+ { 0x00, 0x00 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x31, 0x00 },
+ { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 }, { 0x59, 0x88 },
+ { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, { 0x64, 0x13 },
+ { 0x66, 0xee }, { 0x6E, 0x58 }, { 0x75, 0x13 }, { 0x7B, 0x13 },
+ { 0x7C, 0x14 }, { 0x7D, 0xee }, { 0x81, 0x01 }, { 0x91, 0x3f },
+ { 0x92, 0x36 }, { 0xA1, 0x20 }, { 0xD6, 0x70 }, { 0xDE, 0x88 },
+ { 0xE1, 0x20 }, { 0xE8, 0x0f }, { 0xE9, 0x13 }, { 0xEB, 0xee },
+ { 0xF3, 0x58 }, { 0xFA, 0x13 }, { 0x100, 0x13 }, { 0x101, 0x14 },
+ { 0x102, 0xee }, { 0x106, 0x01 }, { 0x116, 0x3f }, { 0x117, 0x36 },
+ { 0x126, 0x20 }, { 0x15E, 0x00 }, { 0x15F, 0x00 }, { 0x160, 0x00 },
+ { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 }, { 0x16A, 0x00 },
+ { 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 }, { 0x1A5, 0x00 },
+ { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
+ { 0x1B7, 0x05 }, { 0x1C2, 0xa0 },
+};
+
+static u16 r2057_rev8_init[54][2] = {
+ { 0x00, 0x08 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x31, 0x00 },
+ { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 }, { 0x59, 0x88 },
+ { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, { 0x64, 0x0f },
+ { 0x6E, 0x58 }, { 0x75, 0x13 }, { 0x7B, 0x13 }, { 0x7C, 0x0f },
+ { 0x7D, 0xee }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 },
+ { 0xA1, 0x20 }, { 0xC9, 0x01 }, { 0xD6, 0x70 }, { 0xDE, 0x88 },
+ { 0xE1, 0x20 }, { 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0xF3, 0x58 },
+ { 0xFA, 0x13 }, { 0x100, 0x13 }, { 0x101, 0x0f }, { 0x102, 0xee },
+ { 0x106, 0x01 }, { 0x116, 0x3f }, { 0x117, 0x36 }, { 0x126, 0x20 },
+ { 0x14E, 0x01 }, { 0x15E, 0x00 }, { 0x15F, 0x00 }, { 0x160, 0x00 },
+ { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 }, { 0x16A, 0x00 },
+ { 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 }, { 0x1A5, 0x00 },
+ { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
+ { 0x1B7, 0x05 }, { 0x1C2, 0xa0 },
+};
+
+void r2057_upload_inittabs(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+ u16 *table = NULL;
+ u16 size, i;
+
+ if (phy->rev == 7) {
+ table = r2057_rev4_init[0];
+ size = ARRAY_SIZE(r2057_rev4_init);
+ } else if (phy->rev == 8 || phy->rev == 9) {
+ if (phy->radio_rev == 5) {
+ if (phy->rev == 8) {
+ table = r2057_rev5_init[0];
+ size = ARRAY_SIZE(r2057_rev5_init);
+ } else {
+ table = r2057_rev5a_init[0];
+ size = ARRAY_SIZE(r2057_rev5a_init);
+ }
+ } else if (phy->radio_rev == 7) {
+ table = r2057_rev7_init[0];
+ size = ARRAY_SIZE(r2057_rev7_init);
+ } else if (phy->radio_rev == 9) {
+ table = r2057_rev8_init[0];
+ size = ARRAY_SIZE(r2057_rev8_init);
+ }
+ }
+
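+ /* For now just dump the reg/value pairs; no radio writes are performed yet */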
+ if (table) {
+ for (i = 0; i < size; i++) {
+ pr_info("radio_write 0x%X 0x%X\n", table[0], table[1]);
+ table += 2;
+ }
+ }
+}
diff --git a/drivers/net/wireless/b43/radio_2057.h b/drivers/net/wireless/b43/radio_2057.h
new file mode 100644
index 000000000000..eeebd8fbeb0d
--- /dev/null
+++ b/drivers/net/wireless/b43/radio_2057.h
@@ -0,0 +1,430 @@
+#ifndef B43_RADIO_2057_H_
+#define B43_RADIO_2057_H_
+
+#include <linux/types.h>
+
+#include "tables_nphy.h"
+
+#define R2057_DACBUF_VINCM_CORE0 0x000
+#define R2057_IDCODE 0x001
+#define R2057_RCCAL_MASTER 0x002
+#define R2057_RCCAL_CAP_SIZE 0x003
+#define R2057_RCAL_CONFIG 0x004
+#define R2057_GPAIO_CONFIG 0x005
+#define R2057_GPAIO_SEL1 0x006
+#define R2057_GPAIO_SEL0 0x007
+#define R2057_CLPO_CONFIG 0x008
+#define R2057_BANDGAP_CONFIG 0x009
+#define R2057_BANDGAP_RCAL_TRIM 0x00a
+#define R2057_AFEREG_CONFIG 0x00b
+#define R2057_TEMPSENSE_CONFIG 0x00c
+#define R2057_XTAL_CONFIG1 0x00d
+#define R2057_XTAL_ICORE_SIZE 0x00e
+#define R2057_XTAL_BUF_SIZE 0x00f
+#define R2057_XTAL_PULLCAP_SIZE 0x010
+#define R2057_RFPLL_MASTER 0x011
+#define R2057_VCOMONITOR_VTH_L 0x012
+#define R2057_VCOMONITOR_VTH_H 0x013
+#define R2057_VCOCAL_BIASRESET_RFPLLREG_VOUT 0x014
+#define R2057_VCO_VARCSIZE_IDAC 0x015
+#define R2057_VCOCAL_COUNTVAL0 0x016
+#define R2057_VCOCAL_COUNTVAL1 0x017
+#define R2057_VCOCAL_INTCLK_COUNT 0x018
+#define R2057_VCOCAL_MASTER 0x019
+#define R2057_VCOCAL_NUMCAPCHANGE 0x01a
+#define R2057_VCOCAL_WINSIZE 0x01b
+#define R2057_VCOCAL_DELAY_AFTER_REFRESH 0x01c
+#define R2057_VCOCAL_DELAY_AFTER_CLOSELOOP 0x01d
+#define R2057_VCOCAL_DELAY_AFTER_OPENLOOP 0x01e
+#define R2057_VCOCAL_DELAY_BEFORE_OPENLOOP 0x01f
+#define R2057_VCO_FORCECAPEN_FORCECAP1 0x020
+#define R2057_VCO_FORCECAP0 0x021
+#define R2057_RFPLL_REFMASTER_SPAREXTALSIZE 0x022
+#define R2057_RFPLL_PFD_RESET_PW 0x023
+#define R2057_RFPLL_LOOPFILTER_R2 0x024
+#define R2057_RFPLL_LOOPFILTER_R1 0x025
+#define R2057_RFPLL_LOOPFILTER_C3 0x026
+#define R2057_RFPLL_LOOPFILTER_C2 0x027
+#define R2057_RFPLL_LOOPFILTER_C1 0x028
+#define R2057_CP_KPD_IDAC 0x029
+#define R2057_RFPLL_IDACS 0x02a
+#define R2057_RFPLL_MISC_EN 0x02b
+#define R2057_RFPLL_MMD0 0x02c
+#define R2057_RFPLL_MMD1 0x02d
+#define R2057_RFPLL_MISC_CAL_RESETN 0x02e
+#define R2057_JTAGXTAL_SIZE_CPBIAS_FILTRES 0x02f
+#define R2057_VCO_ALCREF_BBPLLXTAL_SIZE 0x030
+#define R2057_VCOCAL_READCAP0 0x031
+#define R2057_VCOCAL_READCAP1 0x032
+#define R2057_VCOCAL_STATUS 0x033
+#define R2057_LOGEN_PUS 0x034
+#define R2057_LOGEN_PTAT_RESETS 0x035
+#define R2057_VCOBUF_IDACS 0x036
+#define R2057_VCOBUF_TUNE 0x037
+#define R2057_CMOSBUF_TX2GQ_IDACS 0x038
+#define R2057_CMOSBUF_TX2GI_IDACS 0x039
+#define R2057_CMOSBUF_TX5GQ_IDACS 0x03a
+#define R2057_CMOSBUF_TX5GI_IDACS 0x03b
+#define R2057_CMOSBUF_RX2GQ_IDACS 0x03c
+#define R2057_CMOSBUF_RX2GI_IDACS 0x03d
+#define R2057_CMOSBUF_RX5GQ_IDACS 0x03e
+#define R2057_CMOSBUF_RX5GI_IDACS 0x03f
+#define R2057_LOGEN_MX2G_IDACS 0x040
+#define R2057_LOGEN_MX2G_TUNE 0x041
+#define R2057_LOGEN_MX5G_IDACS 0x042
+#define R2057_LOGEN_MX5G_TUNE 0x043
+#define R2057_LOGEN_MX5G_RCCR 0x044
+#define R2057_LOGEN_INDBUF2G_IDAC 0x045
+#define R2057_LOGEN_INDBUF2G_IBOOST 0x046
+#define R2057_LOGEN_INDBUF2G_TUNE 0x047
+#define R2057_LOGEN_INDBUF5G_IDAC 0x048
+#define R2057_LOGEN_INDBUF5G_IBOOST 0x049
+#define R2057_LOGEN_INDBUF5G_TUNE 0x04a
+#define R2057_CMOSBUF_TX_RCCR 0x04b
+#define R2057_CMOSBUF_RX_RCCR 0x04c
+#define R2057_LOGEN_SEL_PKDET 0x04d
+#define R2057_CMOSBUF_SHAREIQ_PTAT 0x04e
+#define R2057_RXTXBIAS_CONFIG_CORE0 0x04f
+#define R2057_TXGM_TXRF_PUS_CORE0 0x050
+#define R2057_TXGM_IDAC_BLEED_CORE0 0x051
+#define R2057_TXGM_GAIN_CORE0 0x056
+#define R2057_TXGM2G_PKDET_PUS_CORE0 0x057
+#define R2057_PAD2G_PTATS_CORE0 0x058
+#define R2057_PAD2G_IDACS_CORE0 0x059
+#define R2057_PAD2G_BOOST_PU_CORE0 0x05a
+#define R2057_PAD2G_CASCV_GAIN_CORE0 0x05b
+#define R2057_TXMIX2G_TUNE_BOOST_PU_CORE0 0x05c
+#define R2057_TXMIX2G_LODC_CORE0 0x05d
+#define R2057_PAD2G_TUNE_PUS_CORE0 0x05e
+#define R2057_IPA2G_GAIN_CORE0 0x05f
+#define R2057_TSSI2G_SPARE1_CORE0 0x060
+#define R2057_TSSI2G_SPARE2_CORE0 0x061
+#define R2057_IPA2G_TUNEV_CASCV_PTAT_CORE0 0x062
+#define R2057_IPA2G_IMAIN_CORE0 0x063
+#define R2057_IPA2G_CASCONV_CORE0 0x064
+#define R2057_IPA2G_CASCOFFV_CORE0 0x065
+#define R2057_IPA2G_BIAS_FILTER_CORE0 0x066
+#define R2057_TX5G_PKDET_CORE0 0x069
+#define R2057_PGA_PTAT_TXGM5G_PU_CORE0 0x06a
+#define R2057_PAD5G_PTATS1_CORE0 0x06b
+#define R2057_PAD5G_CLASS_PTATS2_CORE0 0x06c
+#define R2057_PGA_BOOSTPTAT_IMAIN_CORE0 0x06d
+#define R2057_PAD5G_CASCV_IMAIN_CORE0 0x06e
+#define R2057_TXMIX5G_IBOOST_PAD_IAUX_CORE0 0x06f
+#define R2057_PGA_BOOST_TUNE_CORE0 0x070
+#define R2057_PGA_GAIN_CORE0 0x071
+#define R2057_PAD5G_CASCOFFV_GAIN_PUS_CORE0 0x072
+#define R2057_TXMIX5G_BOOST_TUNE_CORE0 0x073
+#define R2057_PAD5G_TUNE_MISC_PUS_CORE0 0x074
+#define R2057_IPA5G_IAUX_CORE0 0x075
+#define R2057_IPA5G_GAIN_CORE0 0x076
+#define R2057_TSSI5G_SPARE1_CORE0 0x077
+#define R2057_TSSI5G_SPARE2_CORE0 0x078
+#define R2057_IPA5G_CASCOFFV_PU_CORE0 0x079
+#define R2057_IPA5G_PTAT_CORE0 0x07a
+#define R2057_IPA5G_IMAIN_CORE0 0x07b
+#define R2057_IPA5G_CASCONV_CORE0 0x07c
+#define R2057_IPA5G_BIAS_FILTER_CORE0 0x07d
+#define R2057_PAD_BIAS_FILTER_BWS_CORE0 0x080
+#define R2057_TR2G_CONFIG1_CORE0_NU 0x081
+#define R2057_TR2G_CONFIG2_CORE0_NU 0x082
+#define R2057_LNA5G_RFEN_CORE0 0x083
+#define R2057_TR5G_CONFIG2_CORE0_NU 0x084
+#define R2057_RXRFBIAS_IBOOST_PU_CORE0 0x085
+#define R2057_RXRF_IABAND_RXGM_IMAIN_PTAT_CORE0 0x086
+#define R2057_RXGM_CMFBITAIL_AUXPTAT_CORE0 0x087
+#define R2057_RXMIX_ICORE_RXGM_IAUX_CORE0 0x088
+#define R2057_RXMIX_CMFBITAIL_PU_CORE0 0x089
+#define R2057_LNA2_IMAIN_PTAT_PU_CORE0 0x08a
+#define R2057_LNA2_IAUX_PTAT_CORE0 0x08b
+#define R2057_LNA1_IMAIN_PTAT_PU_CORE0 0x08c
+#define R2057_LNA15G_INPUT_MATCH_TUNE_CORE0 0x08d
+#define R2057_RXRFBIAS_BANDSEL_CORE0 0x08e
+#define R2057_TIA_CONFIG_CORE0 0x08f
+#define R2057_TIA_IQGAIN_CORE0 0x090
+#define R2057_TIA_IBIAS2_CORE0 0x091
+#define R2057_TIA_IBIAS1_CORE0 0x092
+#define R2057_TIA_SPARE_Q_CORE0 0x093
+#define R2057_TIA_SPARE_I_CORE0 0x094
+#define R2057_RXMIX2G_PUS_CORE0 0x095
+#define R2057_RXMIX2G_VCMREFS_CORE0 0x096
+#define R2057_RXMIX2G_LODC_QI_CORE0 0x097
+#define R2057_W12G_BW_LNA2G_PUS_CORE0 0x098
+#define R2057_LNA2G_GAIN_CORE0 0x099
+#define R2057_LNA2G_TUNE_CORE0 0x09a
+#define R2057_RXMIX5G_PUS_CORE0 0x09b
+#define R2057_RXMIX5G_VCMREFS_CORE0 0x09c
+#define R2057_RXMIX5G_LODC_QI_CORE0 0x09d
+#define R2057_W15G_BW_LNA5G_PUS_CORE0 0x09e
+#define R2057_LNA5G_GAIN_CORE0 0x09f
+#define R2057_LNA5G_TUNE_CORE0 0x0a0
+#define R2057_LPFSEL_TXRX_RXBB_PUS_CORE0 0x0a1
+#define R2057_RXBB_BIAS_MASTER_CORE0 0x0a2
+#define R2057_RXBB_VGABUF_IDACS_CORE0 0x0a3
+#define R2057_LPF_VCMREF_TXBUF_VCMREF_CORE0 0x0a4
+#define R2057_TXBUF_VINCM_CORE0 0x0a5
+#define R2057_TXBUF_IDACS_CORE0 0x0a6
+#define R2057_LPF_RESP_RXBUF_BW_CORE0 0x0a7
+#define R2057_RXBB_CC_CORE0 0x0a8
+#define R2057_RXBB_SPARE3_CORE0 0x0a9
+#define R2057_RXBB_RCCAL_HPC_CORE0 0x0aa
+#define R2057_LPF_IDACS_CORE0 0x0ab
+#define R2057_LPFBYP_DCLOOP_BYP_IDAC_CORE0 0x0ac
+#define R2057_TXBUF_GAIN_CORE0 0x0ad
+#define R2057_AFELOOPBACK_AACI_RESP_CORE0 0x0ae
+#define R2057_RXBUF_DEGEN_CORE0 0x0af
+#define R2057_RXBB_SPARE2_CORE0 0x0b0
+#define R2057_RXBB_SPARE1_CORE0 0x0b1
+#define R2057_RSSI_MASTER_CORE0 0x0b2
+#define R2057_W2_MASTER_CORE0 0x0b3
+#define R2057_NB_MASTER_CORE0 0x0b4
+#define R2057_W2_IDACS0_Q_CORE0 0x0b5
+#define R2057_W2_IDACS1_Q_CORE0 0x0b6
+#define R2057_W2_IDACS0_I_CORE0 0x0b7
+#define R2057_W2_IDACS1_I_CORE0 0x0b8
+#define R2057_RSSI_GPAIOSEL_W1_IDACS_CORE0 0x0b9
+#define R2057_NB_IDACS_Q_CORE0 0x0ba
+#define R2057_NB_IDACS_I_CORE0 0x0bb
+#define R2057_BACKUP4_CORE0 0x0c1
+#define R2057_BACKUP3_CORE0 0x0c2
+#define R2057_BACKUP2_CORE0 0x0c3
+#define R2057_BACKUP1_CORE0 0x0c4
+#define R2057_SPARE16_CORE0 0x0c5
+#define R2057_SPARE15_CORE0 0x0c6
+#define R2057_SPARE14_CORE0 0x0c7
+#define R2057_SPARE13_CORE0 0x0c8
+#define R2057_SPARE12_CORE0 0x0c9
+#define R2057_SPARE11_CORE0 0x0ca
+#define R2057_TX2G_BIAS_RESETS_CORE0 0x0cb
+#define R2057_TX5G_BIAS_RESETS_CORE0 0x0cc
+#define R2057_IQTEST_SEL_PU 0x0cd
+#define R2057_XTAL_CONFIG2 0x0ce
+#define R2057_BUFS_MISC_LPFBW_CORE0 0x0cf
+#define R2057_TXLPF_RCCAL_CORE0 0x0d0
+#define R2057_RXBB_GPAIOSEL_RXLPF_RCCAL_CORE0 0x0d1
+#define R2057_LPF_GAIN_CORE0 0x0d2
+#define R2057_DACBUF_IDACS_BW_CORE0 0x0d3
+#define R2057_RXTXBIAS_CONFIG_CORE1 0x0d4
+#define R2057_TXGM_TXRF_PUS_CORE1 0x0d5
+#define R2057_TXGM_IDAC_BLEED_CORE1 0x0d6
+#define R2057_TXGM_GAIN_CORE1 0x0db
+#define R2057_TXGM2G_PKDET_PUS_CORE1 0x0dc
+#define R2057_PAD2G_PTATS_CORE1 0x0dd
+#define R2057_PAD2G_IDACS_CORE1 0x0de
+#define R2057_PAD2G_BOOST_PU_CORE1 0x0df
+#define R2057_PAD2G_CASCV_GAIN_CORE1 0x0e0
+#define R2057_TXMIX2G_TUNE_BOOST_PU_CORE1 0x0e1
+#define R2057_TXMIX2G_LODC_CORE1 0x0e2
+#define R2057_PAD2G_TUNE_PUS_CORE1 0x0e3
+#define R2057_IPA2G_GAIN_CORE1 0x0e4
+#define R2057_TSSI2G_SPARE1_CORE1 0x0e5
+#define R2057_TSSI2G_SPARE2_CORE1 0x0e6
+#define R2057_IPA2G_TUNEV_CASCV_PTAT_CORE1 0x0e7
+#define R2057_IPA2G_IMAIN_CORE1 0x0e8
+#define R2057_IPA2G_CASCONV_CORE1 0x0e9
+#define R2057_IPA2G_CASCOFFV_CORE1 0x0ea
+#define R2057_IPA2G_BIAS_FILTER_CORE1 0x0eb
+#define R2057_TX5G_PKDET_CORE1 0x0ee
+#define R2057_PGA_PTAT_TXGM5G_PU_CORE1 0x0ef
+#define R2057_PAD5G_PTATS1_CORE1 0x0f0
+#define R2057_PAD5G_CLASS_PTATS2_CORE1 0x0f1
+#define R2057_PGA_BOOSTPTAT_IMAIN_CORE1 0x0f2
+#define R2057_PAD5G_CASCV_IMAIN_CORE1 0x0f3
+#define R2057_TXMIX5G_IBOOST_PAD_IAUX_CORE1 0x0f4
+#define R2057_PGA_BOOST_TUNE_CORE1 0x0f5
+#define R2057_PGA_GAIN_CORE1 0x0f6
+#define R2057_PAD5G_CASCOFFV_GAIN_PUS_CORE1 0x0f7
+#define R2057_TXMIX5G_BOOST_TUNE_CORE1 0x0f8
+#define R2057_PAD5G_TUNE_MISC_PUS_CORE1 0x0f9
+#define R2057_IPA5G_IAUX_CORE1 0x0fa
+#define R2057_IPA5G_GAIN_CORE1 0x0fb
+#define R2057_TSSI5G_SPARE1_CORE1 0x0fc
+#define R2057_TSSI5G_SPARE2_CORE1 0x0fd
+#define R2057_IPA5G_CASCOFFV_PU_CORE1 0x0fe
+#define R2057_IPA5G_PTAT_CORE1 0x0ff
+#define R2057_IPA5G_IMAIN_CORE1 0x100
+#define R2057_IPA5G_CASCONV_CORE1 0x101
+#define R2057_IPA5G_BIAS_FILTER_CORE1 0x102
+#define R2057_PAD_BIAS_FILTER_BWS_CORE1 0x105
+#define R2057_TR2G_CONFIG1_CORE1_NU 0x106
+#define R2057_TR2G_CONFIG2_CORE1_NU 0x107
+#define R2057_LNA5G_RFEN_CORE1 0x108
+#define R2057_TR5G_CONFIG2_CORE1_NU 0x109
+#define R2057_RXRFBIAS_IBOOST_PU_CORE1 0x10a
+#define R2057_RXRF_IABAND_RXGM_IMAIN_PTAT_CORE1 0x10b
+#define R2057_RXGM_CMFBITAIL_AUXPTAT_CORE1 0x10c
+#define R2057_RXMIX_ICORE_RXGM_IAUX_CORE1 0x10d
+#define R2057_RXMIX_CMFBITAIL_PU_CORE1 0x10e
+#define R2057_LNA2_IMAIN_PTAT_PU_CORE1 0x10f
+#define R2057_LNA2_IAUX_PTAT_CORE1 0x110
+#define R2057_LNA1_IMAIN_PTAT_PU_CORE1 0x111
+#define R2057_LNA15G_INPUT_MATCH_TUNE_CORE1 0x112
+#define R2057_RXRFBIAS_BANDSEL_CORE1 0x113
+#define R2057_TIA_CONFIG_CORE1 0x114
+#define R2057_TIA_IQGAIN_CORE1 0x115
+#define R2057_TIA_IBIAS2_CORE1 0x116
+#define R2057_TIA_IBIAS1_CORE1 0x117
+#define R2057_TIA_SPARE_Q_CORE1 0x118
+#define R2057_TIA_SPARE_I_CORE1 0x119
+#define R2057_RXMIX2G_PUS_CORE1 0x11a
+#define R2057_RXMIX2G_VCMREFS_CORE1 0x11b
+#define R2057_RXMIX2G_LODC_QI_CORE1 0x11c
+#define R2057_W12G_BW_LNA2G_PUS_CORE1 0x11d
+#define R2057_LNA2G_GAIN_CORE1 0x11e
+#define R2057_LNA2G_TUNE_CORE1 0x11f
+#define R2057_RXMIX5G_PUS_CORE1 0x120
+#define R2057_RXMIX5G_VCMREFS_CORE1 0x121
+#define R2057_RXMIX5G_LODC_QI_CORE1 0x122
+#define R2057_W15G_BW_LNA5G_PUS_CORE1 0x123
+#define R2057_LNA5G_GAIN_CORE1 0x124
+#define R2057_LNA5G_TUNE_CORE1 0x125
+#define R2057_LPFSEL_TXRX_RXBB_PUS_CORE1 0x126
+#define R2057_RXBB_BIAS_MASTER_CORE1 0x127
+#define R2057_RXBB_VGABUF_IDACS_CORE1 0x128
+#define R2057_LPF_VCMREF_TXBUF_VCMREF_CORE1 0x129
+#define R2057_TXBUF_VINCM_CORE1 0x12a
+#define R2057_TXBUF_IDACS_CORE1 0x12b
+#define R2057_LPF_RESP_RXBUF_BW_CORE1 0x12c
+#define R2057_RXBB_CC_CORE1 0x12d
+#define R2057_RXBB_SPARE3_CORE1 0x12e
+#define R2057_RXBB_RCCAL_HPC_CORE1 0x12f
+#define R2057_LPF_IDACS_CORE1 0x130
+#define R2057_LPFBYP_DCLOOP_BYP_IDAC_CORE1 0x131
+#define R2057_TXBUF_GAIN_CORE1 0x132
+#define R2057_AFELOOPBACK_AACI_RESP_CORE1 0x133
+#define R2057_RXBUF_DEGEN_CORE1 0x134
+#define R2057_RXBB_SPARE2_CORE1 0x135
+#define R2057_RXBB_SPARE1_CORE1 0x136
+#define R2057_RSSI_MASTER_CORE1 0x137
+#define R2057_W2_MASTER_CORE1 0x138
+#define R2057_NB_MASTER_CORE1 0x139
+#define R2057_W2_IDACS0_Q_CORE1 0x13a
+#define R2057_W2_IDACS1_Q_CORE1 0x13b
+#define R2057_W2_IDACS0_I_CORE1 0x13c
+#define R2057_W2_IDACS1_I_CORE1 0x13d
+#define R2057_RSSI_GPAIOSEL_W1_IDACS_CORE1 0x13e
+#define R2057_NB_IDACS_Q_CORE1 0x13f
+#define R2057_NB_IDACS_I_CORE1 0x140
+#define R2057_BACKUP4_CORE1 0x146
+#define R2057_BACKUP3_CORE1 0x147
+#define R2057_BACKUP2_CORE1 0x148
+#define R2057_BACKUP1_CORE1 0x149
+#define R2057_SPARE16_CORE1 0x14a
+#define R2057_SPARE15_CORE1 0x14b
+#define R2057_SPARE14_CORE1 0x14c
+#define R2057_SPARE13_CORE1 0x14d
+#define R2057_SPARE12_CORE1 0x14e
+#define R2057_SPARE11_CORE1 0x14f
+#define R2057_TX2G_BIAS_RESETS_CORE1 0x150
+#define R2057_TX5G_BIAS_RESETS_CORE1 0x151
+#define R2057_SPARE8_CORE1 0x152
+#define R2057_SPARE7_CORE1 0x153
+#define R2057_BUFS_MISC_LPFBW_CORE1 0x154
+#define R2057_TXLPF_RCCAL_CORE1 0x155
+#define R2057_RXBB_GPAIOSEL_RXLPF_RCCAL_CORE1 0x156
+#define R2057_LPF_GAIN_CORE1 0x157
+#define R2057_DACBUF_IDACS_BW_CORE1 0x158
+#define R2057_DACBUF_VINCM_CORE1 0x159
+#define R2057_RCCAL_START_R1_Q1_P1 0x15a
+#define R2057_RCCAL_X1 0x15b
+#define R2057_RCCAL_TRC0 0x15c
+#define R2057_RCCAL_TRC1 0x15d
+#define R2057_RCCAL_DONE_OSCCAP 0x15e
+#define R2057_RCCAL_N0_0 0x15f
+#define R2057_RCCAL_N0_1 0x160
+#define R2057_RCCAL_N1_0 0x161
+#define R2057_RCCAL_N1_1 0x162
+#define R2057_RCAL_STATUS 0x163
+#define R2057_XTALPUOVR_PINCTRL 0x164
+#define R2057_OVR_REG0 0x165
+#define R2057_OVR_REG1 0x166
+#define R2057_OVR_REG2 0x167
+#define R2057_OVR_REG3 0x168
+#define R2057_OVR_REG4 0x169
+#define R2057_RCCAL_SCAP_VAL 0x16a
+#define R2057_RCCAL_BCAP_VAL 0x16b
+#define R2057_RCCAL_HPC_VAL 0x16c
+#define R2057_RCCAL_OVERRIDES 0x16d
+#define R2057_TX0_IQCAL_GAIN_BW 0x170
+#define R2057_TX0_LOFT_FINE_I 0x171
+#define R2057_TX0_LOFT_FINE_Q 0x172
+#define R2057_TX0_LOFT_COARSE_I 0x173
+#define R2057_TX0_LOFT_COARSE_Q 0x174
+#define R2057_TX0_TX_SSI_MASTER 0x175
+#define R2057_TX0_IQCAL_VCM_HG 0x176
+#define R2057_TX0_IQCAL_IDAC 0x177
+#define R2057_TX0_TSSI_VCM 0x178
+#define R2057_TX0_TX_SSI_MUX 0x179
+#define R2057_TX0_TSSIA 0x17a
+#define R2057_TX0_TSSIG 0x17b
+#define R2057_TX0_TSSI_MISC1 0x17c
+#define R2057_TX0_TXRXCOUPLE_2G_ATTEN 0x17d
+#define R2057_TX0_TXRXCOUPLE_2G_PWRUP 0x17e
+#define R2057_TX0_TXRXCOUPLE_5G_ATTEN 0x17f
+#define R2057_TX0_TXRXCOUPLE_5G_PWRUP 0x180
+#define R2057_TX1_IQCAL_GAIN_BW 0x190
+#define R2057_TX1_LOFT_FINE_I 0x191
+#define R2057_TX1_LOFT_FINE_Q 0x192
+#define R2057_TX1_LOFT_COARSE_I 0x193
+#define R2057_TX1_LOFT_COARSE_Q 0x194
+#define R2057_TX1_TX_SSI_MASTER 0x195
+#define R2057_TX1_IQCAL_VCM_HG 0x196
+#define R2057_TX1_IQCAL_IDAC 0x197
+#define R2057_TX1_TSSI_VCM 0x198
+#define R2057_TX1_TX_SSI_MUX 0x199
+#define R2057_TX1_TSSIA 0x19a
+#define R2057_TX1_TSSIG 0x19b
+#define R2057_TX1_TSSI_MISC1 0x19c
+#define R2057_TX1_TXRXCOUPLE_2G_ATTEN 0x19d
+#define R2057_TX1_TXRXCOUPLE_2G_PWRUP 0x19e
+#define R2057_TX1_TXRXCOUPLE_5G_ATTEN 0x19f
+#define R2057_TX1_TXRXCOUPLE_5G_PWRUP 0x1a0
+#define R2057_AFE_VCM_CAL_MASTER_CORE0 0x1a1
+#define R2057_AFE_SET_VCM_I_CORE0 0x1a2
+#define R2057_AFE_SET_VCM_Q_CORE0 0x1a3
+#define R2057_AFE_STATUS_VCM_IQADC_CORE0 0x1a4
+#define R2057_AFE_STATUS_VCM_I_CORE0 0x1a5
+#define R2057_AFE_STATUS_VCM_Q_CORE0 0x1a6
+#define R2057_AFE_VCM_CAL_MASTER_CORE1 0x1a7
+#define R2057_AFE_SET_VCM_I_CORE1 0x1a8
+#define R2057_AFE_SET_VCM_Q_CORE1 0x1a9
+#define R2057_AFE_STATUS_VCM_IQADC_CORE1 0x1aa
+#define R2057_AFE_STATUS_VCM_I_CORE1 0x1ab
+#define R2057_AFE_STATUS_VCM_Q_CORE1 0x1ac
+
+#define R2057v7_DACBUF_VINCM_CORE0 0x1ad
+#define R2057v7_RCCAL_MASTER 0x1ae
+#define R2057v7_TR2G_CONFIG3_CORE0_NU 0x1af
+#define R2057v7_TR2G_CONFIG3_CORE1_NU 0x1b0
+#define R2057v7_LOGEN_PUS1 0x1b1
+#define R2057v7_OVR_REG5 0x1b2
+#define R2057v7_OVR_REG6 0x1b3
+#define R2057v7_OVR_REG7 0x1b4
+#define R2057v7_OVR_REG8 0x1b5
+#define R2057v7_OVR_REG9 0x1b6
+#define R2057v7_OVR_REG10 0x1b7
+#define R2057v7_OVR_REG11 0x1b8
+#define R2057v7_OVR_REG12 0x1b9
+#define R2057v7_OVR_REG13 0x1ba
+#define R2057v7_OVR_REG14 0x1bb
+#define R2057v7_OVR_REG15 0x1bc
+#define R2057v7_OVR_REG16 0x1bd
+#define R2057v7_OVR_REG17 0x1be
+#define R2057v7_OVR_REG18 0x1bf
+#define R2057v7_OVR_REG19 0x1c0
+#define R2057v7_OVR_REG20 0x1c1
+#define R2057v7_OVR_REG21 0x1c2
+#define R2057v7_OVR_REG22 0x1c3
+#define R2057v7_OVR_REG23 0x1c4
+#define R2057v7_OVR_REG24 0x1c5
+#define R2057v7_OVR_REG25 0x1c6
+#define R2057v7_OVR_REG26 0x1c7
+#define R2057v7_OVR_REG27 0x1c8
+#define R2057v7_OVR_REG28 0x1c9
+#define R2057v7_IQTEST_SEL_PU2 0x1ca
+
+#define R2057_VCM_MASK 0x7
+
+void r2057_upload_inittabs(struct b43_wldev *dev);
+
+#endif /* B43_RADIO_2057_H_ */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index f0d8377429c6..97d4e27bf36f 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -2757,6 +2757,49 @@ const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
{ 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */
};
+/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
+static const struct nphy_rf_control_override_rev7
+ tbl_rf_control_override_rev7_over0[] = {
+ { 0x0004, 0x07A, 0x07D, 0x0002, 1 },
+ { 0x0008, 0x07A, 0x07D, 0x0004, 2 },
+ { 0x0010, 0x07A, 0x07D, 0x0010, 4 },
+ { 0x0020, 0x07A, 0x07D, 0x0020, 5 },
+ { 0x0040, 0x07A, 0x07D, 0x0040, 6 },
+ { 0x0080, 0x0F8, 0x0FA, 0x0080, 7 },
+ { 0x0400, 0x0F8, 0x0FA, 0x0070, 4 },
+ { 0x0800, 0x07B, 0x07E, 0xFFFF, 0 },
+ { 0x1000, 0x07C, 0x07F, 0xFFFF, 0 },
+ { 0x6000, 0x348, 0x349, 0xFFFF, 0 },
+ { 0x2000, 0x348, 0x349, 0x000F, 0 },
+};
+
+/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
+static const struct nphy_rf_control_override_rev7
+ tbl_rf_control_override_rev7_over1[] = {
+ { 0x0002, 0x340, 0x341, 0x0002, 1 },
+ { 0x0008, 0x340, 0x341, 0x0008, 3 },
+ { 0x0020, 0x340, 0x341, 0x0020, 5 },
+ { 0x0010, 0x340, 0x341, 0x0010, 4 },
+ { 0x0004, 0x340, 0x341, 0x0004, 2 },
+ { 0x0080, 0x340, 0x341, 0x0700, 8 },
+ { 0x0800, 0x340, 0x341, 0x4000, 14 },
+ { 0x0400, 0x340, 0x341, 0x2000, 13 },
+ { 0x0200, 0x340, 0x341, 0x0800, 12 },
+ { 0x0100, 0x340, 0x341, 0x0100, 11 },
+ { 0x0040, 0x340, 0x341, 0x0040, 6 },
+ { 0x0001, 0x340, 0x341, 0x0001, 0 },
+};
+
+/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */
+static const struct nphy_rf_control_override_rev7
+ tbl_rf_control_override_rev7_over2[] = {
+ { 0x0008, 0x344, 0x345, 0x0008, 3 },
+ { 0x0002, 0x344, 0x345, 0x0002, 1 },
+ { 0x0001, 0x344, 0x345, 0x0001, 0 },
+ { 0x0004, 0x344, 0x345, 0x0004, 2 },
+ { 0x0010, 0x344, 0x345, 0x0010, 4 },
+};
+
struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = {
{ 10, 14, 19, 27 },
{ -5, 6, 10, 15 },
@@ -3248,3 +3291,35 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
return e;
}
+
+const struct nphy_rf_control_override_rev7 *b43_nphy_get_rf_ctl_over_rev7(
+ struct b43_wldev *dev, u16 field, u8 override)
+{
+ const struct nphy_rf_control_override_rev7 *e;
+ u8 size, i;
+
+ switch (override) {
+ case 0:
+ e = tbl_rf_control_override_rev7_over0;
+ size = ARRAY_SIZE(tbl_rf_control_override_rev7_over0);
+ break;
+ case 1:
+ e = tbl_rf_control_override_rev7_over1;
+ size = ARRAY_SIZE(tbl_rf_control_override_rev7_over1);
+ break;
+ case 2:
+ e = tbl_rf_control_override_rev7_over2;
+ size = ARRAY_SIZE(tbl_rf_control_override_rev7_over2);
+ break;
+ default:
+ b43err(dev->wl, "Invalid override value %d\n", override);
+ return NULL;
+ }
+
+ for (i = 0; i < size; i++) {
+ if (e[i].field == field)
+ return &e[i];
+ }
+
+ return NULL;
+}
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index f348953c0230..c600700ceedc 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -35,6 +35,14 @@ struct nphy_rf_control_override_rev3 {
u8 val_addr1;
};
+struct nphy_rf_control_override_rev7 {
+ u16 field;
+ u16 val_addr_core0;
+ u16 val_addr_core1;
+ u16 val_mask;
+ u8 val_shift;
+};
+
struct nphy_gain_ctl_workaround_entry {
s8 lna1_gain[4];
s8 lna2_gain[4];
@@ -202,5 +210,7 @@ extern const struct nphy_rf_control_override_rev2
tbl_rf_control_override_rev2[];
extern const struct nphy_rf_control_override_rev3
tbl_rf_control_override_rev3[];
+const struct nphy_rf_control_override_rev7 *b43_nphy_get_rf_ctl_over_rev7(
+ struct b43_wldev *dev, u16 field, u8 override);
#endif /* B43_TABLES_NPHY_H_ */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 8156135a0590..18e208e3eca1 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1920,7 +1920,7 @@ static int b43legacy_gpio_init(struct b43legacy_wldev *dev)
return 0;
ssb_write32(gpiodev, B43legacy_GPIO_CONTROL,
(ssb_read32(gpiodev, B43legacy_GPIO_CONTROL)
- & mask) | set);
+ & ~mask) | set);
return 0;
}
@@ -2492,6 +2492,7 @@ static void b43legacy_tx_work(struct work_struct *work)
}
static void b43legacy_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
@@ -3894,6 +3895,8 @@ static void b43legacy_remove(struct ssb_device *dev)
cancel_work_sync(&wl->firmware_load);
B43legacy_WARN_ON(!wl);
+ if (!wldev->fw.ucode)
+ return; /* NULL if fw never loaded */
if (wl->current_dev == wldev)
ieee80211_unregister_hw(wl->hw);
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index b480088b3dbe..c9d811eb6556 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -55,6 +55,14 @@ config BRCMFMAC_USB
IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to
use the driver for a USB wireless card.
+config BRCMISCAN
+ bool "Broadcom I-Scan (OBSOLETE)"
+ depends on BRCMFMAC
+ ---help---
+ This option enables the older I-Scan method. By default fullmac uses
+ the newer E-Scan method, which uses less memory in firmware and does
+ not limit the number of scan results.
+
config BRCMDBG
bool "Broadcom driver debug functions"
depends on BRCMSMAC || BRCMFMAC
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 8e7e6928c936..3b2c4c20e7fc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -185,7 +185,7 @@ brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
return err;
}
-static int
+int
brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
void *data, bool write)
{
@@ -249,7 +249,9 @@ u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
int retval;
brcmf_dbg(INFO, "addr:0x%08x\n", addr);
+ sdio_claim_host(sdiodev->func[1]);
retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
+ sdio_release_host(sdiodev->func[1]);
brcmf_dbg(INFO, "data:0x%02x\n", data);
if (ret)
@@ -264,7 +266,9 @@ u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
int retval;
brcmf_dbg(INFO, "addr:0x%08x\n", addr);
+ sdio_claim_host(sdiodev->func[1]);
retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
+ sdio_release_host(sdiodev->func[1]);
brcmf_dbg(INFO, "data:0x%08x\n", data);
if (ret)
@@ -279,7 +283,9 @@ void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
int retval;
brcmf_dbg(INFO, "addr:0x%08x, data:0x%02x\n", addr, data);
+ sdio_claim_host(sdiodev->func[1]);
retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
+ sdio_release_host(sdiodev->func[1]);
if (ret)
*ret = retval;
@@ -291,7 +297,9 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
int retval;
brcmf_dbg(INFO, "addr:0x%08x, data:0x%08x\n", addr, data);
+ sdio_claim_host(sdiodev->func[1]);
retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
+ sdio_release_host(sdiodev->func[1]);
if (ret)
*ret = retval;
@@ -356,15 +364,20 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
fn, addr, pkt->len);
+ sdio_claim_host(sdiodev->func[1]);
+
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
if (err)
- return err;
+ goto done;
incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
fn, addr, pkt);
+done:
+ sdio_release_host(sdiodev->func[1]);
+
return err;
}
@@ -378,15 +391,20 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
fn, addr, pktq->qlen);
+ sdio_claim_host(sdiodev->func[1]);
+
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
if (err)
- return err;
+ goto done;
incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr,
pktq);
+done:
+ sdio_release_host(sdiodev->func[1]);
+
return err;
}
@@ -428,10 +446,12 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
if (flags & SDIO_REQ_ASYNC)
return -ENOTSUPP;
+ sdio_claim_host(sdiodev->func[1]);
+
if (bar0 != sdiodev->sbwad) {
err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
if (err)
- return err;
+ goto done;
sdiodev->sbwad = bar0;
}
@@ -443,8 +463,13 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
if (width == 4)
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
- return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
- addr, pkt);
+ err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
+ addr, pkt);
+
+done:
+ sdio_release_host(sdiodev->func[1]);
+
+ return err;
}
int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
@@ -485,8 +510,10 @@ int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
brcmf_dbg(TRACE, "Enter\n");
/* issue abort cmd52 command through F0 */
+ sdio_claim_host(sdiodev->func[1]);
brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
SDIO_CCCR_ABORT, &t_func);
+ sdio_release_host(sdiodev->func[1]);
brcmf_dbg(TRACE, "Exit\n");
return 0;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 7c4ee72f9d56..c3247d5b3c22 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -42,6 +42,7 @@
#define DMA_ALIGN_MASK 0x03
+#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
@@ -51,6 +52,7 @@
/* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
@@ -101,7 +103,6 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
if (regaddr == SDIO_CCCR_IOEx) {
sdfunc = sdiodev->func[2];
if (sdfunc) {
- sdio_claim_host(sdfunc);
if (*byte & SDIO_FUNC_ENABLE_2) {
/* Enable Function 2 */
err_ret = sdio_enable_func(sdfunc);
@@ -117,7 +118,6 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
"Disable F2 failed:%d\n",
err_ret);
}
- sdio_release_host(sdfunc);
}
} else if ((regaddr == SDIO_CCCR_ABORT) ||
(regaddr == SDIO_CCCR_IENx)) {
@@ -126,17 +126,13 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
if (!sdfunc)
return -ENOMEM;
sdfunc->num = 0;
- sdio_claim_host(sdfunc);
sdio_writeb(sdfunc, *byte, regaddr, &err_ret);
- sdio_release_host(sdfunc);
kfree(sdfunc);
} else if (regaddr < 0xF0) {
brcmf_dbg(ERROR, "F0 Wr:0x%02x: write disallowed\n", regaddr);
err_ret = -EPERM;
} else {
- sdio_claim_host(sdfunc);
sdio_f0_writeb(sdfunc, *byte, regaddr, &err_ret);
- sdio_release_host(sdfunc);
}
return err_ret;
@@ -157,7 +153,6 @@ int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
/* handle F0 separately */
err_ret = brcmf_sdioh_f0_write_byte(sdiodev, regaddr, byte);
} else {
- sdio_claim_host(sdiodev->func[func]);
if (rw) /* CMD52 Write */
sdio_writeb(sdiodev->func[func], *byte, regaddr,
&err_ret);
@@ -168,7 +163,6 @@ int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
*byte = sdio_readb(sdiodev->func[func], regaddr,
&err_ret);
}
- sdio_release_host(sdiodev->func[func]);
}
if (err_ret)
@@ -195,8 +189,6 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
brcmf_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
if (brcmf_pm_resume_error(sdiodev))
return -EIO;
- /* Claim host controller */
- sdio_claim_host(sdiodev->func[func]);
if (rw) { /* CMD52 Write */
if (nbytes == 4)
@@ -217,9 +209,6 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
brcmf_dbg(ERROR, "Invalid nbytes: %d\n", nbytes);
}
- /* Release host controller */
- sdio_release_host(sdiodev->func[func]);
-
if (err_ret)
brcmf_dbg(ERROR, "Failed to %s word, Err: 0x%08x\n",
rw ? "write" : "read", err_ret);
@@ -273,9 +262,6 @@ brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
if (brcmf_pm_resume_error(sdiodev))
return -EIO;
- /* Claim host controller */
- sdio_claim_host(sdiodev->func[func]);
-
skb_queue_walk(pktq, pkt) {
uint pkt_len = pkt->len;
pkt_len += 3;
@@ -298,9 +284,6 @@ brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
SGCount++;
}
- /* Release host controller */
- sdio_release_host(sdiodev->func[func]);
-
brcmf_dbg(TRACE, "Exit\n");
return err_ret;
}
@@ -326,9 +309,6 @@ int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
if (brcmf_pm_resume_error(sdiodev))
return -EIO;
- /* Claim host controller */
- sdio_claim_host(sdiodev->func[func]);
-
pkt_len += 3;
pkt_len &= (uint)~3;
@@ -342,9 +322,6 @@ int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
write ? "TX" : "RX", pkt, addr, pkt_len);
}
- /* Release host controller */
- sdio_release_host(sdiodev->func[func]);
-
return status;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index a11fe54f5950..17e7ae73e008 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -27,6 +27,7 @@
* IO codes that are interpreted by dongle firmware
******************************************************************************/
#define BRCMF_C_UP 2
+#define BRCMF_C_DOWN 3
#define BRCMF_C_SET_PROMISC 10
#define BRCMF_C_GET_RATE 12
#define BRCMF_C_GET_INFRA 19
@@ -50,7 +51,10 @@
#define BRCMF_C_REASSOC 53
#define BRCMF_C_SET_ROAM_TRIGGER 55
#define BRCMF_C_SET_ROAM_DELTA 57
+#define BRCMF_C_GET_BCNPRD 75
+#define BRCMF_C_SET_BCNPRD 76
#define BRCMF_C_GET_DTIMPRD 77
+#define BRCMF_C_SET_DTIMPRD 78
#define BRCMF_C_SET_COUNTRY 84
#define BRCMF_C_GET_PM 85
#define BRCMF_C_SET_PM 86
@@ -130,6 +134,13 @@
#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
#define BRCMF_EVENT_MSG_GROUP 0x04
+#define BRCMF_ESCAN_REQ_VERSION 1
+
+#define WLC_BSS_RSSI_ON_CHANNEL 0x0002
+
+#define BRCMF_MAXRATES_IN_SET 16 /* max # of rates in rateset */
+#define BRCMF_STA_ASSOC 0x10 /* Associated */
+
struct brcmf_event_msg {
__be16 version;
__be16 flags;
@@ -140,6 +151,8 @@ struct brcmf_event_msg {
__be32 datalen;
u8 addr[ETH_ALEN];
char ifname[IFNAMSIZ];
+ u8 ifidx;
+ u8 bsscfgidx;
} __packed;
struct brcm_ethhdr {
@@ -454,6 +467,24 @@ struct brcmf_scan_results_le {
__le32 count;
};
+struct brcmf_escan_params_le {
+ __le32 version;
+ __le16 action;
+ __le16 sync_id;
+ struct brcmf_scan_params_le params_le;
+};
+
+struct brcmf_escan_result_le {
+ __le32 buflen;
+ __le32 version;
+ __le16 sync_id;
+ __le16 bss_count;
+ struct brcmf_bss_info_le bss_info_le;
+};
+
+#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(struct brcmf_escan_result_le) - \
+ sizeof(struct brcmf_bss_info_le))
+
/* used for association with a specific BSSID and chanspec list */
struct brcmf_assoc_params_le {
/* 00:00:00:00:00:00: broadcast scan */
@@ -542,6 +573,28 @@ struct brcmf_channel_info_le {
__le32 scan_channel;
};
+struct brcmf_sta_info_le {
+ __le16 ver; /* version of this struct */
+ __le16 len; /* length in bytes of this structure */
+ __le16 cap; /* sta's advertised capabilities */
+ __le32 flags; /* flags defined below */
+ __le32 idle; /* time since data pkt rx'd from sta */
+ u8 ea[ETH_ALEN]; /* Station address */
+ __le32 count; /* # rates in this set */
+ u8 rates[BRCMF_MAXRATES_IN_SET]; /* rates in 500kbps units */
+ /* w/hi bit set if basic */
+ __le32 in; /* seconds elapsed since associated */
+ __le32 listen_interval_inms; /* Min Listen interval in ms for STA */
+ __le32 tx_pkts; /* # of packets transmitted */
+ __le32 tx_failures; /* # of packets failed */
+ __le32 rx_ucast_pkts; /* # of unicast packets received */
+ __le32 rx_mcast_pkts; /* # of multicast packets received */
+ __le32 tx_rate; /* Rate of last successful tx frame */
+ __le32 rx_rate; /* Rate of last successful rx frame */
+ __le32 rx_decrypt_succeeds; /* # of packet decrypted successfully */
+ __le32 rx_decrypt_failures; /* # of packet decrypted failed */
+};
+
/* Bus independent dongle command */
struct brcmf_dcmd {
uint cmd; /* common dongle cmd definition */
@@ -561,7 +614,7 @@ struct brcmf_pub {
/* Linkage pointers */
struct brcmf_bus *bus_if;
struct brcmf_proto *prot;
- struct brcmf_cfg80211_dev *config;
+ struct brcmf_cfg80211_info *config;
struct device *dev; /* fullmac dongle device pointer */
/* Internal brcmf items */
@@ -634,10 +687,13 @@ extern const struct bcmevent_name bcmevent_names[];
extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
char *buf, uint len);
+extern uint brcmf_c_mkiovar_bsscfg(char *name, char *data, uint datalen,
+ char *buf, uint buflen, s32 bssidx);
extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len);
+extern int brcmf_netlink_dcmd(struct net_device *ndev, struct brcmf_dcmd *dcmd);
/* Return pointer to interface name */
extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
@@ -657,10 +713,6 @@ extern int brcmf_c_host_event(struct brcmf_pub *drvr, int *idx,
extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx);
-/* Send packet to dongle via data channel */
-extern int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx,\
- struct sk_buff *pkt);
-
extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg);
extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg,
int enable, int master_mode);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 537f499cc5d2..9b8ee19ea55d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -103,7 +103,7 @@ extern int brcmf_attach(uint bus_hdrlen, struct device *dev);
extern void brcmf_detach(struct device *dev);
/* Indication from bus module to change flow-control state */
-extern void brcmf_txflowcontrol(struct device *dev, int ifidx, bool on);
+extern void brcmf_txflowblock(struct device *dev, bool state);
/* Notify tx completion */
extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 6f70953f0bad..15c5db5752d1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -80,12 +80,60 @@ brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
strncpy(buf, name, buflen);
/* append data onto the end of the name string */
- memcpy(&buf[len], data, datalen);
- len += datalen;
+ if (data && datalen) {
+ memcpy(&buf[len], data, datalen);
+ len += datalen;
+ }
return len;
}
+uint
+brcmf_c_mkiovar_bsscfg(char *name, char *data, uint datalen,
+ char *buf, uint buflen, s32 bssidx)
+{
+ const s8 *prefix = "bsscfg:";
+ s8 *p;
+ u32 prefixlen;
+ u32 namelen;
+ u32 iolen;
+ __le32 bssidx_le;
+
+ if (bssidx == 0)
+ return brcmf_c_mkiovar(name, data, datalen, buf, buflen);
+
+	prefixlen = (u32) strlen(prefix); /* length of bsscfg prefix */
+	namelen = (u32) strlen(name) + 1; /* length of iovar name + null */
+ iolen = prefixlen + namelen + sizeof(bssidx_le) + datalen;
+
+ if (buflen < 0 || iolen > (u32)buflen) {
+ brcmf_dbg(ERROR, "buffer is too short\n");
+ return 0;
+ }
+
+ p = buf;
+
+ /* copy prefix, no null */
+ memcpy(p, prefix, prefixlen);
+ p += prefixlen;
+
+ /* copy iovar name including null */
+ memcpy(p, name, namelen);
+ p += namelen;
+
+ /* bss config index as first data */
+ bssidx_le = cpu_to_le32(bssidx);
+ memcpy(p, &bssidx_le, sizeof(bssidx_le));
+ p += sizeof(bssidx_le);
+
+ /* parameter buffer follows */
+ if (datalen)
+ memcpy(p, data, datalen);
+
+ return iolen;
+
+}
+
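/*
 * A minimal sketch of the buffer that brcmf_c_mkiovar_bsscfg() lays out for a
 * non-zero bssidx (the layout follows directly from the code above):
 *
 *	"bsscfg:"	prefix, no NUL			(7 bytes)
 *	"<name>\0"	iovar name plus NUL		(strlen(name) + 1)
 *	bssidx		little-endian u32		(4 bytes)
 *	data		optional parameter blob		(datalen bytes)
 *
 * Hypothetical usage; the "wsec" iovar, the value and the buffer size are
 * illustrative only and not taken from this patch:
 */
static void example_bsscfg_iovar(void)
{
	char buf[64];
	__le32 val = cpu_to_le32(1);
	uint len;

	len = brcmf_c_mkiovar_bsscfg("wsec", (char *)&val, sizeof(val),
				     buf, sizeof(buf), 2);
	/* len == 7 + 5 + 4 + 4 == 20 here; 0 would mean buf was too short */
}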
bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
struct sk_buff *pkt, int prec)
{
@@ -205,7 +253,8 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
BRCMF_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"}, {
BRCMF_E_IF, "IF"}, {
BRCMF_E_RSSI, "RSSI"}, {
- BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"}
+ BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"}, {
+ BRCMF_E_ESCAN_RESULT, "ESCAN_RESULT"}
};
uint event_type, flags, auth_type, datalen;
static u32 seqnum_prev;
@@ -350,6 +399,11 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name);
break;
+ case BRCMF_E_ESCAN_RESULT:
+ brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name);
+ datalen = 0;
+ break;
+
case BRCMF_E_PFN_NET_FOUND:
case BRCMF_E_PFN_NET_LOST:
case BRCMF_E_PFN_SCAN_COMPLETE:
@@ -425,13 +479,7 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
}
/* show any appended data */
- if (datalen) {
- buf = (unsigned char *) event_data;
- brcmf_dbg(EVENT, " data (%d) : ", datalen);
- for (i = 0; i < datalen; i++)
- brcmf_dbg(EVENT, " 0x%02x ", *buf++);
- brcmf_dbg(EVENT, "\n");
- }
+ brcmf_dbg_hex_dump(datalen, event_data, datalen, "Received data");
}
#endif /* DEBUG */
@@ -522,8 +570,9 @@ brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata,
}
#ifdef DEBUG
- brcmf_c_show_host_event(event, event_data);
-#endif /* DEBUG */
+ if (BRCMF_EVENT_ON())
+ brcmf_c_show_host_event(event, event_data);
+#endif /* DEBUG */
return 0;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index b784920532d3..fb508c2256dd 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -55,6 +55,7 @@ do { \
#define BRCMF_HDRS_ON() (brcmf_msg_level & BRCMF_HDRS_VAL)
#define BRCMF_BYTES_ON() (brcmf_msg_level & BRCMF_BYTES_VAL)
#define BRCMF_GLOM_ON() (brcmf_msg_level & BRCMF_GLOM_VAL)
+#define BRCMF_EVENT_ON() (brcmf_msg_level & BRCMF_EVENT_VAL)
#else /* (defined DEBUG) || (defined DEBUG) */
@@ -65,6 +66,7 @@ do { \
#define BRCMF_HDRS_ON() 0
#define BRCMF_BYTES_ON() 0
#define BRCMF_GLOM_ON() 0
+#define BRCMF_EVENT_ON() 0
#endif /* defined(DEBUG) */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 9ab24528f9b9..d7c76ce9d8cb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -272,30 +272,6 @@ static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
schedule_work(&drvr->multicast_work);
}
-int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
-{
- /* Reject if down */
- if (!drvr->bus_if->drvr_up || (drvr->bus_if->state == BRCMF_BUS_DOWN))
- return -ENODEV;
-
- /* Update multicast statistic */
- if (pktbuf->len >= ETH_ALEN) {
- u8 *pktdata = (u8 *) (pktbuf->data);
- struct ethhdr *eh = (struct ethhdr *)pktdata;
-
- if (is_multicast_ether_addr(eh->h_dest))
- drvr->tx_multicast++;
- if (ntohs(eh->h_proto) == ETH_P_PAE)
- atomic_inc(&drvr->pend_8021x_cnt);
- }
-
- /* If the protocol uses a data header, apply it */
- brcmf_proto_hdrpush(drvr, ifidx, pktbuf);
-
- /* Use bus module to send data frame */
- return drvr->bus_if->brcmf_bus_txdata(drvr->dev, pktbuf);
-}
-
static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
int ret;
@@ -338,7 +314,22 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
}
- ret = brcmf_sendpkt(drvr, ifp->idx, skb);
+ /* Update multicast statistic */
+ if (skb->len >= ETH_ALEN) {
+ u8 *pktdata = (u8 *)(skb->data);
+ struct ethhdr *eh = (struct ethhdr *)pktdata;
+
+ if (is_multicast_ether_addr(eh->h_dest))
+ drvr->tx_multicast++;
+ if (ntohs(eh->h_proto) == ETH_P_PAE)
+ atomic_inc(&drvr->pend_8021x_cnt);
+ }
+
+ /* If the protocol uses a data header, apply it */
+ brcmf_proto_hdrpush(drvr, ifp->idx, skb);
+
+ /* Use bus module to send data frame */
+ ret = drvr->bus_if->brcmf_bus_txdata(drvr->dev, skb);
done:
if (ret)
@@ -350,19 +341,23 @@ done:
return 0;
}
-void brcmf_txflowcontrol(struct device *dev, int ifidx, bool state)
+void brcmf_txflowblock(struct device *dev, bool state)
{
struct net_device *ndev;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
+ int i;
brcmf_dbg(TRACE, "Enter\n");
- ndev = drvr->iflist[ifidx]->ndev;
- if (state == ON)
- netif_stop_queue(ndev);
- else
- netif_wake_queue(ndev);
+ for (i = 0; i < BRCMF_MAX_IFS; i++)
+ if (drvr->iflist[i]) {
+ ndev = drvr->iflist[i]->ndev;
+ if (state)
+ netif_stop_queue(ndev);
+ else
+ netif_wake_queue(ndev);
+ }
}
static int brcmf_host_event(struct brcmf_pub *drvr, int *ifidx,
@@ -775,6 +770,14 @@ done:
return err;
}
+int brcmf_netlink_dcmd(struct net_device *ndev, struct brcmf_dcmd *dcmd)
+{
+ brcmf_dbg(TRACE, "enter: cmd %x buf %p len %d\n",
+ dcmd->cmd, dcmd->buf, dcmd->len);
+
+ return brcmf_exec_dcmd(ndev, dcmd->cmd, dcmd->buf, dcmd->len);
+}
+
static int brcmf_netdev_stop(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 472f2ef5c652..3564686add9a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -482,6 +482,15 @@ struct sdpcm_shared_le {
__le32 brpt_addr;
};
+/* SDIO read frame info */
+struct brcmf_sdio_read {
+ u8 seq_num;
+ u8 channel;
+ u16 len;
+ u16 len_left;
+ u16 len_nxtfrm;
+ u8 dat_offset;
+};
/* misc chip info needed by some of the routines */
/* Private data for SDIO bus interaction */
@@ -494,9 +503,8 @@ struct brcmf_sdio {
u32 ramsize; /* Size of RAM in SOCRAM (bytes) */
u32 hostintmask; /* Copy of Host Interrupt Mask */
- u32 intstatus; /* Intstatus bits (events) pending */
- bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */
- bool fcstate; /* State of dongle flow-control */
+ atomic_t intstatus; /* Intstatus bits (events) pending */
+ atomic_t fcstate; /* State of dongle flow-control */
uint blocksize; /* Block size of SDIO transfers */
uint roundup; /* Max roundup limit */
@@ -508,9 +516,11 @@ struct brcmf_sdio {
u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
- u16 nextlen; /* Next Read Len from last header */
u8 rx_seq; /* Receive sequence number (expected) */
+ struct brcmf_sdio_read cur_read;
+ /* info of current read frame */
bool rxskip; /* Skip receive (awaiting NAK ACK) */
+ bool rxpending; /* Data frame pending in dongle */
uint rxbound; /* Rx frames to read before resched */
uint txbound; /* Tx frames to send before resched */
@@ -531,7 +541,7 @@ struct brcmf_sdio {
bool intr; /* Use interrupts */
bool poll; /* Use polling */
- bool ipend; /* Device interrupt is pending */
+ atomic_t ipend; /* Device interrupt is pending */
uint spurious; /* Count of spurious interrupts */
uint pollrate; /* Ticks between device polls */
uint polltick; /* Tick counter */
@@ -549,12 +559,9 @@ struct brcmf_sdio {
s32 idleclock; /* How to set bus driver when idle */
s32 sd_rxchain;
bool use_rxchain; /* If brcmf should use PKT chains */
- bool sleeping; /* Is SDIO bus sleeping? */
bool rxflow_mode; /* Rx flow control mode */
bool rxflow; /* Is rx flow control on */
bool alp_only; /* Don't use HT clock (ALP only) */
-/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
- bool usebufpool;
u8 *ctrl_frame_buf;
u32 ctrl_frame_len;
@@ -570,8 +577,8 @@ struct brcmf_sdio {
bool wd_timer_valid;
uint save_ms;
- struct task_struct *dpc_tsk;
- struct completion dpc_wait;
+ struct workqueue_struct *brcmf_wq;
+ struct work_struct datawork;
struct list_head dpc_tsklst;
spinlock_t dpc_tl_lock;
@@ -657,15 +664,6 @@ w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
-/* Packet free applicable unconditionally for sdio and sdspi.
- * Conditional if bufpool was present for gspi bus.
- */
-static void brcmf_sdbrcm_pktfree2(struct brcmf_sdio *bus, struct sk_buff *pkt)
-{
- if (bus->usebufpool)
- brcmu_pkt_buf_free_skb(pkt);
-}
-
/* Turn backplane clock on or off */
static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
{
@@ -853,81 +851,6 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
return 0;
}
-static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
-{
- int ret;
-
- brcmf_dbg(INFO, "request %s (currently %s)\n",
- sleep ? "SLEEP" : "WAKE",
- bus->sleeping ? "SLEEP" : "WAKE");
-
- /* Done if we're already in the requested state */
- if (sleep == bus->sleeping)
- return 0;
-
- /* Going to sleep: set the alarm and turn off the lights... */
- if (sleep) {
- /* Don't sleep if something is pending */
- if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
- return -EBUSY;
-
- /* Make sure the controller has the bus up */
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
-
- /* Tell device to start using OOB wakeup */
- ret = w_sdreg32(bus, SMB_USE_OOB,
- offsetof(struct sdpcmd_regs, tosbmailbox));
- if (ret != 0)
- brcmf_dbg(ERROR, "CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n");
-
- /* Turn off our contribution to the HT clock request */
- brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
-
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
-
- /* Isolate the bus */
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
- SBSDIO_DEVCTL_PADS_ISO, NULL);
-
- /* Change state */
- bus->sleeping = true;
-
- } else {
- /* Waking up: bus power up is ok, set local state */
-
- brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- 0, NULL);
-
- /* Make sure the controller has the bus up */
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
-
- /* Send misc interrupt to indicate OOB not needed */
- ret = w_sdreg32(bus, 0,
- offsetof(struct sdpcmd_regs, tosbmailboxdata));
- if (ret == 0)
- ret = w_sdreg32(bus, SMB_DEV_INT,
- offsetof(struct sdpcmd_regs, tosbmailbox));
-
- if (ret != 0)
- brcmf_dbg(ERROR, "CANNOT SIGNAL CHIP TO CLEAR OOB!!\n");
-
- /* Make sure we have SD bus access */
- brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
-
- /* Change state */
- bus->sleeping = false;
- }
-
- return 0;
-}
-
-static void bus_wake(struct brcmf_sdio *bus)
-{
- if (bus->sleeping)
- brcmf_sdbrcm_bussleep(bus, false);
-}
-
static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
{
u32 intstatus = 0;
@@ -1056,7 +979,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
}
/* Clear partial in any case */
- bus->nextlen = 0;
+ bus->cur_read.len = 0;
/* If we can't reach the device, signal failure */
if (err)
@@ -1108,6 +1031,96 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
}
}
+static bool brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
+ struct brcmf_sdio_read *rd)
+{
+ u16 len, checksum;
+ u8 rx_seq, fc, tx_seq_max;
+
+ /*
+ * 4 bytes hardware header (frame tag)
+ * Byte 0~1: Frame length
+ * Byte 2~3: Checksum, bit-wise inverse of frame length
+ */
+ len = get_unaligned_le16(header);
+ checksum = get_unaligned_le16(header + sizeof(u16));
+ /* All zero means no more to read */
+ if (!(len | checksum)) {
+ bus->rxpending = false;
+ return false;
+ }
+ if ((u16)(~(len ^ checksum))) {
+ brcmf_dbg(ERROR, "HW header checksum error\n");
+ bus->sdcnt.rx_badhdr++;
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ return false;
+ }
+ if (len < SDPCM_HDRLEN) {
+ brcmf_dbg(ERROR, "HW header length error\n");
+ return false;
+ }
+ rd->len = len;
+
+ /*
+ * 8 bytes hardware header
+ * Byte 0: Rx sequence number
+ * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
+ * Byte 2: Length of next data frame
+ * Byte 3: Data offset
+ * Byte 4: Flow control bits
+	 * Byte 5: Maximum sequence number allowed for Tx
+ * Byte 6~7: Reserved
+ */
+ rx_seq = SDPCM_PACKET_SEQUENCE(&header[SDPCM_FRAMETAG_LEN]);
+ rd->channel = SDPCM_PACKET_CHANNEL(&header[SDPCM_FRAMETAG_LEN]);
+ if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL) {
+ brcmf_dbg(ERROR, "HW header length too long\n");
+ bus->sdiodev->bus_if->dstats.rx_errors++;
+ bus->sdcnt.rx_toolong++;
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ rd->len = 0;
+ return false;
+ }
+ rd->dat_offset = SDPCM_DOFFSET_VALUE(&header[SDPCM_FRAMETAG_LEN]);
+ if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
+ brcmf_dbg(ERROR, "seq %d: bad data offset\n", rx_seq);
+ bus->sdcnt.rx_badhdr++;
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ rd->len = 0;
+ return false;
+ }
+ if (rd->seq_num != rx_seq) {
+ brcmf_dbg(ERROR, "seq %d: sequence number error, expect %d\n",
+ rx_seq, rd->seq_num);
+ bus->sdcnt.rx_badseq++;
+ rd->seq_num = rx_seq;
+ }
+ rd->len_nxtfrm = header[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
+		/* only warn for non-glom packets */
+ if (rd->channel != SDPCM_GLOM_CHANNEL)
+ brcmf_dbg(ERROR, "seq %d: next length error\n", rx_seq);
+ rd->len_nxtfrm = 0;
+ }
+ fc = SDPCM_FCMASK_VALUE(&header[SDPCM_FRAMETAG_LEN]);
+ if (bus->flowcontrol != fc) {
+ if (~bus->flowcontrol & fc)
+ bus->sdcnt.fc_xoff++;
+ if (bus->flowcontrol & ~fc)
+ bus->sdcnt.fc_xon++;
+ bus->sdcnt.fc_rcvd++;
+ bus->flowcontrol = fc;
+ }
+ tx_seq_max = SDPCM_WINDOW_VALUE(&header[SDPCM_FRAMETAG_LEN]);
+ if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
+ brcmf_dbg(ERROR, "seq %d: max tx seq number error\n", rx_seq);
+ tx_seq_max = bus->tx_seq + 2;
+ }
+ bus->tx_max = tx_seq_max;
+
+ return true;
+}
+
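/*
 * A minimal sketch of the 12-byte SDPCM header that brcmf_sdio_hdparser()
 * decodes, assuming SDPCM_FRAMETAG_LEN is the 4-byte frame tag described in
 * the comments above (byte offsets are into the raw header buffer):
 *
 *	header[0..1]	frame length, LE16
 *	header[2..3]	checksum, bit-wise inverse of the length
 *	header[4]	rx sequence number		SDPCM_PACKET_SEQUENCE()
 *	header[5]	channel in the high nibble	SDPCM_PACKET_CHANNEL()
 *	header[6]	next frame length / 16		(len_nxtfrm)
 *	header[7]	data offset			SDPCM_DOFFSET_VALUE()
 *	header[8]	flow-control bits		SDPCM_FCMASK_VALUE()
 *	header[9]	max tx sequence window		SDPCM_WINDOW_VALUE()
 *	header[10..11]	reserved
 *
 * A valid frame tag therefore satisfies (u16)~(len ^ checksum) == 0, which is
 * exactly the check performed above; an all-zero tag means "nothing left to
 * read" and clears bus->rxpending instead.
 */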
static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
{
u16 dlen, totlen;
@@ -1122,6 +1135,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
int ifidx = 0;
bool usechain = bus->use_rxchain;
+ u16 next_len;
/* If packets, issue read(s) and send up packet chain */
/* Return sequence numbers consumed? */
@@ -1185,10 +1199,10 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (pnext) {
brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n",
totlen, num);
- if (BRCMF_GLOM_ON() && bus->nextlen &&
- totlen != bus->nextlen) {
+ if (BRCMF_GLOM_ON() && bus->cur_read.len &&
+ totlen != bus->cur_read.len) {
brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
- bus->nextlen, totlen, rxseq);
+ bus->cur_read.len, totlen, rxseq);
}
pfirst = pnext = NULL;
} else {
@@ -1199,7 +1213,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
/* Done with descriptor packet */
brcmu_pkt_buf_free_skb(bus->glomd);
bus->glomd = NULL;
- bus->nextlen = 0;
+ bus->cur_read.len = 0;
}
/* Ok -- either we just generated a packet chain,
@@ -1272,12 +1286,13 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
- bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
- if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ next_len = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if ((next_len << 4) > MAX_RX_DATASZ) {
brcmf_dbg(INFO, "nextlen too large (%d) seq %d\n",
- bus->nextlen, seq);
- bus->nextlen = 0;
+ next_len, seq);
+ next_len = 0;
}
+ bus->cur_read.len = next_len << 4;
doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
@@ -1378,7 +1393,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
bus->sdcnt.rxglomfail++;
brcmf_sdbrcm_free_glom(bus);
}
- bus->nextlen = 0;
+ bus->cur_read.len = 0;
return 0;
}
@@ -1573,422 +1588,166 @@ static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
}
}
-static void
-brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
- struct sk_buff **pkt, u8 **rxbuf)
+static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
{
- int sdret; /* Return code from calls */
-
- *pkt = brcmu_pkt_buf_get_skb(rdlen + BRCMF_SDALIGN);
- if (*pkt == NULL)
- return;
-
- pkt_align(*pkt, rdlen, BRCMF_SDALIGN);
- *rxbuf = (u8 *) ((*pkt)->data);
- /* Read the entire frame */
- sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, *pkt);
- bus->sdcnt.f2rxdata++;
-
- if (sdret < 0) {
- brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n",
- rdlen, sdret);
- brcmu_pkt_buf_free_skb(*pkt);
- bus->sdiodev->bus_if->dstats.rx_errors++;
- /* Force retry w/normal header read.
- * Don't attempt NAK for
- * gSPI
- */
- brcmf_sdbrcm_rxfail(bus, true, true);
- *pkt = NULL;
- }
-}
-
-/* Checks the header */
-static int
-brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
- u8 rxseq, u16 nextlen, u16 *len)
-{
- u16 check;
- bool len_consistent; /* Result of comparing readahead len and
- len from hw-hdr */
-
- memcpy(bus->rxhdr, rxbuf, SDPCM_HDRLEN);
-
- /* Extract hardware header fields */
- *len = get_unaligned_le16(bus->rxhdr);
- check = get_unaligned_le16(bus->rxhdr + sizeof(u16));
-
- /* All zeros means readahead info was bad */
- if (!(*len | check)) {
- brcmf_dbg(INFO, "(nextlen): read zeros in HW header???\n");
- goto fail;
- }
-
- /* Validate check bytes */
- if ((u16)~(*len ^ check)) {
- brcmf_dbg(ERROR, "(nextlen): HW hdr error: nextlen/len/check 0x%04x/0x%04x/0x%04x\n",
- nextlen, *len, check);
- bus->sdcnt.rx_badhdr++;
- brcmf_sdbrcm_rxfail(bus, false, false);
- goto fail;
- }
-
- /* Validate frame length */
- if (*len < SDPCM_HDRLEN) {
- brcmf_dbg(ERROR, "(nextlen): HW hdr length invalid: %d\n",
- *len);
- goto fail;
- }
-
- /* Check for consistency with readahead info */
- len_consistent = (nextlen != (roundup(*len, 16) >> 4));
- if (len_consistent) {
- /* Mismatch, force retry w/normal
- header (may be >4K) */
- brcmf_dbg(ERROR, "(nextlen): mismatch, nextlen %d len %d rnd %d; expected rxseq %d\n",
- nextlen, *len, roundup(*len, 16),
- rxseq);
- brcmf_sdbrcm_rxfail(bus, true, true);
- goto fail;
- }
-
- return 0;
-
-fail:
- brcmf_sdbrcm_pktfree2(bus, pkt);
- return -EINVAL;
-}
-
-/* Return true if there may be more frames to read */
-static uint
-brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
-{
- u16 len, check; /* Extracted hardware header fields */
- u8 chan, seq, doff; /* Extracted software header fields */
- u8 fcbits; /* Extracted fcbits from software header */
-
struct sk_buff *pkt; /* Packet for event or data frames */
u16 pad; /* Number of pad bytes to read */
- u16 rdlen; /* Total number of bytes to read */
- u8 rxseq; /* Next sequence number to expect */
uint rxleft = 0; /* Remaining number of frames allowed */
int sdret; /* Return code from calls */
- u8 txmax; /* Maximum tx sequence offered */
- u8 *rxbuf;
int ifidx = 0;
uint rxcount = 0; /* Total frames read */
+ struct brcmf_sdio_read *rd = &bus->cur_read, rd_new;
+ u8 head_read = 0;
brcmf_dbg(TRACE, "Enter\n");
/* Not finished unless we encounter no more frames indication */
- *finished = false;
+ bus->rxpending = true;
- for (rxseq = bus->rx_seq, rxleft = maxframes;
+ for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
!bus->rxskip && rxleft &&
bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN;
- rxseq++, rxleft--) {
+ rd->seq_num++, rxleft--) {
/* Handle glomming separately */
if (bus->glomd || !skb_queue_empty(&bus->glom)) {
u8 cnt;
brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
bus->glomd, skb_peek(&bus->glom));
- cnt = brcmf_sdbrcm_rxglom(bus, rxseq);
+ cnt = brcmf_sdbrcm_rxglom(bus, rd->seq_num);
brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
- rxseq += cnt - 1;
+ rd->seq_num += cnt - 1;
rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
continue;
}
- /* Try doing single read if we can */
- if (bus->nextlen) {
- u16 nextlen = bus->nextlen;
- bus->nextlen = 0;
-
- rdlen = len = nextlen << 4;
- brcmf_pad(bus, &pad, &rdlen);
-
- /*
- * After the frame is received we have to
- * distinguish whether it is data
- * or non-data frame.
- */
- brcmf_alloc_pkt_and_read(bus, rdlen, &pkt, &rxbuf);
- if (pkt == NULL) {
- /* Give up on data, request rtx of events */
- brcmf_dbg(ERROR, "(nextlen): brcmf_alloc_pkt_and_read failed: len %d rdlen %d expected rxseq %d\n",
- len, rdlen, rxseq);
- continue;
- }
-
- if (brcmf_check_rxbuf(bus, pkt, rxbuf, rxseq, nextlen,
- &len) < 0)
+ rd->len_left = rd->len;
+		/* read header first for unknown frame length */
+ if (!rd->len) {
+ sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
+ bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC,
+ bus->rxhdr,
+ BRCMF_FIRSTREAD);
+ bus->sdcnt.f2rxhdrs++;
+ if (sdret < 0) {
+ brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n",
+ sdret);
+ bus->sdcnt.rx_hdrfail++;
+ brcmf_sdbrcm_rxfail(bus, true, true);
continue;
-
- /* Extract software header fields */
- chan = SDPCM_PACKET_CHANNEL(
- &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
- seq = SDPCM_PACKET_SEQUENCE(
- &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
- doff = SDPCM_DOFFSET_VALUE(
- &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
- txmax = SDPCM_WINDOW_VALUE(
- &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-
- bus->nextlen =
- bus->rxhdr[SDPCM_FRAMETAG_LEN +
- SDPCM_NEXTLEN_OFFSET];
- if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
- brcmf_dbg(INFO, "(nextlen): got frame w/nextlen too large (%d), seq %d\n",
- bus->nextlen, seq);
- bus->nextlen = 0;
}
- bus->sdcnt.rx_readahead_cnt++;
-
- /* Handle Flow Control */
- fcbits = SDPCM_FCMASK_VALUE(
- &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-
- if (bus->flowcontrol != fcbits) {
- if (~bus->flowcontrol & fcbits)
- bus->sdcnt.fc_xoff++;
-
- if (bus->flowcontrol & ~fcbits)
- bus->sdcnt.fc_xon++;
-
- bus->sdcnt.fc_rcvd++;
- bus->flowcontrol = fcbits;
- }
-
- /* Check and update sequence number */
- if (rxseq != seq) {
- brcmf_dbg(INFO, "(nextlen): rx_seq %d, expected %d\n",
- seq, rxseq);
- bus->sdcnt.rx_badseq++;
- rxseq = seq;
- }
-
- /* Check window for sanity */
- if ((u8) (txmax - bus->tx_seq) > 0x40) {
- brcmf_dbg(ERROR, "got unlikely tx max %d with tx_seq %d\n",
- txmax, bus->tx_seq);
- txmax = bus->tx_seq + 2;
- }
- bus->tx_max = txmax;
-
- brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
- rxbuf, len, "Rx Data:\n");
- brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
- BRCMF_DATA_ON()) &&
- BRCMF_HDRS_ON(),
+ brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
bus->rxhdr, SDPCM_HDRLEN,
"RxHdr:\n");
- if (chan == SDPCM_CONTROL_CHANNEL) {
- brcmf_dbg(ERROR, "(nextlen): readahead on control packet %d?\n",
- seq);
- /* Force retry w/normal header read */
- bus->nextlen = 0;
- brcmf_sdbrcm_rxfail(bus, false, true);
- brcmf_sdbrcm_pktfree2(bus, pkt);
- continue;
+ if (!brcmf_sdio_hdparser(bus, bus->rxhdr, rd)) {
+ if (!bus->rxpending)
+ break;
+ else
+ continue;
}
- /* Validate data offset */
- if ((doff < SDPCM_HDRLEN) || (doff > len)) {
- brcmf_dbg(ERROR, "(nextlen): bad data offset %d: HW len %d min %d\n",
- doff, len, SDPCM_HDRLEN);
- brcmf_sdbrcm_rxfail(bus, false, false);
- brcmf_sdbrcm_pktfree2(bus, pkt);
+ if (rd->channel == SDPCM_CONTROL_CHANNEL) {
+ brcmf_sdbrcm_read_control(bus, bus->rxhdr,
+ rd->len,
+ rd->dat_offset);
+ /* prepare the descriptor for the next read */
+ rd->len = rd->len_nxtfrm << 4;
+ rd->len_nxtfrm = 0;
+				/* treat all packets as events if we don't know */
+ rd->channel = SDPCM_EVENT_CHANNEL;
continue;
}
-
- /* All done with this one -- now deliver the packet */
- goto deliver;
- }
-
- /* Read frame header (hardware and software) */
- sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, bus->rxhdr,
- BRCMF_FIRSTREAD);
- bus->sdcnt.f2rxhdrs++;
-
- if (sdret < 0) {
- brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n", sdret);
- bus->sdcnt.rx_hdrfail++;
- brcmf_sdbrcm_rxfail(bus, true, true);
- continue;
- }
- brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
- bus->rxhdr, SDPCM_HDRLEN, "RxHdr:\n");
-
-
- /* Extract hardware header fields */
- len = get_unaligned_le16(bus->rxhdr);
- check = get_unaligned_le16(bus->rxhdr + sizeof(u16));
-
- /* All zeros means no more frames */
- if (!(len | check)) {
- *finished = true;
- break;
- }
-
- /* Validate check bytes */
- if ((u16) ~(len ^ check)) {
- brcmf_dbg(ERROR, "HW hdr err: len/check 0x%04x/0x%04x\n",
- len, check);
- bus->sdcnt.rx_badhdr++;
- brcmf_sdbrcm_rxfail(bus, false, false);
- continue;
- }
-
- /* Validate frame length */
- if (len < SDPCM_HDRLEN) {
- brcmf_dbg(ERROR, "HW hdr length invalid: %d\n", len);
- continue;
- }
-
- /* Extract software header fields */
- chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
- seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
- doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
- txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-
- /* Validate data offset */
- if ((doff < SDPCM_HDRLEN) || (doff > len)) {
- brcmf_dbg(ERROR, "Bad data offset %d: HW len %d, min %d seq %d\n",
- doff, len, SDPCM_HDRLEN, seq);
- bus->sdcnt.rx_badhdr++;
- brcmf_sdbrcm_rxfail(bus, false, false);
- continue;
- }
-
- /* Save the readahead length if there is one */
- bus->nextlen =
- bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
- if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
- brcmf_dbg(INFO, "(nextlen): got frame w/nextlen too large (%d), seq %d\n",
- bus->nextlen, seq);
- bus->nextlen = 0;
- }
-
- /* Handle Flow Control */
- fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-
- if (bus->flowcontrol != fcbits) {
- if (~bus->flowcontrol & fcbits)
- bus->sdcnt.fc_xoff++;
-
- if (bus->flowcontrol & ~fcbits)
- bus->sdcnt.fc_xon++;
-
- bus->sdcnt.fc_rcvd++;
- bus->flowcontrol = fcbits;
- }
-
- /* Check and update sequence number */
- if (rxseq != seq) {
- brcmf_dbg(INFO, "rx_seq %d, expected %d\n", seq, rxseq);
- bus->sdcnt.rx_badseq++;
- rxseq = seq;
- }
-
- /* Check window for sanity */
- if ((u8) (txmax - bus->tx_seq) > 0x40) {
- brcmf_dbg(ERROR, "unlikely tx max %d with tx_seq %d\n",
- txmax, bus->tx_seq);
- txmax = bus->tx_seq + 2;
- }
- bus->tx_max = txmax;
-
- /* Call a separate function for control frames */
- if (chan == SDPCM_CONTROL_CHANNEL) {
- brcmf_sdbrcm_read_control(bus, bus->rxhdr, len, doff);
- continue;
- }
-
- /* precondition: chan is either SDPCM_DATA_CHANNEL,
- SDPCM_EVENT_CHANNEL, SDPCM_TEST_CHANNEL or
- SDPCM_GLOM_CHANNEL */
-
- /* Length to read */
- rdlen = (len > BRCMF_FIRSTREAD) ? (len - BRCMF_FIRSTREAD) : 0;
-
- /* May pad read to blocksize for efficiency */
- if (bus->roundup && bus->blocksize &&
- (rdlen > bus->blocksize)) {
- pad = bus->blocksize - (rdlen % bus->blocksize);
- if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
- ((rdlen + pad + BRCMF_FIRSTREAD) < MAX_RX_DATASZ))
- rdlen += pad;
- } else if (rdlen % BRCMF_SDALIGN) {
- rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
+ rd->len_left = rd->len > BRCMF_FIRSTREAD ?
+ rd->len - BRCMF_FIRSTREAD : 0;
+ head_read = BRCMF_FIRSTREAD;
}
- /* Satisfy length-alignment requirements */
- if (rdlen & (ALIGNMENT - 1))
- rdlen = roundup(rdlen, ALIGNMENT);
-
- if ((rdlen + BRCMF_FIRSTREAD) > MAX_RX_DATASZ) {
- /* Too long -- skip this frame */
- brcmf_dbg(ERROR, "too long: len %d rdlen %d\n",
- len, rdlen);
- bus->sdiodev->bus_if->dstats.rx_errors++;
- bus->sdcnt.rx_toolong++;
- brcmf_sdbrcm_rxfail(bus, false, false);
- continue;
- }
+ brcmf_pad(bus, &pad, &rd->len_left);
- pkt = brcmu_pkt_buf_get_skb(rdlen +
- BRCMF_FIRSTREAD + BRCMF_SDALIGN);
+ pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
+ BRCMF_SDALIGN);
if (!pkt) {
/* Give up on data, request rtx of events */
- brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: rdlen %d chan %d\n",
- rdlen, chan);
+ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed\n");
bus->sdiodev->bus_if->dstats.rx_dropped++;
- brcmf_sdbrcm_rxfail(bus, false, RETRYCHAN(chan));
+ brcmf_sdbrcm_rxfail(bus, false,
+ RETRYCHAN(rd->channel));
continue;
}
+ skb_pull(pkt, head_read);
+ pkt_align(pkt, rd->len_left, BRCMF_SDALIGN);
- /* Leave room for what we already read, and align remainder */
- skb_pull(pkt, BRCMF_FIRSTREAD);
- pkt_align(pkt, rdlen, BRCMF_SDALIGN);
-
- /* Read the remaining frame data */
sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, pkt);
bus->sdcnt.f2rxdata++;
if (sdret < 0) {
- brcmf_dbg(ERROR, "read %d %s bytes failed: %d\n", rdlen,
- ((chan == SDPCM_EVENT_CHANNEL) ? "event"
- : ((chan == SDPCM_DATA_CHANNEL) ? "data"
- : "test")), sdret);
+ brcmf_dbg(ERROR, "read %d bytes from channel %d failed: %d\n",
+ rd->len, rd->channel, sdret);
brcmu_pkt_buf_free_skb(pkt);
bus->sdiodev->bus_if->dstats.rx_errors++;
- brcmf_sdbrcm_rxfail(bus, true, RETRYCHAN(chan));
+ brcmf_sdbrcm_rxfail(bus, true,
+ RETRYCHAN(rd->channel));
continue;
}
- /* Copy the already-read portion */
- skb_push(pkt, BRCMF_FIRSTREAD);
- memcpy(pkt->data, bus->rxhdr, BRCMF_FIRSTREAD);
+ if (head_read) {
+ skb_push(pkt, head_read);
+ memcpy(pkt->data, bus->rxhdr, head_read);
+ head_read = 0;
+ } else {
+ memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
+ rd_new.seq_num = rd->seq_num;
+ if (!brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new)) {
+ rd->len = 0;
+ brcmu_pkt_buf_free_skb(pkt);
+ }
+ bus->sdcnt.rx_readahead_cnt++;
+ if (rd->len != roundup(rd_new.len, 16)) {
+ brcmf_dbg(ERROR, "frame length mismatch:read %d, should be %d\n",
+ rd->len,
+ roundup(rd_new.len, 16) >> 4);
+ rd->len = 0;
+ brcmf_sdbrcm_rxfail(bus, true, true);
+ brcmu_pkt_buf_free_skb(pkt);
+ continue;
+ }
+ rd->len_nxtfrm = rd_new.len_nxtfrm;
+ rd->channel = rd_new.channel;
+ rd->dat_offset = rd_new.dat_offset;
+
+ brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
+ BRCMF_DATA_ON()) &&
+ BRCMF_HDRS_ON(),
+ bus->rxhdr, SDPCM_HDRLEN,
+ "RxHdr:\n");
+
+ if (rd_new.channel == SDPCM_CONTROL_CHANNEL) {
+ brcmf_dbg(ERROR, "readahead on control packet %d?\n",
+ rd_new.seq_num);
+ /* Force retry w/normal header read */
+ rd->len = 0;
+ brcmf_sdbrcm_rxfail(bus, false, true);
+ brcmu_pkt_buf_free_skb(pkt);
+ continue;
+ }
+ }
brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
- pkt->data, len, "Rx Data:\n");
+ pkt->data, rd->len, "Rx Data:\n");
-deliver:
/* Save superframe descriptor and allocate packet frame */
- if (chan == SDPCM_GLOM_CHANNEL) {
+ if (rd->channel == SDPCM_GLOM_CHANNEL) {
if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
- len);
+ rd->len);
brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
- pkt->data, len,
+ pkt->data, rd->len,
"Glom Data:\n");
- __skb_trim(pkt, len);
+ __skb_trim(pkt, rd->len);
skb_pull(pkt, SDPCM_HDRLEN);
bus->glomd = pkt;
} else {
@@ -1996,12 +1755,23 @@ deliver:
"descriptor!\n", __func__);
brcmf_sdbrcm_rxfail(bus, false, false);
}
+ /* prepare the descriptor for the next read */
+ rd->len = rd->len_nxtfrm << 4;
+ rd->len_nxtfrm = 0;
+			/* treat all packets as events if we don't know */
+ rd->channel = SDPCM_EVENT_CHANNEL;
continue;
}
/* Fill in packet len and prio, deliver upward */
- __skb_trim(pkt, len);
- skb_pull(pkt, doff);
+ __skb_trim(pkt, rd->len);
+ skb_pull(pkt, rd->dat_offset);
+
+ /* prepare the descriptor for the next read */
+ rd->len = rd->len_nxtfrm << 4;
+ rd->len_nxtfrm = 0;
+		/* treat all packets as events if we don't know */
+ rd->channel = SDPCM_EVENT_CHANNEL;
if (pkt->len == 0) {
brcmu_pkt_buf_free_skb(pkt);
@@ -2019,17 +1789,17 @@ deliver:
brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt);
down(&bus->sdsem);
}
+
rxcount = maxframes - rxleft;
/* Message if we hit the limit */
if (!rxleft)
- brcmf_dbg(DATA, "hit rx limit of %d frames\n",
- maxframes);
+ brcmf_dbg(DATA, "hit rx limit of %d frames\n", maxframes);
else
brcmf_dbg(DATA, "processed %d frames\n", rxcount);
/* Back off rxseq if awaiting rtx, update rx_seq */
if (bus->rxskip)
- rxseq--;
- bus->rx_seq = rxseq;
+ rd->seq_num--;
+ bus->rx_seq = rd->seq_num;
return rxcount;
}
@@ -2227,7 +1997,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
if (ret != 0)
break;
if (intstatus & bus->hostintmask)
- bus->ipend = true;
+ atomic_set(&bus->ipend, 1);
}
}
@@ -2235,8 +2005,8 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
if (bus->sdiodev->bus_if->drvr_up &&
(bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
- bus->txoff = OFF;
- brcmf_txflowcontrol(bus->sdiodev->dev, 0, OFF);
+ bus->txoff = false;
+ brcmf_txflowblock(bus->sdiodev->dev, false);
}
return cnt;
@@ -2259,16 +2029,8 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
bus->watchdog_tsk = NULL;
}
- if (bus->dpc_tsk && bus->dpc_tsk != current) {
- send_sig(SIGTERM, bus->dpc_tsk, 1);
- kthread_stop(bus->dpc_tsk);
- bus->dpc_tsk = NULL;
- }
-
down(&bus->sdsem);
- bus_wake(bus);
-
/* Enable clock for device interrupts */
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
@@ -2327,7 +2089,7 @@ static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
unsigned long flags;
spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
- if (!bus->sdiodev->irq_en && !bus->ipend) {
+ if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) {
enable_irq(bus->sdiodev->irq);
bus->sdiodev->irq_en = true;
}
@@ -2339,21 +2101,69 @@ static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
}
#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
-static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
+static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
{
- u32 intstatus, newstatus = 0;
+ struct list_head *new_hd;
+ unsigned long flags;
+
+ if (in_interrupt())
+ new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
+ else
+ new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (new_hd == NULL)
+ return;
+
+ spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+ list_add_tail(new_hd, &bus->dpc_tsklst);
+ spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+}
+
+static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
+{
+ u8 idx;
+ u32 addr;
+ unsigned long val;
+ int n, ret;
+
+ idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+ addr = bus->ci->c_inf[idx].base +
+ offsetof(struct sdpcmd_regs, intstatus);
+
+ ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, false);
+ bus->sdcnt.f1regdata++;
+ if (ret != 0)
+ val = 0;
+
+ val &= bus->hostintmask;
+ atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
+
+ /* Clear interrupts */
+ if (val) {
+ ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, true);
+ bus->sdcnt.f1regdata++;
+ }
+
+ if (ret) {
+ atomic_set(&bus->intstatus, 0);
+ } else if (val) {
+ for_each_set_bit(n, &val, 32)
+ set_bit(n, (unsigned long *)&bus->intstatus.counter);
+ }
+
+ return ret;
+}
+
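/*
 * A minimal sketch of the interrupt-status hand-off introduced here (all
 * names come from the surrounding code): the interrupt side accumulates
 * pending bits lock-free via
 *
 *	for_each_set_bit(n, &val, 32)
 *		set_bit(n, (unsigned long *)&bus->intstatus.counter);
 *
 * while the DPC below drains them in one shot with
 *
 *	intstatus = atomic_xchg(&bus->intstatus, 0);
 *
 * so leftover status bits survive between DPC passes without a dedicated
 * spinlock.
 */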
+static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
+{
+ u32 newstatus = 0;
+ unsigned long intstatus;
uint rxlimit = bus->rxbound; /* Rx frames to read before resched */
uint txlimit = bus->txbound; /* Tx frames to send before resched */
uint framecnt = 0; /* Temporary counter of tx/rx frames */
- bool rxdone = true; /* Flag for no more read data */
- bool resched = false; /* Flag indicating resched wanted */
- int err;
+ int err = 0, n;
brcmf_dbg(TRACE, "Enter\n");
- /* Start with leftover status bits */
- intstatus = bus->intstatus;
-
down(&bus->sdsem);
/* If waiting for HTAVAIL, check status */
@@ -2399,39 +2209,22 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
bus->clkstate = CLK_AVAIL;
- } else {
- goto clkwait;
}
}
- bus_wake(bus);
-
/* Make sure backplane clock is on */
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, true);
- if (bus->clkstate == CLK_PENDING)
- goto clkwait;
/* Pending interrupt indicates new device status */
- if (bus->ipend) {
- bus->ipend = false;
- err = r_sdreg32(bus, &newstatus,
- offsetof(struct sdpcmd_regs, intstatus));
- bus->sdcnt.f1regdata++;
- if (err != 0)
- newstatus = 0;
- newstatus &= bus->hostintmask;
- bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
- if (newstatus) {
- err = w_sdreg32(bus, newstatus,
- offsetof(struct sdpcmd_regs,
- intstatus));
- bus->sdcnt.f1regdata++;
- }
+ if (atomic_read(&bus->ipend) > 0) {
+ atomic_set(&bus->ipend, 0);
+ sdio_claim_host(bus->sdiodev->func[1]);
+ err = brcmf_sdio_intr_rstatus(bus);
+ sdio_release_host(bus->sdiodev->func[1]);
}
- /* Merge new bits with previous */
- intstatus |= newstatus;
- bus->intstatus = 0;
+ /* Start with leftover status bits */
+ intstatus = atomic_xchg(&bus->intstatus, 0);
/* Handle flow-control change: read new state in case our ack
* crossed another change interrupt. If change still set, assume
@@ -2445,8 +2238,8 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
err = r_sdreg32(bus, &newstatus,
offsetof(struct sdpcmd_regs, intstatus));
bus->sdcnt.f1regdata += 2;
- bus->fcstate =
- !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
+ atomic_set(&bus->fcstate,
+ !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
intstatus |= (newstatus & bus->hostintmask);
}
@@ -2483,32 +2276,34 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
intstatus &= ~I_HMB_FRAME_IND;
/* On frame indication, read available frames */
- if (PKT_AVAILABLE()) {
- framecnt = brcmf_sdbrcm_readframes(bus, rxlimit, &rxdone);
- if (rxdone || bus->rxskip)
+ if (PKT_AVAILABLE() && bus->clkstate == CLK_AVAIL) {
+ framecnt = brcmf_sdio_readframes(bus, rxlimit);
+ if (!bus->rxpending)
intstatus &= ~I_HMB_FRAME_IND;
rxlimit -= min(framecnt, rxlimit);
}
/* Keep still-pending events for next scheduling */
- bus->intstatus = intstatus;
+ if (intstatus) {
+ for_each_set_bit(n, &intstatus, 32)
+ set_bit(n, (unsigned long *)&bus->intstatus.counter);
+ }
-clkwait:
brcmf_sdbrcm_clrintr(bus);
if (data_ok(bus) && bus->ctrl_frame_stat &&
(bus->clkstate == CLK_AVAIL)) {
- int ret, i;
+ int i;
- ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
+ err = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
(u32) bus->ctrl_frame_len);
- if (ret < 0) {
+ if (err < 0) {
/* On failure, abort the command and
terminate the frame */
brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
- ret);
+ err);
bus->sdcnt.tx_sderrs++;
brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
@@ -2530,42 +2325,34 @@ clkwait:
break;
}
- }
- if (ret == 0)
+ } else {
bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
-
- brcmf_dbg(INFO, "Return_dpc value is : %d\n", ret);
+ }
bus->ctrl_frame_stat = false;
brcmf_sdbrcm_wait_event_wakeup(bus);
}
/* Send queued frames (limit 1 if rx may still be pending) */
- else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
+ else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
&& data_ok(bus)) {
- framecnt = rxdone ? txlimit : min(txlimit, bus->txminmax);
+ framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
+ txlimit;
framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt);
txlimit -= framecnt;
}
- /* Resched if events or tx frames are pending,
- else await next interrupt */
- /* On failed register access, all bets are off:
- no resched or interrupts */
if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) {
brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation\n");
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
- bus->intstatus = 0;
- } else if (bus->clkstate == CLK_PENDING) {
- brcmf_dbg(INFO, "rescheduled due to CLK_PENDING awaiting I_CHIPACTIVE interrupt\n");
- resched = true;
- } else if (bus->intstatus || bus->ipend ||
- (!bus->fcstate && brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol)
- && data_ok(bus)) || PKT_AVAILABLE()) {
- resched = true;
+ atomic_set(&bus->intstatus, 0);
+ } else if (atomic_read(&bus->intstatus) ||
+ atomic_read(&bus->ipend) > 0 ||
+ (!atomic_read(&bus->fcstate) &&
+ brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
+ data_ok(bus)) || PKT_AVAILABLE()) {
+ brcmf_sdbrcm_adddpctsk(bus);
}
- bus->dpc_sched = resched;
-
/* If we're done for now, turn off clock request. */
if ((bus->clkstate != CLK_PENDING)
&& bus->idletime == BRCMF_IDLE_IMMEDIATE) {
@@ -2574,65 +2361,6 @@ clkwait:
}
up(&bus->sdsem);
-
- return resched;
-}
-
-static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
-{
- struct list_head *new_hd;
- unsigned long flags;
-
- if (in_interrupt())
- new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
- else
- new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
- if (new_hd == NULL)
- return;
-
- spin_lock_irqsave(&bus->dpc_tl_lock, flags);
- list_add_tail(new_hd, &bus->dpc_tsklst);
- spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
-}
-
-static int brcmf_sdbrcm_dpc_thread(void *data)
-{
- struct brcmf_sdio *bus = (struct brcmf_sdio *) data;
- struct list_head *cur_hd, *tmp_hd;
- unsigned long flags;
-
- allow_signal(SIGTERM);
- /* Run until signal received */
- while (1) {
- if (kthread_should_stop())
- break;
-
- if (list_empty(&bus->dpc_tsklst))
- if (wait_for_completion_interruptible(&bus->dpc_wait))
- break;
-
- spin_lock_irqsave(&bus->dpc_tl_lock, flags);
- list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
- spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
-
- if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
- /* after stopping the bus, exit thread */
- brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
- bus->dpc_tsk = NULL;
- spin_lock_irqsave(&bus->dpc_tl_lock, flags);
- break;
- }
-
- if (brcmf_sdbrcm_dpc(bus))
- brcmf_sdbrcm_adddpctsk(bus);
-
- spin_lock_irqsave(&bus->dpc_tl_lock, flags);
- list_del(cur_hd);
- kfree(cur_hd);
- }
- spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
- }
- return 0;
}
static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
@@ -2642,6 +2370,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
+ unsigned long flags;
brcmf_dbg(TRACE, "Enter\n");
@@ -2672,21 +2401,23 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
spin_unlock_bh(&bus->txqlock);
if (pktq_len(&bus->txq) >= TXHI) {
- bus->txoff = ON;
- brcmf_txflowcontrol(bus->sdiodev->dev, 0, ON);
+ bus->txoff = true;
+ brcmf_txflowblock(bus->sdiodev->dev, true);
}
#ifdef DEBUG
if (pktq_plen(&bus->txq, prec) > qcount[prec])
qcount[prec] = pktq_plen(&bus->txq, prec);
#endif
- /* Schedule DPC if needed to send queued packet(s) */
- if (!bus->dpc_sched) {
- bus->dpc_sched = true;
- if (bus->dpc_tsk) {
- brcmf_sdbrcm_adddpctsk(bus);
- complete(&bus->dpc_wait);
- }
+
+ spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+ if (list_empty(&bus->dpc_tsklst)) {
+ spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+
+ brcmf_sdbrcm_adddpctsk(bus);
+ queue_work(bus->brcmf_wq, &bus->datawork);
+ } else {
+ spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
}
return ret;
@@ -2707,6 +2438,8 @@ brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
else
dsize = size;
+ sdio_claim_host(bus->sdiodev->func[1]);
+
/* Set the backplane window to include the start address */
bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev, address);
if (bcmerror) {
@@ -2748,6 +2481,8 @@ xfer_done:
brcmf_dbg(ERROR, "FAILED to set window back to 0x%x\n",
bus->sdiodev->sbwad);
+ sdio_release_host(bus->sdiodev->func[1]);
+
return bcmerror;
}
@@ -2882,6 +2617,7 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
+ unsigned long flags;
brcmf_dbg(TRACE, "Enter\n");
@@ -2918,8 +2654,6 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
/* Need to lock here to protect txseq and SDIO tx calls */
down(&bus->sdsem);
- bus_wake(bus);
-
/* Make sure backplane clock is on */
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
@@ -2967,9 +2701,15 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
} while (ret < 0 && retries++ < TXRETRIES);
}
- if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+ if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
+ list_empty(&bus->dpc_tsklst)) {
+ spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+
bus->activity = false;
brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
+ } else {
+ spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
}
up(&bus->sdsem);
@@ -3774,23 +3514,20 @@ void brcmf_sdbrcm_isr(void *arg)
}
/* Count the interrupt call */
bus->sdcnt.intrcount++;
- bus->ipend = true;
-
- /* Shouldn't get this interrupt if we're sleeping? */
- if (bus->sleeping) {
- brcmf_dbg(ERROR, "INTERRUPT WHILE SLEEPING??\n");
- return;
- }
+ if (in_interrupt())
+ atomic_set(&bus->ipend, 1);
+ else
+ if (brcmf_sdio_intr_rstatus(bus)) {
+ brcmf_dbg(ERROR, "failed backplane access\n");
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+ }
/* Disable additional interrupts (is this needed now)? */
if (!bus->intr)
brcmf_dbg(ERROR, "isr w/o interrupt configured!\n");
- bus->dpc_sched = true;
- if (bus->dpc_tsk) {
- brcmf_sdbrcm_adddpctsk(bus);
- complete(&bus->dpc_wait);
- }
+ brcmf_sdbrcm_adddpctsk(bus);
+ queue_work(bus->brcmf_wq, &bus->datawork);
}
static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
@@ -3798,13 +3535,10 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
#ifdef DEBUG
struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
#endif /* DEBUG */
+ unsigned long flags;
brcmf_dbg(TIMER, "Enter\n");
- /* Ignore the timer if simulating bus down */
- if (bus->sleeping)
- return false;
-
down(&bus->sdsem);
/* Poll period: check device if appropriate. */
@@ -3818,27 +3552,30 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
if (!bus->intr ||
(bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
- if (!bus->dpc_sched) {
+ spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+ if (list_empty(&bus->dpc_tsklst)) {
u8 devpend;
+ spin_unlock_irqrestore(&bus->dpc_tl_lock,
+ flags);
devpend = brcmf_sdio_regrb(bus->sdiodev,
SDIO_CCCR_INTx,
NULL);
intstatus =
devpend & (INTR_STATUS_FUNC1 |
INTR_STATUS_FUNC2);
+ } else {
+ spin_unlock_irqrestore(&bus->dpc_tl_lock,
+ flags);
}
/* If there is something, make like the ISR and
schedule the DPC */
if (intstatus) {
bus->sdcnt.pollcnt++;
- bus->ipend = true;
+ atomic_set(&bus->ipend, 1);
- bus->dpc_sched = true;
- if (bus->dpc_tsk) {
- brcmf_sdbrcm_adddpctsk(bus);
- complete(&bus->dpc_wait);
- }
+ brcmf_sdbrcm_adddpctsk(bus);
+ queue_work(bus->brcmf_wq, &bus->datawork);
}
}
@@ -3876,11 +3613,13 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
up(&bus->sdsem);
- return bus->ipend;
+ return (atomic_read(&bus->ipend) > 0);
}
static bool brcmf_sdbrcm_chipmatch(u16 chipid)
{
+ if (chipid == BCM43241_CHIP_ID)
+ return true;
if (chipid == BCM4329_CHIP_ID)
return true;
if (chipid == BCM4330_CHIP_ID)
@@ -3890,6 +3629,26 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid)
return false;
}
+static void brcmf_sdio_dataworker(struct work_struct *work)
+{
+ struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
+ datawork);
+ struct list_head *cur_hd, *tmp_hd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+ list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
+ spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+
+ brcmf_sdbrcm_dpc(bus);
+
+ spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+ list_del(cur_hd);
+ kfree(cur_hd);
+ }
+ spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+}
+
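/*
 * A brief sketch of the reworked dispatch path (names from the code above):
 * producers append a token to dpc_tsklst via brcmf_sdbrcm_adddpctsk() and
 * then call queue_work(bus->brcmf_wq, &bus->datawork); the worker runs one
 * brcmf_sdbrcm_dpc() pass per queued token, replacing the old brcmf_dpc
 * kthread and its completion-based wakeup.
 */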
static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -4022,7 +3781,6 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
SDIO_FUNC_ENABLE_1, NULL);
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
- bus->sleeping = false;
bus->rxflow = false;
/* Done with backplane-dependent accesses, can drop clock... */
@@ -4103,6 +3861,9 @@ static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
/* De-register interrupt handler */
brcmf_sdio_intr_unregister(bus->sdiodev);
+ cancel_work_sync(&bus->datawork);
+ destroy_workqueue(bus->brcmf_wq);
+
if (bus->sdiodev->bus_if->drvr) {
brcmf_detach(bus->sdiodev->dev);
brcmf_sdbrcm_release_dongle(bus);
@@ -4142,8 +3903,6 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
bus->rxbound = BRCMF_RXBOUND;
bus->txminmax = BRCMF_TXMINMAX;
bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
- bus->usebufpool = false; /* Use bufpool if allocated,
- else use locally malloced rxbuf */
/* attempt to attach to the dongle */
if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) {
@@ -4155,6 +3914,13 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
init_waitqueue_head(&bus->ctrl_wait);
init_waitqueue_head(&bus->dcmd_resp_wait);
+ bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
+ if (bus->brcmf_wq == NULL) {
+ brcmf_dbg(ERROR, "insufficient memory to create txworkqueue\n");
+ goto fail;
+ }
+ INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
+
/* Set up the watchdog timer */
init_timer(&bus->timer);
bus->timer.data = (unsigned long)bus;
@@ -4172,15 +3938,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
bus->watchdog_tsk = NULL;
}
/* Initialize DPC thread */
- init_completion(&bus->dpc_wait);
INIT_LIST_HEAD(&bus->dpc_tsklst);
spin_lock_init(&bus->dpc_tl_lock);
- bus->dpc_tsk = kthread_run(brcmf_sdbrcm_dpc_thread,
- bus, "brcmf_dpc");
- if (IS_ERR(bus->dpc_tsk)) {
- pr_warn("brcmf_dpc thread failed to start\n");
- bus->dpc_tsk = NULL;
- }
/* Assign bus interface call back */
bus->sdiodev->bus_if->brcmf_bus_stop = brcmf_sdbrcm_bus_stop;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index 58155e23d220..9434440bbc65 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -377,6 +377,23 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
/* Address of cores for new chips should be added here */
switch (ci->chip) {
+ case BCM43241_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x2a084411;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18002000;
+ ci->c_inf[1].wrapbase = 0x18102000;
+ ci->c_inf[1].cib = 0x0e004211;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = 0x18004000;
+ ci->c_inf[2].wrapbase = 0x18104000;
+ ci->c_inf[2].cib = 0x14080401;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = 0x18003000;
+ ci->c_inf[3].wrapbase = 0x18103000;
+ ci->c_inf[3].cib = 0x07004211;
+ ci->ramsize = 0x90000;
+ break;
case BCM4329_CHIP_ID:
ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 29bf78d264e0..0d30afd8c672 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -174,6 +174,8 @@ extern void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
u8 data, int *ret);
extern void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
u32 data, int *ret);
+extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ void *data, bool write);
/* Buffer transfer to/from device (client) core via cmd53.
* fn: function number
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 58f89fa9c9f8..a2b4b1e71017 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -66,7 +66,9 @@
#define BRCMF_USB_CBCTL_READ 1
#define BRCMF_USB_MAX_PKT_SIZE 1600
+#define BRCMF_USB_43143_FW_NAME "brcm/brcmfmac43143.bin"
#define BRCMF_USB_43236_FW_NAME "brcm/brcmfmac43236b.bin"
+#define BRCMF_USB_43242_FW_NAME "brcm/brcmfmac43242a.bin"
enum usbdev_suspend_state {
USBOS_SUSPEND_STATE_DEVICE_ACTIVE = 0, /* Device is busy, won't allow
@@ -78,25 +80,13 @@ enum usbdev_suspend_state {
USBOS_SUSPEND_STATE_SUSPENDED /* Device suspended */
};
-struct brcmf_usb_probe_info {
- void *usbdev_info;
- struct usb_device *usb; /* USB device pointer from OS */
- uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2;
- int intr_size; /* Size of interrupt message */
- int interval; /* Interrupt polling interval */
- int vid;
- int pid;
- enum usb_device_speed device_speed;
- enum usbdev_suspend_state suspend_state;
- struct usb_interface *intf;
-};
-static struct brcmf_usb_probe_info usbdev_probe_info;
-
struct brcmf_usb_image {
- void *data;
- u32 len;
+ struct list_head list;
+ s8 *fwname;
+ u8 *image;
+ int image_len;
};
-static struct brcmf_usb_image g_image = { NULL, 0 };
+static struct list_head fw_image_list;
struct intr_transfer_buf {
u32 notification;
@@ -117,9 +107,8 @@ struct brcmf_usbdev_info {
int rx_low_watermark;
int tx_low_watermark;
int tx_high_watermark;
- bool txoff;
- bool rxoff;
- bool txoverride;
+ int tx_freecount;
+ bool tx_flowblock;
struct brcmf_usbreq *tx_reqs;
struct brcmf_usbreq *rx_reqs;
@@ -133,7 +122,6 @@ struct brcmf_usbdev_info {
struct usb_device *usbdev;
struct device *dev;
- enum usb_device_speed device_speed;
int ctl_in_pipe, ctl_out_pipe;
struct urb *ctl_urb; /* URB for control endpoint */
@@ -146,16 +134,11 @@ struct brcmf_usbdev_info {
wait_queue_head_t ctrl_wait;
ulong ctl_op;
- bool rxctl_deferrespok;
-
struct urb *bulk_urb; /* used for FW download */
struct urb *intr_urb; /* URB for interrupt endpoint */
int intr_size; /* Size of interrupt message */
int interval; /* Interrupt polling interval */
struct intr_transfer_buf intr; /* Data buffer for interrupt endpoint */
-
- struct brcmf_usb_probe_info probe_info;
-
};
static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
@@ -177,48 +160,17 @@ static struct brcmf_usbdev_info *brcmf_usb_get_businfo(struct device *dev)
return brcmf_usb_get_buspub(dev)->devinfo;
}
-#if 0
-static void
-brcmf_usb_txflowcontrol(struct brcmf_usbdev_info *devinfo, bool onoff)
+static int brcmf_usb_ioctl_resp_wait(struct brcmf_usbdev_info *devinfo)
{
- dhd_txflowcontrol(devinfo->bus_pub.netdev, 0, onoff);
+ return wait_event_timeout(devinfo->ioctl_resp_wait,
+ devinfo->ctl_completed,
+ msecs_to_jiffies(IOCTL_RESP_TIMEOUT));
}
-#endif
-static int brcmf_usb_ioctl_resp_wait(struct brcmf_usbdev_info *devinfo,
- uint *condition, bool *pending)
-{
- DECLARE_WAITQUEUE(wait, current);
- int timeout = IOCTL_RESP_TIMEOUT;
-
- /* Convert timeout in millsecond to jiffies */
- timeout = msecs_to_jiffies(timeout);
- /* Wait until control frame is available */
- add_wait_queue(&devinfo->ioctl_resp_wait, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
-
- smp_mb();
- while (!(*condition) && (!signal_pending(current) && timeout)) {
- timeout = schedule_timeout(timeout);
- /* Wait until control frame is available */
- smp_mb();
- }
-
- if (signal_pending(current))
- *pending = true;
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&devinfo->ioctl_resp_wait, &wait);
-
- return timeout;
-}
-
-static int brcmf_usb_ioctl_resp_wake(struct brcmf_usbdev_info *devinfo)
+static void brcmf_usb_ioctl_resp_wake(struct brcmf_usbdev_info *devinfo)
{
if (waitqueue_active(&devinfo->ioctl_resp_wait))
- wake_up_interruptible(&devinfo->ioctl_resp_wait);
-
- return 0;
+ wake_up(&devinfo->ioctl_resp_wait);
}
static void
@@ -324,17 +276,9 @@ brcmf_usb_recv_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
devinfo->ctl_read.wLength = cpu_to_le16p(&size);
devinfo->ctl_urb->transfer_buffer_length = size;
- if (devinfo->rxctl_deferrespok) {
- /* BMAC model */
- devinfo->ctl_read.bRequestType = USB_DIR_IN
- | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
- devinfo->ctl_read.bRequest = DL_DEFER_RESP_OK;
- } else {
- /* full dongle model */
- devinfo->ctl_read.bRequestType = USB_DIR_IN
- | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
- devinfo->ctl_read.bRequest = 1;
- }
+ devinfo->ctl_read.bRequestType = USB_DIR_IN
+ | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ devinfo->ctl_read.bRequest = 1;
usb_fill_control_urb(devinfo->ctl_urb,
devinfo->usbdev,
@@ -355,7 +299,6 @@ static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
{
int err = 0;
int timeout = 0;
- bool pending;
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
@@ -366,15 +309,14 @@ static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
if (test_and_set_bit(0, &devinfo->ctl_op))
return -EIO;
+ devinfo->ctl_completed = false;
err = brcmf_usb_send_ctl(devinfo, buf, len);
if (err) {
brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len);
+ clear_bit(0, &devinfo->ctl_op);
return err;
}
-
- devinfo->ctl_completed = false;
- timeout = brcmf_usb_ioctl_resp_wait(devinfo, &devinfo->ctl_completed,
- &pending);
+ timeout = brcmf_usb_ioctl_resp_wait(devinfo);
clear_bit(0, &devinfo->ctl_op);
if (!timeout) {
brcmf_dbg(ERROR, "Txctl wait timed out\n");
@@ -387,7 +329,6 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
{
int err = 0;
int timeout = 0;
- bool pending;
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) {
@@ -397,14 +338,14 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
if (test_and_set_bit(0, &devinfo->ctl_op))
return -EIO;
+ devinfo->ctl_completed = false;
err = brcmf_usb_recv_ctl(devinfo, buf, len);
if (err) {
brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len);
+ clear_bit(0, &devinfo->ctl_op);
return err;
}
- devinfo->ctl_completed = false;
- timeout = brcmf_usb_ioctl_resp_wait(devinfo, &devinfo->ctl_completed,
- &pending);
+ timeout = brcmf_usb_ioctl_resp_wait(devinfo);
err = devinfo->ctl_urb_status;
clear_bit(0, &devinfo->ctl_op);
if (!timeout) {
@@ -418,7 +359,7 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
}
static struct brcmf_usbreq *brcmf_usb_deq(struct brcmf_usbdev_info *devinfo,
- struct list_head *q)
+ struct list_head *q, int *counter)
{
unsigned long flags;
struct brcmf_usbreq *req;
@@ -429,17 +370,22 @@ static struct brcmf_usbreq *brcmf_usb_deq(struct brcmf_usbdev_info *devinfo,
}
req = list_entry(q->next, struct brcmf_usbreq, list);
list_del_init(q->next);
+ if (counter)
+ (*counter)--;
spin_unlock_irqrestore(&devinfo->qlock, flags);
return req;
}
static void brcmf_usb_enq(struct brcmf_usbdev_info *devinfo,
- struct list_head *q, struct brcmf_usbreq *req)
+ struct list_head *q, struct brcmf_usbreq *req,
+ int *counter)
{
unsigned long flags;
spin_lock_irqsave(&devinfo->qlock, flags);
list_add_tail(&req->list, q);
+ if (counter)
+ (*counter)++;
spin_unlock_irqrestore(&devinfo->qlock, flags);
}
@@ -519,10 +465,16 @@ static void brcmf_usb_tx_complete(struct urb *urb)
else
devinfo->bus_pub.bus->dstats.tx_errors++;
+ brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
+
brcmu_pkt_buf_free_skb(req->skb);
req->skb = NULL;
- brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req);
-
+ brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount);
+ if (devinfo->tx_freecount > devinfo->tx_high_watermark &&
+ devinfo->tx_flowblock) {
+ brcmf_txflowblock(devinfo->dev, false);
+ devinfo->tx_flowblock = false;
+ }
}
static void brcmf_usb_rx_complete(struct urb *urb)
@@ -541,7 +493,7 @@ static void brcmf_usb_rx_complete(struct urb *urb)
} else {
devinfo->bus_pub.bus->dstats.rx_errors++;
brcmu_pkt_buf_free_skb(skb);
- brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
+ brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
return;
}
@@ -550,15 +502,13 @@ static void brcmf_usb_rx_complete(struct urb *urb)
if (brcmf_proto_hdrpull(devinfo->dev, &ifidx, skb) != 0) {
brcmf_dbg(ERROR, "rx protocol error\n");
brcmu_pkt_buf_free_skb(skb);
- brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
devinfo->bus_pub.bus->dstats.rx_errors++;
- } else {
+ } else
brcmf_rx_packet(devinfo->dev, ifidx, skb);
- brcmf_usb_rx_refill(devinfo, req);
- }
+ brcmf_usb_rx_refill(devinfo, req);
} else {
brcmu_pkt_buf_free_skb(skb);
- brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
+ brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
}
return;
@@ -575,7 +525,7 @@ static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
skb = dev_alloc_skb(devinfo->bus_pub.bus_mtu);
if (!skb) {
- brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
+ brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
return;
}
req->skb = skb;
@@ -584,14 +534,14 @@ static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
skb->data, skb_tailroom(skb), brcmf_usb_rx_complete,
req);
req->devinfo = devinfo;
- brcmf_usb_enq(devinfo, &devinfo->rx_postq, req);
+ brcmf_usb_enq(devinfo, &devinfo->rx_postq, req, NULL);
ret = usb_submit_urb(req->urb, GFP_ATOMIC);
if (ret) {
brcmf_usb_del_fromq(devinfo, req);
brcmu_pkt_buf_free_skb(req->skb);
req->skb = NULL;
- brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req);
+ brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
}
return;
}
@@ -604,7 +554,7 @@ static void brcmf_usb_rx_fill_all(struct brcmf_usbdev_info *devinfo)
brcmf_dbg(ERROR, "bus is not up\n");
return;
}
- while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq)) != NULL)
+ while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq, NULL)) != NULL)
brcmf_usb_rx_refill(devinfo, req);
}
@@ -682,7 +632,8 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
return -EIO;
}
- req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq);
+ req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq,
+ &devinfo->tx_freecount);
if (!req) {
brcmu_pkt_buf_free_skb(skb);
brcmf_dbg(ERROR, "no req to send\n");
@@ -694,14 +645,21 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->tx_pipe,
skb->data, skb->len, brcmf_usb_tx_complete, req);
req->urb->transfer_flags |= URB_ZERO_PACKET;
- brcmf_usb_enq(devinfo, &devinfo->tx_postq, req);
+ brcmf_usb_enq(devinfo, &devinfo->tx_postq, req, NULL);
ret = usb_submit_urb(req->urb, GFP_ATOMIC);
if (ret) {
brcmf_dbg(ERROR, "brcmf_usb_tx usb_submit_urb FAILED\n");
brcmf_usb_del_fromq(devinfo, req);
brcmu_pkt_buf_free_skb(req->skb);
req->skb = NULL;
- brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req);
+ brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req,
+ &devinfo->tx_freecount);
+ } else {
+ if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
+ !devinfo->tx_flowblock) {
+ brcmf_txflowblock(dev, true);
+ devinfo->tx_flowblock = true;
+ }
}
return ret;
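
The reworked tx path above keeps a count of free tx requests and applies hysteresis: the upper layer is flow-blocked once the free count drops below tx_low_watermark and unblocked in the completion handler once it climbs back above tx_high_watermark. A minimal, self-contained sketch of that watermark pattern follows; the names are illustrative and printf() stands in for brcmf_txflowblock(), so this is not the driver's API itself.

#include <stdbool.h>
#include <stdio.h>

struct txq {
    int free_count;          /* free tx requests, mirrors tx_freecount */
    int low_wm, high_wm;     /* mirrors tx_low_watermark / tx_high_watermark */
    bool flow_blocked;       /* mirrors tx_flowblock */
};

/* Called when a request is taken for transmit. */
static void txq_take(struct txq *q)
{
    q->free_count--;
    if (q->free_count < q->low_wm && !q->flow_blocked) {
        q->flow_blocked = true;
        printf("flow OFF (free=%d)\n", q->free_count);  /* brcmf_txflowblock(dev, true) */
    }
}

/* Called from the completion handler when a request is returned. */
static void txq_give(struct txq *q)
{
    q->free_count++;
    if (q->free_count > q->high_wm && q->flow_blocked) {
        q->flow_blocked = false;
        printf("flow ON  (free=%d)\n", q->free_count);  /* brcmf_txflowblock(dev, false) */
    }
}

int main(void)
{
    struct txq q = { .free_count = 16, .low_wm = 4, .high_wm = 12 };
    for (int i = 0; i < 14; i++) txq_take(&q);   /* queue fills, flow turns off once */
    for (int i = 0; i < 14; i++) txq_give(&q);   /* completions drain it, flow comes back on */
    return 0;
}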
@@ -1112,10 +1070,14 @@ static int brcmf_usb_dlrun(struct brcmf_usbdev_info *devinfo)
static bool brcmf_usb_chip_support(int chipid, int chiprev)
{
switch(chipid) {
+ case 43143:
+ return true;
case 43235:
case 43236:
case 43238:
return (chiprev == 3);
+ case 43242:
+ return true;
default:
break;
}
@@ -1154,17 +1116,10 @@ brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
}
-static void brcmf_usb_detach(const struct brcmf_usbdev *bus_pub)
+static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
{
- struct brcmf_usbdev_info *devinfo =
- (struct brcmf_usbdev_info *)bus_pub;
-
brcmf_dbg(TRACE, "devinfo %p\n", devinfo);
- /* store the image globally */
- g_image.data = devinfo->image;
- g_image.len = devinfo->image_len;
-
/* free the URBS */
brcmf_usb_free_q(&devinfo->rx_freeq, false);
brcmf_usb_free_q(&devinfo->tx_freeq, false);
@@ -1175,7 +1130,6 @@ static void brcmf_usb_detach(const struct brcmf_usbdev *bus_pub)
kfree(devinfo->tx_reqs);
kfree(devinfo->rx_reqs);
- kfree(devinfo);
}
#define TRX_MAGIC 0x30524448 /* "HDR0" */
@@ -1217,19 +1171,34 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
{
s8 *fwname;
const struct firmware *fw;
+ struct brcmf_usb_image *fw_image;
int err;
- devinfo->image = g_image.data;
- devinfo->image_len = g_image.len;
-
- /*
- * if we have an image we can leave here.
- */
- if (devinfo->image)
- return 0;
-
- fwname = BRCMF_USB_43236_FW_NAME;
+ switch (devinfo->bus_pub.devid) {
+ case 43143:
+ fwname = BRCMF_USB_43143_FW_NAME;
+ break;
+ case 43235:
+ case 43236:
+ case 43238:
+ fwname = BRCMF_USB_43236_FW_NAME;
+ break;
+ case 43242:
+ fwname = BRCMF_USB_43242_FW_NAME;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+ list_for_each_entry(fw_image, &fw_image_list, list) {
+ if (fw_image->fwname == fwname) {
+ devinfo->image = fw_image->image;
+ devinfo->image_len = fw_image->image_len;
+ return 0;
+ }
+ }
+ /* fw image not yet loaded. Load it now and add to list */
err = request_firmware(&fw, fwname, devinfo->dev);
if (!fw) {
brcmf_dbg(ERROR, "fail to request firmware %s\n", fwname);
@@ -1240,27 +1209,32 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
return -EINVAL;
}
- devinfo->image = vmalloc(fw->size); /* plus nvram */
- if (!devinfo->image)
+ fw_image = kzalloc(sizeof(*fw_image), GFP_ATOMIC);
+ if (!fw_image)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&fw_image->list);
+ list_add_tail(&fw_image->list, &fw_image_list);
+ fw_image->fwname = fwname;
+ fw_image->image = vmalloc(fw->size);
+ if (!fw_image->image)
return -ENOMEM;
- memcpy(devinfo->image, fw->data, fw->size);
- devinfo->image_len = fw->size;
+ memcpy(fw_image->image, fw->data, fw->size);
+ fw_image->image_len = fw->size;
release_firmware(fw);
+
+ devinfo->image = fw_image->image;
+ devinfo->image_len = fw_image->image_len;
+
return 0;
}
static
-struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
+struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
+ int nrxq, int ntxq)
{
- struct brcmf_usbdev_info *devinfo;
-
- devinfo = kzalloc(sizeof(struct brcmf_usbdev_info), GFP_ATOMIC);
- if (devinfo == NULL)
- return NULL;
-
devinfo->bus_pub.nrxq = nrxq;
devinfo->rx_low_watermark = nrxq / 2;
devinfo->bus_pub.devinfo = devinfo;
@@ -1269,18 +1243,6 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
/* flow control when too many tx urbs posted */
devinfo->tx_low_watermark = ntxq / 4;
devinfo->tx_high_watermark = devinfo->tx_low_watermark * 3;
- devinfo->dev = dev;
- devinfo->usbdev = usbdev_probe_info.usb;
- devinfo->tx_pipe = usbdev_probe_info.tx_pipe;
- devinfo->rx_pipe = usbdev_probe_info.rx_pipe;
- devinfo->rx_pipe2 = usbdev_probe_info.rx_pipe2;
- devinfo->intr_pipe = usbdev_probe_info.intr_pipe;
-
- devinfo->interval = usbdev_probe_info.interval;
- devinfo->intr_size = usbdev_probe_info.intr_size;
-
- memcpy(&devinfo->probe_info, &usbdev_probe_info,
- sizeof(struct brcmf_usb_probe_info));
devinfo->bus_pub.bus_mtu = BRCMF_USB_MAX_PKT_SIZE;
/* Initialize other structure content */
@@ -1295,6 +1257,8 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
INIT_LIST_HEAD(&devinfo->tx_freeq);
INIT_LIST_HEAD(&devinfo->tx_postq);
+ devinfo->tx_flowblock = false;
+
devinfo->rx_reqs = brcmf_usbdev_qinit(&devinfo->rx_freeq, nrxq);
if (!devinfo->rx_reqs)
goto error;
@@ -1302,6 +1266,7 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
devinfo->tx_reqs = brcmf_usbdev_qinit(&devinfo->tx_freeq, ntxq);
if (!devinfo->tx_reqs)
goto error;
+ devinfo->tx_freecount = ntxq;
devinfo->intr_urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!devinfo->intr_urb) {
@@ -1313,8 +1278,6 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
brcmf_dbg(ERROR, "usb_alloc_urb (ctl) failed\n");
goto error;
}
- devinfo->rxctl_deferrespok = 0;
-
devinfo->bulk_urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!devinfo->bulk_urb) {
brcmf_dbg(ERROR, "usb_alloc_urb (bulk) failed\n");
@@ -1336,23 +1299,21 @@ struct brcmf_usbdev *brcmf_usb_attach(int nrxq, int ntxq, struct device *dev)
error:
brcmf_dbg(ERROR, "failed!\n");
- brcmf_usb_detach(&devinfo->bus_pub);
+ brcmf_usb_detach(devinfo);
return NULL;
}
-static int brcmf_usb_probe_cb(struct device *dev, const char *desc,
- u32 bustype, u32 hdrlen)
+static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo,
+ const char *desc, u32 bustype, u32 hdrlen)
{
struct brcmf_bus *bus = NULL;
struct brcmf_usbdev *bus_pub = NULL;
int ret;
+ struct device *dev = devinfo->dev;
-
- bus_pub = brcmf_usb_attach(BRCMF_USB_NRXQ, BRCMF_USB_NTXQ, dev);
- if (!bus_pub) {
- ret = -ENODEV;
- goto fail;
- }
+ bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ);
+ if (!bus_pub)
+ return -ENODEV;
bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
if (!bus) {
@@ -1387,23 +1348,21 @@ static int brcmf_usb_probe_cb(struct device *dev, const char *desc,
return 0;
fail:
/* Release resources in reverse order */
- if (bus_pub)
- brcmf_usb_detach(bus_pub);
kfree(bus);
+ brcmf_usb_detach(devinfo);
return ret;
}
static void
-brcmf_usb_disconnect_cb(struct brcmf_usbdev *bus_pub)
+brcmf_usb_disconnect_cb(struct brcmf_usbdev_info *devinfo)
{
- if (!bus_pub)
+ if (!devinfo)
return;
- brcmf_dbg(TRACE, "enter: bus_pub %p\n", bus_pub);
-
- brcmf_detach(bus_pub->devinfo->dev);
- kfree(bus_pub->bus);
- brcmf_usb_detach(bus_pub);
+ brcmf_dbg(TRACE, "enter: bus_pub %p\n", devinfo);
+ brcmf_detach(devinfo->dev);
+ kfree(devinfo->bus_pub.bus);
+ brcmf_usb_detach(devinfo);
}
static int
@@ -1415,18 +1374,18 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
struct usb_device *usb = interface_to_usbdev(intf);
int num_of_eps;
u8 endpoint_num;
+ struct brcmf_usbdev_info *devinfo;
brcmf_dbg(TRACE, "enter\n");
- usbdev_probe_info.usb = usb;
- usbdev_probe_info.intf = intf;
+ devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC);
+ if (devinfo == NULL)
+ return -ENOMEM;
- if (id != NULL) {
- usbdev_probe_info.vid = id->idVendor;
- usbdev_probe_info.pid = id->idProduct;
- }
+ devinfo->usbdev = usb;
+ devinfo->dev = &usb->dev;
- usb_set_intfdata(intf, &usbdev_probe_info);
+ usb_set_intfdata(intf, devinfo);
/* Check that the device supports only one configuration */
if (usb->descriptor.bNumConfigurations != 1) {
@@ -1475,11 +1434,11 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
}
endpoint_num = endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
- usbdev_probe_info.intr_pipe = usb_rcvintpipe(usb, endpoint_num);
+ devinfo->intr_pipe = usb_rcvintpipe(usb, endpoint_num);
- usbdev_probe_info.rx_pipe = 0;
- usbdev_probe_info.rx_pipe2 = 0;
- usbdev_probe_info.tx_pipe = 0;
+ devinfo->rx_pipe = 0;
+ devinfo->rx_pipe2 = 0;
+ devinfo->tx_pipe = 0;
num_of_eps = IFDESC(usb, BULK_IF).bNumEndpoints - 1;
/* Check data endpoints and get pipes */
@@ -1496,35 +1455,33 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
USB_ENDPOINT_NUMBER_MASK;
if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
== USB_DIR_IN) {
- if (!usbdev_probe_info.rx_pipe) {
- usbdev_probe_info.rx_pipe =
+ if (!devinfo->rx_pipe) {
+ devinfo->rx_pipe =
usb_rcvbulkpipe(usb, endpoint_num);
} else {
- usbdev_probe_info.rx_pipe2 =
+ devinfo->rx_pipe2 =
usb_rcvbulkpipe(usb, endpoint_num);
}
} else {
- usbdev_probe_info.tx_pipe =
- usb_sndbulkpipe(usb, endpoint_num);
+ devinfo->tx_pipe = usb_sndbulkpipe(usb, endpoint_num);
}
}
/* Allocate interrupt URB and data buffer */
/* RNDIS says 8-byte intr, our old drivers used 4-byte */
if (IFEPDESC(usb, CONTROL_IF, 0).wMaxPacketSize == cpu_to_le16(16))
- usbdev_probe_info.intr_size = 8;
+ devinfo->intr_size = 8;
else
- usbdev_probe_info.intr_size = 4;
+ devinfo->intr_size = 4;
- usbdev_probe_info.interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
+ devinfo->interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
- usbdev_probe_info.device_speed = usb->speed;
if (usb->speed == USB_SPEED_HIGH)
brcmf_dbg(INFO, "Broadcom high speed USB wireless device detected\n");
else
brcmf_dbg(INFO, "Broadcom full speed USB wireless device detected\n");
- ret = brcmf_usb_probe_cb(&usb->dev, "", USB_BUS, 0);
+ ret = brcmf_usb_probe_cb(devinfo, "", USB_BUS, 0);
if (ret)
goto fail;
@@ -1533,6 +1490,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
fail:
brcmf_dbg(ERROR, "failed with errno %d\n", ret);
+ kfree(devinfo);
usb_set_intfdata(intf, NULL);
return ret;
@@ -1541,11 +1499,12 @@ fail:
static void
brcmf_usb_disconnect(struct usb_interface *intf)
{
- struct usb_device *usb = interface_to_usbdev(intf);
+ struct brcmf_usbdev_info *devinfo;
brcmf_dbg(TRACE, "enter\n");
- brcmf_usb_disconnect_cb(brcmf_usb_get_buspub(&usb->dev));
- usb_set_intfdata(intf, NULL);
+ devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
+ brcmf_usb_disconnect_cb(devinfo);
+ kfree(devinfo);
}
/*
@@ -1577,17 +1536,23 @@ static int brcmf_usb_resume(struct usb_interface *intf)
}
#define BRCMF_USB_VENDOR_ID_BROADCOM 0x0a5c
+#define BRCMF_USB_DEVICE_ID_43143 0xbd1e
#define BRCMF_USB_DEVICE_ID_43236 0xbd17
+#define BRCMF_USB_DEVICE_ID_43242 0xbd1f
#define BRCMF_USB_DEVICE_ID_BCMFW 0x0bdc
static struct usb_device_id brcmf_usb_devid_table[] = {
+ { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43143) },
{ USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43236) },
+ { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43242) },
/* special entry for device with firmware loaded and running */
{ USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_BCMFW) },
{ }
};
MODULE_DEVICE_TABLE(usb, brcmf_usb_devid_table);
+MODULE_FIRMWARE(BRCMF_USB_43143_FW_NAME);
MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME);
+MODULE_FIRMWARE(BRCMF_USB_43242_FW_NAME);
/* TODO: suspend and resume entries */
static struct usb_driver brcmf_usbdrvr = {
@@ -1601,15 +1566,25 @@ static struct usb_driver brcmf_usbdrvr = {
.disable_hub_initiated_lpm = 1,
};
+static void brcmf_release_fw(struct list_head *q)
+{
+ struct brcmf_usb_image *fw_image, *next;
+
+ list_for_each_entry_safe(fw_image, next, q, list) {
+ vfree(fw_image->image);
+ list_del_init(&fw_image->list);
+ }
+}
+
+
void brcmf_usb_exit(void)
{
usb_deregister(&brcmf_usbdrvr);
- vfree(g_image.data);
- g_image.data = NULL;
- g_image.len = 0;
+ brcmf_release_fw(&fw_image_list);
}
void brcmf_usb_init(void)
{
+ INIT_LIST_HEAD(&fw_image_list);
usb_register(&brcmf_usbdrvr);
}
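
The usb.c hunks above replace the single cached firmware blob (g_image) with a per-chip cache (fw_image_list): brcmf_usb_get_fw() looks the image up by firmware name, loads it with request_firmware() only on a cache miss, and brcmf_release_fw() frees the whole list in brcmf_usb_exit(). A rough user-space sketch of the same load-once cache is shown below, assuming a hypothetical fw_get()/fw_release_all() API and plain file I/O in place of request_firmware(); it illustrates the pattern, not the kernel interface.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* One cached firmware image, keyed by file name (mirrors struct brcmf_usb_image). */
struct fw_image {
    struct fw_image *next;
    char *name;
    unsigned char *data;
    size_t len;
};

static struct fw_image *fw_list;   /* mirrors fw_image_list */

/* Return a cached image, loading it on first use; NULL on failure. */
static struct fw_image *fw_get(const char *name)
{
    struct fw_image *img;
    FILE *f;
    long sz;

    for (img = fw_list; img; img = img->next)
        if (strcmp(img->name, name) == 0)
            return img;                     /* cache hit */

    f = fopen(name, "rb");                  /* stand-in for request_firmware() */
    if (!f)
        return NULL;
    fseek(f, 0, SEEK_END);
    sz = ftell(f);
    rewind(f);

    img = calloc(1, sizeof(*img));
    if (!img) { fclose(f); return NULL; }
    img->name = strdup(name);
    img->data = malloc(sz);
    img->len = (size_t)sz;
    if (!img->name || !img->data || fread(img->data, 1, img->len, f) != img->len) {
        fclose(f);
        free(img->data); free(img->name); free(img);
        return NULL;
    }
    fclose(f);

    img->next = fw_list;                    /* add to cache for later probes */
    fw_list = img;
    return img;
}

/* Drop every cached image (mirrors brcmf_release_fw()). */
static void fw_release_all(void)
{
    while (fw_list) {
        struct fw_image *img = fw_list;
        fw_list = img->next;
        free(img->data);
        free(img->name);
        free(img);
    }
}

int main(void)
{
    struct fw_image *img = fw_get("brcmfmac43236b.bin");
    printf("loaded: %s (%zu bytes)\n", img ? img->name : "none", img ? img->len : 0);
    fw_release_all();
    return 0;
}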
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 50b5553b6964..c1abaa6db59e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -28,6 +28,7 @@
#include <linux/ieee80211.h>
#include <linux/uaccess.h>
#include <net/cfg80211.h>
+#include <net/netlink.h>
#include <brcmu_utils.h>
#include <defs.h>
@@ -35,6 +36,58 @@
#include "dhd.h"
#include "wl_cfg80211.h"
+#define BRCMF_SCAN_IE_LEN_MAX 2048
+#define BRCMF_PNO_VERSION 2
+#define BRCMF_PNO_TIME 30
+#define BRCMF_PNO_REPEAT 4
+#define BRCMF_PNO_FREQ_EXPO_MAX 3
+#define BRCMF_PNO_MAX_PFN_COUNT 16
+#define BRCMF_PNO_ENABLE_ADAPTSCAN_BIT 6
+#define BRCMF_PNO_HIDDEN_BIT 2
+#define BRCMF_PNO_WPA_AUTH_ANY 0xFFFFFFFF
+#define BRCMF_PNO_SCAN_COMPLETE 1
+#define BRCMF_PNO_SCAN_INCOMPLETE 0
+
+#define TLV_LEN_OFF 1 /* length offset */
+#define TLV_HDR_LEN 2 /* header length */
+#define TLV_BODY_OFF 2 /* body offset */
+#define TLV_OUI_LEN 3 /* oui id length */
+#define WPA_OUI "\x00\x50\xF2" /* WPA OUI */
+#define WPA_OUI_TYPE 1
+#define RSN_OUI "\x00\x0F\xAC" /* RSN OUI */
+#define WME_OUI_TYPE 2
+
+#define VS_IE_FIXED_HDR_LEN 6
+#define WPA_IE_VERSION_LEN 2
+#define WPA_IE_MIN_OUI_LEN 4
+#define WPA_IE_SUITE_COUNT_LEN 2
+
+#define WPA_CIPHER_NONE 0 /* None */
+#define WPA_CIPHER_WEP_40 1 /* WEP (40-bit) */
+#define WPA_CIPHER_TKIP 2 /* TKIP: default for WPA */
+#define WPA_CIPHER_AES_CCM 4 /* AES (CCM) */
+#define WPA_CIPHER_WEP_104 5 /* WEP (104-bit) */
+
+#define RSN_AKM_NONE 0 /* None (IBSS) */
+#define RSN_AKM_UNSPECIFIED 1 /* Over 802.1x */
+#define RSN_AKM_PSK 2 /* Pre-shared Key */
+#define RSN_CAP_LEN 2 /* Length of RSN capabilities */
+#define RSN_CAP_PTK_REPLAY_CNTR_MASK 0x000C
+
+#define VNDR_IE_CMD_LEN 4 /* length of the set command
+ * string :"add", "del" (+ NUL)
+ */
+#define VNDR_IE_COUNT_OFFSET 4
+#define VNDR_IE_PKTFLAG_OFFSET 8
+#define VNDR_IE_VSIE_OFFSET 12
+#define VNDR_IE_HDR_SIZE 12
+#define VNDR_IE_BEACON_FLAG 0x1
+#define VNDR_IE_PRBRSP_FLAG 0x2
+#define MAX_VNDR_IE_NUMBER 5
+
+#define DOT11_MGMT_HDR_LEN 24 /* d11 management header len */
+#define DOT11_BCN_PRB_FIXED_LEN 12 /* beacon/probe fixed length */
+
#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
(sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
@@ -42,33 +95,12 @@ static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255};
static u32 brcmf_dbg_level = WL_DBG_ERR;
-static void brcmf_set_drvdata(struct brcmf_cfg80211_dev *dev, void *data)
-{
- dev->driver_data = data;
-}
-
-static void *brcmf_get_drvdata(struct brcmf_cfg80211_dev *dev)
-{
- void *data = NULL;
-
- if (dev)
- data = dev->driver_data;
- return data;
-}
-
-static
-struct brcmf_cfg80211_priv *brcmf_priv_get(struct brcmf_cfg80211_dev *cfg_dev)
-{
- struct brcmf_cfg80211_iface *ci = brcmf_get_drvdata(cfg_dev);
- return ci->cfg_priv;
-}
-
static bool check_sys_up(struct wiphy *wiphy)
{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- if (!test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ if (!test_bit(WL_STATUS_READY, &cfg->status)) {
WL_INFO("device is not ready : status (%d)\n",
- (int)cfg_priv->status);
+ (int)cfg->status);
return false;
}
return true;
@@ -256,6 +288,25 @@ struct brcmf_tlv {
u8 data[1];
};
+/* Vendor specific ie. id = 221, oui and type defines exact ie */
+struct brcmf_vs_tlv {
+ u8 id;
+ u8 len;
+ u8 oui[3];
+ u8 oui_type;
+};
+
+struct parsed_vndr_ie_info {
+ u8 *ie_ptr;
+ u32 ie_len; /* total length including id & length field */
+ struct brcmf_vs_tlv vndrie;
+};
+
+struct parsed_vndr_ies {
+ u32 count;
+ struct parsed_vndr_ie_info ie_info[MAX_VNDR_IE_NUMBER];
+};
+
/* Quarter dBm units to mW
* Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
* Table is offset so the last entry is largest mW value that fits in
@@ -353,6 +404,44 @@ brcmf_exec_dcmd_u32(struct net_device *ndev, u32 cmd, u32 *par)
return err;
}
+static s32
+brcmf_dev_iovar_setbuf_bsscfg(struct net_device *ndev, s8 *name,
+ void *param, s32 paramlen,
+ void *buf, s32 buflen, s32 bssidx)
+{
+ s32 err = -ENOMEM;
+ u32 len;
+
+ len = brcmf_c_mkiovar_bsscfg(name, param, paramlen,
+ buf, buflen, bssidx);
+ BUG_ON(!len);
+ if (len > 0)
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, buf, len);
+ if (err)
+ WL_ERR("error (%d)\n", err);
+
+ return err;
+}
+
+static s32
+brcmf_dev_iovar_getbuf_bsscfg(struct net_device *ndev, s8 *name,
+ void *param, s32 paramlen,
+ void *buf, s32 buflen, s32 bssidx)
+{
+ s32 err = -ENOMEM;
+ u32 len;
+
+ len = brcmf_c_mkiovar_bsscfg(name, param, paramlen,
+ buf, buflen, bssidx);
+ BUG_ON(!len);
+ if (len > 0)
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, buf, len);
+ if (err)
+ WL_ERR("error (%d)\n", err);
+
+ return err;
+}
+
static void convert_key_from_CPU(struct brcmf_wsec_key *key,
struct brcmf_wsec_key_le *key_le)
{
@@ -367,16 +456,22 @@ static void convert_key_from_CPU(struct brcmf_wsec_key *key,
memcpy(key_le->ea, key->ea, sizeof(key->ea));
}
-static int send_key_to_dongle(struct net_device *ndev,
- struct brcmf_wsec_key *key)
+static int
+send_key_to_dongle(struct brcmf_cfg80211_info *cfg, s32 bssidx,
+ struct net_device *ndev, struct brcmf_wsec_key *key)
{
int err;
struct brcmf_wsec_key_le key_le;
convert_key_from_CPU(key, &key_le);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_KEY, &key_le, sizeof(key_le));
+
+ err = brcmf_dev_iovar_setbuf_bsscfg(ndev, "wsec_key", &key_le,
+ sizeof(key_le),
+ cfg->extra_buf,
+ WL_EXTRA_BUF_MAX, bssidx);
+
if (err)
- WL_ERR("WLC_SET_KEY error (%d)\n", err);
+ WL_ERR("wsec_key error (%d)\n", err);
return err;
}
@@ -385,14 +480,12 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- struct wireless_dev *wdev;
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
s32 infra = 0;
+ s32 ap = 0;
s32 err = 0;
- WL_TRACE("Enter\n");
- if (!check_sys_up(wiphy))
- return -EIO;
+ WL_TRACE("Enter, ndev=%p, type=%d\n", ndev, type);
switch (type) {
case NL80211_IFTYPE_MONITOR:
@@ -401,29 +494,44 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
type);
return -EOPNOTSUPP;
case NL80211_IFTYPE_ADHOC:
- cfg_priv->conf->mode = WL_MODE_IBSS;
+ cfg->conf->mode = WL_MODE_IBSS;
infra = 0;
break;
case NL80211_IFTYPE_STATION:
- cfg_priv->conf->mode = WL_MODE_BSS;
+ cfg->conf->mode = WL_MODE_BSS;
infra = 1;
break;
+ case NL80211_IFTYPE_AP:
+ cfg->conf->mode = WL_MODE_AP;
+ ap = 1;
+ break;
default:
err = -EINVAL;
goto done;
}
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra);
- if (err) {
- WL_ERR("WLC_SET_INFRA error (%d)\n", err);
- err = -EAGAIN;
+ if (ap) {
+ set_bit(WL_STATUS_AP_CREATING, &cfg->status);
+ if (!cfg->ap_info)
+ cfg->ap_info = kzalloc(sizeof(*cfg->ap_info),
+ GFP_KERNEL);
+ if (!cfg->ap_info) {
+ err = -ENOMEM;
+ goto done;
+ }
+ WL_INFO("IF Type = AP\n");
} else {
- wdev = ndev->ieee80211_ptr;
- wdev->iftype = type;
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra);
+ if (err) {
+ WL_ERR("WLC_SET_INFRA error (%d)\n", err);
+ err = -EAGAIN;
+ goto done;
+ }
+ WL_INFO("IF Type = %s\n",
+ (cfg->conf->mode == WL_MODE_IBSS) ?
+ "Adhoc" : "Infra");
}
-
- WL_INFO("IF Type = %s\n",
- (cfg_priv->conf->mode == WL_MODE_IBSS) ? "Adhoc" : "Infra");
+ ndev->ieee80211_ptr->iftype = type;
done:
WL_TRACE("Exit\n");
@@ -474,12 +582,55 @@ brcmf_dev_intvar_get(struct net_device *ndev, s8 *name, s32 *retval)
return err;
}
+static s32
+brcmf_dev_intvar_set_bsscfg(struct net_device *ndev, s8 *name, u32 val,
+ s32 bssidx)
+{
+ s8 buf[BRCMF_DCMD_SMLEN];
+ __le32 val_le;
+
+ val_le = cpu_to_le32(val);
+
+ return brcmf_dev_iovar_setbuf_bsscfg(ndev, name, &val_le,
+ sizeof(val_le), buf, sizeof(buf),
+ bssidx);
+}
+
+static s32
+brcmf_dev_intvar_get_bsscfg(struct net_device *ndev, s8 *name, s32 *val,
+ s32 bssidx)
+{
+ s8 buf[BRCMF_DCMD_SMLEN];
+ s32 err;
+ __le32 val_le;
+
+ memset(buf, 0, sizeof(buf));
+ err = brcmf_dev_iovar_getbuf_bsscfg(ndev, name, val, sizeof(*val), buf,
+ sizeof(buf), bssidx);
+ if (err == 0) {
+ memcpy(&val_le, buf, sizeof(val_le));
+ *val = le32_to_cpu(val_le);
+ }
+ return err;
+}
+
+
+/*
+ * For now brcmf_find_bssidx will return 0. Once p2p gets implemented this
+ * should return the bssidx matching the given ndev.
+ */
+static s32
+brcmf_find_bssidx(struct brcmf_cfg80211_info *cfg, struct net_device *ndev)
+{
+ return 0;
+}
+
static void brcmf_set_mpc(struct net_device *ndev, int mpc)
{
s32 err = 0;
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
- if (test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+ if (test_bit(WL_STATUS_READY, &cfg->status)) {
err = brcmf_dev_intvar_set(ndev, "mpc", mpc);
if (err) {
WL_ERR("fail to set mpc\n");
@@ -489,8 +640,8 @@ static void brcmf_set_mpc(struct net_device *ndev, int mpc)
}
}
-static void wl_iscan_prep(struct brcmf_scan_params_le *params_le,
- struct brcmf_ssid *ssid)
+static void brcmf_iscan_prep(struct brcmf_scan_params_le *params_le,
+ struct brcmf_ssid *ssid)
{
memcpy(params_le->bssid, ether_bcast, ETH_ALEN);
params_le->bss_type = DOT11_BSSTYPE_ANY;
@@ -546,7 +697,7 @@ brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
return -ENOMEM;
BUG_ON(params_size >= BRCMF_DCMD_SMLEN);
- wl_iscan_prep(&params->params_le, ssid);
+ brcmf_iscan_prep(&params->params_le, ssid);
params->version = cpu_to_le32(BRCMF_ISCAN_REQ_VERSION);
params->action = cpu_to_le16(action);
@@ -565,10 +716,10 @@ brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
return err;
}
-static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_do_iscan(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
- struct net_device *ndev = cfg_to_ndev(cfg_priv);
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
+ struct net_device *ndev = cfg_to_ndev(cfg);
struct brcmf_ssid ssid;
__le32 passive_scan;
s32 err = 0;
@@ -578,19 +729,19 @@ static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv)
iscan->state = WL_ISCAN_STATE_SCANING;
- passive_scan = cfg_priv->active_scan ? 0 : cpu_to_le32(1);
- err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCMF_C_SET_PASSIVE_SCAN,
+ passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
+ err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCMF_C_SET_PASSIVE_SCAN,
&passive_scan, sizeof(passive_scan));
if (err) {
WL_ERR("error (%d)\n", err);
return err;
}
brcmf_set_mpc(ndev, 0);
- cfg_priv->iscan_kickstart = true;
+ cfg->iscan_kickstart = true;
err = brcmf_run_iscan(iscan, &ssid, BRCMF_SCAN_ACTION_START);
if (err) {
brcmf_set_mpc(ndev, 1);
- cfg_priv->iscan_kickstart = false;
+ cfg->iscan_kickstart = false;
return err;
}
mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
@@ -599,31 +750,31 @@ static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv)
}
static s32
-__brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request,
- struct cfg80211_ssid *this_ssid)
+brcmf_cfg80211_iscan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request,
+ struct cfg80211_ssid *this_ssid)
{
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
struct cfg80211_ssid *ssids;
- struct brcmf_cfg80211_scan_req *sr = cfg_priv->scan_req_int;
+ struct brcmf_cfg80211_scan_req *sr = cfg->scan_req_int;
__le32 passive_scan;
bool iscan_req;
bool spec_scan;
s32 err = 0;
u32 SSID_len;
- if (test_bit(WL_STATUS_SCANNING, &cfg_priv->status)) {
- WL_ERR("Scanning already : status (%lu)\n", cfg_priv->status);
+ if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
+ WL_ERR("Scanning already : status (%lu)\n", cfg->status);
return -EAGAIN;
}
- if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status)) {
+ if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg->status)) {
WL_ERR("Scanning being aborted : status (%lu)\n",
- cfg_priv->status);
+ cfg->status);
return -EAGAIN;
}
- if (test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) {
+ if (test_bit(WL_STATUS_CONNECTING, &cfg->status)) {
WL_ERR("Connecting : status (%lu)\n",
- cfg_priv->status);
+ cfg->status);
return -EAGAIN;
}
@@ -632,7 +783,7 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
if (request) {
/* scan bss */
ssids = request->ssids;
- if (cfg_priv->iscan_on && (!ssids || !ssids->ssid_len))
+ if (cfg->iscan_on && (!ssids || !ssids->ssid_len))
iscan_req = true;
} else {
/* scan in ibss */
@@ -640,10 +791,10 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
ssids = this_ssid;
}
- cfg_priv->scan_request = request;
- set_bit(WL_STATUS_SCANNING, &cfg_priv->status);
+ cfg->scan_request = request;
+ set_bit(WL_STATUS_SCANNING, &cfg->status);
if (iscan_req) {
- err = brcmf_do_iscan(cfg_priv);
+ err = brcmf_do_iscan(cfg);
if (!err)
return err;
else
@@ -662,7 +813,7 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
WL_SCAN("Broadcast scan\n");
}
- passive_scan = cfg_priv->active_scan ? 0 : cpu_to_le32(1);
+ passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
&passive_scan, sizeof(passive_scan));
if (err) {
@@ -687,8 +838,346 @@ __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
return 0;
scan_out:
- clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
- cfg_priv->scan_request = NULL;
+ clear_bit(WL_STATUS_SCANNING, &cfg->status);
+ cfg->scan_request = NULL;
+ return err;
+}
+
+static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
+ struct cfg80211_scan_request *request)
+{
+ u32 n_ssids;
+ u32 n_channels;
+ s32 i;
+ s32 offset;
+ u16 chanspec;
+ u16 channel;
+ struct ieee80211_channel *req_channel;
+ char *ptr;
+ struct brcmf_ssid_le ssid_le;
+
+ memcpy(params_le->bssid, ether_bcast, ETH_ALEN);
+ params_le->bss_type = DOT11_BSSTYPE_ANY;
+ params_le->scan_type = 0;
+ params_le->channel_num = 0;
+ params_le->nprobes = cpu_to_le32(-1);
+ params_le->active_time = cpu_to_le32(-1);
+ params_le->passive_time = cpu_to_le32(-1);
+ params_le->home_time = cpu_to_le32(-1);
+ memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));
+
+ /* if request is null exit so it will be all channel broadcast scan */
+ if (!request)
+ return;
+
+ n_ssids = request->n_ssids;
+ n_channels = request->n_channels;
+ /* Copy channel array if applicable */
+ WL_SCAN("### List of channelspecs to scan ### %d\n", n_channels);
+ if (n_channels > 0) {
+ for (i = 0; i < n_channels; i++) {
+ chanspec = 0;
+ req_channel = request->channels[i];
+ channel = ieee80211_frequency_to_channel(
+ req_channel->center_freq);
+ if (req_channel->band == IEEE80211_BAND_2GHZ)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
+
+ if (req_channel->flags & IEEE80211_CHAN_NO_HT40) {
+ chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+ } else {
+ chanspec |= WL_CHANSPEC_BW_40;
+ if (req_channel->flags &
+ IEEE80211_CHAN_NO_HT40PLUS)
+ chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
+ else
+ chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
+ }
+
+ chanspec |= (channel & WL_CHANSPEC_CHAN_MASK);
+ WL_SCAN("Chan : %d, Channel spec: %x\n",
+ channel, chanspec);
+ params_le->channel_list[i] = cpu_to_le16(chanspec);
+ }
+ } else {
+ WL_SCAN("Scanning all channels\n");
+ }
+ /* Copy ssid array if applicable */
+ WL_SCAN("### List of SSIDs to scan ### %d\n", n_ssids);
+ if (n_ssids > 0) {
+ offset = offsetof(struct brcmf_scan_params_le, channel_list) +
+ n_channels * sizeof(u16);
+ offset = roundup(offset, sizeof(u32));
+ ptr = (char *)params_le + offset;
+ for (i = 0; i < n_ssids; i++) {
+ memset(&ssid_le, 0, sizeof(ssid_le));
+ ssid_le.SSID_len =
+ cpu_to_le32(request->ssids[i].ssid_len);
+ memcpy(ssid_le.SSID, request->ssids[i].ssid,
+ request->ssids[i].ssid_len);
+ if (!ssid_le.SSID_len)
+ WL_SCAN("%d: Broadcast scan\n", i);
+ else
+ WL_SCAN("%d: scan for %s size =%d\n", i,
+ ssid_le.SSID, ssid_le.SSID_len);
+ memcpy(ptr, &ssid_le, sizeof(ssid_le));
+ ptr += sizeof(ssid_le);
+ }
+ } else {
+ WL_SCAN("Broadcast scan %p\n", request->ssids);
+ if ((request->ssids) && request->ssids->ssid_len) {
+ WL_SCAN("SSID %s len=%d\n", params_le->ssid_le.SSID,
+ request->ssids->ssid_len);
+ params_le->ssid_le.SSID_len =
+ cpu_to_le32(request->ssids->ssid_len);
+ memcpy(&params_le->ssid_le.SSID, request->ssids->ssid,
+ request->ssids->ssid_len);
+ }
+ }
+ /* Adding mask to channel numbers */
+ params_le->channel_num =
+ cpu_to_le32((n_ssids << BRCMF_SCAN_PARAMS_NSSID_SHIFT) |
+ (n_channels & BRCMF_SCAN_PARAMS_COUNT_MASK));
+}
+
+static s32
+brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ bool aborted, bool fw_abort)
+{
+ struct brcmf_scan_params_le params_le;
+ struct cfg80211_scan_request *scan_request;
+ s32 err = 0;
+
+ WL_SCAN("Enter\n");
+
+ /* clear scan request, because the FW abort can cause a second call */
+ /* to this function and might cause a double cfg80211_scan_done */
+ scan_request = cfg->scan_request;
+ cfg->scan_request = NULL;
+
+ if (timer_pending(&cfg->escan_timeout))
+ del_timer_sync(&cfg->escan_timeout);
+
+ if (fw_abort) {
+ /* Do a scan abort to stop the driver's scan engine */
+ WL_SCAN("ABORT scan in firmware\n");
+ memset(&params_le, 0, sizeof(params_le));
+ memcpy(params_le.bssid, ether_bcast, ETH_ALEN);
+ params_le.bss_type = DOT11_BSSTYPE_ANY;
+ params_le.scan_type = 0;
+ params_le.channel_num = cpu_to_le32(1);
+ params_le.nprobes = cpu_to_le32(1);
+ params_le.active_time = cpu_to_le32(-1);
+ params_le.passive_time = cpu_to_le32(-1);
+ params_le.home_time = cpu_to_le32(-1);
+ /* Scan is aborted by setting channel_list[0] to -1 */
+ params_le.channel_list[0] = cpu_to_le16(-1);
+ /* E-Scan (or any other type) can be aborted by SCAN */
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &params_le,
+ sizeof(params_le));
+ if (err)
+ WL_ERR("Scan abort failed\n");
+ }
+ /*
+ * e-scan can be initiated by scheduled scan
+ * which takes precedence.
+ */
+ if (cfg->sched_escan) {
+ WL_SCAN("scheduled scan completed\n");
+ cfg->sched_escan = false;
+ if (!aborted)
+ cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
+ brcmf_set_mpc(ndev, 1);
+ } else if (scan_request) {
+ WL_SCAN("ESCAN Completed scan: %s\n",
+ aborted ? "Aborted" : "Done");
+ cfg80211_scan_done(scan_request, aborted);
+ brcmf_set_mpc(ndev, 1);
+ }
+ if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
+ WL_ERR("Scan complete while device not scanning\n");
+ return -EPERM;
+ }
+
+ return err;
+}
+
+static s32
+brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct net_device *ndev,
+ struct cfg80211_scan_request *request, u16 action)
+{
+ s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE +
+ offsetof(struct brcmf_escan_params_le, params_le);
+ struct brcmf_escan_params_le *params;
+ s32 err = 0;
+
+ WL_SCAN("E-SCAN START\n");
+
+ if (request != NULL) {
+ /* Allocate space for populating channels in struct */
+ params_size += sizeof(u32) * ((request->n_channels + 1) / 2);
+
+ /* Allocate space for populating ssids in struct */
+ params_size += sizeof(struct brcmf_ssid) * request->n_ssids;
+ }
+
+ params = kzalloc(params_size, GFP_KERNEL);
+ if (!params) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ BUG_ON(params_size + sizeof("escan") >= BRCMF_DCMD_MEDLEN);
+ brcmf_escan_prep(&params->params_le, request);
+ params->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION);
+ params->action = cpu_to_le16(action);
+ params->sync_id = cpu_to_le16(0x1234);
+
+ err = brcmf_dev_iovar_setbuf(ndev, "escan", params, params_size,
+ cfg->escan_ioctl_buf, BRCMF_DCMD_MEDLEN);
+ if (err) {
+ if (err == -EBUSY)
+ WL_INFO("system busy : escan canceled\n");
+ else
+ WL_ERR("error (%d)\n", err);
+ }
+
+ kfree(params);
+exit:
+ return err;
+}
+
+static s32
+brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
+ struct net_device *ndev, struct cfg80211_scan_request *request)
+{
+ s32 err;
+ __le32 passive_scan;
+ struct brcmf_scan_results *results;
+
+ WL_SCAN("Enter\n");
+ cfg->escan_info.ndev = ndev;
+ cfg->escan_info.wiphy = wiphy;
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANNING;
+ passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
+ &passive_scan, sizeof(passive_scan));
+ if (err) {
+ WL_ERR("error (%d)\n", err);
+ return err;
+ }
+ brcmf_set_mpc(ndev, 0);
+ results = (struct brcmf_scan_results *)cfg->escan_info.escan_buf;
+ results->version = 0;
+ results->count = 0;
+ results->buflen = WL_ESCAN_RESULTS_FIXED_SIZE;
+
+ err = brcmf_run_escan(cfg, ndev, request, WL_ESCAN_ACTION_START);
+ if (err)
+ brcmf_set_mpc(ndev, 1);
+ return err;
+}
+
+static s32
+brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request,
+ struct cfg80211_ssid *this_ssid)
+{
+ struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+ struct cfg80211_ssid *ssids;
+ struct brcmf_cfg80211_scan_req *sr = cfg->scan_req_int;
+ __le32 passive_scan;
+ bool escan_req;
+ bool spec_scan;
+ s32 err;
+ u32 SSID_len;
+
+ WL_SCAN("START ESCAN\n");
+
+ if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
+ WL_ERR("Scanning already : status (%lu)\n", cfg->status);
+ return -EAGAIN;
+ }
+ if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg->status)) {
+ WL_ERR("Scanning being aborted : status (%lu)\n",
+ cfg->status);
+ return -EAGAIN;
+ }
+ if (test_bit(WL_STATUS_CONNECTING, &cfg->status)) {
+ WL_ERR("Connecting : status (%lu)\n",
+ cfg->status);
+ return -EAGAIN;
+ }
+
+ /* Arm scan timeout timer */
+ mod_timer(&cfg->escan_timeout, jiffies +
+ WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
+
+ escan_req = false;
+ if (request) {
+ /* scan bss */
+ ssids = request->ssids;
+ escan_req = true;
+ } else {
+ /* scan in ibss */
+ /* we don't do escan in ibss */
+ ssids = this_ssid;
+ }
+
+ cfg->scan_request = request;
+ set_bit(WL_STATUS_SCANNING, &cfg->status);
+ if (escan_req) {
+ err = brcmf_do_escan(cfg, wiphy, ndev, request);
+ if (!err)
+ return err;
+ else
+ goto scan_out;
+ } else {
+ WL_SCAN("ssid \"%s\", ssid_len (%d)\n",
+ ssids->ssid, ssids->ssid_len);
+ memset(&sr->ssid_le, 0, sizeof(sr->ssid_le));
+ SSID_len = min_t(u8, sizeof(sr->ssid_le.SSID), ssids->ssid_len);
+ sr->ssid_le.SSID_len = cpu_to_le32(0);
+ spec_scan = false;
+ if (SSID_len) {
+ memcpy(sr->ssid_le.SSID, ssids->ssid, SSID_len);
+ sr->ssid_le.SSID_len = cpu_to_le32(SSID_len);
+ spec_scan = true;
+ } else
+ WL_SCAN("Broadcast scan\n");
+
+ passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
+ &passive_scan, sizeof(passive_scan));
+ if (err) {
+ WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
+ goto scan_out;
+ }
+ brcmf_set_mpc(ndev, 0);
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &sr->ssid_le,
+ sizeof(sr->ssid_le));
+ if (err) {
+ if (err == -EBUSY)
+ WL_INFO("BUSY: scan for \"%s\" canceled\n",
+ sr->ssid_le.SSID);
+ else
+ WL_ERR("WLC_SCAN error (%d)\n", err);
+
+ brcmf_set_mpc(ndev, 1);
+ goto scan_out;
+ }
+ }
+
+ return 0;
+
+scan_out:
+ clear_bit(WL_STATUS_SCANNING, &cfg->status);
+ if (timer_pending(&cfg->escan_timeout))
+ del_timer_sync(&cfg->escan_timeout);
+ cfg->scan_request = NULL;
return err;
}
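
In brcmf_escan_prep() above, each requested cfg80211 channel is folded into a 16-bit chanspec by OR-ing together band, bandwidth, control-sideband and channel-number fields. The compact illustration below shows the same packing scheme; the SPEC_* bit values are made up for the example and are not the driver's real WL_CHANSPEC_* constants.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field encodings only -- not the driver's WL_CHANSPEC_* values. */
#define SPEC_BAND_2G   0x0000
#define SPEC_BAND_5G   0x1000
#define SPEC_BW_20     0x0800
#define SPEC_BW_40     0x0400
#define SPEC_SB_NONE   0x0300
#define SPEC_SB_LOWER  0x0100
#define SPEC_SB_UPPER  0x0200
#define SPEC_CHAN_MASK 0x00ff

static uint16_t make_chanspec(int channel, int is_5ghz, int ht40, int ht40_plus_ok)
{
    uint16_t spec = is_5ghz ? SPEC_BAND_5G : SPEC_BAND_2G;

    if (!ht40)
        spec |= SPEC_BW_20 | SPEC_SB_NONE;                    /* 20 MHz, no sideband */
    else
        spec |= SPEC_BW_40 |
                (ht40_plus_ok ? SPEC_SB_UPPER : SPEC_SB_LOWER);

    return spec | (channel & SPEC_CHAN_MASK);                 /* low bits carry the channel */
}

int main(void)
{
    printf("ch 6/20MHz  -> 0x%04x\n", (unsigned)make_chanspec(6, 0, 0, 0));
    printf("ch 36/40MHz -> 0x%04x\n", (unsigned)make_chanspec(36, 1, 1, 1));
    return 0;
}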
@@ -697,6 +1186,7 @@ brcmf_cfg80211_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request)
{
struct net_device *ndev = request->wdev->netdev;
+ struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
s32 err = 0;
WL_TRACE("Enter\n");
@@ -704,7 +1194,11 @@ brcmf_cfg80211_scan(struct wiphy *wiphy,
if (!check_sys_up(wiphy))
return -EIO;
- err = __brcmf_cfg80211_scan(wiphy, ndev, request, NULL);
+ if (cfg->iscan_on)
+ err = brcmf_cfg80211_iscan(wiphy, ndev, request, NULL);
+ else if (cfg->escan_on)
+ err = brcmf_cfg80211_escan(wiphy, ndev, request, NULL);
+
if (err)
WL_ERR("scan error (%d)\n", err);
@@ -749,8 +1243,8 @@ static s32 brcmf_set_retry(struct net_device *ndev, u32 retry, bool l)
static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- struct net_device *ndev = cfg_to_ndev(cfg_priv);
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct net_device *ndev = cfg_to_ndev(cfg);
s32 err = 0;
WL_TRACE("Enter\n");
@@ -758,30 +1252,30 @@ static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
return -EIO;
if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
- (cfg_priv->conf->rts_threshold != wiphy->rts_threshold)) {
- cfg_priv->conf->rts_threshold = wiphy->rts_threshold;
- err = brcmf_set_rts(ndev, cfg_priv->conf->rts_threshold);
+ (cfg->conf->rts_threshold != wiphy->rts_threshold)) {
+ cfg->conf->rts_threshold = wiphy->rts_threshold;
+ err = brcmf_set_rts(ndev, cfg->conf->rts_threshold);
if (!err)
goto done;
}
if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
- (cfg_priv->conf->frag_threshold != wiphy->frag_threshold)) {
- cfg_priv->conf->frag_threshold = wiphy->frag_threshold;
- err = brcmf_set_frag(ndev, cfg_priv->conf->frag_threshold);
+ (cfg->conf->frag_threshold != wiphy->frag_threshold)) {
+ cfg->conf->frag_threshold = wiphy->frag_threshold;
+ err = brcmf_set_frag(ndev, cfg->conf->frag_threshold);
if (!err)
goto done;
}
if (changed & WIPHY_PARAM_RETRY_LONG
- && (cfg_priv->conf->retry_long != wiphy->retry_long)) {
- cfg_priv->conf->retry_long = wiphy->retry_long;
- err = brcmf_set_retry(ndev, cfg_priv->conf->retry_long, true);
+ && (cfg->conf->retry_long != wiphy->retry_long)) {
+ cfg->conf->retry_long = wiphy->retry_long;
+ err = brcmf_set_retry(ndev, cfg->conf->retry_long, true);
if (!err)
goto done;
}
if (changed & WIPHY_PARAM_RETRY_SHORT
- && (cfg_priv->conf->retry_short != wiphy->retry_short)) {
- cfg_priv->conf->retry_short = wiphy->retry_short;
- err = brcmf_set_retry(ndev, cfg_priv->conf->retry_short, false);
+ && (cfg->conf->retry_short != wiphy->retry_short)) {
+ cfg->conf->retry_short = wiphy->retry_short;
+ err = brcmf_set_retry(ndev, cfg->conf->retry_short, false);
if (!err)
goto done;
}
@@ -791,61 +1285,6 @@ done:
return err;
}
-static void *brcmf_read_prof(struct brcmf_cfg80211_priv *cfg_priv, s32 item)
-{
- switch (item) {
- case WL_PROF_SEC:
- return &cfg_priv->profile->sec;
- case WL_PROF_BSSID:
- return &cfg_priv->profile->bssid;
- case WL_PROF_SSID:
- return &cfg_priv->profile->ssid;
- }
- WL_ERR("invalid item (%d)\n", item);
- return NULL;
-}
-
-static s32
-brcmf_update_prof(struct brcmf_cfg80211_priv *cfg_priv,
- const struct brcmf_event_msg *e, void *data, s32 item)
-{
- s32 err = 0;
- struct brcmf_ssid *ssid;
-
- switch (item) {
- case WL_PROF_SSID:
- ssid = (struct brcmf_ssid *) data;
- memset(cfg_priv->profile->ssid.SSID, 0,
- sizeof(cfg_priv->profile->ssid.SSID));
- memcpy(cfg_priv->profile->ssid.SSID,
- ssid->SSID, ssid->SSID_len);
- cfg_priv->profile->ssid.SSID_len = ssid->SSID_len;
- break;
- case WL_PROF_BSSID:
- if (data)
- memcpy(cfg_priv->profile->bssid, data, ETH_ALEN);
- else
- memset(cfg_priv->profile->bssid, 0, ETH_ALEN);
- break;
- case WL_PROF_SEC:
- memcpy(&cfg_priv->profile->sec, data,
- sizeof(cfg_priv->profile->sec));
- break;
- case WL_PROF_BEACONINT:
- cfg_priv->profile->beacon_interval = *(u16 *)data;
- break;
- case WL_PROF_DTIMPERIOD:
- cfg_priv->profile->dtim_period = *(u8 *)data;
- break;
- default:
- WL_ERR("unsupported item (%d)\n", item);
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof)
{
memset(prof, 0, sizeof(*prof));
@@ -878,20 +1317,20 @@ static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params,
}
}
-static void brcmf_link_down(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_link_down(struct brcmf_cfg80211_info *cfg)
{
struct net_device *ndev = NULL;
s32 err = 0;
WL_TRACE("Enter\n");
- if (cfg_priv->link_up) {
- ndev = cfg_to_ndev(cfg_priv);
+ if (cfg->link_up) {
+ ndev = cfg_to_ndev(cfg);
WL_INFO("Call WLC_DISASSOC to stop excess roaming\n ");
err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, NULL, 0);
if (err)
WL_ERR("WLC_DISASSOC failed (%d)\n", err);
- cfg_priv->link_up = false;
+ cfg->link_up = false;
}
WL_TRACE("Exit\n");
}
@@ -900,13 +1339,13 @@ static s32
brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_ibss_params *params)
{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_profile *profile = cfg->profile;
struct brcmf_join_params join_params;
size_t join_params_size = 0;
s32 err = 0;
s32 wsec = 0;
s32 bcnprd;
- struct brcmf_ssid ssid;
WL_TRACE("Enter\n");
if (!check_sys_up(wiphy))
@@ -919,7 +1358,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
return -EOPNOTSUPP;
}
- set_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+ set_bit(WL_STATUS_CONNECTING, &cfg->status);
if (params->bssid)
WL_CONN("BSSID: %pM\n", params->bssid);
@@ -982,40 +1421,38 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
memset(&join_params, 0, sizeof(struct brcmf_join_params));
/* SSID */
- ssid.SSID_len = min_t(u32, params->ssid_len, 32);
- memcpy(ssid.SSID, params->ssid, ssid.SSID_len);
- memcpy(join_params.ssid_le.SSID, params->ssid, ssid.SSID_len);
- join_params.ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len);
+ profile->ssid.SSID_len = min_t(u32, params->ssid_len, 32);
+ memcpy(profile->ssid.SSID, params->ssid, profile->ssid.SSID_len);
+ memcpy(join_params.ssid_le.SSID, params->ssid, profile->ssid.SSID_len);
+ join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
join_params_size = sizeof(join_params.ssid_le);
- brcmf_update_prof(cfg_priv, NULL, &ssid, WL_PROF_SSID);
/* BSSID */
if (params->bssid) {
memcpy(join_params.params_le.bssid, params->bssid, ETH_ALEN);
join_params_size = sizeof(join_params.ssid_le) +
BRCMF_ASSOC_PARAMS_FIXED_SIZE;
+ memcpy(profile->bssid, params->bssid, ETH_ALEN);
} else {
memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);
+ memset(profile->bssid, 0, ETH_ALEN);
}
- brcmf_update_prof(cfg_priv, NULL,
- &join_params.params_le.bssid, WL_PROF_BSSID);
-
/* Channel */
if (params->channel) {
u32 target_channel;
- cfg_priv->channel =
+ cfg->channel =
ieee80211_frequency_to_channel(
params->channel->center_freq);
if (params->channel_fixed) {
/* adding chanspec */
- brcmf_ch_to_chanspec(cfg_priv->channel,
+ brcmf_ch_to_chanspec(cfg->channel,
&join_params, &join_params_size);
}
/* set channel for starter */
- target_channel = cfg_priv->channel;
+ target_channel = cfg->channel;
err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_CHANNEL,
&target_channel);
if (err) {
@@ -1023,9 +1460,9 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
} else
- cfg_priv->channel = 0;
+ cfg->channel = 0;
- cfg_priv->ibss_starter = false;
+ cfg->ibss_starter = false;
err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID,
@@ -1037,7 +1474,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
done:
if (err)
- clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+ clear_bit(WL_STATUS_CONNECTING, &cfg->status);
WL_TRACE("Exit\n");
return err;
}
@@ -1045,14 +1482,14 @@ done:
static s32
brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
s32 err = 0;
WL_TRACE("Enter\n");
if (!check_sys_up(wiphy))
return -EIO;
- brcmf_link_down(cfg_priv);
+ brcmf_link_down(cfg);
WL_TRACE("Exit\n");
@@ -1062,7 +1499,8 @@ brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
static s32 brcmf_set_wpa_version(struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_profile *profile = cfg->profile;
struct brcmf_cfg80211_security *sec;
s32 val = 0;
s32 err = 0;
@@ -1079,7 +1517,7 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
WL_ERR("set wpa_auth failed (%d)\n", err);
return err;
}
- sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+ sec = &profile->sec;
sec->wpa_versions = sme->crypto.wpa_versions;
return err;
}
@@ -1087,7 +1525,8 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
static s32 brcmf_set_auth_type(struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_profile *profile = cfg->profile;
struct brcmf_cfg80211_security *sec;
s32 val = 0;
s32 err = 0;
@@ -1118,7 +1557,7 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
WL_ERR("set auth failed (%d)\n", err);
return err;
}
- sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+ sec = &profile->sec;
sec->auth_type = sme->auth_type;
return err;
}
@@ -1127,7 +1566,8 @@ static s32
brcmf_set_set_cipher(struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_profile *profile = cfg->profile;
struct brcmf_cfg80211_security *sec;
s32 pval = 0;
s32 gval = 0;
@@ -1183,7 +1623,7 @@ brcmf_set_set_cipher(struct net_device *ndev,
return err;
}
- sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+ sec = &profile->sec;
sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
sec->cipher_group = sme->crypto.cipher_group;
@@ -1193,7 +1633,8 @@ brcmf_set_set_cipher(struct net_device *ndev,
static s32
brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
{
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_profile *profile = cfg->profile;
struct brcmf_cfg80211_security *sec;
s32 val = 0;
s32 err = 0;
@@ -1239,74 +1680,76 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
return err;
}
}
- sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+ sec = &profile->sec;
sec->wpa_auth = sme->crypto.akm_suites[0];
return err;
}
static s32
-brcmf_set_wep_sharedkey(struct net_device *ndev,
- struct cfg80211_connect_params *sme)
+brcmf_set_sharedkey(struct net_device *ndev,
+ struct cfg80211_connect_params *sme)
{
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_profile *profile = cfg->profile;
struct brcmf_cfg80211_security *sec;
struct brcmf_wsec_key key;
s32 val;
s32 err = 0;
+ s32 bssidx;
WL_CONN("key len (%d)\n", sme->key_len);
if (sme->key_len == 0)
return 0;
- sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+ sec = &profile->sec;
WL_CONN("wpa_versions 0x%x cipher_pairwise 0x%x\n",
sec->wpa_versions, sec->cipher_pairwise);
if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2))
return 0;
- if (sec->cipher_pairwise &
- (WLAN_CIPHER_SUITE_WEP40 | WLAN_CIPHER_SUITE_WEP104)) {
- memset(&key, 0, sizeof(key));
- key.len = (u32) sme->key_len;
- key.index = (u32) sme->key_idx;
- if (key.len > sizeof(key.data)) {
- WL_ERR("Too long key length (%u)\n", key.len);
- return -EINVAL;
- }
- memcpy(key.data, sme->key, key.len);
- key.flags = BRCMF_PRIMARY_KEY;
- switch (sec->cipher_pairwise) {
- case WLAN_CIPHER_SUITE_WEP40:
- key.algo = CRYPTO_ALGO_WEP1;
- break;
- case WLAN_CIPHER_SUITE_WEP104:
- key.algo = CRYPTO_ALGO_WEP128;
- break;
- default:
- WL_ERR("Invalid algorithm (%d)\n",
- sme->crypto.ciphers_pairwise[0]);
- return -EINVAL;
- }
- /* Set the new key/index */
- WL_CONN("key length (%d) key index (%d) algo (%d)\n",
- key.len, key.index, key.algo);
- WL_CONN("key \"%s\"\n", key.data);
- err = send_key_to_dongle(ndev, &key);
- if (err)
- return err;
+ if (!(sec->cipher_pairwise &
+ (WLAN_CIPHER_SUITE_WEP40 | WLAN_CIPHER_SUITE_WEP104)))
+ return 0;
- if (sec->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) {
- WL_CONN("set auth_type to shared key\n");
- val = 1; /* shared key */
- err = brcmf_dev_intvar_set(ndev, "auth", val);
- if (err) {
- WL_ERR("set auth failed (%d)\n", err);
- return err;
- }
- }
+ memset(&key, 0, sizeof(key));
+ key.len = (u32) sme->key_len;
+ key.index = (u32) sme->key_idx;
+ if (key.len > sizeof(key.data)) {
+ WL_ERR("Too long key length (%u)\n", key.len);
+ return -EINVAL;
+ }
+ memcpy(key.data, sme->key, key.len);
+ key.flags = BRCMF_PRIMARY_KEY;
+ switch (sec->cipher_pairwise) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ key.algo = CRYPTO_ALGO_WEP1;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key.algo = CRYPTO_ALGO_WEP128;
+ break;
+ default:
+ WL_ERR("Invalid algorithm (%d)\n",
+ sme->crypto.ciphers_pairwise[0]);
+ return -EINVAL;
+ }
+ /* Set the new key/index */
+ WL_CONN("key length (%d) key index (%d) algo (%d)\n",
+ key.len, key.index, key.algo);
+ WL_CONN("key \"%s\"\n", key.data);
+ bssidx = brcmf_find_bssidx(cfg, ndev);
+ err = send_key_to_dongle(cfg, bssidx, ndev, &key);
+ if (err)
+ return err;
+
+ if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
+ WL_CONN("set auth_type to shared key\n");
+ val = WL_AUTH_SHARED_KEY; /* shared key */
+ err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", val, bssidx);
+ if (err)
+ WL_ERR("set auth failed (%d)\n", err);
}
return err;
}
@@ -1315,7 +1758,8 @@ static s32
brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_connect_params *sme)
{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_profile *profile = cfg->profile;
struct ieee80211_channel *chan = sme->channel;
struct brcmf_join_params join_params;
size_t join_params_size;
@@ -1332,15 +1776,15 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
return -EOPNOTSUPP;
}
- set_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+ set_bit(WL_STATUS_CONNECTING, &cfg->status);
if (chan) {
- cfg_priv->channel =
+ cfg->channel =
ieee80211_frequency_to_channel(chan->center_freq);
WL_CONN("channel (%d), center_req (%d)\n",
- cfg_priv->channel, chan->center_freq);
+ cfg->channel, chan->center_freq);
} else
- cfg_priv->channel = 0;
+ cfg->channel = 0;
WL_INFO("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);
@@ -1368,20 +1812,20 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
- err = brcmf_set_wep_sharedkey(ndev, sme);
+ err = brcmf_set_sharedkey(ndev, sme);
if (err) {
- WL_ERR("brcmf_set_wep_sharedkey failed (%d)\n", err);
+ WL_ERR("brcmf_set_sharedkey failed (%d)\n", err);
goto done;
}
memset(&join_params, 0, sizeof(join_params));
join_params_size = sizeof(join_params.ssid_le);
- ssid.SSID_len = min_t(u32, sizeof(ssid.SSID), (u32)sme->ssid_len);
- memcpy(&join_params.ssid_le.SSID, sme->ssid, ssid.SSID_len);
- memcpy(&ssid.SSID, sme->ssid, ssid.SSID_len);
- join_params.ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len);
- brcmf_update_prof(cfg_priv, NULL, &ssid, WL_PROF_SSID);
+ profile->ssid.SSID_len = min_t(u32,
+ sizeof(ssid.SSID), (u32)sme->ssid_len);
+ memcpy(&join_params.ssid_le.SSID, sme->ssid, profile->ssid.SSID_len);
+ memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len);
+ join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN);
@@ -1389,7 +1833,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
WL_CONN("ssid \"%s\", len (%d)\n",
ssid.SSID, ssid.SSID_len);
- brcmf_ch_to_chanspec(cfg_priv->channel,
+ brcmf_ch_to_chanspec(cfg->channel,
&join_params, &join_params_size);
err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID,
&join_params, join_params_size);
@@ -1398,7 +1842,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
done:
if (err)
- clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+ clear_bit(WL_STATUS_CONNECTING, &cfg->status);
WL_TRACE("Exit\n");
return err;
}
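
The connect path above clamps the requested SSID length to the profile buffer, copies it into both the profile and the join parameters, and stores the length in little-endian form. A small userspace sketch of that bounded copy; the struct name and the 32-byte limit are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SSID_MAX 32u   /* assumed maximum SSID length */

struct join_ssid {
	uint32_t len;               /* written little-endian in the real iovar */
	uint8_t  ssid[SSID_MAX];
};

static void fill_join_ssid(struct join_ssid *js, const uint8_t *ssid, size_t len)
{
	size_t n = len < SSID_MAX ? len : SSID_MAX;   /* min_t() equivalent */

	memset(js, 0, sizeof(*js));
	memcpy(js->ssid, ssid, n);
	js->len = (uint32_t)n;      /* cpu_to_le32() would apply on big-endian hosts */
}

int main(void)
{
	struct join_ssid js;

	fill_join_ssid(&js, (const uint8_t *)"example-network", 15);
	printf("len=%u ssid=%.*s\n", js.len, (int)js.len, (const char *)js.ssid);
	return 0;
}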
@@ -1407,7 +1851,8 @@ static s32
brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
u16 reason_code)
{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_profile *profile = cfg->profile;
struct brcmf_scb_val_le scbval;
s32 err = 0;
@@ -1415,16 +1860,16 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
if (!check_sys_up(wiphy))
return -EIO;
- clear_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
+ clear_bit(WL_STATUS_CONNECTED, &cfg->status);
- memcpy(&scbval.ea, brcmf_read_prof(cfg_priv, WL_PROF_BSSID), ETH_ALEN);
+ memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
scbval.val = cpu_to_le32(reason_code);
err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, &scbval,
sizeof(struct brcmf_scb_val_le));
if (err)
WL_ERR("error (%d)\n", err);
- cfg_priv->link_up = false;
+ cfg->link_up = false;
WL_TRACE("Exit\n");
return err;
@@ -1435,8 +1880,8 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
enum nl80211_tx_power_setting type, s32 mbm)
{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- struct net_device *ndev = cfg_to_ndev(cfg_priv);
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct net_device *ndev = cfg_to_ndev(cfg);
u16 txpwrmw;
s32 err = 0;
s32 disable = 0;
@@ -1472,7 +1917,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
(s32) (brcmf_mw_to_qdbm(txpwrmw)));
if (err)
WL_ERR("qtxpower error (%d)\n", err);
- cfg_priv->conf->tx_power = dbm;
+ cfg->conf->tx_power = dbm;
done:
WL_TRACE("Exit\n");
@@ -1481,8 +1926,8 @@ done:
static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- struct net_device *ndev = cfg_to_ndev(cfg_priv);
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct net_device *ndev = cfg_to_ndev(cfg);
s32 txpwrdbm;
u8 result;
s32 err = 0;
@@ -1509,16 +1954,19 @@ static s32
brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_idx, bool unicast, bool multicast)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
u32 index;
u32 wsec;
s32 err = 0;
+ s32 bssidx;
WL_TRACE("Enter\n");
WL_CONN("key index (%d)\n", key_idx);
if (!check_sys_up(wiphy))
return -EIO;
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_WSEC, &wsec);
+ bssidx = brcmf_find_bssidx(cfg, ndev);
+ err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
if (err) {
WL_ERR("WLC_GET_WSEC error (%d)\n", err);
goto done;
@@ -1541,9 +1989,11 @@ static s32
brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
u8 key_idx, const u8 *mac_addr, struct key_params *params)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_wsec_key key;
struct brcmf_wsec_key_le key_le;
s32 err = 0;
+ s32 bssidx;
memset(&key, 0, sizeof(key));
key.index = (u32) key_idx;
@@ -1552,12 +2002,13 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
if (!is_multicast_ether_addr(mac_addr))
memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN);
key.len = (u32) params->key_len;
+ bssidx = brcmf_find_bssidx(cfg, ndev);
/* check for key index change */
if (key.len == 0) {
/* key delete */
- err = send_key_to_dongle(ndev, &key);
+ err = send_key_to_dongle(cfg, bssidx, ndev, &key);
if (err)
- return err;
+ WL_ERR("key delete error (%d)\n", err);
} else {
if (key.len > sizeof(key.data)) {
WL_ERR("Invalid key length (%d)\n", key.len);
@@ -1613,12 +2064,12 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
convert_key_from_CPU(&key, &key_le);
brcmf_netdev_wait_pend8021x(ndev);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_KEY, &key_le,
- sizeof(key_le));
- if (err) {
- WL_ERR("WLC_SET_KEY error (%d)\n", err);
- return err;
- }
+ err = brcmf_dev_iovar_setbuf_bsscfg(ndev, "wsec_key", &key_le,
+ sizeof(key_le),
+ cfg->extra_buf,
+ WL_EXTRA_BUF_MAX, bssidx);
+ if (err)
+ WL_ERR("wsec_key error (%d)\n", err);
}
return err;
}
@@ -1628,11 +2079,13 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_idx, bool pairwise, const u8 *mac_addr,
struct key_params *params)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_wsec_key key;
s32 val;
s32 wsec;
s32 err = 0;
u8 keybuf[8];
+ s32 bssidx;
WL_TRACE("Enter\n");
WL_CONN("key index (%d)\n", key_idx);
@@ -1659,25 +2112,33 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
switch (params->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
key.algo = CRYPTO_ALGO_WEP1;
+ val = WEP_ENABLED;
WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
break;
case WLAN_CIPHER_SUITE_WEP104:
key.algo = CRYPTO_ALGO_WEP128;
+ val = WEP_ENABLED;
WL_CONN("WLAN_CIPHER_SUITE_WEP104\n");
break;
case WLAN_CIPHER_SUITE_TKIP:
- memcpy(keybuf, &key.data[24], sizeof(keybuf));
- memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
- memcpy(&key.data[16], keybuf, sizeof(keybuf));
+ if (cfg->conf->mode != WL_MODE_AP) {
+ WL_CONN("Swapping key\n");
+ memcpy(keybuf, &key.data[24], sizeof(keybuf));
+ memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
+ memcpy(&key.data[16], keybuf, sizeof(keybuf));
+ }
key.algo = CRYPTO_ALGO_TKIP;
+ val = TKIP_ENABLED;
WL_CONN("WLAN_CIPHER_SUITE_TKIP\n");
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
key.algo = CRYPTO_ALGO_AES_CCM;
+ val = AES_ENABLED;
WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n");
break;
case WLAN_CIPHER_SUITE_CCMP:
key.algo = CRYPTO_ALGO_AES_CCM;
+ val = AES_ENABLED;
WL_CONN("WLAN_CIPHER_SUITE_CCMP\n");
break;
default:
@@ -1686,28 +2147,23 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
- err = send_key_to_dongle(ndev, &key); /* Set the new key/index */
+ bssidx = brcmf_find_bssidx(cfg, ndev);
+ err = send_key_to_dongle(cfg, bssidx, ndev, &key);
if (err)
goto done;
- val = WEP_ENABLED;
- err = brcmf_dev_intvar_get(ndev, "wsec", &wsec);
+ err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
if (err) {
WL_ERR("get wsec error (%d)\n", err);
goto done;
}
- wsec &= ~(WEP_ENABLED);
wsec |= val;
- err = brcmf_dev_intvar_set(ndev, "wsec", wsec);
+ err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", wsec, bssidx);
if (err) {
WL_ERR("set wsec error (%d)\n", err);
goto done;
}
- val = 1; /* assume shared key. otherwise 0 */
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AUTH, &val);
- if (err)
- WL_ERR("WLC_SET_AUTH error (%d)\n", err);
done:
WL_TRACE("Exit\n");
return err;
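
The TKIP case above swaps the two 8-byte halves at offsets 16 and 24 of the 32-byte key before handing it to the firmware when not operating as an AP (commonly described as exchanging the TX and RX MIC keys, though the diff itself only says "Swapping key"). A standalone sketch of that swap:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void tkip_swap_mic_halves(uint8_t key[32])
{
	uint8_t tmp[8];

	memcpy(tmp, &key[24], sizeof(tmp));        /* save bytes 24..31       */
	memcpy(&key[24], &key[16], sizeof(tmp));   /* move bytes 16..23 up    */
	memcpy(&key[16], tmp, sizeof(tmp));        /* place saved bytes at 16 */
}

int main(void)
{
	uint8_t key[32];
	int i;

	for (i = 0; i < 32; i++)
		key[i] = (uint8_t)i;
	tkip_swap_mic_halves(key);
	printf("byte16=%u byte24=%u\n", key[16], key[24]);   /* prints 24 and 16 */
	return 0;
}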
@@ -1717,10 +2173,10 @@ static s32
brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_idx, bool pairwise, const u8 *mac_addr)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_wsec_key key;
s32 err = 0;
- s32 val;
- s32 wsec;
+ s32 bssidx;
WL_TRACE("Enter\n");
if (!check_sys_up(wiphy))
@@ -1735,7 +2191,8 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
WL_CONN("key index (%d)\n", key_idx);
/* Set the new key/index */
- err = send_key_to_dongle(ndev, &key);
+ bssidx = brcmf_find_bssidx(cfg, ndev);
+ err = send_key_to_dongle(cfg, bssidx, ndev, &key);
if (err) {
if (err == -EINVAL) {
if (key.index >= DOT11_MAX_DEFAULT_KEYS)
@@ -1744,35 +2201,8 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
}
/* Ignore this error, may happen during DISASSOC */
err = -EAGAIN;
- goto done;
}
- val = 0;
- err = brcmf_dev_intvar_get(ndev, "wsec", &wsec);
- if (err) {
- WL_ERR("get wsec error (%d)\n", err);
- /* Ignore this error, may happen during DISASSOC */
- err = -EAGAIN;
- goto done;
- }
- wsec &= ~(WEP_ENABLED);
- wsec |= val;
- err = brcmf_dev_intvar_set(ndev, "wsec", wsec);
- if (err) {
- WL_ERR("set wsec error (%d)\n", err);
- /* Ignore this error, may happen during DISASSOC */
- err = -EAGAIN;
- goto done;
- }
-
- val = 0; /* assume open key. otherwise 1 */
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AUTH, &val);
- if (err) {
- WL_ERR("WLC_SET_AUTH error (%d)\n", err);
- /* Ignore this error, may happen during DISASSOC */
- err = -EAGAIN;
- }
-done:
WL_TRACE("Exit\n");
return err;
}
@@ -1783,10 +2213,12 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
void (*callback) (void *cookie, struct key_params * params))
{
struct key_params params;
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_profile *profile = cfg->profile;
struct brcmf_cfg80211_security *sec;
s32 wsec;
s32 err = 0;
+ s32 bssidx;
WL_TRACE("Enter\n");
WL_CONN("key index (%d)\n", key_idx);
@@ -1795,16 +2227,17 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
memset(&params, 0, sizeof(params));
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_WSEC, &wsec);
+ bssidx = brcmf_find_bssidx(cfg, ndev);
+ err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
if (err) {
WL_ERR("WLC_GET_WSEC error (%d)\n", err);
/* Ignore this error, may happen during DISASSOC */
err = -EAGAIN;
goto done;
}
- switch (wsec) {
+ switch (wsec & ~SES_OW_ENABLED) {
case WEP_ENABLED:
- sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
+ sec = &profile->sec;
if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
params.cipher = WLAN_CIPHER_SUITE_WEP40;
WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
@@ -1844,53 +2277,73 @@ brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
static s32
brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
- u8 *mac, struct station_info *sinfo)
+ u8 *mac, struct station_info *sinfo)
{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_profile *profile = cfg->profile;
struct brcmf_scb_val_le scb_val;
int rssi;
s32 rate;
s32 err = 0;
- u8 *bssid = brcmf_read_prof(cfg_priv, WL_PROF_BSSID);
+ u8 *bssid = profile->bssid;
+ struct brcmf_sta_info_le *sta_info_le;
- WL_TRACE("Enter\n");
+ WL_TRACE("Enter, MAC %pM\n", mac);
if (!check_sys_up(wiphy))
return -EIO;
- if (memcmp(mac, bssid, ETH_ALEN)) {
- WL_ERR("Wrong Mac address cfg_mac-%X:%X:%X:%X:%X:%X"
- "wl_bssid-%X:%X:%X:%X:%X:%X\n",
- mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
- bssid[0], bssid[1], bssid[2], bssid[3],
- bssid[4], bssid[5]);
- err = -ENOENT;
- goto done;
- }
-
- /* Report the current tx rate */
- err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_RATE, &rate);
- if (err) {
- WL_ERR("Could not get rate (%d)\n", err);
- } else {
- sinfo->filled |= STATION_INFO_TX_BITRATE;
- sinfo->txrate.legacy = rate * 5;
- WL_CONN("Rate %d Mbps\n", rate / 2);
- }
+ if (cfg->conf->mode == WL_MODE_AP) {
+ err = brcmf_dev_iovar_getbuf(ndev, "sta_info", mac, ETH_ALEN,
+ cfg->dcmd_buf,
+ WL_DCMD_LEN_MAX);
+ if (err < 0) {
+ WL_ERR("GET STA INFO failed, %d\n", err);
+ goto done;
+ }
+ sta_info_le = (struct brcmf_sta_info_le *)cfg->dcmd_buf;
- if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) {
- memset(&scb_val, 0, sizeof(scb_val));
- err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_RSSI, &scb_val,
- sizeof(struct brcmf_scb_val_le));
+ sinfo->filled = STATION_INFO_INACTIVE_TIME;
+ sinfo->inactive_time = le32_to_cpu(sta_info_le->idle) * 1000;
+ if (le32_to_cpu(sta_info_le->flags) & BRCMF_STA_ASSOC) {
+ sinfo->filled |= STATION_INFO_CONNECTED_TIME;
+ sinfo->connected_time = le32_to_cpu(sta_info_le->in);
+ }
+ WL_TRACE("STA idle time : %d ms, connected time :%d sec\n",
+ sinfo->inactive_time, sinfo->connected_time);
+ } else if (cfg->conf->mode == WL_MODE_BSS) {
+ if (memcmp(mac, bssid, ETH_ALEN)) {
+ WL_ERR("Wrong Mac address cfg_mac-%pM wl_bssid-%pM\n",
+ mac, bssid);
+ err = -ENOENT;
+ goto done;
+ }
+ /* Report the current tx rate */
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_RATE, &rate);
if (err) {
- WL_ERR("Could not get rssi (%d)\n", err);
+ WL_ERR("Could not get rate (%d)\n", err);
+ goto done;
} else {
- rssi = le32_to_cpu(scb_val.val);
- sinfo->filled |= STATION_INFO_SIGNAL;
- sinfo->signal = rssi;
- WL_CONN("RSSI %d dBm\n", rssi);
+ sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->txrate.legacy = rate * 5;
+ WL_CONN("Rate %d Mbps\n", rate / 2);
}
- }
+ if (test_bit(WL_STATUS_CONNECTED, &cfg->status)) {
+ memset(&scb_val, 0, sizeof(scb_val));
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_RSSI, &scb_val,
+ sizeof(scb_val));
+ if (err) {
+ WL_ERR("Could not get rssi (%d)\n", err);
+ goto done;
+ } else {
+ rssi = le32_to_cpu(scb_val.val);
+ sinfo->filled |= STATION_INFO_SIGNAL;
+ sinfo->signal = rssi;
+ WL_CONN("RSSI %d dBm\n", rssi);
+ }
+ }
+ } else
+ err = -EPERM;
done:
WL_TRACE("Exit\n");
return err;
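
The station-info path above converts firmware units into what cfg80211 expects: the per-station idle counter (seconds) becomes inactive_time in milliseconds, and the firmware rate (in 500 kbit/s units) becomes txrate.legacy in 100 kbit/s units, hence the * 5 and the / 2 in the Mbps debug print. A sketch of those conversions, under the unit assumptions just stated:

#include <stdint.h>
#include <stdio.h>

static uint32_t idle_secs_to_inactive_ms(uint32_t idle_secs)
{
	return idle_secs * 1000u;          /* sta_info idle * 1000 */
}

static uint32_t fw_rate_to_legacy(uint32_t fw_rate_500kbps)
{
	return fw_rate_500kbps * 5u;       /* 100 kbit/s units */
}

int main(void)
{
	printf("inactive=%u ms\n", idle_secs_to_inactive_ms(3));            /* 3000 */
	printf("legacy=%u (%u Mbps)\n", fw_rate_to_legacy(108), 108u / 2u); /* 540, 54 */
	return 0;
}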
@@ -1902,7 +2355,7 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
{
s32 pm;
s32 err = 0;
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
WL_TRACE("Enter\n");
@@ -1910,14 +2363,13 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
* Powersave enable/disable request is coming from the
* cfg80211 even before the interface is up. In that
* scenario, driver will be storing the power save
- * preference in cfg_priv struct to apply this to
+ * preference in cfg struct to apply this to
* FW later while initializing the dongle
*/
- cfg_priv->pwr_save = enabled;
- if (!test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+ cfg->pwr_save = enabled;
+ if (!test_bit(WL_STATUS_READY, &cfg->status)) {
- WL_INFO("Device is not ready,"
- "storing the value in cfg_priv struct\n");
+ WL_INFO("Device is not ready, storing the value in cfg_info struct\n");
goto done;
}
@@ -1995,10 +2447,10 @@ done:
return err;
}
-static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
+static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
struct brcmf_bss_info_le *bi)
{
- struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
+ struct wiphy *wiphy = cfg_to_wiphy(cfg);
struct ieee80211_channel *notify_channel;
struct cfg80211_bss *bss;
struct ieee80211_supported_band *band;
@@ -2062,14 +2514,14 @@ next_bss_le(struct brcmf_scan_results *list, struct brcmf_bss_info_le *bss)
le32_to_cpu(bss->length));
}
-static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_inform_bss(struct brcmf_cfg80211_info *cfg)
{
struct brcmf_scan_results *bss_list;
struct brcmf_bss_info_le *bi = NULL; /* must be initialized */
s32 err = 0;
int i;
- bss_list = cfg_priv->bss_list;
+ bss_list = cfg->bss_list;
if (bss_list->version != BRCMF_BSS_INFO_VERSION) {
WL_ERR("Version %d != WL_BSS_INFO_VERSION\n",
bss_list->version);
@@ -2078,17 +2530,17 @@ static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
WL_SCAN("scanned AP count (%d)\n", bss_list->count);
for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) {
bi = next_bss_le(bss_list, bi);
- err = brcmf_inform_single_bss(cfg_priv, bi);
+ err = brcmf_inform_single_bss(cfg, bi);
if (err)
break;
}
return err;
}
-static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
+static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev, const u8 *bssid)
{
- struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
+ struct wiphy *wiphy = cfg_to_wiphy(cfg);
struct ieee80211_channel *notify_channel;
struct brcmf_bss_info_le *bi = NULL;
struct ieee80211_supported_band *band;
@@ -2163,9 +2615,9 @@ CleanUp:
return err;
}
-static bool brcmf_is_ibssmode(struct brcmf_cfg80211_priv *cfg_priv)
+static bool brcmf_is_ibssmode(struct brcmf_cfg80211_info *cfg)
{
- return cfg_priv->conf->mode == WL_MODE_IBSS;
+ return cfg->conf->mode == WL_MODE_IBSS;
}
/*
@@ -2182,22 +2634,62 @@ static struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
totlen = buflen;
/* find tagged parameter */
- while (totlen >= 2) {
+ while (totlen >= TLV_HDR_LEN) {
int len = elt->len;
/* validate remaining totlen */
- if ((elt->id == key) && (totlen >= (len + 2)))
+ if ((elt->id == key) && (totlen >= (len + TLV_HDR_LEN)))
return elt;
- elt = (struct brcmf_tlv *) ((u8 *) elt + (len + 2));
- totlen -= (len + 2);
+ elt = (struct brcmf_tlv *) ((u8 *) elt + (len + TLV_HDR_LEN));
+ totlen -= (len + TLV_HDR_LEN);
}
return NULL;
}
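
brcmf_parse_tlvs() above walks a flat buffer of id/len/value elements, where TLV_HDR_LEN covers the two header bytes. A self-contained userspace sketch of the same walk with explicit bounds checks; the element ids used in main() are only examples.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TLV_HDR 2

static const uint8_t *find_tlv(const uint8_t *buf, size_t buflen, uint8_t key)
{
	while (buflen >= TLV_HDR) {
		uint8_t id  = buf[0];
		uint8_t len = buf[1];

		if (id == key && buflen >= (size_t)len + TLV_HDR)
			return buf;                    /* points at the id byte */
		if ((size_t)len + TLV_HDR > buflen)
			break;                         /* truncated element */
		buf    += (size_t)len + TLV_HDR;
		buflen -= (size_t)len + TLV_HDR;
	}
	return NULL;
}

int main(void)
{
	const uint8_t ies[] = { 0x00, 3, 'f', 'o', 'o',   /* SSID-like element */
	                        0x30, 2, 0x01, 0x00 };    /* RSN-like element  */
	const uint8_t *rsn = find_tlv(ies, sizeof(ies), 0x30);

	printf("found id=0x%02x len=%u\n", rsn ? rsn[0] : 0, rsn ? rsn[1] : 0);
	return 0;
}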
-static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
+/* Is any of the TLVs the expected entry? If
+ * not, update the TLV buffer pointer/length.
+ */
+static bool
+brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
+ u8 *oui, u32 oui_len, u8 type)
+{
+ /* If the contents match the OUI and the type */
+ if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+ !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+ type == ie[TLV_BODY_OFF + oui_len]) {
+ return true;
+ }
+
+ if (tlvs == NULL)
+ return false;
+ /* point to the next ie */
+ ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+ /* calculate the length of the rest of the buffer */
+ *tlvs_len -= (int)(ie - *tlvs);
+ /* update the pointer to the start of the buffer */
+ *tlvs = ie;
+
+ return false;
+}
+
+struct brcmf_vs_tlv *
+brcmf_find_wpaie(u8 *parse, u32 len)
+{
+ struct brcmf_tlv *ie;
+
+ while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_WPA))) {
+ if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
+ WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
+ return (struct brcmf_vs_tlv *)ie;
+ }
+ return NULL;
+}
+
+static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
{
+ struct brcmf_cfg80211_profile *profile = cfg->profile;
struct brcmf_bss_info_le *bi;
struct brcmf_ssid *ssid;
struct brcmf_tlv *tim;
@@ -2208,21 +2700,21 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
s32 err = 0;
WL_TRACE("Enter\n");
- if (brcmf_is_ibssmode(cfg_priv))
+ if (brcmf_is_ibssmode(cfg))
return err;
- ssid = (struct brcmf_ssid *)brcmf_read_prof(cfg_priv, WL_PROF_SSID);
+ ssid = &profile->ssid;
- *(__le32 *)cfg_priv->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX);
- err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCMF_C_GET_BSS_INFO,
- cfg_priv->extra_buf, WL_EXTRA_BUF_MAX);
+ *(__le32 *)cfg->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX);
+ err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCMF_C_GET_BSS_INFO,
+ cfg->extra_buf, WL_EXTRA_BUF_MAX);
if (err) {
WL_ERR("Could not get bss info %d\n", err);
goto update_bss_info_out;
}
- bi = (struct brcmf_bss_info_le *)(cfg_priv->extra_buf + 4);
- err = brcmf_inform_single_bss(cfg_priv, bi);
+ bi = (struct brcmf_bss_info_le *)(cfg->extra_buf + 4);
+ err = brcmf_inform_single_bss(cfg, bi);
if (err)
goto update_bss_info_out;
@@ -2240,7 +2732,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
	 * so we specifically query dtim information from the dongle.
*/
u32 var;
- err = brcmf_dev_intvar_get(cfg_to_ndev(cfg_priv),
+ err = brcmf_dev_intvar_get(cfg_to_ndev(cfg),
"dtim_assoc", &var);
if (err) {
WL_ERR("wl dtim_assoc failed (%d)\n", err);
@@ -2249,20 +2741,22 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
dtim_period = (u8)var;
}
- brcmf_update_prof(cfg_priv, NULL, &beacon_interval, WL_PROF_BEACONINT);
- brcmf_update_prof(cfg_priv, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
+ profile->beacon_interval = beacon_interval;
+ profile->dtim_period = dtim_period;
update_bss_info_out:
WL_TRACE("Exit");
return err;
}
-static void brcmf_term_iscan(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
+ struct escan_info *escan = &cfg->escan_info;
struct brcmf_ssid ssid;
- if (cfg_priv->iscan_on) {
+ set_bit(WL_STATUS_SCAN_ABORTING, &cfg->status);
+ if (cfg->iscan_on) {
iscan->state = WL_ISCAN_STATE_IDLE;
if (iscan->timer_on) {
@@ -2275,27 +2769,40 @@ static void brcmf_term_iscan(struct brcmf_cfg80211_priv *cfg_priv)
/* Abort iscan running in FW */
memset(&ssid, 0, sizeof(ssid));
brcmf_run_iscan(iscan, &ssid, WL_SCAN_ACTION_ABORT);
+
+ if (cfg->scan_request) {
+			/* Indicate scan abort to cfg80211 layer */
+ WL_INFO("Terminating scan in progress\n");
+ cfg80211_scan_done(cfg->scan_request, true);
+ cfg->scan_request = NULL;
+ }
+ }
+ if (cfg->escan_on && cfg->scan_request) {
+ escan->escan_state = WL_ESCAN_STATE_IDLE;
+ brcmf_notify_escan_complete(cfg, escan->ndev, true, true);
}
+ clear_bit(WL_STATUS_SCANNING, &cfg->status);
+ clear_bit(WL_STATUS_SCAN_ABORTING, &cfg->status);
}
static void brcmf_notify_iscan_complete(struct brcmf_cfg80211_iscan_ctrl *iscan,
bool aborted)
{
- struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan);
- struct net_device *ndev = cfg_to_ndev(cfg_priv);
+ struct brcmf_cfg80211_info *cfg = iscan_to_cfg(iscan);
+ struct net_device *ndev = cfg_to_ndev(cfg);
- if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg_priv->status)) {
+ if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
WL_ERR("Scan complete while device not scanning\n");
return;
}
- if (cfg_priv->scan_request) {
+ if (cfg->scan_request) {
WL_SCAN("ISCAN Completed scan: %s\n",
aborted ? "Aborted" : "Done");
- cfg80211_scan_done(cfg_priv->scan_request, aborted);
+ cfg80211_scan_done(cfg->scan_request, aborted);
brcmf_set_mpc(ndev, 1);
- cfg_priv->scan_request = NULL;
+ cfg->scan_request = NULL;
}
- cfg_priv->iscan_kickstart = false;
+ cfg->iscan_kickstart = false;
}
static s32 brcmf_wakeup_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan)
@@ -2348,21 +2855,21 @@ brcmf_get_iscan_results(struct brcmf_cfg80211_iscan_ctrl *iscan, u32 *status,
return err;
}
-static s32 brcmf_iscan_done(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_iscan_done(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
s32 err = 0;
iscan->state = WL_ISCAN_STATE_IDLE;
- brcmf_inform_bss(cfg_priv);
+ brcmf_inform_bss(cfg);
brcmf_notify_iscan_complete(iscan, false);
return err;
}
-static s32 brcmf_iscan_pending(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_iscan_pending(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
s32 err = 0;
/* Reschedule the timer */
@@ -2372,12 +2879,12 @@ static s32 brcmf_iscan_pending(struct brcmf_cfg80211_priv *cfg_priv)
return err;
}
-static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
s32 err = 0;
- brcmf_inform_bss(cfg_priv);
+ brcmf_inform_bss(cfg);
brcmf_run_iscan(iscan, NULL, BRCMF_SCAN_ACTION_CONTINUE);
/* Reschedule the timer */
mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
@@ -2386,9 +2893,9 @@ static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_priv *cfg_priv)
return err;
}
-static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
s32 err = 0;
iscan->state = WL_ISCAN_STATE_IDLE;
@@ -2402,7 +2909,7 @@ static void brcmf_cfg80211_iscan_handler(struct work_struct *work)
struct brcmf_cfg80211_iscan_ctrl *iscan =
container_of(work, struct brcmf_cfg80211_iscan_ctrl,
work);
- struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan);
+ struct brcmf_cfg80211_info *cfg = iscan_to_cfg(iscan);
struct brcmf_cfg80211_iscan_eloop *el = &iscan->el;
u32 status = BRCMF_SCAN_RESULTS_PARTIAL;
@@ -2411,12 +2918,12 @@ static void brcmf_cfg80211_iscan_handler(struct work_struct *work)
iscan->timer_on = 0;
}
- if (brcmf_get_iscan_results(iscan, &status, &cfg_priv->bss_list)) {
+ if (brcmf_get_iscan_results(iscan, &status, &cfg->bss_list)) {
status = BRCMF_SCAN_RESULTS_ABORTED;
WL_ERR("Abort iscan\n");
}
- el->handler[status](cfg_priv);
+ el->handler[status](cfg);
}
static void brcmf_iscan_timer(unsigned long data)
@@ -2431,11 +2938,11 @@ static void brcmf_iscan_timer(unsigned long data)
}
}
-static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
- if (cfg_priv->iscan_on) {
+ if (cfg->iscan_on) {
iscan->state = WL_ISCAN_STATE_IDLE;
INIT_WORK(&iscan->work, brcmf_cfg80211_iscan_handler);
}
@@ -2453,26 +2960,192 @@ static void brcmf_init_iscan_eloop(struct brcmf_cfg80211_iscan_eloop *el)
el->handler[BRCMF_SCAN_RESULTS_NO_MEM] = brcmf_iscan_aborted;
}
-static s32 brcmf_init_iscan(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_init_iscan(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
int err = 0;
- if (cfg_priv->iscan_on) {
- iscan->ndev = cfg_to_ndev(cfg_priv);
+ if (cfg->iscan_on) {
+ iscan->ndev = cfg_to_ndev(cfg);
brcmf_init_iscan_eloop(&iscan->el);
iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS;
init_timer(&iscan->timer);
iscan->timer.data = (unsigned long) iscan;
iscan->timer.function = brcmf_iscan_timer;
- err = brcmf_invoke_iscan(cfg_priv);
+ err = brcmf_invoke_iscan(cfg);
if (!err)
- iscan->data = cfg_priv;
+ iscan->data = cfg;
}
return err;
}
+static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
+{
+ struct brcmf_cfg80211_info *cfg =
+ container_of(work, struct brcmf_cfg80211_info,
+ escan_timeout_work);
+
+ brcmf_notify_escan_complete(cfg,
+ cfg->escan_info.ndev, true, true);
+}
+
+static void brcmf_escan_timeout(unsigned long data)
+{
+ struct brcmf_cfg80211_info *cfg =
+ (struct brcmf_cfg80211_info *)data;
+
+ if (cfg->scan_request) {
+ WL_ERR("timer expired\n");
+ if (cfg->escan_on)
+ schedule_work(&cfg->escan_timeout_work);
+ }
+}
+
+static s32
+brcmf_compare_update_same_bss(struct brcmf_bss_info_le *bss,
+ struct brcmf_bss_info_le *bss_info_le)
+{
+ if (!memcmp(&bss_info_le->BSSID, &bss->BSSID, ETH_ALEN) &&
+ (CHSPEC_BAND(le16_to_cpu(bss_info_le->chanspec)) ==
+ CHSPEC_BAND(le16_to_cpu(bss->chanspec))) &&
+ bss_info_le->SSID_len == bss->SSID_len &&
+ !memcmp(bss_info_le->SSID, bss->SSID, bss_info_le->SSID_len)) {
+ if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) ==
+ (bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL)) {
+ s16 bss_rssi = le16_to_cpu(bss->RSSI);
+ s16 bss_info_rssi = le16_to_cpu(bss_info_le->RSSI);
+
+ /* preserve max RSSI if the measurements are
+ * both on-channel or both off-channel
+ */
+ if (bss_info_rssi > bss_rssi)
+ bss->RSSI = bss_info_le->RSSI;
+ } else if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) &&
+ (bss_info_le->flags & WLC_BSS_RSSI_ON_CHANNEL) == 0) {
+ /* preserve the on-channel rssi measurement
+ * if the new measurement is off channel
+ */
+ bss->RSSI = bss_info_le->RSSI;
+ bss->flags |= WLC_BSS_RSSI_ON_CHANNEL;
+ }
+ return 1;
+ }
+ return 0;
+}
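
The comments in brcmf_compare_update_same_bss() above describe the intended merge policy for duplicate escan entries: keep the stronger RSSI when both measurements are on-channel or both off-channel, and never let an off-channel reading replace an on-channel one. A compact sketch of that stated policy, using illustrative types rather than the driver's structures:

#include <stdbool.h>
#include <stdio.h>

struct meas {
	int  rssi;      /* dBm */
	bool on_chan;   /* models WLC_BSS_RSSI_ON_CHANNEL */
};

static void merge_rssi(struct meas *old, const struct meas *fresh)
{
	if (old->on_chan == fresh->on_chan) {
		if (fresh->rssi > old->rssi)
			old->rssi = fresh->rssi;   /* both on- or both off-channel: keep max */
	} else if (old->on_chan && !fresh->on_chan) {
		/* keep the existing on-channel measurement untouched */
	}
	/* other combinations are left unchanged in this sketch */
}

int main(void)
{
	struct meas stored = { -70, true };
	struct meas fresh  = { -60, false };

	merge_rssi(&stored, &fresh);
	printf("rssi=%d on_chan=%d\n", stored.rssi, stored.on_chan);   /* -70 1 */
	return 0;
}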
+
+static s32
+brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e, void *data)
+{
+ s32 status;
+ s32 err = 0;
+ struct brcmf_escan_result_le *escan_result_le;
+ struct brcmf_bss_info_le *bss_info_le;
+ struct brcmf_bss_info_le *bss = NULL;
+ u32 bi_length;
+ struct brcmf_scan_results *list;
+ u32 i;
+ bool aborted;
+
+ status = be32_to_cpu(e->status);
+
+ if (!ndev || !cfg->escan_on ||
+ !test_bit(WL_STATUS_SCANNING, &cfg->status)) {
+ WL_ERR("scan not ready ndev %p wl->escan_on %d drv_status %x\n",
+ ndev, cfg->escan_on,
+ !test_bit(WL_STATUS_SCANNING, &cfg->status));
+ return -EPERM;
+ }
+
+ if (status == BRCMF_E_STATUS_PARTIAL) {
+ WL_SCAN("ESCAN Partial result\n");
+ escan_result_le = (struct brcmf_escan_result_le *) data;
+ if (!escan_result_le) {
+ WL_ERR("Invalid escan result (NULL pointer)\n");
+ goto exit;
+ }
+ if (!cfg->scan_request) {
+ WL_SCAN("result without cfg80211 request\n");
+ goto exit;
+ }
+
+ if (le16_to_cpu(escan_result_le->bss_count) != 1) {
+ WL_ERR("Invalid bss_count %d: ignoring\n",
+ escan_result_le->bss_count);
+ goto exit;
+ }
+ bss_info_le = &escan_result_le->bss_info_le;
+
+ bi_length = le32_to_cpu(bss_info_le->length);
+ if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
+ WL_ESCAN_RESULTS_FIXED_SIZE)) {
+ WL_ERR("Invalid bss_info length %d: ignoring\n",
+ bi_length);
+ goto exit;
+ }
+
+ if (!(cfg_to_wiphy(cfg)->interface_modes &
+ BIT(NL80211_IFTYPE_ADHOC))) {
+ if (le16_to_cpu(bss_info_le->capability) &
+ WLAN_CAPABILITY_IBSS) {
+ WL_ERR("Ignoring IBSS result\n");
+ goto exit;
+ }
+ }
+
+ list = (struct brcmf_scan_results *)
+ cfg->escan_info.escan_buf;
+ if (bi_length > WL_ESCAN_BUF_SIZE - list->buflen) {
+ WL_ERR("Buffer is too small: ignoring\n");
+ goto exit;
+ }
+
+ for (i = 0; i < list->count; i++) {
+ bss = bss ? (struct brcmf_bss_info_le *)
+ ((unsigned char *)bss +
+ le32_to_cpu(bss->length)) : list->bss_info_le;
+ if (brcmf_compare_update_same_bss(bss, bss_info_le))
+ goto exit;
+ }
+ memcpy(&(cfg->escan_info.escan_buf[list->buflen]),
+ bss_info_le, bi_length);
+ list->version = le32_to_cpu(bss_info_le->version);
+ list->buflen += bi_length;
+ list->count++;
+ } else {
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ if (cfg->scan_request) {
+ cfg->bss_list = (struct brcmf_scan_results *)
+ cfg->escan_info.escan_buf;
+ brcmf_inform_bss(cfg);
+ aborted = status != BRCMF_E_STATUS_SUCCESS;
+ brcmf_notify_escan_complete(cfg, ndev, aborted,
+ false);
+ } else
+ WL_ERR("Unexpected scan result 0x%x\n", status);
+ }
+exit:
+ return err;
+}
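
Each BRCMF_E_STATUS_PARTIAL event above carries a single BSS record that is appended to a flat result buffer while a running length and count are maintained, and records that would overflow the buffer are dropped. A userspace sketch of that aggregation step; the buffer size and struct layout are made up for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SCAN_BUF_SIZE 1024u

struct scan_list {
	uint32_t buflen;                 /* bytes used in buf[] */
	uint32_t count;                  /* number of records   */
	uint8_t  buf[SCAN_BUF_SIZE];
};

static int scan_list_append(struct scan_list *l, const void *rec, uint32_t len)
{
	if (len > SCAN_BUF_SIZE - l->buflen)
		return -1;                          /* the "Buffer is too small" case */
	memcpy(&l->buf[l->buflen], rec, len);
	l->buflen += len;
	l->count++;
	return 0;
}

int main(void)
{
	struct scan_list list = { 0 };
	uint8_t rec[64] = { 0 };

	printf("%d count=%u\n", scan_list_append(&list, rec, sizeof(rec)),
	       list.count);
	return 0;
}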
+
+static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg)
+{
+
+ if (cfg->escan_on) {
+ cfg->el.handler[BRCMF_E_ESCAN_RESULT] =
+ brcmf_cfg80211_escan_handler;
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ /* Init scan_timeout timer */
+ init_timer(&cfg->escan_timeout);
+ cfg->escan_timeout.data = (unsigned long) cfg;
+ cfg->escan_timeout.function = brcmf_escan_timeout;
+ INIT_WORK(&cfg->escan_timeout_work,
+ brcmf_cfg80211_escan_timeout_worker);
+ }
+}
+
static __always_inline void brcmf_delay(u32 ms)
{
if (ms < 1000 / HZ) {
@@ -2485,7 +3158,7 @@ static __always_inline void brcmf_delay(u32 ms)
static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
/*
* Check for WL_STATUS_READY before any function call which
@@ -2494,7 +3167,7 @@ static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
*/
WL_TRACE("Enter\n");
- if (test_bit(WL_STATUS_READY, &cfg_priv->status))
+ if (test_bit(WL_STATUS_READY, &cfg->status))
brcmf_invoke_iscan(wiphy_to_cfg(wiphy));
WL_TRACE("Exit\n");
@@ -2504,8 +3177,8 @@ static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
struct cfg80211_wowlan *wow)
{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- struct net_device *ndev = cfg_to_ndev(cfg_priv);
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct net_device *ndev = cfg_to_ndev(cfg);
WL_TRACE("Enter\n");
@@ -2519,12 +3192,12 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
* While going to suspend if associated with AP disassociate
* from AP to save power while system is in suspended state
*/
- if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) ||
- test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) &&
- test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+ if ((test_bit(WL_STATUS_CONNECTED, &cfg->status) ||
+ test_bit(WL_STATUS_CONNECTING, &cfg->status)) &&
+ test_bit(WL_STATUS_READY, &cfg->status)) {
WL_INFO("Disassociating from AP"
" while entering suspend state\n");
- brcmf_link_down(cfg_priv);
+ brcmf_link_down(cfg);
/*
* Make sure WPA_Supplicant receives all the event
@@ -2534,24 +3207,14 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
brcmf_delay(500);
}
- set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
- if (test_bit(WL_STATUS_READY, &cfg_priv->status))
- brcmf_term_iscan(cfg_priv);
-
- if (cfg_priv->scan_request) {
- /* Indidate scan abort to cfg80211 layer */
- WL_INFO("Terminating scan in progress\n");
- cfg80211_scan_done(cfg_priv->scan_request, true);
- cfg_priv->scan_request = NULL;
- }
- clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
- clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
+ if (test_bit(WL_STATUS_READY, &cfg->status))
+ brcmf_abort_scanning(cfg);
+ else
+ clear_bit(WL_STATUS_SCANNING, &cfg->status);
/* Turn off watchdog timer */
- if (test_bit(WL_STATUS_READY, &cfg_priv->status)) {
- WL_INFO("Enable MPC\n");
+ if (test_bit(WL_STATUS_READY, &cfg->status))
brcmf_set_mpc(ndev, 1);
- }
WL_TRACE("Exit\n");
@@ -2561,14 +3224,14 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
static __used s32
brcmf_dev_bufvar_set(struct net_device *ndev, s8 *name, s8 *buf, s32 len)
{
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
u32 buflen;
- buflen = brcmf_c_mkiovar(name, buf, len, cfg_priv->dcmd_buf,
+ buflen = brcmf_c_mkiovar(name, buf, len, cfg->dcmd_buf,
WL_DCMD_LEN_MAX);
BUG_ON(!buflen);
- return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, cfg_priv->dcmd_buf,
+ return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, cfg->dcmd_buf,
buflen);
}
@@ -2576,20 +3239,20 @@ static s32
brcmf_dev_bufvar_get(struct net_device *ndev, s8 *name, s8 *buf,
s32 buf_len)
{
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
u32 len;
s32 err = 0;
- len = brcmf_c_mkiovar(name, NULL, 0, cfg_priv->dcmd_buf,
+ len = brcmf_c_mkiovar(name, NULL, 0, cfg->dcmd_buf,
WL_DCMD_LEN_MAX);
BUG_ON(!len);
- err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, cfg_priv->dcmd_buf,
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, cfg->dcmd_buf,
WL_DCMD_LEN_MAX);
if (err) {
WL_ERR("error (%d)\n", err);
return err;
}
- memcpy(buf, cfg_priv->dcmd_buf, buf_len);
+ memcpy(buf, cfg->dcmd_buf, buf_len);
return err;
}
@@ -2622,8 +3285,8 @@ static s32
brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_pmksa *pmksa)
{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
- struct pmkid_list *pmkids = &cfg_priv->pmk_list->pmkids;
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct pmkid_list *pmkids = &cfg->pmk_list->pmkids;
s32 err = 0;
int i;
int pmkid_len;
@@ -2651,7 +3314,7 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
for (i = 0; i < WLAN_PMKID_LEN; i++)
WL_CONN("%02x\n", pmkids->pmkid[pmkid_len].PMKID[i]);
- err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err);
+ err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
WL_TRACE("Exit\n");
return err;
@@ -2661,7 +3324,7 @@ static s32
brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_pmksa *pmksa)
{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct pmkid_list pmkid;
s32 err = 0;
int i, pmkid_len;
@@ -2678,30 +3341,30 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
for (i = 0; i < WLAN_PMKID_LEN; i++)
WL_CONN("%02x\n", pmkid.pmkid[0].PMKID[i]);
- pmkid_len = le32_to_cpu(cfg_priv->pmk_list->pmkids.npmkid);
+ pmkid_len = le32_to_cpu(cfg->pmk_list->pmkids.npmkid);
for (i = 0; i < pmkid_len; i++)
if (!memcmp
- (pmksa->bssid, &cfg_priv->pmk_list->pmkids.pmkid[i].BSSID,
+ (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
ETH_ALEN))
break;
if ((pmkid_len > 0)
&& (i < pmkid_len)) {
- memset(&cfg_priv->pmk_list->pmkids.pmkid[i], 0,
+ memset(&cfg->pmk_list->pmkids.pmkid[i], 0,
sizeof(struct pmkid));
for (; i < (pmkid_len - 1); i++) {
- memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].BSSID,
- &cfg_priv->pmk_list->pmkids.pmkid[i + 1].BSSID,
+ memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID,
+ &cfg->pmk_list->pmkids.pmkid[i + 1].BSSID,
ETH_ALEN);
- memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].PMKID,
- &cfg_priv->pmk_list->pmkids.pmkid[i + 1].PMKID,
+ memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID,
+ &cfg->pmk_list->pmkids.pmkid[i + 1].PMKID,
WLAN_PMKID_LEN);
}
- cfg_priv->pmk_list->pmkids.npmkid = cpu_to_le32(pmkid_len - 1);
+ cfg->pmk_list->pmkids.npmkid = cpu_to_le32(pmkid_len - 1);
} else
err = -EINVAL;
- err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err);
+ err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
WL_TRACE("Exit\n");
return err;
@@ -2711,21 +3374,979 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
static s32
brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev)
{
- struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
s32 err = 0;
WL_TRACE("Enter\n");
if (!check_sys_up(wiphy))
return -EIO;
- memset(cfg_priv->pmk_list, 0, sizeof(*cfg_priv->pmk_list));
- err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err);
+ memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list));
+ err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
WL_TRACE("Exit\n");
return err;
}
+/*
+ * A PFN result does not carry all the information required by the
+ * supplicant (e.g. the IEs), so do a targeted escan to have the sched
+ * scan results reported via wl_inform_single_bss in the required format.
+ * Escan needs the scan request in the form of a cfg80211_scan_request,
+ * so for the time being build one from the received PNO event.
+ */
+static s32
+brcmf_notify_sched_scan_results(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e, void *data)
+{
+ struct brcmf_pno_net_info_le *netinfo, *netinfo_start;
+ struct cfg80211_scan_request *request = NULL;
+ struct cfg80211_ssid *ssid = NULL;
+ struct ieee80211_channel *channel = NULL;
+ struct wiphy *wiphy = cfg_to_wiphy(cfg);
+ int err = 0;
+ int channel_req = 0;
+ int band = 0;
+ struct brcmf_pno_scanresults_le *pfn_result;
+ u32 result_count;
+ u32 status;
+
+ WL_SCAN("Enter\n");
+
+ if (e->event_type == cpu_to_be32(BRCMF_E_PFN_NET_LOST)) {
+ WL_SCAN("PFN NET LOST event. Do Nothing\n");
+ return 0;
+ }
+
+ pfn_result = (struct brcmf_pno_scanresults_le *)data;
+ result_count = le32_to_cpu(pfn_result->count);
+ status = le32_to_cpu(pfn_result->status);
+
+ /*
+ * PFN event is limited to fit 512 bytes so we may get
+ * multiple NET_FOUND events. For now place a warning here.
+ */
+ WARN_ON(status != BRCMF_PNO_SCAN_COMPLETE);
+ WL_SCAN("PFN NET FOUND event. count: %d\n", result_count);
+ if (result_count > 0) {
+ int i;
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ ssid = kcalloc(result_count, sizeof(*ssid), GFP_KERNEL);
+ channel = kcalloc(result_count, sizeof(*channel), GFP_KERNEL);
+ if (!request || !ssid || !channel) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ request->wiphy = wiphy;
+ data += sizeof(struct brcmf_pno_scanresults_le);
+ netinfo_start = (struct brcmf_pno_net_info_le *)data;
+
+ for (i = 0; i < result_count; i++) {
+ netinfo = &netinfo_start[i];
+ if (!netinfo) {
+ WL_ERR("Invalid netinfo ptr. index: %d\n", i);
+ err = -EINVAL;
+ goto out_err;
+ }
+
+ WL_SCAN("SSID:%s Channel:%d\n",
+ netinfo->SSID, netinfo->channel);
+ memcpy(ssid[i].ssid, netinfo->SSID, netinfo->SSID_len);
+ ssid[i].ssid_len = netinfo->SSID_len;
+ request->n_ssids++;
+
+ channel_req = netinfo->channel;
+ if (channel_req <= CH_MAX_2G_CHANNEL)
+ band = NL80211_BAND_2GHZ;
+ else
+ band = NL80211_BAND_5GHZ;
+ channel[i].center_freq =
+ ieee80211_channel_to_frequency(channel_req,
+ band);
+ channel[i].band = band;
+ channel[i].flags |= IEEE80211_CHAN_NO_HT40;
+ request->channels[i] = &channel[i];
+ request->n_channels++;
+ }
+
+ /* assign parsed ssid array */
+ if (request->n_ssids)
+ request->ssids = &ssid[0];
+
+ if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
+ /* Abort any on-going scan */
+ brcmf_abort_scanning(cfg);
+ }
+
+ set_bit(WL_STATUS_SCANNING, &cfg->status);
+ err = brcmf_do_escan(cfg, wiphy, ndev, request);
+ if (err) {
+ clear_bit(WL_STATUS_SCANNING, &cfg->status);
+ goto out_err;
+ }
+ cfg->sched_escan = true;
+ cfg->scan_request = request;
+ } else {
+ WL_ERR("FALSE PNO Event. (pfn_count == 0)\n");
+ goto out_err;
+ }
+
+ kfree(ssid);
+ kfree(channel);
+ kfree(request);
+ return 0;
+
+out_err:
+ kfree(ssid);
+ kfree(channel);
+ kfree(request);
+ cfg80211_sched_scan_stopped(wiphy);
+ return err;
+}
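
When the PNO results are converted into a scan request above, each reported channel number is mapped to a band (2.4 GHz up to CH_MAX_2G_CHANNEL, 5 GHz otherwise) and then to a center frequency via ieee80211_channel_to_frequency(). The sketch below uses the usual 802.11 channel/frequency formula for illustration only; the driver itself relies on the mac80211 helper.

#include <stdio.h>

#define CH_MAX_2G 14

static int channel_to_freq_mhz(int chan)
{
	if (chan <= 0)
		return 0;
	if (chan <= CH_MAX_2G)
		return chan == 14 ? 2484 : 2407 + chan * 5;   /* 2.4 GHz */
	return 5000 + chan * 5;                               /* 5 GHz   */
}

int main(void)
{
	printf("ch6  -> %d MHz\n", channel_to_freq_mhz(6));    /* 2437 */
	printf("ch36 -> %d MHz\n", channel_to_freq_mhz(36));   /* 5180 */
	return 0;
}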
+
+#ifndef CONFIG_BRCMISCAN
+static int brcmf_dev_pno_clean(struct net_device *ndev)
+{
+ char iovbuf[128];
+ int ret;
+
+ /* Disable pfn */
+ ret = brcmf_dev_intvar_set(ndev, "pfn", 0);
+ if (ret == 0) {
+ /* clear pfn */
+ ret = brcmf_dev_iovar_setbuf(ndev, "pfnclear", NULL, 0,
+ iovbuf, sizeof(iovbuf));
+ }
+ if (ret < 0)
+ WL_ERR("failed code %d\n", ret);
+
+ return ret;
+}
+
+static int brcmf_dev_pno_config(struct net_device *ndev)
+{
+ struct brcmf_pno_param_le pfn_param;
+ char iovbuf[128];
+
+ memset(&pfn_param, 0, sizeof(pfn_param));
+ pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);
+
+ /* set extra pno params */
+ pfn_param.flags = cpu_to_le16(1 << BRCMF_PNO_ENABLE_ADAPTSCAN_BIT);
+ pfn_param.repeat = BRCMF_PNO_REPEAT;
+ pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;
+
+ /* set up pno scan fr */
+ pfn_param.scan_freq = cpu_to_le32(BRCMF_PNO_TIME);
+
+ return brcmf_dev_iovar_setbuf(ndev, "pfn_set",
+ &pfn_param, sizeof(pfn_param),
+ iovbuf, sizeof(iovbuf));
+}
+
+static int
+brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_sched_scan_request *request)
+{
+ char iovbuf[128];
+ struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+ struct brcmf_pno_net_param_le pfn;
+ int i;
+ int ret = 0;
+
+ WL_SCAN("Enter n_match_sets:%d n_ssids:%d\n",
+ request->n_match_sets, request->n_ssids);
+ if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
+ WL_ERR("Scanning already : status (%lu)\n", cfg->status);
+ return -EAGAIN;
+ }
+
+ if (!request || !request->n_ssids || !request->n_match_sets) {
+ WL_ERR("Invalid sched scan req!! n_ssids:%d\n",
+ request->n_ssids);
+ return -EINVAL;
+ }
+
+ if (request->n_ssids > 0) {
+ for (i = 0; i < request->n_ssids; i++) {
+ /* Active scan req for ssids */
+ WL_SCAN(">>> Active scan req for ssid (%s)\n",
+ request->ssids[i].ssid);
+
+ /*
+			 * match_set ssids are a superset of the n_ssids list,
+			 * so we need not add these sets separately.
+ */
+ }
+ }
+
+ if (request->n_match_sets > 0) {
+ /* clean up everything */
+ ret = brcmf_dev_pno_clean(ndev);
+ if (ret < 0) {
+ WL_ERR("failed error=%d\n", ret);
+ return ret;
+ }
+
+ /* configure pno */
+ ret = brcmf_dev_pno_config(ndev);
+ if (ret < 0) {
+ WL_ERR("PNO setup failed!! ret=%d\n", ret);
+ return -EINVAL;
+ }
+
+ /* configure each match set */
+ for (i = 0; i < request->n_match_sets; i++) {
+ struct cfg80211_ssid *ssid;
+ u32 ssid_len;
+
+ ssid = &request->match_sets[i].ssid;
+ ssid_len = ssid->ssid_len;
+
+ if (!ssid_len) {
+ WL_ERR("skip broadcast ssid\n");
+ continue;
+ }
+ pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
+ pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
+ pfn.wsec = cpu_to_le32(0);
+ pfn.infra = cpu_to_le32(1);
+ pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
+ pfn.ssid.SSID_len = cpu_to_le32(ssid_len);
+ memcpy(pfn.ssid.SSID, ssid->ssid, ssid_len);
+ ret = brcmf_dev_iovar_setbuf(ndev, "pfn_add",
+ &pfn, sizeof(pfn),
+ iovbuf, sizeof(iovbuf));
+ WL_SCAN(">>> PNO filter %s for ssid (%s)\n",
+ ret == 0 ? "set" : "failed",
+ ssid->ssid);
+ }
+ /* Enable the PNO */
+ if (brcmf_dev_intvar_set(ndev, "pfn", 1) < 0) {
+ WL_ERR("PNO enable failed!! ret=%d\n", ret);
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
+ struct net_device *ndev)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+
+ WL_SCAN("enter\n");
+ brcmf_dev_pno_clean(ndev);
+ if (cfg->sched_escan)
+ brcmf_notify_escan_complete(cfg, ndev, true, true);
+ return 0;
+}
+#endif /* CONFIG_BRCMISCAN */
+
+#ifdef CONFIG_NL80211_TESTMODE
+static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct net_device *ndev = cfg->wdev->netdev;
+ struct brcmf_dcmd *dcmd = data;
+ struct sk_buff *reply;
+ int ret;
+
+ ret = brcmf_netlink_dcmd(ndev, dcmd);
+ if (ret == 0) {
+ reply = cfg80211_testmode_alloc_reply_skb(wiphy, sizeof(*dcmd));
+ nla_put(reply, NL80211_ATTR_TESTDATA, sizeof(*dcmd), dcmd);
+ ret = cfg80211_testmode_reply(reply);
+ }
+ return ret;
+}
+#endif
+
+static s32 brcmf_configure_opensecurity(struct net_device *ndev, s32 bssidx)
+{
+ s32 err;
+
+ /* set auth */
+ err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", 0, bssidx);
+ if (err < 0) {
+ WL_ERR("auth error %d\n", err);
+ return err;
+ }
+ /* set wsec */
+ err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", 0, bssidx);
+ if (err < 0) {
+ WL_ERR("wsec error %d\n", err);
+ return err;
+ }
+ /* set upper-layer auth */
+ err = brcmf_dev_intvar_set_bsscfg(ndev, "wpa_auth",
+ WPA_AUTH_NONE, bssidx);
+ if (err < 0) {
+ WL_ERR("wpa_auth error %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static bool brcmf_valid_wpa_oui(u8 *oui, bool is_rsn_ie)
+{
+ if (is_rsn_ie)
+ return (memcmp(oui, RSN_OUI, TLV_OUI_LEN) == 0);
+
+ return (memcmp(oui, WPA_OUI, TLV_OUI_LEN) == 0);
+}
+
+static s32
+brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
+ bool is_rsn_ie, s32 bssidx)
+{
+ u32 auth = 0; /* d11 open authentication */
+ u16 count;
+ s32 err = 0;
+ s32 len = 0;
+ u32 i;
+ u32 wsec;
+ u32 pval = 0;
+ u32 gval = 0;
+ u32 wpa_auth = 0;
+ u32 offset;
+ u8 *data;
+ u16 rsn_cap;
+ u32 wme_bss_disable;
+
+ WL_TRACE("Enter\n");
+ if (wpa_ie == NULL)
+ goto exit;
+
+ len = wpa_ie->len + TLV_HDR_LEN;
+ data = (u8 *)wpa_ie;
+ offset = 0;
+ if (!is_rsn_ie)
+ offset += VS_IE_FIXED_HDR_LEN;
+ offset += WPA_IE_VERSION_LEN;
+
+ /* check for multicast cipher suite */
+ if (offset + WPA_IE_MIN_OUI_LEN > len) {
+ err = -EINVAL;
+ WL_ERR("no multicast cipher suite\n");
+ goto exit;
+ }
+
+ if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
+ err = -EINVAL;
+		WL_ERR("invalid OUI\n");
+ goto exit;
+ }
+ offset += TLV_OUI_LEN;
+
+ /* pick up multicast cipher */
+ switch (data[offset]) {
+ case WPA_CIPHER_NONE:
+ gval = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ gval = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ gval = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ gval = AES_ENABLED;
+ break;
+ default:
+ err = -EINVAL;
+		WL_ERR("Invalid multicast cipher info\n");
+ goto exit;
+ }
+
+ offset++;
+ /* walk thru unicast cipher list and pick up what we recognize */
+ count = data[offset] + (data[offset + 1] << 8);
+ offset += WPA_IE_SUITE_COUNT_LEN;
+ /* Check for unicast suite(s) */
+ if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) {
+ err = -EINVAL;
+ WL_ERR("no unicast cipher suite\n");
+ goto exit;
+ }
+ for (i = 0; i < count; i++) {
+ if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
+ err = -EINVAL;
+			WL_ERR("invalid OUI\n");
+ goto exit;
+ }
+ offset += TLV_OUI_LEN;
+ switch (data[offset]) {
+ case WPA_CIPHER_NONE:
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ pval |= WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ pval |= TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ pval |= AES_ENABLED;
+ break;
+ default:
+			WL_ERR("Invalid unicast security info\n");
+ }
+ offset++;
+ }
+ /* walk thru auth management suite list and pick up what we recognize */
+ count = data[offset] + (data[offset + 1] << 8);
+ offset += WPA_IE_SUITE_COUNT_LEN;
+ /* Check for auth key management suite(s) */
+ if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) {
+ err = -EINVAL;
+ WL_ERR("no auth key mgmt suite\n");
+ goto exit;
+ }
+ for (i = 0; i < count; i++) {
+ if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
+ err = -EINVAL;
+			WL_ERR("invalid OUI\n");
+ goto exit;
+ }
+ offset += TLV_OUI_LEN;
+ switch (data[offset]) {
+ case RSN_AKM_NONE:
+ WL_TRACE("RSN_AKM_NONE\n");
+ wpa_auth |= WPA_AUTH_NONE;
+ break;
+ case RSN_AKM_UNSPECIFIED:
+ WL_TRACE("RSN_AKM_UNSPECIFIED\n");
+ is_rsn_ie ? (wpa_auth |= WPA2_AUTH_UNSPECIFIED) :
+ (wpa_auth |= WPA_AUTH_UNSPECIFIED);
+ break;
+ case RSN_AKM_PSK:
+ WL_TRACE("RSN_AKM_PSK\n");
+ is_rsn_ie ? (wpa_auth |= WPA2_AUTH_PSK) :
+ (wpa_auth |= WPA_AUTH_PSK);
+ break;
+ default:
+			WL_ERR("Invalid key mgmt info\n");
+ }
+ offset++;
+ }
+
+ if (is_rsn_ie) {
+ wme_bss_disable = 1;
+ if ((offset + RSN_CAP_LEN) <= len) {
+ rsn_cap = data[offset] + (data[offset + 1] << 8);
+ if (rsn_cap & RSN_CAP_PTK_REPLAY_CNTR_MASK)
+ wme_bss_disable = 0;
+ }
+ /* set wme_bss_disable to sync RSN Capabilities */
+ err = brcmf_dev_intvar_set_bsscfg(ndev, "wme_bss_disable",
+ wme_bss_disable, bssidx);
+ if (err < 0) {
+ WL_ERR("wme_bss_disable error %d\n", err);
+ goto exit;
+ }
+ }
+ /* FOR WPS , set SES_OW_ENABLED */
+ wsec = (pval | gval | SES_OW_ENABLED);
+
+ /* set auth */
+ err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", auth, bssidx);
+ if (err < 0) {
+ WL_ERR("auth error %d\n", err);
+ goto exit;
+ }
+ /* set wsec */
+ err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", wsec, bssidx);
+ if (err < 0) {
+ WL_ERR("wsec error %d\n", err);
+ goto exit;
+ }
+ /* set upper-layer auth */
+ err = brcmf_dev_intvar_set_bsscfg(ndev, "wpa_auth", wpa_auth, bssidx);
+ if (err < 0) {
+ WL_ERR("wpa_auth error %d\n", err);
+ goto exit;
+ }
+
+exit:
+ return err;
+}
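
brcmf_configure_wpaie() above repeatedly reads a 16-bit little-endian suite count and then checks that count four-byte suite selectors (a 3-byte OUI plus a type byte) still fit inside the IE before walking them. A standalone sketch of that count-and-bounds pattern; the sample IE body in main() is illustrative.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SUITE_LEN 4u   /* OUI (3) + type (1) */

static int walk_suites(const uint8_t *ie, size_t len, size_t offset)
{
	uint16_t count;
	size_t i;

	if (offset + 2 > len)
		return -1;
	count = (uint16_t)(ie[offset] | (ie[offset + 1] << 8));   /* LE count */
	offset += 2;
	if (offset + (size_t)count * SUITE_LEN > len)
		return -1;                         /* the "no ... cipher suite" case */

	for (i = 0; i < count; i++, offset += SUITE_LEN)
		printf("suite %zu: %02x:%02x:%02x type %u\n", i,
		       ie[offset], ie[offset + 1], ie[offset + 2],
		       ie[offset + 3]);
	return (int)count;
}

int main(void)
{
	const uint8_t body[] = { 0x01, 0x00,                    /* count = 1 */
	                         0x00, 0x0f, 0xac, 0x04 };      /* CCMP suite */

	return walk_suites(body, sizeof(body), 0) == 1 ? 0 : 1;
}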
+
+static s32
+brcmf_parse_vndr_ies(u8 *vndr_ie_buf, u32 vndr_ie_len,
+ struct parsed_vndr_ies *vndr_ies)
+{
+ s32 err = 0;
+ struct brcmf_vs_tlv *vndrie;
+ struct brcmf_tlv *ie;
+ struct parsed_vndr_ie_info *parsed_info;
+ s32 remaining_len;
+
+ remaining_len = (s32)vndr_ie_len;
+ memset(vndr_ies, 0, sizeof(*vndr_ies));
+
+ ie = (struct brcmf_tlv *)vndr_ie_buf;
+ while (ie) {
+ if (ie->id != WLAN_EID_VENDOR_SPECIFIC)
+ goto next;
+ vndrie = (struct brcmf_vs_tlv *)ie;
+ /* len should be bigger than OUI length + one */
+ if (vndrie->len < (VS_IE_FIXED_HDR_LEN - TLV_HDR_LEN + 1)) {
+ WL_ERR("invalid vndr ie. length is too small %d\n",
+ vndrie->len);
+ goto next;
+ }
+ /* if wpa or wme ie, do not add ie */
+ if (!memcmp(vndrie->oui, (u8 *)WPA_OUI, TLV_OUI_LEN) &&
+ ((vndrie->oui_type == WPA_OUI_TYPE) ||
+ (vndrie->oui_type == WME_OUI_TYPE))) {
+ WL_TRACE("Found WPA/WME oui. Do not add it\n");
+ goto next;
+ }
+
+ parsed_info = &vndr_ies->ie_info[vndr_ies->count];
+
+ /* save vndr ie information */
+ parsed_info->ie_ptr = (char *)vndrie;
+ parsed_info->ie_len = vndrie->len + TLV_HDR_LEN;
+ memcpy(&parsed_info->vndrie, vndrie, sizeof(*vndrie));
+
+ vndr_ies->count++;
+
+ WL_TRACE("** OUI %02x %02x %02x, type 0x%02x\n",
+ parsed_info->vndrie.oui[0],
+ parsed_info->vndrie.oui[1],
+ parsed_info->vndrie.oui[2],
+ parsed_info->vndrie.oui_type);
+
+ if (vndr_ies->count >= MAX_VNDR_IE_NUMBER)
+ break;
+next:
+ remaining_len -= ie->len;
+ if (remaining_len <= 2)
+ ie = NULL;
+ else
+ ie = (struct brcmf_tlv *)(((u8 *)ie) + ie->len);
+ }
+ return err;
+}
+
+static u32
+brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd)
+{
+
+ __le32 iecount_le;
+ __le32 pktflag_le;
+
+ strncpy(iebuf, add_del_cmd, VNDR_IE_CMD_LEN - 1);
+ iebuf[VNDR_IE_CMD_LEN - 1] = '\0';
+
+ iecount_le = cpu_to_le32(1);
+ memcpy(&iebuf[VNDR_IE_COUNT_OFFSET], &iecount_le, sizeof(iecount_le));
+
+ pktflag_le = cpu_to_le32(pktflag);
+ memcpy(&iebuf[VNDR_IE_PKTFLAG_OFFSET], &pktflag_le, sizeof(pktflag_le));
+
+ memcpy(&iebuf[VNDR_IE_VSIE_OFFSET], ie_ptr, ie_len);
+
+ return ie_len + VNDR_IE_HDR_SIZE;
+}
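
brcmf_vndr_ie() above packs one "add"/"del" command per IE: the command string, a 32-bit element count of 1, the packet-flag word, and then the raw IE bytes at fixed offsets. The sketch below mirrors that layout, but the offsets and sizes are hypothetical placeholders, not the driver's VNDR_IE_* constants.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CMD_LEN      4u    /* placeholder for VNDR_IE_CMD_LEN        */
#define COUNT_OFF    4u    /* placeholder for VNDR_IE_COUNT_OFFSET   */
#define PKTFLAG_OFF  8u    /* placeholder for VNDR_IE_PKTFLAG_OFFSET */
#define VSIE_OFF     12u   /* placeholder for VNDR_IE_VSIE_OFFSET    */

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v; p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16); p[3] = (uint8_t)(v >> 24);
}

static uint32_t pack_vndr_ie(uint8_t *buf, const char *cmd, uint32_t pktflag,
			     const uint8_t *ie, uint32_t ie_len)
{
	memset(buf, 0, VSIE_OFF);
	strncpy((char *)buf, cmd, CMD_LEN - 1);   /* NUL-terminated via the memset */
	put_le32(&buf[COUNT_OFF], 1);             /* one IE per command            */
	put_le32(&buf[PKTFLAG_OFF], pktflag);
	memcpy(&buf[VSIE_OFF], ie, ie_len);
	return VSIE_OFF + ie_len;                 /* bytes written */
}

int main(void)
{
	uint8_t buf[64];
	const uint8_t ie[] = { 0xdd, 0x04, 0x00, 0x11, 0x22, 0x01 };

	printf("packed %u bytes\n", pack_vndr_ie(buf, "add", 0x4, ie, sizeof(ie)));
	return 0;
}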
+
+s32
+brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev, s32 bssidx, s32 pktflag,
+ u8 *vndr_ie_buf, u32 vndr_ie_len)
+{
+ s32 err = 0;
+ u8 *iovar_ie_buf;
+ u8 *curr_ie_buf;
+ u8 *mgmt_ie_buf = NULL;
+ u32 mgmt_ie_buf_len = 0;
+ u32 *mgmt_ie_len = 0;
+ u32 del_add_ie_buf_len = 0;
+ u32 total_ie_buf_len = 0;
+ u32 parsed_ie_buf_len = 0;
+ struct parsed_vndr_ies old_vndr_ies;
+ struct parsed_vndr_ies new_vndr_ies;
+ struct parsed_vndr_ie_info *vndrie_info;
+ s32 i;
+ u8 *ptr;
+ u32 remained_buf_len;
+
+ WL_TRACE("bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag);
+ iovar_ie_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+ if (!iovar_ie_buf)
+ return -ENOMEM;
+ curr_ie_buf = iovar_ie_buf;
+ if (test_bit(WL_STATUS_AP_CREATING, &cfg->status) ||
+ test_bit(WL_STATUS_AP_CREATED, &cfg->status)) {
+ switch (pktflag) {
+ case VNDR_IE_PRBRSP_FLAG:
+ mgmt_ie_buf = cfg->ap_info->probe_res_ie;
+ mgmt_ie_len = &cfg->ap_info->probe_res_ie_len;
+ mgmt_ie_buf_len =
+ sizeof(cfg->ap_info->probe_res_ie);
+ break;
+ case VNDR_IE_BEACON_FLAG:
+ mgmt_ie_buf = cfg->ap_info->beacon_ie;
+ mgmt_ie_len = &cfg->ap_info->beacon_ie_len;
+ mgmt_ie_buf_len = sizeof(cfg->ap_info->beacon_ie);
+ break;
+ default:
+ err = -EPERM;
+ WL_ERR("not suitable type\n");
+ goto exit;
+ }
+ bssidx = 0;
+ } else {
+ err = -EPERM;
+ WL_ERR("not suitable type\n");
+ goto exit;
+ }
+
+ if (vndr_ie_len > mgmt_ie_buf_len) {
+ err = -ENOMEM;
+ WL_ERR("extra IE size too big\n");
+ goto exit;
+ }
+
+	/* parse and save new vndr_ie in curr_ie_buf before comparing it */
+ if (vndr_ie_buf && vndr_ie_len && curr_ie_buf) {
+ ptr = curr_ie_buf;
+ brcmf_parse_vndr_ies(vndr_ie_buf, vndr_ie_len, &new_vndr_ies);
+ for (i = 0; i < new_vndr_ies.count; i++) {
+ vndrie_info = &new_vndr_ies.ie_info[i];
+ memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr,
+ vndrie_info->ie_len);
+ parsed_ie_buf_len += vndrie_info->ie_len;
+ }
+ }
+
+ if (mgmt_ie_buf != NULL) {
+ if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) &&
+ (memcmp(mgmt_ie_buf, curr_ie_buf,
+ parsed_ie_buf_len) == 0)) {
+			WL_TRACE("Previous mgmt IE equals current IE");
+ goto exit;
+ }
+
+ /* parse old vndr_ie */
+ brcmf_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len, &old_vndr_ies);
+
+ /* make a command to delete old ie */
+ for (i = 0; i < old_vndr_ies.count; i++) {
+ vndrie_info = &old_vndr_ies.ie_info[i];
+
+ WL_TRACE("DEL ID : %d, Len: %d , OUI:%02x:%02x:%02x\n",
+ vndrie_info->vndrie.id,
+ vndrie_info->vndrie.len,
+ vndrie_info->vndrie.oui[0],
+ vndrie_info->vndrie.oui[1],
+ vndrie_info->vndrie.oui[2]);
+
+ del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag,
+ vndrie_info->ie_ptr,
+ vndrie_info->ie_len,
+ "del");
+ curr_ie_buf += del_add_ie_buf_len;
+ total_ie_buf_len += del_add_ie_buf_len;
+ }
+ }
+
+ *mgmt_ie_len = 0;
+ /* Add if there is any extra IE */
+ if (mgmt_ie_buf && parsed_ie_buf_len) {
+ ptr = mgmt_ie_buf;
+
+ remained_buf_len = mgmt_ie_buf_len;
+
+ /* make a command to add new ie */
+ for (i = 0; i < new_vndr_ies.count; i++) {
+ vndrie_info = &new_vndr_ies.ie_info[i];
+
+ WL_TRACE("ADDED ID : %d, Len: %d, OUI:%02x:%02x:%02x\n",
+ vndrie_info->vndrie.id,
+ vndrie_info->vndrie.len,
+ vndrie_info->vndrie.oui[0],
+ vndrie_info->vndrie.oui[1],
+ vndrie_info->vndrie.oui[2]);
+
+ del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag,
+ vndrie_info->ie_ptr,
+ vndrie_info->ie_len,
+ "add");
+			/* verify remaining buf size before copying data */
+ remained_buf_len -= vndrie_info->ie_len;
+ if (remained_buf_len < 0) {
+ WL_ERR("no space in mgmt_ie_buf: len left %d",
+ remained_buf_len);
+ break;
+ }
+
+ /* save the parsed IE in wl struct */
+ memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr,
+ vndrie_info->ie_len);
+ *mgmt_ie_len += vndrie_info->ie_len;
+
+ curr_ie_buf += del_add_ie_buf_len;
+ total_ie_buf_len += del_add_ie_buf_len;
+ }
+ }
+ if (total_ie_buf_len) {
+ err = brcmf_dev_iovar_setbuf_bsscfg(ndev, "vndr_ie",
+ iovar_ie_buf,
+ total_ie_buf_len,
+ cfg->extra_buf,
+ WL_EXTRA_BUF_MAX, bssidx);
+ if (err)
+ WL_ERR("vndr ie set error : %d\n", err);
+ }
+
+exit:
+ kfree(iovar_ie_buf);
+ return err;
+}
+
+static s32
+brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_ap_settings *settings)
+{
+ s32 ie_offset;
+ struct brcmf_tlv *ssid_ie;
+ struct brcmf_ssid_le ssid_le;
+ s32 ioctl_value;
+ s32 err = -EPERM;
+ struct brcmf_tlv *rsn_ie;
+ struct brcmf_vs_tlv *wpa_ie;
+ struct brcmf_join_params join_params;
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ s32 bssidx = 0;
+
+ WL_TRACE("channel_type=%d, beacon_interval=%d, dtim_period=%d\n",
+ settings->channel_type, settings->beacon_interval,
+ settings->dtim_period);
+ WL_TRACE("ssid=%s(%d), auth_type=%d, inactivity_timeout=%d\n",
+ settings->ssid, settings->ssid_len, settings->auth_type,
+ settings->inactivity_timeout);
+
+ if (!test_bit(WL_STATUS_AP_CREATING, &cfg->status)) {
+ WL_ERR("Not in AP creation mode\n");
+ return -EPERM;
+ }
+
+ memset(&ssid_le, 0, sizeof(ssid_le));
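+ /* when no SSID is given, take it from the SSID IE in the beacon head */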
+ if (settings->ssid == NULL || settings->ssid_len == 0) {
+ ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
+ ssid_ie = brcmf_parse_tlvs(
+ (u8 *)&settings->beacon.head[ie_offset],
+ settings->beacon.head_len - ie_offset,
+ WLAN_EID_SSID);
+ if (!ssid_ie)
+ return -EINVAL;
+
+ memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
+ ssid_le.SSID_len = cpu_to_le32(ssid_ie->len);
+ WL_TRACE("SSID is (%s) in Head\n", ssid_le.SSID);
+ } else {
+ memcpy(ssid_le.SSID, settings->ssid, settings->ssid_len);
+ ssid_le.SSID_len = cpu_to_le32((u32)settings->ssid_len);
+ }
+
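+ /* disable firmware power saving (mpc) while the AP is configured */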
+ brcmf_set_mpc(ndev, 0);
+ ioctl_value = 1;
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_DOWN, &ioctl_value);
+ if (err < 0) {
+ WL_ERR("BRCMF_C_DOWN error %d\n", err);
+ goto exit;
+ }
+ ioctl_value = 1;
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &ioctl_value);
+ if (err < 0) {
+ WL_ERR("SET INFRA error %d\n", err);
+ goto exit;
+ }
+ ioctl_value = 1;
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AP, &ioctl_value);
+ if (err < 0) {
+ WL_ERR("setting AP mode failed %d\n", err);
+ goto exit;
+ }
+
+ /* find the RSN_IE */
+ rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
+ settings->beacon.tail_len, WLAN_EID_RSN);
+
+ /* find the WPA_IE */
+ wpa_ie = brcmf_find_wpaie((u8 *)settings->beacon.tail,
+ settings->beacon.tail_len);
+
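+ /* drop WPA/RSN IEs cached from a previous start_ap call */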
+ kfree(cfg->ap_info->rsn_ie);
+ cfg->ap_info->rsn_ie = NULL;
+ kfree(cfg->ap_info->wpa_ie);
+ cfg->ap_info->wpa_ie = NULL;
+
+ if (wpa_ie != NULL || rsn_ie != NULL) {
+ WL_TRACE("WPA(2) IE is found\n");
+ if (wpa_ie != NULL) {
+ /* WPA IE */
+ err = brcmf_configure_wpaie(ndev, wpa_ie, false,
+ bssidx);
+ if (err < 0)
+ goto exit;
+ cfg->ap_info->wpa_ie = kmemdup(wpa_ie,
+ wpa_ie->len +
+ TLV_HDR_LEN,
+ GFP_KERNEL);
+ } else {
+ /* RSN IE */
+ err = brcmf_configure_wpaie(ndev,
+ (struct brcmf_vs_tlv *)rsn_ie, true, bssidx);
+ if (err < 0)
+ goto exit;
+ cfg->ap_info->rsn_ie = kmemdup(rsn_ie,
+ rsn_ie->len +
+ TLV_HDR_LEN,
+ GFP_KERNEL);
+ }
+ cfg->ap_info->security_mode = true;
+ } else {
+ WL_TRACE("No WPA(2) IEs found\n");
+ brcmf_configure_opensecurity(ndev, bssidx);
+ cfg->ap_info->security_mode = false;
+ }
+ /* Set Beacon IEs to FW */
+ err = brcmf_set_management_ie(cfg, ndev, bssidx,
+ VNDR_IE_BEACON_FLAG,
+ (u8 *)settings->beacon.tail,
+ settings->beacon.tail_len);
+ if (err)
+ WL_ERR("Set Beacon IE Failed\n");
+ else
+ WL_TRACE("Applied Vndr IEs for Beacon\n");
+
+ /* Set Probe Response IEs to FW */
+ err = brcmf_set_management_ie(cfg, ndev, bssidx,
+ VNDR_IE_PRBRSP_FLAG,
+ (u8 *)settings->beacon.proberesp_ies,
+ settings->beacon.proberesp_ies_len);
+ if (err)
+ WL_ERR("Set Probe Resp IE Failed\n");
+ else
+ WL_TRACE("Applied Vndr IEs for Probe Resp\n");
+
+ if (settings->beacon_interval) {
+ ioctl_value = settings->beacon_interval;
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_BCNPRD,
+ &ioctl_value);
+ if (err < 0) {
+ WL_ERR("Beacon Interval Set Error, %d\n", err);
+ goto exit;
+ }
+ }
+ if (settings->dtim_period) {
+ ioctl_value = settings->dtim_period;
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_DTIMPRD,
+ &ioctl_value);
+ if (err < 0) {
+ WL_ERR("DTIM Interval Set Error, %d\n", err);
+ goto exit;
+ }
+ }
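+ /* bring the interface up before creating the BSS with SET_SSID */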
+ ioctl_value = 1;
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_UP, &ioctl_value);
+ if (err < 0) {
+ WL_ERR("BRCMF_C_UP error (%d)\n", err);
+ goto exit;
+ }
+
+ memset(&join_params, 0, sizeof(join_params));
+ /* join parameters start with ssid */
+ memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le));
+ /* create softap */
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID, &join_params,
+ sizeof(join_params));
+ if (err < 0) {
+ WL_ERR("SET SSID error (%d)\n", err);
+ goto exit;
+ }
+ clear_bit(WL_STATUS_AP_CREATING, &cfg->status);
+ set_bit(WL_STATUS_AP_CREATED, &cfg->status);
+
+exit:
+ if (err)
+ brcmf_set_mpc(ndev, 1);
+ return err;
+}
+
+static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ s32 ioctl_value;
+ s32 err = -EPERM;
+
+ WL_TRACE("Enter\n");
+
+ if (cfg->conf->mode == WL_MODE_AP) {
+ /* deauth frames are most likely still outstanding; sleep first
+ * so the firmware gets a chance to process them
+ */
+ msleep(400);
+ ioctl_value = 0;
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AP, &ioctl_value);
+ if (err < 0) {
+ WL_ERR("setting AP mode failed %d\n", err);
+ goto exit;
+ }
+ ioctl_value = 0;
+ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_UP, &ioctl_value);
+ if (err < 0) {
+ WL_ERR("BRCMF_C_UP error %d\n", err);
+ goto exit;
+ }
+ brcmf_set_mpc(ndev, 1);
+ clear_bit(WL_STATUS_AP_CREATING, &cfg->status);
+ clear_bit(WL_STATUS_AP_CREATED, &cfg->status);
+ }
+exit:
+ return err;
+}
+
+static int
+brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
+ u8 *mac)
+{
+ struct brcmf_scb_val_le scbval;
+ s32 err;
+
+ if (!mac)
+ return -EFAULT;
+
+ WL_TRACE("Enter %pM\n", mac);
+
+ if (!check_sys_up(wiphy))
+ return -EIO;
+
+ memcpy(&scbval.ea, mac, ETH_ALEN);
+ scbval.val = cpu_to_le32(WLAN_REASON_DEAUTH_LEAVING);
+ err = brcmf_exec_dcmd(ndev, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
+ &scbval, sizeof(scbval));
+ if (err)
+ WL_ERR("SCB_DEAUTHENTICATE_FOR_REASON failed %d\n", err);
+
+ WL_TRACE("Exit\n");
+ return err;
+}
+
static struct cfg80211_ops wl_cfg80211_ops = {
.change_virtual_intf = brcmf_cfg80211_change_iface,
.scan = brcmf_cfg80211_scan,
@@ -2748,7 +4369,18 @@ static struct cfg80211_ops wl_cfg80211_ops = {
.resume = brcmf_cfg80211_resume,
.set_pmksa = brcmf_cfg80211_set_pmksa,
.del_pmksa = brcmf_cfg80211_del_pmksa,
- .flush_pmksa = brcmf_cfg80211_flush_pmksa
+ .flush_pmksa = brcmf_cfg80211_flush_pmksa,
+ .start_ap = brcmf_cfg80211_start_ap,
+ .stop_ap = brcmf_cfg80211_stop_ap,
+ .del_station = brcmf_cfg80211_del_station,
+#ifndef CONFIG_BRCMISCAN
+ /* scheduled scan needs e-scan, which is mutually exclusive with i-scan */
+ .sched_scan_start = brcmf_cfg80211_sched_scan_start,
+ .sched_scan_stop = brcmf_cfg80211_sched_scan_stop,
+#endif
+#ifdef CONFIG_NL80211_TESTMODE
+ .testmode_cmd = brcmf_cfg80211_testmode
+#endif
};
static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
@@ -2767,8 +4399,18 @@ static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
return err;
}
-static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
- struct device *ndev)
+static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
+{
+#ifndef CONFIG_BRCMISCAN
+ /* scheduled scan settings */
+ wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
+ wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
+ wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
+ wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+#endif
+}
+
+static struct wireless_dev *brcmf_alloc_wdev(struct device *ndev)
{
struct wireless_dev *wdev;
s32 err = 0;
@@ -2777,9 +4419,8 @@ static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
if (!wdev)
return ERR_PTR(-ENOMEM);
- wdev->wiphy =
- wiphy_new(&wl_cfg80211_ops,
- sizeof(struct brcmf_cfg80211_priv) + sizeof_iface);
+ wdev->wiphy = wiphy_new(&wl_cfg80211_ops,
+ sizeof(struct brcmf_cfg80211_info));
if (!wdev->wiphy) {
WL_ERR("Could not allocate wiphy device\n");
err = -ENOMEM;
@@ -2788,8 +4429,9 @@ static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
set_wiphy_dev(wdev->wiphy, ndev);
wdev->wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
- wdev->wiphy->interface_modes =
- BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
+ wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP);
wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set
* it as 11a by default.
@@ -2805,6 +4447,7 @@ static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
* save mode
* by default
*/
+ brcmf_wiphy_pno_params(wdev->wiphy);
err = wiphy_register(wdev->wiphy);
if (err < 0) {
WL_ERR("Could not register wiphy device (%d)\n", err);
@@ -2821,9 +4464,9 @@ wiphy_new_out:
return ERR_PTR(err);
}
-static void brcmf_free_wdev(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_free_wdev(struct brcmf_cfg80211_info *cfg)
{
- struct wireless_dev *wdev = cfg_priv->wdev;
+ struct wireless_dev *wdev = cfg->wdev;
if (!wdev) {
WL_ERR("wdev is invalid\n");
@@ -2832,10 +4475,10 @@ static void brcmf_free_wdev(struct brcmf_cfg80211_priv *cfg_priv)
wiphy_unregister(wdev->wiphy);
wiphy_free(wdev->wiphy);
kfree(wdev);
- cfg_priv->wdev = NULL;
+ cfg->wdev = NULL;
}
-static bool brcmf_is_linkup(struct brcmf_cfg80211_priv *cfg_priv,
+static bool brcmf_is_linkup(struct brcmf_cfg80211_info *cfg,
const struct brcmf_event_msg *e)
{
u32 event = be32_to_cpu(e->event_type);
@@ -2843,14 +4486,14 @@ static bool brcmf_is_linkup(struct brcmf_cfg80211_priv *cfg_priv,
if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) {
WL_CONN("Processing set ssid\n");
- cfg_priv->link_up = true;
+ cfg->link_up = true;
return true;
}
return false;
}
-static bool brcmf_is_linkdown(struct brcmf_cfg80211_priv *cfg_priv,
+static bool brcmf_is_linkdown(struct brcmf_cfg80211_info *cfg,
const struct brcmf_event_msg *e)
{
u32 event = be32_to_cpu(e->event_type);
@@ -2863,7 +4506,7 @@ static bool brcmf_is_linkdown(struct brcmf_cfg80211_priv *cfg_priv,
return false;
}
-static bool brcmf_is_nonetwork(struct brcmf_cfg80211_priv *cfg_priv,
+static bool brcmf_is_nonetwork(struct brcmf_cfg80211_info *cfg,
const struct brcmf_event_msg *e)
{
u32 event = be32_to_cpu(e->event_type);
@@ -2884,9 +4527,9 @@ static bool brcmf_is_nonetwork(struct brcmf_cfg80211_priv *cfg_priv,
return false;
}
-static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
+ struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
kfree(conn_info->req_ie);
conn_info->req_ie = NULL;
@@ -2896,30 +4539,30 @@ static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
conn_info->resp_ie_len = 0;
}
-static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
{
- struct net_device *ndev = cfg_to_ndev(cfg_priv);
+ struct net_device *ndev = cfg_to_ndev(cfg);
struct brcmf_cfg80211_assoc_ielen_le *assoc_info;
- struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
+ struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
u32 req_len;
u32 resp_len;
s32 err = 0;
- brcmf_clear_assoc_ies(cfg_priv);
+ brcmf_clear_assoc_ies(cfg);
- err = brcmf_dev_bufvar_get(ndev, "assoc_info", cfg_priv->extra_buf,
+ err = brcmf_dev_bufvar_get(ndev, "assoc_info", cfg->extra_buf,
WL_ASSOC_INFO_MAX);
if (err) {
WL_ERR("could not get assoc info (%d)\n", err);
return err;
}
assoc_info =
- (struct brcmf_cfg80211_assoc_ielen_le *)cfg_priv->extra_buf;
+ (struct brcmf_cfg80211_assoc_ielen_le *)cfg->extra_buf;
req_len = le32_to_cpu(assoc_info->req_len);
resp_len = le32_to_cpu(assoc_info->resp_len);
if (req_len) {
err = brcmf_dev_bufvar_get(ndev, "assoc_req_ies",
- cfg_priv->extra_buf,
+ cfg->extra_buf,
WL_ASSOC_INFO_MAX);
if (err) {
WL_ERR("could not get assoc req (%d)\n", err);
@@ -2927,7 +4570,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
}
conn_info->req_ie_len = req_len;
conn_info->req_ie =
- kmemdup(cfg_priv->extra_buf, conn_info->req_ie_len,
+ kmemdup(cfg->extra_buf, conn_info->req_ie_len,
GFP_KERNEL);
} else {
conn_info->req_ie_len = 0;
@@ -2935,7 +4578,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
}
if (resp_len) {
err = brcmf_dev_bufvar_get(ndev, "assoc_resp_ies",
- cfg_priv->extra_buf,
+ cfg->extra_buf,
WL_ASSOC_INFO_MAX);
if (err) {
WL_ERR("could not get assoc resp (%d)\n", err);
@@ -2943,7 +4586,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
}
conn_info->resp_ie_len = resp_len;
conn_info->resp_ie =
- kmemdup(cfg_priv->extra_buf, conn_info->resp_ie_len,
+ kmemdup(cfg->extra_buf, conn_info->resp_ie_len,
GFP_KERNEL);
} else {
conn_info->resp_ie_len = 0;
@@ -2956,12 +4599,13 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
}
static s32
-brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv,
+brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e)
{
- struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
- struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
+ struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
+ struct wiphy *wiphy = cfg_to_wiphy(cfg);
struct brcmf_channel_info_le channel_le;
struct ieee80211_channel *notify_channel;
struct ieee80211_supported_band *band;
@@ -2971,9 +4615,9 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv,
WL_TRACE("Enter\n");
- brcmf_get_assoc_ies(cfg_priv);
- brcmf_update_prof(cfg_priv, NULL, &e->addr, WL_PROF_BSSID);
- brcmf_update_bss_info(cfg_priv);
+ brcmf_get_assoc_ies(cfg);
+ memcpy(profile->bssid, e->addr, ETH_ALEN);
+ brcmf_update_bss_info(cfg);
brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_le,
sizeof(channel_le));
@@ -2989,37 +4633,35 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv,
freq = ieee80211_channel_to_frequency(target_channel, band->band);
notify_channel = ieee80211_get_channel(wiphy, freq);
- cfg80211_roamed(ndev, notify_channel,
- (u8 *)brcmf_read_prof(cfg_priv, WL_PROF_BSSID),
+ cfg80211_roamed(ndev, notify_channel, (u8 *)profile->bssid,
conn_info->req_ie, conn_info->req_ie_len,
conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
WL_CONN("Report roaming result\n");
- set_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
+ set_bit(WL_STATUS_CONNECTED, &cfg->status);
WL_TRACE("Exit\n");
return err;
}
static s32
-brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv,
+brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev, const struct brcmf_event_msg *e,
bool completed)
{
- struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
+ struct brcmf_cfg80211_profile *profile = cfg->profile;
+ struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
s32 err = 0;
WL_TRACE("Enter\n");
- if (test_and_clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) {
+ if (test_and_clear_bit(WL_STATUS_CONNECTING, &cfg->status)) {
if (completed) {
- brcmf_get_assoc_ies(cfg_priv);
- brcmf_update_prof(cfg_priv, NULL, &e->addr,
- WL_PROF_BSSID);
- brcmf_update_bss_info(cfg_priv);
+ brcmf_get_assoc_ies(cfg);
+ memcpy(profile->bssid, e->addr, ETH_ALEN);
+ brcmf_update_bss_info(cfg);
}
cfg80211_connect_result(ndev,
- (u8 *)brcmf_read_prof(cfg_priv,
- WL_PROF_BSSID),
+ (u8 *)profile->bssid,
conn_info->req_ie,
conn_info->req_ie_len,
conn_info->resp_ie,
@@ -3028,7 +4670,7 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv,
WLAN_STATUS_AUTH_TIMEOUT,
GFP_KERNEL);
if (completed)
- set_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
+ set_bit(WL_STATUS_CONNECTED, &cfg->status);
WL_CONN("Report connect result - connection %s\n",
completed ? "succeeded" : "failed");
}
@@ -3037,52 +4679,93 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv,
}
static s32
-brcmf_notify_connect_status(struct brcmf_cfg80211_priv *cfg_priv,
+brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e, void *data)
+{
+ s32 err = 0;
+ u32 event = be32_to_cpu(e->event_type);
+ u32 reason = be32_to_cpu(e->reason);
+ u32 len = be32_to_cpu(e->datalen);
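+ /* bumped on each station add/del so cfg80211 can detect stale info */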
+ static int generation;
+
+ struct station_info sinfo;
+
+ WL_CONN("event %d, reason %d\n", event, reason);
+ memset(&sinfo, 0, sizeof(sinfo));
+
+ sinfo.filled = 0;
+ if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) &&
+ reason == BRCMF_E_STATUS_SUCCESS) {
+ sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
+ if (!data) {
+ WL_ERR("No IEs present in ASSOC/REASSOC_IND");
+ return -EINVAL;
+ }
+ sinfo.assoc_req_ies = data;
+ sinfo.assoc_req_ies_len = len;
+ generation++;
+ sinfo.generation = generation;
+ cfg80211_new_sta(ndev, e->addr, &sinfo, GFP_ATOMIC);
+ } else if ((event == BRCMF_E_DISASSOC_IND) ||
+ (event == BRCMF_E_DEAUTH_IND) ||
+ (event == BRCMF_E_DEAUTH)) {
+ generation++;
+ sinfo.generation = generation;
+ cfg80211_del_sta(ndev, e->addr, GFP_ATOMIC);
+ }
+ return err;
+}
+
+static s32
+brcmf_notify_connect_status(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e, void *data)
{
+ struct brcmf_cfg80211_profile *profile = cfg->profile;
s32 err = 0;
- if (brcmf_is_linkup(cfg_priv, e)) {
+ if (cfg->conf->mode == WL_MODE_AP) {
+ err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
+ } else if (brcmf_is_linkup(cfg, e)) {
WL_CONN("Linkup\n");
- if (brcmf_is_ibssmode(cfg_priv)) {
- brcmf_update_prof(cfg_priv, NULL, (void *)e->addr,
- WL_PROF_BSSID);
- wl_inform_ibss(cfg_priv, ndev, e->addr);
+ if (brcmf_is_ibssmode(cfg)) {
+ memcpy(profile->bssid, e->addr, ETH_ALEN);
+ wl_inform_ibss(cfg, ndev, e->addr);
cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
- clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
- set_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
+ clear_bit(WL_STATUS_CONNECTING, &cfg->status);
+ set_bit(WL_STATUS_CONNECTED, &cfg->status);
} else
- brcmf_bss_connect_done(cfg_priv, ndev, e, true);
- } else if (brcmf_is_linkdown(cfg_priv, e)) {
+ brcmf_bss_connect_done(cfg, ndev, e, true);
+ } else if (brcmf_is_linkdown(cfg, e)) {
WL_CONN("Linkdown\n");
- if (brcmf_is_ibssmode(cfg_priv)) {
- clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+ if (brcmf_is_ibssmode(cfg)) {
+ clear_bit(WL_STATUS_CONNECTING, &cfg->status);
if (test_and_clear_bit(WL_STATUS_CONNECTED,
- &cfg_priv->status))
- brcmf_link_down(cfg_priv);
+ &cfg->status))
+ brcmf_link_down(cfg);
} else {
- brcmf_bss_connect_done(cfg_priv, ndev, e, false);
+ brcmf_bss_connect_done(cfg, ndev, e, false);
if (test_and_clear_bit(WL_STATUS_CONNECTED,
- &cfg_priv->status)) {
+ &cfg->status)) {
cfg80211_disconnected(ndev, 0, NULL, 0,
GFP_KERNEL);
- brcmf_link_down(cfg_priv);
+ brcmf_link_down(cfg);
}
}
- brcmf_init_prof(cfg_priv->profile);
- } else if (brcmf_is_nonetwork(cfg_priv, e)) {
- if (brcmf_is_ibssmode(cfg_priv))
- clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+ brcmf_init_prof(cfg->profile);
+ } else if (brcmf_is_nonetwork(cfg, e)) {
+ if (brcmf_is_ibssmode(cfg))
+ clear_bit(WL_STATUS_CONNECTING, &cfg->status);
else
- brcmf_bss_connect_done(cfg_priv, ndev, e, false);
+ brcmf_bss_connect_done(cfg, ndev, e, false);
}
return err;
}
static s32
-brcmf_notify_roaming_status(struct brcmf_cfg80211_priv *cfg_priv,
+brcmf_notify_roaming_status(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e, void *data)
{
@@ -3091,17 +4774,17 @@ brcmf_notify_roaming_status(struct brcmf_cfg80211_priv *cfg_priv,
u32 status = be32_to_cpu(e->status);
if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) {
- if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status))
- brcmf_bss_roaming_done(cfg_priv, ndev, e);
+ if (test_bit(WL_STATUS_CONNECTED, &cfg->status))
+ brcmf_bss_roaming_done(cfg, ndev, e);
else
- brcmf_bss_connect_done(cfg_priv, ndev, e, true);
+ brcmf_bss_connect_done(cfg, ndev, e, true);
}
return err;
}
static s32
-brcmf_notify_mic_status(struct brcmf_cfg80211_priv *cfg_priv,
+brcmf_notify_mic_status(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e, void *data)
{
@@ -3120,7 +4803,7 @@ brcmf_notify_mic_status(struct brcmf_cfg80211_priv *cfg_priv,
}
static s32
-brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv,
+brcmf_notify_scan_status(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e, void *data)
{
@@ -3133,12 +4816,12 @@ brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv,
WL_TRACE("Enter\n");
- if (cfg_priv->iscan_on && cfg_priv->iscan_kickstart) {
+ if (cfg->iscan_on && cfg->iscan_kickstart) {
WL_TRACE("Exit\n");
- return brcmf_wakeup_iscan(cfg_to_iscan(cfg_priv));
+ return brcmf_wakeup_iscan(cfg_to_iscan(cfg));
}
- if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg_priv->status)) {
+ if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
WL_ERR("Scan complete while device not scanning\n");
scan_abort = true;
err = -EINVAL;
@@ -3155,35 +4838,33 @@ brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv,
scan_channel = le32_to_cpu(channel_inform_le.scan_channel);
if (scan_channel)
WL_CONN("channel_inform.scan_channel (%d)\n", scan_channel);
- cfg_priv->bss_list = cfg_priv->scan_results;
- bss_list_le = (struct brcmf_scan_results_le *) cfg_priv->bss_list;
+ cfg->bss_list = cfg->scan_results;
+ bss_list_le = (struct brcmf_scan_results_le *) cfg->bss_list;
- memset(cfg_priv->scan_results, 0, len);
+ memset(cfg->scan_results, 0, len);
bss_list_le->buflen = cpu_to_le32(len);
err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN_RESULTS,
- cfg_priv->scan_results, len);
+ cfg->scan_results, len);
if (err) {
WL_ERR("%s Scan_results error (%d)\n", ndev->name, err);
err = -EINVAL;
scan_abort = true;
goto scan_done_out;
}
- cfg_priv->scan_results->buflen = le32_to_cpu(bss_list_le->buflen);
- cfg_priv->scan_results->version = le32_to_cpu(bss_list_le->version);
- cfg_priv->scan_results->count = le32_to_cpu(bss_list_le->count);
+ cfg->scan_results->buflen = le32_to_cpu(bss_list_le->buflen);
+ cfg->scan_results->version = le32_to_cpu(bss_list_le->version);
+ cfg->scan_results->count = le32_to_cpu(bss_list_le->count);
- err = brcmf_inform_bss(cfg_priv);
- if (err) {
+ err = brcmf_inform_bss(cfg);
+ if (err)
scan_abort = true;
- goto scan_done_out;
- }
scan_done_out:
- if (cfg_priv->scan_request) {
+ if (cfg->scan_request) {
WL_SCAN("calling cfg80211_scan_done\n");
- cfg80211_scan_done(cfg_priv->scan_request, scan_abort);
+ cfg80211_scan_done(cfg->scan_request, scan_abort);
brcmf_set_mpc(ndev, 1);
- cfg_priv->scan_request = NULL;
+ cfg->scan_request = NULL;
}
WL_TRACE("Exit\n");
@@ -3206,68 +4887,85 @@ static void brcmf_init_eloop_handler(struct brcmf_cfg80211_event_loop *el)
memset(el, 0, sizeof(*el));
el->handler[BRCMF_E_SCAN_COMPLETE] = brcmf_notify_scan_status;
el->handler[BRCMF_E_LINK] = brcmf_notify_connect_status;
+ el->handler[BRCMF_E_DEAUTH_IND] = brcmf_notify_connect_status;
+ el->handler[BRCMF_E_DEAUTH] = brcmf_notify_connect_status;
+ el->handler[BRCMF_E_DISASSOC_IND] = brcmf_notify_connect_status;
+ el->handler[BRCMF_E_ASSOC_IND] = brcmf_notify_connect_status;
+ el->handler[BRCMF_E_REASSOC_IND] = brcmf_notify_connect_status;
el->handler[BRCMF_E_ROAM] = brcmf_notify_roaming_status;
el->handler[BRCMF_E_MIC_ERROR] = brcmf_notify_mic_status;
el->handler[BRCMF_E_SET_SSID] = brcmf_notify_connect_status;
+ el->handler[BRCMF_E_PFN_NET_FOUND] = brcmf_notify_sched_scan_results;
+}
+
+static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg)
+{
+ kfree(cfg->scan_results);
+ cfg->scan_results = NULL;
+ kfree(cfg->bss_info);
+ cfg->bss_info = NULL;
+ kfree(cfg->conf);
+ cfg->conf = NULL;
+ kfree(cfg->profile);
+ cfg->profile = NULL;
+ kfree(cfg->scan_req_int);
+ cfg->scan_req_int = NULL;
+ kfree(cfg->escan_ioctl_buf);
+ cfg->escan_ioctl_buf = NULL;
+ kfree(cfg->dcmd_buf);
+ cfg->dcmd_buf = NULL;
+ kfree(cfg->extra_buf);
+ cfg->extra_buf = NULL;
+ kfree(cfg->iscan);
+ cfg->iscan = NULL;
+ kfree(cfg->pmk_list);
+ cfg->pmk_list = NULL;
+ if (cfg->ap_info) {
+ kfree(cfg->ap_info->wpa_ie);
+ kfree(cfg->ap_info->rsn_ie);
+ kfree(cfg->ap_info);
+ cfg->ap_info = NULL;
+ }
}
-static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_priv *cfg_priv)
-{
- kfree(cfg_priv->scan_results);
- cfg_priv->scan_results = NULL;
- kfree(cfg_priv->bss_info);
- cfg_priv->bss_info = NULL;
- kfree(cfg_priv->conf);
- cfg_priv->conf = NULL;
- kfree(cfg_priv->profile);
- cfg_priv->profile = NULL;
- kfree(cfg_priv->scan_req_int);
- cfg_priv->scan_req_int = NULL;
- kfree(cfg_priv->dcmd_buf);
- cfg_priv->dcmd_buf = NULL;
- kfree(cfg_priv->extra_buf);
- cfg_priv->extra_buf = NULL;
- kfree(cfg_priv->iscan);
- cfg_priv->iscan = NULL;
- kfree(cfg_priv->pmk_list);
- cfg_priv->pmk_list = NULL;
-}
-
-static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_priv *cfg_priv)
-{
- cfg_priv->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
- if (!cfg_priv->scan_results)
+static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_info *cfg)
+{
+ cfg->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
+ if (!cfg->scan_results)
goto init_priv_mem_out;
- cfg_priv->conf = kzalloc(sizeof(*cfg_priv->conf), GFP_KERNEL);
- if (!cfg_priv->conf)
+ cfg->conf = kzalloc(sizeof(*cfg->conf), GFP_KERNEL);
+ if (!cfg->conf)
goto init_priv_mem_out;
- cfg_priv->profile = kzalloc(sizeof(*cfg_priv->profile), GFP_KERNEL);
- if (!cfg_priv->profile)
+ cfg->profile = kzalloc(sizeof(*cfg->profile), GFP_KERNEL);
+ if (!cfg->profile)
goto init_priv_mem_out;
- cfg_priv->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
- if (!cfg_priv->bss_info)
+ cfg->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
+ if (!cfg->bss_info)
goto init_priv_mem_out;
- cfg_priv->scan_req_int = kzalloc(sizeof(*cfg_priv->scan_req_int),
+ cfg->scan_req_int = kzalloc(sizeof(*cfg->scan_req_int),
GFP_KERNEL);
- if (!cfg_priv->scan_req_int)
+ if (!cfg->scan_req_int)
+ goto init_priv_mem_out;
+ cfg->escan_ioctl_buf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
+ if (!cfg->escan_ioctl_buf)
goto init_priv_mem_out;
- cfg_priv->dcmd_buf = kzalloc(WL_DCMD_LEN_MAX, GFP_KERNEL);
- if (!cfg_priv->dcmd_buf)
+ cfg->dcmd_buf = kzalloc(WL_DCMD_LEN_MAX, GFP_KERNEL);
+ if (!cfg->dcmd_buf)
goto init_priv_mem_out;
- cfg_priv->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
- if (!cfg_priv->extra_buf)
+ cfg->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+ if (!cfg->extra_buf)
goto init_priv_mem_out;
- cfg_priv->iscan = kzalloc(sizeof(*cfg_priv->iscan), GFP_KERNEL);
- if (!cfg_priv->iscan)
+ cfg->iscan = kzalloc(sizeof(*cfg->iscan), GFP_KERNEL);
+ if (!cfg->iscan)
goto init_priv_mem_out;
- cfg_priv->pmk_list = kzalloc(sizeof(*cfg_priv->pmk_list), GFP_KERNEL);
- if (!cfg_priv->pmk_list)
+ cfg->pmk_list = kzalloc(sizeof(*cfg->pmk_list), GFP_KERNEL);
+ if (!cfg->pmk_list)
goto init_priv_mem_out;
return 0;
init_priv_mem_out:
- brcmf_deinit_priv_mem(cfg_priv);
+ brcmf_deinit_priv_mem(cfg);
return -ENOMEM;
}
@@ -3277,17 +4975,17 @@ init_priv_mem_out:
*/
static struct brcmf_cfg80211_event_q *brcmf_deq_event(
- struct brcmf_cfg80211_priv *cfg_priv)
+ struct brcmf_cfg80211_info *cfg)
{
struct brcmf_cfg80211_event_q *e = NULL;
- spin_lock_irq(&cfg_priv->evt_q_lock);
- if (!list_empty(&cfg_priv->evt_q_list)) {
- e = list_first_entry(&cfg_priv->evt_q_list,
+ spin_lock_irq(&cfg->evt_q_lock);
+ if (!list_empty(&cfg->evt_q_list)) {
+ e = list_first_entry(&cfg->evt_q_list,
struct brcmf_cfg80211_event_q, evt_q_list);
list_del(&e->evt_q_list);
}
- spin_unlock_irq(&cfg_priv->evt_q_lock);
+ spin_unlock_irq(&cfg->evt_q_lock);
return e;
}
@@ -3299,23 +4997,33 @@ static struct brcmf_cfg80211_event_q *brcmf_deq_event(
*/
static s32
-brcmf_enq_event(struct brcmf_cfg80211_priv *cfg_priv, u32 event,
- const struct brcmf_event_msg *msg)
+brcmf_enq_event(struct brcmf_cfg80211_info *cfg, u32 event,
+ const struct brcmf_event_msg *msg, void *data)
{
struct brcmf_cfg80211_event_q *e;
s32 err = 0;
ulong flags;
+ u32 data_len;
+ u32 total_len;
- e = kzalloc(sizeof(struct brcmf_cfg80211_event_q), GFP_ATOMIC);
+ total_len = sizeof(struct brcmf_cfg80211_event_q);
+ if (data)
+ data_len = be32_to_cpu(msg->datalen);
+ else
+ data_len = 0;
+ total_len += data_len;
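+ /* allocate the queue entry with room for the event data appended */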
+ e = kzalloc(total_len, GFP_ATOMIC);
if (!e)
return -ENOMEM;
e->etype = event;
memcpy(&e->emsg, msg, sizeof(struct brcmf_event_msg));
+ if (data)
+ memcpy(&e->edata, data, data_len);
- spin_lock_irqsave(&cfg_priv->evt_q_lock, flags);
- list_add_tail(&e->evt_q_list, &cfg_priv->evt_q_list);
- spin_unlock_irqrestore(&cfg_priv->evt_q_lock, flags);
+ spin_lock_irqsave(&cfg->evt_q_lock, flags);
+ list_add_tail(&e->evt_q_list, &cfg->evt_q_list);
+ spin_unlock_irqrestore(&cfg->evt_q_lock, flags);
return err;
}
@@ -3327,12 +5035,12 @@ static void brcmf_put_event(struct brcmf_cfg80211_event_q *e)
static void brcmf_cfg80211_event_handler(struct work_struct *work)
{
- struct brcmf_cfg80211_priv *cfg_priv =
- container_of(work, struct brcmf_cfg80211_priv,
+ struct brcmf_cfg80211_info *cfg =
+ container_of(work, struct brcmf_cfg80211_info,
event_work);
struct brcmf_cfg80211_event_q *e;
- e = brcmf_deq_event(cfg_priv);
+ e = brcmf_deq_event(cfg);
if (unlikely(!e)) {
WL_ERR("event queue empty...\n");
return;
@@ -3340,137 +5048,131 @@ static void brcmf_cfg80211_event_handler(struct work_struct *work)
do {
WL_INFO("event type (%d)\n", e->etype);
- if (cfg_priv->el.handler[e->etype])
- cfg_priv->el.handler[e->etype](cfg_priv,
- cfg_to_ndev(cfg_priv),
+ if (cfg->el.handler[e->etype])
+ cfg->el.handler[e->etype](cfg,
+ cfg_to_ndev(cfg),
&e->emsg, e->edata);
else
WL_INFO("Unknown Event (%d): ignoring\n", e->etype);
brcmf_put_event(e);
- } while ((e = brcmf_deq_event(cfg_priv)));
+ } while ((e = brcmf_deq_event(cfg)));
}
-static void brcmf_init_eq(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_init_eq(struct brcmf_cfg80211_info *cfg)
{
- spin_lock_init(&cfg_priv->evt_q_lock);
- INIT_LIST_HEAD(&cfg_priv->evt_q_list);
+ spin_lock_init(&cfg->evt_q_lock);
+ INIT_LIST_HEAD(&cfg->evt_q_list);
}
-static void brcmf_flush_eq(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_flush_eq(struct brcmf_cfg80211_info *cfg)
{
struct brcmf_cfg80211_event_q *e;
- spin_lock_irq(&cfg_priv->evt_q_lock);
- while (!list_empty(&cfg_priv->evt_q_list)) {
- e = list_first_entry(&cfg_priv->evt_q_list,
+ spin_lock_irq(&cfg->evt_q_lock);
+ while (!list_empty(&cfg->evt_q_list)) {
+ e = list_first_entry(&cfg->evt_q_list,
struct brcmf_cfg80211_event_q, evt_q_list);
list_del(&e->evt_q_list);
kfree(e);
}
- spin_unlock_irq(&cfg_priv->evt_q_lock);
+ spin_unlock_irq(&cfg->evt_q_lock);
}
-static s32 wl_init_priv(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
{
s32 err = 0;
- cfg_priv->scan_request = NULL;
- cfg_priv->pwr_save = true;
- cfg_priv->iscan_on = true; /* iscan on & off switch.
+ cfg->scan_request = NULL;
+ cfg->pwr_save = true;
+#ifdef CONFIG_BRCMISCAN
+ cfg->iscan_on = true; /* iscan on & off switch.
we enable iscan per default */
- cfg_priv->roam_on = true; /* roam on & off switch.
+ cfg->escan_on = false; /* escan on & off switch.
+ we disable escan per default */
+#else
+ cfg->iscan_on = false; /* iscan on & off switch.
+ we disable iscan per default */
+ cfg->escan_on = true; /* escan on & off switch.
+ we enable escan per default */
+#endif
+ cfg->roam_on = true; /* roam on & off switch.
we enable roam per default */
- cfg_priv->iscan_kickstart = false;
- cfg_priv->active_scan = true; /* we do active scan for
+ cfg->iscan_kickstart = false;
+ cfg->active_scan = true; /* we do active scan for
specific scan per default */
- cfg_priv->dongle_up = false; /* dongle is not up yet */
- brcmf_init_eq(cfg_priv);
- err = brcmf_init_priv_mem(cfg_priv);
+ cfg->dongle_up = false; /* dongle is not up yet */
+ brcmf_init_eq(cfg);
+ err = brcmf_init_priv_mem(cfg);
if (err)
return err;
- INIT_WORK(&cfg_priv->event_work, brcmf_cfg80211_event_handler);
- brcmf_init_eloop_handler(&cfg_priv->el);
- mutex_init(&cfg_priv->usr_sync);
- err = brcmf_init_iscan(cfg_priv);
+ INIT_WORK(&cfg->event_work, brcmf_cfg80211_event_handler);
+ brcmf_init_eloop_handler(&cfg->el);
+ mutex_init(&cfg->usr_sync);
+ err = brcmf_init_iscan(cfg);
if (err)
return err;
- brcmf_init_conf(cfg_priv->conf);
- brcmf_init_prof(cfg_priv->profile);
- brcmf_link_down(cfg_priv);
+ brcmf_init_escan(cfg);
+ brcmf_init_conf(cfg->conf);
+ brcmf_init_prof(cfg->profile);
+ brcmf_link_down(cfg);
return err;
}
-static void wl_deinit_priv(struct brcmf_cfg80211_priv *cfg_priv)
+static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
{
- cancel_work_sync(&cfg_priv->event_work);
- cfg_priv->dongle_up = false; /* dongle down */
- brcmf_flush_eq(cfg_priv);
- brcmf_link_down(cfg_priv);
- brcmf_term_iscan(cfg_priv);
- brcmf_deinit_priv_mem(cfg_priv);
+ cancel_work_sync(&cfg->event_work);
+ cfg->dongle_up = false; /* dongle down */
+ brcmf_flush_eq(cfg);
+ brcmf_link_down(cfg);
+ brcmf_abort_scanning(cfg);
+ brcmf_deinit_priv_mem(cfg);
}
-struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev,
- struct device *busdev,
- void *data)
+struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct net_device *ndev,
+ struct device *busdev,
+ struct brcmf_pub *drvr)
{
struct wireless_dev *wdev;
- struct brcmf_cfg80211_priv *cfg_priv;
- struct brcmf_cfg80211_iface *ci;
- struct brcmf_cfg80211_dev *cfg_dev;
+ struct brcmf_cfg80211_info *cfg;
s32 err = 0;
if (!ndev) {
WL_ERR("ndev is invalid\n");
return NULL;
}
- cfg_dev = kzalloc(sizeof(struct brcmf_cfg80211_dev), GFP_KERNEL);
- if (!cfg_dev)
- return NULL;
- wdev = brcmf_alloc_wdev(sizeof(struct brcmf_cfg80211_iface), busdev);
+ wdev = brcmf_alloc_wdev(busdev);
if (IS_ERR(wdev)) {
- kfree(cfg_dev);
return NULL;
}
wdev->iftype = brcmf_mode_to_nl80211_iftype(WL_MODE_BSS);
- cfg_priv = wdev_to_cfg(wdev);
- cfg_priv->wdev = wdev;
- cfg_priv->pub = data;
- ci = (struct brcmf_cfg80211_iface *)&cfg_priv->ci;
- ci->cfg_priv = cfg_priv;
+ cfg = wdev_to_cfg(wdev);
+ cfg->wdev = wdev;
+ cfg->pub = drvr;
ndev->ieee80211_ptr = wdev;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
wdev->netdev = ndev;
- err = wl_init_priv(cfg_priv);
+ err = wl_init_priv(cfg);
if (err) {
WL_ERR("Failed to init iwm_priv (%d)\n", err);
goto cfg80211_attach_out;
}
- brcmf_set_drvdata(cfg_dev, ci);
- return cfg_dev;
+ return cfg;
cfg80211_attach_out:
- brcmf_free_wdev(cfg_priv);
- kfree(cfg_dev);
+ brcmf_free_wdev(cfg);
return NULL;
}
-void brcmf_cfg80211_detach(struct brcmf_cfg80211_dev *cfg_dev)
+void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_cfg80211_priv *cfg_priv;
-
- cfg_priv = brcmf_priv_get(cfg_dev);
-
- wl_deinit_priv(cfg_priv);
- brcmf_free_wdev(cfg_priv);
- brcmf_set_drvdata(cfg_dev, NULL);
- kfree(cfg_dev);
+ wl_deinit_priv(cfg);
+ brcmf_free_wdev(cfg);
}
void
@@ -3478,10 +5180,10 @@ brcmf_cfg80211_event(struct net_device *ndev,
const struct brcmf_event_msg *e, void *data)
{
u32 event_type = be32_to_cpu(e->event_type);
- struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
+ struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
- if (!brcmf_enq_event(cfg_priv, event_type, e))
- schedule_work(&cfg_priv->event_work);
+ if (!brcmf_enq_event(cfg, event_type, e, data))
+ schedule_work(&cfg->event_work);
}
static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype)
@@ -3502,6 +5204,9 @@ static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype)
case NL80211_IFTYPE_STATION:
infra = 1;
break;
+ case NL80211_IFTYPE_AP:
+ infra = 1;
+ break;
default:
err = -EINVAL;
WL_ERR("invalid type (%d)\n", iftype);
@@ -3554,6 +5259,8 @@ static s32 brcmf_dongle_eventmsg(struct net_device *ndev)
setbit(eventmask, BRCMF_E_TXFAIL);
setbit(eventmask, BRCMF_E_JOIN_START);
setbit(eventmask, BRCMF_E_SCAN_COMPLETE);
+ setbit(eventmask, BRCMF_E_ESCAN_RESULT);
+ setbit(eventmask, BRCMF_E_PFN_NET_FOUND);
brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN,
iovbuf, sizeof(iovbuf));
@@ -3672,46 +5379,46 @@ dongle_scantime_out:
return err;
}
-static s32 wl_update_wiphybands(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 wl_update_wiphybands(struct brcmf_cfg80211_info *cfg)
{
struct wiphy *wiphy;
s32 phy_list;
s8 phy;
s32 err = 0;
- err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCM_GET_PHYLIST,
+ err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCM_GET_PHYLIST,
&phy_list, sizeof(phy_list));
if (err) {
WL_ERR("error (%d)\n", err);
return err;
}
- phy = ((char *)&phy_list)[1];
+ phy = ((char *)&phy_list)[0];
WL_INFO("%c phy\n", phy);
if (phy == 'n' || phy == 'a') {
- wiphy = cfg_to_wiphy(cfg_priv);
+ wiphy = cfg_to_wiphy(cfg);
wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n;
}
return err;
}
-static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_info *cfg)
{
- return wl_update_wiphybands(cfg_priv);
+ return wl_update_wiphybands(cfg);
}
-static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
{
struct net_device *ndev;
struct wireless_dev *wdev;
s32 power_mode;
s32 err = 0;
- if (cfg_priv->dongle_up)
+ if (cfg->dongle_up)
return err;
- ndev = cfg_to_ndev(cfg_priv);
+ ndev = cfg_to_ndev(cfg);
wdev = ndev->ieee80211_ptr;
brcmf_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME,
@@ -3721,21 +5428,21 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv)
if (err)
goto default_conf_out;
- power_mode = cfg_priv->pwr_save ? PM_FAST : PM_OFF;
+ power_mode = cfg->pwr_save ? PM_FAST : PM_OFF;
err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &power_mode);
if (err)
goto default_conf_out;
WL_INFO("power save set to %s\n",
(power_mode ? "enabled" : "disabled"));
- err = brcmf_dongle_roam(ndev, (cfg_priv->roam_on ? 0 : 1),
+ err = brcmf_dongle_roam(ndev, (cfg->roam_on ? 0 : 1),
WL_BEACON_TIMEOUT);
if (err)
goto default_conf_out;
err = brcmf_dongle_mode(ndev, wdev->iftype);
if (err && err != -EINPROGRESS)
goto default_conf_out;
- err = brcmf_dongle_probecap(cfg_priv);
+ err = brcmf_dongle_probecap(cfg);
if (err)
goto default_conf_out;
@@ -3743,31 +5450,31 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv)
default_conf_out:
- cfg_priv->dongle_up = true;
+ cfg->dongle_up = true;
return err;
}
-static int brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_priv *cfg_priv)
+static int brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_info *cfg)
{
char buf[10+IFNAMSIZ];
struct dentry *fd;
s32 err = 0;
- sprintf(buf, "netdev:%s", cfg_to_ndev(cfg_priv)->name);
- cfg_priv->debugfsdir = debugfs_create_dir(buf,
- cfg_to_wiphy(cfg_priv)->debugfsdir);
+ sprintf(buf, "netdev:%s", cfg_to_ndev(cfg)->name);
+ cfg->debugfsdir = debugfs_create_dir(buf,
+ cfg_to_wiphy(cfg)->debugfsdir);
- fd = debugfs_create_u16("beacon_int", S_IRUGO, cfg_priv->debugfsdir,
- (u16 *)&cfg_priv->profile->beacon_interval);
+ fd = debugfs_create_u16("beacon_int", S_IRUGO, cfg->debugfsdir,
+ (u16 *)&cfg->profile->beacon_interval);
if (!fd) {
err = -ENOMEM;
goto err_out;
}
- fd = debugfs_create_u8("dtim_period", S_IRUGO, cfg_priv->debugfsdir,
- (u8 *)&cfg_priv->profile->dtim_period);
+ fd = debugfs_create_u8("dtim_period", S_IRUGO, cfg->debugfsdir,
+ (u8 *)&cfg->profile->dtim_period);
if (!fd) {
err = -ENOMEM;
goto err_out;
@@ -3777,40 +5484,40 @@ err_out:
return err;
}
-static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_priv *cfg_priv)
+static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_info *cfg)
{
- debugfs_remove_recursive(cfg_priv->debugfsdir);
- cfg_priv->debugfsdir = NULL;
+ debugfs_remove_recursive(cfg->debugfsdir);
+ cfg->debugfsdir = NULL;
}
-static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg)
{
s32 err = 0;
- set_bit(WL_STATUS_READY, &cfg_priv->status);
+ set_bit(WL_STATUS_READY, &cfg->status);
- brcmf_debugfs_add_netdev_params(cfg_priv);
+ brcmf_debugfs_add_netdev_params(cfg);
- err = brcmf_config_dongle(cfg_priv);
+ err = brcmf_config_dongle(cfg);
if (err)
return err;
- brcmf_invoke_iscan(cfg_priv);
+ brcmf_invoke_iscan(cfg);
return err;
}
-static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_priv *cfg_priv)
+static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg)
{
/*
* While going down, if associated with AP disassociate
* from AP to save power
*/
- if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) ||
- test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) &&
- test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+ if ((test_bit(WL_STATUS_CONNECTED, &cfg->status) ||
+ test_bit(WL_STATUS_CONNECTING, &cfg->status)) &&
+ test_bit(WL_STATUS_READY, &cfg->status)) {
WL_INFO("Disassociating from AP");
- brcmf_link_down(cfg_priv);
+ brcmf_link_down(cfg);
/* Make sure WPA_Supplicant receives all the event
generated due to DISASSOC call to the fw to keep
@@ -3819,63 +5526,33 @@ static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_priv *cfg_priv)
brcmf_delay(500);
}
- set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
- brcmf_term_iscan(cfg_priv);
- if (cfg_priv->scan_request) {
- cfg80211_scan_done(cfg_priv->scan_request, true);
- /* May need to perform this to cover rmmod */
- /* wl_set_mpc(cfg_to_ndev(wl), 1); */
- cfg_priv->scan_request = NULL;
- }
- clear_bit(WL_STATUS_READY, &cfg_priv->status);
- clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
- clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
+ brcmf_abort_scanning(cfg);
+ clear_bit(WL_STATUS_READY, &cfg->status);
- brcmf_debugfs_remove_netdev(cfg_priv);
+ brcmf_debugfs_remove_netdev(cfg);
return 0;
}
-s32 brcmf_cfg80211_up(struct brcmf_cfg80211_dev *cfg_dev)
+s32 brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_cfg80211_priv *cfg_priv;
s32 err = 0;
- cfg_priv = brcmf_priv_get(cfg_dev);
- mutex_lock(&cfg_priv->usr_sync);
- err = __brcmf_cfg80211_up(cfg_priv);
- mutex_unlock(&cfg_priv->usr_sync);
+ mutex_lock(&cfg->usr_sync);
+ err = __brcmf_cfg80211_up(cfg);
+ mutex_unlock(&cfg->usr_sync);
return err;
}
-s32 brcmf_cfg80211_down(struct brcmf_cfg80211_dev *cfg_dev)
+s32 brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_cfg80211_priv *cfg_priv;
s32 err = 0;
- cfg_priv = brcmf_priv_get(cfg_dev);
- mutex_lock(&cfg_priv->usr_sync);
- err = __brcmf_cfg80211_down(cfg_priv);
- mutex_unlock(&cfg_priv->usr_sync);
+ mutex_lock(&cfg->usr_sync);
+ err = __brcmf_cfg80211_down(cfg);
+ mutex_unlock(&cfg->usr_sync);
return err;
}
-static __used s32 brcmf_add_ie(struct brcmf_cfg80211_priv *cfg_priv,
- u8 t, u8 l, u8 *v)
-{
- struct brcmf_cfg80211_ie *ie = &cfg_priv->ie;
- s32 err = 0;
-
- if (ie->offset + l + 2 > WL_TLV_INFO_MAX) {
- WL_ERR("ei crosses buffer boundary\n");
- return -ENOSPC;
- }
- ie->buf[ie->offset] = t;
- ie->buf[ie->offset + 1] = l;
- memcpy(&ie->buf[ie->offset + 2], v, l);
- ie->offset += l + 2;
-
- return err;
-}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index b5d9b36df3d0..71ced174748a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -17,12 +17,6 @@
#ifndef _wl_cfg80211_h_
#define _wl_cfg80211_h_
-struct brcmf_cfg80211_conf;
-struct brcmf_cfg80211_iface;
-struct brcmf_cfg80211_priv;
-struct brcmf_cfg80211_security;
-struct brcmf_cfg80211_ibss;
-
#define WL_DBG_NONE 0
#define WL_DBG_CONN (1 << 5)
#define WL_DBG_SCAN (1 << 4)
@@ -123,13 +117,25 @@ do { \
#define WL_SCAN_UNASSOC_TIME 40
#define WL_SCAN_PASSIVE_TIME 120
+#define WL_ESCAN_BUF_SIZE (1024 * 64)
+#define WL_ESCAN_TIMER_INTERVAL_MS 8000 /* E-Scan timeout */
+
+#define WL_ESCAN_ACTION_START 1
+#define WL_ESCAN_ACTION_CONTINUE 2
+#define WL_ESCAN_ACTION_ABORT 3
+
+#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */
+#define IE_MAX_LEN 512
+
/* dongle status */
enum wl_status {
WL_STATUS_READY,
WL_STATUS_SCANNING,
WL_STATUS_SCAN_ABORTING,
WL_STATUS_CONNECTING,
- WL_STATUS_CONNECTED
+ WL_STATUS_CONNECTED,
+ WL_STATUS_AP_CREATING,
+ WL_STATUS_AP_CREATED
};
/* wi-fi mode */
@@ -169,23 +175,17 @@ struct brcmf_cfg80211_conf {
struct ieee80211_channel channel;
};
+/* forward declaration */
+struct brcmf_cfg80211_info;
+
/* cfg80211 main event loop */
struct brcmf_cfg80211_event_loop {
- s32(*handler[BRCMF_E_LAST]) (struct brcmf_cfg80211_priv *cfg_priv,
+ s32(*handler[BRCMF_E_LAST]) (struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e,
void *data);
};
-/* representing interface of cfg80211 plane */
-struct brcmf_cfg80211_iface {
- struct brcmf_cfg80211_priv *cfg_priv;
-};
-
-struct brcmf_cfg80211_dev {
- void *driver_data; /* to store cfg80211 object information */
-};
-
/* basic structure of scan request */
struct brcmf_cfg80211_scan_req {
struct brcmf_ssid_le ssid_le;
@@ -238,7 +238,7 @@ struct brcmf_cfg80211_profile {
/* dongle iscan event loop */
struct brcmf_cfg80211_iscan_eloop {
s32 (*handler[WL_SCAN_ERSULTS_LAST])
- (struct brcmf_cfg80211_priv *cfg_priv);
+ (struct brcmf_cfg80211_info *cfg);
};
/* dongle iscan controller */
@@ -275,92 +275,240 @@ struct brcmf_cfg80211_pmk_list {
struct pmkid foo[MAXPMKID - 1];
};
-/* dongle private data of cfg80211 interface */
-struct brcmf_cfg80211_priv {
- struct wireless_dev *wdev; /* representing wl cfg80211 device */
- struct brcmf_cfg80211_conf *conf; /* dongle configuration */
- struct cfg80211_scan_request *scan_request; /* scan request
- object */
- struct brcmf_cfg80211_event_loop el; /* main event loop */
- struct list_head evt_q_list; /* used for event queue */
- spinlock_t evt_q_lock; /* for event queue synchronization */
- struct mutex usr_sync; /* maily for dongle up/down synchronization */
- struct brcmf_scan_results *bss_list; /* bss_list holding scanned
- ap information */
+/* dongle escan state */
+enum wl_escan_state {
+ WL_ESCAN_STATE_IDLE,
+ WL_ESCAN_STATE_SCANNING
+};
+
+struct escan_info {
+ u32 escan_state;
+ u8 escan_buf[WL_ESCAN_BUF_SIZE];
+ struct wiphy *wiphy;
+ struct net_device *ndev;
+};
+
+/* Structure to hold WPS and WPA IEs for an AP */
+struct ap_info {
+ u8 probe_res_ie[IE_MAX_LEN];
+ u8 beacon_ie[IE_MAX_LEN];
+ u32 probe_res_ie_len;
+ u32 beacon_ie_len;
+ u8 *wpa_ie;
+ u8 *rsn_ie;
+ bool security_mode;
+};
+
+/**
+ * struct brcmf_pno_param_le - PNO scan configuration parameters
+ *
+ * @version: PNO parameters version.
+ * @scan_freq: scan frequency.
+ * @lost_network_timeout: #sec. to declare discovered network as lost.
+ * @flags: Bit field to control features of PFN such as sort criteria auto
+ * enable switch and background scan.
+ * @rssi_margin: Margin to avoid jitter for choosing a PFN based on RSSI sort
+ * criteria.
+ * @bestn: number of best networks in each scan.
+ * @mscan: number of scans recorded.
+ * @repeat: minimum number of scan intervals before scan frequency changes
+ * in adaptive scan.
+ * @exp: exponent of 2 for maximum scan interval.
+ * @slow_freq: slow scan period.
+ */
+struct brcmf_pno_param_le {
+ __le32 version;
+ __le32 scan_freq;
+ __le32 lost_network_timeout;
+ __le16 flags;
+ __le16 rssi_margin;
+ u8 bestn;
+ u8 mscan;
+ u8 repeat;
+ u8 exp;
+ __le32 slow_freq;
+};
+
+/**
+ * struct brcmf_pno_net_param_le - scan parameters per preferred network.
+ *
+ * @ssid: ssid name and its length.
+ * @flags: bit2: hidden.
+ * @infra: BSS vs IBSS.
+ * @auth: Open vs Closed.
+ * @wpa_auth: WPA type.
+ * @wsec: wsec value.
+ */
+struct brcmf_pno_net_param_le {
+ struct brcmf_ssid_le ssid;
+ __le32 flags;
+ __le32 infra;
+ __le32 auth;
+ __le32 wpa_auth;
+ __le32 wsec;
+};
+
+/**
+ * struct brcmf_pno_net_info_le - information per found network.
+ *
+ * @bssid: BSS network identifier.
+ * @channel: channel number only.
+ * @SSID_len: length of ssid.
+ * @SSID: ssid characters.
+ * @RSSI: receive signal strength (in dBm).
+ * @timestamp: age in seconds.
+ */
+struct brcmf_pno_net_info_le {
+ u8 bssid[ETH_ALEN];
+ u8 channel;
+ u8 SSID_len;
+ u8 SSID[32];
+ __le16 RSSI;
+ __le16 timestamp;
+};
+
+/**
+ * struct brcmf_pno_scanresults_le - result returned in PNO NET FOUND event.
+ *
+ * @version: PNO version identifier.
+ * @status: indicates completion status of PNO scan.
+ * @count: amount of brcmf_pno_net_info_le entries appended.
+ */
+struct brcmf_pno_scanresults_le {
+ __le32 version;
+ __le32 status;
+ __le32 count;
+};
+
+/**
+ * struct brcmf_cfg80211_info - dongle private data of cfg80211 interface
+ *
+ * @wdev: representing wl cfg80211 device.
+ * @conf: dongle configuration.
+ * @scan_request: cfg80211 scan request object.
+ * @el: main event loop.
+ * @evt_q_list: used for event queue.
+ * @evt_q_lock: for event queue synchronization.
+ * @usr_sync: mainly for dongle up/down synchronization.
+ * @bss_list: bss_list holding scanned ap information.
+ * @scan_results: results of the last scan.
+ * @scan_req_int: internal scan request object.
+ * @bss_info: bss information for cfg80211 layer.
+ * @ie: information element object for internal purpose.
+ * @profile: holding dongle profile.
+ * @iscan: iscan controller information.
+ * @conn_info: association info.
+ * @pmk_list: wpa2 pmk list.
+ * @event_work: event handler work struct.
+ * @status: current dongle status.
+ * @pub: common driver information.
+ * @channel: current channel.
+ * @iscan_on: iscan on/off switch.
+ * @iscan_kickstart: indicate iscan already started.
+ * @active_scan: current scan mode.
+ * @sched_escan: e-scan for scheduled scan support running.
+ * @ibss_starter: indicates this sta is ibss starter.
+ * @link_up: link/connection up flag.
+ * @pwr_save: indicate whether dongle to support power save mode.
+ * @dongle_up: indicate whether dongle up or not.
+ * @roam_on: on/off switch for dongle self-roaming.
+ * @scan_tried: indicates if first scan attempted.
+ * @dcmd_buf: dcmd buffer.
+ * @extra_buf: mainly to grab assoc information.
+ * @debugfsdir: debugfs folder for this device.
+ * @escan_on: escan on/off switch.
+ * @escan_info: escan information.
+ * @escan_timeout: Timer for catch scan timeout.
+ * @escan_timeout_work: scan timeout worker.
+ * @escan_ioctl_buf: dongle command buffer for escan commands.
+ * @ap_info: host ap information.
+ * @ci: used to link this structure to netdev private data.
+ */
+struct brcmf_cfg80211_info {
+ struct wireless_dev *wdev;
+ struct brcmf_cfg80211_conf *conf;
+ struct cfg80211_scan_request *scan_request;
+ struct brcmf_cfg80211_event_loop el;
+ struct list_head evt_q_list;
+ spinlock_t evt_q_lock;
+ struct mutex usr_sync;
+ struct brcmf_scan_results *bss_list;
struct brcmf_scan_results *scan_results;
- struct brcmf_cfg80211_scan_req *scan_req_int; /* scan request object
- for internal purpose */
- struct wl_cfg80211_bss_info *bss_info; /* bss information for
- cfg80211 layer */
- struct brcmf_cfg80211_ie ie; /* information element object for
- internal purpose */
- struct brcmf_cfg80211_profile *profile; /* holding dongle profile */
- struct brcmf_cfg80211_iscan_ctrl *iscan; /* iscan controller */
- struct brcmf_cfg80211_connect_info conn_info; /* association info */
- struct brcmf_cfg80211_pmk_list *pmk_list; /* wpa2 pmk list */
- struct work_struct event_work; /* event handler work struct */
- unsigned long status; /* current dongle status */
- void *pub;
- u32 channel; /* current channel */
- bool iscan_on; /* iscan on/off switch */
- bool iscan_kickstart; /* indicate iscan already started */
- bool active_scan; /* current scan mode */
- bool ibss_starter; /* indicates this sta is ibss starter */
- bool link_up; /* link/connection up flag */
- bool pwr_save; /* indicate whether dongle to support
- power save mode */
- bool dongle_up; /* indicate whether dongle up or not */
- bool roam_on; /* on/off switch for dongle self-roaming */
- bool scan_tried; /* indicates if first scan attempted */
- u8 *dcmd_buf; /* dcmd buffer */
- u8 *extra_buf; /* maily to grab assoc information */
+ struct brcmf_cfg80211_scan_req *scan_req_int;
+ struct wl_cfg80211_bss_info *bss_info;
+ struct brcmf_cfg80211_ie ie;
+ struct brcmf_cfg80211_profile *profile;
+ struct brcmf_cfg80211_iscan_ctrl *iscan;
+ struct brcmf_cfg80211_connect_info conn_info;
+ struct brcmf_cfg80211_pmk_list *pmk_list;
+ struct work_struct event_work;
+ unsigned long status;
+ struct brcmf_pub *pub;
+ u32 channel;
+ bool iscan_on;
+ bool iscan_kickstart;
+ bool active_scan;
+ bool sched_escan;
+ bool ibss_starter;
+ bool link_up;
+ bool pwr_save;
+ bool dongle_up;
+ bool roam_on;
+ bool scan_tried;
+ u8 *dcmd_buf;
+ u8 *extra_buf;
struct dentry *debugfsdir;
- u8 ci[0] __aligned(NETDEV_ALIGN);
+ bool escan_on;
+ struct escan_info escan_info;
+ struct timer_list escan_timeout;
+ struct work_struct escan_timeout_work;
+ u8 *escan_ioctl_buf;
+ struct ap_info *ap_info;
};
-static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_priv *w)
+static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *w)
{
return w->wdev->wiphy;
}
-static inline struct brcmf_cfg80211_priv *wiphy_to_cfg(struct wiphy *w)
+static inline struct brcmf_cfg80211_info *wiphy_to_cfg(struct wiphy *w)
{
- return (struct brcmf_cfg80211_priv *)(wiphy_priv(w));
+ return (struct brcmf_cfg80211_info *)(wiphy_priv(w));
}
-static inline struct brcmf_cfg80211_priv *wdev_to_cfg(struct wireless_dev *wd)
+static inline struct brcmf_cfg80211_info *wdev_to_cfg(struct wireless_dev *wd)
{
- return (struct brcmf_cfg80211_priv *)(wdev_priv(wd));
+ return (struct brcmf_cfg80211_info *)(wdev_priv(wd));
}
-static inline struct net_device *cfg_to_ndev(struct brcmf_cfg80211_priv *cfg)
+static inline struct net_device *cfg_to_ndev(struct brcmf_cfg80211_info *cfg)
{
return cfg->wdev->netdev;
}
-static inline struct brcmf_cfg80211_priv *ndev_to_cfg(struct net_device *ndev)
+static inline struct brcmf_cfg80211_info *ndev_to_cfg(struct net_device *ndev)
{
return wdev_to_cfg(ndev->ieee80211_ptr);
}
-#define iscan_to_cfg(i) ((struct brcmf_cfg80211_priv *)(i->data))
+#define iscan_to_cfg(i) ((struct brcmf_cfg80211_info *)(i->data))
#define cfg_to_iscan(w) (w->iscan)
static inline struct
-brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_priv *cfg)
+brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_info *cfg)
{
return &cfg->conn_info;
}
-extern struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev,
- struct device *busdev,
- void *data);
-extern void brcmf_cfg80211_detach(struct brcmf_cfg80211_dev *cfg);
+struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct net_device *ndev,
+ struct device *busdev,
+ struct brcmf_pub *drvr);
+void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
/* event handler from dongle */
-extern void brcmf_cfg80211_event(struct net_device *ndev,
- const struct brcmf_event_msg *e, void *data);
-extern s32 brcmf_cfg80211_up(struct brcmf_cfg80211_dev *cfg_dev);
-extern s32 brcmf_cfg80211_down(struct brcmf_cfg80211_dev *cfg_dev);
+void brcmf_cfg80211_event(struct net_device *ndev,
+ const struct brcmf_event_msg *e, void *data);
+s32 brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg);
+s32 brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg);
#endif /* _wl_cfg80211_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index 8c9345dd37d2..b89f1272b93f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -535,9 +535,6 @@ void ai_detach(struct si_pub *sih)
{
struct si_info *sii;
- struct si_pub *si_local = NULL;
- memcpy(&si_local, &sih, sizeof(struct si_pub **));
-
sii = container_of(sih, struct si_info, pub);
if (sii == NULL)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index a5edebeb0b4f..a744ea5a9559 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -86,7 +86,9 @@ MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver.");
MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");
-
+/* This needs to be adjusted when brcms_firmwares changes */
+MODULE_FIRMWARE("brcm/bcm43xx-0.fw");
+MODULE_FIRMWARE("brcm/bcm43xx_hdr-0.fw");
/* recognized BCMA Core IDs */
static struct bcma_device_id brcms_coreid_table[] = {
@@ -265,7 +267,9 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br)
}
}
-static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void brcms_ops_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct brcms_info *wl = hw->priv;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -277,7 +281,7 @@ static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
goto done;
}
brcms_c_sendpkt_mac80211(wl->wlc, skb, hw);
- tx_info->rate_driver_data[0] = tx_info->control.sta;
+ tx_info->rate_driver_data[0] = control->sta;
done:
spin_unlock_bh(&wl->lock);
}
@@ -300,7 +304,10 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
wl->mute_tx = true;
if (!wl->pub->up)
- err = brcms_up(wl);
+ if (!blocked)
+ err = brcms_up(wl);
+ else
+ err = -ERFKILL;
else
err = -ENODEV;
spin_unlock_bh(&wl->lock);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 03ca65324845..75086b37c817 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -7512,15 +7512,10 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);
- if (channel > 14) {
- rx_status->band = IEEE80211_BAND_5GHZ;
- rx_status->freq = ieee80211_ofdm_chan_to_freq(
- WF_CHAN_FACTOR_5_G/2, channel);
-
- } else {
- rx_status->band = IEEE80211_BAND_2GHZ;
- rx_status->freq = ieee80211_dsss_chan_to_freq(channel);
- }
+ rx_status->band =
+ channel > 14 ? IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+ rx_status->freq =
+ ieee80211_channel_to_frequency(channel, rx_status->band);
rx_status->signal = wlc_phy_rssi_compute(wlc->hw->band->pi, rxh);
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index bcc79b4e3267..e8682855b73a 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -34,6 +34,7 @@
#define BCM43235_CHIP_ID 43235
#define BCM43236_CHIP_ID 43236
#define BCM43238_CHIP_ID 43238
+#define BCM43241_CHIP_ID 0x4324
#define BCM4329_CHIP_ID 0x4329
#define BCM4330_CHIP_ID 0x4330
#define BCM4331_CHIP_ID 0x4331
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
index f10d30274c23..c11a290a1edf 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
@@ -67,11 +67,6 @@
#define WL_CHANSPEC_BAND_2G 0x2000
#define INVCHANSPEC 255
-/* used to calculate the chan_freq = chan_factor * 500Mhz + 5 * chan_number */
-#define WF_CHAN_FACTOR_2_4_G 4814 /* 2.4 GHz band, 2407 MHz */
-#define WF_CHAN_FACTOR_5_G 10000 /* 5 GHz band, 5000 MHz */
-#define WF_CHAN_FACTOR_4_G 8000 /* 4.9 GHz band for Japan */
-
#define CHSPEC_CHANNEL(chspec) ((u8)((chspec) & WL_CHANSPEC_CHAN_MASK))
#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK)
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index e1f410277242..c6ea995750db 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -860,10 +860,10 @@ void hostap_free_data(struct ap_data *ap)
return;
}
- flush_work_sync(&ap->add_sta_proc_queue);
+ flush_work(&ap->add_sta_proc_queue);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- flush_work_sync(&ap->wds_oper_queue);
+ flush_work(&ap->wds_oper_queue);
if (ap->crypt)
ap->crypt->deinit(ap->crypt_priv);
ap->crypt = ap->crypt_priv = NULL;
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index 50f87b60b0bd..8e7000fd4414 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -3311,13 +3311,13 @@ static void prism2_free_local_data(struct net_device *dev)
unregister_netdev(local->dev);
- flush_work_sync(&local->reset_queue);
- flush_work_sync(&local->set_multicast_list_queue);
- flush_work_sync(&local->set_tim_queue);
+ flush_work(&local->reset_queue);
+ flush_work(&local->set_multicast_list_queue);
+ flush_work(&local->set_tim_queue);
#ifndef PRISM2_NO_STATION_MODES
- flush_work_sync(&local->info_queue);
+ flush_work(&local->info_queue);
#endif
- flush_work_sync(&local->comms_qual_update);
+ flush_work(&local->comms_qual_update);
lib80211_crypt_info_free(&local->crypt_info);
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 47932b28aac1..970a48baaf80 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -4,6 +4,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/etherdevice.h>
#include "hostap_wlan.h"
#include "hostap.h"
#include "hostap_ap.h"
@@ -463,8 +464,7 @@ static void handle_info_queue_scanresults(local_info_t *local)
prism2_host_roaming(local);
if (local->host_roaming == 2 && local->iw_mode == IW_MODE_INFRA &&
- memcmp(local->preferred_ap, "\x00\x00\x00\x00\x00\x00",
- ETH_ALEN) != 0) {
+ !is_zero_ether_addr(local->preferred_ap)) {
/*
* Firmware seems to be getting into odd state in host_roaming
* mode 2 when hostscan is used without join command, so try
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 18054d9c6688..ac074731335a 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -6,6 +6,7 @@
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/module.h>
+#include <linux/etherdevice.h>
#include <net/lib80211.h>
#include "hostap_wlan.h"
@@ -3221,8 +3222,7 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev,
return -EINVAL;
addr = ext->addr.sa_data;
- if (addr[0] == 0xff && addr[1] == 0xff && addr[2] == 0xff &&
- addr[3] == 0xff && addr[4] == 0xff && addr[5] == 0xff) {
+ if (is_broadcast_ether_addr(addr)) {
sta_ptr = NULL;
crypt = &local->crypt_info.crypt[i];
} else {
@@ -3394,8 +3394,7 @@ static int prism2_ioctl_giwencodeext(struct net_device *dev,
i--;
addr = ext->addr.sa_data;
- if (addr[0] == 0xff && addr[1] == 0xff && addr[2] == 0xff &&
- addr[3] == 0xff && addr[4] == 0xff && addr[5] == 0xff) {
+ if (is_broadcast_ether_addr(addr)) {
sta_ptr = NULL;
crypt = &local->crypt_info.crypt[i];
} else {
@@ -3458,9 +3457,7 @@ static int prism2_ioctl_set_encryption(local_info_t *local,
param->u.crypt.key_len)
return -EINVAL;
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+ if (is_broadcast_ether_addr(param->sta_addr)) {
if (param->u.crypt.idx >= WEP_KEYS)
return -EINVAL;
sta_ptr = NULL;
@@ -3593,9 +3590,7 @@ static int prism2_ioctl_get_encryption(local_info_t *local,
if (max_key_len < 0)
return -EINVAL;
- if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
- param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
- param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+ if (is_broadcast_ether_addr(param->sta_addr)) {
sta_ptr = NULL;
if (param->u.crypt.idx >= WEP_KEYS)
param->u.crypt.idx = local->crypt_info.tx_keyidx;
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 627bc12074c7..15f0fad39add 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -1084,7 +1084,7 @@ int prism2_sta_deauth(local_info_t *local, u16 reason)
__le16 val = cpu_to_le16(reason);
if (local->iw_mode != IW_MODE_INFRA ||
- memcmp(local->bssid, "\x00\x00\x00\x00\x00\x00", ETH_ALEN) == 0 ||
+ is_zero_ether_addr(local->bssid) ||
memcmp(local->bssid, "\x44\x44\x44\x44\x44\x44", ETH_ALEN) == 0)
return 0;
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 83324b321652..29b8fa1adefd 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -2181,8 +2181,7 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
/* Make sure the RF Kill check timer is running */
priv->stop_rf_kill = 0;
- cancel_delayed_work(&priv->rf_kill);
- schedule_delayed_work(&priv->rf_kill, round_jiffies_relative(HZ));
+ mod_delayed_work(system_wq, &priv->rf_kill, round_jiffies_relative(HZ));
}
static void send_scan_event(void *data)
@@ -4322,9 +4321,8 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
"disabled by HW switch\n");
/* Make sure the RF_KILL check timer is running */
priv->stop_rf_kill = 0;
- cancel_delayed_work(&priv->rf_kill);
- schedule_delayed_work(&priv->rf_kill,
- round_jiffies_relative(HZ));
+ mod_delayed_work(system_wq, &priv->rf_kill,
+ round_jiffies_relative(HZ));
} else
schedule_reset(priv);
}
@@ -6964,13 +6962,6 @@ static int ipw2100_wx_set_wap(struct net_device *dev,
struct ipw2100_priv *priv = libipw_priv(dev);
int err = 0;
- static const unsigned char any[] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
- };
- static const unsigned char off[] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
- };
-
// sanity checks
if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
return -EINVAL;
@@ -6981,8 +6972,8 @@ static int ipw2100_wx_set_wap(struct net_device *dev,
goto done;
}
- if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
- !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
+ if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
+ is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
/* we disable mandatory BSSID association */
IPW_DEBUG_WX("exit - disable mandatory BSSID\n");
priv->config &= ~CFG_STATIC_BSSID;
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 0df459147394..935120fc8c93 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -9037,18 +9037,11 @@ static int ipw_wx_set_wap(struct net_device *dev,
{
struct ipw_priv *priv = libipw_priv(dev);
- static const unsigned char any[] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
- };
- static const unsigned char off[] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
- };
-
if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
return -EINVAL;
mutex_lock(&priv->mutex);
- if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
- !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
+ if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
+ is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
/* we disable mandatory BSSID association */
IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
priv->config &= ~CFG_STATIC_BSSID;
diff --git a/drivers/net/wireless/ipw2x00/libipw_wx.c b/drivers/net/wireless/ipw2x00/libipw_wx.c
index 1571505b1a38..54aba4744438 100644
--- a/drivers/net/wireless/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_wx.c
@@ -675,7 +675,7 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee,
}
done:
if (ieee->set_security)
- ieee->set_security(ieee->dev, &sec);
+ ieee->set_security(dev, &sec);
return ret;
}
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index faec40467208..e252acb9c862 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -460,7 +460,9 @@ il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd,
* start C_TX command process
*/
static int
-il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
+il3945_tx_skb(struct il_priv *il,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -512,7 +514,7 @@ il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
hdr_len = ieee80211_hdrlen(fc);
/* Find idx into station table for destination station */
- sta_id = il_sta_id_or_broadcast(il, info->control.sta);
+ sta_id = il_sta_id_or_broadcast(il, sta);
if (sta_id == IL_INVALID_STATION) {
D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
goto drop;
@@ -2859,7 +2861,9 @@ il3945_mac_stop(struct ieee80211_hw *hw)
}
static void
-il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+il3945_mac_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct il_priv *il = hw->priv;
@@ -2868,7 +2872,7 @@ il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
- if (il3945_tx_skb(il, skb))
+ if (il3945_tx_skb(il, control->sta, skb))
dev_kfree_skb_any(skb);
D_MAC80211("leave\n");
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 34f61a0581a2..eac4dc8bc879 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -1526,8 +1526,11 @@ il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
}
static void
-il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info, __le16 fc)
+il4965_tx_cmd_build_rate(struct il_priv *il,
+ struct il_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ __le16 fc)
{
const u8 rts_retry_limit = 60;
u32 rate_flags;
@@ -1561,9 +1564,7 @@ il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
rate_idx = info->control.rates[0].idx;
if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
|| rate_idx > RATE_COUNT_LEGACY)
- rate_idx =
- rate_lowest_index(&il->bands[info->band],
- info->control.sta);
+ rate_idx = rate_lowest_index(&il->bands[info->band], sta);
/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
if (info->band == IEEE80211_BAND_5GHZ)
rate_idx += IL_FIRST_OFDM_RATE;
@@ -1630,11 +1631,12 @@ il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
* start C_TX command process
*/
int
-il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
+il4965_tx_skb(struct il_priv *il,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_sta *sta = info->control.sta;
struct il_station_priv *sta_priv = NULL;
struct il_tx_queue *txq;
struct il_queue *q;
@@ -1680,7 +1682,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
sta_id = il->hw_params.bcast_id;
else {
/* Find idx into station table for destination station */
- sta_id = il_sta_id_or_broadcast(il, info->control.sta);
+ sta_id = il_sta_id_or_broadcast(il, sta);
if (sta_id == IL_INVALID_STATION) {
D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
@@ -1786,7 +1788,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
/* TODO need this for burst mode later on */
il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
- il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);
+ il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);
il_update_stats(il, true, fc, len);
/*
@@ -5828,7 +5830,9 @@ il4965_mac_stop(struct ieee80211_hw *hw)
}
void
-il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+il4965_mac_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct il_priv *il = hw->priv;
@@ -5837,7 +5841,7 @@ il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
- if (il4965_tx_skb(il, skb))
+ if (il4965_tx_skb(il, control->sta, skb))
dev_kfree_skb_any(skb);
D_MACDUMP("leave\n");
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
index 1db677689cfe..2d092f328547 100644
--- a/drivers/net/wireless/iwlegacy/4965.h
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -78,7 +78,9 @@ int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
struct ieee80211_tx_info *info);
-int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb);
+int il4965_tx_skb(struct il_priv *il,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb);
int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 * ssn);
int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
@@ -163,7 +165,9 @@ void il4965_eeprom_release_semaphore(struct il_priv *il);
int il4965_eeprom_check_version(struct il_priv *il);
/* mac80211 handlers (for 4965) */
-void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void il4965_mac_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
int il4965_mac_start(struct ieee80211_hw *hw);
void il4965_mac_stop(struct ieee80211_hw *hw);
void il4965_configure_filter(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 0370403fd0bd..318ed3c9fe74 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -1586,9 +1586,9 @@ il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
return 0;
frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
- memcpy(frame->da, il_bcast_addr, ETH_ALEN);
+ eth_broadcast_addr(frame->da);
memcpy(frame->sa, ta, ETH_ALEN);
- memcpy(frame->bssid, il_bcast_addr, ETH_ALEN);
+ eth_broadcast_addr(frame->bssid);
frame->seq_ctrl = 0;
len += 24;
@@ -4860,7 +4860,7 @@ EXPORT_SYMBOL(il_add_beacon_time);
#ifdef CONFIG_PM
-int
+static int
il_pci_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
@@ -4877,9 +4877,8 @@ il_pci_suspend(struct device *device)
return 0;
}
-EXPORT_SYMBOL(il_pci_suspend);
-int
+static int
il_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
@@ -4906,16 +4905,8 @@ il_pci_resume(struct device *device)
return 0;
}
-EXPORT_SYMBOL(il_pci_resume);
-const struct dev_pm_ops il_pm_ops = {
- .suspend = il_pci_suspend,
- .resume = il_pci_resume,
- .freeze = il_pci_suspend,
- .thaw = il_pci_resume,
- .poweroff = il_pci_suspend,
- .restore = il_pci_resume,
-};
+SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
EXPORT_SYMBOL(il_pm_ops);
#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index 5f5017767b99..b4bb813362bd 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1832,10 +1832,8 @@ int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd);
static inline u16
il_pcie_link_ctl(struct il_priv *il)
{
- int pos;
u16 pci_lnk_ctl;
- pos = pci_pcie_cap(il->pci_dev);
- pci_read_config_word(il->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
+ pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &pci_lnk_ctl);
return pci_lnk_ctl;
}
@@ -1845,8 +1843,6 @@ __le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
u32 beacon_interval);
#ifdef CONFIG_PM
-int il_pci_suspend(struct device *device);
-int il_pci_resume(struct device *device);
extern const struct dev_pm_ops il_pm_ops;
#define IL_LEGACY_PM_OPS (&il_pm_ops)
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 9bb16bdf6d26..75e12f29d9eb 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -201,7 +201,9 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
/* tx */
-int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
+int iwlagn_tx_skb(struct iwl_priv *priv,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb);
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
@@ -485,16 +487,13 @@ static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
-int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
-void iwl_dbgfs_unregister(struct iwl_priv *priv);
+int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir);
#else
-static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
+static inline int iwl_dbgfs_register(struct iwl_priv *priv,
+ struct dentry *dbgfs_dir)
{
return 0;
}
-static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
-{
-}
#endif /* CONFIG_IWLWIFI_DEBUGFS */
#ifdef CONFIG_IWLWIFI_DEBUG
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 4a361c55c543..01128c96b5d8 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -1055,8 +1055,9 @@ struct iwl_wep_cmd {
#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1)
#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2)
#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3)
-#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0
+#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0x70
#define RX_RES_PHY_FLAGS_ANTENNA_POS 4
+#define RX_RES_PHY_FLAGS_AGG_MSK cpu_to_le16(1 << 7)
#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8)
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index a47b306b522c..1a98fa3ab06d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -2352,24 +2352,19 @@ DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
* Create the debugfs files and directories
*
*/
-int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
+int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir)
{
- struct dentry *phyd = priv->hw->wiphy->debugfsdir;
- struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
+ struct dentry *dir_data, *dir_rf, *dir_debug;
- dir_drv = debugfs_create_dir(name, phyd);
- if (!dir_drv)
- return -ENOMEM;
-
- priv->debugfs_dir = dir_drv;
+ priv->debugfs_dir = dbgfs_dir;
- dir_data = debugfs_create_dir("data", dir_drv);
+ dir_data = debugfs_create_dir("data", dbgfs_dir);
if (!dir_data)
goto err;
- dir_rf = debugfs_create_dir("rf", dir_drv);
+ dir_rf = debugfs_create_dir("rf", dbgfs_dir);
if (!dir_rf)
goto err;
- dir_debug = debugfs_create_dir("debug", dir_drv);
+ dir_debug = debugfs_create_dir("debug", dbgfs_dir);
if (!dir_debug)
goto err;
@@ -2415,25 +2410,30 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
/* Calibrations disabled/enabled status*/
DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR);
- if (iwl_trans_dbgfs_register(priv->trans, dir_debug))
- goto err;
+ /*
+ * Create a symlink with mac80211. This is not very robust, as it does
+ * not remove the symlink created. The implicit assumption is that
+ * when the opmode exits, mac80211 will also exit, and will remove
+ * this symlink as part of its cleanup.
+ */
+ if (priv->mac80211_registered) {
+ char buf[100];
+ struct dentry *mac80211_dir, *dev_dir, *root_dir;
+
+ dev_dir = dbgfs_dir->d_parent;
+ root_dir = dev_dir->d_parent;
+ mac80211_dir = priv->hw->wiphy->debugfsdir;
+
+ snprintf(buf, 100, "../../%s/%s", root_dir->d_name.name,
+ dev_dir->d_name.name);
+
+ if (!debugfs_create_symlink("iwlwifi", mac80211_dir, buf))
+ goto err;
+ }
+
return 0;
err:
- IWL_ERR(priv, "Can't create the debugfs directory\n");
- iwl_dbgfs_unregister(priv);
+ IWL_ERR(priv, "failed to create the dvm debugfs entries\n");
return -ENOMEM;
}
-
-/**
- * Remove the debugfs files and directories
- *
- */
-void iwl_dbgfs_unregister(struct iwl_priv *priv)
-{
- if (!priv->debugfs_dir)
- return;
-
- debugfs_remove_recursive(priv->debugfs_dir);
- priv->debugfs_dir = NULL;
-}
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 054f728f6266..8141f91c3725 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -771,6 +771,7 @@ struct iwl_priv {
u8 agg_tids_count;
struct iwl_rx_phy_res last_phy_res;
+ u32 ampdu_ref;
bool last_phy_res_valid;
/*
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index a5f7bce96325..ff8162d4c454 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -195,7 +195,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
}
- hw->wiphy->max_remain_on_channel_duration = 1000;
+ hw->wiphy->max_remain_on_channel_duration = 500;
hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
WIPHY_FLAG_DISABLE_BEACON_HINTS |
@@ -511,14 +511,16 @@ static void iwlagn_mac_set_wakeup(struct ieee80211_hw *hw, bool enabled)
}
#endif
-static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void iwlagn_mac_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
- if (iwlagn_tx_skb(priv, skb))
+ if (iwlagn_tx_skb(priv, control->sta, skb))
dev_kfree_skb_any(skb);
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 84d3db5aa506..7ff3f1430678 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -862,7 +862,8 @@ void iwl_down(struct iwl_priv *priv)
* No race since we hold the mutex here and a new one
* can't come in at this time.
*/
- ieee80211_remain_on_channel_expired(priv->hw);
+ if (priv->ucode_loaded && priv->cur_ucode != IWL_UCODE_INIT)
+ ieee80211_remain_on_channel_expired(priv->hw);
exit_pending =
test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
@@ -994,7 +995,11 @@ static void iwl_bg_restart(struct work_struct *data)
iwlagn_prepare_restart(priv);
mutex_unlock(&priv->mutex);
iwl_cancel_deferred_work(priv);
- ieee80211_restart_hw(priv->hw);
+ if (priv->mac80211_registered)
+ ieee80211_restart_hw(priv->hw);
+ else
+ IWL_ERR(priv,
+ "Cannot request restart before registrating with mac80211");
} else {
WARN_ON(1);
}
@@ -1222,7 +1227,8 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
const struct iwl_cfg *cfg,
- const struct iwl_fw *fw)
+ const struct iwl_fw *fw,
+ struct dentry *dbgfs_dir)
{
struct iwl_priv *priv;
struct ieee80211_hw *hw;
@@ -1466,13 +1472,17 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
if (iwlagn_mac_setup_register(priv, &fw->ucode_capa))
goto out_destroy_workqueue;
- if (iwl_dbgfs_register(priv, DRV_NAME))
- IWL_ERR(priv,
- "failed to create debugfs files. Ignoring error\n");
+ if (iwl_dbgfs_register(priv, dbgfs_dir))
+ goto out_mac80211_unregister;
return op_mode;
+out_mac80211_unregister:
+ iwlagn_mac_unregister(priv);
out_destroy_workqueue:
+ iwl_tt_exit(priv);
+ iwl_testmode_free(priv);
+ iwl_cancel_deferred_work(priv);
destroy_workqueue(priv->workqueue);
priv->workqueue = NULL;
iwl_uninit_drv(priv);
@@ -1493,8 +1503,6 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
- iwl_dbgfs_unregister(priv);
-
iwl_testmode_free(priv);
iwlagn_mac_unregister(priv);
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index fee5cffa1669..5a9c325804f6 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -667,6 +667,7 @@ static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
priv->last_phy_res_valid = true;
+ priv->ampdu_ref++;
memcpy(&priv->last_phy_res, pkt->data,
sizeof(struct iwl_rx_phy_res));
return 0;
@@ -981,6 +982,16 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
rx_status.flag |= RX_FLAG_SHORTPRE;
+ if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
+ /*
+ * We know which subframes of an A-MPDU belong
+ * together since we get a single PHY response
+ * from the firmware for all of them
+ */
+ rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status.ampdu_reference = priv->ampdu_ref;
+ }
+
/* Set up the HT phy flags */
if (rate_n_flags & RATE_MCS_HT_MSK)
rx_status.flag |= RX_FLAG_HT;
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index e3467fa86899..bb9f6252d28f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -612,9 +612,9 @@ static u16 iwl_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
return 0;
frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
- memcpy(frame->da, iwl_bcast_addr, ETH_ALEN);
+ eth_broadcast_addr(frame->da);
memcpy(frame->sa, ta, ETH_ALEN);
- memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN);
+ eth_broadcast_addr(frame->bssid);
frame->seq_ctrl = 0;
len += 24;
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index b29b798f7550..cd9b6de4273e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -128,10 +128,11 @@ int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_addsta_cmd *addsta =
- (struct iwl_addsta_cmd *) cmd->payload;
- return iwl_process_add_sta_resp(priv, addsta, pkt);
+ if (!cmd)
+ return 0;
+
+ return iwl_process_add_sta_resp(priv, (void *)cmd->payload, pkt);
}
int iwl_send_add_sta(struct iwl_priv *priv,
@@ -150,7 +151,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
if (!(flags & CMD_ASYNC)) {
- cmd.flags |= CMD_WANT_SKB;
+ cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD;
might_sleep();
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 5971a23aa47d..f5ca73a89870 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -127,6 +127,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
__le16 fc)
{
u32 rate_flags;
@@ -187,8 +188,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
rate_idx = rate_lowest_index(
- &priv->eeprom_data->bands[info->band],
- info->control.sta);
+ &priv->eeprom_data->bands[info->band], sta);
/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
if (info->band == IEEE80211_BAND_5GHZ)
rate_idx += IWL_FIRST_OFDM_RATE;
@@ -291,7 +291,9 @@ static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
/*
* start REPLY_TX command process
*/
-int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
+int iwlagn_tx_skb(struct iwl_priv *priv,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -345,7 +347,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
sta_id = ctx->bcast_sta_id;
else {
/* Find index into station table for destination station */
- sta_id = iwl_sta_id_or_broadcast(ctx, info->control.sta);
+ sta_id = iwl_sta_id_or_broadcast(ctx, sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
hdr->addr1);
@@ -355,8 +357,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
- if (info->control.sta)
- sta_priv = (void *)info->control.sta->drv_priv;
+ if (sta)
+ sta_priv = (void *)sta->drv_priv;
if (sta_priv && sta_priv->asleep &&
(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
@@ -397,7 +399,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* TODO need this for burst mode later on */
iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
- iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
+ iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc);
memset(&info->status, 0, sizeof(info->status));
@@ -431,7 +433,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
* only. Check this here.
*/
if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
- tid_data->agg.state != IWL_AGG_OFF,
+ tid_data->agg.state != IWL_AGG_OFF,
"Tx while agg.state = %d", tid_data->agg.state))
goto drop_unlock_sta;
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 6d8d6dd7943f..2cb1efbc5ed1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -295,7 +295,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
static int iwl_verify_sec_sparse(struct iwl_priv *priv,
const struct fw_desc *fw_desc)
{
- __le32 *image = (__le32 *)fw_desc->v_addr;
+ __le32 *image = (__le32 *)fw_desc->data;
u32 len = fw_desc->len;
u32 val;
u32 i;
@@ -319,7 +319,7 @@ static int iwl_verify_sec_sparse(struct iwl_priv *priv,
static void iwl_print_mismatch_sec(struct iwl_priv *priv,
const struct fw_desc *fw_desc)
{
- __le32 *image = (__le32 *)fw_desc->v_addr;
+ __le32 *image = (__le32 *)fw_desc->data;
u32 len = fw_desc->len;
u32 val;
u32 offs;
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 06ca505bb2cc..59a5f78402fc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -29,6 +29,7 @@
#include <linux/tracepoint.h>
#include <linux/device.h>
+#include "iwl-trans.h"
#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
@@ -237,27 +238,34 @@ TRACE_EVENT(iwlwifi_dbg,
#define TRACE_SYSTEM iwlwifi
TRACE_EVENT(iwlwifi_dev_hcmd,
- TP_PROTO(const struct device *dev, u32 flags,
- const void *hcmd0, size_t len0,
- const void *hcmd1, size_t len1,
- const void *hcmd2, size_t len2),
- TP_ARGS(dev, flags, hcmd0, len0, hcmd1, len1, hcmd2, len2),
+ TP_PROTO(const struct device *dev,
+ struct iwl_host_cmd *cmd, u16 total_size,
+ const void *hdr, size_t hdr_len),
+ TP_ARGS(dev, cmd, total_size, hdr, hdr_len),
TP_STRUCT__entry(
DEV_ENTRY
- __dynamic_array(u8, hcmd0, len0)
- __dynamic_array(u8, hcmd1, len1)
- __dynamic_array(u8, hcmd2, len2)
+ __dynamic_array(u8, hcmd, total_size)
__field(u32, flags)
),
TP_fast_assign(
+ int i, offset = hdr_len;
+
DEV_ASSIGN;
- memcpy(__get_dynamic_array(hcmd0), hcmd0, len0);
- memcpy(__get_dynamic_array(hcmd1), hcmd1, len1);
- memcpy(__get_dynamic_array(hcmd2), hcmd2, len2);
- __entry->flags = flags;
+ __entry->flags = cmd->flags;
+ memcpy(__get_dynamic_array(hcmd), hdr, hdr_len);
+
+ for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+ if (!cmd->len[i])
+ continue;
+ if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
+ continue;
+ memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
+ cmd->data[i], cmd->len[i]);
+ offset += cmd->len[i];
+ }
),
TP_printk("[%s] hcmd %#.2x (%ssync)",
- __get_str(dev), ((u8 *)__get_dynamic_array(hcmd0))[0],
+ __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[0],
__entry->flags & CMD_ASYNC ? "a" : "")
);
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index cc41cfaedfbd..198634b75ed0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -64,6 +64,7 @@
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/vmalloc.h>
#include "iwl-drv.h"
#include "iwl-debug.h"
@@ -101,6 +102,10 @@ MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static struct dentry *iwl_dbgfs_root;
+#endif
+
/**
* struct iwl_drv - drv common data
* @list: list of drv structures using this opmode
@@ -126,6 +131,12 @@ struct iwl_drv {
char firmware_name[25]; /* name of firmware file to load */
struct completion request_firmware_complete;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct dentry *dbgfs_drv;
+ struct dentry *dbgfs_trans;
+ struct dentry *dbgfs_op_mode;
+#endif
};
#define DVM_OP_MODE 0
@@ -154,10 +165,8 @@ struct fw_sec {
static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
{
- if (desc->v_addr)
- dma_free_coherent(drv->trans->dev, desc->len,
- desc->v_addr, desc->p_addr);
- desc->v_addr = NULL;
+ vfree(desc->data);
+ desc->data = NULL;
desc->len = 0;
}
@@ -176,25 +185,29 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
}
static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
- struct fw_sec *sec)
+ struct fw_sec *sec)
{
- if (!sec || !sec->size) {
- desc->v_addr = NULL;
+ void *data;
+
+ desc->data = NULL;
+
+ if (!sec || !sec->size)
return -EINVAL;
- }
- desc->v_addr = dma_alloc_coherent(drv->trans->dev, sec->size,
- &desc->p_addr, GFP_KERNEL);
- if (!desc->v_addr)
+ data = vmalloc(sec->size);
+ if (!data)
return -ENOMEM;
desc->len = sec->size;
desc->offset = sec->offset;
- memcpy(desc->v_addr, sec->data, sec->size);
+ memcpy(data, sec->data, desc->len);
+ desc->data = data;
+
return 0;
}
-static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
+static void iwl_req_fw_callback(const struct firmware *ucode_raw,
+ void *context);
#define UCODE_EXPERIMENTAL_INDEX 100
#define UCODE_EXPERIMENTAL_TAG "exp"
@@ -231,7 +244,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
drv->trans->dev,
- GFP_KERNEL, drv, iwl_ucode_callback);
+ GFP_KERNEL, drv, iwl_req_fw_callback);
}
struct fw_img_parsing {
@@ -759,13 +772,57 @@ static int validate_sec_sizes(struct iwl_drv *drv,
return 0;
}
+static struct iwl_op_mode *
+_iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
+{
+ const struct iwl_op_mode_ops *ops = op->ops;
+ struct dentry *dbgfs_dir = NULL;
+ struct iwl_op_mode *op_mode = NULL;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+ drv->dbgfs_drv);
+ if (!drv->dbgfs_op_mode) {
+ IWL_ERR(drv,
+ "failed to create opmode debugfs directory\n");
+ return op_mode;
+ }
+ dbgfs_dir = drv->dbgfs_op_mode;
+#endif
+
+ op_mode = ops->start(drv->trans, drv->cfg, &drv->fw, dbgfs_dir);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (!op_mode) {
+ debugfs_remove_recursive(drv->dbgfs_op_mode);
+ drv->dbgfs_op_mode = NULL;
+ }
+#endif
+
+ return op_mode;
+}
+
+static void _iwl_op_mode_stop(struct iwl_drv *drv)
+{
+ /* op_mode can be NULL if its start failed */
+ if (drv->op_mode) {
+ iwl_op_mode_stop(drv->op_mode);
+ drv->op_mode = NULL;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ debugfs_remove_recursive(drv->dbgfs_op_mode);
+ drv->dbgfs_op_mode = NULL;
+#endif
+ }
+}
+
/**
- * iwl_ucode_callback - callback when firmware was loaded
+ * iwl_req_fw_callback - callback when firmware was loaded
*
* If loaded successfully, copies the firmware into buffers
* for the card to fetch (via DMA).
*/
-static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
+static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
{
struct iwl_drv *drv = context;
struct iwl_fw *fw = &drv->fw;
@@ -908,8 +965,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
list_add_tail(&drv->list, &op->drv);
if (op->ops) {
- const struct iwl_op_mode_ops *ops = op->ops;
- drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw);
+ drv->op_mode = _iwl_op_mode_start(drv, op);
if (!drv->op_mode) {
mutex_unlock(&iwlwifi_opmode_table_mtx);
@@ -969,24 +1025,51 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
init_completion(&drv->request_firmware_complete);
INIT_LIST_HEAD(&drv->list);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ /* Create the device debugfs entries. */
+ drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
+ iwl_dbgfs_root);
+
+ if (!drv->dbgfs_drv) {
+ IWL_ERR(drv, "failed to create debugfs directory\n");
+ goto err_free_drv;
+ }
+
+ /* Create transport layer debugfs dir */
+ drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv);
+
+ if (!drv->trans->dbgfs_dir) {
+ IWL_ERR(drv, "failed to create transport debugfs directory\n");
+ goto err_free_dbgfs;
+ }
+#endif
+
ret = iwl_request_firmware(drv, true);
if (ret) {
IWL_ERR(trans, "Couldn't request the fw\n");
- kfree(drv);
- drv = NULL;
+ goto err_fw;
}
return drv;
+
+err_fw:
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+err_free_dbgfs:
+ debugfs_remove_recursive(drv->dbgfs_drv);
+err_free_drv:
+#endif
+ kfree(drv);
+ drv = NULL;
+
+ return drv;
}
void iwl_drv_stop(struct iwl_drv *drv)
{
wait_for_completion(&drv->request_firmware_complete);
- /* op_mode can be NULL if its start failed */
- if (drv->op_mode)
- iwl_op_mode_stop(drv->op_mode);
+ _iwl_op_mode_stop(drv);
iwl_dealloc_ucode(drv);
@@ -1000,6 +1083,10 @@ void iwl_drv_stop(struct iwl_drv *drv)
list_del(&drv->list);
mutex_unlock(&iwlwifi_opmode_table_mtx);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ debugfs_remove_recursive(drv->dbgfs_drv);
+#endif
+
kfree(drv);
}
@@ -1022,15 +1109,18 @@ int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
{
int i;
struct iwl_drv *drv;
+ struct iwlwifi_opmode_table *op;
mutex_lock(&iwlwifi_opmode_table_mtx);
for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
- if (strcmp(iwlwifi_opmode_table[i].name, name))
+ op = &iwlwifi_opmode_table[i];
+ if (strcmp(op->name, name))
continue;
- iwlwifi_opmode_table[i].ops = ops;
- list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
- drv->op_mode = ops->start(drv->trans, drv->cfg,
- &drv->fw);
+ op->ops = ops;
+ /* TODO: need to handle exceptional case */
+ list_for_each_entry(drv, &op->drv, list)
+ drv->op_mode = _iwl_op_mode_start(drv, op);
+
mutex_unlock(&iwlwifi_opmode_table_mtx);
return 0;
}
@@ -1051,12 +1141,9 @@ void iwl_opmode_deregister(const char *name)
iwlwifi_opmode_table[i].ops = NULL;
/* call the stop routine for all devices */
- list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) {
- if (drv->op_mode) {
- iwl_op_mode_stop(drv->op_mode);
- drv->op_mode = NULL;
- }
- }
+ list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
+ _iwl_op_mode_stop(drv);
+
mutex_unlock(&iwlwifi_opmode_table_mtx);
return;
}
@@ -1076,6 +1163,14 @@ static int __init iwl_drv_init(void)
pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
pr_info(DRV_COPYRIGHT "\n");
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ /* Create the root of iwlwifi debugfs subsystem. */
+ iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL);
+
+ if (!iwl_dbgfs_root)
+ return -EFAULT;
+#endif
+
return iwl_pci_register_driver();
}
module_init(iwl_drv_init);
@@ -1083,6 +1178,10 @@ module_init(iwl_drv_init);
static void __exit iwl_drv_exit(void)
{
iwl_pci_unregister_driver();
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ debugfs_remove_recursive(iwl_dbgfs_root);
+#endif
}
module_exit(iwl_drv_exit);
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 2cbf137b25bf..285de5f68c05 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -90,9 +90,9 @@
* 4) The bus specific component configures the bus
* 5) The bus specific component calls to the drv bus agnostic part
* (iwl_drv_start)
- * 6) iwl_drv_start fetches the fw ASYNC, iwl_ucode_callback
- * 7) iwl_ucode_callback parses the fw file
- * 8) iwl_ucode_callback starts the wifi implementation to matches the fw
+ * 6) iwl_drv_start fetches the fw ASYNC, iwl_req_fw_callback
+ * 7) iwl_req_fw_callback parses the fw file
+ * 8) iwl_req_fw_callback starts the wifi implementation to match the fw
*/
struct iwl_drv;
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index 9c07c670a1ce..a5e425718f56 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -85,8 +85,6 @@ struct iwl_eeprom_data {
int n_hw_addrs;
u8 hw_addr[ETH_ALEN];
- u16 radio_config;
-
u8 calib_version;
__le16 calib_voltage;
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 2153e4cc5572..d1a86b66bc51 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -124,8 +124,7 @@ struct iwl_ucode_capabilities {
/* one for each uCode image (inst/data, init/runtime/wowlan) */
struct fw_desc {
- dma_addr_t p_addr; /* hardware address */
- void *v_addr; /* software address */
+ const void *data; /* vmalloc'ed data */
u32 len; /* size in bytes */
u32 offset; /* offset in the device */
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 64886f95664f..c8d9b9517468 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -134,7 +134,8 @@ struct iwl_cfg;
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
const struct iwl_cfg *cfg,
- const struct iwl_fw *fw);
+ const struct iwl_fw *fw,
+ struct dentry *dbgfs_dir);
void (*stop)(struct iwl_op_mode *op_mode);
int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 92576a3e84ef..ff1154232885 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -184,14 +184,20 @@ struct iwl_rx_packet {
* @CMD_SYNC: The caller will be stalled until the fw responds to the command
* @CMD_ASYNC: Return right away and don't wait for the response
* @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
- * response.
+ * response. The caller needs to call iwl_free_resp when done.
+ * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the
+ * response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be
+ * copied. The pointer passed to the response handler is owned by the
+ * transport and doesn't need to be freed by the op_mode. This also means
+ * that the pointer is invalidated after the op_mode's handler returns.
* @CMD_ON_DEMAND: This command is sent by the test mode pipe.
*/
enum CMD_MODE {
CMD_SYNC = 0,
CMD_ASYNC = BIT(0),
CMD_WANT_SKB = BIT(1),
- CMD_ON_DEMAND = BIT(2),
+ CMD_WANT_HCMD = BIT(2),
+ CMD_ON_DEMAND = BIT(3),
};
#define DEF_CMD_PAYLOAD_SIZE 320
@@ -460,6 +466,8 @@ struct iwl_trans {
size_t dev_cmd_headroom;
char dev_cmd_pool_name[50];
+ struct dentry *dbgfs_dir;
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index f4c3500b68c6..2a4675396707 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -263,8 +263,6 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
-#ifndef CONFIG_IWLWIFI_IDI
-
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
@@ -282,8 +280,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!trans_pcie->drv)
goto out_free_trans;
+ /* register transport layer debugfs here */
+ if (iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir))
+ goto out_free_drv;
+
return 0;
+out_free_drv:
+ iwl_drv_stop(trans_pcie->drv);
out_free_trans:
iwl_trans_pcie_free(iwl_trans);
pci_set_drvdata(pdev, NULL);
@@ -301,8 +305,6 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
-#endif /* CONFIG_IWLWIFI_IDI */
-
#ifdef CONFIG_PM_SLEEP
static int iwl_pci_suspend(struct device *device)
@@ -347,15 +349,6 @@ static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
#endif
-#ifdef CONFIG_IWLWIFI_IDI
-/*
- * Defined externally in iwl-idi.c
- */
-int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-void __devexit iwl_pci_remove(struct pci_dev *pdev);
-
-#endif /* CONFIG_IWLWIFI_IDI */
-
static struct pci_driver iwl_pci_driver = {
.name = DRV_NAME,
.id_table = iwl_hw_card_ids,
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 4ffc18dc3a57..401178f44a3b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -184,6 +184,7 @@ struct iwl_queue {
struct iwl_pcie_tx_queue_entry {
struct iwl_device_cmd *cmd;
+ struct iwl_device_cmd *copy_cmd;
struct sk_buff *skb;
struct iwl_cmd_meta meta;
};
@@ -310,7 +311,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
******************************************************/
void iwl_bg_rx_replenish(struct work_struct *data);
void iwl_irq_tasklet(struct iwl_trans *trans);
-void iwlagn_rx_replenish(struct iwl_trans *trans);
+void iwl_rx_replenish(struct iwl_trans *trans);
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
struct iwl_rx_queue *q);
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index d1a61ba6247a..17c8e5d82681 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -35,10 +35,6 @@
#include "internal.h"
#include "iwl-op-mode.h"
-#ifdef CONFIG_IWLWIFI_IDI
-#include "iwl-amfh.h"
-#endif
-
/******************************************************************************
*
* RX path functions
@@ -181,15 +177,15 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
}
/**
- * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
*/
-static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
+static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
return cpu_to_le32((u32)(dma_addr >> 8));
}
/**
- * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
+ * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
*
* If there are slots in the RX queue that need to be restocked,
* and we have free pre-allocated buffers, fill the ranks as much
@@ -199,7 +195,7 @@ static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
* also updates the memory address in the firmware to reference the new
* target buffer.
*/
-static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
+static void iwl_rx_queue_restock(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
@@ -207,6 +203,17 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
struct iwl_rx_mem_buffer *rxb;
unsigned long flags;
+ /*
+ * If the device isn't enabled - no need to try to add buffers...
+ * This can happen when we stop the device and still have an interrupt
+ * pending. We stop the APM before we sync the interrupts / tasklets
+ * because we have to (see comment there). On the other hand, since
+ * the APM is stopped, we cannot access the HW (in particular not prph).
+ * So don't try to restock if the APM has already been stopped.
+ */
+ if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
+ return;
+
spin_lock_irqsave(&rxq->lock, flags);
while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
/* The overwritten rxb must be a used one */
@@ -219,7 +226,7 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
list_del(element);
/* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(rxb->page_dma);
+ rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(rxb->page_dma);
rxq->queue[rxq->write] = rxb;
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--;
@@ -230,7 +237,6 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
if (rxq->free_count <= RX_LOW_WATERMARK)
schedule_work(&trans_pcie->rx_replenish);
-
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
if (rxq->write_actual != (rxq->write & ~0x7)) {
@@ -241,15 +247,16 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
}
}
-/**
- * iwlagn_rx_replenish - Move all used packet from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
+/*
+ * iwl_rx_allocate - allocate a page for each used RBD
*
- * Also restock the Rx queue via iwl_rx_queue_restock.
- * This is called as a scheduled work item (except for during initialization)
+ * A used RBD is an Rx buffer that has been given to the stack. To use it again
+ * a page must be allocated and the RBD must point to the page. This function
+ * doesn't change the HW pointer but handles the list of pages that is used by
+ * iwl_rx_queue_restock. The latter function will update the HW to use the newly
+ * allocated buffers.
*/
-static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
+static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
@@ -328,23 +335,31 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
}
}
-void iwlagn_rx_replenish(struct iwl_trans *trans)
+/*
+ * iwl_rx_replenish - Move all used buffers from rx_used to rx_free
+ *
+ * When moving to rx_free a page is allocated for the slot.
+ *
+ * Also restock the Rx queue via iwl_rx_queue_restock.
+ * This is called as a scheduled work item (except for during initialization)
+ */
+void iwl_rx_replenish(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long flags;
- iwlagn_rx_allocate(trans, GFP_KERNEL);
+ iwl_rx_allocate(trans, GFP_KERNEL);
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
- iwlagn_rx_queue_restock(trans);
+ iwl_rx_queue_restock(trans);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
-static void iwlagn_rx_replenish_now(struct iwl_trans *trans)
+static void iwl_rx_replenish_now(struct iwl_trans *trans)
{
- iwlagn_rx_allocate(trans, GFP_ATOMIC);
+ iwl_rx_allocate(trans, GFP_ATOMIC);
- iwlagn_rx_queue_restock(trans);
+ iwl_rx_queue_restock(trans);
}
void iwl_bg_rx_replenish(struct work_struct *data)
@@ -352,7 +367,7 @@ void iwl_bg_rx_replenish(struct work_struct *data)
struct iwl_trans_pcie *trans_pcie =
container_of(data, struct iwl_trans_pcie, rx_replenish);
- iwlagn_rx_replenish(trans_pcie->trans);
+ iwl_rx_replenish(trans_pcie->trans);
}
static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
@@ -421,13 +436,23 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
index = SEQ_TO_INDEX(sequence);
cmd_index = get_cmd_index(&txq->q, index);
- if (reclaim)
- cmd = txq->entries[cmd_index].cmd;
- else
+ if (reclaim) {
+ struct iwl_pcie_tx_queue_entry *ent;
+ ent = &txq->entries[cmd_index];
+ cmd = ent->copy_cmd;
+ WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
+ } else {
cmd = NULL;
+ }
err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
+ if (reclaim) {
+ /* The original command isn't needed any more */
+ kfree(txq->entries[cmd_index].copy_cmd);
+ txq->entries[cmd_index].copy_cmd = NULL;
+ }
+
/*
* After here, we should always check rxcb._page_stolen,
* if it is true then one of the handlers took the page.
@@ -520,7 +545,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
count++;
if (count >= 8) {
rxq->read = i;
- iwlagn_rx_replenish_now(trans);
+ iwl_rx_replenish_now(trans);
count = 0;
}
}
@@ -529,9 +554,9 @@ static void iwl_rx_handle(struct iwl_trans *trans)
/* Backtrack one entry */
rxq->read = i;
if (fill_rx)
- iwlagn_rx_replenish_now(trans);
+ iwl_rx_replenish_now(trans);
else
- iwlagn_rx_queue_restock(trans);
+ iwl_rx_queue_restock(trans);
}
/**
@@ -713,11 +738,9 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
/* Disable periodic interrupt; we use it as just a one-shot. */
iwl_write8(trans, CSR_INT_PERIODIC_REG,
CSR_INT_PERIODIC_DIS);
-#ifdef CONFIG_IWLWIFI_IDI
- iwl_amfh_rx_handler();
-#else
+
iwl_rx_handle(trans);
-#endif
+
/*
* Enable periodic interrupt in 8 msec only if we received
* real RX interrupt (instead of just periodic int), to catch
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index dbeebef562d5..fe0fffd04304 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -216,7 +216,7 @@ static int iwl_rx_init(struct iwl_trans *trans)
rxq->free_count = 0;
spin_unlock_irqrestore(&rxq->lock, flags);
- iwlagn_rx_replenish(trans);
+ iwl_rx_replenish(trans);
iwl_trans_rx_hw_init(trans, rxq);
@@ -492,10 +492,11 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
iwl_tx_queue_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */
-
if (txq_id == trans_pcie->cmd_queue)
- for (i = 0; i < txq->q.n_window; i++)
+ for (i = 0; i < txq->q.n_window; i++) {
kfree(txq->entries[i].cmd);
+ kfree(txq->entries[i].copy_cmd);
+ }
/* De-alloc circular buffer of TFDs */
if (txq->q.n_bd) {
@@ -675,13 +676,10 @@ static void iwl_set_pwr_vmain(struct iwl_trans *trans)
static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int pos;
u16 pci_lnk_ctl;
- struct pci_dev *pci_dev = trans_pcie->pci_dev;
-
- pos = pci_pcie_cap(pci_dev);
- pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
+ pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL,
+ &pci_lnk_ctl);
return pci_lnk_ctl;
}
@@ -854,10 +852,8 @@ static int iwl_nic_init(struct iwl_trans *trans)
iwl_op_mode_nic_config(trans->op_mode);
-#ifndef CONFIG_IWLWIFI_IDI
/* Allocate the RX queue, or reset if it is already allocated */
iwl_rx_init(trans);
-#endif
/* Allocate or reset and init all Tx and Command queues */
if (iwl_tx_init(trans))
@@ -896,6 +892,7 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
static int iwl_prepare_card_hw(struct iwl_trans *trans)
{
int ret;
+ int t = 0;
IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
@@ -908,30 +905,25 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_PREPARE);
- ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
- ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+ do {
+ ret = iwl_set_hw_ready(trans);
+ if (ret >= 0)
+ return 0;
- if (ret < 0)
- return ret;
+ usleep_range(200, 1000);
+ t += 200;
+ } while (t < 150000);
- /* HW should be ready by now, check again. */
- ret = iwl_set_hw_ready(trans);
- if (ret >= 0)
- return 0;
return ret;
}
/*
* ucode
*/
-static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
- const struct fw_desc *section)
+static int iwl_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
+ dma_addr_t phy_addr, u32 byte_cnt)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- dma_addr_t phy_addr = section->p_addr;
- u32 byte_cnt = section->len;
- u32 dst_addr = section->offset;
int ret;
trans_pcie->ucode_write_complete = false;
@@ -945,8 +937,8 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
dst_addr);
iwl_write_direct32(trans,
- FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
- phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
+ FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
+ phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
iwl_write_direct32(trans,
FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
@@ -965,33 +957,64 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
- IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
- section_num);
ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
trans_pcie->ucode_write_complete, 5 * HZ);
if (!ret) {
- IWL_ERR(trans, "Could not load the [%d] uCode section\n",
- section_num);
+ IWL_ERR(trans, "Failed to load firmware chunk!\n");
return -ETIMEDOUT;
}
return 0;
}
-static int iwl_load_given_ucode(struct iwl_trans *trans,
- const struct fw_img *image)
+static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
+ const struct fw_desc *section)
{
+ u8 *v_addr;
+ dma_addr_t p_addr;
+ u32 offset;
int ret = 0;
- int i;
- for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
- if (!image->sec[i].p_addr)
- break;
+ IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
+ section_num);
- ret = iwl_load_section(trans, i, &image->sec[i]);
- if (ret)
- return ret;
+ v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL);
+ if (!v_addr)
+ return -ENOMEM;
+
+ for (offset = 0; offset < section->len; offset += PAGE_SIZE) {
+ u32 copy_size;
+
+ copy_size = min_t(u32, PAGE_SIZE, section->len - offset);
+
+ memcpy(v_addr, (u8 *)section->data + offset, copy_size);
+ ret = iwl_load_firmware_chunk(trans, section->offset + offset,
+ p_addr, copy_size);
+ if (ret) {
+ IWL_ERR(trans,
+ "Could not load the [%d] uCode section\n",
+ section_num);
+ break;
}
+ }
+
+ dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr);
+ return ret;
+}
+
+static int iwl_load_given_ucode(struct iwl_trans *trans,
+ const struct fw_img *image)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
+ if (!image->sec[i].data)
+ break;
+
+ ret = iwl_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
+ }
/* Remove all resets to allow NIC to operate */
iwl_write32(trans, CSR_RESET, 0);
@@ -1184,9 +1207,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
*/
if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
iwl_trans_tx_stop(trans);
-#ifndef CONFIG_IWLWIFI_IDI
iwl_trans_rx_stop(trans);
-#endif
+
/* Power-down device's busmaster DMA clocks */
iwl_write_prph(trans, APMG_CLK_DIS_REG,
APMG_CLK_VAL_DMA_CLK_RQT);
@@ -1457,14 +1479,16 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
bool hw_rfkill;
unsigned long flags;
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ iwl_disable_interrupts(trans);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
iwl_apm_stop(trans);
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
iwl_disable_interrupts(trans);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
-
if (!op_mode_leaving) {
/*
* Even if we stop the HW, we still want the RF kill
@@ -1552,9 +1576,8 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
iwl_trans_pcie_tx_free(trans);
-#ifndef CONFIG_IWLWIFI_IDI
iwl_trans_pcie_rx_free(trans);
-#endif
+
if (trans_pcie->irq_requested == true) {
free_irq(trans_pcie->irq, trans);
iwl_free_isr_ict(trans);
@@ -1772,7 +1795,7 @@ void iwl_dump_csr(struct iwl_trans *trans)
#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
if (!debugfs_create_file(#name, mode, parent, trans, \
&iwl_dbgfs_##name##_ops)) \
- return -ENOMEM; \
+ goto err; \
} while (0)
/* file operation */
@@ -2036,6 +2059,10 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
return 0;
+
+err:
+ IWL_ERR(trans, "failed to create the trans debugfs entry\n");
+ return -ENOMEM;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 6baf8deef519..105e3af3c621 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -521,12 +521,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
u16 copy_size, cmd_size;
bool had_nocopy = false;
int i;
- u8 *cmd_dest;
-#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
- const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
- int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
- int trace_idx;
-#endif
+ u32 cmd_pos;
copy_size = sizeof(out_cmd->hdr);
cmd_size = sizeof(out_cmd->hdr);
@@ -584,15 +579,31 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
INDEX_TO_SEQ(q->write_ptr));
/* and copy the data that needs to be copied */
-
- cmd_dest = out_cmd->payload;
+ cmd_pos = offsetof(struct iwl_device_cmd, payload);
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
if (!cmd->len[i])
continue;
if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
break;
- memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
- cmd_dest += cmd->len[i];
+ memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
+ cmd_pos += cmd->len[i];
+ }
+
+ WARN_ON_ONCE(txq->entries[idx].copy_cmd);
+
+ /*
+ * Since out_cmd will be the source address of the FH, it will write
+ * the retry count there. So when the user needs to receive the HCMD
+ * that corresponds to the response in the response handler, it needs
+ * to set CMD_WANT_HCMD.
+ */
+ if (cmd->flags & CMD_WANT_HCMD) {
+ txq->entries[idx].copy_cmd =
+ kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
+ if (unlikely(!txq->entries[idx].copy_cmd)) {
+ idx = -ENOMEM;
+ goto out;
+ }
}
IWL_DEBUG_HC(trans,
@@ -612,11 +623,6 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
dma_unmap_len_set(out_meta, len, copy_size);
iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
-#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
- trace_bufs[0] = &out_cmd->hdr;
- trace_lens[0] = copy_size;
- trace_idx = 1;
-#endif
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
if (!cmd->len[i])
@@ -635,25 +641,14 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
cmd->len[i], 0);
-#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
- trace_bufs[trace_idx] = cmd->data[i];
- trace_lens[trace_idx] = cmd->len[i];
- trace_idx++;
-#endif
}
out_meta->flags = cmd->flags;
txq->need_update = 1;
- /* check that tracing gets all possible blocks */
- BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
-#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
- trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags,
- trace_bufs[0], trace_lens[0],
- trace_bufs[1], trace_lens[1],
- trace_bufs[2], trace_lens[2]);
-#endif
+ trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size,
+ &out_cmd->hdr, copy_size);
/* start timer if queue currently empty */
if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 26e68326710b..aaa297315c47 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -1159,6 +1159,22 @@ void lbs_set_mac_control(struct lbs_private *priv)
lbs_deb_leave(LBS_DEB_CMD);
}
+int lbs_set_mac_control_sync(struct lbs_private *priv)
+{
+ struct cmd_ds_mac_control cmd;
+ int ret = 0;
+
+ lbs_deb_enter(LBS_DEB_CMD);
+
+ cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+ cmd.action = cpu_to_le16(priv->mac_control);
+ cmd.reserved = 0;
+ ret = lbs_cmd_with_response(priv, CMD_MAC_CONTROL, &cmd);
+
+ lbs_deb_leave(LBS_DEB_CMD);
+ return ret;
+}
+
/**
* lbs_allocate_cmd_buffer - allocates the command buffer and links
* it to command free queue
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index ab07608e13d0..4279e8ab95f2 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -96,6 +96,7 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv);
int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on);
void lbs_set_mac_control(struct lbs_private *priv);
+int lbs_set_mac_control_sync(struct lbs_private *priv);
int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
s16 *maxlevel);
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index fe1ea43c5149..0c02f0483d1f 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -682,8 +682,10 @@ static int lbs_setup_firmware(struct lbs_private *priv)
/* Send cmd to FW to enable 11D function */
ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
+ if (ret)
+ goto done;
- lbs_set_mac_control(priv);
+ ret = lbs_set_mac_control_sync(priv);
done:
lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
return ret;
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index a03457292c88..7001856241e6 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -227,7 +227,9 @@ static void lbtf_free_adapter(struct lbtf_private *priv)
lbtf_deb_leave(LBTF_DEB_MAIN);
}
-static void lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void lbtf_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct lbtf_private *priv = hw->priv;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 00838395778c..429ca3215fdb 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -38,7 +38,7 @@ MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211");
MODULE_LICENSE("GPL");
-static u32 wmediumd_pid;
+static u32 wmediumd_portid;
static int radios = 2;
module_param(radios, int, 0444);
@@ -545,7 +545,7 @@ static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
struct sk_buff *my_skb,
- int dst_pid)
+ int dst_portid)
{
struct sk_buff *skb;
struct mac80211_hwsim_data *data = hw->priv;
@@ -619,7 +619,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
goto nla_put_failure;
genlmsg_end(skb, msg_head);
- genlmsg_unicast(&init_net, skb, dst_pid);
+ genlmsg_unicast(&init_net, skb, dst_portid);
/* Enqueue the packet */
skb_queue_tail(&data->pending, my_skb);
@@ -709,11 +709,13 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
return ack;
}
-static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
bool ack;
struct ieee80211_tx_info *txi;
- u32 _pid;
+ u32 _portid;
mac80211_hwsim_monitor_rx(hw, skb);
@@ -724,10 +726,10 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
}
/* wmediumd mode check */
- _pid = ACCESS_ONCE(wmediumd_pid);
+ _portid = ACCESS_ONCE(wmediumd_portid);
- if (_pid)
- return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
+ if (_portid)
+ return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
/* NO wmediumd detected, perfect medium simulation */
ack = mac80211_hwsim_tx_frame_no_nl(hw, skb);
@@ -812,7 +814,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
struct ieee80211_hw *hw = arg;
struct sk_buff *skb;
struct ieee80211_tx_info *info;
- u32 _pid;
+ u32 _portid;
hwsim_check_magic(vif);
@@ -829,10 +831,10 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
mac80211_hwsim_monitor_rx(hw, skb);
/* wmediumd mode check */
- _pid = ACCESS_ONCE(wmediumd_pid);
+ _portid = ACCESS_ONCE(wmediumd_portid);
- if (_pid)
- return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
+ if (_portid)
+ return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
mac80211_hwsim_tx_frame_no_nl(hw, skb);
dev_kfree_skb(skb);
@@ -1313,7 +1315,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct sk_buff *skb;
struct ieee80211_pspoll *pspoll;
- u32 _pid;
+ u32 _portid;
if (!vp->assoc)
return;
@@ -1334,10 +1336,10 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
memcpy(pspoll->ta, mac, ETH_ALEN);
/* wmediumd mode check */
- _pid = ACCESS_ONCE(wmediumd_pid);
+ _portid = ACCESS_ONCE(wmediumd_portid);
- if (_pid)
- return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
+ if (_portid)
+ return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid);
if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
printk(KERN_DEBUG "%s: PS-poll frame not ack'ed\n", __func__);
@@ -1351,7 +1353,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
- u32 _pid;
+ u32 _portid;
if (!vp->assoc)
return;
@@ -1373,10 +1375,10 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
/* wmediumd mode check */
- _pid = ACCESS_ONCE(wmediumd_pid);
+ _portid = ACCESS_ONCE(wmediumd_portid);
- if (_pid)
- return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
+ if (_portid)
+ return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid);
if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
printk(KERN_DEBUG "%s: nullfunc frame not ack'ed\n", __func__);
@@ -1630,10 +1632,10 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
if (info == NULL)
goto out;
- wmediumd_pid = info->snd_pid;
+ wmediumd_portid = info->snd_portid;
printk(KERN_DEBUG "mac80211_hwsim: received a REGISTER, "
- "switching to wmediumd mode with pid %d\n", info->snd_pid);
+ "switching to wmediumd mode with pid %d\n", info->snd_portid);
return 0;
out:
@@ -1670,10 +1672,10 @@ static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
if (state != NETLINK_URELEASE)
return NOTIFY_DONE;
- if (notify->pid == wmediumd_pid) {
+ if (notify->portid == wmediumd_portid) {
printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink"
" socket, switching to perfect channel medium\n");
- wmediumd_pid = 0;
+ wmediumd_portid = 0;
}
return NOTIFY_DONE;
@@ -1727,6 +1729,7 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = {
#endif
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO) },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
};
static const struct ieee80211_iface_combination hwsim_if_comb = {
@@ -1813,7 +1816,8 @@ static int __init init_mac80211_hwsim(void)
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT);
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE);
hw->flags = IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_SIGNAL_DBM |
@@ -2052,7 +2056,7 @@ failed:
mac80211_hwsim_free();
return err;
}
-
+module_init(init_mac80211_hwsim);
static void __exit exit_mac80211_hwsim(void)
{
@@ -2063,7 +2067,4 @@ static void __exit exit_mac80211_hwsim(void)
mac80211_hwsim_free();
unregister_netdev(hwsim_mon);
}
-
-
-module_init(init_mac80211_hwsim);
module_exit(exit_mac80211_hwsim);
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index e535c937628b..245a371f1a43 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -176,23 +176,6 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
}
/*
- * This function handles the command response of 11n configuration request.
- *
- * Handling includes changing the header fields into CPU format.
- */
-int mwifiex_ret_11n_cfg(struct host_cmd_ds_command *resp,
- struct mwifiex_ds_11n_tx_cfg *tx_cfg)
-{
- struct host_cmd_ds_11n_cfg *htcfg = &resp->params.htcfg;
-
- if (tx_cfg) {
- tx_cfg->tx_htcap = le16_to_cpu(htcfg->ht_tx_cap);
- tx_cfg->tx_htinfo = le16_to_cpu(htcfg->ht_tx_info);
- }
- return 0;
-}
-
-/*
* This function prepares command of reconfigure Tx buffer.
*
* Preparation includes -
@@ -258,27 +241,6 @@ int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
}
/*
- * This function handles the command response of AMSDU aggregation
- * control request.
- *
- * Handling includes changing the header fields into CPU format.
- */
-int mwifiex_ret_amsdu_aggr_ctrl(struct host_cmd_ds_command *resp,
- struct mwifiex_ds_11n_amsdu_aggr_ctrl
- *amsdu_aggr_ctrl)
-{
- struct host_cmd_ds_amsdu_aggr_ctrl *amsdu_ctrl =
- &resp->params.amsdu_aggr_ctrl;
-
- if (amsdu_aggr_ctrl) {
- amsdu_aggr_ctrl->enable = le16_to_cpu(amsdu_ctrl->enable);
- amsdu_aggr_ctrl->curr_buf_size =
- le16_to_cpu(amsdu_ctrl->curr_buf_size);
- }
- return 0;
-}
-
-/*
* This function prepares 11n configuration command.
*
* Preparation includes -
@@ -726,3 +688,29 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
return count;
}
+
+/*
+ * This function retrieves the tx BA stream table entry matching the given RA
+ * and deletes it.
+ */
+void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra)
+{
+ struct mwifiex_tx_ba_stream_tbl *tbl, *tmp;
+ unsigned long flags;
+
+ if (!ra)
+ return;
+
+ spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+ list_for_each_entry_safe(tbl, tmp, &priv->tx_ba_stream_tbl_ptr, list) {
+ if (!memcmp(tbl->ra, ra, ETH_ALEN)) {
+ spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
+ flags);
+ mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, tbl);
+ spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+
+ return;
+}
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 28366e9211fb..46006a54a656 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -28,8 +28,6 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp);
int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp);
-int mwifiex_ret_11n_cfg(struct host_cmd_ds_command *resp,
- struct mwifiex_ds_11n_tx_cfg *tx_cfg);
int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd, u16 cmd_action,
struct mwifiex_ds_11n_tx_cfg *txcfg);
@@ -60,15 +58,13 @@ int mwifiex_get_rx_reorder_tbl(struct mwifiex_private *priv,
struct mwifiex_ds_rx_reorder_tbl *buf);
int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
struct mwifiex_ds_tx_ba_stream_tbl *buf);
-int mwifiex_ret_amsdu_aggr_ctrl(struct host_cmd_ds_command *resp,
- struct mwifiex_ds_11n_amsdu_aggr_ctrl
- *amsdu_aggr_ctrl);
int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
int cmd_action, u16 *buf_size);
int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
int cmd_action,
struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl);
+void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra);
/*
* This function checks whether AMPDU is allowed or not for a particular TID.
@@ -157,4 +153,18 @@ mwifiex_is_ba_stream_setup(struct mwifiex_private *priv,
return false;
}
+
+/*
+ * This function checks whether the associated station is 11n enabled.
+ */
+static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
+ struct mwifiex_sta_node *node)
+{
+
+ if (!node || (priv->bss_role != MWIFIEX_BSS_ROLE_UAP) ||
+ !priv->ap_11n_enabled)
+ return 0;
+
+ return node->is_11n_enabled;
+}
#endif /* !_MWIFIEX_11N_H_ */
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index ab84eb943749..395f1bfd4102 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -62,9 +62,7 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
};
struct tx_packet_hdr *tx_header;
- skb_put(skb_aggr, sizeof(*tx_header));
-
- tx_header = (struct tx_packet_hdr *) skb_aggr->data;
+ tx_header = (void *)skb_put(skb_aggr, sizeof(*tx_header));
/* Copy DA and SA */
dt_offset = 2 * ETH_ALEN;
@@ -82,12 +80,10 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN);
/* Add payload */
- skb_put(skb_aggr, skb_src->len);
- memcpy(skb_aggr->data + sizeof(*tx_header), skb_src->data,
- skb_src->len);
- *pad = (((skb_src->len + LLC_SNAP_LEN) & 3)) ? (4 - (((skb_src->len +
- LLC_SNAP_LEN)) & 3)) : 0;
- skb_put(skb_aggr, *pad);
+ memcpy(skb_put(skb_aggr, skb_src->len), skb_src->data, skb_src->len);
+
+ /* Add padding so the new MSDU starts on a 4-byte boundary */
+ *pad = (4 - ((unsigned long)skb_aggr->tail & 0x3)) % 4;
return skb_aggr->len + *pad;
}
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 591ccd33f83c..9402b93b9a36 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -54,8 +54,13 @@ mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
tbl->rx_reorder_ptr[i] = NULL;
}
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
- if (rx_tmp_ptr)
- mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
+ if (rx_tmp_ptr) {
+ if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+ mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
+ else
+ mwifiex_process_rx_packet(priv->adapter,
+ rx_tmp_ptr);
+ }
}
spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -97,7 +102,11 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
rx_tmp_ptr = tbl->rx_reorder_ptr[i];
tbl->rx_reorder_ptr[i] = NULL;
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
- mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
+
+ if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+ mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
+ else
+ mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
}
spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -148,7 +157,7 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
* This function returns the pointer to an entry in Rx reordering
* table which matches the given TA/TID pair.
*/
-static struct mwifiex_rx_reorder_tbl *
+struct mwifiex_rx_reorder_tbl *
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
{
struct mwifiex_rx_reorder_tbl *tbl;
@@ -167,6 +176,31 @@ mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
return NULL;
}
+/* This function retrieves the pointer to an entry in Rx reordering
+ * table which matches the given TA and deletes it.
+ */
+void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
+{
+ struct mwifiex_rx_reorder_tbl *tbl, *tmp;
+ unsigned long flags;
+
+ if (!ta)
+ return;
+
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
+ if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
+ mwifiex_del_rx_reorder_entry(priv, tbl);
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
+ return;
+}
+
/*
* This function finds the last sequence number used in the packets
* buffered in Rx reordering table.
@@ -226,6 +260,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
struct mwifiex_rx_reorder_tbl *tbl, *new_node;
u16 last_seq = 0;
unsigned long flags;
+ struct mwifiex_sta_node *node;
/*
* If we get a TID, ta pair which is already present, dispatch all the
@@ -248,19 +283,26 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
new_node->tid = tid;
memcpy(new_node->ta, ta, ETH_ALEN);
new_node->start_win = seq_num;
- if (mwifiex_queuing_ra_based(priv))
- /* TODO for adhoc */
+
+ if (mwifiex_queuing_ra_based(priv)) {
dev_dbg(priv->adapter->dev,
- "info: ADHOC:last_seq=%d start_win=%d\n",
+ "info: AP/ADHOC:last_seq=%d start_win=%d\n",
last_seq, new_node->start_win);
- else
+ if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
+ node = mwifiex_get_sta_entry(priv, ta);
+ if (node)
+ last_seq = node->rx_seq[tid];
+ }
+ } else {
last_seq = priv->rx_seq[tid];
+ }
if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
last_seq >= new_node->start_win)
new_node->start_win = last_seq + 1;
new_node->win_size = win_size;
+ new_node->flags = 0;
new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size,
GFP_KERNEL);
@@ -396,8 +438,13 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (!tbl) {
- if (pkt_type != PKT_TYPE_BAR)
- mwifiex_process_rx_packet(priv->adapter, payload);
+ if (pkt_type != PKT_TYPE_BAR) {
+ if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+ mwifiex_handle_uap_rx_forward(priv, payload);
+ else
+ mwifiex_process_rx_packet(priv->adapter,
+ payload);
+ }
return 0;
}
start_win = tbl->start_win;
@@ -411,13 +458,20 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
* If seq_num is less than the starting win then ignore and drop the
* packet
*/
- if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {/* Wrap */
- if (seq_num >= ((start_win + TWOPOW11) &
- (MAX_TID_VALUE - 1)) && (seq_num < start_win))
+ if (tbl->flags & RXREOR_FORCE_NO_DROP) {
+ dev_dbg(priv->adapter->dev,
+ "RXREOR_FORCE_NO_DROP when HS is activated\n");
+ tbl->flags &= ~RXREOR_FORCE_NO_DROP;
+ } else {
+ if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {
+ if (seq_num >= ((start_win + TWOPOW11) &
+ (MAX_TID_VALUE - 1)) &&
+ seq_num < start_win)
+ return -1;
+ } else if ((seq_num < start_win) ||
+ (seq_num > (start_win + TWOPOW11))) {
return -1;
- } else if ((seq_num < start_win) ||
- (seq_num > (start_win + TWOPOW11))) {
- return -1;
+ }
}
/*
@@ -428,8 +482,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
seq_num = ((seq_num + win_size) - 1) & (MAX_TID_VALUE - 1);
if (((end_win < start_win) &&
- (seq_num < (TWOPOW11 - (MAX_TID_VALUE - start_win))) &&
- (seq_num > end_win)) ||
+ (seq_num < start_win) && (seq_num > end_win)) ||
((end_win > start_win) && ((seq_num > end_win) ||
(seq_num < start_win)))) {
end_win = seq_num;
@@ -591,3 +644,29 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
mwifiex_reset_11n_rx_seq_num(priv);
}
+
+/*
+ * This function updates the flags of all rx_reorder_tbl entries.
+ */
+void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
+{
+ struct mwifiex_private *priv;
+ struct mwifiex_rx_reorder_tbl *tbl;
+ unsigned long lock_flags;
+ int i;
+
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ if (!priv)
+ continue;
+ if (list_empty(&priv->rx_reorder_tbl_ptr))
+ continue;
+
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, lock_flags);
+ list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
+ tbl->flags = flags;
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, lock_flags);
+ }
+
+ return;
+}
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h
index 6c9815a0f5d8..4064041ac852 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h
@@ -38,6 +38,12 @@
#define ADDBA_RSP_STATUS_ACCEPT 0
#define MWIFIEX_DEF_11N_RX_SEQ_NUM 0xffff
+#define BA_SETUP_MAX_PACKET_THRESHOLD 16
+#define BA_SETUP_PACKET_OFFSET 16
+
+enum mwifiex_rxreor_flags {
+ RXREOR_FORCE_NO_DROP = 1<<0,
+};
static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
{
@@ -68,5 +74,9 @@ struct mwifiex_rx_reorder_tbl *mwifiex_11n_get_rxreorder_tbl(struct
mwifiex_private
*priv, int tid,
u8 *ta);
+struct mwifiex_rx_reorder_tbl *
+mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta);
+void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta);
+void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags);
#endif /* _MWIFIEX_11N_RXREORDER_H_ */
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index 3f66ebb0a630..dd0410d2d465 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -33,8 +33,10 @@ mwifiex-y += uap_cmd.o
mwifiex-y += ie.o
mwifiex-y += sta_cmdresp.o
mwifiex-y += sta_event.o
+mwifiex-y += uap_event.o
mwifiex-y += sta_tx.o
mwifiex-y += sta_rx.o
+mwifiex-y += uap_txrx.o
mwifiex-y += cfg80211.o
mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_MWIFIEX) += mwifiex.o
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index fe42137384da..2691620393ea 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -22,7 +22,7 @@
static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
{
- .max = 1, .types = BIT(NL80211_IFTYPE_STATION),
+ .max = 2, .types = BIT(NL80211_IFTYPE_STATION),
},
{
.max = 1, .types = BIT(NL80211_IFTYPE_AP),
@@ -37,6 +37,36 @@ static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
.beacon_int_infra_match = true,
};
+static const struct ieee80211_regdomain mwifiex_world_regdom_custom = {
+ .n_reg_rules = 7,
+ .alpha2 = "99",
+ .reg_rules = {
+ /* Channel 1 - 11 */
+ REG_RULE(2412-10, 2462+10, 40, 3, 20, 0),
+ /* Channel 12 - 13 */
+ REG_RULE(2467-10, 2472+10, 20, 3, 20,
+ NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+ /* Channel 14 */
+ REG_RULE(2484-10, 2484+10, 20, 3, 20,
+ NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
+ NL80211_RRF_NO_OFDM),
+ /* Channel 36 - 48 */
+ REG_RULE(5180-10, 5240+10, 40, 3, 20,
+ NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+ /* Channel 149 - 165 */
+ REG_RULE(5745-10, 5825+10, 40, 3, 20,
+ NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS),
+ /* Channel 52 - 64 */
+ REG_RULE(5260-10, 5320+10, 40, 3, 30,
+ NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
+ NL80211_RRF_DFS),
+ /* Channel 100 - 140 */
+ REG_RULE(5500-10, 5700+10, 40, 3, 30,
+ NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS |
+ NL80211_RRF_DFS),
+ }
+};
+
/*
* This function maps the nl802.11 channel type into driver channel type.
*
@@ -47,8 +77,7 @@ static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
* NL80211_CHAN_HT40MINUS -> IEEE80211_HT_PARAM_CHA_SEC_BELOW
* Others -> IEEE80211_HT_PARAM_CHA_SEC_NONE
*/
-static u8
-mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type)
+u8 mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type)
{
switch (chan_type) {
case NL80211_CHAN_NO_HT:
@@ -99,7 +128,7 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
- if (mwifiex_set_encode(priv, NULL, 0, key_index, peer_mac, 1)) {
+ if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index, peer_mac, 1)) {
wiphy_err(wiphy, "deleting the crypto keys\n");
return -EFAULT;
}
@@ -109,6 +138,188 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
}
/*
+ * This function forms an skb for a management frame.
+ */
+static int
+mwifiex_form_mgmt_frame(struct sk_buff *skb, const u8 *buf, size_t len)
+{
+ u8 addr[ETH_ALEN] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ u16 pkt_len;
+ u32 tx_control = 0, pkt_type = PKT_TYPE_MGMT;
+ struct timeval tv;
+
+ pkt_len = len + ETH_ALEN;
+
+ skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
+ memcpy(skb_push(skb, sizeof(pkt_len)), &pkt_len, sizeof(pkt_len));
+
+ memcpy(skb_push(skb, sizeof(tx_control)),
+ &tx_control, sizeof(tx_control));
+
+ memcpy(skb_push(skb, sizeof(pkt_type)), &pkt_type, sizeof(pkt_type));
+
+ /* Add packet data and address4 */
+ memcpy(skb_put(skb, sizeof(struct ieee80211_hdr_3addr)), buf,
+ sizeof(struct ieee80211_hdr_3addr));
+ memcpy(skb_put(skb, ETH_ALEN), addr, ETH_ALEN);
+ memcpy(skb_put(skb, len - sizeof(struct ieee80211_hdr_3addr)),
+ buf + sizeof(struct ieee80211_hdr_3addr),
+ len - sizeof(struct ieee80211_hdr_3addr));
+
+ skb->priority = LOW_PRIO_TID;
+ do_gettimeofday(&tv);
+ skb->tstamp = timeval_to_ktime(tv);
+
+ return 0;
+}
+
+/*
+ * CFG802.11 operation handler to transmit a management frame.
+ */
+static int
+mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct ieee80211_channel *chan, bool offchan,
+ enum nl80211_channel_type channel_type,
+ bool channel_type_valid, unsigned int wait,
+ const u8 *buf, size_t len, bool no_cck,
+ bool dont_wait_for_ack, u64 *cookie)
+{
+ struct sk_buff *skb;
+ u16 pkt_len;
+ const struct ieee80211_mgmt *mgmt;
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+
+ if (!buf || !len) {
+ wiphy_err(wiphy, "invalid buffer and length\n");
+ return -EFAULT;
+ }
+
+ mgmt = (const struct ieee80211_mgmt *)buf;
+ if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA &&
+ ieee80211_is_probe_resp(mgmt->frame_control)) {
+ /* Since we support offload probe resp, we need to skip probe
+ * resp in AP or GO mode */
+ wiphy_dbg(wiphy,
+ "info: skip to send probe resp in AP or GO mode\n");
+ return 0;
+ }
+
+ pkt_len = len + ETH_ALEN;
+ skb = dev_alloc_skb(MWIFIEX_MIN_DATA_HEADER_LEN +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+ pkt_len + sizeof(pkt_len));
+
+ if (!skb) {
+ wiphy_err(wiphy, "allocate skb failed for management frame\n");
+ return -ENOMEM;
+ }
+
+ mwifiex_form_mgmt_frame(skb, buf, len);
+ mwifiex_queue_tx_pkt(priv, skb);
+
+ *cookie = random32() | 1;
+ cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, GFP_ATOMIC);
+
+ wiphy_dbg(wiphy, "info: management frame transmitted\n");
+ return 0;
+}
+
+/*
+ * CFG802.11 operation handler to register a mgmt frame.
+ */
+static void
+mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u16 frame_type, bool reg)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+
+ if (reg)
+ priv->mgmt_frame_mask |= BIT(frame_type >> 4);
+ else
+ priv->mgmt_frame_mask &= ~BIT(frame_type >> 4);
+
+ mwifiex_send_cmd_async(priv, HostCmd_CMD_MGMT_FRAME_REG,
+ HostCmd_ACT_GEN_SET, 0, &priv->mgmt_frame_mask);
+
+ wiphy_dbg(wiphy, "info: mgmt frame registered\n");
+}
+
+/*
+ * CFG802.11 operation handler to remain on channel.
+ */
+static int
+mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, u64 *cookie)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+ int ret;
+
+ if (!chan || !cookie) {
+ wiphy_err(wiphy, "Invalid parameter for ROC\n");
+ return -EINVAL;
+ }
+
+ if (priv->roc_cfg.cookie) {
+ wiphy_dbg(wiphy, "info: ongoing ROC, cookie = 0x%llu\n",
+ priv->roc_cfg.cookie);
+ return -EBUSY;
+ }
+
+ ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_SET, chan,
+ &channel_type, duration);
+
+ if (!ret) {
+ *cookie = random32() | 1;
+ priv->roc_cfg.cookie = *cookie;
+ priv->roc_cfg.chan = *chan;
+ priv->roc_cfg.chan_type = channel_type;
+
+ cfg80211_ready_on_channel(wdev, *cookie, chan, channel_type,
+ duration, GFP_ATOMIC);
+
+ wiphy_dbg(wiphy, "info: ROC, cookie = 0x%llx\n", *cookie);
+ }
+
+ return ret;
+}
+
+/*
+ * CFG802.11 operation handler to cancel remain on channel.
+ */
+static int
+mwifiex_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev, u64 cookie)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+ int ret;
+
+ if (cookie != priv->roc_cfg.cookie)
+ return -ENOENT;
+
+ ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_REMOVE,
+ &priv->roc_cfg.chan,
+ &priv->roc_cfg.chan_type, 0);
+
+ if (!ret) {
+ cfg80211_remain_on_channel_expired(wdev, cookie,
+ &priv->roc_cfg.chan,
+ priv->roc_cfg.chan_type,
+ GFP_ATOMIC);
+
+ memset(&priv->roc_cfg, 0, sizeof(struct mwifiex_roc_cfg));
+
+ wiphy_dbg(wiphy, "info: cancel ROC, cookie = 0x%llx\n", cookie);
+ }
+
+ return ret;
+}
+
+/*
* CFG802.11 operation handler to set Tx power.
*/
static int
@@ -171,7 +382,8 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) {
priv->wep_key_curr_index = key_index;
- } else if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) {
+ } else if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index,
+ NULL, 0)) {
wiphy_err(wiphy, "set default Tx key index\n");
return -EFAULT;
}
@@ -207,7 +419,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
return 0;
}
- if (mwifiex_set_encode(priv, params->key, params->key_len,
+ if (mwifiex_set_encode(priv, params, params->key, params->key_len,
key_index, peer_mac, 0)) {
wiphy_err(wiphy, "crypto keys added\n");
return -EFAULT;
@@ -462,6 +674,76 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
return 0;
}
+static int
+mwifiex_cfg80211_deinit_p2p(struct mwifiex_private *priv)
+{
+ u16 mode = P2P_MODE_DISABLE;
+
+ if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA)
+ mwifiex_set_bss_role(priv, MWIFIEX_BSS_ROLE_STA);
+
+ if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
+ HostCmd_ACT_GEN_SET, 0, &mode))
+ return -1;
+
+ return 0;
+}
+
+/*
+ * This function initializes the functionality for a P2P client.
+ * The P2P client initialization sequence is:
+ * disable -> device -> client
+ */
+static int
+mwifiex_cfg80211_init_p2p_client(struct mwifiex_private *priv)
+{
+ u16 mode;
+
+ if (mwifiex_cfg80211_deinit_p2p(priv))
+ return -1;
+
+ mode = P2P_MODE_DEVICE;
+ if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
+ HostCmd_ACT_GEN_SET, 0, &mode))
+ return -1;
+
+ mode = P2P_MODE_CLIENT;
+ if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
+ HostCmd_ACT_GEN_SET, 0, &mode))
+ return -1;
+
+ return 0;
+}
+
+/*
+ * This function initializes the functionality for a P2P GO.
+ * The P2P GO initialization sequence is:
+ * disable -> device -> GO
+ */
+static int
+mwifiex_cfg80211_init_p2p_go(struct mwifiex_private *priv)
+{
+ u16 mode;
+
+ if (mwifiex_cfg80211_deinit_p2p(priv))
+ return -1;
+
+ mode = P2P_MODE_DEVICE;
+ if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
+ HostCmd_ACT_GEN_SET, 0, &mode))
+ return -1;
+
+ mode = P2P_MODE_GO;
+ if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
+ HostCmd_ACT_GEN_SET, 0, &mode))
+ return -1;
+
+ if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP)
+ mwifiex_set_bss_role(priv, MWIFIEX_BSS_ROLE_UAP);
+
+ return 0;
+}
+
/*
* CFG802.11 operation handler to change interface type.
*/
@@ -494,6 +776,16 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
switch (type) {
case NL80211_IFTYPE_ADHOC:
break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ if (mwifiex_cfg80211_init_p2p_client(priv))
+ return -EFAULT;
+ dev->ieee80211_ptr->iftype = type;
+ return 0;
+ case NL80211_IFTYPE_P2P_GO:
+ if (mwifiex_cfg80211_init_p2p_go(priv))
+ return -EFAULT;
+ dev->ieee80211_ptr->iftype = type;
+ return 0;
case NL80211_IFTYPE_UNSPECIFIED:
wiphy_warn(wiphy, "%s: kept type as STA\n", dev->name);
case NL80211_IFTYPE_STATION: /* This shouldn't happen */
@@ -519,6 +811,18 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
return -EOPNOTSUPP;
}
break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ if (mwifiex_cfg80211_deinit_p2p(priv))
+ return -EFAULT;
+ dev->ieee80211_ptr->iftype = type;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
default:
wiphy_err(wiphy, "%s: unknown iftype: %d\n",
dev->name, dev->ieee80211_ptr->iftype);
@@ -657,7 +961,6 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
}
/* Supported rates to be advertised to the cfg80211 */
-
static struct ieee80211_rate mwifiex_rates[] = {
{.bitrate = 10, .hw_value = 2, },
{.bitrate = 20, .hw_value = 4, },
@@ -674,7 +977,6 @@ static struct ieee80211_rate mwifiex_rates[] = {
};
/* Channel definitions to be advertised to cfg80211 */
-
static struct ieee80211_channel mwifiex_channels_2ghz[] = {
{.center_freq = 2412, .hw_value = 1, },
{.center_freq = 2417, .hw_value = 2, },
@@ -742,12 +1044,41 @@ static struct ieee80211_supported_band mwifiex_band_5ghz = {
/* Supported crypto cipher suits to be advertised to cfg80211 */
-
static const u32 mwifiex_cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
+ WLAN_CIPHER_SUITE_AES_CMAC,
+};
+
+/* Supported mgmt frame types to be advertised to cfg80211 */
+static const struct ieee80211_txrx_stypes
+mwifiex_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_STATION] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+ },
+ [NL80211_IFTYPE_AP] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+ },
};
/*
@@ -842,7 +1173,7 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
+ if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) {
wiphy_err(wiphy, "%s: bss_type mismatched\n", __func__);
return -EINVAL;
}
@@ -906,6 +1237,8 @@ static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
if (mwifiex_del_mgmt_ies(priv))
wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
+ priv->ap_11n_enabled = 0;
+
if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
HostCmd_ACT_GEN_SET, 0, NULL)) {
wiphy_err(wiphy, "Failed to stop the BSS\n");
@@ -928,7 +1261,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
u8 config_bands = 0;
- if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP)
+ if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP)
return -1;
if (mwifiex_set_mgmt_ies(priv, &params->beacon))
return -1;
@@ -965,15 +1298,18 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
bss_cfg->channel =
(u8)ieee80211_frequency_to_channel(params->channel->center_freq);
- bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
/* Set appropriate bands */
if (params->channel->band == IEEE80211_BAND_2GHZ) {
+ bss_cfg->band_cfg = BAND_CONFIG_BG;
+
if (params->channel_type == NL80211_CHAN_NO_HT)
config_bands = BAND_B | BAND_G;
else
config_bands = BAND_B | BAND_G | BAND_GN;
} else {
+ bss_cfg->band_cfg = BAND_CONFIG_A;
+
if (params->channel_type == NL80211_CHAN_NO_HT)
config_bands = BAND_A;
else
@@ -984,6 +1320,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
~priv->adapter->fw_bands))
priv->adapter->config_bands = config_bands;
+ mwifiex_set_uap_rates(bss_cfg, params);
mwifiex_send_domain_info_cmd_fw(wiphy);
if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
@@ -994,6 +1331,12 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
mwifiex_set_ht_params(priv, bss_cfg, params);
+ if (params->inactivity_timeout > 0) {
+ /* sta_ao_timer/ps_sta_ao_timer is in units of 100 ms */
+ bss_cfg->sta_ao_timer = 10 * params->inactivity_timeout;
+ bss_cfg->ps_sta_ao_timer = 10 * params->inactivity_timeout;
+ }
+
if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
HostCmd_ACT_GEN_SET, 0, NULL)) {
wiphy_err(wiphy, "Failed to stop the BSS\n");
@@ -1149,7 +1492,6 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
~priv->adapter->fw_bands))
priv->adapter->config_bands = config_bands;
}
- mwifiex_send_domain_info_cmd_fw(priv->wdev->wiphy);
}
/* As this is new association, clear locally stored
@@ -1159,7 +1501,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
priv->wep_key_curr_index = 0;
priv->sec_info.encryption_mode = 0;
priv->sec_info.is_authtype_auto = 0;
- ret = mwifiex_set_encode(priv, NULL, 0, 0, NULL, 1);
+ ret = mwifiex_set_encode(priv, NULL, NULL, 0, 0, NULL, 1);
if (mode == NL80211_IFTYPE_ADHOC) {
/* "privacy" is set only for ad-hoc mode */
@@ -1206,8 +1548,9 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
"info: setting wep encryption"
" with key len %d\n", sme->key_len);
priv->wep_key_curr_index = sme->key_idx;
- ret = mwifiex_set_encode(priv, sme->key, sme->key_len,
- sme->key_idx, NULL, 0);
+ ret = mwifiex_set_encode(priv, NULL, sme->key,
+ sme->key_len, sme->key_idx,
+ NULL, 0);
}
}
done:
@@ -1459,11 +1802,18 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
{
struct net_device *dev = request->wdev->netdev;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- int i;
+ int i, offset;
struct ieee80211_channel *chan;
+ struct ieee_types_header *ie;
wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
+ if (atomic_read(&priv->wmm.tx_pkts_queued) >=
+ MWIFIEX_MIN_TX_PENDING_TO_CANCEL_SCAN) {
+ dev_dbg(priv->adapter->dev, "scan rejected due to traffic\n");
+ return -EBUSY;
+ }
+
priv->scan_request = request;
priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
@@ -1477,13 +1827,17 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
priv->user_scan_cfg->ssid_list = request->ssids;
if (request->ie && request->ie_len) {
+ offset = 0;
for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) {
if (priv->vs_ie[i].mask != MWIFIEX_VSIE_MASK_CLEAR)
continue;
priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_SCAN;
- memcpy(&priv->vs_ie[i].ie, request->ie,
- request->ie_len);
- break;
+ ie = (struct ieee_types_header *)(request->ie + offset);
+ memcpy(&priv->vs_ie[i].ie, ie, sizeof(*ie) + ie->len);
+ offset += sizeof(*ie) + ie->len;
+
+ if (offset >= request->ie_len)
+ break;
}
}
@@ -1592,7 +1946,7 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
* create a new virtual interface with the given name
*/
struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
- char *name,
+ const char *name,
enum nl80211_iftype type,
u32 *flags,
struct vif_params *params)
@@ -1632,7 +1986,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
priv->bss_type = MWIFIEX_BSS_TYPE_STA;
priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
- priv->bss_priority = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_priority = 0;
priv->bss_role = MWIFIEX_BSS_ROLE_STA;
priv->bss_num = 0;
@@ -1655,13 +2009,48 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
- priv->bss_priority = MWIFIEX_BSS_ROLE_UAP;
+ priv->bss_priority = 0;
priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
priv->bss_started = 0;
priv->bss_num = 0;
priv->bss_mode = type;
break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ priv = adapter->priv[MWIFIEX_BSS_TYPE_P2P];
+
+ if (priv->bss_mode) {
+ wiphy_err(wiphy, "Can't create multiple P2P ifaces");
+ return ERR_PTR(-EINVAL);
+ }
+
+ wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+ if (!wdev)
+ return ERR_PTR(-ENOMEM);
+
+ priv->wdev = wdev;
+ wdev->wiphy = wiphy;
+
+ /* At start-up, wpa_supplicant tries to change the interface
+ * to NL80211_IFTYPE_STATION if it is not in managed mode.
+ * So, we initialize it to STA mode.
+ */
+ wdev->iftype = NL80211_IFTYPE_STATION;
+ priv->bss_mode = NL80211_IFTYPE_STATION;
+
+ /* Setting bss_type to P2P tells the firmware that this interface
+ * receives P2P peers found during the find phase and handles the
+ * action frame handshake.
+ */
+ priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
+
+ priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
+ priv->bss_priority = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_role = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_started = 0;
+ priv->bss_num = 0;
+
+ break;
default:
wiphy_err(wiphy, "type not supported\n");
return ERR_PTR(-EINVAL);
@@ -1769,6 +2158,10 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.leave_ibss = mwifiex_cfg80211_leave_ibss,
.add_key = mwifiex_cfg80211_add_key,
.del_key = mwifiex_cfg80211_del_key,
+ .mgmt_tx = mwifiex_cfg80211_mgmt_tx,
+ .mgmt_frame_register = mwifiex_cfg80211_mgmt_frame_register,
+ .remain_on_channel = mwifiex_cfg80211_remain_on_channel,
+ .cancel_remain_on_channel = mwifiex_cfg80211_cancel_remain_on_channel,
.set_default_key = mwifiex_cfg80211_set_default_key,
.set_power_mgmt = mwifiex_cfg80211_set_power_mgmt,
.set_tx_power = mwifiex_cfg80211_set_tx_power,
@@ -1805,8 +2198,12 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
}
wiphy->max_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH;
wiphy->max_scan_ie_len = MWIFIEX_MAX_VSIE_LEN;
+ wiphy->mgmt_stypes = mwifiex_mgmt_stypes;
+ wiphy->max_remain_on_channel_duration = 5000;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_AP);
wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz;
@@ -1825,15 +2222,21 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
memcpy(wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
- WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
+ WIPHY_FLAG_CUSTOM_REGULATORY |
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+
+ wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom);
wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
- NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2;
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
- wiphy->features = NL80211_FEATURE_HT_IBSS;
+ wiphy->features = NL80211_FEATURE_HT_IBSS |
+ NL80211_FEATURE_INACTIVITY_TIMER;
/* Reserve space for mwifiex specific private data for BSS */
wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
@@ -1854,8 +2257,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
return ret;
}
country_code = mwifiex_11d_code_2_region(priv->adapter->region_code);
- if (country_code && regulatory_hint(wiphy, country_code))
- dev_err(adapter->dev, "regulatory_hint() failed\n");
+ if (country_code)
+ dev_info(adapter->dev,
+ "ignoring F/W country code %2.2s\n", country_code);
adapter->wiphy = wiphy;
return ret;
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 565527aee0ea..8d465107f52b 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -460,7 +460,10 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
}
- ret = mwifiex_process_sta_event(priv);
+ if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+ ret = mwifiex_process_uap_event(priv);
+ else
+ ret = mwifiex_process_sta_event(priv);
adapter->event_cause = 0;
adapter->event_skb = NULL;
@@ -1085,6 +1088,8 @@ mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
if (activated) {
if (priv->adapter->is_hs_configured) {
priv->adapter->hs_activated = true;
+ mwifiex_update_rxreor_flags(priv->adapter,
+ RXREOR_FORCE_NO_DROP);
dev_dbg(priv->adapter->dev, "event: hs_activated\n");
priv->adapter->hs_activate_wait_q_woken = true;
wake_up_interruptible(
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 070ef25f5186..e9357d87d327 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -28,11 +28,14 @@
#include <linux/ieee80211.h>
-#define MWIFIEX_MAX_BSS_NUM (2)
+#define MWIFIEX_MAX_BSS_NUM (3)
#define MWIFIEX_MIN_DATA_HEADER_LEN 36 /* sizeof(mwifiex_txpd)
* + 4 byte alignment
*/
+#define MWIFIEX_MGMT_FRAME_HEADER_SIZE 8 /* sizeof(pkt_type)
+ * + sizeof(tx_control)
+ */
#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2
#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16
@@ -60,10 +63,14 @@
#define MWIFIEX_SDIO_BLOCK_SIZE 256
#define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0)
+#define MWIFIEX_BUF_FLAG_BRIDGED_PKT BIT(1)
+
+#define MWIFIEX_BRIDGED_PKTS_THRESHOLD 1024
enum mwifiex_bss_type {
MWIFIEX_BSS_TYPE_STA = 0,
MWIFIEX_BSS_TYPE_UAP = 1,
+ MWIFIEX_BSS_TYPE_P2P = 2,
MWIFIEX_BSS_TYPE_ANY = 0xff,
};
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index e831b440a24a..dda588b35570 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -65,10 +65,12 @@ enum KEY_TYPE_ID {
KEY_TYPE_ID_TKIP,
KEY_TYPE_ID_AES,
KEY_TYPE_ID_WAPI,
+ KEY_TYPE_ID_AES_CMAC,
};
#define KEY_MCAST BIT(0)
#define KEY_UNICAST BIT(1)
#define KEY_ENABLED BIT(2)
+#define KEY_IGTK BIT(10)
#define WAPI_KEY_LEN 50
@@ -92,6 +94,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
};
#define CAL_SNR(RSSI, NF) ((s16)((s16)(RSSI)-(s16)(NF)))
+#define CAL_RSSI(SNR, NF) ((s16)((s16)(SNR)+(s16)(NF)))
#define UAP_BSS_PARAMS_I 0
#define UAP_CUSTOM_IE_I 1
@@ -106,6 +109,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define MGMT_MASK_BEACON 0x100
#define TLV_TYPE_UAP_SSID 0x0000
+#define TLV_TYPE_UAP_RATES 0x0001
#define PROPRIETARY_TLV_BASE_ID 0x0100
#define TLV_TYPE_KEY_MATERIAL (PROPRIETARY_TLV_BASE_ID + 0)
@@ -124,6 +128,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
#define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48)
#define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51)
+#define TLV_TYPE_UAP_AO_TIMER (PROPRIETARY_TLV_BASE_ID + 57)
#define TLV_TYPE_UAP_WEP_KEY (PROPRIETARY_TLV_BASE_ID + 59)
#define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60)
#define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64)
@@ -138,6 +143,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_MGMT_IE (PROPRIETARY_TLV_BASE_ID + 105)
#define TLV_TYPE_AUTO_DS_PARAM (PROPRIETARY_TLV_BASE_ID + 113)
#define TLV_TYPE_PS_PARAM (PROPRIETARY_TLV_BASE_ID + 114)
+#define TLV_TYPE_UAP_PS_AO_TIMER (PROPRIETARY_TLV_BASE_ID + 123)
#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145)
#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
@@ -257,9 +263,12 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_TX_RATE_CFG 0x00d6
#define HostCmd_CMD_802_11_PS_MODE_ENH 0x00e4
#define HostCmd_CMD_802_11_HS_CFG_ENH 0x00e5
+#define HostCmd_CMD_P2P_MODE_CFG 0x00eb
#define HostCmd_CMD_CAU_REG_ACCESS 0x00ed
#define HostCmd_CMD_SET_BSS_MODE 0x00f7
#define HostCmd_CMD_PCIE_DESC_DETAILS 0x00fa
+#define HostCmd_CMD_MGMT_FRAME_REG 0x010c
+#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d
#define PROTOCOL_NO_SECURITY 0x01
#define PROTOCOL_STATIC_WEP 0x02
@@ -285,9 +294,17 @@ enum ENH_PS_MODES {
DIS_AUTO_PS = 0xfe,
};
+enum P2P_MODES {
+ P2P_MODE_DISABLE = 0,
+ P2P_MODE_DEVICE = 1,
+ P2P_MODE_GO = 2,
+ P2P_MODE_CLIENT = 3,
+};
+
#define HostCmd_RET_BIT 0x8000
#define HostCmd_ACT_GEN_GET 0x0000
#define HostCmd_ACT_GEN_SET 0x0001
+#define HostCmd_ACT_GEN_REMOVE 0x0004
#define HostCmd_ACT_BITWISE_SET 0x0002
#define HostCmd_ACT_BITWISE_CLR 0x0003
#define HostCmd_RESULT_OK 0x0000
@@ -307,7 +324,7 @@ enum ENH_PS_MODES {
#define HostCmd_SCAN_RADIO_TYPE_A 1
#define HOST_SLEEP_CFG_CANCEL 0xffffffff
-#define HOST_SLEEP_CFG_COND_DEF 0x0000000f
+#define HOST_SLEEP_CFG_COND_DEF 0x00000000
#define HOST_SLEEP_CFG_GPIO_DEF 0xff
#define HOST_SLEEP_CFG_GAP_DEF 0
@@ -385,6 +402,7 @@ enum ENH_PS_MODES {
#define EVENT_BW_CHANGE 0x00000048
#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c
#define EVENT_HOSTWAKE_STAIE 0x0000004d
+#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
#define EVENT_ID_MASK 0xffff
#define BSS_NUM_MASK 0xf
@@ -424,10 +442,10 @@ struct txpd {
struct rxpd {
u8 bss_type;
u8 bss_num;
- u16 rx_pkt_length;
- u16 rx_pkt_offset;
- u16 rx_pkt_type;
- u16 seq_num;
+ __le16 rx_pkt_length;
+ __le16 rx_pkt_offset;
+ __le16 rx_pkt_type;
+ __le16 seq_num;
u8 priority;
u8 rx_rate;
s8 snr;
@@ -439,6 +457,31 @@ struct rxpd {
u8 reserved;
} __packed;
+struct uap_txpd {
+ u8 bss_type;
+ u8 bss_num;
+ __le16 tx_pkt_length;
+ __le16 tx_pkt_offset;
+ __le16 tx_pkt_type;
+ __le32 tx_control;
+ u8 priority;
+ u8 flags;
+ u8 pkt_delay_2ms;
+ u8 reserved1;
+ __le32 reserved2;
+};
+
+struct uap_rxpd {
+ u8 bss_type;
+ u8 bss_num;
+ __le16 rx_pkt_length;
+ __le16 rx_pkt_offset;
+ __le16 rx_pkt_type;
+ __le16 seq_num;
+ u8 priority;
+ u8 reserved1;
+};
+
enum mwifiex_chan_scan_mode_bitmasks {
MWIFIEX_PASSIVE_SCAN = BIT(0),
MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
@@ -558,6 +601,13 @@ struct mwifiex_ie_type_key_param_set {
u8 key[50];
} __packed;
+#define IGTK_PN_LEN 8
+
+struct mwifiex_cmac_param {
+ u8 ipn[IGTK_PN_LEN];
+ u8 key[WLAN_KEY_LEN_AES_CMAC];
+} __packed;
+
struct host_cmd_ds_802_11_key_material {
__le16 action;
struct mwifiex_ie_type_key_param_set key_param_set;
@@ -1250,6 +1300,11 @@ struct host_cmd_tlv_ssid {
u8 ssid[0];
} __packed;
+struct host_cmd_tlv_rates {
+ struct host_cmd_tlv tlv;
+ u8 rates[0];
+} __packed;
+
struct host_cmd_tlv_bcast_ssid {
struct host_cmd_tlv tlv;
u8 bcast_ctl;
@@ -1291,11 +1346,35 @@ struct host_cmd_tlv_channel_band {
u8 channel;
} __packed;
+struct host_cmd_tlv_ageout_timer {
+ struct host_cmd_tlv tlv;
+ __le32 sta_ao_timer;
+} __packed;
+
struct host_cmd_ds_version_ext {
u8 version_str_sel;
char version_str[128];
} __packed;
+struct host_cmd_ds_mgmt_frame_reg {
+ __le16 action;
+ __le32 mask;
+} __packed;
+
+struct host_cmd_ds_p2p_mode_cfg {
+ __le16 action;
+ __le16 mode;
+} __packed;
+
+struct host_cmd_ds_remain_on_chan {
+ __le16 action;
+ u8 status;
+ u8 reserved;
+ u8 band_cfg;
+ u8 channel;
+ __le32 duration;
+} __packed;
+
struct host_cmd_ds_802_11_ibss_status {
__le16 action;
__le16 enable;
@@ -1307,6 +1386,7 @@ struct host_cmd_ds_802_11_ibss_status {
#define CONNECTION_TYPE_INFRA 0
#define CONNECTION_TYPE_ADHOC 1
+#define CONNECTION_TYPE_AP 2
struct host_cmd_ds_set_bss_mode {
u8 con_type;
@@ -1404,6 +1484,9 @@ struct host_cmd_ds_command {
struct host_cmd_ds_wmm_get_status get_wmm_status;
struct host_cmd_ds_802_11_key_material key_material;
struct host_cmd_ds_version_ext verext;
+ struct host_cmd_ds_mgmt_frame_reg reg_mask;
+ struct host_cmd_ds_remain_on_chan roc_cfg;
+ struct host_cmd_ds_p2p_mode_cfg mode_cfg;
struct host_cmd_ds_802_11_ibss_status ibss_coalescing;
struct host_cmd_ds_mac_reg_access mac_reg;
struct host_cmd_ds_bbp_reg_access bbp_reg;
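Note on the rxpd change above: the 16-bit descriptor fields become __le16, so every consumer has to convert them with le16_to_cpu() before doing arithmetic on them (the sta_rx.c hunk later in this patch does exactly that). Below is a minimal user-space sketch of the same pattern, assuming glibc's le16toh() in place of the kernel helper; struct demo_rxpd is an invented stand-in, not the driver's real layout.

/* Stand-in for a firmware descriptor whose 16-bit fields are stored
 * little-endian on the wire. Illustrative only. */
#define _DEFAULT_SOURCE		/* for le16toh() on glibc */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_rxpd {
	uint8_t  bss_type;
	uint8_t  bss_num;
	uint16_t rx_pkt_length;	/* little-endian on the wire */
	uint16_t rx_pkt_offset;	/* little-endian on the wire */
} __attribute__((packed));

int main(void)
{
	uint8_t raw[64] = { 0 };
	struct demo_rxpd pd;
	unsigned int len, off;

	/* Firmware writes little-endian values into the buffer. */
	raw[2] = 0x2a;	/* rx_pkt_length = 42 */
	raw[4] = 0x08;	/* rx_pkt_offset = 8  */
	memcpy(&pd, raw, sizeof(pd));

	/* Convert before arithmetic; a raw u16 read would yield 0x2a00
	 * instead of 0x002a on a big-endian host. */
	len = le16toh(pd.rx_pkt_length);
	off = le16toh(pd.rx_pkt_offset);

	if (off + len > sizeof(raw))
		fprintf(stderr, "wrong rx packet: offset=%u length=%u\n",
			off, len);
	else
		printf("payload at offset %u, %u bytes\n", off, len);
	return 0;
}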
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index 1d8dd003e396..e38342f86c51 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -114,9 +114,6 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
cpu_to_le16(mask);
ie->ie_index = cpu_to_le16(index);
- ie->ie_length = priv->mgmt_ie[index].ie_length;
- memcpy(&ie->ie_buffer, &priv->mgmt_ie[index].ie_buffer,
- le16_to_cpu(priv->mgmt_ie[index].ie_length));
} else {
if (mask != MWIFIEX_DELETE_MASK)
return -1;
@@ -160,7 +157,7 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
u16 len;
int ret;
- ap_custom_ie = kzalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ ap_custom_ie = kzalloc(sizeof(*ap_custom_ie), GFP_KERNEL);
if (!ap_custom_ie)
return -ENOMEM;
@@ -214,30 +211,35 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
return ret;
}
-/* This function checks if WPS IE is present in passed buffer and copies it to
- * mwifiex_ie structure.
+/* This function checks if the vendor-specified IE is present in the passed
+ * buffer and copies it to the mwifiex_ie structure.
* Function takes pointer to struct mwifiex_ie pointer as argument.
- * If WPS IE is present memory is allocated for mwifiex_ie pointer and filled
- * in with WPS IE. Caller should take care of freeing this memory.
+ * If the vendor-specified IE is present, memory is allocated for the
+ * mwifiex_ie pointer and filled in with the IE. The caller should take
+ * care of freeing this memory.
*/
-static int mwifiex_update_wps_ie(const u8 *ies, int ies_len,
- struct mwifiex_ie **ie_ptr, u16 mask)
+static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
+ struct mwifiex_ie **ie_ptr, u16 mask,
+ unsigned int oui, u8 oui_type)
{
- struct ieee_types_header *wps_ie;
- struct mwifiex_ie *ie = NULL;
+ struct ieee_types_header *vs_ie;
+ struct mwifiex_ie *ie = *ie_ptr;
const u8 *vendor_ie;
- vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WPS,
- ies, ies_len);
+ vendor_ie = cfg80211_find_vendor_ie(oui, oui_type, ies, ies_len);
if (vendor_ie) {
- ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
- if (!ie)
- return -ENOMEM;
+ if (!*ie_ptr) {
+ *ie_ptr = kzalloc(sizeof(struct mwifiex_ie),
+ GFP_KERNEL);
+ if (!*ie_ptr)
+ return -ENOMEM;
+ ie = *ie_ptr;
+ }
- wps_ie = (struct ieee_types_header *)vendor_ie;
- memcpy(ie->ie_buffer, wps_ie, wps_ie->len + 2);
- ie->ie_length = cpu_to_le16(wps_ie->len + 2);
+ vs_ie = (struct ieee_types_header *)vendor_ie;
+ memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
+ vs_ie, vs_ie->len + 2);
+ le16_add_cpu(&ie->ie_length, vs_ie->len + 2);
ie->mgmt_subtype_mask = cpu_to_le16(mask);
ie->ie_index = cpu_to_le16(MWIFIEX_AUTO_IDX_MASK);
}
@@ -257,20 +259,40 @@ static int mwifiex_set_mgmt_beacon_data_ies(struct mwifiex_private *priv,
u16 ar_idx = MWIFIEX_AUTO_IDX_MASK;
int ret = 0;
- if (data->beacon_ies && data->beacon_ies_len)
- mwifiex_update_wps_ie(data->beacon_ies, data->beacon_ies_len,
- &beacon_ie, MGMT_MASK_BEACON);
+ if (data->beacon_ies && data->beacon_ies_len) {
+ mwifiex_update_vs_ie(data->beacon_ies, data->beacon_ies_len,
+ &beacon_ie, MGMT_MASK_BEACON,
+ WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPS);
+ mwifiex_update_vs_ie(data->beacon_ies, data->beacon_ies_len,
+ &beacon_ie, MGMT_MASK_BEACON,
+ WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P);
+ }
- if (data->proberesp_ies && data->proberesp_ies_len)
- mwifiex_update_wps_ie(data->proberesp_ies,
- data->proberesp_ies_len, &pr_ie,
- MGMT_MASK_PROBE_RESP);
+ if (data->proberesp_ies && data->proberesp_ies_len) {
+ mwifiex_update_vs_ie(data->proberesp_ies,
+ data->proberesp_ies_len, &pr_ie,
+ MGMT_MASK_PROBE_RESP, WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPS);
+ mwifiex_update_vs_ie(data->proberesp_ies,
+ data->proberesp_ies_len, &pr_ie,
+ MGMT_MASK_PROBE_RESP,
+ WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P);
+ }
- if (data->assocresp_ies && data->assocresp_ies_len)
- mwifiex_update_wps_ie(data->assocresp_ies,
- data->assocresp_ies_len, &ar_ie,
- MGMT_MASK_ASSOC_RESP |
- MGMT_MASK_REASSOC_RESP);
+ if (data->assocresp_ies && data->assocresp_ies_len) {
+ mwifiex_update_vs_ie(data->assocresp_ies,
+ data->assocresp_ies_len, &ar_ie,
+ MGMT_MASK_ASSOC_RESP |
+ MGMT_MASK_REASSOC_RESP,
+ WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPS);
+ mwifiex_update_vs_ie(data->assocresp_ies,
+ data->assocresp_ies_len, &ar_ie,
+ MGMT_MASK_ASSOC_RESP |
+ MGMT_MASK_REASSOC_RESP, WLAN_OUI_WFA,
+ WLAN_OUI_TYPE_WFA_P2P);
+ }
if (beacon_ie || pr_ie || ar_ie) {
ret = mwifiex_update_uap_custom_ie(priv, beacon_ie,
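The WPS-only helper above becomes a generic vendor-IE collector: it is now called once per OUI (Microsoft WPS 00:50:F2 type 4, then WFA P2P 50:6F:9A type 9) and appends each match at ie_buffer + ie_length, growing the running length with le16_add_cpu(), so one mwifiex_ie can carry several vendor IEs per frame type. A stand-alone sketch of that append pattern, with invented types and toy IE payloads:

/* Appending several (id, len, data) IEs into one buffer while keeping
 * a running length, as the generalized helper does. Not the driver's
 * structures. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ie_buf {
	uint8_t  data[256];
	uint16_t len;		/* running total of bytes used */
};

/* Append one IE (2-byte header + payload); return 0 on success. */
static int ie_append(struct ie_buf *b, const uint8_t *ie)
{
	uint16_t ie_len = 2 + ie[1];	/* header + body, as in the patch */

	if (b->len + ie_len > sizeof(b->data))
		return -1;
	memcpy(b->data + b->len, ie, ie_len);
	b->len += ie_len;		/* le16_add_cpu() in the driver */
	return 0;
}

int main(void)
{
	/* Two toy vendor IEs: id 0xdd, length, then payload bytes. */
	const uint8_t wps_like[] = { 0xdd, 0x04, 0x00, 0x50, 0xf2, 0x04 };
	const uint8_t p2p_like[] = { 0xdd, 0x04, 0x50, 0x6f, 0x9a, 0x09 };
	struct ie_buf b = { .len = 0 };

	ie_append(&b, wps_like);
	ie_append(&b, p2p_like);
	printf("buffer now holds %u bytes of IEs\n", (unsigned)b.len);
	return 0;
}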
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 21fdc6c02775..b5d37a8caa09 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -64,60 +64,77 @@ static void scan_delay_timer_fn(unsigned long data)
struct cmd_ctrl_node *cmd_node, *tmp_node;
unsigned long flags;
- if (!mwifiex_wmm_lists_empty(adapter)) {
- if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
+ if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
+ /*
+ * Abort scan operation by cancelling all pending scan
+ * commands
+ */
+ spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ list_for_each_entry_safe(cmd_node, tmp_node,
+ &adapter->scan_pending_q, list) {
+ list_del(&cmd_node->list);
+ mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+ }
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+
+ spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ adapter->scan_processing = false;
+ adapter->scan_delay_cnt = 0;
+ adapter->empty_tx_q_cnt = 0;
+ spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+
+ if (priv->user_scan_cfg) {
+ dev_dbg(priv->adapter->dev,
+ "info: %s: scan aborted\n", __func__);
+ cfg80211_scan_done(priv->scan_request, 1);
+ priv->scan_request = NULL;
+ kfree(priv->user_scan_cfg);
+ priv->user_scan_cfg = NULL;
+ }
+
+ if (priv->scan_pending_on_block) {
+ priv->scan_pending_on_block = false;
+ up(&priv->async_sem);
+ }
+ goto done;
+ }
+
+ if (!atomic_read(&priv->adapter->is_tx_received)) {
+ adapter->empty_tx_q_cnt++;
+ if (adapter->empty_tx_q_cnt == MWIFIEX_MAX_EMPTY_TX_Q_CNT) {
/*
- * Abort scan operation by cancelling all pending scan
- * command
+ * No Tx traffic for 200msec. Get scan command from
+ * scan pending queue and put to cmd pending queue to
+ * resume scan operation
*/
+ adapter->scan_delay_cnt = 0;
+ adapter->empty_tx_q_cnt = 0;
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
- list_for_each_entry_safe(cmd_node, tmp_node,
- &adapter->scan_pending_q,
- list) {
- list_del(&cmd_node->list);
- cmd_node->wait_q_enabled = false;
- mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
- }
+ cmd_node = list_first_entry(&adapter->scan_pending_q,
+ struct cmd_ctrl_node, list);
+ list_del(&cmd_node->list);
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
flags);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
- adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock,
- flags);
-
- if (priv->user_scan_cfg) {
- dev_dbg(priv->adapter->dev,
- "info: %s: scan aborted\n", __func__);
- cfg80211_scan_done(priv->scan_request, 1);
- priv->scan_request = NULL;
- kfree(priv->user_scan_cfg);
- priv->user_scan_cfg = NULL;
- }
- } else {
- /*
- * Tx data queue is still not empty, delay scan
- * operation further by 20msec.
- */
- mod_timer(&priv->scan_delay_timer, jiffies +
- msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
- adapter->scan_delay_cnt++;
+ mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
+ true);
+ queue_work(adapter->workqueue, &adapter->main_work);
+ goto done;
}
- queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
} else {
- /*
- * Tx data queue is empty. Get scan command from scan_pending_q
- * and put to cmd_pending_q to resume scan operation
- */
- adapter->scan_delay_cnt = 0;
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
- cmd_node = list_first_entry(&adapter->scan_pending_q,
- struct cmd_ctrl_node, list);
- list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
-
- mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
+ adapter->empty_tx_q_cnt = 0;
}
+
+ /* Delay scan operation further by 20msec */
+ mod_timer(&priv->scan_delay_timer, jiffies +
+ msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
+ adapter->scan_delay_cnt++;
+
+done:
+ if (atomic_read(&priv->adapter->is_tx_received))
+ atomic_set(&priv->adapter->is_tx_received, false);
+
+ return;
}
/*
@@ -127,7 +144,7 @@ static void scan_delay_timer_fn(unsigned long data)
* Additionally, it also initializes all the locks and sets up all the
* lists.
*/
-static int mwifiex_init_priv(struct mwifiex_private *priv)
+int mwifiex_init_priv(struct mwifiex_private *priv)
{
u32 i;
@@ -196,6 +213,8 @@ static int mwifiex_init_priv(struct mwifiex_private *priv)
priv->curr_bcn_size = 0;
priv->wps_ie = NULL;
priv->wps_ie_len = 0;
+ priv->ap_11n_enabled = 0;
+ memset(&priv->roc_cfg, 0, sizeof(priv->roc_cfg));
priv->scan_block = false;
@@ -345,6 +364,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
adapter->arp_filter_size = 0;
adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
+ adapter->empty_tx_q_cnt = 0;
}
/*
@@ -410,6 +430,7 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
list_del(&priv->wmm.tid_tbl_ptr[j].ra_list);
list_del(&priv->tx_ba_stream_tbl_ptr);
list_del(&priv->rx_reorder_tbl_ptr);
+ list_del(&priv->sta_list);
}
}
}
@@ -472,6 +493,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
spin_lock_init(&priv->rx_pkt_lock);
spin_lock_init(&priv->wmm.ra_list_spinlock);
spin_lock_init(&priv->curr_bcn_buf_lock);
+ spin_lock_init(&priv->sta_list_spinlock);
}
}
@@ -504,6 +526,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
}
INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
+ INIT_LIST_HEAD(&priv->sta_list);
spin_lock_init(&priv->tx_ba_stream_tbl_lock);
spin_lock_init(&priv->rx_reorder_tbl_lock);
@@ -626,6 +649,17 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
}
/*
+ * This function frees the resources held by the private structure,
+ * cleaning up the TX and RX queues and freeing the BSS priority tables.
+ */
+void mwifiex_free_priv(struct mwifiex_private *priv)
+{
+ mwifiex_clean_txrx(priv);
+ mwifiex_delete_bss_prio_tbl(priv);
+ mwifiex_free_curr_bcn(priv);
+}
+
+/*
* This function is used to shutdown the driver.
*
* The following operations are performed sequentially -
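The reworked scan_delay_timer_fn() above boils down to a three-way decision taken on every 20 ms tick: abort the scan after MWIFIEX_MAX_SCAN_DELAY_CNT (50) deferrals, resume it once MWIFIEX_MAX_EMPTY_TX_Q_CNT (10) consecutive ticks, roughly 200 ms, have seen no Tx traffic, and otherwise re-arm the timer. A sketch of just that decision as a pure function, with the command queue manipulation and locking omitted:

/* Decision logic of the scan-delay timer; the driver additionally
 * cancels or requeues the actual scan commands, which is omitted. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_SCAN_DELAY_CNT 50	/* MWIFIEX_MAX_SCAN_DELAY_CNT */
#define MAX_EMPTY_TX_Q_CNT 10	/* MWIFIEX_MAX_EMPTY_TX_Q_CNT */

enum scan_delay_action { SCAN_ABORT, SCAN_RESUME, SCAN_DEFER };

static enum scan_delay_action
scan_delay_tick(unsigned int *delay_cnt, unsigned int *empty_cnt,
		bool tx_seen_since_last_tick)
{
	if (*delay_cnt == MAX_SCAN_DELAY_CNT)
		return SCAN_ABORT;		/* cancel pending scan cmds */

	if (!tx_seen_since_last_tick) {
		if (++(*empty_cnt) == MAX_EMPTY_TX_Q_CNT) {
			*delay_cnt = 0;
			*empty_cnt = 0;
			return SCAN_RESUME;	/* ~200 ms with no Tx */
		}
	} else {
		*empty_cnt = 0;			/* Tx still flowing */
	}

	++(*delay_cnt);
	return SCAN_DEFER;			/* re-arm the 20 ms timer */
}

int main(void)
{
	unsigned int delay = 0, empty = 0, ticks = 0;
	enum scan_delay_action a;

	/* With no Tx at all, ten quiet 20 ms ticks resume the scan. */
	do {
		ticks++;
		a = scan_delay_tick(&delay, &empty, false);
	} while (a == SCAN_DEFER);

	printf("%s after %u ticks (~%u ms)\n",
	       a == SCAN_RESUME ? "resumed" : "aborted", ticks, ticks * 20);
	return 0;
}

The is_tx_received flag in the driver plays the role of tx_seen_since_last_tick and is cleared at the end of every tick.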
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 50191539bb32..4e31c6013ebe 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -81,7 +81,11 @@ struct wep_key {
#define KEY_MGMT_ON_HOST 0x03
#define MWIFIEX_AUTH_MODE_AUTO 0xFF
-#define BAND_CONFIG_MANUAL 0x00
+#define BAND_CONFIG_BG 0x00
+#define BAND_CONFIG_A 0x01
+#define MWIFIEX_SUPPORTED_RATES 14
+#define MWIFIEX_SUPPORTED_RATES_EXT 32
+
struct mwifiex_uap_bss_param {
u8 channel;
u8 band_cfg;
@@ -100,6 +104,9 @@ struct mwifiex_uap_bss_param {
struct wpa_param wpa_cfg;
struct wep_key wep_cfg[NUM_WEP_KEYS];
struct ieee80211_ht_cap ht_cap;
+ u8 rates[MWIFIEX_SUPPORTED_RATES];
+ u32 sta_ao_timer;
+ u32 ps_sta_ao_timer;
};
enum {
@@ -213,7 +220,7 @@ struct mwifiex_debug_info {
};
#define MWIFIEX_KEY_INDEX_UNICAST 0x40000000
-#define WAPI_RXPN_LEN 16
+#define PN_LEN 16
struct mwifiex_ds_encrypt_key {
u32 key_disable;
@@ -222,7 +229,8 @@ struct mwifiex_ds_encrypt_key {
u8 key_material[WLAN_MAX_KEY_LEN];
u8 mac_addr[ETH_ALEN];
u32 is_wapi_key;
- u8 wapi_rxpn[WAPI_RXPN_LEN];
+ u8 pn[PN_LEN]; /* packet number */
+ u8 is_igtk_key;
};
struct mwifiex_power_cfg {
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 46803621d015..eb22dd248d54 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -72,7 +72,6 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
goto error;
adapter->priv[i]->adapter = adapter;
- adapter->priv[i]->bss_priority = i;
adapter->priv_num++;
}
mwifiex_init_lock_list(adapter);
@@ -370,6 +369,13 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
dev_err(adapter->dev, "cannot create default AP interface\n");
goto err_add_intf;
}
+
+ /* Create P2P interface by default */
+ if (!mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d",
+ NL80211_IFTYPE_P2P_CLIENT, NULL, NULL)) {
+ dev_err(adapter->dev, "cannot create default P2P interface\n");
+ goto err_add_intf;
+ }
rtnl_unlock();
mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
@@ -470,6 +476,27 @@ mwifiex_close(struct net_device *dev)
}
/*
+ * Add buffer into wmm tx queue and queue work to transmit it.
+ */
+int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
+{
+ mwifiex_wmm_add_buf_txqueue(priv, skb);
+ atomic_inc(&priv->adapter->tx_pending);
+
+ if (priv->adapter->scan_delay_cnt)
+ atomic_set(&priv->adapter->is_tx_received, true);
+
+ if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
+ mwifiex_set_trans_start(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
+ }
+
+ queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
+
+ return 0;
+}
+
+/*
* CFG802.11 network device handler for data transmission.
*/
static int
@@ -517,15 +544,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->bss_type = priv->bss_type;
mwifiex_fill_buffer(skb);
- mwifiex_wmm_add_buf_txqueue(priv, skb);
- atomic_inc(&priv->adapter->tx_pending);
-
- if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
- mwifiex_set_trans_start(dev);
- mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
- }
-
- queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
+ mwifiex_queue_tx_pkt(priv, skb);
return 0;
}
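mwifiex_queue_tx_pkt() above centralizes the Tx enqueue path: add the buffer to the WMM queues, bump tx_pending, and stop the netdev queue once the counter reaches MAX_TX_PENDING; the completion path (see the txrx.c hunk later in this patch) decrements the counter and only wakes the queue once it drops below LOW_TX_PENDING. A sketch of that high/low watermark scheme; the threshold values are invented, since the MAX_TX_PENDING/LOW_TX_PENDING definitions are not part of this hunk.

/* High/low watermark flow control around a pending-packet counter. */
#include <stdbool.h>
#include <stdio.h>

#define HIGH_WATERMARK 100	/* stand-in for MAX_TX_PENDING */
#define LOW_WATERMARK   80	/* stand-in for LOW_TX_PENDING */

struct txq {
	int pending;
	bool stopped;
};

static void txq_enqueue(struct txq *q)
{
	q->pending++;
	if (q->pending >= HIGH_WATERMARK)
		q->stopped = true;	/* mwifiex_stop_net_dev_queue() */
}

static void txq_complete(struct txq *q)
{
	q->pending--;
	if (q->stopped && q->pending < LOW_WATERMARK)
		q->stopped = false;	/* wake the netdev queue */
}

int main(void)
{
	struct txq q = { 0, false };
	int i;

	for (i = 0; i < 100; i++)
		txq_enqueue(&q);
	printf("stopped=%d at pending=%d\n", q.stopped, q.pending);

	while (q.stopped)
		txq_complete(&q);
	printf("restarted at pending=%d\n", q.pending);
	return 0;
}

Keeping the wake threshold below the stop threshold gives hysteresis, so the queue is not started and stopped on every single completion.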
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index e7c2a82fd610..bfb3fa69805c 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -88,13 +88,18 @@ enum {
#define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
#define MWIFIEX_MAX_SCAN_DELAY_CNT 50
+#define MWIFIEX_MAX_EMPTY_TX_Q_CNT 10
#define MWIFIEX_SCAN_DELAY_MSEC 20
+#define MWIFIEX_MIN_TX_PENDING_TO_CANCEL_SCAN 2
+
#define RSN_GTK_OUI_OFFSET 2
#define MWIFIEX_OUI_NOT_PRESENT 0
#define MWIFIEX_OUI_PRESENT 1
+#define PKT_TYPE_MGMT 0xE5
+
/*
* Do not check for data_received for USB, as data_received
* is handled in mwifiex_usb_recv for USB
@@ -115,6 +120,7 @@ enum {
#define MAX_BITMAP_RATES_SIZE 10
#define MAX_CHANNEL_BAND_BG 14
+#define MAX_CHANNEL_BAND_A 165
#define MAX_FREQUENCY_BAND_BG 2484
@@ -199,6 +205,9 @@ struct mwifiex_ra_list_tbl {
u8 ra[ETH_ALEN];
u32 total_pkts_size;
u32 is_11n_enabled;
+ u16 max_amsdu;
+ u16 pkt_count;
+ u8 ba_packet_thr;
};
struct mwifiex_tid_tbl {
@@ -245,10 +254,6 @@ struct ieee_types_header {
u8 len;
} __packed;
-#define MWIFIEX_SUPPORTED_RATES 14
-
-#define MWIFIEX_SUPPORTED_RATES_EXT 32
-
struct ieee_types_vendor_specific {
struct ieee_types_vendor_header vend_hdr;
u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_vendor_header)];
@@ -365,6 +370,12 @@ struct wps {
u8 session_enable;
};
+struct mwifiex_roc_cfg {
+ u64 cookie;
+ struct ieee80211_channel chan;
+ enum nl80211_channel_type chan_type;
+};
+
struct mwifiex_adapter;
struct mwifiex_private;
@@ -431,6 +442,9 @@ struct mwifiex_private {
u8 wmm_enabled;
u8 wmm_qosinfo;
struct mwifiex_wmm_desc wmm;
+ struct list_head sta_list;
+ /* spin lock for associated station list */
+ spinlock_t sta_list_spinlock;
struct list_head tx_ba_stream_tbl_ptr;
/* spin lock for tx_ba_stream_tbl_ptr queue */
spinlock_t tx_ba_stream_tbl_lock;
@@ -480,12 +494,16 @@ struct mwifiex_private {
s32 cqm_rssi_thold;
u32 cqm_rssi_hyst;
u8 subsc_evt_rssi_state;
+ struct mwifiex_ds_misc_subsc_evt async_subsc_evt_storage;
struct mwifiex_ie mgmt_ie[MAX_MGMT_IE_INDEX];
u16 beacon_idx;
u16 proberesp_idx;
u16 assocresp_idx;
u16 rsn_idx;
struct timer_list scan_delay_timer;
+ u8 ap_11n_enabled;
+ u32 mgmt_frame_mask;
+ struct mwifiex_roc_cfg roc_cfg;
};
enum mwifiex_ba_status {
@@ -517,6 +535,7 @@ struct mwifiex_rx_reorder_tbl {
int win_size;
void **rx_reorder_ptr;
struct reorder_tmr_cnxt timer_context;
+ u8 flags;
};
struct mwifiex_bss_prio_node {
@@ -550,6 +569,19 @@ struct mwifiex_bss_priv {
u64 fw_tsf;
};
+/* This is an AP-specific structure which stores information
+ * about an associated STA.
+ */
+struct mwifiex_sta_node {
+ struct list_head list;
+ u8 mac_addr[ETH_ALEN];
+ u8 is_wmm_enabled;
+ u8 is_11n_enabled;
+ u8 ampdu_sta[MAX_NUM_TID];
+ u16 rx_seq[MAX_NUM_TID];
+ u16 max_amsdu;
+};
+
struct mwifiex_if_ops {
int (*init_if) (struct mwifiex_adapter *);
void (*cleanup_if) (struct mwifiex_adapter *);
@@ -690,6 +722,9 @@ struct mwifiex_adapter {
u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
u16 max_mgmt_ie_index;
u8 scan_delay_cnt;
+ u8 empty_tx_q_cnt;
+ atomic_t is_tx_received;
+ atomic_t pending_bridged_pkts;
};
int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -702,6 +737,9 @@ void mwifiex_stop_net_dev_queue(struct net_device *netdev,
void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
struct mwifiex_adapter *adapter);
+int mwifiex_init_priv(struct mwifiex_private *priv);
+void mwifiex_free_priv(struct mwifiex_private *priv);
+
int mwifiex_init_fw(struct mwifiex_adapter *adapter);
int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter);
@@ -714,6 +752,9 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *);
int mwifiex_recv_packet(struct mwifiex_adapter *, struct sk_buff *skb);
+int mwifiex_process_mgmt_packet(struct mwifiex_adapter *adapter,
+ struct sk_buff *skb);
+
int mwifiex_process_event(struct mwifiex_adapter *adapter);
int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
@@ -780,8 +821,17 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *, u16 cmdresp_no,
struct host_cmd_ds_command *resp);
int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *,
struct sk_buff *skb);
+int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
+ struct sk_buff *skb);
+int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
+ struct sk_buff *skb);
int mwifiex_process_sta_event(struct mwifiex_private *);
+int mwifiex_process_uap_event(struct mwifiex_private *);
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
+void mwifiex_delete_all_station_list(struct mwifiex_private *priv);
void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
+void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb);
int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta);
int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd,
struct mwifiex_scan_cmd_config *scan_cfg);
@@ -840,6 +890,8 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
void mwifiex_set_ht_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_ap_settings *params);
+void mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
+ struct cfg80211_ap_settings *params);
/*
* This function checks if the queuing is RA based or not.
@@ -925,6 +977,14 @@ mwifiex_netdev_get_priv(struct net_device *dev)
return (struct mwifiex_private *) (*(unsigned long *) netdev_priv(dev));
}
+/*
+ * This function checks if a skb holds a management frame.
+ */
+static inline bool mwifiex_is_skb_mgmt_frame(struct sk_buff *skb)
+{
+ return (*(u32 *)skb->data == PKT_TYPE_MGMT);
+}
+
int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
u32 func_init_shutdown);
int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8);
@@ -949,14 +1009,21 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
const struct mwifiex_user_scan_cfg *user_scan_in);
int mwifiex_set_radio(struct mwifiex_private *priv, u8 option);
-int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
- int key_len, u8 key_index, const u8 *mac_addr,
- int disable);
+int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
+ const u8 *key, int key_len, u8 key_index,
+ const u8 *mac_addr, int disable);
int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len);
int mwifiex_get_ver_ext(struct mwifiex_private *priv);
+int mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type *channel_type,
+ unsigned int duration);
+
+int mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role);
+
int mwifiex_get_stats_info(struct mwifiex_private *priv,
struct mwifiex_ds_get_stats *log);
@@ -987,6 +1054,8 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
int mwifiex_main_process(struct mwifiex_adapter *);
+int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb);
+
int mwifiex_get_bss_info(struct mwifiex_private *,
struct mwifiex_bss_info *);
int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
@@ -997,8 +1066,10 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc);
+u8 mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type);
+
struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
- char *name,
+ const char *name,
enum nl80211_iftype type,
u32 *flags,
struct vif_params *params);
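The new mwifiex_is_skb_mgmt_frame() inline above treats the first four bytes of skb->data as a host-order u32 tag and compares it against PKT_TYPE_MGMT (0xE5). A stand-alone sketch of that tagging convention; the buffer layout here is illustrative only.

/* Tagging a packet buffer as a management frame with a leading u32. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PKT_TYPE_MGMT 0xE5

static bool is_mgmt_frame(const uint8_t *buf)
{
	uint32_t tag;

	memcpy(&tag, buf, sizeof(tag));	/* *(u32 *)skb->data in the patch */
	return tag == PKT_TYPE_MGMT;
}

int main(void)
{
	uint8_t pkt[16] = { 0 };
	uint32_t tag = PKT_TYPE_MGMT;

	memcpy(pkt, &tag, sizeof(tag));	/* sender tags the buffer */
	printf("%s\n", is_mgmt_frame(pkt) ? "management frame" : "data frame");
	return 0;
}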
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 04dc7ca4ac22..e36a75988f87 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -614,9 +614,8 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
/* Increment the TLV header length by the size
appended */
- chan_tlv_out->header.len =
- cpu_to_le16(le16_to_cpu(chan_tlv_out->header.len) +
- (sizeof(chan_tlv_out->chan_scan_param)));
+ le16_add_cpu(&chan_tlv_out->header.len,
+ sizeof(chan_tlv_out->chan_scan_param));
/*
* The tlv buffer length is set to the number of bytes
@@ -726,7 +725,6 @@ mwifiex_config_scan(struct mwifiex_private *priv,
struct mwifiex_ie_types_num_probes *num_probes_tlv;
struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
struct mwifiex_ie_types_rates_param_set *rates_tlv;
- const u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
u8 *tlv_pos;
u32 num_probes;
u32 ssid_len;
@@ -840,8 +838,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
* or BSSID filter applied to the scan results in the firmware.
*/
if ((i && ssid_filter) ||
- memcmp(scan_cfg_out->specific_bssid, &zero_mac,
- sizeof(zero_mac)))
+ !is_zero_ether_addr(scan_cfg_out->specific_bssid))
*filtered_scan = true;
} else {
scan_cfg_out->bss_mode = (u8) adapter->scan_mode;
@@ -989,6 +986,8 @@ mwifiex_config_scan(struct mwifiex_private *priv,
*max_chan_per_scan = 2;
else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD)
*max_chan_per_scan = 3;
+ else
+ *max_chan_per_scan = 4;
}
}
@@ -1433,9 +1432,9 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
if (ret)
dev_err(priv->adapter->dev, "cannot find ssid "
"%s\n", bss_desc->ssid.ssid);
- break;
+ break;
default:
- ret = 0;
+ ret = 0;
}
}
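The scan.c hunk above replaces the open-coded read-modify-write on the little-endian TLV length with le16_add_cpu(), and the memcmp against a zero MAC with is_zero_ether_addr(). The two length-update forms are equivalent, as this small user-space sketch shows with glibc's le16toh()/htole16() standing in for the kernel macros:

/* le16_add_cpu(&len, n) is shorthand for
 * len = cpu_to_le16(le16_to_cpu(len) + n). */
#define _DEFAULT_SOURCE		/* for le16toh()/htole16() on glibc */
#include <assert.h>
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static void le16_add(uint16_t *le_val, uint16_t addend)
{
	*le_val = htole16(le16toh(*le_val) + addend);
}

int main(void)
{
	uint16_t a = htole16(10), b = htole16(10);

	/* Open-coded form from the old driver code ... */
	a = htole16(le16toh(a) + 6);
	/* ... and the helper the patch switches to. */
	le16_add(&b, 6);

	assert(a == b);
	printf("length is now %u\n", (unsigned)le16toh(b));
	return 0;
}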
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index df3a33c530cf..5d87195390f8 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -551,7 +551,6 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
struct host_cmd_tlv_mac_addr *tlv_mac;
u16 key_param_len = 0, cmd_size;
int ret = 0;
- const u8 bc_mac[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
cmd->command = cpu_to_le16(HostCmd_CMD_802_11_KEY_MATERIAL);
key_material->action = cpu_to_le16(cmd_action);
@@ -593,7 +592,7 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
/* set 0 when re-key */
key_material->key_param_set.key[1] = 0;
- if (0 != memcmp(enc_key->mac_addr, bc_mac, sizeof(bc_mac))) {
+ if (!is_broadcast_ether_addr(enc_key->mac_addr)) {
/* WAPI pairwise key: unicast */
key_material->key_param_set.key_info |=
cpu_to_le16(KEY_UNICAST);
@@ -610,7 +609,7 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
memcpy(&key_material->key_param_set.key[2],
enc_key->key_material, enc_key->key_len);
memcpy(&key_material->key_param_set.key[2 + enc_key->key_len],
- enc_key->wapi_rxpn, WAPI_RXPN_LEN);
+ enc_key->pn, PN_LEN);
key_material->key_param_set.length =
cpu_to_le16(WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN);
@@ -621,23 +620,38 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
return ret;
}
if (enc_key->key_len == WLAN_KEY_LEN_CCMP) {
- dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n");
- key_material->key_param_set.key_type_id =
+ if (enc_key->is_igtk_key) {
+ dev_dbg(priv->adapter->dev, "cmd: CMAC_AES\n");
+ key_material->key_param_set.key_type_id =
+ cpu_to_le16(KEY_TYPE_ID_AES_CMAC);
+ if (cmd_oid == KEY_INFO_ENABLED)
+ key_material->key_param_set.key_info =
+ cpu_to_le16(KEY_ENABLED);
+ else
+ key_material->key_param_set.key_info =
+ cpu_to_le16(!KEY_ENABLED);
+
+ key_material->key_param_set.key_info |=
+ cpu_to_le16(KEY_IGTK);
+ } else {
+ dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n");
+ key_material->key_param_set.key_type_id =
cpu_to_le16(KEY_TYPE_ID_AES);
- if (cmd_oid == KEY_INFO_ENABLED)
- key_material->key_param_set.key_info =
+ if (cmd_oid == KEY_INFO_ENABLED)
+ key_material->key_param_set.key_info =
cpu_to_le16(KEY_ENABLED);
- else
- key_material->key_param_set.key_info =
+ else
+ key_material->key_param_set.key_info =
cpu_to_le16(!KEY_ENABLED);
- if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
+ if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
/* AES pairwise key: unicast */
- key_material->key_param_set.key_info |=
+ key_material->key_param_set.key_info |=
cpu_to_le16(KEY_UNICAST);
- else /* AES group key: multicast */
- key_material->key_param_set.key_info |=
+ else /* AES group key: multicast */
+ key_material->key_param_set.key_info |=
cpu_to_le16(KEY_MCAST);
+ }
} else if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
dev_dbg(priv->adapter->dev, "cmd: WPA_TKIP\n");
key_material->key_param_set.key_type_id =
@@ -668,6 +682,24 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
key_param_len = (u16)(enc_key->key_len + KEYPARAMSET_FIXED_LEN)
+ sizeof(struct mwifiex_ie_types_header);
+ if (le16_to_cpu(key_material->key_param_set.key_type_id) ==
+ KEY_TYPE_ID_AES_CMAC) {
+ struct mwifiex_cmac_param *param =
+ (void *)key_material->key_param_set.key;
+
+ memcpy(param->ipn, enc_key->pn, IGTK_PN_LEN);
+ memcpy(param->key, enc_key->key_material,
+ WLAN_KEY_LEN_AES_CMAC);
+
+ key_param_len = sizeof(struct mwifiex_cmac_param);
+ key_material->key_param_set.key_len =
+ cpu_to_le16(key_param_len);
+ key_param_len += KEYPARAMSET_FIXED_LEN;
+ key_material->key_param_set.length =
+ cpu_to_le16(key_param_len);
+ key_param_len += sizeof(struct mwifiex_ie_types_header);
+ }
+
cmd->size = cpu_to_le16(sizeof(key_material->action) + S_DS_GEN
+ key_param_len);
@@ -1135,6 +1167,31 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
S_DS_GEN);
ret = 0;
break;
+ case HostCmd_CMD_MGMT_FRAME_REG:
+ cmd_ptr->command = cpu_to_le16(cmd_no);
+ cmd_ptr->params.reg_mask.action = cpu_to_le16(cmd_action);
+ cmd_ptr->params.reg_mask.mask = cpu_to_le32(*(u32 *)data_buf);
+ cmd_ptr->size =
+ cpu_to_le16(sizeof(struct host_cmd_ds_mgmt_frame_reg) +
+ S_DS_GEN);
+ ret = 0;
+ break;
+ case HostCmd_CMD_REMAIN_ON_CHAN:
+ cmd_ptr->command = cpu_to_le16(cmd_no);
+ memcpy(&cmd_ptr->params, data_buf,
+ sizeof(struct host_cmd_ds_remain_on_chan));
+ cmd_ptr->size =
+ cpu_to_le16(sizeof(struct host_cmd_ds_remain_on_chan) +
+ S_DS_GEN);
+ break;
+ case HostCmd_CMD_P2P_MODE_CFG:
+ cmd_ptr->command = cpu_to_le16(cmd_no);
+ cmd_ptr->params.mode_cfg.action = cpu_to_le16(cmd_action);
+ cmd_ptr->params.mode_cfg.mode = cpu_to_le16(*(u16 *)data_buf);
+ cmd_ptr->size =
+ cpu_to_le16(sizeof(struct host_cmd_ds_p2p_mode_cfg) +
+ S_DS_GEN);
+ break;
case HostCmd_CMD_FUNC_INIT:
if (priv->adapter->hw_status == MWIFIEX_HW_STATUS_RESET)
priv->adapter->hw_status = MWIFIEX_HW_STATUS_READY;
@@ -1204,6 +1261,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
else if (priv->bss_mode == NL80211_IFTYPE_STATION)
cmd_ptr->params.bss_mode.con_type =
CONNECTION_TYPE_INFRA;
+ else if (priv->bss_mode == NL80211_IFTYPE_AP)
+ cmd_ptr->params.bss_mode.con_type = CONNECTION_TYPE_AP;
cmd_ptr->size = cpu_to_le16(sizeof(struct
host_cmd_ds_set_bss_mode) + S_DS_GEN);
ret = 0;
@@ -1253,35 +1312,35 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
if (first_sta) {
if (priv->adapter->iface_type == MWIFIEX_PCIE) {
- ret = mwifiex_send_cmd_async(priv,
+ ret = mwifiex_send_cmd_sync(priv,
HostCmd_CMD_PCIE_DESC_DETAILS,
HostCmd_ACT_GEN_SET, 0, NULL);
if (ret)
return -1;
}
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_FUNC_INIT,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_FUNC_INIT,
+ HostCmd_ACT_GEN_SET, 0, NULL);
if (ret)
return -1;
/* Read MAC address from HW */
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_GET_HW_SPEC,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_GET_HW_SPEC,
+ HostCmd_ACT_GEN_GET, 0, NULL);
if (ret)
return -1;
/* Reconfigure tx buf size */
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_RECONFIGURE_TX_BUFF,
- HostCmd_ACT_GEN_SET, 0,
- &priv->adapter->tx_buf_size);
+ ret = mwifiex_send_cmd_sync(priv,
+ HostCmd_CMD_RECONFIGURE_TX_BUFF,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->adapter->tx_buf_size);
if (ret)
return -1;
if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
/* Enable IEEE PS by default */
priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
- ret = mwifiex_send_cmd_async(
+ ret = mwifiex_send_cmd_sync(
priv, HostCmd_CMD_802_11_PS_MODE_ENH,
EN_AUTO_PS, BITMAP_STA_PS, NULL);
if (ret)
@@ -1290,21 +1349,21 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
}
/* get tx rate */
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_TX_RATE_CFG,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
+ HostCmd_ACT_GEN_GET, 0, NULL);
if (ret)
return -1;
priv->data_rate = 0;
/* get tx power */
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_RF_TX_PWR,
- HostCmd_ACT_GEN_GET, 0, NULL);
+ ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_RF_TX_PWR,
+ HostCmd_ACT_GEN_GET, 0, NULL);
if (ret)
return -1;
if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) {
/* set ibss coalescing_status */
- ret = mwifiex_send_cmd_async(
+ ret = mwifiex_send_cmd_sync(
priv, HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
HostCmd_ACT_GEN_SET, 0, &enable);
if (ret)
@@ -1314,16 +1373,16 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
memset(&amsdu_aggr_ctrl, 0, sizeof(amsdu_aggr_ctrl));
amsdu_aggr_ctrl.enable = true;
/* Send request to firmware */
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_AMSDU_AGGR_CTRL,
- HostCmd_ACT_GEN_SET, 0,
- &amsdu_aggr_ctrl);
+ ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_AMSDU_AGGR_CTRL,
+ HostCmd_ACT_GEN_SET, 0,
+ &amsdu_aggr_ctrl);
if (ret)
return -1;
/* MAC Control must be the last command in init_fw */
/* set MAC Control */
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
- HostCmd_ACT_GEN_SET, 0,
- &priv->curr_pkt_filter);
+ ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->curr_pkt_filter);
if (ret)
return -1;
@@ -1332,10 +1391,10 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
/* Enable auto deep sleep */
auto_ds.auto_ds = DEEP_SLEEP_ON;
auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME;
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_PS_MODE_ENH,
- EN_AUTO_PS, BITMAP_AUTO_DS,
- &auto_ds);
+ ret = mwifiex_send_cmd_sync(priv,
+ HostCmd_CMD_802_11_PS_MODE_ENH,
+ EN_AUTO_PS, BITMAP_AUTO_DS,
+ &auto_ds);
if (ret)
return -1;
}
@@ -1343,23 +1402,24 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
/* Send cmd to FW to enable/disable 11D function */
state_11d = ENABLE_11D;
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_SET, DOT11D_I,
- &state_11d);
+ ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_SET, DOT11D_I,
+ &state_11d);
if (ret)
dev_err(priv->adapter->dev,
"11D: failed to enable 11D\n");
}
+ /* set last_init_cmd before sending the command */
+ priv->adapter->last_init_cmd = HostCmd_CMD_11N_CFG;
+
/* Send cmd to FW to configure 11n specific configuration
* (Short GI, Channel BW, Green field support etc.) for transmit
*/
tx_cfg.tx_htcap = MWIFIEX_FW_DEF_HTTXCFG;
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_CFG,
- HostCmd_ACT_GEN_SET, 0, &tx_cfg);
+ ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_11N_CFG,
+ HostCmd_ACT_GEN_SET, 0, &tx_cfg);
- /* set last_init_cmd */
- priv->adapter->last_init_cmd = HostCmd_CMD_11N_CFG;
ret = -EINPROGRESS;
return ret;
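For the new AES-CMAC (IGTK) case the key TLV payload becomes a struct mwifiex_cmac_param, i.e. an 8-byte IPN followed by the 16-byte CMAC key, and the surrounding lengths are built from that plus the fixed key-param fields and the TLV header. A short worked sketch of that arithmetic; KEYPARAMSET_FIXED_LEN and the TLV header size are placeholders here, since their definitions are not part of this hunk.

/* Size bookkeeping for the AES-CMAC (IGTK) key upload. */
#include <stdio.h>

#define IGTK_PN_LEN		 8
#define WLAN_KEY_LEN_AES_CMAC	16	/* from linux/ieee80211.h */
#define KEYPARAMSET_FIXED_LEN	 6	/* placeholder; defined in fw.h */
#define TLV_HEADER_LEN		 4	/* placeholder for the TLV type+len header */

int main(void)
{
	/* struct mwifiex_cmac_param: ipn[8] followed by key[16] */
	unsigned int key_len = IGTK_PN_LEN + WLAN_KEY_LEN_AES_CMAC;

	/* key_param_set.key_len */
	unsigned int key_param_len = key_len;
	/* key_param_set.length also covers the fixed key-param fields */
	unsigned int param_set_len = key_param_len + KEYPARAMSET_FIXED_LEN;
	/* the command size finally adds the TLV header on top */
	unsigned int total = param_set_len + TLV_HEADER_LEN;

	printf("key=%u param_set=%u total=%u bytes\n",
	       key_len, param_set_len, total);
	return 0;
}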
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 0b09004ebb25..e380171c4c5d 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -123,7 +123,8 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
{
struct host_cmd_ds_802_11_rssi_info_rsp *rssi_info_rsp =
&resp->params.rssi_info_rsp;
- struct mwifiex_ds_misc_subsc_evt subsc_evt;
+ struct mwifiex_ds_misc_subsc_evt *subsc_evt =
+ &priv->async_subsc_evt_storage;
priv->data_rssi_last = le16_to_cpu(rssi_info_rsp->data_rssi_last);
priv->data_nf_last = le16_to_cpu(rssi_info_rsp->data_nf_last);
@@ -140,26 +141,27 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
if (priv->subsc_evt_rssi_state == EVENT_HANDLED)
return 0;
+ memset(subsc_evt, 0x00, sizeof(struct mwifiex_ds_misc_subsc_evt));
+
/* Resubscribe low and high rssi events with new thresholds */
- memset(&subsc_evt, 0x00, sizeof(struct mwifiex_ds_misc_subsc_evt));
- subsc_evt.events = BITMASK_BCN_RSSI_LOW | BITMASK_BCN_RSSI_HIGH;
- subsc_evt.action = HostCmd_ACT_BITWISE_SET;
+ subsc_evt->events = BITMASK_BCN_RSSI_LOW | BITMASK_BCN_RSSI_HIGH;
+ subsc_evt->action = HostCmd_ACT_BITWISE_SET;
if (priv->subsc_evt_rssi_state == RSSI_LOW_RECVD) {
- subsc_evt.bcn_l_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg -
+ subsc_evt->bcn_l_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg -
priv->cqm_rssi_hyst);
- subsc_evt.bcn_h_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
+ subsc_evt->bcn_h_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
} else if (priv->subsc_evt_rssi_state == RSSI_HIGH_RECVD) {
- subsc_evt.bcn_l_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
- subsc_evt.bcn_h_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg +
+ subsc_evt->bcn_l_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
+ subsc_evt->bcn_h_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg +
priv->cqm_rssi_hyst);
}
- subsc_evt.bcn_l_rssi_cfg.evt_freq = 1;
- subsc_evt.bcn_h_rssi_cfg.evt_freq = 1;
+ subsc_evt->bcn_l_rssi_cfg.evt_freq = 1;
+ subsc_evt->bcn_h_rssi_cfg.evt_freq = 1;
priv->subsc_evt_rssi_state = EVENT_HANDLED;
mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
- 0, 0, &subsc_evt);
+ 0, 0, subsc_evt);
return 0;
}
@@ -652,6 +654,38 @@ static int mwifiex_ret_ver_ext(struct mwifiex_private *priv,
}
/*
+ * This function handles the command response of remain on channel.
+ */
+static int
+mwifiex_ret_remain_on_chan(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp,
+ struct host_cmd_ds_remain_on_chan *roc_cfg)
+{
+ struct host_cmd_ds_remain_on_chan *resp_cfg = &resp->params.roc_cfg;
+
+ if (roc_cfg)
+ memcpy(roc_cfg, resp_cfg, sizeof(*roc_cfg));
+
+ return 0;
+}
+
+/*
+ * This function handles the command response of P2P mode cfg.
+ */
+static int
+mwifiex_ret_p2p_mode_cfg(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp,
+ void *data_buf)
+{
+ struct host_cmd_ds_p2p_mode_cfg *mode_cfg = &resp->params.mode_cfg;
+
+ if (data_buf)
+ *((u16 *)data_buf) = le16_to_cpu(mode_cfg->mode);
+
+ return 0;
+}
+
+/*
* This function handles the command response of register access.
*
* The register value and offset are returned to the user. For EEPROM
@@ -736,7 +770,6 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
{
struct host_cmd_ds_802_11_ibss_status *ibss_coal_resp =
&(resp->params.ibss_coalescing);
- u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
if (le16_to_cpu(ibss_coal_resp->action) == HostCmd_ACT_GEN_SET)
return 0;
@@ -745,7 +778,7 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
"info: new BSSID %pM\n", ibss_coal_resp->bssid);
/* If rsp has a NULL BSSID, just return; no action needed */
- if (!memcmp(ibss_coal_resp->bssid, zero_mac, ETH_ALEN)) {
+ if (is_zero_ether_addr(ibss_coal_resp->bssid)) {
dev_warn(priv->adapter->dev, "new BSSID is NULL\n");
return 0;
}
@@ -775,8 +808,7 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
* This function handles the command response for subscribe event command.
*/
static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
- struct host_cmd_ds_command *resp,
- struct mwifiex_ds_misc_subsc_evt *sub_event)
+ struct host_cmd_ds_command *resp)
{
struct host_cmd_ds_802_11_subsc_evt *cmd_sub_event =
&resp->params.subsc_evt;
@@ -786,10 +818,6 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
dev_dbg(priv->adapter->dev, "Bitmap of currently subscribed events: %16x\n",
le16_to_cpu(cmd_sub_event->events));
- /*Return the subscribed event info for a Get request*/
- if (sub_event)
- sub_event->events = le16_to_cpu(cmd_sub_event->events);
-
return 0;
}
@@ -879,6 +907,13 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_VERSION_EXT:
ret = mwifiex_ret_ver_ext(priv, resp, data_buf);
break;
+ case HostCmd_CMD_REMAIN_ON_CHAN:
+ ret = mwifiex_ret_remain_on_chan(priv, resp, data_buf);
+ break;
+ case HostCmd_CMD_P2P_MODE_CFG:
+ ret = mwifiex_ret_p2p_mode_cfg(priv, resp, data_buf);
+ break;
+ case HostCmd_CMD_MGMT_FRAME_REG:
case HostCmd_CMD_FUNC_INIT:
case HostCmd_CMD_FUNC_SHUTDOWN:
break;
@@ -913,7 +948,6 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
le16_to_cpu(resp->params.tx_buf.mp_end_port));
break;
case HostCmd_CMD_AMSDU_AGGR_CTRL:
- ret = mwifiex_ret_amsdu_aggr_ctrl(resp, data_buf);
break;
case HostCmd_CMD_WMM_GET_STATUS:
ret = mwifiex_ret_wmm_get_status(priv, resp);
@@ -932,12 +966,11 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_SET_BSS_MODE:
break;
case HostCmd_CMD_11N_CFG:
- ret = mwifiex_ret_11n_cfg(resp, data_buf);
break;
case HostCmd_CMD_PCIE_DESC_DETAILS:
break;
case HostCmd_CMD_802_11_SUBSCRIBE_EVENT:
- ret = mwifiex_ret_subsc_evt(priv, resp, data_buf);
+ ret = mwifiex_ret_subsc_evt(priv, resp);
break;
case HostCmd_CMD_UAP_SYS_CONFIG:
break;
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index b8614a825460..aafde30e714a 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -184,10 +184,9 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
int mwifiex_process_sta_event(struct mwifiex_private *priv)
{
struct mwifiex_adapter *adapter = priv->adapter;
- int len, ret = 0;
+ int ret = 0;
u32 eventcause = adapter->event_cause;
- struct station_info sinfo;
- struct mwifiex_assoc_event *event;
+ u16 ctrl;
switch (eventcause) {
case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
@@ -279,10 +278,16 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_MIC_ERR_UNICAST:
dev_dbg(adapter->dev, "event: UNICAST MIC ERROR\n");
+ cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
+ NL80211_KEYTYPE_PAIRWISE,
+ -1, NULL, GFP_KERNEL);
break;
case EVENT_MIC_ERR_MULTICAST:
dev_dbg(adapter->dev, "event: MULTICAST MIC ERROR\n");
+ cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
+ NL80211_KEYTYPE_GROUP,
+ -1, NULL, GFP_KERNEL);
break;
case EVENT_MIB_CHANGED:
case EVENT_INIT_DONE:
@@ -384,11 +389,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
adapter->event_body);
break;
case EVENT_AMSDU_AGGR_CTRL:
- dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n",
- *(u16 *) adapter->event_body);
+ ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
+ dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
+
adapter->tx_buf_size =
- min(adapter->curr_tx_buf_size,
- le16_to_cpu(*(__le16 *) adapter->event_body));
+ min_t(u16, adapter->curr_tx_buf_size, ctrl);
dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
adapter->tx_buf_size);
break;
@@ -405,51 +410,18 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause);
break;
- case EVENT_UAP_STA_ASSOC:
- memset(&sinfo, 0, sizeof(sinfo));
- event = (struct mwifiex_assoc_event *)
- (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
- if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
- len = -1;
-
- if (ieee80211_is_assoc_req(event->frame_control))
- len = 0;
- else if (ieee80211_is_reassoc_req(event->frame_control))
- /* There will be ETH_ALEN bytes of
- * current_ap_addr before the re-assoc ies.
- */
- len = ETH_ALEN;
-
- if (len != -1) {
- sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
- sinfo.assoc_req_ies = &event->data[len];
- len = (u8 *)sinfo.assoc_req_ies -
- (u8 *)&event->frame_control;
- sinfo.assoc_req_ies_len =
- le16_to_cpu(event->len) - (u16)len;
- }
- }
- cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo,
- GFP_KERNEL);
- break;
- case EVENT_UAP_STA_DEAUTH:
- cfg80211_del_sta(priv->netdev, adapter->event_body +
- MWIFIEX_UAP_EVENT_EXTRA_HEADER, GFP_KERNEL);
- break;
- case EVENT_UAP_BSS_IDLE:
- priv->media_connected = false;
- break;
- case EVENT_UAP_BSS_ACTIVE:
- priv->media_connected = true;
- break;
- case EVENT_UAP_BSS_START:
- dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
- memcpy(priv->netdev->dev_addr, adapter->event_body+2, ETH_ALEN);
- break;
- case EVENT_UAP_MIC_COUNTERMEASURES:
- /* For future development */
- dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+ case EVENT_REMAIN_ON_CHAN_EXPIRED:
+ dev_dbg(adapter->dev, "event: Remain on channel expired\n");
+ cfg80211_remain_on_channel_expired(priv->wdev,
+ priv->roc_cfg.cookie,
+ &priv->roc_cfg.chan,
+ priv->roc_cfg.chan_type,
+ GFP_ATOMIC);
+
+ memset(&priv->roc_cfg, 0x00, sizeof(struct mwifiex_roc_cfg));
+
break;
+
default:
dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
eventcause);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index fb2136089a22..0c9f70b2cbe6 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -26,6 +26,9 @@
#include "11n.h"
#include "cfg80211.h"
+static int disconnect_on_suspend = 1;
+module_param(disconnect_on_suspend, int, 0644);
+
/*
* Copies the multicast address list from device to driver.
*
@@ -192,6 +195,44 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
return ret;
}
+static int mwifiex_process_country_ie(struct mwifiex_private *priv,
+ struct cfg80211_bss *bss)
+{
+ u8 *country_ie, country_ie_len;
+ struct mwifiex_802_11d_domain_reg *domain_info =
+ &priv->adapter->domain_reg;
+
+ country_ie = (u8 *)ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY);
+
+ if (!country_ie)
+ return 0;
+
+ country_ie_len = country_ie[1];
+ if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN)
+ return 0;
+
+ domain_info->country_code[0] = country_ie[2];
+ domain_info->country_code[1] = country_ie[3];
+ domain_info->country_code[2] = ' ';
+
+ country_ie_len -= IEEE80211_COUNTRY_STRING_LEN;
+
+ domain_info->no_of_triplet =
+ country_ie_len / sizeof(struct ieee80211_country_ie_triplet);
+
+ memcpy((u8 *)domain_info->triplet,
+ &country_ie[2] + IEEE80211_COUNTRY_STRING_LEN, country_ie_len);
+
+ if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
+ HostCmd_ACT_GEN_SET, 0, NULL)) {
+ wiphy_err(priv->adapter->wiphy,
+ "11D: failed to set domain info in FW\n");
+ return -1;
+ }
+
+ return 0;
+}
+
/*
* In Ad-Hoc mode, the IBSS is created if not found in scan list.
* In both Ad-Hoc and infra mode, a deauthentication is performed
@@ -207,6 +248,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
priv->scan_block = false;
if (bss) {
+ mwifiex_process_country_ie(priv, bss);
+
/* Allocate and fill new bss descriptor */
bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor),
GFP_KERNEL);
@@ -408,6 +451,16 @@ EXPORT_SYMBOL_GPL(mwifiex_cancel_hs);
int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
{
struct mwifiex_ds_hs_cfg hscfg;
+ struct mwifiex_private *priv;
+ int i;
+
+ if (disconnect_on_suspend) {
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ if (priv)
+ mwifiex_deauthenticate(priv, NULL);
+ }
+ }
if (adapter->hs_activated) {
dev_dbg(adapter->dev, "cmd: HS already activated\n");
@@ -942,20 +995,26 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version,
* This function allocates the IOCTL request buffer, fills it
* with requisite parameters and calls the IOCTL handler.
*/
-int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
- int key_len, u8 key_index,
- const u8 *mac_addr, int disable)
+int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
+ const u8 *key, int key_len, u8 key_index,
+ const u8 *mac_addr, int disable)
{
struct mwifiex_ds_encrypt_key encrypt_key;
memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
encrypt_key.key_len = key_len;
+
+ if (kp && kp->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+ encrypt_key.is_igtk_key = true;
+
if (!disable) {
encrypt_key.key_index = key_index;
if (key_len)
memcpy(encrypt_key.key_material, key, key_len);
if (mac_addr)
memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
+ if (kp && kp->seq && kp->seq_len)
+ memcpy(encrypt_key.pn, kp->seq, kp->seq_len);
} else {
encrypt_key.key_disable = true;
if (mac_addr)
@@ -984,6 +1043,65 @@ mwifiex_get_ver_ext(struct mwifiex_private *priv)
return 0;
}
+int
+mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type *ct,
+ unsigned int duration)
+{
+ struct host_cmd_ds_remain_on_chan roc_cfg;
+ u8 sc;
+
+ memset(&roc_cfg, 0, sizeof(roc_cfg));
+ roc_cfg.action = cpu_to_le16(action);
+ if (action == HostCmd_ACT_GEN_SET) {
+ roc_cfg.band_cfg = chan->band;
+ sc = mwifiex_chan_type_to_sec_chan_offset(*ct);
+ roc_cfg.band_cfg |= (sc << 2);
+
+ roc_cfg.channel =
+ ieee80211_frequency_to_channel(chan->center_freq);
+ roc_cfg.duration = cpu_to_le32(duration);
+ }
+ if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_REMAIN_ON_CHAN,
+ action, 0, &roc_cfg)) {
+ dev_err(priv->adapter->dev, "failed to remain on channel\n");
+ return -1;
+ }
+
+ return roc_cfg.status;
+}
+
+int
+mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role)
+{
+ if (GET_BSS_ROLE(priv) == bss_role) {
+ dev_dbg(priv->adapter->dev,
+ "info: already in the desired role.\n");
+ return 0;
+ }
+
+ mwifiex_free_priv(priv);
+ mwifiex_init_priv(priv);
+
+ priv->bss_role = bss_role;
+ switch (bss_role) {
+ case MWIFIEX_BSS_ROLE_UAP:
+ priv->bss_mode = NL80211_IFTYPE_AP;
+ break;
+ case MWIFIEX_BSS_ROLE_STA:
+ case MWIFIEX_BSS_ROLE_ANY:
+ default:
+ priv->bss_mode = NL80211_IFTYPE_STATION;
+ break;
+ }
+
+ mwifiex_send_cmd_sync(priv, HostCmd_CMD_SET_BSS_MODE,
+ HostCmd_ACT_GEN_SET, 0, NULL);
+
+ return mwifiex_sta_init_cmd(priv, false);
+}
+
/*
* Sends IOCTL request to get statistics information.
*
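mwifiex_process_country_ie() above relies on the standard layout of the 802.11 Country element: a 3-character country string followed by 3-byte (first channel, number of channels, max TX power) triplets. A stand-alone sketch of that parse; the element bytes below are invented for illustration.

/* Parsing an 802.11 Country information element. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* WLAN_EID_COUNTRY (7), length 9: "US " plus two triplets */
	const uint8_t ie[] = { 7, 9, 'U', 'S', ' ',
			       1, 11, 20,	/* first chan 1, 11 chans, 20 dBm */
			       36, 4, 17 };	/* first chan 36, 4 chans, 17 dBm */
	unsigned int len = ie[1];
	const uint8_t *body = &ie[2];
	unsigned int i, ntriplets;

	if (len < 6) {	/* IEEE80211_COUNTRY_IE_MIN_LEN */
		fprintf(stderr, "country IE too short\n");
		return 1;
	}

	printf("country: %c%c\n", body[0], body[1]);

	ntriplets = (len - 3) / 3;	/* 3-byte country string, then 3-byte triplets */
	for (i = 0; i < ntriplets; i++) {
		const uint8_t *t = body + 3 + i * 3;

		printf("  first chan %d, %d channels, max %d dBm\n",
		       t[0], t[1], t[2]);
	}
	return 0;
}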
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 02ce3b77d3e7..07d32b73783e 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -54,8 +54,8 @@ int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
local_rx_pd = (struct rxpd *) (skb->data);
- rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd +
- local_rx_pd->rx_pkt_offset);
+ rx_pkt_hdr = (void *)local_rx_pd +
+ le16_to_cpu(local_rx_pd->rx_pkt_offset);
if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
@@ -125,7 +125,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
struct rx_packet_hdr *rx_pkt_hdr;
u8 ta[ETH_ALEN];
- u16 rx_pkt_type;
+ u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num;
struct mwifiex_private *priv =
mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
rx_info->bss_type);
@@ -134,16 +134,17 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
return -1;
local_rx_pd = (struct rxpd *) (skb->data);
- rx_pkt_type = local_rx_pd->rx_pkt_type;
+ rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type);
+ rx_pkt_offset = le16_to_cpu(local_rx_pd->rx_pkt_offset);
+ rx_pkt_length = le16_to_cpu(local_rx_pd->rx_pkt_length);
+ seq_num = le16_to_cpu(local_rx_pd->seq_num);
- rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd +
- local_rx_pd->rx_pkt_offset);
+ rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset;
- if ((local_rx_pd->rx_pkt_offset + local_rx_pd->rx_pkt_length) >
- (u16) skb->len) {
- dev_err(adapter->dev, "wrong rx packet: len=%d,"
- " rx_pkt_offset=%d, rx_pkt_length=%d\n", skb->len,
- local_rx_pd->rx_pkt_offset, local_rx_pd->rx_pkt_length);
+ if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) {
+ dev_err(adapter->dev,
+ "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
+ skb->len, rx_pkt_offset, rx_pkt_length);
priv->stats.rx_dropped++;
if (adapter->if_ops.data_complete)
@@ -154,14 +155,14 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
return ret;
}
- if (local_rx_pd->rx_pkt_type == PKT_TYPE_AMSDU) {
+ if (rx_pkt_type == PKT_TYPE_AMSDU) {
struct sk_buff_head list;
struct sk_buff *rx_skb;
__skb_queue_head_init(&list);
- skb_pull(skb, local_rx_pd->rx_pkt_offset);
- skb_trim(skb, local_rx_pd->rx_pkt_length);
+ skb_pull(skb, rx_pkt_offset);
+ skb_trim(skb, rx_pkt_length);
ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
priv->wdev->iftype, 0, false);
@@ -173,6 +174,12 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
dev_err(adapter->dev, "Rx of A-MSDU failed");
}
return 0;
+ } else if (rx_pkt_type == PKT_TYPE_MGMT) {
+ ret = mwifiex_process_mgmt_packet(adapter, skb);
+ if (ret)
+ dev_err(adapter->dev, "Rx of mgmt packet failed");
+ dev_kfree_skb_any(skb);
+ return ret;
}
/*
@@ -189,17 +196,14 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
} else {
if (rx_pkt_type != PKT_TYPE_BAR)
- priv->rx_seq[local_rx_pd->priority] =
- local_rx_pd->seq_num;
+ priv->rx_seq[local_rx_pd->priority] = seq_num;
memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address,
ETH_ALEN);
}
/* Reorder and send to OS */
- ret = mwifiex_11n_rx_reorder_pkt(priv, local_rx_pd->seq_num,
- local_rx_pd->priority, ta,
- (u8) local_rx_pd->rx_pkt_type,
- skb);
+ ret = mwifiex_11n_rx_reorder_pkt(priv, seq_num, local_rx_pd->priority,
+ ta, (u8) rx_pkt_type, skb);
if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
if (adapter->if_ops.data_complete)
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 0a046d3a0c16..7b581af24f5f 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -48,6 +48,7 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
struct txpd *local_tx_pd;
struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
u8 pad;
+ u16 pkt_type, pkt_offset;
if (!skb->len) {
dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
@@ -55,6 +56,8 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
return skb->data;
}
+ pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
+
/* If skb->data is not aligned; add padding */
pad = (4 - (((void *)skb->data - NULL) & 0x3)) % 4;
@@ -93,7 +96,14 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
}
/* Offset of actual data */
- local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd) + pad);
+ pkt_offset = sizeof(struct txpd) + pad;
+ if (pkt_type == PKT_TYPE_MGMT) {
+ /* Set the packet type and add header for management frame */
+ local_tx_pd->tx_pkt_type = cpu_to_le16(pkt_type);
+ pkt_offset += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
+ }
+
+ local_tx_pd->tx_pkt_offset = cpu_to_le16(pkt_offset);
/* make space for INTF_HEADER_LEN */
skb_push(skb, INTF_HEADER_LEN);
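The Tx descriptor path above places the payload at sizeof(struct txpd) plus an alignment pad, plus the extra 8-byte (pkt_type + tx_control) header when the skb was tagged as a management frame. A sketch of that offset arithmetic using the same pad formula; the descriptor size below is a stand-in, not the driver's real sizeof(struct txpd).

/* Offset arithmetic for the Tx descriptor. */
#include <stdint.h>
#include <stdio.h>

#define TXPD_SIZE		32	/* stand-in for sizeof(struct txpd) */
#define MGMT_FRAME_HEADER_SIZE	 8	/* MWIFIEX_MGMT_FRAME_HEADER_SIZE */

static unsigned int tx_pkt_offset(uintptr_t data_addr, int is_mgmt)
{
	/* Same pad formula as the driver: bring data_addr up to a
	 * 4-byte boundary. */
	unsigned int pad = (4 - (data_addr & 0x3)) % 4;
	unsigned int offset = TXPD_SIZE + pad;

	if (is_mgmt)
		offset += MGMT_FRAME_HEADER_SIZE;
	return offset;
}

int main(void)
{
	printf("data packet: offset %u\n", tx_pkt_offset(0x1002, 0)); /* pad 2 */
	printf("mgmt packet: offset %u\n", tx_pkt_offset(0x1000, 1)); /* pad 0 */
	return 0;
}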
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index cecb27283196..2af263992e83 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -51,6 +51,9 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
rx_info->bss_num = priv->bss_num;
rx_info->bss_type = priv->bss_type;
+ if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+ return mwifiex_process_uap_rx_packet(adapter, skb);
+
return mwifiex_process_sta_rx_packet(adapter, skb);
}
EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
@@ -72,7 +75,11 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
u8 *head_ptr;
struct txpd *local_tx_pd = NULL;
- head_ptr = mwifiex_process_sta_txpd(priv, skb);
+ if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+ head_ptr = mwifiex_process_uap_txpd(priv, skb);
+ else
+ head_ptr = mwifiex_process_sta_txpd(priv, skb);
+
if (head_ptr) {
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
local_tx_pd =
@@ -157,6 +164,8 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
priv->stats.tx_errors++;
}
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
+ atomic_dec_return(&adapter->pending_bridged_pkts);
if (atomic_dec_return(&adapter->tx_pending) >= LOW_TX_PENDING)
goto done;
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index f40e93fe894a..d95a2d558fcf 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -167,6 +167,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
if (ht_ie) {
memcpy(&bss_cfg->ht_cap, ht_ie + 2,
sizeof(struct ieee80211_ht_cap));
+ priv->ap_11n_enabled = 1;
} else {
memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap));
bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP);
@@ -176,6 +177,25 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
return;
}
+/* This function finds the supported rates IE in the beacon parameters and
+ * sets these rates in the bss_config structure.
+ */
+void
+mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
+ struct cfg80211_ap_settings *params)
+{
+ struct ieee_types_header *rate_ie;
+ int var_offset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
+ const u8 *var_pos = params->beacon.head + var_offset;
+ int len = params->beacon.head_len - var_offset;
+
+ rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
+ if (rate_ie)
+ memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
+
+ return;
+}
+
/* This function initializes some of mwifiex_uap_bss_param variables.
* This helps FW in ignoring invalid values. These values may or may not
 * get updated to valid ones at a later stage.
@@ -322,8 +342,11 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
struct host_cmd_tlv_retry_limit *retry_limit;
struct host_cmd_tlv_encrypt_protocol *encrypt_protocol;
struct host_cmd_tlv_auth_type *auth_type;
+ struct host_cmd_tlv_rates *tlv_rates;
+ struct host_cmd_tlv_ageout_timer *ao_timer, *ps_ao_timer;
struct mwifiex_ie_types_htcap *htcap;
struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
+ int i;
u16 cmd_size = *param_size;
if (bss_cfg->ssid.ssid_len) {
@@ -343,7 +366,23 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
tlv += sizeof(struct host_cmd_tlv_bcast_ssid);
}
- if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) {
+ if (bss_cfg->rates[0]) {
+ tlv_rates = (struct host_cmd_tlv_rates *)tlv;
+ tlv_rates->tlv.type = cpu_to_le16(TLV_TYPE_UAP_RATES);
+
+ for (i = 0; i < MWIFIEX_SUPPORTED_RATES && bss_cfg->rates[i];
+ i++)
+ tlv_rates->rates[i] = bss_cfg->rates[i];
+
+ tlv_rates->tlv.len = cpu_to_le16(i);
+ cmd_size += sizeof(struct host_cmd_tlv_rates) + i;
+ tlv += sizeof(struct host_cmd_tlv_rates) + i;
+ }
+ if (bss_cfg->channel &&
+ ((bss_cfg->band_cfg == BAND_CONFIG_BG &&
+ bss_cfg->channel <= MAX_CHANNEL_BAND_BG) ||
+ (bss_cfg->band_cfg == BAND_CONFIG_A &&
+ bss_cfg->channel <= MAX_CHANNEL_BAND_A))) {
chan_band = (struct host_cmd_tlv_channel_band *)tlv;
chan_band->tlv.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
chan_band->tlv.len =
@@ -459,6 +498,27 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
tlv += sizeof(struct mwifiex_ie_types_htcap);
}
+ if (bss_cfg->sta_ao_timer) {
+ ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
+ ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER);
+ ao_timer->tlv.len = cpu_to_le16(sizeof(*ao_timer) -
+ sizeof(struct host_cmd_tlv));
+ ao_timer->sta_ao_timer = cpu_to_le32(bss_cfg->sta_ao_timer);
+ cmd_size += sizeof(*ao_timer);
+ tlv += sizeof(*ao_timer);
+ }
+
+ if (bss_cfg->ps_sta_ao_timer) {
+ ps_ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
+ ps_ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER);
+ ps_ao_timer->tlv.len = cpu_to_le16(sizeof(*ps_ao_timer) -
+ sizeof(struct host_cmd_tlv));
+ ps_ao_timer->sta_ao_timer =
+ cpu_to_le32(bss_cfg->ps_sta_ao_timer);
+ cmd_size += sizeof(*ps_ao_timer);
+ tlv += sizeof(*ps_ao_timer);
+ }
+
*param_size = cmd_size;
return 0;
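The rates TLV added to mwifiex_uap_bss_param_prepare() above follows the driver's usual type/length/value append pattern: both the command size and the TLV cursor advance by the header size plus however many rate bytes were actually copied. A simplified standalone sketch (no little-endian conversion, layout illustrative only):

#include <stdint.h>
#include <string.h>

struct tlv_hdr {
	uint16_t type;
	uint16_t len;
} __attribute__((packed));

/* Append one variable-length TLV at *tlv and return the number of bytes
 * consumed, so the caller can advance its cursor and grow the command size. */
static size_t append_tlv(uint8_t *tlv, uint16_t type,
			 const uint8_t *payload, uint16_t payload_len)
{
	struct tlv_hdr hdr = { .type = type, .len = payload_len };

	memcpy(tlv, &hdr, sizeof(hdr));
	memcpy(tlv + sizeof(hdr), payload, payload_len);
	return sizeof(hdr) + payload_len;
}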
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
new file mode 100644
index 000000000000..a33fa394e349
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -0,0 +1,290 @@
+/*
+ * Marvell Wireless LAN device driver: AP event handling
+ *
+ * Copyright (C) 2012, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "decl.h"
+#include "main.h"
+#include "11n.h"
+
+/*
+ * This function returns a pointer to the station entry in the station list
+ * table that matches the specified MAC address.
+ * It should be called after acquiring the RA list spinlock.
+ * NULL is returned if the station entry is not found in the associated STA list.
+ */
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_sta_node *node;
+
+ if (!mac)
+ return NULL;
+
+ list_for_each_entry(node, &priv->sta_list, list) {
+ if (!memcmp(node->mac_addr, mac, ETH_ALEN))
+ return node;
+ }
+
+ return NULL;
+}
+
+/*
+ * This function adds a sta_node entry to the associated station list
+ * table with the given MAC address.
+ * If an entry already exists, the existing entry is returned.
+ * If the given MAC address is NULL, NULL is returned.
+ */
+static struct mwifiex_sta_node *
+mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_sta_node *node;
+ unsigned long flags;
+
+ if (!mac)
+ return NULL;
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ node = mwifiex_get_sta_entry(priv, mac);
+ if (node)
+ goto done;
+
+ node = kzalloc(sizeof(struct mwifiex_sta_node), GFP_ATOMIC);
+ if (!node)
+ goto done;
+
+ memcpy(node->mac_addr, mac, ETH_ALEN);
+ list_add_tail(&node->list, &priv->sta_list);
+
+done:
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ return node;
+}
+
+/*
+ * This function will search for HT IE in association request IEs
+ * and set station HT parameters accordingly.
+ */
+static void
+mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
+ int ies_len, struct mwifiex_sta_node *node)
+{
+ const struct ieee80211_ht_cap *ht_cap;
+
+ if (!ies)
+ return;
+
+ ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
+ if (ht_cap) {
+ node->is_11n_enabled = 1;
+ node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
+ IEEE80211_HT_CAP_MAX_AMSDU ?
+ MWIFIEX_TX_DATA_BUF_SIZE_8K :
+ MWIFIEX_TX_DATA_BUF_SIZE_4K;
+ } else {
+ node->is_11n_enabled = 0;
+ }
+
+ return;
+}
+
+/*
+ * This function will delete a station entry from station list
+ */
+static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_sta_node *node, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+ node = mwifiex_get_sta_entry(priv, mac);
+ if (node) {
+ list_for_each_entry_safe(node, tmp, &priv->sta_list,
+ list) {
+ list_del(&node->list);
+ kfree(node);
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ return;
+}
+
+/*
+ * This function will delete all stations from associated station list.
+ */
+static void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
+{
+ struct mwifiex_sta_node *node, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+ list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
+ list_del(&node->list);
+ kfree(node);
+ }
+
+ INIT_LIST_HEAD(&priv->sta_list);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ return;
+}
+
+/*
+ * This function handles AP interface specific events generated by firmware.
+ *
+ * Event specific routines are called by this function based
+ * upon the generated event cause.
+ *
+ *
+ * Events supported for AP -
+ * - EVENT_UAP_STA_ASSOC
+ * - EVENT_UAP_STA_DEAUTH
+ * - EVENT_UAP_BSS_ACTIVE
+ * - EVENT_UAP_BSS_START
+ * - EVENT_UAP_BSS_IDLE
+ * - EVENT_UAP_MIC_COUNTERMEASURES
+ */
+int mwifiex_process_uap_event(struct mwifiex_private *priv)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ int len, i;
+ u32 eventcause = adapter->event_cause;
+ struct station_info sinfo;
+ struct mwifiex_assoc_event *event;
+ struct mwifiex_sta_node *node;
+ u8 *deauth_mac;
+ struct host_cmd_ds_11n_batimeout *ba_timeout;
+ u16 ctrl;
+
+ switch (eventcause) {
+ case EVENT_UAP_STA_ASSOC:
+ memset(&sinfo, 0, sizeof(sinfo));
+ event = (struct mwifiex_assoc_event *)
+ (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
+ if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
+ len = -1;
+
+ if (ieee80211_is_assoc_req(event->frame_control))
+ len = 0;
+ else if (ieee80211_is_reassoc_req(event->frame_control))
+ /* There will be ETH_ALEN bytes of
+ * current_ap_addr before the re-assoc ies.
+ */
+ len = ETH_ALEN;
+
+ if (len != -1) {
+ sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
+ sinfo.assoc_req_ies = &event->data[len];
+ len = (u8 *)sinfo.assoc_req_ies -
+ (u8 *)&event->frame_control;
+ sinfo.assoc_req_ies_len =
+ le16_to_cpu(event->len) - (u16)len;
+ }
+ }
+ cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo,
+ GFP_KERNEL);
+
+ node = mwifiex_add_sta_entry(priv, event->sta_addr);
+ if (!node) {
+ dev_warn(adapter->dev,
+ "could not create station entry!\n");
+ return -1;
+ }
+
+ if (!priv->ap_11n_enabled)
+ break;
+
+ mwifiex_set_sta_ht_cap(priv, sinfo.assoc_req_ies,
+ sinfo.assoc_req_ies_len, node);
+
+ for (i = 0; i < MAX_NUM_TID; i++) {
+ if (node->is_11n_enabled)
+ node->ampdu_sta[i] =
+ priv->aggr_prio_tbl[i].ampdu_user;
+ else
+ node->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
+ }
+ memset(node->rx_seq, 0xff, sizeof(node->rx_seq));
+ break;
+ case EVENT_UAP_STA_DEAUTH:
+ deauth_mac = adapter->event_body +
+ MWIFIEX_UAP_EVENT_EXTRA_HEADER;
+ cfg80211_del_sta(priv->netdev, deauth_mac, GFP_KERNEL);
+
+ if (priv->ap_11n_enabled) {
+ mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, deauth_mac);
+ mwifiex_del_tx_ba_stream_tbl_by_ra(priv, deauth_mac);
+ }
+ mwifiex_del_sta_entry(priv, deauth_mac);
+ break;
+ case EVENT_UAP_BSS_IDLE:
+ priv->media_connected = false;
+ mwifiex_clean_txrx(priv);
+ mwifiex_del_all_sta_list(priv);
+ break;
+ case EVENT_UAP_BSS_ACTIVE:
+ priv->media_connected = true;
+ break;
+ case EVENT_UAP_BSS_START:
+ dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+ memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
+ ETH_ALEN);
+ break;
+ case EVENT_UAP_MIC_COUNTERMEASURES:
+ /* For future development */
+ dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+ break;
+ case EVENT_AMSDU_AGGR_CTRL:
+ ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
+ dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
+
+ if (priv->media_connected) {
+ adapter->tx_buf_size =
+ min_t(u16, adapter->curr_tx_buf_size, ctrl);
+ dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
+ adapter->tx_buf_size);
+ }
+ break;
+ case EVENT_ADDBA:
+ dev_dbg(adapter->dev, "event: ADDBA Request\n");
+ if (priv->media_connected)
+ mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_RSP,
+ HostCmd_ACT_GEN_SET, 0,
+ adapter->event_body);
+ break;
+ case EVENT_DELBA:
+ dev_dbg(adapter->dev, "event: DELBA Request\n");
+ if (priv->media_connected)
+ mwifiex_11n_delete_ba_stream(priv, adapter->event_body);
+ break;
+ case EVENT_BA_STREAM_TIEMOUT:
+ dev_dbg(adapter->dev, "event: BA Stream timeout\n");
+ if (priv->media_connected) {
+ ba_timeout = (void *)adapter->event_body;
+ mwifiex_11n_ba_stream_timeout(priv, ba_timeout);
+ }
+ break;
+ default:
+ dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
+ eventcause);
+ break;
+ }
+
+ return 0;
+}
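mwifiex_get_sta_entry() in the new uap_event.c is a plain MAC-address match over the per-interface station list, done while holding the sta_list spinlock. The same walk restated over a simple singly linked list, purely for illustration:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6

struct sta_node {
	uint8_t mac_addr[ETH_ALEN];
	struct sta_node *next;
};

/* Return the station whose MAC matches, or NULL; mirrors the lookup that
 * the driver performs with the kernel list API under its spinlock. */
static struct sta_node *get_sta_entry(struct sta_node *head, const uint8_t *mac)
{
	struct sta_node *node;

	if (!mac)
		return NULL;
	for (node = head; node; node = node->next)
		if (!memcmp(node->mac_addr, mac, ETH_ALEN))
			return node;
	return NULL;
}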
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
new file mode 100644
index 000000000000..0966ac24b3b4
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -0,0 +1,340 @@
+/*
+ * Marvell Wireless LAN device driver: AP TX and RX data handling
+ *
+ * Copyright (C) 2012, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "decl.h"
+#include "ioctl.h"
+#include "main.h"
+#include "wmm.h"
+#include "11n_aggr.h"
+#include "11n_rxreorder.h"
+
+static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct uap_rxpd *uap_rx_pd;
+ struct rx_packet_hdr *rx_pkt_hdr;
+ struct sk_buff *new_skb;
+ struct mwifiex_txinfo *tx_info;
+ int hdr_chop;
+ struct timeval tv;
+ u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+
+ uap_rx_pd = (struct uap_rxpd *)(skb->data);
+ rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
+
+ if ((atomic_read(&adapter->pending_bridged_pkts) >=
+ MWIFIEX_BRIDGED_PKTS_THRESHOLD)) {
+ dev_err(priv->adapter->dev,
+ "Tx: Bridge packet limit reached. Drop packet!\n");
+ kfree_skb(skb);
+ return;
+ }
+
+ if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
+ rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr)))
+ /* Chop off the rxpd + the excess memory from
+ * 802.2/llc/snap header that was removed.
+ */
+ hdr_chop = (u8 *)eth_hdr - (u8 *)uap_rx_pd;
+ else
+ /* Chop off the rxpd */
+ hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
+
+ /* Chop off the leading header bytes so that it points
+ * to the start of either the reconstructed EthII frame
+ * or the 802.2/llc/snap frame.
+ */
+ skb_pull(skb, hdr_chop);
+
+ if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
+ dev_dbg(priv->adapter->dev,
+ "data: Tx: insufficient skb headroom %d\n",
+ skb_headroom(skb));
+ /* Insufficient skb headroom - allocate a new skb */
+ new_skb =
+ skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
+ if (unlikely(!new_skb)) {
+ dev_err(priv->adapter->dev,
+ "Tx: cannot allocate new_skb\n");
+ kfree_skb(skb);
+ priv->stats.tx_dropped++;
+ return;
+ }
+
+ kfree_skb(skb);
+ skb = new_skb;
+ dev_dbg(priv->adapter->dev, "info: new skb headroom %d\n",
+ skb_headroom(skb));
+ }
+
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ tx_info->bss_num = priv->bss_num;
+ tx_info->bss_type = priv->bss_type;
+ tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
+
+ do_gettimeofday(&tv);
+ skb->tstamp = timeval_to_ktime(tv);
+ mwifiex_wmm_add_buf_txqueue(priv, skb);
+ atomic_inc(&adapter->tx_pending);
+ atomic_inc(&adapter->pending_bridged_pkts);
+
+ if ((atomic_read(&adapter->tx_pending) >= MAX_TX_PENDING)) {
+ mwifiex_set_trans_start(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
+ }
+ return;
+}
+
+/*
+ * This function contains logic for AP packet forwarding.
+ *
+ * If a packet is multicast/broadcast, it is sent to kernel/upper layer
+ * as well as queued back to AP TX queue so that it can be sent to other
+ * associated stations.
+ * If a packet is unicast and RA is present in associated station list,
+ * it is again requeued into AP TX queue.
+ * If a packet is unicast and RA is not in associated station list,
+ * packet is forwarded to kernel to handle routing logic.
+ */
+int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct uap_rxpd *uap_rx_pd;
+ struct rx_packet_hdr *rx_pkt_hdr;
+ u8 ra[ETH_ALEN];
+ struct sk_buff *skb_uap;
+
+ uap_rx_pd = (struct uap_rxpd *)(skb->data);
+ rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
+
+ /* don't do packet forwarding in disconnected state */
+ if (!priv->media_connected) {
+ dev_err(adapter->dev, "drop packet in disconnected state.\n");
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+
+ memcpy(ra, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN);
+
+ if (is_multicast_ether_addr(ra)) {
+ skb_uap = skb_copy(skb, GFP_ATOMIC);
+ mwifiex_uap_queue_bridged_pkt(priv, skb_uap);
+ } else {
+ if (mwifiex_get_sta_entry(priv, ra)) {
+ /* Requeue Intra-BSS packet */
+ mwifiex_uap_queue_bridged_pkt(priv, skb);
+ return 0;
+ }
+ }
+
+ /* Forward unicast/inter-BSS packets to the kernel. */
+ return mwifiex_process_rx_packet(adapter, skb);
+}
+
+/*
+ * This function processes the packet received on AP interface.
+ *
+ * The function looks into the RxPD and performs sanity tests on the
+ * received buffer to ensure its a valid packet before processing it
+ * further. If the packet is determined to be aggregated, it is
+ * de-aggregated accordingly. Then skb is passed to AP packet forwarding logic.
+ *
+ * The completion callback is called after processing is complete.
+ */
+int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
+ struct sk_buff *skb)
+{
+ int ret;
+ struct uap_rxpd *uap_rx_pd;
+ struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
+ struct rx_packet_hdr *rx_pkt_hdr;
+ u16 rx_pkt_type;
+ u8 ta[ETH_ALEN], pkt_type;
+ struct mwifiex_sta_node *node;
+
+ struct mwifiex_private *priv =
+ mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
+ rx_info->bss_type);
+
+ if (!priv)
+ return -1;
+
+ uap_rx_pd = (struct uap_rxpd *)(skb->data);
+ rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
+ rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
+
+ if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
+ le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16) skb->len) {
+ dev_err(adapter->dev,
+ "wrong rx packet: len=%d, offset=%d, length=%d\n",
+ skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
+ le16_to_cpu(uap_rx_pd->rx_pkt_length));
+ priv->stats.rx_dropped++;
+
+ if (adapter->if_ops.data_complete)
+ adapter->if_ops.data_complete(adapter, skb);
+ else
+ dev_kfree_skb_any(skb);
+
+ return 0;
+ }
+
+ if (le16_to_cpu(uap_rx_pd->rx_pkt_type) == PKT_TYPE_AMSDU) {
+ struct sk_buff_head list;
+ struct sk_buff *rx_skb;
+
+ __skb_queue_head_init(&list);
+ skb_pull(skb, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
+ skb_trim(skb, le16_to_cpu(uap_rx_pd->rx_pkt_length));
+
+ ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
+ priv->wdev->iftype, 0, false);
+
+ while (!skb_queue_empty(&list)) {
+ rx_skb = __skb_dequeue(&list);
+ ret = mwifiex_recv_packet(adapter, rx_skb);
+ if (ret)
+ dev_err(adapter->dev,
+ "AP:Rx A-MSDU failed");
+ }
+
+ return 0;
+ } else if (rx_pkt_type == PKT_TYPE_MGMT) {
+ ret = mwifiex_process_mgmt_packet(adapter, skb);
+ if (ret)
+ dev_err(adapter->dev, "Rx of mgmt packet failed");
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
+
+ if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) {
+ node = mwifiex_get_sta_entry(priv, ta);
+ if (node)
+ node->rx_seq[uap_rx_pd->priority] =
+ le16_to_cpu(uap_rx_pd->seq_num);
+ }
+
+ if (!priv->ap_11n_enabled ||
+ (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
+ (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
+ ret = mwifiex_handle_uap_rx_forward(priv, skb);
+ return ret;
+ }
+
+ /* Reorder and send to kernel */
+ pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
+ ret = mwifiex_11n_rx_reorder_pkt(priv, le16_to_cpu(uap_rx_pd->seq_num),
+ uap_rx_pd->priority, ta, pkt_type,
+ skb);
+
+ if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
+ if (adapter->if_ops.data_complete)
+ adapter->if_ops.data_complete(adapter, skb);
+ else
+ dev_kfree_skb_any(skb);
+ }
+
+ if (ret)
+ priv->stats.rx_dropped++;
+
+ return ret;
+}
+
+/*
+ * This function fills the TxPD for AP tx packets.
+ *
+ * The Tx buffer received by this function should already have the
+ * header space allocated for TxPD.
+ *
+ * This function inserts the TxPD in between interface header and actual
+ * data and adjusts the buffer pointers accordingly.
+ *
+ * The following TxPD fields are set by this function, as required -
+ * - BSS number
+ * - Tx packet length and offset
+ * - Priority
+ * - Packet delay
+ * - Priority specific Tx control
+ * - Flags
+ */
+void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct uap_txpd *txpd;
+ struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
+ int pad, len;
+ u16 pkt_type;
+
+ if (!skb->len) {
+ dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
+ tx_info->status_code = -1;
+ return skb->data;
+ }
+
+ pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
+
+ /* If skb->data is not aligned, add padding */
+ pad = (4 - (((void *)skb->data - NULL) & 0x3)) % 4;
+
+ len = sizeof(*txpd) + pad;
+
+ BUG_ON(skb_headroom(skb) < len + INTF_HEADER_LEN);
+
+ skb_push(skb, len);
+
+ txpd = (struct uap_txpd *)skb->data;
+ memset(txpd, 0, sizeof(*txpd));
+ txpd->bss_num = priv->bss_num;
+ txpd->bss_type = priv->bss_type;
+ txpd->tx_pkt_length = cpu_to_le16((u16)(skb->len - len));
+
+ txpd->priority = (u8)skb->priority;
+ txpd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
+
+ if (txpd->priority < ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl))
+ /*
+ * Set the priority specific tx_control field, setting of 0 will
+ * cause the default value to be used later in this function.
+ */
+ txpd->tx_control =
+ cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[txpd->priority]);
+
+ /* Offset of actual data */
+ if (pkt_type == PKT_TYPE_MGMT) {
+ /* Set the packet type and add header for management frame */
+ txpd->tx_pkt_type = cpu_to_le16(pkt_type);
+ len += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
+ }
+
+ txpd->tx_pkt_offset = cpu_to_le16(len);
+
+ /* make space for INTF_HEADER_LEN */
+ skb_push(skb, INTF_HEADER_LEN);
+
+ if (!txpd->tx_control)
+ /* TxCtrl set by user or default */
+ txpd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
+
+ return skb->data;
+}
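The forwarding policy in mwifiex_handle_uap_rx_forward() above amounts to a three-way decision; sta_known below stands in for a successful mwifiex_get_sta_entry() lookup on the receiver address (illustrative sketch, not driver code):

#include <stdbool.h>
#include <stdint.h>

enum fwd_action {
	FWD_BRIDGE_COPY_AND_HOST,	/* multicast: bridge a copy, pass original up */
	FWD_BRIDGE_ONLY,		/* unicast to an associated STA: requeue in AP TX */
	FWD_HOST_ONLY,			/* unicast to an unknown RA: let the kernel route it */
};

static enum fwd_action uap_forward_action(const uint8_t *ra, bool sta_known)
{
	if (ra[0] & 0x01)		/* multicast/broadcast bit of the receiver address */
		return FWD_BRIDGE_COPY_AND_HOST;
	return sta_known ? FWD_BRIDGE_ONLY : FWD_HOST_ONLY;
}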
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 2864c74bdb6f..ae88f80cf86b 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -142,6 +142,46 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
}
/*
+ * This function processes the received management packet and sends it
+ * to the kernel.
+ */
+int
+mwifiex_process_mgmt_packet(struct mwifiex_adapter *adapter,
+ struct sk_buff *skb)
+{
+ struct rxpd *rx_pd;
+ struct mwifiex_private *priv;
+ u16 pkt_len;
+
+ if (!skb)
+ return -1;
+
+ rx_pd = (struct rxpd *)skb->data;
+ priv = mwifiex_get_priv_by_id(adapter, rx_pd->bss_num, rx_pd->bss_type);
+ if (!priv)
+ return -1;
+
+ skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset));
+ skb_pull(skb, sizeof(pkt_len));
+
+ pkt_len = le16_to_cpu(rx_pd->rx_pkt_length);
+
+ /* Remove address4 */
+ memmove(skb->data + sizeof(struct ieee80211_hdr_3addr),
+ skb->data + sizeof(struct ieee80211_hdr),
+ pkt_len - sizeof(struct ieee80211_hdr));
+
+ pkt_len -= ETH_ALEN + sizeof(pkt_len);
+ rx_pd->rx_pkt_length = cpu_to_le16(pkt_len);
+
+ cfg80211_rx_mgmt(priv->wdev, priv->roc_cfg.chan.center_freq,
+ CAL_RSSI(rx_pd->snr, rx_pd->nf),
+ skb->data, pkt_len, GFP_ATOMIC);
+
+ return 0;
+}
+
+/*
* This function processes the received packet before sending it to the
* kernel.
*
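The memmove() in mwifiex_process_mgmt_packet() above turns the received 4-address 802.11 header into the 3-address form cfg80211 expects by dropping addr4 (the driver additionally trims the 2-byte length word it pulled off earlier). The addr4 strip on its own, with the header sizes hard-coded for illustration:

#include <stdint.h>
#include <string.h>

#define HDR_3ADDR_LEN 24	/* fc + duration + addr1..3 + seq_ctrl */
#define HDR_4ADDR_LEN 30	/* as above, plus the 6-byte addr4 */

/* Remove addr4 in place and return the shortened frame length. */
static uint16_t strip_addr4(uint8_t *frame, uint16_t frame_len)
{
	memmove(frame + HDR_3ADDR_LEN, frame + HDR_4ADDR_LEN,
		frame_len - HDR_4ADDR_LEN);
	return frame_len - (HDR_4ADDR_LEN - HDR_3ADDR_LEN);
}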
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 3fa4d4176993..600d8194610e 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -127,6 +127,29 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
return ra_list;
}
+/* This function returns a random number between 16 and 32 to be used as the
+ * threshold for the number of packets after which BA setup is initiated.
+ */
+static u8 mwifiex_get_random_ba_threshold(void)
+{
+ u32 sec, usec;
+ struct timeval ba_tstamp;
+ u8 ba_threshold;
+
+ /* set up ba_packet_threshold here: a random number in the range
+ * [BA_SETUP_PACKET_OFFSET,
+ * BA_SETUP_PACKET_OFFSET+BA_SETUP_MAX_PACKET_THRESHOLD-1]
+ */
+
+ do_gettimeofday(&ba_tstamp);
+ sec = (ba_tstamp.tv_sec & 0xFFFF) + (ba_tstamp.tv_sec >> 16);
+ usec = (ba_tstamp.tv_usec & 0xFFFF) + (ba_tstamp.tv_usec >> 16);
+ ba_threshold = (((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD)
+ + BA_SETUP_PACKET_OFFSET;
+
+ return ba_threshold;
+}
+
/*
* This function allocates and adds a RA list for all TIDs
* with the given RA.
@@ -137,6 +160,12 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
int i;
struct mwifiex_ra_list_tbl *ra_list;
struct mwifiex_adapter *adapter = priv->adapter;
+ struct mwifiex_sta_node *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ node = mwifiex_get_sta_entry(priv, ra);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
for (i = 0; i < MAX_NUM_TID; ++i) {
ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
@@ -145,14 +174,24 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
if (!ra_list)
break;
- if (!mwifiex_queuing_ra_based(priv))
+ ra_list->is_11n_enabled = 0;
+ if (!mwifiex_queuing_ra_based(priv)) {
ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
- else
- ra_list->is_11n_enabled = false;
+ } else {
+ ra_list->is_11n_enabled =
+ mwifiex_is_sta_11n_enabled(priv, node);
+ if (ra_list->is_11n_enabled)
+ ra_list->max_amsdu = node->max_amsdu;
+ }
dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
ra_list, ra_list->is_11n_enabled);
+ if (ra_list->is_11n_enabled) {
+ ra_list->pkt_count = 0;
+ ra_list->ba_packet_thr =
+ mwifiex_get_random_ba_threshold();
+ }
list_add_tail(&ra_list->list,
&priv->wmm.tid_tbl_ptr[i].ra_list);
@@ -423,7 +462,7 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
for (i = 0; i < adapter->priv_num; ++i) {
priv = adapter->priv[i];
if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
- return false;
+ return false;
}
return true;
@@ -609,7 +648,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
u8 ra[ETH_ALEN], tid_down;
unsigned long flags;
- if (!priv->media_connected) {
+ if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
mwifiex_write_data_complete(adapter, skb, -1);
return;
@@ -624,7 +663,8 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
/* In case of infra as we have already created the list during
association we just don't have to call get_queue_raptr, we will
have only 1 raptr for a tid in case of infra */
- if (!mwifiex_queuing_ra_based(priv)) {
+ if (!mwifiex_queuing_ra_based(priv) &&
+ !mwifiex_is_skb_mgmt_frame(skb)) {
if (!list_empty(&priv->wmm.tid_tbl_ptr[tid_down].ra_list))
ra_list = list_first_entry(
&priv->wmm.tid_tbl_ptr[tid_down].ra_list,
@@ -633,7 +673,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
ra_list = NULL;
} else {
memcpy(ra, skb->data, ETH_ALEN);
- if (ra[0] & 0x01)
+ if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
memset(ra, 0xff, ETH_ALEN);
ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
}
@@ -647,6 +687,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
skb_queue_tail(&ra_list->skb_head, skb);
ra_list->total_pkts_size += skb->len;
+ ra_list->pkt_count++;
atomic_inc(&priv->wmm.tx_pkts_queued);
@@ -867,17 +908,16 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
if (adapter->bss_prio_tbl[j].bss_prio_cur ==
(struct mwifiex_bss_prio_node *)
&adapter->bss_prio_tbl[j].bss_prio_head) {
- bssprio_node =
+ adapter->bss_prio_tbl[j].bss_prio_cur =
list_first_entry(&adapter->bss_prio_tbl[j]
.bss_prio_head,
struct mwifiex_bss_prio_node,
list);
- bssprio_head = bssprio_node;
- } else {
- bssprio_node = adapter->bss_prio_tbl[j].bss_prio_cur;
- bssprio_head = bssprio_node;
}
+ bssprio_node = adapter->bss_prio_tbl[j].bss_prio_cur;
+ bssprio_head = bssprio_node;
+
do {
priv_tmp = bssprio_node->priv;
hqp = &priv_tmp->wmm.highest_queued_prio;
@@ -986,10 +1026,17 @@ mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
{
int count = 0, total_size = 0;
struct sk_buff *skb, *tmp;
+ int max_amsdu_size;
+
+ if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
+ ptr->is_11n_enabled)
+ max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
+ else
+ max_amsdu_size = max_buf_size;
skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
total_size += skb->len;
- if (total_size >= max_buf_size)
+ if (total_size >= max_amsdu_size)
break;
if (++count >= MIN_NUM_AMSDU)
return true;
@@ -1050,6 +1097,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
skb_queue_tail(&ptr->skb_head, skb);
ptr->total_pkts_size += skb->len;
+ ptr->pkt_count++;
tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
@@ -1231,7 +1279,8 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
/* ra_list_spinlock has been freed in
mwifiex_send_single_packet() */
} else {
- if (mwifiex_is_ampdu_allowed(priv, tid)) {
+ if (mwifiex_is_ampdu_allowed(priv, tid) &&
+ ptr->pkt_count > ptr->ba_packet_thr) {
if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
mwifiex_create_ba_tbl(priv, ptr->ra, tid,
BA_SETUP_INPROGRESS);
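mwifiex_get_random_ba_threshold() added above derives a pseudo-random per-RA-list threshold from the current time. The same computation in userspace; the two macro values below are assumed from the "16 and 32" comment, while the driver's real values live in its headers:

#include <stdint.h>
#include <sys/time.h>

#define BA_SETUP_PACKET_OFFSET		16	/* assumed from the comment above */
#define BA_SETUP_MAX_PACKET_THRESHOLD	16	/* assumed from the comment above */

/* Fold the timestamp into 16-bit halves and map the result into
 * [BA_SETUP_PACKET_OFFSET, BA_SETUP_PACKET_OFFSET + BA_SETUP_MAX_PACKET_THRESHOLD - 1]. */
static uint8_t random_ba_threshold(void)
{
	struct timeval tv;
	uint32_t sec, usec;

	gettimeofday(&tv, NULL);
	sec = (tv.tv_sec & 0xFFFF) + (tv.tv_sec >> 16);
	usec = (tv.tv_usec & 0xFFFF) + (tv.tv_usec >> 16);

	return (((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD) +
		BA_SETUP_PACKET_OFFSET;
}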
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 224e03ade145..5099e5375cb3 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -1830,12 +1830,14 @@ static inline void mwl8k_tx_count_packet(struct ieee80211_sta *sta, u8 tid)
}
static void
-mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
+mwl8k_txq_xmit(struct ieee80211_hw *hw,
+ int index,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb)
{
struct mwl8k_priv *priv = hw->priv;
struct ieee80211_tx_info *tx_info;
struct mwl8k_vif *mwl8k_vif;
- struct ieee80211_sta *sta;
struct ieee80211_hdr *wh;
struct mwl8k_tx_queue *txq;
struct mwl8k_tx_desc *tx;
@@ -1867,7 +1869,6 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
wh = &((struct mwl8k_dma_data *)skb->data)->wh;
tx_info = IEEE80211_SKB_CB(skb);
- sta = tx_info->control.sta;
mwl8k_vif = MWL8K_VIF(tx_info->control.vif);
if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -2019,8 +2020,8 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
tx->pkt_phys_addr = cpu_to_le32(dma);
tx->pkt_len = cpu_to_le16(skb->len);
tx->rate_info = 0;
- if (!priv->ap_fw && tx_info->control.sta != NULL)
- tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id;
+ if (!priv->ap_fw && sta != NULL)
+ tx->peer_id = MWL8K_STA(sta)->peer_id;
else
tx->peer_id = 0;
@@ -4364,7 +4365,9 @@ static void mwl8k_rx_poll(unsigned long data)
/*
* Core driver operations.
*/
-static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mwl8k_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct mwl8k_priv *priv = hw->priv;
int index = skb_get_queue_mapping(skb);
@@ -4376,7 +4379,7 @@ static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
return;
}
- mwl8k_txq_xmit(hw, index, skb);
+ mwl8k_txq_xmit(hw, index, control->sta, skb);
}
static int mwl8k_start(struct ieee80211_hw *hw)
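The mwl8k changes above track a mac80211 interface where the destination station is handed to the driver in the tx call itself instead of being read from the skb's control block. Schematically, the driver-facing shape becomes the following (fragment only, not mwl8k code):

#include <net/mac80211.h>

/* The station, if any, now arrives via control->sta rather than via
 * IEEE80211_SKB_CB(skb)->control.sta. */
static void example_tx(struct ieee80211_hw *hw,
		       struct ieee80211_tx_control *control,
		       struct sk_buff *skb)
{
	struct ieee80211_sta *sta = control->sta;	/* may be NULL */

	/* ... look up per-station state (e.g. a peer id) from sta ... */
}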
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index 33747e131a96..3b5508f982e8 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -7,6 +7,7 @@
#include <linux/if_arp.h>
#include <linux/wireless.h>
#include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
#include <net/iw_handler.h>
#include <net/cfg80211.h>
#include <net/cfg80211-wext.h>
@@ -159,15 +160,13 @@ static int orinoco_ioctl_setwap(struct net_device *dev,
struct orinoco_private *priv = ndev_priv(dev);
int err = -EINPROGRESS; /* Call commit handler */
unsigned long flags;
- static const u8 off_addr[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
- static const u8 any_addr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
/* Enable automatic roaming - no sanity checks are needed */
- if (memcmp(&ap_addr->sa_data, off_addr, ETH_ALEN) == 0 ||
- memcmp(&ap_addr->sa_data, any_addr, ETH_ALEN) == 0) {
+ if (is_zero_ether_addr(ap_addr->sa_data) ||
+ is_broadcast_ether_addr(ap_addr->sa_data)) {
priv->bssid_fixed = 0;
memset(priv->desired_bssid, 0, ETH_ALEN);
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 14037092ba89..1ef1bfe6a9d7 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -76,6 +76,7 @@ struct p54_channel_entry {
u16 freq;
u16 data;
int index;
+ int max_power;
enum ieee80211_band band;
};
@@ -173,6 +174,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
for (i = 0, j = 0; (j < list->band_channel_num[band]) &&
(i < list->entries); i++) {
struct p54_channel_entry *chan = &list->channels[i];
+ struct ieee80211_channel *dest = &tmp->channels[j];
if (chan->band != band)
continue;
@@ -190,14 +192,15 @@ static int p54_generate_band(struct ieee80211_hw *dev,
continue;
}
- tmp->channels[j].band = chan->band;
- tmp->channels[j].center_freq = chan->freq;
+ dest->band = chan->band;
+ dest->center_freq = chan->freq;
+ dest->max_power = chan->max_power;
priv->survey[*chan_num].channel = &tmp->channels[j];
priv->survey[*chan_num].filled = SURVEY_INFO_NOISE_DBM |
SURVEY_INFO_CHANNEL_TIME |
SURVEY_INFO_CHANNEL_TIME_BUSY |
SURVEY_INFO_CHANNEL_TIME_TX;
- tmp->channels[j].hw_value = (*chan_num);
+ dest->hw_value = (*chan_num);
j++;
(*chan_num)++;
}
@@ -229,10 +232,11 @@ err_out:
return ret;
}
-static void p54_update_channel_param(struct p54_channel_list *list,
- u16 freq, u16 data)
+static struct p54_channel_entry *p54_update_channel_param(struct p54_channel_list *list,
+ u16 freq, u16 data)
{
- int band, i;
+ int i;
+ struct p54_channel_entry *entry = NULL;
/*
* usually all lists in the eeprom are mostly sorted.
@@ -241,30 +245,78 @@ static void p54_update_channel_param(struct p54_channel_list *list,
*/
for (i = list->entries; i >= 0; i--) {
if (freq == list->channels[i].freq) {
- list->channels[i].data |= data;
+ entry = &list->channels[i];
break;
}
}
if ((i < 0) && (list->entries < list->max_entries)) {
/* entry does not exist yet. Initialize a new one. */
- band = p54_get_band_from_freq(freq);
+ int band = p54_get_band_from_freq(freq);
/*
 * filter out frequencies which don't belong to
* any supported band.
*/
- if (band < 0)
- return ;
+ if (band >= 0) {
+ i = list->entries++;
+ list->band_channel_num[band]++;
+
+ entry = &list->channels[i];
+ entry->freq = freq;
+ entry->band = band;
+ entry->index = ieee80211_frequency_to_channel(freq);
+ entry->max_power = 0;
+ entry->data = 0;
+ }
+ }
- i = list->entries++;
- list->band_channel_num[band]++;
+ if (entry)
+ entry->data |= data;
- list->channels[i].freq = freq;
- list->channels[i].data = data;
- list->channels[i].band = band;
- list->channels[i].index = ieee80211_frequency_to_channel(freq);
- /* TODO: parse output_limit and fill max_power */
+ return entry;
+}
+
+static int p54_get_maxpower(struct p54_common *priv, void *data)
+{
+ switch (priv->rxhw & PDR_SYNTH_FRONTEND_MASK) {
+ case PDR_SYNTH_FRONTEND_LONGBOW: {
+ struct pda_channel_output_limit_longbow *pda = data;
+ int j;
+ u16 rawpower = 0;
+ pda = data;
+ for (j = 0; j < ARRAY_SIZE(pda->point); j++) {
+ struct pda_channel_output_limit_point_longbow *point =
+ &pda->point[j];
+ rawpower = max_t(u16,
+ rawpower, le16_to_cpu(point->val_qpsk));
+ rawpower = max_t(u16,
+ rawpower, le16_to_cpu(point->val_bpsk));
+ rawpower = max_t(u16,
+ rawpower, le16_to_cpu(point->val_16qam));
+ rawpower = max_t(u16,
+ rawpower, le16_to_cpu(point->val_64qam));
+ }
+ /* longbow seems to use 1/16 dBm units */
+ return rawpower / 16;
+ }
+
+ case PDR_SYNTH_FRONTEND_DUETTE3:
+ case PDR_SYNTH_FRONTEND_DUETTE2:
+ case PDR_SYNTH_FRONTEND_FRISBEE:
+ case PDR_SYNTH_FRONTEND_XBOW: {
+ struct pda_channel_output_limit *pda = data;
+ u8 rawpower = 0;
+ rawpower = max(rawpower, pda->val_qpsk);
+ rawpower = max(rawpower, pda->val_bpsk);
+ rawpower = max(rawpower, pda->val_16qam);
+ rawpower = max(rawpower, pda->val_64qam);
+ /* raw values are in 1/4 dBm units */
+ return rawpower / 4;
+ }
+
+ default:
+ return 20;
}
}
@@ -315,12 +367,19 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
}
if (i < priv->output_limit->entries) {
- freq = le16_to_cpup((__le16 *) (i *
- priv->output_limit->entry_size +
- priv->output_limit->offset +
- priv->output_limit->data));
-
- p54_update_channel_param(list, freq, CHAN_HAS_LIMIT);
+ struct p54_channel_entry *tmp;
+
+ void *data = (void *) ((unsigned long) i *
+ priv->output_limit->entry_size +
+ priv->output_limit->offset +
+ priv->output_limit->data);
+
+ freq = le16_to_cpup((__le16 *) data);
+ tmp = p54_update_channel_param(list, freq,
+ CHAN_HAS_LIMIT);
+ if (tmp) {
+ tmp->max_power = p54_get_maxpower(priv, data);
+ }
}
if (i < priv->curve_data->entries) {
@@ -834,11 +893,12 @@ good_eeprom:
goto err;
}
+ priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK;
+
err = p54_generate_channel_lists(dev);
if (err)
goto err;
- priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK;
if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW)
p54_init_xbow_synth(priv);
if (!(synth & PDR_SYNTH_24_GHZ_DISABLED))
diff --git a/drivers/net/wireless/p54/eeprom.h b/drivers/net/wireless/p54/eeprom.h
index afde72b84606..20ebe39a3f4e 100644
--- a/drivers/net/wireless/p54/eeprom.h
+++ b/drivers/net/wireless/p54/eeprom.h
@@ -57,6 +57,18 @@ struct pda_channel_output_limit {
u8 rate_set_size;
} __packed;
+struct pda_channel_output_limit_point_longbow {
+ __le16 val_bpsk;
+ __le16 val_qpsk;
+ __le16 val_16qam;
+ __le16 val_64qam;
+} __packed;
+
+struct pda_channel_output_limit_longbow {
+ __le16 freq;
+ struct pda_channel_output_limit_point_longbow point[3];
+} __packed;
+
struct pda_pa_curve_data_sample_rev0 {
u8 rf_power;
u8 pa_detector;
diff --git a/drivers/net/wireless/p54/lmac.h b/drivers/net/wireless/p54/lmac.h
index 3d8d622bec55..de1d46bf97df 100644
--- a/drivers/net/wireless/p54/lmac.h
+++ b/drivers/net/wireless/p54/lmac.h
@@ -526,7 +526,9 @@ int p54_init_leds(struct p54_common *priv);
void p54_unregister_leds(struct p54_common *priv);
/* xmit functions */
-void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb);
+void p54_tx_80211(struct ieee80211_hw *dev,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
int p54_tx_cancel(struct p54_common *priv, __le32 req_id);
void p54_tx(struct p54_common *priv, struct sk_buff *skb);
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 7cffea795ad2..aadda99989c0 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -139,6 +139,7 @@ static int p54_beacon_format_ie_tim(struct sk_buff *skb)
static int p54_beacon_update(struct p54_common *priv,
struct ieee80211_vif *vif)
{
+ struct ieee80211_tx_control control = { };
struct sk_buff *beacon;
int ret;
@@ -158,7 +159,7 @@ static int p54_beacon_update(struct p54_common *priv,
* to cancel the old beacon template by hand, instead the firmware
* will release the previous one through the feedback mechanism.
*/
- p54_tx_80211(priv->hw, beacon);
+ p54_tx_80211(priv->hw, &control, beacon);
priv->tsf_high32 = 0;
priv->tsf_low32 = 0;
@@ -514,6 +515,17 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
if (modparam_nohwcrypt)
return -EOPNOTSUPP;
+ if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+ /*
+ * Unfortunately most/all firmwares are trying to decrypt
+ * incoming management frames if a suitable key can be found.
+ * However, in doing so the data in these frames gets
+ * corrupted. So, we can't have firmware supported crypto
+ * offload in this case.
+ */
+ return -EOPNOTSUPP;
+ }
+
mutex_lock(&priv->conf_mutex);
if (cmd == SET_KEY) {
switch (key->cipher) {
@@ -737,6 +749,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
+ IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_REPORTS_TX_ACK_STATUS;
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 89318adc8c7f..b4390797d78c 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -488,6 +488,58 @@ static int p54p_open(struct ieee80211_hw *dev)
return 0;
}
+static void p54p_firmware_step2(const struct firmware *fw,
+ void *context)
+{
+ struct p54p_priv *priv = context;
+ struct ieee80211_hw *dev = priv->common.hw;
+ struct pci_dev *pdev = priv->pdev;
+ int err;
+
+ if (!fw) {
+ dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
+ err = -ENOENT;
+ goto out;
+ }
+
+ priv->firmware = fw;
+
+ err = p54p_open(dev);
+ if (err)
+ goto out;
+ err = p54_read_eeprom(dev);
+ p54p_stop(dev);
+ if (err)
+ goto out;
+
+ err = p54_register_common(dev, &pdev->dev);
+ if (err)
+ goto out;
+
+out:
+
+ complete(&priv->fw_loaded);
+
+ if (err) {
+ struct device *parent = pdev->dev.parent;
+
+ if (parent)
+ device_lock(parent);
+
+ /*
+ * This will indirectly result in a call to p54p_remove.
+ * Hence, we don't need to bother with freeing any
+ * allocated resources at all.
+ */
+ device_release_driver(&pdev->dev);
+
+ if (parent)
+ device_unlock(parent);
+ }
+
+ pci_dev_put(pdev);
+}
+
static int __devinit p54p_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -496,6 +548,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
unsigned long mem_addr, mem_len;
int err;
+ pci_dev_get(pdev);
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Cannot enable new PCI device\n");
@@ -537,6 +590,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
priv = dev->priv;
priv->pdev = pdev;
+ init_completion(&priv->fw_loaded);
SET_IEEE80211_DEV(dev, &pdev->dev);
pci_set_drvdata(pdev, dev);
@@ -561,32 +615,12 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
spin_lock_init(&priv->lock);
tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);
- err = request_firmware(&priv->firmware, "isl3886pci",
- &priv->pdev->dev);
- if (err) {
- dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
- err = request_firmware(&priv->firmware, "isl3886",
- &priv->pdev->dev);
- if (err)
- goto err_free_common;
- }
-
- err = p54p_open(dev);
- if (err)
- goto err_free_common;
- err = p54_read_eeprom(dev);
- p54p_stop(dev);
- if (err)
- goto err_free_common;
-
- err = p54_register_common(dev, &pdev->dev);
- if (err)
- goto err_free_common;
-
- return 0;
+ err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
+ &priv->pdev->dev, GFP_KERNEL,
+ priv, p54p_firmware_step2);
+ if (!err)
+ return 0;
- err_free_common:
- release_firmware(priv->firmware);
pci_free_consistent(pdev, sizeof(*priv->ring_control),
priv->ring_control, priv->ring_control_dma);
@@ -601,6 +635,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
pci_release_regions(pdev);
err_disable_dev:
pci_disable_device(pdev);
+ pci_dev_put(pdev);
return err;
}
@@ -612,8 +647,9 @@ static void __devexit p54p_remove(struct pci_dev *pdev)
if (!dev)
return;
- p54_unregister_common(dev);
priv = dev->priv;
+ wait_for_completion(&priv->fw_loaded);
+ p54_unregister_common(dev);
release_firmware(priv->firmware);
pci_free_consistent(pdev, sizeof(*priv->ring_control),
priv->ring_control, priv->ring_control_dma);
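The p54pci hunks above switch to asynchronous firmware loading: probe() only schedules the request, the callback finishes device setup, and remove() waits on a completion so it cannot race the callback, while the extra pci_dev_get/put keeps the device alive until the callback runs. A schematic of that pattern with illustrative names, not the driver's:

#include <linux/completion.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

struct example_priv {
	struct pci_dev *pdev;
	const struct firmware *fw;
	struct completion fw_loaded;
};

static void example_fw_cb(const struct firmware *fw, void *context)
{
	struct example_priv *priv = context;

	priv->fw = fw;			/* NULL if the firmware was not found */
	/* ... finish bringing the device up here ... */
	complete(&priv->fw_loaded);
	pci_dev_put(priv->pdev);	/* drop the reference taken before the request */
}

static int example_schedule_fw_load(struct example_priv *priv)
{
	init_completion(&priv->fw_loaded);
	pci_dev_get(priv->pdev);	/* keep the device around for the callback */
	return request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
				       &priv->pdev->dev, GFP_KERNEL,
				       priv, example_fw_cb);
}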
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index 7aa509f7e387..68405c142f97 100644
--- a/drivers/net/wireless/p54/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
@@ -105,6 +105,7 @@ struct p54p_priv {
struct sk_buff *tx_buf_data[32];
struct sk_buff *tx_buf_mgmt[4];
struct completion boot_comp;
+ struct completion fw_loaded;
};
#endif /* P54USB_H */
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index f38786e02623..5861e13a6fd8 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -676,8 +676,9 @@ int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb)
EXPORT_SYMBOL_GPL(p54_rx);
static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
- struct ieee80211_tx_info *info, u8 *queue,
- u32 *extra_len, u16 *flags, u16 *aid,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ u8 *queue, u32 *extra_len, u16 *flags, u16 *aid,
bool *burst_possible)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -746,8 +747,8 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
}
}
- if (info->control.sta)
- *aid = info->control.sta->aid;
+ if (sta)
+ *aid = sta->aid;
break;
}
}
@@ -767,7 +768,9 @@ static u8 p54_convert_algo(u32 cipher)
}
}
-void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
+void p54_tx_80211(struct ieee80211_hw *dev,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct p54_common *priv = dev->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -784,7 +787,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
u8 nrates = 0, nremaining = 8;
bool burst_allowed = false;
- p54_tx_80211_header(priv, skb, info, &queue, &extra_len,
+ p54_tx_80211_header(priv, skb, info, control->sta, &queue, &extra_len,
&hdr_flags, &aid, &burst_allowed);
if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 7a4ae9ee1c63..bd1f0cb56085 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1959,9 +1959,6 @@ static int rndis_scan(struct wiphy *wiphy,
*/
rndis_check_bssid_list(usbdev, NULL, NULL);
- if (!request)
- return -EINVAL;
-
if (priv->scan_request && priv->scan_request != request)
return -EBUSY;
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 64328af496f5..e3a2d9070cf6 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -205,7 +205,7 @@ static int rt2400pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
u32 reg;
rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
- return rt2x00_get_field32(reg, GPIOCSR_BIT0);
+ return rt2x00_get_field32(reg, GPIOCSR_VAL0);
}
#ifdef CONFIG_RT2X00_LIB_LEDS
@@ -1629,7 +1629,7 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
* rfkill switch GPIO pin correctly.
*/
rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
- rt2x00_set_field32(&reg, GPIOCSR_BIT8, 1);
+ rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1);
rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg);
/*
@@ -1789,7 +1789,6 @@ static const struct data_queue_desc rt2400pci_queue_atim = {
static const struct rt2x00_ops rt2400pci_ops = {
.name = KBUILD_MODNAME,
- .max_sta_intf = 1,
.max_ap_intf = 1,
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index 7564ae992b73..e4b07f0aa3cc 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -660,17 +660,26 @@
/*
* GPIOCSR: GPIO control register.
+ * GPIOCSR_VALx: Actual GPIO pin x value
+ * GPIOCSR_DIRx: GPIO direction: 0 = output; 1 = input
*/
#define GPIOCSR 0x0120
-#define GPIOCSR_BIT0 FIELD32(0x00000001)
-#define GPIOCSR_BIT1 FIELD32(0x00000002)
-#define GPIOCSR_BIT2 FIELD32(0x00000004)
-#define GPIOCSR_BIT3 FIELD32(0x00000008)
-#define GPIOCSR_BIT4 FIELD32(0x00000010)
-#define GPIOCSR_BIT5 FIELD32(0x00000020)
-#define GPIOCSR_BIT6 FIELD32(0x00000040)
-#define GPIOCSR_BIT7 FIELD32(0x00000080)
-#define GPIOCSR_BIT8 FIELD32(0x00000100)
+#define GPIOCSR_VAL0 FIELD32(0x00000001)
+#define GPIOCSR_VAL1 FIELD32(0x00000002)
+#define GPIOCSR_VAL2 FIELD32(0x00000004)
+#define GPIOCSR_VAL3 FIELD32(0x00000008)
+#define GPIOCSR_VAL4 FIELD32(0x00000010)
+#define GPIOCSR_VAL5 FIELD32(0x00000020)
+#define GPIOCSR_VAL6 FIELD32(0x00000040)
+#define GPIOCSR_VAL7 FIELD32(0x00000080)
+#define GPIOCSR_DIR0 FIELD32(0x00000100)
+#define GPIOCSR_DIR1 FIELD32(0x00000200)
+#define GPIOCSR_DIR2 FIELD32(0x00000400)
+#define GPIOCSR_DIR3 FIELD32(0x00000800)
+#define GPIOCSR_DIR4 FIELD32(0x00001000)
+#define GPIOCSR_DIR5 FIELD32(0x00002000)
+#define GPIOCSR_DIR6 FIELD32(0x00004000)
+#define GPIOCSR_DIR7 FIELD32(0x00008000)
/*
* BBPPCSR: BBP Pin control register.
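The GPIOCSR_BITx to GPIOCSR_VALx/GPIOCSR_DIRx renames above (and the matching ones further down for rt2500 and rt2800) only rename one-bit register fields; reading or writing such a field is still a mask-and-shift. A standalone illustration of what the rt2x00 FIELD32()/rt2x00_get_field32()/rt2x00_set_field32() helpers amount to (simplified, assumes a non-zero mask):

#include <stdint.h>

/* The lowest set bit of the mask doubles as the field's shift factor. */
static uint32_t field_lsb(uint32_t mask)
{
	return mask & ~(mask - 1);
}

static uint32_t get_field32(uint32_t reg, uint32_t mask)
{
	return (reg & mask) / field_lsb(mask);
}

static uint32_t set_field32(uint32_t reg, uint32_t mask, uint32_t value)
{
	return (reg & ~mask) | ((value * field_lsb(mask)) & mask);
}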
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 3de0406735f6..479d756e275b 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -205,7 +205,7 @@ static int rt2500pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
u32 reg;
rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
- return rt2x00_get_field32(reg, GPIOCSR_BIT0);
+ return rt2x00_get_field32(reg, GPIOCSR_VAL0);
}
#ifdef CONFIG_RT2X00_LIB_LEDS
@@ -2081,7 +2081,6 @@ static const struct data_queue_desc rt2500pci_queue_atim = {
static const struct rt2x00_ops rt2500pci_ops = {
.name = KBUILD_MODNAME,
- .max_sta_intf = 1,
.max_ap_intf = 1,
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index 2aad7ba8a100..9c10068e4987 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -789,16 +789,18 @@
/*
* GPIOCSR: GPIO control register.
+ * GPIOCSR_VALx: GPIO value
+ * GPIOCSR_DIRx: GPIO direction: 0 = output; 1 = input
*/
#define GPIOCSR 0x0120
-#define GPIOCSR_BIT0 FIELD32(0x00000001)
-#define GPIOCSR_BIT1 FIELD32(0x00000002)
-#define GPIOCSR_BIT2 FIELD32(0x00000004)
-#define GPIOCSR_BIT3 FIELD32(0x00000008)
-#define GPIOCSR_BIT4 FIELD32(0x00000010)
-#define GPIOCSR_BIT5 FIELD32(0x00000020)
-#define GPIOCSR_BIT6 FIELD32(0x00000040)
-#define GPIOCSR_BIT7 FIELD32(0x00000080)
+#define GPIOCSR_VAL0 FIELD32(0x00000001)
+#define GPIOCSR_VAL1 FIELD32(0x00000002)
+#define GPIOCSR_VAL2 FIELD32(0x00000004)
+#define GPIOCSR_VAL3 FIELD32(0x00000008)
+#define GPIOCSR_VAL4 FIELD32(0x00000010)
+#define GPIOCSR_VAL5 FIELD32(0x00000020)
+#define GPIOCSR_VAL6 FIELD32(0x00000040)
+#define GPIOCSR_VAL7 FIELD32(0x00000080)
#define GPIOCSR_DIR0 FIELD32(0x00000100)
#define GPIOCSR_DIR1 FIELD32(0x00000200)
#define GPIOCSR_DIR2 FIELD32(0x00000400)
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 89fee311d8fd..a12e84f892be 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -283,7 +283,7 @@ static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
u16 reg;
rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
- return rt2x00_get_field16(reg, MAC_CSR19_BIT7);
+ return rt2x00_get_field16(reg, MAC_CSR19_VAL7);
}
#ifdef CONFIG_RT2X00_LIB_LEDS
@@ -1786,7 +1786,7 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
* rfkill switch GPIO pin correctly.
*/
rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
- rt2x00_set_field16(&reg, MAC_CSR19_BIT8, 0);
+ rt2x00_set_field16(&reg, MAC_CSR19_DIR0, 0);
rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg);
/*
@@ -1896,7 +1896,6 @@ static const struct data_queue_desc rt2500usb_queue_atim = {
static const struct rt2x00_ops rt2500usb_ops = {
.name = KBUILD_MODNAME,
- .max_sta_intf = 1,
.max_ap_intf = 1,
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index 196bd5103e4f..1b91a4cef965 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -187,17 +187,26 @@
/*
* MAC_CSR19: GPIO control register.
+ * MAC_CSR19_VALx: GPIO value
+ * MAC_CSR19_DIRx: GPIO direction: 0 = input; 1 = output
*/
#define MAC_CSR19 0x0426
-#define MAC_CSR19_BIT0 FIELD16(0x0001)
-#define MAC_CSR19_BIT1 FIELD16(0x0002)
-#define MAC_CSR19_BIT2 FIELD16(0x0004)
-#define MAC_CSR19_BIT3 FIELD16(0x0008)
-#define MAC_CSR19_BIT4 FIELD16(0x0010)
-#define MAC_CSR19_BIT5 FIELD16(0x0020)
-#define MAC_CSR19_BIT6 FIELD16(0x0040)
-#define MAC_CSR19_BIT7 FIELD16(0x0080)
-#define MAC_CSR19_BIT8 FIELD16(0x0100)
+#define MAC_CSR19_VAL0 FIELD16(0x0001)
+#define MAC_CSR19_VAL1 FIELD16(0x0002)
+#define MAC_CSR19_VAL2 FIELD16(0x0004)
+#define MAC_CSR19_VAL3 FIELD16(0x0008)
+#define MAC_CSR19_VAL4 FIELD16(0x0010)
+#define MAC_CSR19_VAL5 FIELD16(0x0020)
+#define MAC_CSR19_VAL6 FIELD16(0x0040)
+#define MAC_CSR19_VAL7 FIELD16(0x0080)
+#define MAC_CSR19_DIR0 FIELD16(0x0100)
+#define MAC_CSR19_DIR1 FIELD16(0x0200)
+#define MAC_CSR19_DIR2 FIELD16(0x0400)
+#define MAC_CSR19_DIR3 FIELD16(0x0800)
+#define MAC_CSR19_DIR4 FIELD16(0x1000)
+#define MAC_CSR19_DIR5 FIELD16(0x2000)
+#define MAC_CSR19_DIR6 FIELD16(0x4000)
+#define MAC_CSR19_DIR7 FIELD16(0x8000)
/*
* MAC_CSR20: LED control register.
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index e252e9bafd0e..6d67c3ede651 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -439,26 +439,33 @@
#define WMM_TXOP1_CFG_AC3TXOP FIELD32(0xffff0000)
/*
- * GPIO_CTRL_CFG:
- * GPIOD: GPIO direction, 0: Output, 1: Input
- */
-#define GPIO_CTRL_CFG 0x0228
-#define GPIO_CTRL_CFG_BIT0 FIELD32(0x00000001)
-#define GPIO_CTRL_CFG_BIT1 FIELD32(0x00000002)
-#define GPIO_CTRL_CFG_BIT2 FIELD32(0x00000004)
-#define GPIO_CTRL_CFG_BIT3 FIELD32(0x00000008)
-#define GPIO_CTRL_CFG_BIT4 FIELD32(0x00000010)
-#define GPIO_CTRL_CFG_BIT5 FIELD32(0x00000020)
-#define GPIO_CTRL_CFG_BIT6 FIELD32(0x00000040)
-#define GPIO_CTRL_CFG_BIT7 FIELD32(0x00000080)
-#define GPIO_CTRL_CFG_GPIOD_BIT0 FIELD32(0x00000100)
-#define GPIO_CTRL_CFG_GPIOD_BIT1 FIELD32(0x00000200)
-#define GPIO_CTRL_CFG_GPIOD_BIT2 FIELD32(0x00000400)
-#define GPIO_CTRL_CFG_GPIOD_BIT3 FIELD32(0x00000800)
-#define GPIO_CTRL_CFG_GPIOD_BIT4 FIELD32(0x00001000)
-#define GPIO_CTRL_CFG_GPIOD_BIT5 FIELD32(0x00002000)
-#define GPIO_CTRL_CFG_GPIOD_BIT6 FIELD32(0x00004000)
-#define GPIO_CTRL_CFG_GPIOD_BIT7 FIELD32(0x00008000)
+ * GPIO_CTRL:
+ * GPIO_CTRL_VALx: GPIO value
+ * GPIO_CTRL_DIRx: GPIO direction: 0 = output; 1 = input
+ */
+#define GPIO_CTRL 0x0228
+#define GPIO_CTRL_VAL0 FIELD32(0x00000001)
+#define GPIO_CTRL_VAL1 FIELD32(0x00000002)
+#define GPIO_CTRL_VAL2 FIELD32(0x00000004)
+#define GPIO_CTRL_VAL3 FIELD32(0x00000008)
+#define GPIO_CTRL_VAL4 FIELD32(0x00000010)
+#define GPIO_CTRL_VAL5 FIELD32(0x00000020)
+#define GPIO_CTRL_VAL6 FIELD32(0x00000040)
+#define GPIO_CTRL_VAL7 FIELD32(0x00000080)
+#define GPIO_CTRL_DIR0 FIELD32(0x00000100)
+#define GPIO_CTRL_DIR1 FIELD32(0x00000200)
+#define GPIO_CTRL_DIR2 FIELD32(0x00000400)
+#define GPIO_CTRL_DIR3 FIELD32(0x00000800)
+#define GPIO_CTRL_DIR4 FIELD32(0x00001000)
+#define GPIO_CTRL_DIR5 FIELD32(0x00002000)
+#define GPIO_CTRL_DIR6 FIELD32(0x00004000)
+#define GPIO_CTRL_DIR7 FIELD32(0x00008000)
+#define GPIO_CTRL_VAL8 FIELD32(0x00010000)
+#define GPIO_CTRL_VAL9 FIELD32(0x00020000)
+#define GPIO_CTRL_VAL10 FIELD32(0x00040000)
+#define GPIO_CTRL_DIR8 FIELD32(0x01000000)
+#define GPIO_CTRL_DIR9 FIELD32(0x02000000)
+#define GPIO_CTRL_DIR10 FIELD32(0x04000000)
/*
* MCU_CMD_CFG
@@ -1936,6 +1943,11 @@ struct mac_iveiv_entry {
#define BBP47_TSSI_ADC6 FIELD8(0x80)
/*
+ * BBP 49
+ */
+#define BBP49_UPDATE_FLAG FIELD8(0x01)
+
+/*
* BBP 109
*/
#define BBP109_TX0_POWER FIELD8(0x0f)
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index b93516d832fb..540c94f8505a 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -923,8 +923,8 @@ int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
return rt2x00_get_field32(reg, WLAN_GPIO_IN_BIT0);
} else {
- rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
- return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2);
+ rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+ return rt2x00_get_field32(reg, GPIO_CTRL_VAL2);
}
}
EXPORT_SYMBOL_GPL(rt2800_rfkill_poll);
@@ -1570,10 +1570,10 @@ static void rt2800_set_ant_diversity(struct rt2x00_dev *rt2x00dev,
rt2800_mcu_request(rt2x00dev, MCU_ANT_SELECT, 0xff,
eesk_pin, 0);
- rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
- rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
- rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, gpio_bit3);
- rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+ rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+ rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, gpio_bit3);
+ rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
}
void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
@@ -1615,6 +1615,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
case 1:
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3352) ||
rt2x00_rt(rt2x00dev, RT3390)) {
rt2x00_eeprom_read(rt2x00dev,
EEPROM_NIC_CONF1, &eeprom);
@@ -1762,36 +1763,15 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD,
+ rt2x00dev->default_ant.rx_chain_num <= 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD,
+ rt2x00dev->default_ant.rx_chain_num <= 2);
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
- if (rt2x00_rt(rt2x00dev, RT3390)) {
- rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD,
- rt2x00dev->default_ant.rx_chain_num == 1);
- rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD,
- rt2x00dev->default_ant.tx_chain_num == 1);
- } else {
- rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
- rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
- rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
- rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
-
- switch (rt2x00dev->default_ant.tx_chain_num) {
- case 1:
- rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
- /* fall through */
- case 2:
- rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
- break;
- }
-
- switch (rt2x00dev->default_ant.rx_chain_num) {
- case 1:
- rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
- /* fall through */
- case 2:
- rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
- break;
- }
- }
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD,
+ rt2x00dev->default_ant.tx_chain_num <= 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD,
+ rt2x00dev->default_ant.tx_chain_num <= 2);
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
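(Worked example for the simplified chain power-down logic above, with assumed values: rx_chain_num = 2 gives RFCSR1_RX1_PD = (2 <= 1) = 0 and RFCSR1_RX2_PD = (2 <= 2) = 1, so chains 0 and 1 stay enabled while chain 2 is powered down; rx_chain_num = 3 makes both comparisons false and leaves all three chains enabled. The TX side follows the same pattern.)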
rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
@@ -1995,13 +1975,13 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 29, 0x9f);
}
- rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
- rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT7, 0);
+ rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+ rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);
if (rf->channel <= 14)
- rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT7, 1);
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
else
- rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT7, 0);
- rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 0);
+ rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
@@ -2053,6 +2033,60 @@ static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
}
}
+static void rt2800_config_channel_rf3322(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
+{
+ u8 rfcsr;
+
+ rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
+ rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
+
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x42);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x1c);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x00);
+
+ if (info->default_power1 > POWER_BOUND)
+ rt2800_rfcsr_write(rt2x00dev, 47, POWER_BOUND);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 47, info->default_power1);
+
+ if (info->default_power2 > POWER_BOUND)
+ rt2800_rfcsr_write(rt2x00dev, 48, POWER_BOUND);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 48, info->default_power2);
+
+ rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+ if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
+ rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
+
+ rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
+
+ if (rt2x00dev->default_ant.tx_chain_num == 2)
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
+
+ if (rt2x00dev->default_ant.rx_chain_num == 2)
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
+
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
+
+ rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+ rt2800_rfcsr_write(rt2x00dev, 31, 80);
+}
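The TX-power and frequency-offset writes above clamp their inputs to POWER_BOUND/FREQ_OFFSET_BOUND with open-coded if/else; an equivalent, purely illustrative formulation using the kernel's min_t() helper would be:

	rt2800_rfcsr_write(rt2x00dev, 47,
			   min_t(int, info->default_power1, POWER_BOUND));
	rt2800_rfcsr_write(rt2x00dev, 48,
			   min_t(int, info->default_power2, POWER_BOUND));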
+
static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
struct ieee80211_conf *conf,
struct rf_channel *rf,
@@ -2182,6 +2216,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
case RF3290:
rt2800_config_channel_rf3290(rt2x00dev, conf, rf, info);
break;
+ case RF3322:
+ rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info);
+ break;
case RF5360:
case RF5370:
case RF5372:
@@ -2194,6 +2231,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
}
if (rt2x00_rf(rt2x00dev, RF3290) ||
+ rt2x00_rf(rt2x00dev, RF3322) ||
rt2x00_rf(rt2x00dev, RF5360) ||
rt2x00_rf(rt2x00dev, RF5370) ||
rt2x00_rf(rt2x00dev, RF5372) ||
@@ -2212,10 +2250,17 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
/*
* Change BBP settings
*/
- rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
- rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
- rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
- rt2800_bbp_write(rt2x00dev, 86, 0);
+ if (rt2x00_rt(rt2x00dev, RT3352)) {
+ rt2800_bbp_write(rt2x00dev, 27, 0x0);
+ rt2800_bbp_write(rt2x00dev, 62, 0x26 + rt2x00dev->lna_gain);
+ rt2800_bbp_write(rt2x00dev, 27, 0x20);
+ rt2800_bbp_write(rt2x00dev, 62, 0x26 + rt2x00dev->lna_gain);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
+ rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
+ rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
+ rt2800_bbp_write(rt2x00dev, 86, 0);
+ }
if (rf->channel <= 14) {
if (!rt2x00_rt(rt2x00dev, RT5390) &&
@@ -2310,6 +2355,15 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_register_read(rt2x00dev, CH_IDLE_STA, &reg);
rt2800_register_read(rt2x00dev, CH_BUSY_STA, &reg);
rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg);
+
+ /*
+ * Clear update flag
+ */
+ if (rt2x00_rt(rt2x00dev, RT3352)) {
+ rt2800_bbp_read(rt2x00dev, 49, &bbp);
+ rt2x00_set_field8(&bbp, BBP49_UPDATE_FLAG, 0);
+ rt2800_bbp_write(rt2x00dev, 49, bbp);
+ }
}
static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
@@ -2821,23 +2875,32 @@ EXPORT_SYMBOL_GPL(rt2800_link_stats);
static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
{
+ u8 vgc;
+
if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT3390) ||
+ rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392))
- return 0x1c + (2 * rt2x00dev->lna_gain);
+ vgc = 0x1c + (2 * rt2x00dev->lna_gain);
else
- return 0x2e + rt2x00dev->lna_gain;
+ vgc = 0x2e + rt2x00dev->lna_gain;
+ } else { /* 5 GHz band */
+ if (rt2x00_rt(rt2x00dev, RT3572))
+ vgc = 0x22 + (rt2x00dev->lna_gain * 5) / 3;
+ else {
+ if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
+ vgc = 0x32 + (rt2x00dev->lna_gain * 5) / 3;
+ else
+ vgc = 0x3a + (rt2x00dev->lna_gain * 5) / 3;
+ }
}
- if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
- return 0x32 + (rt2x00dev->lna_gain * 5) / 3;
- else
- return 0x3a + (rt2x00dev->lna_gain * 5) / 3;
+ return vgc;
}
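(Illustrative numbers only, assuming lna_gain = 0x0a: a 2.4 GHz RT5390 gets vgc = 0x1c + 2 * 0x0a = 0x30; a 5 GHz RT3572 gets vgc = 0x22 + (0x0a * 5) / 3 = 0x32; the non-RT3572 5 GHz branch only changes the base to 0x32 or 0x3a depending on HT40.)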
static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
@@ -2998,11 +3061,15 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000030);
+ } else if (rt2x00_rt(rt2x00dev, RT3352)) {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000402);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
} else if (rt2x00_rt(rt2x00dev, RT3572)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
} else if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392)) {
+ rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -3378,6 +3445,11 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_wait_bbp_ready(rt2x00dev)))
return -EACCES;
+ if (rt2x00_rt(rt2x00dev, RT3352)) {
+ rt2800_bbp_write(rt2x00dev, 3, 0x00);
+ rt2800_bbp_write(rt2x00dev, 4, 0x50);
+ }
+
if (rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392)) {
@@ -3388,15 +3460,20 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
if (rt2800_is_305x_soc(rt2x00dev) ||
rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT3352) ||
rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 31, 0x08);
+ if (rt2x00_rt(rt2x00dev, RT3352))
+ rt2800_bbp_write(rt2x00dev, 47, 0x48);
+
rt2800_bbp_write(rt2x00dev, 65, 0x2c);
rt2800_bbp_write(rt2x00dev, 66, 0x38);
if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT3352) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 68, 0x0b);
@@ -3405,6 +3482,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 69, 0x16);
rt2800_bbp_write(rt2x00dev, 73, 0x12);
} else if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT3352) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_bbp_write(rt2x00dev, 69, 0x12);
@@ -3436,15 +3514,17 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
} else if (rt2800_is_305x_soc(rt2x00dev)) {
rt2800_bbp_write(rt2x00dev, 78, 0x0e);
rt2800_bbp_write(rt2x00dev, 80, 0x08);
- } else {
- rt2800_bbp_write(rt2x00dev, 81, 0x37);
- }
-
- if (rt2x00_rt(rt2x00dev, RT3290)) {
+ } else if (rt2x00_rt(rt2x00dev, RT3290)) {
rt2800_bbp_write(rt2x00dev, 74, 0x0b);
rt2800_bbp_write(rt2x00dev, 79, 0x18);
rt2800_bbp_write(rt2x00dev, 80, 0x09);
rt2800_bbp_write(rt2x00dev, 81, 0x33);
+ } else if (rt2x00_rt(rt2x00dev, RT3352)) {
+ rt2800_bbp_write(rt2x00dev, 78, 0x0e);
+ rt2800_bbp_write(rt2x00dev, 80, 0x08);
+ rt2800_bbp_write(rt2x00dev, 81, 0x37);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 81, 0x37);
}
rt2800_bbp_write(rt2x00dev, 82, 0x62);
@@ -3465,18 +3545,21 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 84, 0x99);
if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT3352) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 86, 0x38);
else
rt2800_bbp_write(rt2x00dev, 86, 0x00);
- if (rt2x00_rt(rt2x00dev, RT5392))
+ if (rt2x00_rt(rt2x00dev, RT3352) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 88, 0x90);
rt2800_bbp_write(rt2x00dev, 91, 0x04);
if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT3352) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 92, 0x02);
@@ -3493,6 +3576,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT3352) ||
rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392) ||
@@ -3502,6 +3586,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 103, 0x00);
if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT3352) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 104, 0x92);
@@ -3510,6 +3595,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 105, 0x01);
else if (rt2x00_rt(rt2x00dev, RT3290))
rt2800_bbp_write(rt2x00dev, 105, 0x1c);
+ else if (rt2x00_rt(rt2x00dev, RT3352))
+ rt2800_bbp_write(rt2x00dev, 105, 0x34);
else if (rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 105, 0x3c);
@@ -3519,11 +3606,16 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 106, 0x03);
+ else if (rt2x00_rt(rt2x00dev, RT3352))
+ rt2800_bbp_write(rt2x00dev, 106, 0x05);
else if (rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 106, 0x12);
else
rt2800_bbp_write(rt2x00dev, 106, 0x35);
+ if (rt2x00_rt(rt2x00dev, RT3352))
+ rt2800_bbp_write(rt2x00dev, 120, 0x50);
+
if (rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392))
@@ -3534,6 +3626,9 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 135, 0xf6);
}
+ if (rt2x00_rt(rt2x00dev, RT3352))
+ rt2800_bbp_write(rt2x00dev, 137, 0x0f);
+
if (rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3390) ||
@@ -3574,6 +3669,28 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 3, value);
}
+ if (rt2x00_rt(rt2x00dev, RT3352)) {
+ rt2800_bbp_write(rt2x00dev, 163, 0xbd);
+ /* Set ITxBF timeout to 0x9c40=1000msec */
+ rt2800_bbp_write(rt2x00dev, 179, 0x02);
+ rt2800_bbp_write(rt2x00dev, 180, 0x00);
+ rt2800_bbp_write(rt2x00dev, 182, 0x40);
+ rt2800_bbp_write(rt2x00dev, 180, 0x01);
+ rt2800_bbp_write(rt2x00dev, 182, 0x9c);
+ rt2800_bbp_write(rt2x00dev, 179, 0x00);
+ /* Reprogram the inband interface to put the right values in the RXWI */
+ rt2800_bbp_write(rt2x00dev, 142, 0x04);
+ rt2800_bbp_write(rt2x00dev, 143, 0x3b);
+ rt2800_bbp_write(rt2x00dev, 142, 0x06);
+ rt2800_bbp_write(rt2x00dev, 143, 0xa0);
+ rt2800_bbp_write(rt2x00dev, 142, 0x07);
+ rt2800_bbp_write(rt2x00dev, 143, 0xa1);
+ rt2800_bbp_write(rt2x00dev, 142, 0x08);
+ rt2800_bbp_write(rt2x00dev, 143, 0xa2);
+
+ rt2800_bbp_write(rt2x00dev, 148, 0xc8);
+ }
+
if (rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392)) {
int ant, div_mode;
@@ -3587,16 +3704,16 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
u32 reg;
- rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
- rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
- rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT6, 0);
- rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 0);
- rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 0);
+ rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+ rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0);
if (ant == 0)
- rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 1);
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1);
else if (ant == 1)
- rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 1);
- rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1);
+ rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
}
/* This chip has hardware antenna diversity */
@@ -3707,6 +3824,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
!rt2x00_rt(rt2x00dev, RT3071) &&
!rt2x00_rt(rt2x00dev, RT3090) &&
!rt2x00_rt(rt2x00dev, RT3290) &&
+ !rt2x00_rt(rt2x00dev, RT3352) &&
!rt2x00_rt(rt2x00dev, RT3390) &&
!rt2x00_rt(rt2x00dev, RT3572) &&
!rt2x00_rt(rt2x00dev, RT5390) &&
@@ -3903,6 +4021,70 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
return 0;
+ } else if (rt2x00_rt(rt2x00dev, RT3352)) {
+ rt2800_rfcsr_write(rt2x00dev, 0, 0xf0);
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x23);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0x50);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x18);
+ rt2800_rfcsr_write(rt2x00dev, 4, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x33);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 8, 0xf1);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0xd2);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x42);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x1c);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0x5a);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0x01);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x45);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x01);
+ rt2800_rfcsr_write(rt2x00dev, 35, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 36, 0xbd);
+ rt2800_rfcsr_write(rt2x00dev, 37, 0x3c);
+ rt2800_rfcsr_write(rt2x00dev, 38, 0x5f);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0xc5);
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x33);
+ rt2800_rfcsr_write(rt2x00dev, 41, 0x5b);
+ rt2800_rfcsr_write(rt2x00dev, 42, 0x5b);
+ rt2800_rfcsr_write(rt2x00dev, 43, 0xdb);
+ rt2800_rfcsr_write(rt2x00dev, 44, 0xdb);
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xdb);
+ rt2800_rfcsr_write(rt2x00dev, 46, 0xdd);
+ rt2800_rfcsr_write(rt2x00dev, 47, 0x0d);
+ rt2800_rfcsr_write(rt2x00dev, 48, 0x14);
+ rt2800_rfcsr_write(rt2x00dev, 49, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 50, 0x2d);
+ rt2800_rfcsr_write(rt2x00dev, 51, 0x7f);
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 53, 0x52);
+ rt2800_rfcsr_write(rt2x00dev, 54, 0x1b);
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x7f);
+ rt2800_rfcsr_write(rt2x00dev, 56, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x52);
+ rt2800_rfcsr_write(rt2x00dev, 58, 0x1b);
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 60, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 61, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
} else if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
@@ -4104,6 +4286,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
} else if (rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3352) ||
rt2x00_rt(rt2x00dev, RT3390) ||
rt2x00_rt(rt2x00dev, RT3572)) {
drv_data->calibration_bw20 =
@@ -4392,7 +4575,7 @@ void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
}
EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse);
-int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
+static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
{
struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
u16 word;
@@ -4400,6 +4583,11 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
u8 default_lna_gain;
/*
+ * Read the EEPROM.
+ */
+ rt2800_read_eeprom(rt2x00dev);
+
+ /*
* Start validation of the data that has been read.
*/
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
@@ -4521,9 +4709,8 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
return 0;
}
-EXPORT_SYMBOL_GPL(rt2800_validate_eeprom);
-int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
+static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
u16 value;
@@ -4562,6 +4749,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RT3071:
case RT3090:
case RT3290:
+ case RT3352:
case RT3390:
case RT3572:
case RT5390:
@@ -4584,6 +4772,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RF3052:
case RF3290:
case RF3320:
+ case RF3322:
case RF5360:
case RF5370:
case RF5372:
@@ -4608,6 +4797,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3352) ||
rt2x00_rt(rt2x00dev, RT3390)) {
value = rt2x00_get_field16(eeprom,
EEPROM_NIC_CONF1_ANT_DIVERSITY);
@@ -4681,7 +4871,6 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
return 0;
}
-EXPORT_SYMBOL_GPL(rt2800_init_eeprom);
/*
* RF value list for rt28xx
@@ -4824,7 +5013,7 @@ static const struct rf_channel rf_vals_3x[] = {
{173, 0x61, 0, 9},
};
-int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
@@ -4901,6 +5090,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00_rf(rt2x00dev, RF3022) ||
rt2x00_rf(rt2x00dev, RF3290) ||
rt2x00_rf(rt2x00dev, RF3320) ||
+ rt2x00_rf(rt2x00dev, RF3322) ||
rt2x00_rf(rt2x00dev, RF5360) ||
rt2x00_rf(rt2x00dev, RF5370) ||
rt2x00_rf(rt2x00dev, RF5372) ||
@@ -5000,7 +5190,72 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
return 0;
}
-EXPORT_SYMBOL_GPL(rt2800_probe_hw_mode);
+
+int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev)
+{
+ int retval;
+ u32 reg;
+
+ /*
+ * Allocate eeprom data.
+ */
+ retval = rt2800_validate_eeprom(rt2x00dev);
+ if (retval)
+ return retval;
+
+ retval = rt2800_init_eeprom(rt2x00dev);
+ if (retval)
+ return retval;
+
+ /*
+ * Enable rfkill polling by setting GPIO direction of the
+ * rfkill switch GPIO pin correctly.
+ */
+ rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+ rt2x00_set_field32(&reg, GPIO_CTRL_DIR2, 1);
+ rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
+
+ /*
+ * Initialize hw specifications.
+ */
+ retval = rt2800_probe_hw_mode(rt2x00dev);
+ if (retval)
+ return retval;
+
+ /*
+ * Set device capabilities.
+ */
+ __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
+ __set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags);
+ if (!rt2x00_is_usb(rt2x00dev))
+ __set_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags);
+
+ /*
+ * Set device requirements.
+ */
+ if (!rt2x00_is_soc(rt2x00dev))
+ __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
+ __set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags);
+ __set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
+ if (!rt2800_hwcrypt_disabled(rt2x00dev))
+ __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
+ __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
+ __set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags);
+ if (rt2x00_is_usb(rt2x00dev))
+ __set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags);
+ else {
+ __set_bit(REQUIRE_DMA, &rt2x00dev->cap_flags);
+ __set_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags);
+ }
+
+ /*
+ * Set the rssi offset.
+ */
+ rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800_probe_hw);
/*
* IEEE80211 stack callback functions.
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 18a0b67b4c68..a128ceadcb3e 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -43,6 +43,9 @@ struct rt2800_ops {
const unsigned int offset,
const struct rt2x00_field32 field, u32 *reg);
+ void (*read_eeprom)(struct rt2x00_dev *rt2x00dev);
+ bool (*hwcrypt_disabled)(struct rt2x00_dev *rt2x00dev);
+
int (*drv_write_firmware)(struct rt2x00_dev *rt2x00dev,
const u8 *data, const size_t len);
int (*drv_init_registers)(struct rt2x00_dev *rt2x00dev);
@@ -114,6 +117,20 @@ static inline int rt2800_regbusy_read(struct rt2x00_dev *rt2x00dev,
return rt2800ops->regbusy_read(rt2x00dev, offset, field, reg);
}
+static inline void rt2800_read_eeprom(struct rt2x00_dev *rt2x00dev)
+{
+ const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
+
+ rt2800ops->read_eeprom(rt2x00dev);
+}
+
+static inline bool rt2800_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
+{
+ const struct rt2800_ops *rt2800ops = rt2x00dev->ops->drv;
+
+ return rt2800ops->hwcrypt_disabled(rt2x00dev);
+}
+
static inline int rt2800_drv_write_firmware(struct rt2x00_dev *rt2x00dev,
const u8 *data, const size_t len)
{
@@ -191,9 +208,8 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev);
int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev);
void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
-int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev);
-int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev);
-int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev);
+
+int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev);
void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32,
u16 *iv16);
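The read_eeprom and hwcrypt_disabled ops added above keep rt2800lib bus-agnostic: each bus glue supplies its own EEPROM reader and hardware-crypto policy, and the library reaches them through the inline wrappers. A minimal sketch of what a bus driver provides (the example_* names are illustrative; the PCI and USB glue further below show the real hooks):

	static bool example_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
	{
		return modparam_nohwcrypt;
	}

	static const struct rt2800_ops example_rt2800_ops = {
		/* ... register access ops ... */
		.read_eeprom		= example_read_eeprom,
		.hwcrypt_disabled	= example_hwcrypt_disabled,
		/* ... firmware and TXWI ops ... */
	};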
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 4765bbd654cd..27829e1e2e38 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -54,6 +54,11 @@ static bool modparam_nohwcrypt = false;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
+static bool rt2800pci_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
+{
+ return modparam_nohwcrypt;
+}
+
static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
{
unsigned int i;
@@ -965,85 +970,14 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
/*
* Device probe functions.
*/
-static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
+static void rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev)
{
- /*
- * Read EEPROM into buffer
- */
if (rt2x00_is_soc(rt2x00dev))
rt2800pci_read_eeprom_soc(rt2x00dev);
else if (rt2800pci_efuse_detect(rt2x00dev))
rt2800pci_read_eeprom_efuse(rt2x00dev);
else
rt2800pci_read_eeprom_pci(rt2x00dev);
-
- return rt2800_validate_eeprom(rt2x00dev);
-}
-
-static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
-{
- int retval;
- u32 reg;
-
- /*
- * Allocate eeprom data.
- */
- retval = rt2800pci_validate_eeprom(rt2x00dev);
- if (retval)
- return retval;
-
- retval = rt2800_init_eeprom(rt2x00dev);
- if (retval)
- return retval;
-
- /*
- * Enable rfkill polling by setting GPIO direction of the
- * rfkill switch GPIO pin correctly.
- */
- rt2x00pci_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
- rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
- rt2x00pci_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
-
- /*
- * Initialize hw specifications.
- */
- retval = rt2800_probe_hw_mode(rt2x00dev);
- if (retval)
- return retval;
-
- /*
- * This device has multiple filters for control frames
- * and has a separate filter for PS Poll frames.
- */
- __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
- __set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags);
-
- /*
- * This device has a pre tbtt interrupt and thus fetches
- * a new beacon directly prior to transmission.
- */
- __set_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags);
-
- /*
- * This device requires firmware.
- */
- if (!rt2x00_is_soc(rt2x00dev))
- __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
- __set_bit(REQUIRE_DMA, &rt2x00dev->cap_flags);
- __set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags);
- __set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
- __set_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags);
- if (!modparam_nohwcrypt)
- __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
- __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
- __set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags);
-
- /*
- * Set the rssi offset.
- */
- rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
-
- return 0;
}
static const struct ieee80211_ops rt2800pci_mac80211_ops = {
@@ -1081,6 +1015,8 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
.register_multiread = rt2x00pci_register_multiread,
.register_multiwrite = rt2x00pci_register_multiwrite,
.regbusy_read = rt2x00pci_regbusy_read,
+ .read_eeprom = rt2800pci_read_eeprom,
+ .hwcrypt_disabled = rt2800pci_hwcrypt_disabled,
.drv_write_firmware = rt2800pci_write_firmware,
.drv_init_registers = rt2800pci_init_registers,
.drv_get_txwi = rt2800pci_get_txwi,
@@ -1093,7 +1029,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.tbtt_tasklet = rt2800pci_tbtt_tasklet,
.rxdone_tasklet = rt2800pci_rxdone_tasklet,
.autowake_tasklet = rt2800pci_autowake_tasklet,
- .probe_hw = rt2800pci_probe_hw,
+ .probe_hw = rt2800_probe_hw,
.get_firmware_name = rt2800pci_get_firmware_name,
.check_firmware = rt2800_check_firmware,
.load_firmware = rt2800_load_firmware,
@@ -1152,7 +1088,6 @@ static const struct data_queue_desc rt2800pci_queue_bcn = {
static const struct rt2x00_ops rt2800pci_ops = {
.name = KBUILD_MODNAME,
.drv_data_size = sizeof(struct rt2800_drv_data),
- .max_sta_intf = 1,
.max_ap_intf = 8,
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 6b4226b71618..c9e9370eb789 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -49,6 +49,11 @@ static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
+static bool rt2800usb_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
+{
+ return modparam_nohwcrypt;
+}
+
/*
* Queue handlers.
*/
@@ -730,73 +735,27 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
/*
* Device probe functions.
*/
-static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
+static void rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev)
{
if (rt2800_efuse_detect(rt2x00dev))
rt2800_read_eeprom_efuse(rt2x00dev);
else
rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
EEPROM_SIZE);
-
- return rt2800_validate_eeprom(rt2x00dev);
}
static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
{
int retval;
- u32 reg;
- /*
- * Allocate eeprom data.
- */
- retval = rt2800usb_validate_eeprom(rt2x00dev);
+ retval = rt2800_probe_hw(rt2x00dev);
if (retval)
return retval;
- retval = rt2800_init_eeprom(rt2x00dev);
- if (retval)
- return retval;
-
- /*
- * Enable rfkill polling by setting GPIO direction of the
- * rfkill switch GPIO pin correctly.
- */
- rt2x00usb_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
- rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
- rt2x00usb_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
-
- /*
- * Initialize hw specifications.
- */
- retval = rt2800_probe_hw_mode(rt2x00dev);
- if (retval)
- return retval;
-
- /*
- * This device has multiple filters for control frames
- * and has a separate filter for PS Poll frames.
- */
- __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags);
- __set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags);
-
- /*
- * This device requires firmware.
- */
- __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
- __set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags);
- if (!modparam_nohwcrypt)
- __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
- __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags);
- __set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags);
- __set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags);
- __set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags);
-
- rt2x00dev->txstatus_timer.function = rt2800usb_tx_sta_fifo_timeout,
-
/*
- * Set the rssi offset.
+ * Set txstatus timer function.
*/
- rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
+ rt2x00dev->txstatus_timer.function = rt2800usb_tx_sta_fifo_timeout;
/*
* Overwrite TX done handler
@@ -842,6 +801,8 @@ static const struct rt2800_ops rt2800usb_rt2800_ops = {
.register_multiread = rt2x00usb_register_multiread,
.register_multiwrite = rt2x00usb_register_multiwrite,
.regbusy_read = rt2x00usb_regbusy_read,
+ .read_eeprom = rt2800usb_read_eeprom,
+ .hwcrypt_disabled = rt2800usb_hwcrypt_disabled,
.drv_write_firmware = rt2800usb_write_firmware,
.drv_init_registers = rt2800usb_init_registers,
.drv_get_txwi = rt2800usb_get_txwi,
@@ -909,7 +870,6 @@ static const struct data_queue_desc rt2800usb_queue_bcn = {
static const struct rt2x00_ops rt2800usb_ops = {
.name = KBUILD_MODNAME,
.drv_data_size = sizeof(struct rt2800_drv_data),
- .max_sta_intf = 1,
.max_ap_intf = 8,
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 8afb546c2b2d..0751b35ef6dc 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -188,6 +188,7 @@ struct rt2x00_chip {
#define RT3071 0x3071
#define RT3090 0x3090 /* 2.4GHz PCIe */
#define RT3290 0x3290
+#define RT3352 0x3352 /* WSOC */
#define RT3390 0x3390
#define RT3572 0x3572
#define RT3593 0x3593
@@ -655,7 +656,6 @@ struct rt2x00lib_ops {
struct rt2x00_ops {
const char *name;
const unsigned int drv_data_size;
- const unsigned int max_sta_intf;
const unsigned int max_ap_intf;
const unsigned int eeprom_size;
const unsigned int rf_size;
@@ -741,6 +741,14 @@ enum rt2x00_capability_flags {
};
/*
+ * Interface combinations
+ */
+enum {
+ IF_COMB_AP = 0,
+ NUM_IF_COMB,
+};
+
+/*
* rt2x00 device structure.
*/
struct rt2x00_dev {
@@ -867,6 +875,12 @@ struct rt2x00_dev {
unsigned int intf_beaconing;
/*
+ * Interface combinations
+ */
+ struct ieee80211_iface_limit if_limits_ap;
+ struct ieee80211_iface_combination if_combinations[NUM_IF_COMB];
+
+ /*
* Link quality
*/
struct link link;
@@ -1287,7 +1301,9 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp);
/*
* mac80211 handlers.
*/
-void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void rt2x00mac_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
int rt2x00mac_start(struct ieee80211_hw *hw);
void rt2x00mac_stop(struct ieee80211_hw *hw);
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 3f07e36f462b..69097d1faeb6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -194,7 +194,7 @@ static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac,
*/
skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
while (skb) {
- rt2x00mac_tx(rt2x00dev->hw, skb);
+ rt2x00mac_tx(rt2x00dev->hw, NULL, skb);
skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif);
}
}
@@ -1118,6 +1118,34 @@ void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
rt2x00dev->intf_associated = 0;
}
+static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
+{
+ struct ieee80211_iface_limit *if_limit;
+ struct ieee80211_iface_combination *if_combination;
+
+ /*
+ * Build up AP interface limits structure.
+ */
+ if_limit = &rt2x00dev->if_limits_ap;
+ if_limit->max = rt2x00dev->ops->max_ap_intf;
+ if_limit->types = BIT(NL80211_IFTYPE_AP);
+
+ /*
+ * Build up AP interface combinations structure.
+ */
+ if_combination = &rt2x00dev->if_combinations[IF_COMB_AP];
+ if_combination->limits = if_limit;
+ if_combination->n_limits = 1;
+ if_combination->max_interfaces = if_limit->max;
+ if_combination->num_different_channels = 1;
+
+ /*
+ * Finally, specify the possible combinations to mac80211.
+ */
+ rt2x00dev->hw->wiphy->iface_combinations = rt2x00dev->if_combinations;
+ rt2x00dev->hw->wiphy->n_iface_combinations = 1;
+}
+
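For comparison, drivers with fixed limits usually declare this information statically; a hedged sketch of that alternative (the limit of 8 and the example_* names are illustrative):

	static const struct ieee80211_iface_limit example_ap_limits[] = {
		{ .max = 8, .types = BIT(NL80211_IFTYPE_AP) },
	};

	static const struct ieee80211_iface_combination example_if_comb[] = {
		{
			.limits			= example_ap_limits,
			.n_limits		= ARRAY_SIZE(example_ap_limits),
			.max_interfaces		= 8,
			.num_different_channels	= 1,
		},
	};

rt2x00 fills the structures at probe time instead because max_ap_intf differs between chipsets (8 for rt2800, 4 for rt61/rt73).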
/*
* driver allocation handlers.
*/
@@ -1126,6 +1154,11 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
int retval = -ENOMEM;
/*
+ * Set possible interface combinations.
+ */
+ rt2x00lib_set_if_combinations(rt2x00dev);
+
+ /*
* Allocate the driver data memory, if necessary.
*/
if (rt2x00dev->ops->drv_data_size > 0) {
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 4ff26c2159bf..98a9e48f8e4a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -99,7 +99,9 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
return retval;
}
-void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void rt2x00mac_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -212,46 +214,6 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
return -ENODEV;
- switch (vif->type) {
- case NL80211_IFTYPE_AP:
- /*
- * We don't support mixed combinations of
- * sta and ap interfaces.
- */
- if (rt2x00dev->intf_sta_count)
- return -ENOBUFS;
-
- /*
- * Check if we exceeded the maximum amount
- * of supported interfaces.
- */
- if (rt2x00dev->intf_ap_count >= rt2x00dev->ops->max_ap_intf)
- return -ENOBUFS;
-
- break;
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_WDS:
- /*
- * We don't support mixed combinations of
- * sta and ap interfaces.
- */
- if (rt2x00dev->intf_ap_count)
- return -ENOBUFS;
-
- /*
- * Check if we exceeded the maximum amount
- * of supported interfaces.
- */
- if (rt2x00dev->intf_sta_count >= rt2x00dev->ops->max_sta_intf)
- return -ENOBUFS;
-
- break;
- default:
- return -EINVAL;
- }
-
/*
* Loop through all beacon queues to find a free
* entry. Since there are as many beacon entries
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index f7e74a0a7759..e488b944a034 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -315,6 +315,7 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
struct txentry_desc *txdesc,
+ struct ieee80211_sta *sta,
const struct rt2x00_rate *hwrate)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -322,11 +323,11 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct rt2x00_sta *sta_priv = NULL;
- if (tx_info->control.sta) {
+ if (sta) {
txdesc->u.ht.mpdu_density =
- tx_info->control.sta->ht_cap.ampdu_density;
+ sta->ht_cap.ampdu_density;
- sta_priv = sta_to_rt2x00_sta(tx_info->control.sta);
+ sta_priv = sta_to_rt2x00_sta(sta);
txdesc->u.ht.wcid = sta_priv->wcid;
}
@@ -341,8 +342,8 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
* MIMO PS should be set to 1 for STAs using dynamic SM PS
* when using more than one tx stream (>MCS7).
*/
- if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
- ((tx_info->control.sta->ht_cap.cap &
+ if (sta && txdesc->u.ht.mcs > 7 &&
+ ((sta->ht_cap.cap &
IEEE80211_HT_CAP_SM_PS) >>
IEEE80211_HT_CAP_SM_PS_SHIFT) ==
WLAN_HT_CAP_SM_PS_DYNAMIC)
@@ -409,7 +410,8 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
- struct txentry_desc *txdesc)
+ struct txentry_desc *txdesc,
+ struct ieee80211_sta *sta)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -503,7 +505,7 @@ static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
- hwrate);
+ sta, hwrate);
else
rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
hwrate);
@@ -595,7 +597,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
* after that we are free to use the skb->cb array
* for our information.
*/
- rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc);
+ rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL);
/*
* All information is retrieved from the skb->cb array,
@@ -740,7 +742,7 @@ int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
* after that we are free to use the skb->cb array
* for our information.
*/
- rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc);
+ rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);
/*
* Fill in skb descriptor
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index b8ec96163922..d6582a2fa353 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -243,7 +243,7 @@ static int rt61pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
u32 reg;
rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
- return rt2x00_get_field32(reg, MAC_CSR13_BIT5);
+ return rt2x00_get_field32(reg, MAC_CSR13_VAL5);
}
#ifdef CONFIG_RT2X00_LIB_LEDS
@@ -715,11 +715,11 @@ static void rt61pci_config_antenna_2529_rx(struct rt2x00_dev *rt2x00dev,
rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
- rt2x00_set_field32(&reg, MAC_CSR13_BIT4, p1);
- rt2x00_set_field32(&reg, MAC_CSR13_BIT12, 0);
+ rt2x00_set_field32(&reg, MAC_CSR13_DIR4, 0);
+ rt2x00_set_field32(&reg, MAC_CSR13_VAL4, p1);
- rt2x00_set_field32(&reg, MAC_CSR13_BIT3, !p2);
- rt2x00_set_field32(&reg, MAC_CSR13_BIT11, 0);
+ rt2x00_set_field32(&reg, MAC_CSR13_DIR3, 0);
+ rt2x00_set_field32(&reg, MAC_CSR13_VAL3, !p2);
rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg);
}
@@ -2855,7 +2855,7 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
* rfkill switch GPIO pin correctly.
*/
rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
- rt2x00_set_field32(&reg, MAC_CSR13_BIT13, 1);
+ rt2x00_set_field32(&reg, MAC_CSR13_DIR5, 1);
rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg);
/*
@@ -3045,7 +3045,6 @@ static const struct data_queue_desc rt61pci_queue_bcn = {
static const struct rt2x00_ops rt61pci_ops = {
.name = KBUILD_MODNAME,
- .max_sta_intf = 1,
.max_ap_intf = 4,
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 8f3da5a56766..9bc6b6044e34 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -357,22 +357,22 @@ struct hw_pairwise_ta_entry {
/*
* MAC_CSR13: GPIO.
+ * MAC_CSR13_VALx: GPIO value
+ * MAC_CSR13_DIRx: GPIO direction: 0 = output; 1 = input
*/
#define MAC_CSR13 0x3034
-#define MAC_CSR13_BIT0 FIELD32(0x00000001)
-#define MAC_CSR13_BIT1 FIELD32(0x00000002)
-#define MAC_CSR13_BIT2 FIELD32(0x00000004)
-#define MAC_CSR13_BIT3 FIELD32(0x00000008)
-#define MAC_CSR13_BIT4 FIELD32(0x00000010)
-#define MAC_CSR13_BIT5 FIELD32(0x00000020)
-#define MAC_CSR13_BIT6 FIELD32(0x00000040)
-#define MAC_CSR13_BIT7 FIELD32(0x00000080)
-#define MAC_CSR13_BIT8 FIELD32(0x00000100)
-#define MAC_CSR13_BIT9 FIELD32(0x00000200)
-#define MAC_CSR13_BIT10 FIELD32(0x00000400)
-#define MAC_CSR13_BIT11 FIELD32(0x00000800)
-#define MAC_CSR13_BIT12 FIELD32(0x00001000)
-#define MAC_CSR13_BIT13 FIELD32(0x00002000)
+#define MAC_CSR13_VAL0 FIELD32(0x00000001)
+#define MAC_CSR13_VAL1 FIELD32(0x00000002)
+#define MAC_CSR13_VAL2 FIELD32(0x00000004)
+#define MAC_CSR13_VAL3 FIELD32(0x00000008)
+#define MAC_CSR13_VAL4 FIELD32(0x00000010)
+#define MAC_CSR13_VAL5 FIELD32(0x00000020)
+#define MAC_CSR13_DIR0 FIELD32(0x00000100)
+#define MAC_CSR13_DIR1 FIELD32(0x00000200)
+#define MAC_CSR13_DIR2 FIELD32(0x00000400)
+#define MAC_CSR13_DIR3 FIELD32(0x00000800)
+#define MAC_CSR13_DIR4 FIELD32(0x00001000)
+#define MAC_CSR13_DIR5 FIELD32(0x00002000)
/*
* MAC_CSR14: LED control register.
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 248436c13ce0..e5eb43b3eee7 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -189,7 +189,7 @@ static int rt73usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
u32 reg;
rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
- return rt2x00_get_field32(reg, MAC_CSR13_BIT7);
+ return rt2x00_get_field32(reg, MAC_CSR13_VAL7);
}
#ifdef CONFIG_RT2X00_LIB_LEDS
@@ -2195,7 +2195,7 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
* rfkill switch GPIO pin correctly.
*/
rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
- rt2x00_set_field32(&reg, MAC_CSR13_BIT15, 0);
+ rt2x00_set_field32(&reg, MAC_CSR13_DIR7, 0);
rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg);
/*
@@ -2382,7 +2382,6 @@ static const struct data_queue_desc rt73usb_queue_bcn = {
static const struct rt2x00_ops rt73usb_ops = {
.name = KBUILD_MODNAME,
- .max_sta_intf = 1,
.max_ap_intf = 4,
.eeprom_size = EEPROM_SIZE,
.rf_size = RF_SIZE,
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index df1cc116b83b..7577e0ba3877 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -267,24 +267,26 @@ struct hw_pairwise_ta_entry {
/*
* MAC_CSR13: GPIO.
+ * MAC_CSR13_VALx: GPIO value
+ * MAC_CSR13_DIRx: GPIO direction: 0 = input; 1 = output
*/
#define MAC_CSR13 0x3034
-#define MAC_CSR13_BIT0 FIELD32(0x00000001)
-#define MAC_CSR13_BIT1 FIELD32(0x00000002)
-#define MAC_CSR13_BIT2 FIELD32(0x00000004)
-#define MAC_CSR13_BIT3 FIELD32(0x00000008)
-#define MAC_CSR13_BIT4 FIELD32(0x00000010)
-#define MAC_CSR13_BIT5 FIELD32(0x00000020)
-#define MAC_CSR13_BIT6 FIELD32(0x00000040)
-#define MAC_CSR13_BIT7 FIELD32(0x00000080)
-#define MAC_CSR13_BIT8 FIELD32(0x00000100)
-#define MAC_CSR13_BIT9 FIELD32(0x00000200)
-#define MAC_CSR13_BIT10 FIELD32(0x00000400)
-#define MAC_CSR13_BIT11 FIELD32(0x00000800)
-#define MAC_CSR13_BIT12 FIELD32(0x00001000)
-#define MAC_CSR13_BIT13 FIELD32(0x00002000)
-#define MAC_CSR13_BIT14 FIELD32(0x00004000)
-#define MAC_CSR13_BIT15 FIELD32(0x00008000)
+#define MAC_CSR13_VAL0 FIELD32(0x00000001)
+#define MAC_CSR13_VAL1 FIELD32(0x00000002)
+#define MAC_CSR13_VAL2 FIELD32(0x00000004)
+#define MAC_CSR13_VAL3 FIELD32(0x00000008)
+#define MAC_CSR13_VAL4 FIELD32(0x00000010)
+#define MAC_CSR13_VAL5 FIELD32(0x00000020)
+#define MAC_CSR13_VAL6 FIELD32(0x00000040)
+#define MAC_CSR13_VAL7 FIELD32(0x00000080)
+#define MAC_CSR13_DIR0 FIELD32(0x00000100)
+#define MAC_CSR13_DIR1 FIELD32(0x00000200)
+#define MAC_CSR13_DIR2 FIELD32(0x00000400)
+#define MAC_CSR13_DIR3 FIELD32(0x00000800)
+#define MAC_CSR13_DIR4 FIELD32(0x00001000)
+#define MAC_CSR13_DIR5 FIELD32(0x00002000)
+#define MAC_CSR13_DIR6 FIELD32(0x00004000)
+#define MAC_CSR13_DIR7 FIELD32(0x00008000)
/*
* MAC_CSR14: LED control register.
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index aceaf689f737..021d83e1b1d3 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -244,7 +244,9 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void rtl8180_tx(struct ieee80211_hw *dev,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -710,7 +712,7 @@ static void rtl8180_beacon_work(struct work_struct *work)
/* TODO: use actual beacon queue */
skb_set_queue_mapping(skb, 0);
- rtl8180_tx(dev, skb);
+ rtl8180_tx(dev, NULL, skb);
resched:
/*
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 533024095c43..7811b6315973 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -228,7 +228,9 @@ static void rtl8187_tx_cb(struct urb *urb)
}
}
-static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void rtl8187_tx(struct ieee80211_hw *dev,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct rtl8187_priv *priv = dev->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1076,7 +1078,7 @@ static void rtl8187_beacon_work(struct work_struct *work)
/* TODO: use actual beacon queue */
skb_set_queue_mapping(skb, 0);
- rtl8187_tx(dev, skb);
+ rtl8187_tx(dev, NULL, skb);
resched:
/*
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index cefac6a43601..6b28e92d1d21 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -1,6 +1,6 @@
config RTL8192CE
tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter"
- depends on MAC80211 && PCI && EXPERIMENTAL
+ depends on MAC80211 && PCI
select FW_LOADER
select RTLWIFI
select RTL8192C_COMMON
@@ -12,7 +12,7 @@ config RTL8192CE
config RTL8192SE
tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter"
- depends on MAC80211 && EXPERIMENTAL && PCI
+ depends on MAC80211 && PCI
select FW_LOADER
select RTLWIFI
---help---
@@ -23,7 +23,7 @@ config RTL8192SE
config RTL8192DE
tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter"
- depends on MAC80211 && EXPERIMENTAL && PCI
+ depends on MAC80211 && PCI
select FW_LOADER
select RTLWIFI
---help---
@@ -34,7 +34,7 @@ config RTL8192DE
config RTL8192CU
tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
- depends on MAC80211 && USB && EXPERIMENTAL
+ depends on MAC80211 && USB
select FW_LOADER
select RTLWIFI
select RTL8192C_COMMON
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 942e56b77b60..59381fe8ed06 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1341,9 +1341,8 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
info->control.rates[0].idx = 0;
- info->control.sta = sta;
info->band = hw->conf.channel->band;
- rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+ rtlpriv->intf_ops->adapter_tx(hw, sta, skb, &tcb_desc);
}
err_free:
return 0;
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index a18ad2a98938..a7c0e52869ba 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -124,7 +124,9 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
mutex_unlock(&rtlpriv->locks.conf_mutex);
}
-static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void rtl_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -138,8 +140,8 @@ static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
goto err_free;
- if (!rtlpriv->intf_ops->waitq_insert(hw, skb))
- rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+ if (!rtlpriv->intf_ops->waitq_insert(hw, control->sta, skb))
+ rtlpriv->intf_ops->adapter_tx(hw, control->sta, skb, &tcb_desc);
return;
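This follows the mac80211 change that passes a struct ieee80211_tx_control to the .tx op; the station pointer now arrives as control->sta instead of info->control.sta. A sketch of the resulting callback shape (names illustrative); internally generated frames such as beacons pass control == NULL, which is why the beacon paths above pass NULL explicitly:

	static void example_op_tx(struct ieee80211_hw *hw,
				  struct ieee80211_tx_control *control,
				  struct sk_buff *skb)
	{
		struct ieee80211_sta *sta = control ? control->sta : NULL;

		/* queue skb, passing sta down explicitly */
	}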
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 80f75d3ba84a..abc306b502ac 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -372,13 +372,11 @@ static void rtl_pci_parse_configuration(struct pci_dev *pdev,
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
u8 tmp;
- int pos;
- u8 linkctrl_reg;
+ u16 linkctrl_reg;
/*Link Control Register */
- pos = pci_pcie_cap(pdev);
- pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg);
- pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg;
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &linkctrl_reg);
+ pcipriv->ndis_adapter.linkctrl_reg = (u8)linkctrl_reg;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n",
pcipriv->ndis_adapter.linkctrl_reg);
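pcie_capability_read_word() performs the capability-offset lookup that the removed pci_pcie_cap()/pci_read_config_byte() pair did by hand, and it returns a status code, which the hunk above ignores. A hedged sketch of a checked variant:

	u16 lnkctl;

	if (!pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl))
		pcipriv->ndis_adapter.linkctrl_reg = (u8)lnkctl;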
@@ -504,7 +502,7 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
_rtl_update_earlymode_info(hw, skb,
&tcb_desc, tid);
- rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc);
+ rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
}
}
}
@@ -929,7 +927,7 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
info = IEEE80211_SKB_CB(pskb);
pdesc = &ring->desc[0];
rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
- info, pskb, BEACON_QUEUE, &tcb_desc);
+ info, NULL, pskb, BEACON_QUEUE, &tcb_desc);
__skb_queue_tail(&ring->queue, pskb);
@@ -1305,11 +1303,10 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
}
static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_sta *sta = info->control.sta;
struct rtl_sta_info *sta_entry = NULL;
u8 tid = rtl_get_tid(skb);
@@ -1337,13 +1334,14 @@ static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
return true;
}
-static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct rtl_tcb_desc *ptcb_desc)
+static int rtl_pci_tx(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ struct rtl_tcb_desc *ptcb_desc)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_sta_info *sta_entry = NULL;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_sta *sta = info->control.sta;
struct rtl8192_tx_ring *ring;
struct rtl_tx_desc *pdesc;
u8 idx;
@@ -1418,7 +1416,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
- info, skb, hw_queue, ptcb_desc);
+ info, sta, skb, hw_queue, ptcb_desc);
__skb_queue_tail(&ring->queue, skb);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index a45afda8259c..1ca4e25c143b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -167,7 +167,7 @@ static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
dm_digtable->cur_igvalue = 0x20;
dm_digtable->pre_igvalue = 0x0;
- dm_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
+ dm_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
dm_digtable->presta_connectstate = DIG_STA_DISCONNECT;
dm_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
@@ -190,7 +190,7 @@ static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
long rssi_val_min = 0;
if ((dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
- (dm_digtable->cursta_connectctate == DIG_STA_CONNECT)) {
+ (dm_digtable->cursta_connectstate == DIG_STA_CONNECT)) {
if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
rssi_val_min =
(rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
@@ -199,8 +199,8 @@ static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
else
rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
- } else if (dm_digtable->cursta_connectctate == DIG_STA_CONNECT ||
- dm_digtable->cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
+ } else if (dm_digtable->cursta_connectstate == DIG_STA_CONNECT ||
+ dm_digtable->cursta_connectstate == DIG_STA_BEFORE_CONNECT) {
rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
} else if (dm_digtable->curmultista_connectstate ==
DIG_MULTISTA_CONNECT) {
@@ -334,7 +334,7 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
multi_sta = true;
if (!multi_sta ||
- dm_digtable->cursta_connectctate != DIG_STA_DISCONNECT) {
+ dm_digtable->cursta_connectstate != DIG_STA_DISCONNECT) {
initialized = false;
dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
return;
@@ -378,15 +378,15 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "presta_connectstate = %x, cursta_connectctate = %x\n",
+ "presta_connectstate = %x, cursta_connectstate = %x\n",
dm_digtable->presta_connectstate,
- dm_digtable->cursta_connectctate);
+ dm_digtable->cursta_connectstate);
- if (dm_digtable->presta_connectstate == dm_digtable->cursta_connectctate
- || dm_digtable->cursta_connectctate == DIG_STA_BEFORE_CONNECT
- || dm_digtable->cursta_connectctate == DIG_STA_CONNECT) {
+ if (dm_digtable->presta_connectstate == dm_digtable->cursta_connectstate
+ || dm_digtable->cursta_connectstate == DIG_STA_BEFORE_CONNECT
+ || dm_digtable->cursta_connectstate == DIG_STA_CONNECT) {
- if (dm_digtable->cursta_connectctate != DIG_STA_DISCONNECT) {
+ if (dm_digtable->cursta_connectstate != DIG_STA_DISCONNECT) {
dm_digtable->rssi_val_min =
rtl92c_dm_initial_gain_min_pwdb(hw);
rtl92c_dm_ctrl_initgain_by_rssi(hw);
@@ -407,7 +407,7 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
- if (dm_digtable->cursta_connectctate == DIG_STA_CONNECT) {
+ if (dm_digtable->cursta_connectstate == DIG_STA_CONNECT) {
dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
@@ -484,15 +484,15 @@ static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
return;
if (mac->link_state >= MAC80211_LINKED)
- dm_digtable->cursta_connectctate = DIG_STA_CONNECT;
+ dm_digtable->cursta_connectstate = DIG_STA_CONNECT;
else
- dm_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
+ dm_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
rtl92c_dm_initial_gain_sta(hw);
rtl92c_dm_initial_gain_multi_sta(hw);
rtl92c_dm_cck_packet_detection_thresh(hw);
- dm_digtable->presta_connectstate = dm_digtable->cursta_connectctate;
+ dm_digtable->presta_connectstate = dm_digtable->cursta_connectstate;
}
@@ -1214,18 +1214,13 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
"PreState = %d, CurState = %d\n",
p_ra->pre_ratr_state, p_ra->ratr_state);
- /* Only the PCI card uses sta in the update rate table
- * callback routine */
- if (rtlhal->interface == INTF_PCI) {
- rcu_read_lock();
- sta = ieee80211_find_sta(mac->vif, mac->bssid);
- }
+ rcu_read_lock();
+ sta = ieee80211_find_sta(mac->vif, mac->bssid);
rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
p_ra->ratr_state);
p_ra->pre_ratr_state = p_ra->ratr_state;
- if (rtlhal->interface == INTF_PCI)
- rcu_read_unlock();
+ rcu_read_unlock();
}
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 44febfde9493..883f23ae9519 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -315,7 +315,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
u16 box_reg = 0, box_extreg = 0;
u8 u1b_tmp;
bool isfw_read = false;
- bool bwrite_sucess = false;
+ bool bwrite_success = false;
u8 wait_h2c_limmit = 100;
u8 wait_writeh2c_limmit = 100;
u8 boxcontent[4], boxextcontent[2];
@@ -354,7 +354,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
}
}
- while (!bwrite_sucess) {
+ while (!bwrite_success) {
wait_writeh2c_limmit--;
if (wait_writeh2c_limmit == 0) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
@@ -491,7 +491,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
break;
}
- bwrite_sucess = true;
+ bwrite_success = true;
rtlhal->last_hmeboxnum = boxnum + 1;
if (rtlhal->last_hmeboxnum == 4)
@@ -577,8 +577,7 @@ static bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
ring = &rtlpci->tx_ring[BEACON_QUEUE];
pskb = __skb_dequeue(&ring->queue);
- if (pskb)
- kfree_skb(pskb);
+ kfree_skb(pskb);
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index dd4bb0950a57..86d73b32d995 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -1914,8 +1914,8 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
}
RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
"ratr_bitmap :%x\n", ratr_bitmap);
- *(u32 *)&rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) |
- (ratr_index << 28));
+ *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
+ (ratr_index << 28);
rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
"Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index 7d8f96405f42..ea2e1bd847c8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -344,7 +344,7 @@ static struct rtl_hal_cfg rtl92ce_hal_cfg = {
.maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
};
-DEFINE_PCI_DEVICE_TABLE(rtl92ce_pci_ids) = {
+static DEFINE_PCI_DEVICE_TABLE(rtl92ce_pci_ids) = {
{RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8191, rtl92ce_hal_cfg)},
{RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8178, rtl92ce_hal_cfg)},
{RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8177, rtl92ce_hal_cfg)},
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 52166640f167..390d6d4fcaa0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -596,7 +596,9 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info, struct sk_buff *skb,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
u8 hw_queue, struct rtl_tcb_desc *tcb_desc)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -604,7 +606,6 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
bool defaultadapter = true;
- struct ieee80211_sta *sta;
u8 *pdesc = pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
index c4adb9777365..a7cdd514cb2e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -713,6 +713,7 @@ struct rx_desc_92c {
void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr,
u8 *pdesc, struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
struct sk_buff *skb, u8 hw_queue,
struct rtl_tcb_desc *ptcb_desc);
bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 2e6eb356a93e..6e66f04c363f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -491,12 +491,14 @@ static void _rtl_tx_desc_checksum(u8 *txdesc)
SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
for (index = 0; index < 16; index++)
checksum = checksum ^ (*(ptr + index));
- SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, cpu_to_le16(checksum));
+ SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
}
void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info, struct sk_buff *skb,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
u8 queue_index,
struct rtl_tcb_desc *tcb_desc)
{
@@ -504,7 +506,6 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
bool defaultadapter = true;
- struct ieee80211_sta *sta = info->control.sta = info->control.sta;
u8 *qc = ieee80211_get_qos_ctl(hdr);
u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
u16 seq_number;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
index 332b06e78b00..725c53accc58 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
@@ -420,7 +420,9 @@ struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *,
struct sk_buff_head *);
void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info, struct sk_buff *skb,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
u8 queue_index,
struct rtl_tcb_desc *tcb_desc);
void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index c0201ed69dd7..ed868c396c25 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -164,7 +164,7 @@ static void rtl92d_dm_diginit(struct ieee80211_hw *hw)
de_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
de_digtable->cur_igvalue = 0x20;
de_digtable->pre_igvalue = 0x0;
- de_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
+ de_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
de_digtable->presta_connectstate = DIG_STA_DISCONNECT;
de_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
de_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
@@ -310,7 +310,7 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
struct dig_t *de_digtable = &rtlpriv->dm_digtable;
unsigned long flag = 0;
- if (de_digtable->cursta_connectctate == DIG_STA_CONNECT) {
+ if (de_digtable->cursta_connectstate == DIG_STA_CONNECT) {
if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
if (de_digtable->min_undecorated_pwdb_for_dm <= 25)
de_digtable->cur_cck_pd_state =
@@ -342,7 +342,7 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
- de_digtable->cursta_connectctate == DIG_STA_CONNECT ?
+ de_digtable->cursta_connectstate == DIG_STA_CONNECT ?
"DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
@@ -428,9 +428,9 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
/* Decide the current status and if modify initial gain or not */
if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
- de_digtable->cursta_connectctate = DIG_STA_CONNECT;
+ de_digtable->cursta_connectstate = DIG_STA_CONNECT;
else
- de_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
+ de_digtable->cursta_connectstate = DIG_STA_DISCONNECT;
/* adjust initial gain according to false alarm counter */
if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
index 895ae6c1f354..23177076b97f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
@@ -365,7 +365,7 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
u8 u1b_tmp;
bool isfw_read = false;
u8 buf_index = 0;
- bool bwrite_sucess = false;
+ bool bwrite_success = false;
u8 wait_h2c_limmit = 100;
u8 wait_writeh2c_limmit = 100;
u8 boxcontent[4], boxextcontent[2];
@@ -408,7 +408,7 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
break;
}
}
- while (!bwrite_sucess) {
+ while (!bwrite_success) {
wait_writeh2c_limmit--;
if (wait_writeh2c_limmit == 0) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
@@ -515,7 +515,7 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
"switch case not processed\n");
break;
}
- bwrite_sucess = true;
+ bwrite_success = true;
rtlhal->last_hmeboxnum = boxnum + 1;
if (rtlhal->last_hmeboxnum == 4)
rtlhal->last_hmeboxnum = 0;
@@ -570,8 +570,7 @@ static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw,
ring = &rtlpci->tx_ring[BEACON_QUEUE];
pskb = __skb_dequeue(&ring->queue);
- if (pskb)
- kfree_skb(pskb);
+ kfree_skb(pskb);
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
pdesc = &ring->desc[idx];
/* discard output from call below */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 442031256bce..db0086062d05 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -1314,7 +1314,7 @@ static void _rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "=====>\n");
- /*----Restore RFENV control type----*/ ;
+ /*----Restore RFENV control type----*/
switch (rfpath) {
case RF90_PATH_A:
case RF90_PATH_C:
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index f80690d82c11..4686f340b9d6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -551,7 +551,9 @@ static void _rtl92de_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info, struct sk_buff *skb,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -559,7 +561,6 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct ieee80211_sta *sta = info->control.sta;
u8 *pdesc = pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
index 057a52431b00..c1b5dfb79d53 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
@@ -730,6 +730,7 @@ struct rx_desc_92d {
void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr,
u8 *pdesc, struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
struct sk_buff *skb, u8 hw_queue,
struct rtl_tcb_desc *ptcb_desc);
bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 36d1cb3aef8a..e3cf4c02122a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -591,14 +591,15 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
- struct ieee80211_tx_info *info, struct sk_buff *skb,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct ieee80211_sta *sta = info->control.sta;
u8 *pdesc = pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
@@ -755,7 +756,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
/* DOWRD 8 */
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
+ SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
@@ -785,7 +786,7 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
/* 92SE need not to set TX packet size when firmware download */
SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
+ SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
wmb();
SET_TX_DESC_OWN(pdesc, 1);
@@ -804,7 +805,7 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
SET_BITS_TO_LE_4BYTE(skb->data, 24, 7, rtlhal->h2c_txcmd_seq);
SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
+ SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
wmb();
SET_TX_DESC_OWN(pdesc, 1);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
index 011e7b0695f2..64dd66f287c1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h
@@ -31,6 +31,7 @@
void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
u8 *pdesc, struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
struct sk_buff *skb, u8 hw_queue,
struct rtl_tcb_desc *ptcb_desc);
void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg,
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index aa970fc18a21..030beb45d8b0 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -120,7 +120,7 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
if (status < 0 && count++ < 4)
pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n",
- value, status, le32_to_cpu(*(u32 *)pdata));
+ value, status, *(u32 *)pdata);
return status;
}
@@ -848,8 +848,10 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
_rtl_submit_tx_urb(hw, _urb);
}
-static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
- u16 hw_queue)
+static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ u16 hw_queue)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -891,7 +893,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
seq_number += 1;
seq_number <<= 4;
}
- rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, skb,
+ rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, sta, skb,
hw_queue, &tcb_desc);
if (!ieee80211_has_morefrags(hdr->frame_control)) {
if (qc)
@@ -901,7 +903,9 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
}
-static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
+static int rtl_usb_tx(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
struct rtl_tcb_desc *dummy)
{
struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
@@ -913,7 +917,7 @@ static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
if (unlikely(is_hal_stop(rtlhal)))
goto err_free;
hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb));
- _rtl_usb_tx_preprocess(hw, skb, hw_queue);
+ _rtl_usb_tx_preprocess(hw, sta, skb, hw_queue);
_rtl_usb_transmit(hw, skb, hw_queue);
return NETDEV_TX_OK;
@@ -923,6 +927,7 @@ err_free:
}
static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
struct sk_buff *skb)
{
return false;
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index cdaa21f29710..f1b6bc693b0a 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -122,7 +122,7 @@ enum rt_eeprom_type {
EEPROM_BOOT_EFUSE,
};
-enum rtl_status {
+enum ttl_status {
RTL_STATUS_INTERFACE_START = 0,
};
@@ -135,7 +135,7 @@ enum hardware_type {
HARDWARE_TYPE_RTL8192CU,
HARDWARE_TYPE_RTL8192DE,
HARDWARE_TYPE_RTL8192DU,
- HARDWARE_TYPE_RTL8723E,
+ HARDWARE_TYPE_RTL8723AE,
HARDWARE_TYPE_RTL8723U,
/* keep it last */
@@ -389,6 +389,7 @@ enum rt_enc_alg {
RSERVED_ENCRYPTION = 3,
AESCCMP_ENCRYPTION = 4,
WEP104_ENCRYPTION = 5,
+ AESCMAC_ENCRYPTION = 6, /*IEEE802.11w */
};
enum rtl_hal_state {
@@ -873,6 +874,7 @@ struct rtl_phy {
u32 adda_backup[16];
u32 iqk_mac_backup[IQK_MAC_REG_NUM];
u32 iqk_bb_backup[10];
+ bool iqk_initialized;
/* Dual mac */
bool need_iqk;
@@ -910,6 +912,8 @@ struct rtl_phy {
#define RTL_AGG_OPERATIONAL 3
#define RTL_AGG_OFF 0
#define RTL_AGG_ON 1
+#define RTL_RX_AGG_START 1
+#define RTL_RX_AGG_STOP 0
#define RTL_AGG_EMPTYING_HW_QUEUE_ADDBA 2
#define RTL_AGG_EMPTYING_HW_QUEUE_DELBA 3
@@ -920,6 +924,7 @@ struct rtl_ht_agg {
u64 bitmap;
u32 rate_n_flags;
u8 agg_state;
+ u8 rx_agg_state;
};
struct rtl_tid_data {
@@ -927,11 +932,19 @@ struct rtl_tid_data {
struct rtl_ht_agg agg;
};
+struct rssi_sta {
+ long undecorated_smoothed_pwdb;
+};
+
struct rtl_sta_info {
+ struct list_head list;
u8 ratr_index;
u8 wireless_mode;
u8 mimo_ps;
struct rtl_tid_data tids[MAX_TID_COUNT];
+
+ /* just used for ap adhoc or mesh*/
+ struct rssi_sta rssi_stat;
} __packed;
struct rtl_priv;
@@ -1034,6 +1047,11 @@ struct rtl_mac {
struct rtl_hal {
struct ieee80211_hw *hw;
+ bool up_first_time;
+ bool first_init;
+ bool being_init_adapter;
+ bool bbrf_ready;
+
enum intf_type interface;
u16 hw_type; /*92c or 92d or 92s and so on */
u8 ic_class;
@@ -1048,6 +1066,7 @@ struct rtl_hal {
u16 fw_subversion;
bool h2c_setinprogress;
u8 last_hmeboxnum;
+ bool fw_ready;
/*Reserve page start offset except beacon in TxQ. */
u8 fw_rsvdpage_startoffset;
u8 h2c_txcmd_seq;
@@ -1083,6 +1102,8 @@ struct rtl_hal {
bool load_imrandiqk_setting_for2g;
bool disable_amsdu_8k;
+ bool master_of_dmsp;
+ bool slave_of_dmsp;
};
struct rtl_security {
@@ -1144,6 +1165,9 @@ struct rtl_dm {
bool disable_tx_int;
char ofdm_index[2];
char cck_index;
+
+ /* DMSP */
+ bool supp_phymode_switch;
};
#define EFUSE_MAX_LOGICAL_SIZE 256
@@ -1337,6 +1361,10 @@ struct rtl_stats {
};
struct rt_link_detect {
+ /* count for roaming */
+ u32 bcn_rx_inperiod;
+ u32 roam_times;
+
u32 num_tx_in4period[4];
u32 num_rx_in4period[4];
@@ -1344,6 +1372,8 @@ struct rt_link_detect {
u32 num_rx_inperiod;
bool busytraffic;
+ bool tx_busy_traffic;
+ bool rx_busy_traffic;
bool higher_busytraffic;
bool higher_busyrxtraffic;
@@ -1418,6 +1448,7 @@ struct rtl_hal_ops {
void (*fill_tx_desc) (struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
struct sk_buff *skb, u8 hw_queue,
struct rtl_tcb_desc *ptcb_desc);
void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 *pDesc,
@@ -1454,7 +1485,12 @@ struct rtl_hal_ops {
u32 regaddr, u32 bitmask);
void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
u32 regaddr, u32 bitmask, u32 data);
+ void (*allow_all_destaddr)(struct ieee80211_hw *hw,
+ bool allow_all_da, bool write_into_reg);
void (*linked_set_reg) (struct ieee80211_hw *hw);
+ void (*check_switch_to_dmdp) (struct ieee80211_hw *hw);
+ void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw);
+ void (*dualmac_switch_to_dmdp) (struct ieee80211_hw *hw);
bool (*phy_rf6052_config) (struct ieee80211_hw *hw);
void (*phy_rf6052_set_cck_txpower) (struct ieee80211_hw *hw,
u8 *powerlevel);
@@ -1474,12 +1510,18 @@ struct rtl_intf_ops {
void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
int (*adapter_start) (struct ieee80211_hw *hw);
void (*adapter_stop) (struct ieee80211_hw *hw);
+ bool (*check_buddy_priv)(struct ieee80211_hw *hw,
+ struct rtl_priv **buddy_priv);
- int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb,
- struct rtl_tcb_desc *ptcb_desc);
+ int (*adapter_tx) (struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ struct rtl_tcb_desc *ptcb_desc);
void (*flush)(struct ieee80211_hw *hw, bool drop);
int (*reset_trx_ring) (struct ieee80211_hw *hw);
- bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb);
+ bool (*waitq_insert) (struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb);
/*pci */
void (*disable_aspm) (struct ieee80211_hw *hw);
@@ -1554,11 +1596,16 @@ struct rtl_locks {
spinlock_t h2c_lock;
spinlock_t rf_ps_lock;
spinlock_t rf_lock;
+ spinlock_t lps_lock;
spinlock_t waitq_lock;
+ spinlock_t entry_list_lock;
spinlock_t usb_lock;
/*Dual mac*/
spinlock_t cck_and_rw_pagea_lock;
+
+ /*Easy concurrent*/
+ spinlock_t check_sendpkt_lock;
};
struct rtl_works {
@@ -1566,6 +1613,7 @@ struct rtl_works {
/*timer */
struct timer_list watchdog_timer;
+ struct timer_list dualmac_easyconcurrent_retrytimer;
/*task */
struct tasklet_struct irq_tasklet;
@@ -1593,6 +1641,31 @@ struct rtl_debug {
char proc_name[20];
};
+#define MIMO_PS_STATIC 0
+#define MIMO_PS_DYNAMIC 1
+#define MIMO_PS_NOLIMIT 3
+
+struct rtl_dualmac_easy_concurrent_ctl {
+ enum band_type currentbandtype_backfordmdp;
+ bool close_bbandrf_for_dmsp;
+ bool change_to_dmdp;
+ bool change_to_dmsp;
+ bool switch_in_process;
+};
+
+struct rtl_dmsp_ctl {
+ bool activescan_for_slaveofdmsp;
+ bool scan_for_anothermac_fordmsp;
+ bool scan_for_itself_fordmsp;
+ bool writedig_for_anothermacofdmsp;
+ u32 curdigvalue_for_anothermacofdmsp;
+ bool changecckpdstate_for_anothermacofdmsp;
+ u8 curcckpdstate_for_anothermacofdmsp;
+ bool changetxhighpowerlvl_for_anothermacofdmsp;
+ u8 curtxhighlvl_for_anothermacofdmsp;
+ long rssivalmin_for_anothermacofdmsp;
+};
+
struct ps_t {
u8 pre_ccastate;
u8 cur_ccasate;
@@ -1619,7 +1692,7 @@ struct dig_t {
u8 dig_twoport_algorithm;
u8 dig_dbgmode;
u8 dig_slgorithm_switch;
- u8 cursta_connectctate;
+ u8 cursta_connectstate;
u8 presta_connectstate;
u8 curmultista_connectstate;
char backoff_val;
@@ -1652,8 +1725,20 @@ struct dig_t {
char backoffval_range_min;
};
+struct rtl_global_var {
+ /* from this list we can get
+ * other adapter's rtl_priv */
+ struct list_head glb_priv_list;
+ spinlock_t glb_list_lock;
+};
+
struct rtl_priv {
struct completion firmware_loading_complete;
+ struct list_head list;
+ struct rtl_priv *buddy_priv;
+ struct rtl_global_var *glb_var;
+ struct rtl_dualmac_easy_concurrent_ctl easy_concurrent_ctl;
+ struct rtl_dmsp_ctl dmsp_ctl;
struct rtl_locks locks;
struct rtl_works works;
struct rtl_mac mac80211;
@@ -1674,6 +1759,9 @@ struct rtl_priv {
struct rtl_rate_priv *rate_priv;
+ /* sta entry list for ap adhoc or mesh */
+ struct list_head entry_list;
+
struct rtl_debug dbg;
int max_fw_size;
@@ -1815,9 +1903,9 @@ struct bt_coexist_info {
EF1BYTE(*((u8 *)(_ptr)))
/* Read le16 data from memory and convert to host ordering */
#define READEF2BYTE(_ptr) \
- EF2BYTE(*((u16 *)(_ptr)))
+ EF2BYTE(*(_ptr))
#define READEF4BYTE(_ptr) \
- EF4BYTE(*((u32 *)(_ptr)))
+ EF4BYTE(*(_ptr))
/* Write data to memory */
#define WRITEEF1BYTE(_ptr, _val) \
@@ -1826,7 +1914,7 @@ struct bt_coexist_info {
#define WRITEEF2BYTE(_ptr, _val) \
(*((u16 *)(_ptr))) = EF2BYTE(_val)
#define WRITEEF4BYTE(_ptr, _val) \
- (*((u16 *)(_ptr))) = EF2BYTE(_val)
+ (*((u32 *)(_ptr))) = EF2BYTE(_val)
/* Create a bit mask
* Examples:
@@ -1859,9 +1947,9 @@ struct bt_coexist_info {
* 4-byte pointer in little-endian system.
*/
#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
- (EF4BYTE(*((u32 *)(__pstart))))
+ (EF4BYTE(*((__le32 *)(__pstart))))
#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
- (EF2BYTE(*((u16 *)(__pstart))))
+ (EF2BYTE(*((__le16 *)(__pstart))))
#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
(EF1BYTE(*((u8 *)(__pstart))))
@@ -1908,13 +1996,13 @@ value to host byte ordering.*/
* Set subfield of little-endian 4-byte value to specified value.
*/
#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
- *((u32 *)(__pstart)) = EF4BYTE \
+ *((u32 *)(__pstart)) = \
( \
LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
);
#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
- *((u16 *)(__pstart)) = EF2BYTE \
+ *((u16 *)(__pstart)) = \
( \
LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
@@ -2100,4 +2188,11 @@ static inline struct ieee80211_sta *get_sta(struct ieee80211_hw *hw,
return ieee80211_find_sta(vif, bssid);
}
+static inline struct ieee80211_sta *rtl_find_sta(struct ieee80211_hw *hw,
+ u8 *mac_addr)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ return ieee80211_find_sta(mac->vif, mac_addr);
+}
+
#endif
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 3118c425bcf1..441cbccbd381 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -354,7 +354,9 @@ out:
return ret;
}
-static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl1251_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct wl1251 *wl = hw->priv;
unsigned long flags;
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index f429fc110cb0..dadf1dbb002a 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -32,7 +32,6 @@
#include "../wlcore/acx.h"
#include "../wlcore/tx.h"
#include "../wlcore/rx.h"
-#include "../wlcore/io.h"
#include "../wlcore/boot.h"
#include "wl12xx.h"
@@ -1185,9 +1184,16 @@ static int wl12xx_enable_interrupts(struct wl1271 *wl)
ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
WL1271_ACX_INTR_ALL & ~(WL12XX_INTR_MASK));
if (ret < 0)
- goto out;
+ goto disable_interrupts;
ret = wlcore_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL);
+ if (ret < 0)
+ goto disable_interrupts;
+
+ return ret;
+
+disable_interrupts:
+ wlcore_disable_interrupts(wl);
out:
return ret;
@@ -1583,7 +1589,10 @@ static int wl12xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
return wlcore_set_key(wl, cmd, vif, sta, key_conf);
}
+static int wl12xx_setup(struct wl1271 *wl);
+
static struct wlcore_ops wl12xx_ops = {
+ .setup = wl12xx_setup,
.identify_chip = wl12xx_identify_chip,
.identify_fw = wl12xx_identify_fw,
.boot = wl12xx_boot,
@@ -1624,26 +1633,15 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
},
};
-static int __devinit wl12xx_probe(struct platform_device *pdev)
+static int wl12xx_setup(struct wl1271 *wl)
{
- struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
- struct wl1271 *wl;
- struct ieee80211_hw *hw;
- struct wl12xx_priv *priv;
-
- hw = wlcore_alloc_hw(sizeof(*priv));
- if (IS_ERR(hw)) {
- wl1271_error("can't allocate hw");
- return PTR_ERR(hw);
- }
+ struct wl12xx_priv *priv = wl->priv;
+ struct wl12xx_platform_data *pdata = wl->pdev->dev.platform_data;
- wl = hw->priv;
- priv = wl->priv;
- wl->ops = &wl12xx_ops;
- wl->ptable = wl12xx_ptable;
wl->rtable = wl12xx_rtable;
- wl->num_tx_desc = 16;
- wl->num_rx_desc = 8;
+ wl->num_tx_desc = WL12XX_NUM_TX_DESCRIPTORS;
+ wl->num_rx_desc = WL12XX_NUM_RX_DESCRIPTORS;
+ wl->num_mac_addr = WL12XX_NUM_MAC_ADDRESSES;
wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0;
@@ -1695,7 +1693,36 @@ static int __devinit wl12xx_probe(struct platform_device *pdev)
wl1271_error("Invalid tcxo parameter %s", tcxo_param);
}
- return wlcore_probe(wl, pdev);
+ return 0;
+}
+
+static int __devinit wl12xx_probe(struct platform_device *pdev)
+{
+ struct wl1271 *wl;
+ struct ieee80211_hw *hw;
+ int ret;
+
+ hw = wlcore_alloc_hw(sizeof(struct wl12xx_priv),
+ WL12XX_AGGR_BUFFER_SIZE);
+ if (IS_ERR(hw)) {
+ wl1271_error("can't allocate hw");
+ ret = PTR_ERR(hw);
+ goto out;
+ }
+
+ wl = hw->priv;
+ wl->ops = &wl12xx_ops;
+ wl->ptable = wl12xx_ptable;
+ ret = wlcore_probe(wl, pdev);
+ if (ret)
+ goto out_free;
+
+ return ret;
+
+out_free:
+ wlcore_free_hw(wl);
+out:
+ return ret;
}
static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
@@ -1714,17 +1741,7 @@ static struct platform_driver wl12xx_driver = {
}
};
-static int __init wl12xx_init(void)
-{
- return platform_driver_register(&wl12xx_driver);
-}
-module_init(wl12xx_init);
-
-static void __exit wl12xx_exit(void)
-{
- platform_driver_unregister(&wl12xx_driver);
-}
-module_exit(wl12xx_exit);
+module_platform_driver(wl12xx_driver);
module_param_named(fref, fref_param, charp, 0);
MODULE_PARM_DESC(fref, "FREF clock: 19.2, 26, 26x, 38.4, 38.4x, 52");
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 26990fb4edea..7182bbf6625d 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -38,6 +38,13 @@
#define WL128X_SUBTYPE_VER 2
#define WL128X_MINOR_VER 115
+#define WL12XX_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
+
+#define WL12XX_NUM_TX_DESCRIPTORS 16
+#define WL12XX_NUM_RX_DESCRIPTORS 8
+
+#define WL12XX_NUM_MAC_ADDRESSES 2
+
struct wl127x_rx_mem_pool_addr {
u32 addr;
u32 addr_extra;
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index 3ce6f1039af3..7f1669cdea09 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -220,7 +220,7 @@ static ssize_t clear_fw_stats_write(struct file *file,
mutex_lock(&wl->mutex);
- if (wl->state == WL1271_STATE_OFF)
+ if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = wl18xx_acx_clear_statistics(wl);
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 69042bb9a097..a39682a7c25f 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -30,7 +30,6 @@
#include "../wlcore/acx.h"
#include "../wlcore/tx.h"
#include "../wlcore/rx.h"
-#include "../wlcore/io.h"
#include "../wlcore/boot.h"
#include "reg.h"
@@ -46,7 +45,6 @@
static char *ht_mode_param = NULL;
static char *board_type_param = NULL;
static bool checksum_param = false;
-static bool enable_11a_param = true;
static int num_rx_desc_param = -1;
/* phy paramters */
@@ -416,7 +414,7 @@ static struct wlcore_conf wl18xx_conf = {
.snr_threshold = 0,
},
.ht = {
- .rx_ba_win_size = 10,
+ .rx_ba_win_size = 32,
.tx_ba_win_size = 64,
.inactivity_timeout = 10000,
.tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
@@ -506,8 +504,8 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
.rdl = 0x01,
.auto_detect = 0x00,
.dedicated_fem = FEM_NONE,
- .low_band_component = COMPONENT_2_WAY_SWITCH,
- .low_band_component_type = 0x06,
+ .low_band_component = COMPONENT_3_WAY_SWITCH,
+ .low_band_component_type = 0x04,
.high_band_component = COMPONENT_2_WAY_SWITCH,
.high_band_component_type = 0x09,
.tcxo_ldo_voltage = 0x00,
@@ -813,6 +811,13 @@ static int wl18xx_enable_interrupts(struct wl1271 *wl)
ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
WL1271_ACX_INTR_ALL & ~intr_mask);
+ if (ret < 0)
+ goto disable_interrupts;
+
+ return ret;
+
+disable_interrupts:
+ wlcore_disable_interrupts(wl);
out:
return ret;
@@ -1203,6 +1208,12 @@ static int wl18xx_handle_static_data(struct wl1271 *wl,
struct wl18xx_static_data_priv *static_data_priv =
(struct wl18xx_static_data_priv *) static_data->priv;
+ strncpy(wl->chip.phy_fw_ver_str, static_data_priv->phy_version,
+ sizeof(wl->chip.phy_fw_ver_str));
+
+ /* make sure the string is NULL-terminated */
+ wl->chip.phy_fw_ver_str[sizeof(wl->chip.phy_fw_ver_str) - 1] = '\0';
+
wl1271_info("PHY firmware version: %s", static_data_priv->phy_version);
return 0;
@@ -1241,13 +1252,6 @@ static int wl18xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
if (!change_spare)
return wlcore_set_key(wl, cmd, vif, sta, key_conf);
- /*
- * stop the queues and flush to ensure the next packets are
- * in sync with FW spare block accounting
- */
- wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
- wl1271_tx_flush(wl);
-
ret = wlcore_set_key(wl, cmd, vif, sta, key_conf);
if (ret < 0)
goto out;
@@ -1270,7 +1274,6 @@ static int wl18xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
}
out:
- wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
return ret;
}
@@ -1293,7 +1296,10 @@ static u32 wl18xx_pre_pkt_send(struct wl1271 *wl,
return buf_offset;
}
+static int wl18xx_setup(struct wl1271 *wl);
+
static struct wlcore_ops wl18xx_ops = {
+ .setup = wl18xx_setup,
.identify_chip = wl18xx_identify_chip,
.boot = wl18xx_boot,
.plt_init = wl18xx_plt_init,
@@ -1374,27 +1380,15 @@ static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
},
};
-static int __devinit wl18xx_probe(struct platform_device *pdev)
+static int wl18xx_setup(struct wl1271 *wl)
{
- struct wl1271 *wl;
- struct ieee80211_hw *hw;
- struct wl18xx_priv *priv;
+ struct wl18xx_priv *priv = wl->priv;
int ret;
- hw = wlcore_alloc_hw(sizeof(*priv));
- if (IS_ERR(hw)) {
- wl1271_error("can't allocate hw");
- ret = PTR_ERR(hw);
- goto out;
- }
-
- wl = hw->priv;
- priv = wl->priv;
- wl->ops = &wl18xx_ops;
- wl->ptable = wl18xx_ptable;
wl->rtable = wl18xx_rtable;
- wl->num_tx_desc = 32;
- wl->num_rx_desc = 32;
+ wl->num_tx_desc = WL18XX_NUM_TX_DESCRIPTORS;
+ wl->num_rx_desc = WL18XX_NUM_TX_DESCRIPTORS;
+ wl->num_mac_addr = WL18XX_NUM_MAC_ADDRESSES;
wl->band_rate_to_idx = wl18xx_band_rate_to_idx;
wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX;
wl->hw_min_ht_rate = WL18XX_CONF_HW_RXTX_RATE_MCS0;
@@ -1405,9 +1399,9 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
if (num_rx_desc_param != -1)
wl->num_rx_desc = num_rx_desc_param;
- ret = wl18xx_conf_init(wl, &pdev->dev);
+ ret = wl18xx_conf_init(wl, wl->dev);
if (ret < 0)
- goto out_free;
+ return ret;
/* If the module param is set, update it in conf */
if (board_type_param) {
@@ -1424,27 +1418,14 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
} else {
wl1271_error("invalid board type '%s'",
board_type_param);
- ret = -EINVAL;
- goto out_free;
+ return -EINVAL;
}
}
- /* HACK! Just for now we hardcode COM8 and HDK to 0x06 */
- switch (priv->conf.phy.board_type) {
- case BOARD_TYPE_HDK_18XX:
- case BOARD_TYPE_COM8_18XX:
- priv->conf.phy.low_band_component_type = 0x06;
- break;
- case BOARD_TYPE_FPGA_18XX:
- case BOARD_TYPE_DVP_18XX:
- case BOARD_TYPE_EVB_18XX:
- priv->conf.phy.low_band_component_type = 0x05;
- break;
- default:
+ if (priv->conf.phy.board_type >= NUM_BOARD_TYPES) {
wl1271_error("invalid board type '%d'",
priv->conf.phy.board_type);
- ret = -EINVAL;
- goto out_free;
+ return -EINVAL;
}
if (low_band_component_param != -1)
@@ -1476,22 +1457,21 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
priv->conf.ht.mode = HT_MODE_SISO20;
else {
wl1271_error("invalid ht_mode '%s'", ht_mode_param);
- ret = -EINVAL;
- goto out_free;
+ return -EINVAL;
}
}
if (priv->conf.ht.mode == HT_MODE_DEFAULT) {
/*
* Only support mimo with multiple antennas. Fall back to
- * siso20.
+ * siso40.
*/
if (wl18xx_is_mimo_supported(wl))
wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
&wl18xx_mimo_ht_cap_2ghz);
else
wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
- &wl18xx_siso20_ht_cap);
+ &wl18xx_siso40_ht_cap_2ghz);
/* 5Ghz is always wide */
wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
@@ -1513,9 +1493,34 @@ static int __devinit wl18xx_probe(struct platform_device *pdev)
wl18xx_ops.init_vif = NULL;
}
- wl->enable_11a = enable_11a_param;
+ /* Enable 11a Band only if we have 5G antennas */
+ wl->enable_11a = (priv->conf.phy.number_of_assembled_ant5 != 0);
+
+ return 0;
+}
+
+static int __devinit wl18xx_probe(struct platform_device *pdev)
+{
+ struct wl1271 *wl;
+ struct ieee80211_hw *hw;
+ int ret;
+
+ hw = wlcore_alloc_hw(sizeof(struct wl18xx_priv),
+ WL18XX_AGGR_BUFFER_SIZE);
+ if (IS_ERR(hw)) {
+ wl1271_error("can't allocate hw");
+ ret = PTR_ERR(hw);
+ goto out;
+ }
+
+ wl = hw->priv;
+ wl->ops = &wl18xx_ops;
+ wl->ptable = wl18xx_ptable;
+ ret = wlcore_probe(wl, pdev);
+ if (ret)
+ goto out_free;
- return wlcore_probe(wl, pdev);
+ return ret;
out_free:
wlcore_free_hw(wl);
@@ -1539,18 +1544,7 @@ static struct platform_driver wl18xx_driver = {
}
};
-static int __init wl18xx_init(void)
-{
- return platform_driver_register(&wl18xx_driver);
-}
-module_init(wl18xx_init);
-
-static void __exit wl18xx_exit(void)
-{
- platform_driver_unregister(&wl18xx_driver);
-}
-module_exit(wl18xx_exit);
-
+module_platform_driver(wl18xx_driver);
module_param_named(ht_mode, ht_mode_param, charp, S_IRUSR);
MODULE_PARM_DESC(ht_mode, "Force HT mode: wide or siso20");
@@ -1561,9 +1555,6 @@ MODULE_PARM_DESC(board_type, "Board type: fpga, hdk (default), evb, com8 or "
module_param_named(checksum, checksum_param, bool, S_IRUSR);
MODULE_PARM_DESC(checksum, "Enable TCP checksum: boolean (defaults to false)");
-module_param_named(enable_11a, enable_11a_param, bool, S_IRUSR);
-MODULE_PARM_DESC(enable_11a, "Enable 11a (5GHz): boolean (defaults to true)");
-
module_param_named(dc2dc, dc2dc_param, int, S_IRUSR);
MODULE_PARM_DESC(dc2dc, "External DC2DC: u8 (defaults to 0)");
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
index 6452396fa1d4..96a1e438d677 100644
--- a/drivers/net/wireless/ti/wl18xx/wl18xx.h
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -33,6 +33,13 @@
#define WL18XX_CMD_MAX_SIZE 740
+#define WL18XX_AGGR_BUFFER_SIZE (13 * PAGE_SIZE)
+
+#define WL18XX_NUM_TX_DESCRIPTORS 32
+#define WL18XX_NUM_RX_DESCRIPTORS 32
+
+#define WL18XX_NUM_MAC_ADDRESSES 3
+
struct wl18xx_priv {
/* buffer for sending commands to FW */
u8 cmd_buf[WL18XX_CMD_MAX_SIZE];
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 20e1bd923832..eaef3f41b252 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -59,6 +59,9 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
u16 status;
u16 poll_count = 0;
+ if (WARN_ON(unlikely(wl->state == WLCORE_STATE_RESTARTING)))
+ return -EIO;
+
cmd = buf;
cmd->id = cpu_to_le16(id);
cmd->status = 0;
@@ -990,7 +993,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_KLV,
skb->data, skb->len,
- CMD_TEMPL_KLV_IDX_NULL_DATA,
+ wlvif->sta.klv_template_id,
wlvif->basic_rate);
out:
@@ -1785,10 +1788,17 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
wlvif->bss_type == BSS_TYPE_IBSS)))
return -EINVAL;
- ret = wl12xx_cmd_role_start_dev(wl, wlvif);
+ ret = wl12xx_cmd_role_enable(wl,
+ wl12xx_wlvif_to_vif(wlvif)->addr,
+ WL1271_ROLE_DEVICE,
+ &wlvif->dev_role_id);
if (ret < 0)
goto out;
+ ret = wl12xx_cmd_role_start_dev(wl, wlvif);
+ if (ret < 0)
+ goto out_disable;
+
ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id);
if (ret < 0)
goto out_stop;
@@ -1797,6 +1807,8 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
out_stop:
wl12xx_cmd_role_stop_dev(wl, wlvif);
+out_disable:
+ wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
out:
return ret;
}
@@ -1824,6 +1836,11 @@ int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
ret = wl12xx_cmd_role_stop_dev(wl, wlvif);
if (ret < 0)
goto out;
+
+ ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+ if (ret < 0)
+ goto out;
+
out:
return ret;
}
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index 4ef0b095f0d6..2409f3d71f63 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -157,11 +157,6 @@ enum wl1271_commands {
#define MAX_CMD_PARAMS 572
-enum {
- CMD_TEMPL_KLV_IDX_NULL_DATA = 0,
- CMD_TEMPL_KLV_IDX_MAX = 4
-};
-
enum cmd_templ {
CMD_TEMPL_NULL_DATA = 0,
CMD_TEMPL_BEACON,
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index d77224f2ac6b..9e40760bafe1 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -412,8 +412,7 @@ struct conf_rx_settings {
#define CONF_TX_RATE_RETRY_LIMIT 10
/* basic rates for p2p operations (probe req/resp, etc.) */
-#define CONF_TX_RATE_MASK_BASIC_P2P (CONF_HW_BIT_RATE_6MBPS | \
- CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS)
+#define CONF_TX_RATE_MASK_BASIC_P2P CONF_HW_BIT_RATE_6MBPS
/*
* Rates supported for data packets when operating as AP. Note the absence
diff --git a/drivers/net/wireless/ti/wlcore/debug.h b/drivers/net/wireless/ti/wlcore/debug.h
index 6b800b3cbea5..db4bf5a68ce2 100644
--- a/drivers/net/wireless/ti/wlcore/debug.h
+++ b/drivers/net/wireless/ti/wlcore/debug.h
@@ -28,7 +28,7 @@
#include <linux/bitops.h>
#include <linux/printk.h>
-#define DRIVER_NAME "wl12xx"
+#define DRIVER_NAME "wlcore"
#define DRIVER_PREFIX DRIVER_NAME ": "
enum {
@@ -73,11 +73,21 @@ extern u32 wl12xx_debug_level;
#define wl1271_info(fmt, arg...) \
pr_info(DRIVER_PREFIX fmt "\n", ##arg)
+/* define the debug macro differently if dynamic debug is supported */
+#if defined(CONFIG_DYNAMIC_DEBUG)
#define wl1271_debug(level, fmt, arg...) \
do { \
- if (level & wl12xx_debug_level) \
- pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
+ if (unlikely(level & wl12xx_debug_level)) \
+ dynamic_pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
+ } while (0)
+#else
+#define wl1271_debug(level, fmt, arg...) \
+ do { \
+ if (unlikely(level & wl12xx_debug_level)) \
+ printk(KERN_DEBUG pr_fmt(DRIVER_PREFIX fmt "\n"), \
+ ##arg); \
} while (0)
+#endif
/* TODO: use pr_debug_hex_dump when it becomes available */
#define wl1271_dump(level, prefix, buf, len) \
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index 80dbc5304fac..c86bb00c2488 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -62,11 +62,14 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl)
mutex_lock(&wl->mutex);
+ if (unlikely(wl->state != WLCORE_STATE_ON))
+ goto out;
+
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- if (wl->state == WL1271_STATE_ON && !wl->plt &&
+ if (!wl->plt &&
time_after(jiffies, wl->stats.fw_stats_update +
msecs_to_jiffies(WL1271_DEBUGFS_STATS_LIFETIME))) {
wl1271_acx_statistics(wl, wl->stats.fw_stats);
@@ -286,7 +289,7 @@ static ssize_t dynamic_ps_timeout_write(struct file *file,
wl->conf.conn.dynamic_ps_timeout = value;
- if (wl->state == WL1271_STATE_OFF)
+ if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = wl1271_ps_elp_wakeup(wl);
@@ -353,7 +356,7 @@ static ssize_t forced_ps_write(struct file *file,
wl->conf.conn.forced_ps = value;
- if (wl->state == WL1271_STATE_OFF)
+ if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = wl1271_ps_elp_wakeup(wl);
@@ -486,6 +489,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
DRIVER_STATE_PRINT_HEX(platform_quirks);
DRIVER_STATE_PRINT_HEX(chip.id);
DRIVER_STATE_PRINT_STR(chip.fw_ver_str);
+ DRIVER_STATE_PRINT_STR(chip.phy_fw_ver_str);
DRIVER_STATE_PRINT_INT(sched_scanning);
#undef DRIVER_STATE_PRINT_INT
@@ -999,7 +1003,7 @@ static ssize_t sleep_auth_write(struct file *file,
wl->conf.conn.sta_sleep_auth = value;
- if (wl->state == WL1271_STATE_OFF) {
+ if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* this will show up on "read" in case we are off */
wl->sleep_auth = value;
goto out;
@@ -1060,14 +1064,16 @@ static ssize_t dev_mem_read(struct file *file,
mutex_lock(&wl->mutex);
- if (wl->state == WL1271_STATE_OFF) {
+ if (unlikely(wl->state == WLCORE_STATE_OFF)) {
ret = -EFAULT;
goto skip_read;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto skip_read;
+ /*
+ * Don't fail if elp_wakeup returns an error, so the device's memory
+ * could be read even if the FW crashed
+ */
+ wl1271_ps_elp_wakeup(wl);
/* store current partition and switch partition */
memcpy(&old_part, &wl->curr_part, sizeof(old_part));
@@ -1145,14 +1151,16 @@ static ssize_t dev_mem_write(struct file *file, const char __user *user_buf,
mutex_lock(&wl->mutex);
- if (wl->state == WL1271_STATE_OFF) {
+ if (unlikely(wl->state == WLCORE_STATE_OFF)) {
ret = -EFAULT;
goto skip_write;
}
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto skip_write;
+ /*
+ * Don't fail if elp_wakeup returns an error, so the device's memory
+ * could be read even if the FW crashed
+ */
+ wl1271_ps_elp_wakeup(wl);
/* store current partition and switch partition */
memcpy(&old_part, &wl->curr_part, sizeof(old_part));
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index a3c867786df8..32d157f62f31 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -141,7 +141,7 @@ int wl1271_init_templates_config(struct wl1271 *wl)
if (ret < 0)
return ret;
- for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
+ for (i = 0; i < WLCORE_MAX_KLV_TEMPLATES; i++) {
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
CMD_TEMPL_KLV, NULL,
sizeof(struct ieee80211_qos_hdr),
@@ -371,15 +371,7 @@ static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl,
struct ieee80211_vif *vif)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
- int ret, i;
-
- /* disable all keep-alive templates */
- for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
- ret = wl1271_acx_keep_alive_config(wl, wlvif, i,
- ACX_KEEP_ALIVE_TPL_INVALID);
- if (ret < 0)
- return ret;
- }
+ int ret;
/* disable the keep-alive feature */
ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 259149f36fae..f48530fec14f 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -64,7 +64,7 @@ static inline int __must_check wlcore_raw_write(struct wl1271 *wl, int addr,
return -EIO;
ret = wl->if_ops->write(wl->dev, addr, buf, len, fixed);
- if (ret && wl->state != WL1271_STATE_OFF)
+ if (ret && wl->state != WLCORE_STATE_OFF)
set_bit(WL1271_FLAG_IO_FAILED, &wl->flags);
return ret;
@@ -80,7 +80,7 @@ static inline int __must_check wlcore_raw_read(struct wl1271 *wl, int addr,
return -EIO;
ret = wl->if_ops->read(wl->dev, addr, buf, len, fixed);
- if (ret && wl->state != WL1271_STATE_OFF)
+ if (ret && wl->state != WLCORE_STATE_OFF)
set_bit(WL1271_FLAG_IO_FAILED, &wl->flags);
return ret;
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 72548609f711..25530c8760cb 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -248,7 +248,7 @@ static void wl12xx_tx_watchdog_work(struct work_struct *work)
mutex_lock(&wl->mutex);
- if (unlikely(wl->state == WL1271_STATE_OFF))
+ if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
/* Tx went out in the meantime - everything is ok */
@@ -512,7 +512,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
wl1271_debug(DEBUG_IRQ, "IRQ work");
- if (unlikely(wl->state == WL1271_STATE_OFF))
+ if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = wl1271_ps_elp_wakeup(wl);
@@ -696,7 +696,7 @@ static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
* we can't call wl12xx_get_vif_count() here because
* wl->mutex is taken, so use the cached last_vif_count value
*/
- if (wl->last_vif_count > 1) {
+ if (wl->last_vif_count > 1 && wl->mr_fw_name) {
fw_type = WL12XX_FW_TYPE_MULTI;
fw_name = wl->mr_fw_name;
} else {
@@ -744,38 +744,14 @@ out:
return ret;
}
-static void wl1271_fetch_nvs(struct wl1271 *wl)
-{
- const struct firmware *fw;
- int ret;
-
- ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
-
- if (ret < 0) {
- wl1271_debug(DEBUG_BOOT, "could not get nvs file %s: %d",
- WL12XX_NVS_NAME, ret);
- return;
- }
-
- wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
-
- if (!wl->nvs) {
- wl1271_error("could not allocate memory for the nvs file");
- goto out;
- }
-
- wl->nvs_len = fw->size;
-
-out:
- release_firmware(fw);
-}
-
void wl12xx_queue_recovery_work(struct wl1271 *wl)
{
WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
/* Avoid a recursive recovery */
- if (!test_and_set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
+ if (wl->state == WLCORE_STATE_ON) {
+ wl->state = WLCORE_STATE_RESTARTING;
+ set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
wlcore_disable_interrupts_nosync(wl);
ieee80211_queue_work(wl->hw, &wl->recovery_work);
}
@@ -913,7 +889,7 @@ static void wl1271_recovery_work(struct work_struct *work)
mutex_lock(&wl->mutex);
- if (wl->state != WL1271_STATE_ON || wl->plt)
+ if (wl->state == WLCORE_STATE_OFF || wl->plt)
goto out_unlock;
if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
@@ -1081,7 +1057,7 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
wl1271_notice("power up");
- if (wl->state != WL1271_STATE_OFF) {
+ if (wl->state != WLCORE_STATE_OFF) {
wl1271_error("cannot go into PLT state because not "
"in off state: %d", wl->state);
ret = -EBUSY;
@@ -1102,7 +1078,7 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
if (ret < 0)
goto power_off;
- wl->state = WL1271_STATE_ON;
+ wl->state = WLCORE_STATE_ON;
wl1271_notice("firmware booted in PLT mode %s (%s)",
PLT_MODE[plt_mode],
wl->chip.fw_ver_str);
@@ -1171,7 +1147,7 @@ int wl1271_plt_stop(struct wl1271 *wl)
wl1271_power_off(wl);
wl->flags = 0;
wl->sleep_auth = WL1271_PSM_ILLEGAL;
- wl->state = WL1271_STATE_OFF;
+ wl->state = WLCORE_STATE_OFF;
wl->plt = false;
wl->plt_mode = PLT_OFF;
wl->rx_counter = 0;
@@ -1181,7 +1157,9 @@ out:
return ret;
}
-static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl1271_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct wl1271 *wl = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1197,7 +1175,7 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
mapping = skb_get_queue_mapping(skb);
q = wl1271_tx_get_queue(mapping);
- hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
+ hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
spin_lock_irqsave(&wl->wl_lock, flags);
@@ -1600,12 +1578,6 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
goto out;
- if ((wl->conf.conn.suspend_wake_up_event ==
- wl->conf.conn.wake_up_event) &&
- (wl->conf.conn.suspend_listen_interval ==
- wl->conf.conn.listen_interval))
- goto out;
-
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -1614,6 +1586,12 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
if (ret < 0)
goto out_sleep;
+ if ((wl->conf.conn.suspend_wake_up_event ==
+ wl->conf.conn.wake_up_event) &&
+ (wl->conf.conn.suspend_listen_interval ==
+ wl->conf.conn.listen_interval))
+ goto out_sleep;
+
ret = wl1271_acx_wake_up_conditions(wl, wlvif,
wl->conf.conn.suspend_wake_up_event,
wl->conf.conn.suspend_listen_interval);
@@ -1669,11 +1647,7 @@ static void wl1271_configure_resume(struct wl1271 *wl,
if ((!is_ap) && (!is_sta))
return;
- if (is_sta &&
- ((wl->conf.conn.suspend_wake_up_event ==
- wl->conf.conn.wake_up_event) &&
- (wl->conf.conn.suspend_listen_interval ==
- wl->conf.conn.listen_interval)))
+ if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
return;
ret = wl1271_ps_elp_wakeup(wl);
@@ -1683,6 +1657,12 @@ static void wl1271_configure_resume(struct wl1271 *wl,
if (is_sta) {
wl1271_configure_wowlan(wl, NULL);
+ if ((wl->conf.conn.suspend_wake_up_event ==
+ wl->conf.conn.wake_up_event) &&
+ (wl->conf.conn.suspend_listen_interval ==
+ wl->conf.conn.listen_interval))
+ goto out_sleep;
+
ret = wl1271_acx_wake_up_conditions(wl, wlvif,
wl->conf.conn.wake_up_event,
wl->conf.conn.listen_interval);
@@ -1695,6 +1675,7 @@ static void wl1271_configure_resume(struct wl1271 *wl,
ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
}
+out_sleep:
wl1271_ps_elp_sleep(wl);
}
@@ -1831,7 +1812,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
{
int i;
- if (wl->state == WL1271_STATE_OFF) {
+ if (wl->state == WLCORE_STATE_OFF) {
if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
&wl->flags))
wlcore_enable_interrupts(wl);
@@ -1843,7 +1824,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
* this must be before the cancel_work calls below, so that the work
* functions don't perform further work.
*/
- wl->state = WL1271_STATE_OFF;
+ wl->state = WLCORE_STATE_OFF;
/*
* Use the nosync variant to disable interrupts, so the mutex could be
@@ -1854,6 +1835,8 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
mutex_unlock(&wl->mutex);
wlcore_synchronize_interrupts(wl);
+ if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
+ cancel_work_sync(&wl->recovery_work);
wl1271_flush_deferred_work(wl);
cancel_delayed_work_sync(&wl->scan_complete_work);
cancel_work_sync(&wl->netstack_work);
@@ -1956,6 +1939,27 @@ static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
*idx = WL12XX_MAX_RATE_POLICIES;
}
+static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
+{
+ u8 policy = find_first_zero_bit(wl->klv_templates_map,
+ WLCORE_MAX_KLV_TEMPLATES);
+ if (policy >= WLCORE_MAX_KLV_TEMPLATES)
+ return -EBUSY;
+
+ __set_bit(policy, wl->klv_templates_map);
+ *idx = policy;
+ return 0;
+}
+
+static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
+{
+ if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
+ return;
+
+ __clear_bit(*idx, wl->klv_templates_map);
+ *idx = WLCORE_MAX_KLV_TEMPLATES;
+}
+
static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
switch (wlvif->bss_type) {
@@ -2020,6 +2024,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+ wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
@@ -2096,7 +2101,7 @@ irq_disable:
/* Unlocking the mutex in the middle of handling is
inherently unsafe. In this case we deem it safe to do,
because we need to let any possibly pending IRQ out of
- the system (and while we are WL1271_STATE_OFF the IRQ
+ the system (and while we are WLCORE_STATE_OFF the IRQ
work function will not do anything.) Also, any other
possible concurrent operations will fail due to the
current state, hence the wl1271 struct should be safe. */
@@ -2131,7 +2136,7 @@ power_off:
wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
wl->enable_11a ? "" : "not ");
- wl->state = WL1271_STATE_ON;
+ wl->state = WLCORE_STATE_ON;
out:
return booted;
}
@@ -2165,7 +2170,11 @@ static bool wl12xx_need_fw_change(struct wl1271 *wl,
wl->last_vif_count = vif_count;
/* no need for fw change if the device is OFF */
- if (wl->state == WL1271_STATE_OFF)
+ if (wl->state == WLCORE_STATE_OFF)
+ return false;
+
+ /* no need for fw change if a single fw is used */
+ if (!wl->mr_fw_name)
return false;
if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
@@ -2247,7 +2256,7 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
* TODO: once the nvs issue is solved, move this block
* to start(), and make sure the driver is ON here.
*/
- if (wl->state == WL1271_STATE_OFF) {
+ if (wl->state == WLCORE_STATE_OFF) {
/*
* we still need this in order to configure the fw
* while uploading the nvs
@@ -2261,21 +2270,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
}
}
- if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
- wlvif->bss_type == BSS_TYPE_IBSS) {
- /*
- * The device role is a special role used for
- * rx and tx frames prior to association (as
- * the STA role can get packets only from
- * its associated bssid)
- */
- ret = wl12xx_cmd_role_enable(wl, vif->addr,
- WL1271_ROLE_DEVICE,
- &wlvif->dev_role_id);
- if (ret < 0)
- goto out;
- }
-
ret = wl12xx_cmd_role_enable(wl, vif->addr,
role_type, &wlvif->role_id);
if (ret < 0)
@@ -2314,7 +2308,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
return;
/* because of hardware recovery, we may get here twice */
- if (wl->state != WL1271_STATE_ON)
+ if (wl->state == WLCORE_STATE_OFF)
return;
wl1271_info("down");
@@ -2344,10 +2338,6 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
wlvif->bss_type == BSS_TYPE_IBSS) {
if (wl12xx_dev_role_started(wlvif))
wl12xx_stop_dev(wl, wlvif);
-
- ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
- if (ret < 0)
- goto deinit;
}
ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
@@ -2366,6 +2356,7 @@ deinit:
wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+ wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
} else {
wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
@@ -2430,12 +2421,11 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl12xx_vif *iter;
struct vif_counter_data vif_count;
- bool cancel_recovery = true;
wl12xx_get_vif_count(hw, vif, &vif_count);
mutex_lock(&wl->mutex);
- if (wl->state == WL1271_STATE_OFF ||
+ if (wl->state == WLCORE_STATE_OFF ||
!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
goto out;
@@ -2455,12 +2445,9 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
wl12xx_force_active_psm(wl);
set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
wl12xx_queue_recovery_work(wl);
- cancel_recovery = false;
}
out:
mutex_unlock(&wl->mutex);
- if (cancel_recovery)
- cancel_work_sync(&wl->recovery_work);
}
static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
@@ -2534,7 +2521,7 @@ static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
goto out;
ret = wl1271_acx_keep_alive_config(wl, wlvif,
- CMD_TEMPL_KLV_IDX_NULL_DATA,
+ wlvif->sta.klv_template_id,
ACX_KEEP_ALIVE_TPL_VALID);
if (ret < 0)
goto out;
@@ -2554,6 +2541,11 @@ static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
ieee80211_chswitch_done(vif, false);
}
+ /* invalidate keep-alive template */
+ wl1271_acx_keep_alive_config(wl, wlvif,
+ wlvif->sta.klv_template_id,
+ ACX_KEEP_ALIVE_TPL_INVALID);
+
/* to stop listening to a channel, we disconnect */
ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
if (ret < 0)
@@ -2594,11 +2586,6 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
goto out;
- ret = wl1271_acx_keep_alive_config(
- wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
- ACX_KEEP_ALIVE_TPL_INVALID);
- if (ret < 0)
- goto out;
clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
} else {
/* The current firmware only supports sched_scan in idle */
@@ -2770,7 +2757,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_POWER)
wl->power_level = conf->power_level;
- if (unlikely(wl->state == WL1271_STATE_OFF))
+ if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = wl1271_ps_elp_wakeup(wl);
@@ -2804,10 +2791,6 @@ static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
{
struct wl1271_filter_params *fp;
struct netdev_hw_addr *ha;
- struct wl1271 *wl = hw->priv;
-
- if (unlikely(wl->state == WL1271_STATE_OFF))
- return 0;
fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
if (!fp) {
@@ -2856,7 +2839,7 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
*total &= WL1271_SUPPORTED_FILTERS;
changed &= WL1271_SUPPORTED_FILTERS;
- if (unlikely(wl->state == WL1271_STATE_OFF))
+ if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = wl1271_ps_elp_wakeup(wl);
@@ -3080,8 +3063,45 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key_conf)
{
struct wl1271 *wl = hw->priv;
+ int ret;
+ bool might_change_spare =
+ key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
+ key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
- return wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
+ if (might_change_spare) {
+ /*
+ * stop the queues and flush to ensure the next packets are
+ * in sync with FW spare block accounting
+ */
+ mutex_lock(&wl->mutex);
+ wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
+ mutex_unlock(&wl->mutex);
+
+ wl1271_tx_flush(wl);
+ }
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state != WLCORE_STATE_ON)) {
+ ret = -EAGAIN;
+ goto out_wake_queues;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out_wake_queues;
+
+ ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
+
+ wl1271_ps_elp_sleep(wl);
+
+out_wake_queues:
+ if (might_change_spare)
+ wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
+
+ mutex_unlock(&wl->mutex);
+
+ return ret;
}
int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
@@ -3103,17 +3123,6 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
key_conf->keylen, key_conf->flags);
wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
- mutex_lock(&wl->mutex);
-
- if (unlikely(wl->state == WL1271_STATE_OFF)) {
- ret = -EAGAIN;
- goto out_unlock;
- }
-
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out_unlock;
-
switch (key_conf->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
@@ -3143,8 +3152,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
default:
wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
- ret = -EOPNOTSUPP;
- goto out_sleep;
+ return -EOPNOTSUPP;
}
switch (cmd) {
@@ -3155,7 +3163,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
tx_seq_32, tx_seq_16, sta);
if (ret < 0) {
wl1271_error("Could not add or replace key");
- goto out_sleep;
+ return ret;
}
/*
@@ -3169,7 +3177,7 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
if (ret < 0) {
wl1271_warning("build arp rsp failed: %d", ret);
- goto out_sleep;
+ return ret;
}
}
break;
@@ -3181,22 +3189,15 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
0, 0, sta);
if (ret < 0) {
wl1271_error("Could not remove key");
- goto out_sleep;
+ return ret;
}
break;
default:
wl1271_error("Unsupported key cmd 0x%x", cmd);
- ret = -EOPNOTSUPP;
- break;
+ return -EOPNOTSUPP;
}
-out_sleep:
- wl1271_ps_elp_sleep(wl);
-
-out_unlock:
- mutex_unlock(&wl->mutex);
-
return ret;
}
EXPORT_SYMBOL_GPL(wlcore_set_key);
@@ -3219,7 +3220,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- if (wl->state == WL1271_STATE_OFF) {
+ if (unlikely(wl->state != WLCORE_STATE_ON)) {
/*
* We cannot return -EBUSY here because cfg80211 will expect
* a call to ieee80211_scan_completed if we do - in this case
@@ -3259,7 +3260,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- if (wl->state == WL1271_STATE_OFF)
+ if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
@@ -3308,7 +3309,7 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- if (wl->state == WL1271_STATE_OFF) {
+ if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EAGAIN;
goto out;
}
@@ -3345,7 +3346,7 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- if (wl->state == WL1271_STATE_OFF)
+ if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = wl1271_ps_elp_wakeup(wl);
@@ -3366,7 +3367,7 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
mutex_lock(&wl->mutex);
- if (unlikely(wl->state == WL1271_STATE_OFF)) {
+ if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EAGAIN;
goto out;
}
@@ -3395,7 +3396,7 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
mutex_lock(&wl->mutex);
- if (unlikely(wl->state == WL1271_STATE_OFF)) {
+ if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EAGAIN;
goto out;
}
@@ -4171,7 +4172,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- if (unlikely(wl->state == WL1271_STATE_OFF))
+ if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
@@ -4255,7 +4256,7 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- if (unlikely(wl->state == WL1271_STATE_OFF))
+ if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = wl1271_ps_elp_wakeup(wl);
@@ -4454,7 +4455,7 @@ static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- if (unlikely(wl->state == WL1271_STATE_OFF)) {
+ if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EBUSY;
goto out;
}
@@ -4493,7 +4494,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- if (unlikely(wl->state == WL1271_STATE_OFF)) {
+ if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EAGAIN;
goto out;
}
@@ -4611,7 +4612,7 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
mask->control[i].legacy,
i);
- if (unlikely(wl->state == WL1271_STATE_OFF))
+ if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
@@ -4647,12 +4648,14 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- if (unlikely(wl->state == WL1271_STATE_OFF)) {
+ if (unlikely(wl->state == WLCORE_STATE_OFF)) {
wl12xx_for_each_wlvif_sta(wl, wlvif) {
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
ieee80211_chswitch_done(vif, false);
}
goto out;
+ } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
+ goto out;
}
ret = wl1271_ps_elp_wakeup(wl);
@@ -4687,7 +4690,7 @@ static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
mutex_lock(&wl->mutex);
- if (unlikely(wl->state == WL1271_STATE_OFF))
+ if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
/* packets are considered pending if in the TX queue or the FW */
@@ -4936,7 +4939,7 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
wl->sg_enabled = res;
- if (wl->state == WL1271_STATE_OFF)
+ if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = wl1271_ps_elp_wakeup(wl);
@@ -5054,7 +5057,7 @@ static void wl1271_connection_loss_work(struct work_struct *work)
mutex_lock(&wl->mutex);
- if (unlikely(wl->state == WL1271_STATE_OFF))
+ if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
/* Call mac80211 connection loss */
@@ -5068,18 +5071,17 @@ out:
mutex_unlock(&wl->mutex);
}
-static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
- u32 oui, u32 nic, int n)
+static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
{
int i;
- wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x, n %d",
- oui, nic, n);
+ wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
+ oui, nic);
- if (nic + n - 1 > 0xffffff)
+ if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
wl1271_warning("NIC part of the MAC address wraps around!");
- for (i = 0; i < n; i++) {
+ for (i = 0; i < wl->num_mac_addr; i++) {
wl->addresses[i].addr[0] = (u8)(oui >> 16);
wl->addresses[i].addr[1] = (u8)(oui >> 8);
wl->addresses[i].addr[2] = (u8) oui;
@@ -5089,7 +5091,22 @@ static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
nic++;
}
- wl->hw->wiphy->n_addresses = n;
+ /* we may be short by at most one address */
+ WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
+
+ /*
+ * turn on the LAA bit in the first address and use it as
+ * the last address.
+ */
+ if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
+ int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
+ memcpy(&wl->addresses[idx], &wl->addresses[0],
+ sizeof(wl->addresses[0]));
+ /* LAA bit */
+ wl->addresses[idx].addr[2] |= BIT(1);
+ }
+
+ wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
wl->hw->wiphy->addresses = wl->addresses;
}
@@ -5128,8 +5145,7 @@ static int wl1271_register_hw(struct wl1271 *wl)
if (wl->mac80211_registered)
return 0;
- wl1271_fetch_nvs(wl);
- if (wl->nvs != NULL) {
+ if (wl->nvs_len >= 12) {
/* NOTE: The wl->nvs->nvs element must be first, in
* order to simplify the casting, we assume it is at
* the beginning of the wl->nvs structure.
@@ -5149,7 +5165,7 @@ static int wl1271_register_hw(struct wl1271 *wl)
nic_addr = wl->fuse_nic_addr + 1;
}
- wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr, 2);
+ wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
ret = ieee80211_register_hw(wl->hw);
if (ret < 0) {
@@ -5179,7 +5195,7 @@ static void wl1271_unregister_hw(struct wl1271 *wl)
static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
{
- .max = 2,
+ .max = 3,
.types = BIT(NL80211_IFTYPE_STATION),
},
{
@@ -5194,7 +5210,7 @@ static const struct ieee80211_iface_combination
wlcore_iface_combinations[] = {
{
.num_different_channels = 1,
- .max_interfaces = 2,
+ .max_interfaces = 3,
.limits = wlcore_iface_limits,
.n_limits = ARRAY_SIZE(wlcore_iface_limits),
},
@@ -5310,7 +5326,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
#define WL1271_DEFAULT_CHANNEL 0
-struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
+struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size)
{
struct ieee80211_hw *hw;
struct wl1271 *wl;
@@ -5390,17 +5406,19 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
spin_lock_init(&wl->wl_lock);
- wl->state = WL1271_STATE_OFF;
+ wl->state = WLCORE_STATE_OFF;
wl->fw_type = WL12XX_FW_TYPE_NONE;
mutex_init(&wl->mutex);
mutex_init(&wl->flush_mutex);
+ init_completion(&wl->nvs_loading_complete);
- order = get_order(WL1271_AGGR_BUFFER_SIZE);
+ order = get_order(aggr_buf_size);
wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
if (!wl->aggr_buf) {
ret = -ENOMEM;
goto err_wq;
}
+ wl->aggr_buf_size = aggr_buf_size;
wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
if (!wl->dummy_packet) {
@@ -5463,8 +5481,7 @@ int wlcore_free_hw(struct wl1271 *wl)
device_remove_file(wl->dev, &dev_attr_bt_coex_state);
free_page((unsigned long)wl->fwlog);
dev_kfree_skb(wl->dummy_packet);
- free_pages((unsigned long)wl->aggr_buf,
- get_order(WL1271_AGGR_BUFFER_SIZE));
+ free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
wl1271_debugfs_exit(wl);
@@ -5514,17 +5531,32 @@ static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
return IRQ_WAKE_THREAD;
}
-int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
+static void wlcore_nvs_cb(const struct firmware *fw, void *context)
{
+ struct wl1271 *wl = context;
+ struct platform_device *pdev = wl->pdev;
struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
unsigned long irqflags;
int ret;
- if (!wl->ops || !wl->ptable) {
- ret = -EINVAL;
- goto out_free_hw;
+ if (fw) {
+ wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ if (!wl->nvs) {
+ wl1271_error("Could not allocate nvs data");
+ goto out;
+ }
+ wl->nvs_len = fw->size;
+ } else {
+ wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
+ WL12XX_NVS_NAME);
+ wl->nvs = NULL;
+ wl->nvs_len = 0;
}
+ ret = wl->ops->setup(wl);
+ if (ret < 0)
+ goto out_free_nvs;
+
BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
/* adjust some runtime configuration parameters */
@@ -5533,11 +5565,8 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
wl->irq = platform_get_irq(pdev, 0);
wl->platform_quirks = pdata->platform_quirks;
wl->set_power = pdata->set_power;
- wl->dev = &pdev->dev;
wl->if_ops = pdata->ops;
- platform_set_drvdata(pdev, wl);
-
if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
irqflags = IRQF_TRIGGER_RISING;
else
@@ -5548,7 +5577,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
pdev->name, wl);
if (ret < 0) {
wl1271_error("request_irq() failed: %d", ret);
- goto out_free_hw;
+ goto out_free_nvs;
}
#ifdef CONFIG_PM
@@ -5607,6 +5636,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
goto out_hw_pg_ver;
}
+ wl->initialized = true;
goto out;
out_hw_pg_ver:
@@ -5621,10 +5651,33 @@ out_unreg:
out_irq:
free_irq(wl->irq, wl);
-out_free_hw:
- wlcore_free_hw(wl);
+out_free_nvs:
+ kfree(wl->nvs);
out:
+ release_firmware(fw);
+ complete_all(&wl->nvs_loading_complete);
+}
+
+int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
+{
+ int ret;
+
+ if (!wl->ops || !wl->ptable)
+ return -EINVAL;
+
+ wl->dev = &pdev->dev;
+ wl->pdev = pdev;
+ platform_set_drvdata(pdev, wl);
+
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
+ wl, wlcore_nvs_cb);
+ if (ret < 0) {
+ wl1271_error("request_firmware_nowait failed: %d", ret);
+ complete_all(&wl->nvs_loading_complete);
+ }
+
return ret;
}
EXPORT_SYMBOL_GPL(wlcore_probe);
@@ -5633,6 +5686,10 @@ int __devexit wlcore_remove(struct platform_device *pdev)
{
struct wl1271 *wl = platform_get_drvdata(pdev);
+ wait_for_completion(&wl->nvs_loading_complete);
+ if (!wl->initialized)
+ return 0;
+
if (wl->irq_wake_enabled) {
device_init_wakeup(wl->dev, 0);
disable_irq_wake(wl->irq);
@@ -5663,3 +5720,4 @@ MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
+MODULE_FIRMWARE(WL12XX_NVS_NAME);
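The probe path above now requests the NVS file asynchronously and finishes device setup in the completion callback. A minimal sketch of that request_firmware_nowait() pattern outside this driver, with hypothetical my_* names:

#include <linux/completion.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define MY_NVS_NAME "my-nvs.bin"	/* hypothetical file name */

struct my_dev {
	struct platform_device *pdev;
	u8 *nvs;
	size_t nvs_len;
	struct completion nvs_loading_complete;
};

static void my_nvs_cb(const struct firmware *fw, void *context)
{
	struct my_dev *md = context;

	if (fw) {
		/* keep a private copy; fw is released below */
		md->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
		md->nvs_len = md->nvs ? fw->size : 0;
	}
	/* continue device setup here, then signal waiters (e.g. remove()) */
	release_firmware(fw);
	complete_all(&md->nvs_loading_complete);
}

static int my_probe(struct my_dev *md)
{
	init_completion(&md->nvs_loading_complete);

	/* returns immediately; my_nvs_cb() runs once the file is available */
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				       MY_NVS_NAME, &md->pdev->dev,
				       GFP_KERNEL, md, my_nvs_cb);
}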
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 46d36fd30eba..4d1414a673fb 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -28,7 +28,7 @@
#define WL1271_WAKEUP_TIMEOUT 500
-#define ELP_ENTRY_DELAY 5
+#define ELP_ENTRY_DELAY 30
void wl1271_elp_work(struct work_struct *work)
{
@@ -44,7 +44,7 @@ void wl1271_elp_work(struct work_struct *work)
mutex_lock(&wl->mutex);
- if (unlikely(wl->state == WL1271_STATE_OFF))
+ if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
/* our work might have been already cancelled */
@@ -98,11 +98,7 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
return;
}
- if (wl->conf.conn.forced_ps)
- timeout = ELP_ENTRY_DELAY;
- else
- timeout = wl->conf.conn.dynamic_ps_timeout;
-
+ timeout = ELP_ENTRY_DELAY;
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
msecs_to_jiffies(timeout));
}
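The ps.c hunk above fixes the ELP entry delay at 30 ms and always defers the actual sleep through mac80211's delayed-work helper. A minimal sketch of that scheduling step, with hypothetical my_* names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <net/mac80211.h>

#define MY_ELP_ENTRY_DELAY_MS 30	/* matches the new ELP_ENTRY_DELAY */

static void my_queue_elp_entry(struct ieee80211_hw *hw,
			       struct delayed_work *elp_work)
{
	/* (re)arm the ELP entry; the work body performs the actual sleep */
	ieee80211_queue_delayed_work(hw, elp_work,
				     msecs_to_jiffies(MY_ELP_ENTRY_DELAY_MS));
}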
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index f55e2f9e7ac5..9ee0ec6fd1db 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -221,7 +221,7 @@ int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
pkt_len = wlcore_rx_get_buf_size(wl, des);
align_pkt_len = wlcore_rx_get_align_buf_size(wl,
pkt_len);
- if (buf_size + align_pkt_len > WL1271_AGGR_BUFFER_SIZE)
+ if (buf_size + align_pkt_len > wl->aggr_buf_size)
break;
buf_size += align_pkt_len;
rx_counter++;
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index dbeca1bfbb2c..d00501493dfe 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -46,7 +46,7 @@ void wl1271_scan_complete_work(struct work_struct *work)
mutex_lock(&wl->mutex);
- if (wl->state == WL1271_STATE_OFF)
+ if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
@@ -184,11 +184,7 @@ static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
if (passive)
scan_options |= WL1271_SCAN_OPT_PASSIVE;
- if (wlvif->bss_type == BSS_TYPE_AP_BSS ||
- test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
- cmd->params.role_id = wlvif->role_id;
- else
- cmd->params.role_id = wlvif->dev_role_id;
+ cmd->params.role_id = wlvif->role_id;
if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
ret = -EINVAL;
@@ -593,7 +589,7 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
goto out;
}
- cmd->role_id = wlvif->dev_role_id;
+ cmd->role_id = wlvif->role_id;
if (!n_match_ssids) {
/* No filter, with ssids */
type = SCAN_SSID_FILTER_DISABLED;
@@ -683,7 +679,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
if (!cfg)
return -ENOMEM;
- cfg->role_id = wlvif->dev_role_id;
+ cfg->role_id = wlvif->role_id;
cfg->rssi_threshold = c->rssi_threshold;
cfg->snr_threshold = c->snr_threshold;
cfg->n_probe_reqs = c->num_probe_reqs;
@@ -718,7 +714,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
if (!force_passive && cfg->active[0]) {
u8 band = IEEE80211_BAND_2GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
- wlvif->dev_role_id, band,
+ wlvif->role_id, band,
req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[band],
@@ -732,7 +728,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
if (!force_passive && cfg->active[1]) {
u8 band = IEEE80211_BAND_5GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
- wlvif->dev_role_id, band,
+ wlvif->role_id, band,
req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[band],
@@ -774,7 +770,7 @@ int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
if (!start)
return -ENOMEM;
- start->role_id = wlvif->dev_role_id;
+ start->role_id = wlvif->role_id;
start->tag = WL1271_SCAN_DEFAULT_TAG;
ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start,
@@ -810,7 +806,7 @@ void wl1271_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
return;
}
- stop->role_id = wlvif->dev_role_id;
+ stop->role_id = wlvif->role_id;
stop->tag = WL1271_SCAN_DEFAULT_TAG;
ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop,
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 8da4ed243ebc..a519bc3adec1 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -66,7 +66,13 @@
/* HW limitation: maximum possible chunk size is 4095 bytes */
#define WSPI_MAX_CHUNK_SIZE 4092
-#define WSPI_MAX_NUM_OF_CHUNKS (WL1271_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
+/*
+ * We only support SPI for 12xx here - this code should be reworked
+ * when 18xx support is introduced.
+ */
+#define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
+
+#define WSPI_MAX_NUM_OF_CHUNKS (SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
struct wl12xx_spi_glue {
struct device *dev;
@@ -271,7 +277,7 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
u32 chunk_len;
int i;
- WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
+ WARN_ON(len > SPI_AGGR_BUFFER_SIZE);
spi_message_init(&m);
memset(t, 0, sizeof(t));
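The spi.c hunk decouples the SPI glue from WL1271_AGGR_BUFFER_SIZE by giving it its own 4-page limit and deriving the chunk count from the 4092-byte WSPI maximum. A rough sketch of that chunking arithmetic, under the same constants (my_* names are hypothetical):

#include <linux/kernel.h>
#include <linux/mm.h>

#define MY_WSPI_MAX_CHUNK_SIZE	4092
#define MY_SPI_AGGR_BUFFER_SIZE	(4 * PAGE_SIZE)

/* full chunks plus one trailing partial chunk, if any */
static unsigned int my_num_chunks(size_t len)
{
	return DIV_ROUND_UP(len, MY_WSPI_MAX_CHUNK_SIZE);
}

static void my_walk_chunks(const u8 *buf, size_t len)
{
	WARN_ON(len > MY_SPI_AGGR_BUFFER_SIZE);

	while (len > 0) {
		size_t chunk = min_t(size_t, len, MY_WSPI_MAX_CHUNK_SIZE);

		/* a real driver would queue one spi_transfer per chunk here */
		buf += chunk;
		len -= chunk;
	}
}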
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 49e5ee1525c9..f3442762d884 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -92,7 +92,7 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
mutex_lock(&wl->mutex);
- if (wl->state == WL1271_STATE_OFF) {
+ if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EINVAL;
goto out;
}
@@ -164,7 +164,7 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
mutex_lock(&wl->mutex);
- if (wl->state == WL1271_STATE_OFF) {
+ if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index f0081f746482..a90d3cd09408 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -130,16 +130,13 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
}
EXPORT_SYMBOL(wl12xx_is_dummy_packet);
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- struct sk_buff *skb)
+static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb, struct ieee80211_sta *sta)
{
- struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
-
- if (control->control.sta) {
+ if (sta) {
struct wl1271_station *wl_sta;
- wl_sta = (struct wl1271_station *)
- control->control.sta->drv_priv;
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
return wl_sta->hlid;
} else {
struct ieee80211_hdr *hdr;
@@ -156,7 +153,7 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
}
u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- struct sk_buff *skb)
+ struct sk_buff *skb, struct ieee80211_sta *sta)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -164,7 +161,7 @@ u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
return wl->system_hlid;
if (wlvif->bss_type == BSS_TYPE_AP_BSS)
- return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);
+ return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);
if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
@@ -196,7 +193,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
int id, ret = -EBUSY, ac;
u32 spare_blocks;
- if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
+ if (buf_offset + total_len > wl->aggr_buf_size)
return -EAGAIN;
spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);
@@ -322,8 +319,12 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
if (hlid == wlvif->ap.global_hlid)
rate_idx = wlvif->ap.mgmt_rate_idx;
else if (hlid == wlvif->ap.bcast_hlid ||
- skb->protocol == cpu_to_be16(ETH_P_PAE))
- /* send AP bcast and EAPOLs using the min basic rate */
+ skb->protocol == cpu_to_be16(ETH_P_PAE) ||
+ !ieee80211_is_data(frame_control))
+ /*
+ * send non-data, bcast and EAPOLs using the
+ * min basic rate
+ */
rate_idx = wlvif->ap.bcast_rate_idx;
else
rate_idx = wlvif->ap.ucast_rate_idx[ac];
@@ -344,13 +345,12 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
/* caller must hold wl->mutex */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- struct sk_buff *skb, u32 buf_offset)
+ struct sk_buff *skb, u32 buf_offset, u8 hlid)
{
struct ieee80211_tx_info *info;
u32 extra = 0;
int ret = 0;
u32 total_len;
- u8 hlid;
bool is_dummy;
bool is_gem = false;
@@ -359,9 +359,13 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
return -EINVAL;
}
+ if (hlid == WL12XX_INVALID_LINK_ID) {
+ wl1271_error("invalid hlid. dropping skb 0x%p", skb);
+ return -EINVAL;
+ }
+
info = IEEE80211_SKB_CB(skb);
- /* TODO: handle dummy packets on multi-vifs */
is_dummy = wl12xx_is_dummy_packet(wl, skb);
if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
@@ -386,11 +390,6 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
}
- hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
- if (hlid == WL12XX_INVALID_LINK_ID) {
- wl1271_error("invalid hlid. dropping skb 0x%p", skb);
- return -EINVAL;
- }
ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
is_gem);
@@ -517,7 +516,8 @@ static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
}
static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
- struct wl12xx_vif *wlvif)
+ struct wl12xx_vif *wlvif,
+ u8 *hlid)
{
struct sk_buff *skb = NULL;
int i, h, start_hlid;
@@ -544,10 +544,11 @@ static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
if (!skb)
wlvif->last_tx_hlid = 0;
+ *hlid = wlvif->last_tx_hlid;
return skb;
}
-static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
{
unsigned long flags;
struct wl12xx_vif *wlvif = wl->last_wlvif;
@@ -556,7 +557,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
/* continue from last wlvif (round robin) */
if (wlvif) {
wl12xx_for_each_wlvif_continue(wl, wlvif) {
- skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+ skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
if (skb) {
wl->last_wlvif = wlvif;
break;
@@ -565,13 +566,15 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
}
/* dequeue from the system HLID before the restarting wlvif list */
- if (!skb)
+ if (!skb) {
skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
+ *hlid = wl->system_hlid;
+ }
/* do a new pass over the wlvif list */
if (!skb) {
wl12xx_for_each_wlvif(wl, wlvif) {
- skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+ skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid);
if (skb) {
wl->last_wlvif = wlvif;
break;
@@ -591,6 +594,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
int q;
skb = wl->dummy_packet;
+ *hlid = wl->system_hlid;
q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
spin_lock_irqsave(&wl->wl_lock, flags);
WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
@@ -602,7 +606,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
}
static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- struct sk_buff *skb)
+ struct sk_buff *skb, u8 hlid)
{
unsigned long flags;
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
@@ -610,7 +614,6 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
if (wl12xx_is_dummy_packet(wl, skb)) {
set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
} else {
- u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
/* make sure we dequeue the same packet next time */
@@ -686,26 +689,30 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
int ret = 0;
int bus_ret = 0;
+ u8 hlid;
- if (unlikely(wl->state == WL1271_STATE_OFF))
+ if (unlikely(wl->state != WLCORE_STATE_ON))
return 0;
- while ((skb = wl1271_skb_dequeue(wl))) {
+ while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
bool has_data = false;
wlvif = NULL;
if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
wlvif = wl12xx_vif_to_data(info->control.vif);
+ else
+ hlid = wl->system_hlid;
has_data = wlvif && wl1271_tx_is_data_present(skb);
- ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
+ ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
+ hlid);
if (ret == -EAGAIN) {
/*
* Aggregation buffer is full.
* Flush buffer and try again.
*/
- wl1271_skb_queue_head(wl, wlvif, skb);
+ wl1271_skb_queue_head(wl, wlvif, skb, hlid);
buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
last_len);
@@ -722,7 +729,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
* Firmware buffer is full.
* Queue back last skb, and stop aggregating.
*/
- wl1271_skb_queue_head(wl, wlvif, skb);
+ wl1271_skb_queue_head(wl, wlvif, skb, hlid);
/* No work left, avoid scheduling redundant tx work */
set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
goto out_ack;
@@ -732,7 +739,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
* fw still expects dummy packet,
* so re-enqueue it
*/
- wl1271_skb_queue_head(wl, wlvif, skb);
+ wl1271_skb_queue_head(wl, wlvif, skb, hlid);
else
ieee80211_free_txskb(wl->hw, skb);
goto out_ack;
@@ -1069,39 +1076,54 @@ void wl12xx_tx_reset(struct wl1271 *wl)
/* caller must *NOT* hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
{
- unsigned long timeout;
+ unsigned long timeout, start_time;
int i;
- timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
+ start_time = jiffies;
+ timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
/* only one flush should be in progress, for consistent queue state */
mutex_lock(&wl->flush_mutex);
+ mutex_lock(&wl->mutex);
+ if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) {
+ mutex_unlock(&wl->mutex);
+ goto out;
+ }
+
wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
while (!time_after(jiffies, timeout)) {
- mutex_lock(&wl->mutex);
- wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
+ wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d",
wl->tx_frames_cnt,
wl1271_tx_total_queue_count(wl));
+
+ /* force Tx and give the driver some time to flush data */
+ mutex_unlock(&wl->mutex);
+ if (wl1271_tx_total_queue_count(wl))
+ wl1271_tx_work(&wl->tx_work);
+ msleep(20);
+ mutex_lock(&wl->mutex);
+
if ((wl->tx_frames_cnt == 0) &&
(wl1271_tx_total_queue_count(wl) == 0)) {
- mutex_unlock(&wl->mutex);
- goto out;
+ wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms",
+ jiffies_to_msecs(jiffies - start_time));
+ goto out_wake;
}
- mutex_unlock(&wl->mutex);
- msleep(1);
}
- wl1271_warning("Unable to flush all TX buffers, timed out.");
+ wl1271_warning("Unable to flush all TX buffers, "
+ "timed out (timeout %d ms",
+ WL1271_TX_FLUSH_TIMEOUT / 1000);
/* forcibly flush all Tx buffers on our queues */
- mutex_lock(&wl->mutex);
for (i = 0; i < WL12XX_MAX_LINKS; i++)
wl1271_tx_reset_link_queues(wl, i);
- mutex_unlock(&wl->mutex);
-out:
+out_wake:
wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
+ mutex_unlock(&wl->mutex);
+out:
mutex_unlock(&wl->flush_mutex);
}
EXPORT_SYMBOL_GPL(wl1271_tx_flush);
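The reworked wl1271_tx_flush() above polls against a jiffies deadline, forcing Tx and sleeping 20 ms between checks instead of busy-looping every millisecond. A minimal sketch of that deadline/poll idiom, with a hypothetical my_queues_empty() predicate:

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define MY_FLUSH_TIMEOUT_US 500000	/* hypothetical 500 ms deadline */

static bool my_flush(bool (*my_queues_empty)(void *priv), void *priv)
{
	unsigned long start_time = jiffies;
	unsigned long timeout = start_time +
				usecs_to_jiffies(MY_FLUSH_TIMEOUT_US);

	while (!time_after(jiffies, timeout)) {
		if (my_queues_empty(priv))
			return true;	/* drained within the deadline */
		msleep(20);		/* give the hardware time to drain */
	}
	return false;	/* caller force-resets its queues, as in the hunk above */
}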
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 1e939b016155..349520d8b724 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -243,10 +243,8 @@ u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
enum ieee80211_band rate_band);
u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- struct sk_buff *skb);
u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- struct sk_buff *skb);
+ struct sk_buff *skb, struct ieee80211_sta *sta);
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 0ce7a8ebbd46..68584aa0f2b0 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -31,12 +31,19 @@
/* The maximum number of Tx descriptors in all chip families */
#define WLCORE_MAX_TX_DESCRIPTORS 32
+/*
+ * We always allocate this number of MAC addresses. If we don't
+ * have enough allocated addresses, the LAA bit is used.
+ */
+#define WLCORE_NUM_MAC_ADDRESSES 3
+
/* forward declaration */
struct wl1271_tx_hw_descr;
enum wl_rx_buf_align;
struct wl1271_rx_descriptor;
struct wlcore_ops {
+ int (*setup)(struct wl1271 *wl);
int (*identify_chip)(struct wl1271 *wl);
int (*identify_fw)(struct wl1271 *wl);
int (*boot)(struct wl1271 *wl);
@@ -139,10 +146,12 @@ struct wl1271_stats {
};
struct wl1271 {
+ bool initialized;
struct ieee80211_hw *hw;
bool mac80211_registered;
struct device *dev;
+ struct platform_device *pdev;
void *if_priv;
@@ -153,7 +162,7 @@ struct wl1271 {
spinlock_t wl_lock;
- enum wl1271_state state;
+ enum wlcore_state state;
enum wl12xx_fw_type fw_type;
bool plt;
enum plt_mode plt_mode;
@@ -181,7 +190,7 @@ struct wl1271 {
u32 fuse_nic_addr;
/* we have up to 2 MAC addresses */
- struct mac_address addresses[2];
+ struct mac_address addresses[WLCORE_NUM_MAC_ADDRESSES];
int channel;
u8 system_hlid;
@@ -190,6 +199,8 @@ struct wl1271 {
unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
unsigned long rate_policies_map[
BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)];
+ unsigned long klv_templates_map[
+ BITS_TO_LONGS(WLCORE_MAX_KLV_TEMPLATES)];
struct list_head wlvif_list;
@@ -237,6 +248,7 @@ struct wl1271 {
/* Intermediate buffer, used for packet aggregation */
u8 *aggr_buf;
+ u32 aggr_buf_size;
/* Reusable dummy packet template */
struct sk_buff *dummy_packet;
@@ -393,13 +405,18 @@ struct wl1271 {
/* sleep auth value currently configured to FW */
int sleep_auth;
+ /* the number of allocated MAC addresses in this chip */
+ int num_mac_addr;
+
/* the minimum FW version required for the driver to work */
unsigned int min_fw_ver[NUM_FW_VER];
+
+ struct completion nvs_loading_complete;
};
int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
int __devexit wlcore_remove(struct platform_device *pdev);
-struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size);
+struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size);
int wlcore_free_hw(struct wl1271 *wl);
int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index c0505635bb00..6678d4b18611 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -66,6 +66,7 @@
#define WLCORE_NUM_BANDS 2
#define WL12XX_MAX_RATE_POLICIES 16
+#define WLCORE_MAX_KLV_TEMPLATES 4
/* Defined by FW as 0. Will not be freed or allocated. */
#define WL12XX_SYSTEM_HLID 0
@@ -83,11 +84,10 @@
#define WL1271_AP_BSS_INDEX 0
#define WL1271_AP_DEF_BEACON_EXP 20
-#define WL1271_AGGR_BUFFER_SIZE (5 * PAGE_SIZE)
-
-enum wl1271_state {
- WL1271_STATE_OFF,
- WL1271_STATE_ON,
+enum wlcore_state {
+ WLCORE_STATE_OFF,
+ WLCORE_STATE_RESTARTING,
+ WLCORE_STATE_ON,
};
enum wl12xx_fw_type {
@@ -124,6 +124,7 @@ struct wl1271_chip {
u32 id;
char fw_ver_str[ETHTOOL_BUSINFO_LEN];
unsigned int fw_ver[NUM_FW_VER];
+ char phy_fw_ver_str[ETHTOOL_BUSINFO_LEN];
};
#define NUM_TX_QUEUES 4
@@ -337,6 +338,8 @@ struct wl12xx_vif {
u8 ap_rate_idx;
u8 p2p_rate_idx;
+ u8 klv_template_id;
+
bool qos;
} sta;
struct {
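WLCORE_MAX_KLV_TEMPLATES and the new per-vif klv_template_id back the allocator added to main.c above (wlcore_allocate_klv_template()/wlcore_free_klv_template()). A minimal sketch of that bitmap-based ID allocation idiom, with hypothetical my_* names:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>

#define MY_MAX_IDS 4	/* mirrors WLCORE_MAX_KLV_TEMPLATES */

static DECLARE_BITMAP(my_ids_map, MY_MAX_IDS);

static int my_alloc_id(u8 *idx)
{
	u8 id = find_first_zero_bit(my_ids_map, MY_MAX_IDS);

	if (id >= MY_MAX_IDS)
		return -EBUSY;		/* every ID is in use */

	__set_bit(id, my_ids_map);
	*idx = id;
	return 0;
}

static void my_free_id(u8 *idx)
{
	if (WARN_ON(*idx >= MY_MAX_IDS))
		return;

	__clear_bit(*idx, my_ids_map);
	*idx = MY_MAX_IDS;		/* out-of-range value marks it invalid */
}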
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 00f6e69c1dcd..730186d0449b 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1520,13 +1520,12 @@ static int wl3501_set_wap(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct wl3501_card *this = netdev_priv(dev);
- static const u8 bcast[ETH_ALEN] = { 255, 255, 255, 255, 255, 255 };
int rc = -EINVAL;
/* FIXME: we support other ARPHRDs...*/
if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
goto out;
- if (!memcmp(bcast, wrqu->ap_addr.sa_data, ETH_ALEN)) {
+ if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data)) {
/* FIXME: rescan? */
} else
memcpy(this->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
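The wl3501 hunk swaps an open-coded memcmp() against a static all-0xff array for the generic helper from etherdevice.h. A minimal usage sketch (my_is_bcast() is hypothetical):

#include <linux/etherdevice.h>

static bool my_is_bcast(const u8 addr[ETH_ALEN])
{
	/* true when all six bytes are 0xff */
	return is_broadcast_ether_addr(addr);
}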
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index c9e2660e1263..114364b5d466 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -937,7 +937,9 @@ static int fill_ctrlset(struct zd_mac *mac,
* control block of the skbuff will be initialized. If necessary the incoming
* mac80211 queues will be stopped.
*/
-static void zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void zd_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct zd_mac *mac = zd_hw_mac(hw);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1176,7 +1178,7 @@ static void zd_beacon_done(struct zd_mac *mac)
skb = ieee80211_get_buffered_bc(mac->hw, mac->vif);
if (!skb)
break;
- zd_op_tx(mac->hw, skb);
+ zd_op_tx(mac->hw, NULL, skb);
}
/*
@@ -1399,7 +1401,8 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_SIGNAL_UNSPEC |
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+ IEEE80211_HW_MFP_CAPABLE;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_MESH_POINT) |
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index af83c43bcdb1..ef2b171e3514 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1164,8 +1164,7 @@ void zd_usb_reset_rx_idle_timer(struct zd_usb *usb)
{
struct zd_usb_rx *rx = &usb->rx;
- cancel_delayed_work(&rx->idle_work);
- queue_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
+ mod_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
}
static inline void init_usb_interrupt(struct zd_usb *usb)
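The zd_usb hunk folds cancel_delayed_work() plus queue_delayed_work() into a single mod_delayed_work() call, which (re)arms the work item with the new delay. A minimal sketch, with hypothetical my_* names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define MY_IDLE_INTERVAL (30 * HZ)	/* hypothetical idle timeout */

static void my_reset_idle_timer(struct workqueue_struct *my_wq,
				struct delayed_work *my_work)
{
	/* equivalent to cancel_delayed_work() + queue_delayed_work() */
	mod_delayed_work(my_wq, my_work, MY_IDLE_INTERVAL);
}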
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 682633bfe00f..05593d882023 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -635,9 +635,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
return;
BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
- ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, &netbk->grant_copy_op,
- npo.copy_prod);
- BUG_ON(ret != 0);
+ gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
sco = (struct skb_cb_overlay *)skb->cb;
@@ -1460,18 +1458,15 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
static void xen_netbk_tx_action(struct xen_netbk *netbk)
{
unsigned nr_gops;
- int ret;
nr_gops = xen_netbk_tx_build_gops(netbk);
if (nr_gops == 0)
return;
- ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
- netbk->tx_copy_ops, nr_gops);
- BUG_ON(ret);
- xen_netbk_tx_submit(netbk);
+ gnttab_batch_copy(netbk->tx_copy_ops, nr_gops);
+ xen_netbk_tx_submit(netbk);
}
static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
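The netback hunks replace direct HYPERVISOR_grant_table_op(GNTTABOP_copy, ...) calls guarded by BUG_ON() with gnttab_batch_copy(), which deals with transiently-busy entries internally. A minimal sketch of handing off a prepared batch (my_do_copy() is hypothetical):

#include <xen/grant_table.h>

static void my_do_copy(struct gnttab_copy *ops, unsigned int count)
{
	if (!count)
		return;

	/* issues the whole batch and retries entries that come back busy */
	gnttab_batch_copy(ops, count);
}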
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 650f79a1f2bd..c934fe8583f5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1712,7 +1712,7 @@ static void netback_changed(struct xenbus_device *dev,
break;
case XenbusStateConnected:
- netif_notify_peers(netdev);
+ netdev_notify_peers(netdev);
break;
case XenbusStateClosing: