author     David Woodhouse <dwmw2@infradead.org>    2007-07-23 11:20:10 +0200
committer  David Woodhouse <dwmw2@infradead.org>    2007-07-23 11:20:10 +0200
commit     39fe5434cb9de5da40510028b17b96bc4eb312b3 (patch)
tree       7a02a317b9ad57da51ca99887c119e779ccf3f13 /drivers/net
parent     [JFFS2] Add declaration of jffs2_lzo_{init,exit} to compr.h (diff)
parent     Linux 2.6.23-rc1 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers/net')
179 files changed, 11507 insertions, 4088 deletions
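A recurring change in this merge is the replacement of eth_copy_and_sum() with skb_copy_to_linear_data() across the drivers below. The old helper's fourth (checksum base) argument was never actually used; it performed a plain copy into the skb's linear data area, so each conversion simply renames the call and drops the trailing 0. A minimal sketch of the resulting RX copy pattern follows; example_rx(), buf and len are illustrative names, not taken from any hunk below:

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <linux/skbuff.h>

	/* Hedged sketch of the post-conversion receive path. */
	static void example_rx(struct net_device *dev, const u8 *buf, int len)
	{
		struct sk_buff *skb = dev_alloc_skb(len + 2);

		if (skb == NULL)
			return;				/* out of memory: drop the frame */
		skb_reserve(skb, 2);			/* 16 byte align the IP header */
		skb_put(skb, len);			/* make room for the payload */
		/* was: eth_copy_and_sum(skb, buf, len, 0); */
		skb_copy_to_linear_data(skb, buf, len);
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
	}

The copy is still needed because these drivers DMA frames into driver-owned ring buffers that must be recycled immediately.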
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index da1a22c13865..ab18343e58ef 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -990,7 +990,7 @@ static void elmc_rcv_int(struct net_device *dev)
 			if (skb != NULL) {
 				skb_reserve(skb, 2);	/* 16 byte alignment */
 				skb_put(skb,totlen);
-				eth_copy_and_sum(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen,0);
+				skb_copy_to_linear_data(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen);
 				skb->protocol = eth_type_trans(skb, dev);
 				netif_rx(skb);
 				dev->last_rx = jiffies;
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index 0877fc372f4b..e89ace109a5d 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -333,9 +333,9 @@ static int lance_rx (struct net_device *dev)
 			skb_reserve (skb, 2);		/* 16 byte align */
 			skb_put (skb, len);		/* make room */
-			eth_copy_and_sum(skb,
+			skb_copy_to_linear_data(skb,
 					 (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
-					 len, 0);
+					 len);
 			skb->protocol = eth_type_trans (skb, dev);
 			netif_rx (skb);
 			dev->last_rx = jiffies;
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 58bbc3e6d0de..e970e64bf966 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -26,7 +26,6 @@ TODO:
 	* Test Tx checksumming thoroughly
-	* Implement dev->tx_timeout
 
 	Low priority TODO:
 	* Complete reset on PciErr
@@ -1218,6 +1217,30 @@ static int cp_close (struct net_device *dev)
 	return 0;
 }
 
+static void cp_tx_timeout(struct net_device *dev)
+{
+	struct cp_private *cp = netdev_priv(dev);
+	unsigned long flags;
+	int rc;
+
+	printk(KERN_WARNING "%s: Transmit timeout, status %2x %4x %4x %4x\n",
+	       dev->name, cpr8(Cmd), cpr16(CpCmd),
+	       cpr16(IntrStatus), cpr16(IntrMask));
+
+	spin_lock_irqsave(&cp->lock, flags);
+
+	cp_stop_hw(cp);
+	cp_clean_rings(cp);
+	rc = cp_init_rings(cp);
+	cp_start_hw(cp);
+
+	netif_wake_queue(dev);
+
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	return;
+}
+
 #ifdef BROKEN
 static int cp_change_mtu(struct net_device *dev, int new_mtu)
 {
@@ -1799,7 +1822,6 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	void __iomem *regs;
 	resource_size_t pciaddr;
 	unsigned int addr_len, i, pci_using_dac;
-	u8 pci_rev;
 
 #ifndef MODULE
 	static int version_printed;
@@ -1807,13 +1829,11 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		printk("%s", version);
 #endif
 
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
-
 	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
-	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
+	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
 		dev_err(&pdev->dev,
 			"This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
-			pdev->vendor, pdev->device, pci_rev);
+			pdev->vendor, pdev->device, pdev->revision);
 		dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
 		return -ENODEV;
 	}
@@ -1923,10 +1943,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->change_mtu = cp_change_mtu;
 #endif
 	dev->ethtool_ops = &cp_ethtool_ops;
-#if 0
 	dev->tx_timeout = cp_tx_timeout;
 	dev->watchdog_timeo = TX_TIMEOUT;
-#endif
 
 #if CP_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index a844b1fe2dc4..327eaa7b4999 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -931,7 +931,6 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
 	int i, addr_len, option;
 	void __iomem *ioaddr;
 	static int board_idx = -1;
-	u8 pci_rev;
 
 	assert (pdev != NULL);
 	assert (ent != NULL);
@@ -949,13 +948,11 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
 	}
 #endif
 
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
-
 	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
-	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev >= 0x20) {
+	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision >= 0x20) {
 		dev_info(&pdev->dev,
 			 "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip\n",
-			 pdev->vendor, pdev->device, pci_rev);
+			 pdev->vendor, pdev->device, pdev->revision);
 		dev_info(&pdev->dev,
 			 "Use the \"8139cp\" driver for improved performance and stability.\n");
 	}
@@ -2017,7 +2014,7 @@ no_early_rx:
 #if RX_BUF_IDX == 3
 			wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
 #else
-			eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0);
+			skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
 #endif
 			skb_put (skb, pkt_size);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b941c74a06c4..f8a602caabcb 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -5,6 +5,7 @@ menuconfig NETDEVICES
 	default y if UML
+	depends on NET
 	bool "Network device support"
 	---help---
 	  You can say N here if you don't intend to connect your Linux box to
@@ -25,6 +26,14 @@ menuconfig NETDEVICES
 # that for each of the symbols.
 
 if NETDEVICES
 
+config NETDEVICES_MULTIQUEUE
+	bool "Netdevice multiple hardware queue support"
+	---help---
+	  Say Y here if you want to allow the network stack to use multiple
+	  hardware TX queues on an ethernet device.
+
+	  Most people will say N here.
+
 config IFB
 	tristate "Intermediate Functional Block support"
 	depends on NET_CLS_ACT
@@ -74,6 +83,16 @@ config BONDING
 	  To compile this driver as a module, choose M here: the module
 	  will be called bonding.
 
+config MACVLAN
+	tristate "MAC-VLAN support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	---help---
+	  This allows one to create virtual interfaces that map packets to
+	  or from specific MAC addresses to a particular interface.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called macvlan.
+
 config EQUALIZER
 	tristate "EQL (serial line load balancing) support"
 	---help---
@@ -187,7 +206,7 @@ config MII
 config MACB
 	tristate "Atmel MACB support"
 	depends on AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263
-	select MII
+	select PHYLIB
 	help
 	  The Atmel MACB ethernet interface is found on many AT32 and AT91
 	  parts. Say Y to include support for the MACB chip.
@@ -387,22 +406,6 @@ config ATARILANCE
 	  on the AMD Lance chipset: RieblCard (with or without battery),
 	  or PAMCard VME (also the version by Rhotron, with different
 	  addresses).
 
-config ATARI_BIONET
-	tristate "BioNet-100 support"
-	depends on ATARI && ATARI_ACSI && BROKEN
-	help
-	  Say Y to include support for BioData's BioNet-100 Ethernet adapter
-	  for the ACSI port. The driver works (has to work...) with a polled
-	  I/O scheme, so it's rather slow :-(
-
-config ATARI_PAMSNET
-	tristate "PAMsNet support"
-	depends on ATARI && ATARI_ACSI && BROKEN
-	help
-	  Say Y to include support for the PAMsNet Ethernet adapter for the
-	  ACSI port ("ACSI node"). The driver works (has to work...) with a
-	  polled I/O scheme, so it's rather slow :-(
-
 config SUN3LANCE
 	tristate "Sun3/Sun3x on-board LANCE support"
 	depends on SUN3 || SUN3X
@@ -586,6 +589,12 @@ config CASSINI
 	  Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet.
 	  See also <http://www.sun.com/products-n-solutions/hardware/docs/pdf/817-4341-10.pdf>
 
+config SUNVNET
+	tristate "Sun Virtual Network support"
+	depends on SUN_LDOMS
+	help
+	  Support for virtual network devices under Sun Logical Domains.
+
 config NET_VENDOR_3COM
 	bool "3COM cards"
 	depends on ISA || EISA || MCA || PCI
@@ -830,6 +839,50 @@ config ULTRA32
 	  <file:Documentation/networking/net-modules.txt>.  The module
 	  will be called smc-ultra32.
 
+config BFIN_MAC
+	tristate "Blackfin 536/537 on-chip mac support"
+	depends on NET_ETHERNET && (BF537 || BF536) && (!BF537_PORT_H)
+	select CRC32
+	select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE
+	help
+	  This is the driver for blackfin on-chip mac device. Say Y if you want it
+	  compiled into the kernel. This driver is also available as a module
+	  ( = code which can be inserted in and removed from the running kernel
+	  whenever you want). The module will be called bfin_mac.
+
+config BFIN_MAC_USE_L1
+	bool "Use L1 memory for rx/tx packets"
+	depends on BFIN_MAC && BF537
+	default y
+	help
+	  To get maximum network performace, you should use L1 memory as rx/tx buffers.
+	  Say N here if you want to reserve L1 memory for other uses.
+
+config BFIN_TX_DESC_NUM
+	int "Number of transmit buffer packets"
+	depends on BFIN_MAC
+	range 6 10 if BFIN_MAC_USE_L1
+	range 10 100
+	default "10"
+	help
+	  Set the number of buffer packets used in driver.
+
+config BFIN_RX_DESC_NUM
+	int "Number of receive buffer packets"
+	depends on BFIN_MAC
+	range 20 100 if BFIN_MAC_USE_L1
+	range 20 800
+	default "20"
+	help
+	  Set the number of buffer packets used in driver.
+
+config BFIN_MAC_RMII
+	bool "RMII PHY Interface (EXPERIMENTAL)"
+	depends on BFIN_MAC && EXPERIMENTAL
+	default n
+	help
+	  Use Reduced PHY MII Interface
+
 config SMC9194
 	tristate "SMC 9194 support"
 	depends on NET_VENDOR_SMC && (ISA || MAC && BROKEN)
@@ -877,7 +930,7 @@ config NET_NETX
 
 config DM9000
 	tristate "DM9000 support"
-	depends on ARM || MIPS
+	depends on ARM || BLACKFIN || MIPS
 	select CRC32
 	select MII
 	---help---
@@ -2478,6 +2531,18 @@ source "drivers/atm/Kconfig"
 
 source "drivers/s390/net/Kconfig"
 
+config XEN_NETDEV_FRONTEND
+	tristate "Xen network device frontend driver"
+	depends on XEN
+	default y
+	help
+	  The network device frontend driver allows the kernel to
+	  access network devices exported exported by a virtual
+	  machine containing a physical network device driver.  The
+	  frontend driver is intended for unprivileged guest domains;
+	  if you are compiling a kernel for a Xen guest, you almost
+	  certainly want to enable this.
+
 config ISERIES_VETH
 	tristate "iSeries Virtual Ethernet driver support"
 	depends on PPC_ISERIES
@@ -2784,6 +2849,19 @@ config PPPOATM
 	  which can lead to bad results if the ATM peer loses state
 	  and changes its encapsulation unilaterally.
 
+config PPPOL2TP
+	tristate "PPP over L2TP (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && PPP
+	help
+	  Support for PPP-over-L2TP socket family. L2TP is a protocol
+	  used by ISPs and enterprises to tunnel PPP traffic over UDP
+	  tunnels. L2TP is replacing PPTP for VPN uses.
+
+	  This kernel component handles only L2TP data packets: a
+	  userland daemon handles L2TP the control protocol (tunnel
+	  and session setup). One such daemon is OpenL2TP
+	  (http://openl2tp.sourceforge.net/).
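The PPPOL2TP help text above describes the split this merge introduces: a userspace daemon negotiates the L2TP control protocol over its own UDP socket, then hands the established session to the kernel, which forwards data packets. A hedged sketch of that handoff using the new socket family; the header names follow the patch, while the tunnel/session IDs, the UDP descriptor, and example_attach_session() itself are assumptions for illustration:

	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <linux/if_pppox.h>
	#include <linux/if_pppol2tp.h>

	static int example_attach_session(int udp_fd, unsigned short tid,
					  unsigned short sid)
	{
		struct sockaddr_pppol2tp sax;
		int fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);

		if (fd < 0)
			return -1;
		memset(&sax, 0, sizeof(sax));
		sax.sa_family = AF_PPPOX;
		sax.sa_protocol = PX_PROTO_OL2TP;
		sax.pppol2tp.pid = getpid();
		sax.pppol2tp.fd = udp_fd;	/* kernel reuses the daemon's UDP socket */
		sax.pppol2tp.s_tunnel = tid;	/* local tunnel/session IDs */
		sax.pppol2tp.s_session = sid;
		sax.pppol2tp.d_tunnel = tid;	/* peer's IDs; equal here only for brevity */
		sax.pppol2tp.d_session = sid;
		/* peer IP/port would go in sax.pppol2tp.addr (omitted here) */
		if (connect(fd, (struct sockaddr *)&sax, sizeof(sax)) < 0) {
			close(fd);
			return -1;
		}
		return fd;	/* PPP channel now carries the L2TP data path */
	}

Once connect() succeeds, data frames flow through the kernel on the daemon's UDP socket, while control frames continue to be read and written by the daemon itself.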
+ config SLIP tristate "SLIP (serial line) support" ---help--- diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 1bbcbedad04a..336af0635df8 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_SUNBMAC) += sunbmac.o obj-$(CONFIG_MYRI_SBUS) += myri_sbus.o obj-$(CONFIG_SUNGEM) += sungem.o sungem_phy.o obj-$(CONFIG_CASSINI) += cassini.o +obj-$(CONFIG_SUNVNET) += sunvnet.o obj-$(CONFIG_MACE) += mace.o obj-$(CONFIG_BMAC) += bmac.o @@ -121,12 +122,16 @@ obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o obj-$(CONFIG_PPPOE) += pppox.o pppoe.o +obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o obj-$(CONFIG_SLIP) += slip.o obj-$(CONFIG_SLHC) += slhc.o +obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o + obj-$(CONFIG_DUMMY) += dummy.o obj-$(CONFIG_IFB) += ifb.o +obj-$(CONFIG_MACVLAN) += macvlan.o obj-$(CONFIG_DE600) += de600.o obj-$(CONFIG_DE620) += de620.o obj-$(CONFIG_LANCE) += lance.o @@ -172,14 +177,13 @@ obj-$(CONFIG_ZORRO8390) += zorro8390.o obj-$(CONFIG_HPLANCE) += hplance.o 7990.o obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o obj-$(CONFIG_EQUALIZER) += eql.o +obj-$(CONFIG_LGUEST_GUEST) += lguest_net.o obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o obj-$(CONFIG_DECLANCE) += declance.o obj-$(CONFIG_ATARILANCE) += atarilance.o -obj-$(CONFIG_ATARI_BIONET) += atari_bionet.o -obj-$(CONFIG_ATARI_PAMSNET) += atari_pamsnet.o obj-$(CONFIG_A2065) += a2065.o obj-$(CONFIG_HYDRA) += hydra.o obj-$(CONFIG_ARIADNE) += ariadne.o @@ -197,6 +201,7 @@ obj-$(CONFIG_S2IO) += s2io.o obj-$(CONFIG_MYRI10GE) += myri10ge/ obj-$(CONFIG_SMC91X) += smc91x.o obj-$(CONFIG_SMC911X) += smc911x.o +obj-$(CONFIG_BFIN_MAC) += bfin_mac.o obj-$(CONFIG_DM9000) += dm9000.o obj-$(CONFIG_FEC_8XX) += fec_8xx/ obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o diff --git a/drivers/net/Space.c b/drivers/net/Space.c index 1c3e293fbaf7..3b79c6cf21a3 100644 --- a/drivers/net/Space.c +++ b/drivers/net/Space.c @@ -75,8 +75,6 @@ extern struct net_device *atarilance_probe(int unit); extern struct net_device *sun3lance_probe(int unit); extern struct net_device *sun3_82586_probe(int unit); extern struct net_device *apne_probe(int unit); -extern struct net_device *bionet_probe(int unit); -extern struct net_device *pamsnet_probe(int unit); extern struct net_device *cs89x0_probe(int unit); extern struct net_device *hplance_probe(int unit); extern struct net_device *bagetlance_probe(int unit); @@ -264,12 +262,6 @@ static struct devprobe2 m68k_probes[] __initdata = { #ifdef CONFIG_APNE /* A1200 PCMCIA NE2000 */ {apne_probe, 0}, #endif -#ifdef CONFIG_ATARI_BIONET /* Atari Bionet Ethernet board */ - {bionet_probe, 0}, -#endif -#ifdef CONFIG_ATARI_PAMSNET /* Atari PAMsNet Ethernet board */ - {pamsnet_probe, 0}, -#endif #ifdef CONFIG_MVME147_NET /* MVME147 internal Ethernet */ {mvme147lance_probe, 0}, #endif diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c index 81d5a374042a..a45de6975bfe 100644 --- a/drivers/net/a2065.c +++ b/drivers/net/a2065.c @@ -322,9 +322,9 @@ static int lance_rx (struct net_device *dev) skb_reserve (skb, 2); /* 16 byte align */ skb_put (skb, len); /* make room */ - eth_copy_and_sum(skb, + skb_copy_to_linear_data(skb, (unsigned char *)&(ib->rx_buf [lp->rx_new][0]), - len, 0); + len); skb->protocol = eth_type_trans (skb, dev); netif_rx (skb); dev->last_rx = jiffies; diff --git a/drivers/net/ariadne.c 
b/drivers/net/ariadne.c index a241ae7855a3..bc5a38a6705f 100644 --- a/drivers/net/ariadne.c +++ b/drivers/net/ariadne.c @@ -746,7 +746,7 @@ static int ariadne_rx(struct net_device *dev) skb_reserve(skb,2); /* 16 byte align */ skb_put(skb,pkt_len); /* Make room */ - eth_copy_and_sum(skb, (char *)priv->rx_buff[entry], pkt_len,0); + skb_copy_to_linear_data(skb, (char *)priv->rx_buff[entry], pkt_len); skb->protocol=eth_type_trans(skb,dev); #if 0 printk(KERN_DEBUG "RX pkt type 0x%04x from ", diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig index 5bf2d33887ac..f9cc2b621fe2 100644 --- a/drivers/net/arm/Kconfig +++ b/drivers/net/arm/Kconfig @@ -43,6 +43,7 @@ config ARM_AT91_ETHER config EP93XX_ETH tristate "EP93xx Ethernet support" depends on ARM && ARCH_EP93XX + select MII help This is a driver for the ethernet hardware included in EP93xx CPUs. Say Y if you are building a kernel for EP93xx based devices. diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c index 2438c5bff237..f6ece1d43f6e 100644 --- a/drivers/net/arm/ep93xx_eth.c +++ b/drivers/net/arm/ep93xx_eth.c @@ -258,7 +258,7 @@ static int ep93xx_rx(struct net_device *dev, int *budget) skb_reserve(skb, 2); dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr, length, DMA_FROM_DEVICE); - eth_copy_and_sum(skb, ep->rx_buf[entry], length, 0); + skb_copy_to_linear_data(skb, ep->rx_buf[entry], length); skb_put(skb, length); skb->protocol = eth_type_trans(skb, dev); diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c index f21148e7b579..80f33b6d5713 100644 --- a/drivers/net/arm/ether1.c +++ b/drivers/net/arm/ether1.c @@ -36,7 +36,6 @@ #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> -#include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> @@ -75,7 +74,7 @@ static void ether1_timeout(struct net_device *dev); /* ------------------------------------------------------------------------- */ -static char version[] __initdata = "ether1 ethernet driver (c) 2000 Russell King v1.07\n"; +static char version[] __devinitdata = "ether1 ethernet driver (c) 2000 Russell King v1.07\n"; #define BUS_16 16 #define BUS_8 8 diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c index da713500654d..3805506a3ab8 100644 --- a/drivers/net/arm/ether3.c +++ b/drivers/net/arm/ether3.c @@ -51,7 +51,6 @@ #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> -#include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> @@ -69,7 +68,7 @@ #include <asm/ecard.h> #include <asm/io.h> -static char version[] __initdata = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n"; +static char version[] __devinitdata = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n"; #include "ether3.h" @@ -464,7 +463,7 @@ static void ether3_setmulticastlist(struct net_device *dev) if (dev->flags & IFF_PROMISC) { /* promiscuous mode */ priv(dev)->regs.config1 |= CFG1_RECVPROMISC; - } else if (dev->flags & IFF_ALLMULTI) { + } else if (dev->flags & IFF_ALLMULTI || dev->mc_count) { priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI; } else priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD; diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c index 769ba69451f4..0d37d9d1fd78 100644 --- a/drivers/net/arm/etherh.c +++ b/drivers/net/arm/etherh.c @@ -31,7 +31,6 @@ #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> -#include <linux/ptrace.h> #include <linux/ioport.h> #include 
<linux/in.h> #include <linux/slab.h> diff --git a/drivers/net/atari_bionet.c b/drivers/net/atari_bionet.c deleted file mode 100644 index 3d87bd2b4194..000000000000 --- a/drivers/net/atari_bionet.c +++ /dev/null @@ -1,675 +0,0 @@ -/* bionet.c BioNet-100 device driver for linux68k. - * - * Version: @(#)bionet.c 1.0 02/06/96 - * - * Author: Hartmut Laue <laue@ifk-mp.uni-kiel.de> - * and Torsten Narjes <narjes@ifk-mp.uni-kiel.de> - * - * Little adaptions for integration into pl7 by Roman Hodek - * - * Some changes in bionet_poll_rx by Karl-Heinz Lohner - * - What is it ? - ------------ - This driver controls the BIONET-100 LAN-Adapter which connects - an ATARI ST/TT via the ACSI-port to an Ethernet-based network. - - This version can be compiled as a loadable module (See the - compile command at the bottom of this file). - At load time, you can optionally set the debugging level and the - fastest response time on the command line of 'insmod'. - - 'bionet_debug' - controls the amount of diagnostic messages: - 0 : no messages - >0 : see code for meaning of printed messages - - 'bionet_min_poll_time' (always >=1) - gives the time (in jiffies) between polls. Low values - increase the system load (beware!) - - When loaded, a net device with the name 'bio0' becomes available, - which can be controlled with the usual 'ifconfig' command. - - It is possible to compile this driver into the kernel like other - (net) drivers. For this purpose, some source files (e.g. config-files - makefiles, Space.c) must be changed accordingly. (You may refer to - other drivers how to do it.) In this case, the device will be detected - at boot time and (probably) appear as 'eth0'. - - This code is based on several sources: - - The driver code for a parallel port ethernet adapter by - Donald Becker (see file 'atp.c' from the PC linux distribution) - - The ACSI code by Roman Hodek for the ATARI-ACSI harddisk support - and DMA handling. - - Very limited information about moving packets in and out of the - BIONET-adapter from the TCP package for TOS by BioData GmbH. - - Theory of Operation - ------------------- - Because the ATARI DMA port is usually shared between several - devices (eg. harddisk, floppy) we cannot block the ACSI bus - while waiting for interrupts. Therefore we use a polling mechanism - to fetch packets from the adapter. For the same reason, we send - packets without checking that the previous packet has been sent to - the LAN. We rely on the higher levels of the networking code to detect - missing packets and resend them. - - Before we access the ATARI DMA controller, we check if another - process is using the DMA. If not, we lock the DMA, perform one or - more packet transfers and unlock the DMA before returning. - We do not use 'stdma_lock' unconditionally because it is unclear - if the networking code can be set to sleep, which will happen if - another (possibly slow) device is using the DMA controller. - - The polling is done via timer interrupts which periodically - 'simulate' an interrupt from the Ethernet adapter. The time (in jiffies) - between polls varies depending on an estimate of the net activity. - The allowed range is given by the variable 'bionet_min_poll_time' - for the lower (fastest) limit and the constant 'MAX_POLL_TIME' - for the higher (slowest) limit. - - Whenever a packet arrives, we switch to fastest response by setting - the polling time to its lowest limit. If the following poll fails, - because no packets have arrived, we increase the time for the next - poll. 
When the net activity is low, the polling time effectively - stays at its maximum value, resulting in the lowest load for the - machine. - */ - -#define MAX_POLL_TIME 10 - -static char version[] = - "bionet.c:v1.0 06-feb-96 (c) Hartmut Laue.\n"; - -#include <linux/module.h> - -#include <linux/errno.h> -#include <linux/kernel.h> -#include <linux/jiffies.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/in.h> -#include <linux/slab.h> -#include <linux/string.h> -#include <linux/delay.h> -#include <linux/timer.h> -#include <linux/init.h> -#include <linux/bitops.h> - -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> - -#include <asm/setup.h> -#include <asm/pgtable.h> -#include <asm/system.h> -#include <asm/io.h> -#include <asm/dma.h> -#include <asm/atarihw.h> -#include <asm/atariints.h> -#include <asm/atari_acsi.h> -#include <asm/atari_stdma.h> - - -/* use 0 for production, 1 for verification, >2 for debug - */ -#ifndef NET_DEBUG -#define NET_DEBUG 0 -#endif -/* - * Global variable 'bionet_debug'. Can be set at load time by 'insmod' - */ -unsigned int bionet_debug = NET_DEBUG; -module_param(bionet_debug, int, 0); -MODULE_PARM_DESC(bionet_debug, "bionet debug level (0-2)"); -MODULE_LICENSE("GPL"); - -static unsigned int bionet_min_poll_time = 2; - - -/* Information that need to be kept for each board. - */ -struct net_local { - struct net_device_stats stats; - long open_time; /* for debugging */ - int poll_time; /* polling time varies with net load */ -}; - -static struct nic_pkt_s { /* packet format */ - unsigned char status; - unsigned char dummy; - unsigned char l_lo, l_hi; - unsigned char buffer[3000]; -} *nic_packet; -unsigned char *phys_nic_packet; - -/* Index to functions, as function prototypes. - */ -static int bionet_open(struct net_device *dev); -static int bionet_send_packet(struct sk_buff *skb, struct net_device *dev); -static void bionet_poll_rx(struct net_device *); -static int bionet_close(struct net_device *dev); -static struct net_device_stats *net_get_stats(struct net_device *dev); -static void bionet_tick(unsigned long); - -static DEFINE_TIMER(bionet_timer, bionet_tick, 0, 0); - -#define STRAM_ADDR(a) (((a) & 0xff000000) == 0) - -/* The following routines access the ethernet board connected to the - * ACSI port via the st_dma chip. - */ -#define NODE_ADR 0x60 - -#define C_READ 8 -#define C_WRITE 0x0a -#define C_GETEA 0x0f -#define C_SETCR 0x0e - -static int -sendcmd(unsigned int a0, unsigned int mod, unsigned int cmd) { - unsigned int c; - - dma_wd.dma_mode_status = (mod | ((a0) ? 
2 : 0) | 0x88); - dma_wd.fdc_acces_seccount = cmd; - dma_wd.dma_mode_status = (mod | 0x8a); - - if( !acsi_wait_for_IRQ(HZ/2) ) /* wait for cmd ack */ - return -1; /* timeout */ - - c = dma_wd.fdc_acces_seccount; - return (c & 0xff); -} - - -static void -set_status(int cr) { - sendcmd(0,0x100,NODE_ADR | C_SETCR); /* CMD: SET CR */ - sendcmd(1,0x100,cr); - - dma_wd.dma_mode_status = 0x80; -} - -static int -get_status(unsigned char *adr) { - int i,c; - - DISABLE_IRQ(); - c = sendcmd(0,0x00,NODE_ADR | C_GETEA); /* CMD: GET ETH ADR*/ - if( c < 0 ) goto gsend; - - /* now read status bytes */ - - for (i=0; i<6; i++) { - dma_wd.fdc_acces_seccount = 0; /* request next byte */ - - if( !acsi_wait_for_IRQ(HZ/2) ) { /* wait for cmd ack */ - c = -1; - goto gsend; /* timeout */ - } - c = dma_wd.fdc_acces_seccount; - *adr++ = (unsigned char)c; - } - c = 1; -gsend: - dma_wd.dma_mode_status = 0x80; - return c; -} - -static irqreturn_t -bionet_intr(int irq, void *data) { - return IRQ_HANDLED; -} - - -static int -get_frame(unsigned long paddr, int odd) { - int c; - unsigned long flags; - - DISABLE_IRQ(); - local_irq_save(flags); - - dma_wd.dma_mode_status = 0x9a; - dma_wd.dma_mode_status = 0x19a; - dma_wd.dma_mode_status = 0x9a; - dma_wd.fdc_acces_seccount = 0x04; /* sector count (was 5) */ - dma_wd.dma_lo = (unsigned char)paddr; - paddr >>= 8; - dma_wd.dma_md = (unsigned char)paddr; - paddr >>= 8; - dma_wd.dma_hi = (unsigned char)paddr; - local_irq_restore(flags); - - c = sendcmd(0,0x00,NODE_ADR | C_READ); /* CMD: READ */ - if( c < 128 ) goto rend; - - /* now read block */ - - c = sendcmd(1,0x00,odd); /* odd flag for address shift */ - dma_wd.dma_mode_status = 0x0a; - - if( !acsi_wait_for_IRQ(100) ) { /* wait for DMA to complete */ - c = -1; - goto rend; - } - dma_wd.dma_mode_status = 0x8a; - dma_wd.dma_mode_status = 0x18a; - dma_wd.dma_mode_status = 0x8a; - c = dma_wd.fdc_acces_seccount; - - dma_wd.dma_mode_status = 0x88; - c = dma_wd.fdc_acces_seccount; - c = 1; - -rend: - dma_wd.dma_mode_status = 0x80; - udelay(40); - acsi_wait_for_noIRQ(20); - return c; -} - - -static int -hardware_send_packet(unsigned long paddr, int cnt) { - unsigned int c; - unsigned long flags; - - DISABLE_IRQ(); - local_irq_save(flags); - - dma_wd.dma_mode_status = 0x19a; - dma_wd.dma_mode_status = 0x9a; - dma_wd.dma_mode_status = 0x19a; - dma_wd.dma_lo = (unsigned char)paddr; - paddr >>= 8; - dma_wd.dma_md = (unsigned char)paddr; - paddr >>= 8; - dma_wd.dma_hi = (unsigned char)paddr; - - dma_wd.fdc_acces_seccount = 0x4; /* sector count */ - local_irq_restore(flags); - - c = sendcmd(0,0x100,NODE_ADR | C_WRITE); /* CMD: WRITE */ - c = sendcmd(1,0x100,cnt&0xff); - c = sendcmd(1,0x100,cnt>>8); - - /* now write block */ - - dma_wd.dma_mode_status = 0x10a; /* DMA enable */ - if( !acsi_wait_for_IRQ(100) ) /* wait for DMA to complete */ - goto end; - - dma_wd.dma_mode_status = 0x19a; /* DMA disable ! */ - c = dma_wd.fdc_acces_seccount; - -end: - c = sendcmd(1,0x100,0); - c = sendcmd(1,0x100,0); - - dma_wd.dma_mode_status = 0x180; - udelay(40); - acsi_wait_for_noIRQ(20); - return( c & 0x02); -} - - -/* Check for a network adaptor of this type, and return '0' if one exists. - */ -struct net_device * __init bionet_probe(int unit) -{ - struct net_device *dev; - unsigned char station_addr[6]; - static unsigned version_printed; - static int no_more_found; /* avoid "Probing for..." 
printed 4 times */ - int i; - int err; - - if (!MACH_IS_ATARI || no_more_found) - return ERR_PTR(-ENODEV); - - dev = alloc_etherdev(sizeof(struct net_local)); - if (!dev) - return ERR_PTR(-ENOMEM); - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } - SET_MODULE_OWNER(dev); - - printk("Probing for BioNet 100 Adapter...\n"); - - stdma_lock(bionet_intr, NULL); - i = get_status(station_addr); /* Read the station address PROM. */ - ENABLE_IRQ(); - stdma_release(); - - /* Check the first three octets of the S.A. for the manufactor's code. - */ - - if( i < 0 - || station_addr[0] != 'B' - || station_addr[1] != 'I' - || station_addr[2] != 'O' ) { - no_more_found = 1; - printk( "No BioNet 100 found.\n" ); - free_netdev(dev); - return ERR_PTR(-ENODEV); - } - - if (bionet_debug > 0 && version_printed++ == 0) - printk(version); - - printk("%s: %s found, eth-addr: %02x-%02x-%02x:%02x-%02x-%02x.\n", - dev->name, "BioNet 100", - station_addr[0], station_addr[1], station_addr[2], - station_addr[3], station_addr[4], station_addr[5]); - - /* Initialize the device structure. */ - - nic_packet = (struct nic_pkt_s *)acsi_buffer; - phys_nic_packet = (unsigned char *)phys_acsi_buffer; - if (bionet_debug > 0) { - printk("nic_packet at 0x%p, phys at 0x%p\n", - nic_packet, phys_nic_packet ); - } - - dev->open = bionet_open; - dev->stop = bionet_close; - dev->hard_start_xmit = bionet_send_packet; - dev->get_stats = net_get_stats; - - /* Fill in the fields of the device structure with ethernet-generic - * values. This should be in a common file instead of per-driver. - */ - - for (i = 0; i < ETH_ALEN; i++) { -#if 0 - dev->broadcast[i] = 0xff; -#endif - dev->dev_addr[i] = station_addr[i]; - } - err = register_netdev(dev); - if (!err) - return dev; - free_netdev(dev); - return ERR_PTR(err); -} - -/* Open/initialize the board. This is called (in the current kernel) - sometime after booting when the 'ifconfig' program is run. - - This routine should set everything up anew at each open, even - registers that "should" only need to be set once at boot, so that - there is non-reboot way to recover if something goes wrong. - */ -static int -bionet_open(struct net_device *dev) { - struct net_local *lp = netdev_priv(dev); - - if (bionet_debug > 0) - printk("bionet_open\n"); - stdma_lock(bionet_intr, NULL); - - /* Reset the hardware here. - */ - set_status(4); - lp->open_time = 0; /*jiffies*/ - lp->poll_time = MAX_POLL_TIME; - - dev->tbusy = 0; - dev->interrupt = 0; - dev->start = 1; - - stdma_release(); - bionet_timer.data = (long)dev; - bionet_timer.expires = jiffies + lp->poll_time; - add_timer(&bionet_timer); - return 0; -} - -static int -bionet_send_packet(struct sk_buff *skb, struct net_device *dev) { - struct net_local *lp = netdev_priv(dev); - unsigned long flags; - - /* Block a timer-based transmit from overlapping. This could better be - * done with atomic_swap(1, dev->tbusy), but set_bit() works as well. - */ - local_irq_save(flags); - - if (stdma_islocked()) { - local_irq_restore(flags); - lp->stats.tx_errors++; - } - else { - int length = ETH_ZLEN < skb->len ? 
skb->len : ETH_ZLEN; - unsigned long buf = virt_to_phys(skb->data); - int stat; - - stdma_lock(bionet_intr, NULL); - local_irq_restore(flags); - if( !STRAM_ADDR(buf+length-1) ) { - skb_copy_from_linear_data(skb, nic_packet->buffer, - length); - buf = (unsigned long)&((struct nic_pkt_s *)phys_nic_packet)->buffer; - } - - if (bionet_debug >1) { - u_char *data = nic_packet->buffer, *p; - int i; - - printk( "%s: TX pkt type 0x%4x from ", dev->name, - ((u_short *)data)[6]); - - for( p = &data[6], i = 0; i < 6; i++ ) - printk("%02x%s", *p++,i != 5 ? ":" : "" ); - printk(" to "); - - for( p = data, i = 0; i < 6; i++ ) - printk("%02x%s", *p++,i != 5 ? ":" : "" "\n" ); - - printk( "%s: ", dev->name ); - printk(" data %02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x" - " %02x%02x%02x%02x len %d\n", - data[12], data[13], data[14], data[15], data[16], data[17], data[18], data[19], - data[20], data[21], data[22], data[23], data[24], data[25], data[26], data[27], - data[28], data[29], data[30], data[31], data[32], data[33], - length ); - } - dma_cache_maintenance(buf, length, 1); - - stat = hardware_send_packet(buf, length); - ENABLE_IRQ(); - stdma_release(); - - dev->trans_start = jiffies; - dev->tbusy = 0; - lp->stats.tx_packets++; - lp->stats.tx_bytes+=length; - } - dev_kfree_skb(skb); - - return 0; -} - -/* We have a good packet(s), get it/them out of the buffers. - */ -static void -bionet_poll_rx(struct net_device *dev) { - struct net_local *lp = netdev_priv(dev); - int boguscount = 10; - int pkt_len, status; - unsigned long flags; - - local_irq_save(flags); - /* ++roman: Take care at locking the ST-DMA... This must be done with ints - * off, since otherwise an int could slip in between the question and the - * locking itself, and then we'd go to sleep... And locking itself is - * necessary to keep the floppy_change timer from working with ST-DMA - * registers. */ - if (stdma_islocked()) { - local_irq_restore(flags); - return; - } - stdma_lock(bionet_intr, NULL); - DISABLE_IRQ(); - local_irq_restore(flags); - - if( lp->poll_time < MAX_POLL_TIME ) lp->poll_time++; - - while(boguscount--) { - status = get_frame((unsigned long)phys_nic_packet, 0); - - if( status == 0 ) break; - - /* Good packet... */ - - dma_cache_maintenance((unsigned long)phys_nic_packet, 1520, 0); - - pkt_len = (nic_packet->l_hi << 8) | nic_packet->l_lo; - - lp->poll_time = bionet_min_poll_time; /* fast poll */ - if( pkt_len >= 60 && pkt_len <= 1520 ) { - /* ^^^^ war 1514 KHL */ - /* Malloc up new buffer. - */ - struct sk_buff *skb = dev_alloc_skb( pkt_len + 2 ); - if (skb == NULL) { - printk("%s: Memory squeeze, dropping packet.\n", - dev->name); - lp->stats.rx_dropped++; - break; - } - - skb_reserve( skb, 2 ); /* 16 Byte align */ - skb_put( skb, pkt_len ); /* make room */ - - /* 'skb->data' points to the start of sk_buff data area. - */ - skb_copy_to_linear_data(skb, nic_packet->buffer, - pkt_len); - skb->protocol = eth_type_trans( skb, dev ); - netif_rx(skb); - dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes+=pkt_len; - - /* If any worth-while packets have been received, dev_rint() - has done a mark_bh(INET_BH) for us and will work on them - when we get to the bottom-half routine. - */ - - if (bionet_debug >1) { - u_char *data = nic_packet->buffer, *p; - int i; - - printk( "%s: RX pkt type 0x%4x from ", dev->name, - ((u_short *)data)[6]); - - - for( p = &data[6], i = 0; i < 6; i++ ) - printk("%02x%s", *p++,i != 5 ? 
":" : "" ); - printk(" to "); - for( p = data, i = 0; i < 6; i++ ) - printk("%02x%s", *p++,i != 5 ? ":" : "" "\n" ); - - printk( "%s: ", dev->name ); - printk(" data %02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x" - " %02x%02x%02x%02x len %d\n", - data[12], data[13], data[14], data[15], data[16], data[17], data[18], data[19], - data[20], data[21], data[22], data[23], data[24], data[25], data[26], data[27], - data[28], data[29], data[30], data[31], data[32], data[33], - pkt_len ); - } - } - else { - printk(" Packet has wrong length: %04d bytes\n", pkt_len); - lp->stats.rx_errors++; - } - } - stdma_release(); - ENABLE_IRQ(); - return; -} - -/* bionet_tick: called by bionet_timer. Reads packets from the adapter, - * passes them to the higher layers and restarts the timer. - */ -static void -bionet_tick(unsigned long data) { - struct net_device *dev = (struct net_device *)data; - struct net_local *lp = netdev_priv(dev); - - if( bionet_debug > 0 && (lp->open_time++ & 7) == 8 ) - printk("bionet_tick: %ld\n", lp->open_time); - - if( !stdma_islocked() ) bionet_poll_rx(dev); - - bionet_timer.expires = jiffies + lp->poll_time; - add_timer(&bionet_timer); -} - -/* The inverse routine to bionet_open(). - */ -static int -bionet_close(struct net_device *dev) { - struct net_local *lp = netdev_priv(dev); - - if (bionet_debug > 0) - printk("bionet_close, open_time=%ld\n", lp->open_time); - del_timer(&bionet_timer); - stdma_lock(bionet_intr, NULL); - - set_status(0); - lp->open_time = 0; - - dev->tbusy = 1; - dev->start = 0; - - stdma_release(); - return 0; -} - -/* Get the current statistics. - This may be called with the card open or closed. - */ -static struct net_device_stats *net_get_stats(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - return &lp->stats; -} - - -#ifdef MODULE - -static struct net_device *bio_dev; - -int init_module(void) -{ - bio_dev = bionet_probe(-1); - if (IS_ERR(bio_dev)) - return PTR_ERR(bio_dev); - return 0; -} - -void cleanup_module(void) -{ - unregister_netdev(bio_dev); - free_netdev(bio_dev); -} - -#endif /* MODULE */ - -/* Local variables: - * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/include - -b m68k-linuxaout -Wall -Wstrict-prototypes -O2 - -fomit-frame-pointer -pipe -DMODULE -I../../net/inet -c bionet.c" - * version-control: t - * kept-new-versions: 5 - * tab-width: 8 - * End: - */ diff --git a/drivers/net/atari_pamsnet.c b/drivers/net/atari_pamsnet.c deleted file mode 100644 index f7356374a2e7..000000000000 --- a/drivers/net/atari_pamsnet.c +++ /dev/null @@ -1,878 +0,0 @@ -/* atari_pamsnet.c PAMsNet device driver for linux68k. - * - * Version: @(#)PAMsNet.c 0.2ß 03/31/96 - * - * Author: Torsten Lang <Torsten.Lang@ap.physik.uni-giessen.de> - * <Torsten.Lang@jung.de> - * - * This driver is based on my driver PAMSDMA.c for MiNT-Net and - * on the driver bionet.c written by - * Hartmut Laue <laue@ifk-mp.uni-kiel.de> - * and Torsten Narjes <narjes@ifk-mp.uni-kiel.de> - * - * Little adaptions for integration into pl7 by Roman Hodek - * - What is it ? - ------------ - This driver controls the PAMsNet LAN-Adapter which connects - an ATARI ST/TT via the ACSI-port to an Ethernet-based network. - - This version can be compiled as a loadable module (See the - compile command at the bottom of this file). - At load time, you can optionally set the debugging level and the - fastest response time on the command line of 'insmod'. 
- - 'pamsnet_debug' - controls the amount of diagnostic messages: - 0 : no messages - >0 : see code for meaning of printed messages - - 'pamsnet_min_poll_time' (always >=1) - gives the time (in jiffies) between polls. Low values - increase the system load (beware!) - - When loaded, a net device with the name 'eth?' becomes available, - which can be controlled with the usual 'ifconfig' command. - - It is possible to compile this driver into the kernel like other - (net) drivers. For this purpose, some source files (e.g. config-files - makefiles, Space.c) must be changed accordingly. (You may refer to - other drivers how to do it.) In this case, the device will be detected - at boot time and (probably) appear as 'eth0'. - - Theory of Operation - ------------------- - Because the ATARI DMA port is usually shared between several - devices (eg. harddisk, floppy) we cannot block the ACSI bus - while waiting for interrupts. Therefore we use a polling mechanism - to fetch packets from the adapter. For the same reason, we send - packets without checking that the previous packet has been sent to - the LAN. We rely on the higher levels of the networking code to detect - missing packets and resend them. - - Before we access the ATARI DMA controller, we check if another - process is using the DMA. If not, we lock the DMA, perform one or - more packet transfers and unlock the DMA before returning. - We do not use 'stdma_lock' unconditionally because it is unclear - if the networking code can be set to sleep, which will happen if - another (possibly slow) device is using the DMA controller. - - The polling is done via timer interrupts which periodically - 'simulate' an interrupt from the Ethernet adapter. The time (in jiffies) - between polls varies depending on an estimate of the net activity. - The allowed range is given by the variable 'bionet_min_poll_time' - for the lower (fastest) limit and the constant 'MAX_POLL_TIME' - for the higher (slowest) limit. - - Whenever a packet arrives, we switch to fastest response by setting - the polling time to its lowest limit. If the following poll fails, - because no packets have arrived, we increase the time for the next - poll. When the net activity is low, the polling time effectively - stays at its maximum value, resulting in the lowest load for the - machine. - */ - -#define MAX_POLL_TIME 10 - -static char *version = - "pamsnet.c:v0.2beta 30-mar-96 (c) Torsten Lang.\n"; - -#include <linux/module.h> - -#include <linux/kernel.h> -#include <linux/jiffies.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/in.h> -#include <linux/slab.h> -#include <linux/string.h> -#include <linux/bitops.h> -#include <asm/system.h> -#include <asm/pgtable.h> -#include <asm/io.h> -#include <asm/dma.h> -#include <linux/errno.h> -#include <asm/atarihw.h> -#include <asm/atariints.h> -#include <asm/atari_stdma.h> -#include <asm/atari_acsi.h> - -#include <linux/delay.h> -#include <linux/timer.h> -#include <linux/init.h> - -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> - -#undef READ -#undef WRITE - -/* use 0 for production, 1 for verification, >2 for debug - */ -#ifndef NET_DEBUG -#define NET_DEBUG 0 -#endif -/* - * Global variable 'pamsnet_debug'. 
Can be set at load time by 'insmod' - */ -unsigned int pamsnet_debug = NET_DEBUG; -module_param(pamsnet_debug, int, 0); -MODULE_PARM_DESC(pamsnet_debug, "pamsnet debug enable (0-1)"); -MODULE_LICENSE("GPL"); - -static unsigned int pamsnet_min_poll_time = 2; - - -/* Information that need to be kept for each board. - */ -struct net_local { - struct net_device_stats stats; - long open_time; /* for debugging */ - int poll_time; /* polling time varies with net load */ -}; - -static struct nic_pkt_s { /* packet format */ - unsigned char buffer[2048]; -} *nic_packet = 0; -unsigned char *phys_nic_packet; - -typedef unsigned char HADDR[6]; /* 6-byte hardware address of lance */ - -/* Index to functions, as function prototypes. - */ -static void start (int target); -static int stop (int target); -static int testpkt (int target); -static int sendpkt (int target, unsigned char *buffer, int length); -static int receivepkt (int target, unsigned char *buffer); -static int inquiry (int target, unsigned char *buffer); -static HADDR *read_hw_addr(int target, unsigned char *buffer); -static void setup_dma (void *address, unsigned rw_flag, int num_blocks); -static int send_first (int target, unsigned char byte); -static int send_1_5 (int lun, unsigned char *command, int dma); -static int get_status (void); -static int calc_received (void *start_address); - -static int pamsnet_open(struct net_device *dev); -static int pamsnet_send_packet(struct sk_buff *skb, struct net_device *dev); -static void pamsnet_poll_rx(struct net_device *); -static int pamsnet_close(struct net_device *dev); -static struct net_device_stats *net_get_stats(struct net_device *dev); -static void pamsnet_tick(unsigned long); - -static irqreturn_t pamsnet_intr(int irq, void *data); - -static DEFINE_TIMER(pamsnet_timer, pamsnet_tick, 0, 0); - -#define STRAM_ADDR(a) (((a) & 0xff000000) == 0) - -typedef struct -{ - unsigned char reserved1[0x38]; - HADDR hwaddr; - unsigned char reserved2[0x1c2]; -} DMAHWADDR; - -/* - * Definitions of commands understood by the PAMs DMA adaptor. - * - * In general the DMA adaptor uses LUN 0, 5, 6 and 7 on one ID changeable - * by the PAM's Net software. - * - * LUN 0 works as a harddisk. You can boot the PAM's Net driver there. - * LUN 5 works as a harddisk and lets you access the RAM and some I/O HW - * area. In sector 0, bytes 0x38-0x3d you find the ethernet HW address - * of the adaptor. - * LUN 6 works as a harddisk and lets you access the firmware ROM. - * LUN 7 lets you send and receive packets. - * - * Some commands like the INQUIRY command work identical on all used LUNs. - * - * UNKNOWN1 seems to read some data. - * Command length is 6 bytes. - * UNKNOWN2 seems to read some data (command byte 1 must be !=0). The - * following bytes seem to be something like an allocation length. - * Command length is 6 bytes. - * READPKT reads a packet received by the DMA adaptor. - * Command length is 6 bytes. - * WRITEPKT sends a packet transferred by the following DMA phase. The length - * of the packet is transferred in command bytes 3 and 4. - * The adaptor automatically replaces the src hw address in an ethernet - * packet by its own hw address. - * Command length is 6 bytes. - * INQUIRY has the same function as the INQUIRY command supported by harddisks - * and other SCSI devices. It lets you detect which device you found - * at a given address. - * Command length is 6 bytes. - * START initializes the DMA adaptor. After this command it is able to send - * and receive packets. There is no status byte returned! 
- * Command length is 1 byte. - * NUMPKTS gives back the number of received packets waiting in the queue in - * the status byte. - * Command length is 1 byte. - * UNKNOWN3 - * UNKNOWN4 Function of these three commands is unknown. - * UNKNOWN5 The command length of these three commands is 1 byte. - * DESELECT immediately deselects the DMA adaptor. May important with interrupt - * driven operation. - * Command length is 1 byte. - * STOP resets the DMA adaptor. After this command packets can no longer - * be received or transferred. - * Command length is 6 byte. - */ - -enum {UNKNOWN1=3, READPKT=8, UNKNOWN2, WRITEPKT=10, INQUIRY=18, START, - NUMPKTS=22, UNKNOWN3, UNKNOWN4, UNKNOWN5, DESELECT, STOP}; - -#define READSECTOR READPKT -#define WRITESECTOR WRITEPKT - -u_char *inquire8="MV PAM's NET/GK"; - -#define DMALOW dma_wd.dma_lo -#define DMAMID dma_wd.dma_md -#define DMAHIGH dma_wd.dma_hi -#define DACCESS dma_wd.fdc_acces_seccount - -#define MFP_GPIP mfp.par_dt_reg - -/* Some useful functions */ - -#define INT (!(MFP_GPIP & 0x20)) -#define DELAY ({MFP_GPIP; MFP_GPIP; MFP_GPIP;}) -#define WRITEMODE(value) \ - ({ u_short dummy = value; \ - __asm__ volatile("movew %0, 0xFFFF8606" : : "d"(dummy)); \ - DELAY; \ - }) -#define WRITEBOTH(value1, value2) \ - ({ u_long dummy = (u_long)(value1)<<16 | (u_short)(value2); \ - __asm__ volatile("movel %0, 0xFFFF8604" : : "d"(dummy)); \ - DELAY; \ - }) - -/* Definitions for DMODE */ - -#define READ 0x000 -#define WRITE 0x100 - -#define DMA_FDC 0x080 -#define DMA_ACSI 0x000 - -#define DMA_DISABLE 0x040 - -#define SEC_COUNT 0x010 -#define DMA_WINDOW 0x000 - -#define REG_ACSI 0x008 -#define REG_FDC 0x000 - -#define A1 0x002 - -/* Timeout constants */ - -#define TIMEOUTCMD HZ/2 /* ca. 500ms */ -#define TIMEOUTDMA HZ /* ca. 1s */ -#define COMMAND_DELAY 500 /* ca. 0.5ms */ - -unsigned rw; -int lance_target = -1; -int if_up = 0; - -/* The following routines access the ethernet board connected to the - * ACSI port via the st_dma chip. - */ - -/* The following lowlevel routines work on physical addresses only and assume - * that eventually needed buffers are - * - completely located in ST RAM - * - are contigous in the physical address space - */ - -/* Setup the DMA counter */ - -static void -setup_dma (void *address, unsigned rw_flag, int num_blocks) -{ - WRITEMODE((unsigned) rw_flag | DMA_FDC | SEC_COUNT | REG_ACSI | - A1); - WRITEMODE((unsigned)(rw_flag ^ WRITE) | DMA_FDC | SEC_COUNT | REG_ACSI | - A1); - WRITEMODE((unsigned) rw_flag | DMA_FDC | SEC_COUNT | REG_ACSI | - A1); - DMALOW = (unsigned char)((unsigned long)address & 0xFF); - DMAMID = (unsigned char)(((unsigned long)address >> 8) & 0xFF); - DMAHIGH = (unsigned char)(((unsigned long)address >> 16) & 0xFF); - WRITEBOTH((unsigned)num_blocks & 0xFF, - rw_flag | DMA_FDC | DMA_WINDOW | REG_ACSI | A1); - rw = rw_flag; -} - -/* Send the first byte of an command block */ - -static int -send_first (int target, unsigned char byte) -{ - rw = READ; - acsi_delay_end(COMMAND_DELAY); - /* - * wake up ACSI - */ - WRITEMODE(DMA_FDC | DMA_WINDOW | REG_ACSI); - /* - * write command byte - */ - WRITEBOTH((target << 5) | (byte & 0x1F), DMA_FDC | - DMA_WINDOW | REG_ACSI | A1); - return (!acsi_wait_for_IRQ(TIMEOUTCMD)); -} - -/* Send the rest of an command block */ - -static int -send_1_5 (int lun, unsigned char *command, int dma) -{ - int i, j; - - for (i=0; i<5; i++) { - WRITEBOTH((!i ? (((lun & 0x7) << 5) | (command[i] & 0x1F)) - : command[i]), - rw | REG_ACSI | DMA_WINDOW | - ((i < 4) ? DMA_FDC - : (dma ? 
DMA_ACSI - : DMA_FDC)) | A1); - if (i < 4 && (j = !acsi_wait_for_IRQ(TIMEOUTCMD))) - return (j); - } - return (0); -} - -/* Read a status byte */ - -static int -get_status (void) -{ - WRITEMODE(DMA_FDC | DMA_WINDOW | REG_ACSI | A1); - acsi_delay_start(); - return ((int)(DACCESS & 0xFF)); -} - -/* Calculate the number of received bytes */ - -static int -calc_received (void *start_address) -{ - return (int)( - (((unsigned long)DMAHIGH << 16) | ((unsigned)DMAMID << 8) | DMALOW) - - (unsigned long)start_address); -} - -/* The following midlevel routines still work on physical addresses ... */ - -/* start() starts the PAM's DMA adaptor */ - -static void -start (int target) -{ - send_first(target, START); -} - -/* stop() stops the PAM's DMA adaptor and returns a value of zero in case of success */ - -static int -stop (int target) -{ - int ret = -1; - unsigned char cmd_buffer[5]; - - if (send_first(target, STOP)) - goto bad; - cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[2] = - cmd_buffer[3] = cmd_buffer[4] = 0; - if (send_1_5(7, cmd_buffer, 0) || - !acsi_wait_for_IRQ(TIMEOUTDMA) || - get_status()) - goto bad; - ret = 0; -bad: - return (ret); -} - -/* testpkt() returns the number of received packets waiting in the queue */ - -static int -testpkt(int target) -{ - int ret = -1; - - if (send_first(target, NUMPKTS)) - goto bad; - ret = get_status(); -bad: - return (ret); -} - -/* inquiry() returns 0 when PAM's DMA found, -1 when timeout, -2 otherwise */ -/* Please note: The buffer is for internal use only but must be defined! */ - -static int -inquiry (int target, unsigned char *buffer) -{ - int ret = -1; - unsigned char *vbuffer = phys_to_virt((unsigned long)buffer); - unsigned char cmd_buffer[5]; - - if (send_first(target, INQUIRY)) - goto bad; - setup_dma(buffer, READ, 1); - vbuffer[8] = vbuffer[27] = 0; /* Avoid confusion with previous read data */ - cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[2] = cmd_buffer[4] = 0; - cmd_buffer[3] = 48; - if (send_1_5(5, cmd_buffer, 1) || - !acsi_wait_for_IRQ(TIMEOUTDMA) || - get_status() || - (calc_received(buffer) < 32)) - goto bad; - dma_cache_maintenance((unsigned long)(buffer+8), 20, 0); - if (memcmp(inquire8, vbuffer+8, 20)) - goto bad; - ret = 0; -bad: - if (!!NET_DEBUG) { - vbuffer[8+20]=0; - printk("inquiry of target %d: %s\n", target, vbuffer+8); - } - return (ret); -} - -/* - * read_hw_addr() reads the sector containing the hwaddr and returns - * a pointer to it (virtual address!) 
or 0 in case of an error - */ - -static HADDR -*read_hw_addr(int target, unsigned char *buffer) -{ - HADDR *ret = 0; - unsigned char cmd_buffer[5]; - - if (send_first(target, READSECTOR)) - goto bad; - setup_dma(buffer, READ, 1); - cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[2] = cmd_buffer[4] = 0; - cmd_buffer[3] = 1; - if (send_1_5(5, cmd_buffer, 1) || - !acsi_wait_for_IRQ(TIMEOUTDMA) || - get_status()) - goto bad; - ret = phys_to_virt((unsigned long)&(((DMAHWADDR *)buffer)->hwaddr)); - dma_cache_maintenance((unsigned long)buffer, 512, 0); -bad: - return (ret); -} - -static irqreturn_t -pamsnet_intr(int irq, void *data) -{ - return IRQ_HANDLED; -} - -/* receivepkt() loads a packet to a given buffer and returns its length */ - -static int -receivepkt (int target, unsigned char *buffer) -{ - int ret = -1; - unsigned char cmd_buffer[5]; - - if (send_first(target, READPKT)) - goto bad; - setup_dma(buffer, READ, 3); - cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[2] = cmd_buffer[4] = 0; - cmd_buffer[3] = 3; - if (send_1_5(7, cmd_buffer, 1) || - !acsi_wait_for_IRQ(TIMEOUTDMA) || - get_status()) - goto bad; - ret = calc_received(buffer); -bad: - return (ret); -} - -/* sendpkt() sends a packet and returns a value of zero when the packet was sent - successfully */ - -static int -sendpkt (int target, unsigned char *buffer, int length) -{ - int ret = -1; - unsigned char cmd_buffer[5]; - - if (send_first(target, WRITEPKT)) - goto bad; - setup_dma(buffer, WRITE, 3); - cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[4] = 0; - cmd_buffer[2] = length >> 8; - cmd_buffer[3] = length & 0xFF; - if (send_1_5(7, cmd_buffer, 1) || - !acsi_wait_for_IRQ(TIMEOUTDMA) || - get_status()) - goto bad; - ret = 0; -bad: - return (ret); -} - -/* The following higher level routines work on virtual addresses and convert them to - * physical addresses when passed to the lowlevel routines. It's up to the higher level - * routines to copy data from Alternate RAM to ST RAM if neccesary! - */ - -/* Check for a network adaptor of this type, and return '0' if one exists. - */ - -struct net_device * __init pamsnet_probe (int unit) -{ - struct net_device *dev; - int i; - HADDR *hwaddr; - int err; - - unsigned char station_addr[6]; - static unsigned version_printed; - /* avoid "Probing for..." printed 4 times - the driver is supporting only one adapter now! */ - static int no_more_found; - - if (no_more_found) - return ERR_PTR(-ENODEV); - no_more_found = 1; - - dev = alloc_etherdev(sizeof(struct net_local)); - if (!dev) - return ERR_PTR(-ENOMEM); - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } - SET_MODULE_OWNER(dev); - - printk("Probing for PAM's Net/GK Adapter...\n"); - - /* Allocate the DMA buffer here since we need it for probing! */ - - nic_packet = (struct nic_pkt_s *)acsi_buffer; - phys_nic_packet = (unsigned char *)phys_acsi_buffer; - if (pamsnet_debug > 0) { - printk("nic_packet at 0x%p, phys at 0x%p\n", - nic_packet, phys_nic_packet ); - } - - stdma_lock(pamsnet_intr, NULL); - DISABLE_IRQ(); - - for (i=0; i<8; i++) { - /* Do two inquiries to cover cases with strange equipment on previous ID */ - /* blocking the ACSI bus (like the SLMC804 laser printer controller... 
*/ - inquiry(i, phys_nic_packet); - if (!inquiry(i, phys_nic_packet)) { - lance_target = i; - break; - } - } - - if (!!NET_DEBUG) - printk("ID: %d\n",i); - - if (lance_target >= 0) { - if (!(hwaddr = read_hw_addr(lance_target, phys_nic_packet))) - lance_target = -1; - else - memcpy (station_addr, hwaddr, ETH_ALEN); - } - - ENABLE_IRQ(); - stdma_release(); - - if (lance_target < 0) { - printk("No PAM's Net/GK found.\n"); - free_netdev(dev); - return ERR_PTR(-ENODEV); - } - - if (pamsnet_debug > 0 && version_printed++ == 0) - printk(version); - - printk("%s: %s found on target %01d, eth-addr: %02x:%02x:%02x:%02x:%02x:%02x.\n", - dev->name, "PAM's Net/GK", lance_target, - station_addr[0], station_addr[1], station_addr[2], - station_addr[3], station_addr[4], station_addr[5]); - - /* Initialize the device structure. */ - dev->open = pamsnet_open; - dev->stop = pamsnet_close; - dev->hard_start_xmit = pamsnet_send_packet; - dev->get_stats = net_get_stats; - - /* Fill in the fields of the device structure with ethernet-generic - * values. This should be in a common file instead of per-driver. - */ - - for (i = 0; i < ETH_ALEN; i++) { -#if 0 - dev->broadcast[i] = 0xff; -#endif - dev->dev_addr[i] = station_addr[i]; - } - err = register_netdev(dev); - if (!err) - return dev; - - free_netdev(dev); - return ERR_PTR(err); -} - -/* Open/initialize the board. This is called (in the current kernel) - sometime after booting when the 'ifconfig' program is run. - - This routine should set everything up anew at each open, even - registers that "should" only need to be set once at boot, so that - there is non-reboot way to recover if something goes wrong. - */ -static int -pamsnet_open(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - - if (pamsnet_debug > 0) - printk("pamsnet_open\n"); - stdma_lock(pamsnet_intr, NULL); - DISABLE_IRQ(); - - /* Reset the hardware here. - */ - if (!if_up) - start(lance_target); - if_up = 1; - lp->open_time = 0; /*jiffies*/ - lp->poll_time = MAX_POLL_TIME; - - dev->tbusy = 0; - dev->interrupt = 0; - dev->start = 1; - - ENABLE_IRQ(); - stdma_release(); - pamsnet_timer.data = (long)dev; - pamsnet_timer.expires = jiffies + lp->poll_time; - add_timer(&pamsnet_timer); - return 0; -} - -static int -pamsnet_send_packet(struct sk_buff *skb, struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - unsigned long flags; - - /* Block a timer-based transmit from overlapping. This could better be - * done with atomic_swap(1, dev->tbusy), but set_bit() works as well. - */ - local_irq_save(flags); - - if (stdma_islocked()) { - local_irq_restore(flags); - lp->stats.tx_errors++; - } - else { - int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; - unsigned long buf = virt_to_phys(skb->data); - int stat; - - stdma_lock(pamsnet_intr, NULL); - DISABLE_IRQ(); - - local_irq_restore(flags); - if( !STRAM_ADDR(buf+length-1) ) { - skb_copy_from_linear_data(skb, nic_packet->buffer, - length); - buf = (unsigned long)phys_nic_packet; - } - - dma_cache_maintenance(buf, length, 1); - - stat = sendpkt(lance_target, (unsigned char *)buf, length); - ENABLE_IRQ(); - stdma_release(); - - dev->trans_start = jiffies; - dev->tbusy = 0; - lp->stats.tx_packets++; - lp->stats.tx_bytes+=length; - } - dev_kfree_skb(skb); - - return 0; -} - -/* We have a good packet(s), get it/them out of the buffers. 
- */ -static void -pamsnet_poll_rx(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - int boguscount; - int pkt_len; - struct sk_buff *skb; - unsigned long flags; - - local_irq_save(flags); - /* ++roman: Take care at locking the ST-DMA... This must be done with ints - * off, since otherwise an int could slip in between the question and the - * locking itself, and then we'd go to sleep... And locking itself is - * necessary to keep the floppy_change timer from working with ST-DMA - * registers. */ - if (stdma_islocked()) { - local_irq_restore(flags); - return; - } - stdma_lock(pamsnet_intr, NULL); - DISABLE_IRQ(); - local_irq_restore(flags); - - boguscount = testpkt(lance_target); - if( lp->poll_time < MAX_POLL_TIME ) lp->poll_time++; - - while(boguscount--) { - pkt_len = receivepkt(lance_target, phys_nic_packet); - - if( pkt_len < 60 ) break; - - /* Good packet... */ - - dma_cache_maintenance((unsigned long)phys_nic_packet, pkt_len, 0); - - lp->poll_time = pamsnet_min_poll_time; /* fast poll */ - if( pkt_len >= 60 && pkt_len <= 2048 ) { - if (pkt_len > 1514) - pkt_len = 1514; - - /* Malloc up new buffer. - */ - skb = alloc_skb(pkt_len, GFP_ATOMIC); - if (skb == NULL) { - printk("%s: Memory squeeze, dropping packet.\n", - dev->name); - lp->stats.rx_dropped++; - break; - } - skb->len = pkt_len; - skb->dev = dev; - - /* 'skb->data' points to the start of sk_buff data area. - */ - skb_copy_to_linear_data(skb, nic_packet->buffer, - pkt_len); - netif_rx(skb); - dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes+=pkt_len; - } - } - - /* If any worth-while packets have been received, dev_rint() - has done a mark_bh(INET_BH) for us and will work on them - when we get to the bottom-half routine. - */ - - ENABLE_IRQ(); - stdma_release(); - return; -} - -/* pamsnet_tick: called by pamsnet_timer. Reads packets from the adapter, - * passes them to the higher layers and restarts the timer. - */ -static void -pamsnet_tick(unsigned long data) -{ - struct net_device *dev = (struct net_device *)data; - struct net_local *lp = netdev_priv(dev); - - if( pamsnet_debug > 0 && (lp->open_time++ & 7) == 8 ) - printk("pamsnet_tick: %ld\n", lp->open_time); - - pamsnet_poll_rx(dev); - - pamsnet_timer.expires = jiffies + lp->poll_time; - add_timer(&pamsnet_timer); -} - -/* The inverse routine to pamsnet_open(). - */ -static int -pamsnet_close(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - - if (pamsnet_debug > 0) - printk("pamsnet_close, open_time=%ld\n", lp->open_time); - del_timer(&pamsnet_timer); - stdma_lock(pamsnet_intr, NULL); - DISABLE_IRQ(); - - if (if_up) - stop(lance_target); - if_up = 0; - - lp->open_time = 0; - - dev->tbusy = 1; - dev->start = 0; - - ENABLE_IRQ(); - stdma_release(); - return 0; -} - -/* Get the current statistics. - This may be called with the card open or closed. 
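
Before moving on: pamsnet_poll_rx() above runs from a timer, so it cannot sleep waiting for the ST-DMA lock; it tests stdma_islocked() with interrupts off and simply skips the poll when the bus is busy. A rough userspace analogue of that back-off, with a pthread mutex standing in for the ST-DMA lock (a model of the control flow only, not the kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t st_dma = PTHREAD_MUTEX_INITIALIZER;

/* One poll attempt: if the bus lock is held (say, by the floppy
 * driver), give up immediately and let the next timer tick retry. */
static int poll_rx_once(void)
{
        if (pthread_mutex_trylock(&st_dma) != 0)
                return 0;               /* busy: skip, never sleep here */

        /* ... testpkt()/receivepkt() would run with the bus held ... */

        pthread_mutex_unlock(&st_dma);
        return 1;
}

int main(void)
{
        printf("polled: %d\n", poll_rx_once());
        return 0;
}
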
- */ -static struct net_device_stats *net_get_stats(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - return &lp->stats; -} - - -#ifdef MODULE - -static struct net_device *pam_dev; - -int init_module(void) -{ - pam_dev = pamsnet_probe(-1); - if (IS_ERR(pam_dev)) - return PTR_ERR(pam_dev); - return 0; -} - -void cleanup_module(void) -{ - unregister_netdev(pam_dev); - free_netdev(pam_dev); -} - -#endif /* MODULE */ - -/* Local variables: - * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/include - -b m68k-linuxaout -Wall -Wstrict-prototypes -O2 - -fomit-frame-pointer -pipe -DMODULE -I../../net/inet -c atari_pamsnet.c" - * version-control: t - * kept-new-versions: 5 - * tab-width: 8 - * End: - */ diff --git a/drivers/net/atl1/atl1.h b/drivers/net/atl1/atl1.h index b1c6034e68fa..ff4765f6c3de 100644 --- a/drivers/net/atl1/atl1.h +++ b/drivers/net/atl1/atl1.h @@ -43,6 +43,7 @@ extern const struct ethtool_ops atl1_ethtool_ops; struct atl1_adapter; #define ATL1_MAX_INTR 3 +#define ATL1_MAX_TX_BUF_LEN 0x3000 /* 12288 bytes */ #define ATL1_DEFAULT_TPD 256 #define ATL1_MAX_TPD 1024 @@ -57,29 +58,45 @@ struct atl1_adapter; #define ATL1_RRD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_return_desc) /* + * This detached comment is preserved for documentation purposes only. + * It was originally attached to some code that got deleted, but seems + * important enough to keep around... + * + * <begin detached comment> * Some workarounds require millisecond delays and are run during interrupt * context. Most notably, when establishing link, the phy may need tweaking * but cannot process phy register reads/writes faster than millisecond * intervals...and we establish link due to a "link status change" interrupt. + * <end detached comment> + */ + +/* + * atl1_ring_header represents a single, contiguous block of DMA space + * mapped for the three descriptor rings (tpd, rfd, rrd) and the two + * message blocks (cmb, smb) described below */ +struct atl1_ring_header { + void *desc; /* virtual address */ + dma_addr_t dma; /* physical address*/ + unsigned int size; /* length in bytes */ +}; /* - * wrapper around a pointer to a socket buffer, - * so a DMA handle can be stored along with the buffer + * atl1_buffer is wrapper around a pointer to a socket buffer + * so a DMA handle can be stored along with the skb */ struct atl1_buffer { - struct sk_buff *skb; - u16 length; - u16 alloced; + struct sk_buff *skb; /* socket buffer */ + u16 length; /* rx buffer length */ + u16 alloced; /* 1 if skb allocated */ dma_addr_t dma; }; -#define MAX_TX_BUF_LEN 0x3000 /* 12KB */ - +/* transmit packet descriptor (tpd) ring */ struct atl1_tpd_ring { - void *desc; /* pointer to the descriptor ring memory */ - dma_addr_t dma; /* physical adress of the descriptor ring */ - u16 size; /* length of descriptor ring in bytes */ + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* descriptor ring length in bytes */ u16 count; /* number of descriptors in the ring */ u16 hw_idx; /* hardware index */ atomic_t next_to_clean; @@ -87,36 +104,34 @@ struct atl1_tpd_ring { struct atl1_buffer *buffer_info; }; +/* receive free descriptor (rfd) ring */ struct atl1_rfd_ring { - void *desc; - dma_addr_t dma; - u16 size; - u16 count; + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ atomic_t next_to_use; u16 
next_to_clean; struct atl1_buffer *buffer_info; }; +/* receive return descriptor (rrd) ring */ struct atl1_rrd_ring { - void *desc; - dma_addr_t dma; - unsigned int size; - u16 count; + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + unsigned int size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ u16 next_to_use; atomic_t next_to_clean; }; -struct atl1_ring_header { - void *desc; /* pointer to the descriptor ring memory */ - dma_addr_t dma; /* physical adress of the descriptor ring */ - unsigned int size; /* length of descriptor ring in bytes */ -}; - +/* coalescing message block (cmb) */ struct atl1_cmb { struct coals_msg_block *cmb; dma_addr_t dma; }; +/* statistics message block (smb) */ struct atl1_smb { struct stats_msg_block *smb; dma_addr_t dma; @@ -141,24 +156,26 @@ struct atl1_sft_stats { u64 tx_aborted_errors; u64 tx_window_errors; u64 tx_carrier_errors; - - u64 tx_pause; /* num Pause packet transmitted. */ - u64 excecol; /* num tx packets aborted due to excessive collisions. */ - u64 deffer; /* num deferred tx packets */ - u64 scc; /* num packets subsequently transmitted successfully w/ single prior collision. */ - u64 mcc; /* num packets subsequently transmitted successfully w/ multiple prior collisions. */ + u64 tx_pause; /* num pause packets transmitted. */ + u64 excecol; /* num tx packets w/ excessive collisions. */ + u64 deffer; /* num tx packets deferred */ + u64 scc; /* num packets subsequently transmitted + * successfully w/ single prior collision. */ + u64 mcc; /* num packets subsequently transmitted + * successfully w/ multiple prior collisions. */ u64 latecol; /* num tx packets w/ late collisions. */ - u64 tx_underun; /* num tx packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */ - u64 tx_trunc; /* num tx packets truncated due to size exceeding MTU, regardless whether truncated by Selene or not. (The name doesn't really reflect the meaning in this case.) */ + u64 tx_underun; /* num tx packets aborted due to transmit + * FIFO underrun, or TRD FIFO underrun */ + u64 tx_trunc; /* num tx packets truncated due to size + * exceeding MTU, regardless whether truncated + * by the chip or not. (The name doesn't really + * reflect the meaning in this case.) */ u64 rx_pause; /* num Pause packets received. */ u64 rx_rrd_ov; u64 rx_trunc; }; -/* board specific private data structure */ -#define ATL1_REGS_LEN 8 - -/* Structure containing variables used by the shared code */ +/* hardware structure */ struct atl1_hw { u8 __iomem *hw_addr; struct atl1_adapter *back; @@ -167,24 +184,35 @@ struct atl1_hw { enum atl1_dma_req_block dmar_block; enum atl1_dma_req_block dmaw_block; u8 preamble_len; - u8 max_retry; /* Retransmission maximum, after which the packet will be discarded */ - u8 jam_ipg; /* IPG to start JAM for collision based flow control in half-duplex mode. In units of 8-bit time */ - u8 ipgt; /* Desired back to back inter-packet gap. The default is 96-bit time */ - u8 min_ifg; /* Minimum number of IFG to enforce in between RX frames. Frame gap below such IFP is dropped */ + u8 max_retry; /* Retransmission maximum, after which the + * packet will be discarded */ + u8 jam_ipg; /* IPG to start JAM for collision based flow + * control in half-duplex mode. In units of + * 8-bit time */ + u8 ipgt; /* Desired back to back inter-packet gap. + * The default is 96-bit time */ + u8 min_ifg; /* Minimum number of IFG to enforce in between + * receive frames. 
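
Each descriptor ring above pairs a producer index (next_to_use) with a consumer index (next_to_clean), both wrapping at count; by convention one slot stays unused so that equal indices always mean "empty". A toy version of the resulting free-slot arithmetic (illustrative only; the driver keeps some of these indices in atomic_t):

#include <stdio.h>

/* Free descriptors in a ring of 'count' entries, with one slot kept
 * unused so that use == clean unambiguously means an empty ring. */
static unsigned ring_free(unsigned use, unsigned clean, unsigned count)
{
        return (clean > use) ? clean - use - 1
                             : count + clean - use - 1;
}

int main(void)
{
        /* e.g. a 256-entry TPD ring: producer at 250, consumer at 10 */
        printf("%u free\n", ring_free(250, 10, 256));   /* prints 15 */
        return 0;
}
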
Frame gap below such IFG
+				 * is dropped */
	u8 ipgr1;		/* 64bit Carrier-Sense window */
	u8 ipgr2;		/* 96-bit IPG window */
-	u8 tpd_burst;	/* Number of TPD to prefetch in cache-aligned burst. Each TPD is 16 bytes long */
-	u8 rfd_burst;	/* Number of RFD to prefetch in cache-aligned burst. Each RFD is 12 bytes long */
+	u8 tpd_burst;		/* Number of TPD to prefetch in cache-aligned
+				 * burst. Each TPD is 16 bytes long */
+	u8 rfd_burst;		/* Number of RFD to prefetch in cache-aligned
+				 * burst. Each RFD is 12 bytes long */
	u8 rfd_fetch_gap;
-	u8 rrd_burst;	/* Threshold number of RRDs that can be retired in a burst. Each RRD is 16 bytes long */
+	u8 rrd_burst;		/* Threshold number of RRDs that can be retired
+				 * in a burst. Each RRD is 16 bytes long */
	u8 tpd_fetch_th;
	u8 tpd_fetch_gap;
	u16 tx_jumbo_task_th;
-	u16 txf_burst;	/* Number of data bytes to read in a cache-aligned burst. Each SRAM entry is
-			   8 bytes long */
-	u16 rx_jumbo_th;	/* Jumbo packet size for non-VLAN packet. VLAN packets should add 4 bytes */
+	u16 txf_burst;		/* Number of data bytes to read in a cache-
+				 * aligned burst. Each SRAM entry is 8 bytes */
+	u16 rx_jumbo_th;	/* Jumbo packet size for non-VLAN packet. VLAN
+				 * packets should add 4 bytes */
	u16 rx_jumbo_lkah;
-	u16 rrd_ret_timer;	/* RRD retirement timer. Decrement by 1 after every 512ns passes. */
+	u16 rrd_ret_timer;	/* RRD retirement timer. Decremented by 1
+				 * every 512ns. */
	u16 lcol;		/* Collision Window */

	u16 cmb_tpd;
@@ -194,49 +222,35 @@ struct atl1_hw {
	u32 smb_timer;
	u16 media_type;
	u16 autoneg_advertised;
-	u16 pci_cmd_word;
	u16 mii_autoneg_adv_reg;
	u16 mii_1000t_ctrl_reg;
-	u32 mem_rang;
-	u32 txcw;
	u32 max_frame_size;
	u32 min_frame_size;
-	u32 mc_filter_type;
-	u32 num_mc_addrs;
-	u32 collision_delta;
-	u32 tx_packet_delta;
-	u16 phy_spd_default;
	u16 dev_rev;
-	u8 revision_id;

	/* spi flash */
	u8 flash_vendor;
-	u8 dma_fairness;

	u8 mac_addr[ETH_ALEN];
	u8 perm_mac_addr[ETH_ALEN];
-	/* bool phy_preamble_sup; */
	bool phy_configured;
};

struct atl1_adapter {
-	/* OS defined structs */
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct net_device_stats net_stats;
	struct atl1_sft_stats soft_stats;
-	struct vlan_group *vlgrp;
	u32 rx_buffer_len;
	u32 wol;
	u16 link_speed;
	u16 link_duplex;
	spinlock_t lock;
-	atomic_t irq_sem;
	struct work_struct tx_timeout_task;
	struct work_struct link_chg_task;
	struct work_struct pcie_dma_to_rst_task;
@@ -244,9 +258,7 @@ struct atl1_adapter {
	struct timer_list phy_config_timer;
	bool phy_timer_pending;

-	bool mac_disabled;
-
-	/* All descriptor rings' memory */
+	/* all descriptor rings' memory */
	struct atl1_ring_header ring_header;

	/* TX */
@@ -259,25 +271,16 @@
	u64 hw_csum_err;
	u64 hw_csum_good;

-	u32 gorcl;
-	u64 gorcl_old;
-
-	/* Interrupt Moderator timer ( 2us resolution) */
-	u16 imt;
-	/* Interrupt Clear timer (2us resolution) */
-	u16 ict;
-
-	/* MII interface info */
-	struct mii_if_info mii;
+	u16 imt;		/* interrupt moderator timer (2us resolution) */
+	u16 ict;		/* interrupt clear timer (2us resolution) */
+	struct mii_if_info mii;	/* MII interface info */

	/* structs defined in atl1_hw.h */
-	u32 bd_number;		/* board number */
+	u32 bd_number;		/* board number */
	bool pci_using_64;
	struct atl1_hw hw;
	struct atl1_smb smb;
	struct atl1_cmb cmb;
-
-	u32 pci_state[16];
};

#endif /* _ATL1_H_ */
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index 3bb40dd4a410..fd1e156f1747 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -38,7 +38,7 @@
 * TODO:
 *
Fix TSO; tx performance is horrible with TSO enabled. * Wake on LAN. - * Add more ethtool functions, including set ring parameters. + * Add more ethtool functions. * Fix abstruse irq enable/disable condition described here: * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2 * @@ -75,6 +75,7 @@ #include <linux/compiler.h> #include <linux/delay.h> #include <linux/mii.h> +#include <linux/interrupt.h> #include <net/checksum.h> #include <asm/atomic.h> @@ -118,10 +119,6 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter) { struct atl1_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; - - /* PCI config space info */ - pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; @@ -162,13 +159,70 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter) hw->cmb_tx_timer = 1; /* about 2us */ hw->smb_timer = 100000; /* about 200ms */ - atomic_set(&adapter->irq_sem, 0); spin_lock_init(&adapter->lock); spin_lock_init(&adapter->mb_lock); return 0; } +static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + u16 result; + + atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result); + + return result; +} + +static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, + int val) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + atl1_write_phy_reg(&adapter->hw, reg_num, val); +} + +/* + * atl1_mii_ioctl - + * @netdev: + * @ifreq: + * @cmd: + */ +static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + unsigned long flags; + int retval; + + if (!netif_running(netdev)) + return -EINVAL; + + spin_lock_irqsave(&adapter->lock, flags); + retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); + spin_unlock_irqrestore(&adapter->lock, flags); + + return retval; +} + +/* + * atl1_ioctl - + * @netdev: + * @ifreq: + * @cmd: + */ +static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return atl1_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + /* * atl1_setup_mem_resources - allocate Tx / RX descriptor resources * @adapter: board private structure @@ -192,19 +246,22 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) goto err_nomem; } rfd_ring->buffer_info = - (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); + (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); - /* real ring DMA buffer */ - ring_header->size = size = sizeof(struct tx_packet_desc) * - tpd_ring->count - + sizeof(struct rx_free_desc) * rfd_ring->count - + sizeof(struct rx_return_desc) * rrd_ring->count - + sizeof(struct coals_msg_block) - + sizeof(struct stats_msg_block) - + 40; /* "40: for 8 bytes align" huh? -- CHS */ + /* real ring DMA buffer + * each ring/block may need up to 8 bytes for alignment, hence the + * additional 40 bytes tacked onto the end. 
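
The comment above accounts for the 40 spare bytes: each of the five regions (tpd, rfd, rrd, cmb, smb) is bumped to the next 8-byte boundary inside the one allocation, costing at most 8 bytes of slack apiece. The alignment step, pulled out into a standalone helper (hypothetical name) so the arithmetic is easy to check:

#include <stdint.h>
#include <stdio.h>

/* Same expression the ring setup uses: advance 'dma' to the next
 * 8-byte boundary, or leave it untouched if already aligned. */
static uint64_t align8(uint64_t dma)
{
        uint64_t offset = (dma & 0x7) ? (8 - (dma & 0x7)) : 0;
        return dma + offset;
}

int main(void)
{
        printf("0x%llx -> 0x%llx\n", 0x1003ULL,
               (unsigned long long)align8(0x1003));     /* 0x1008 */
        printf("0x%llx -> 0x%llx\n", 0x2000ULL,
               (unsigned long long)align8(0x2000));     /* 0x2000 */
        return 0;
}
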
+ */ + ring_header->size = size = + sizeof(struct tx_packet_desc) * tpd_ring->count + + sizeof(struct rx_free_desc) * rfd_ring->count + + sizeof(struct rx_return_desc) * rrd_ring->count + + sizeof(struct coals_msg_block) + + sizeof(struct stats_msg_block) + + 40; ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, - &ring_header->dma); + &ring_header->dma); if (unlikely(!ring_header->desc)) { dev_err(&pdev->dev, "pci_alloc_consistent failed\n"); goto err_nomem; @@ -218,8 +275,6 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) tpd_ring->dma += offset; tpd_ring->desc = (u8 *) ring_header->desc + offset; tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count; - atomic_set(&tpd_ring->next_to_use, 0); - atomic_set(&tpd_ring->next_to_clean, 0); /* init RFD ring */ rfd_ring->dma = tpd_ring->dma + tpd_ring->size; @@ -227,9 +282,7 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) rfd_ring->dma += offset; rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset); rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count; - rfd_ring->next_to_clean = 0; - /* rfd_ring->next_to_use = rfd_ring->count - 1; */ - atomic_set(&rfd_ring->next_to_use, 0); + /* init RRD ring */ rrd_ring->dma = rfd_ring->dma + rfd_ring->size; @@ -237,23 +290,22 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) rrd_ring->dma += offset; rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset); rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count; - rrd_ring->next_to_use = 0; - atomic_set(&rrd_ring->next_to_clean, 0); + /* init CMB */ adapter->cmb.dma = rrd_ring->dma + rrd_ring->size; offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0; adapter->cmb.dma += offset; - adapter->cmb.cmb = - (struct coals_msg_block *) ((u8 *) rrd_ring->desc + - (rrd_ring->size + offset)); + adapter->cmb.cmb = (struct coals_msg_block *) + ((u8 *) rrd_ring->desc + (rrd_ring->size + offset)); /* init SMB */ adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block); offset = (adapter->smb.dma & 0x7) ? 
(8 - (adapter->smb.dma & 0x7)) : 0; adapter->smb.dma += offset; adapter->smb.smb = (struct stats_msg_block *) - ((u8 *) adapter->cmb.cmb + (sizeof(struct coals_msg_block) + offset)); + ((u8 *) adapter->cmb.cmb + + (sizeof(struct coals_msg_block) + offset)); return ATL1_SUCCESS; @@ -262,559 +314,133 @@ err_nomem: return -ENOMEM; } -/* - * atl1_irq_enable - Enable default interrupt generation settings - * @adapter: board private structure - */ -static void atl1_irq_enable(struct atl1_adapter *adapter) -{ - if (likely(!atomic_dec_and_test(&adapter->irq_sem))) - iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR); -} - -static void atl1_clear_phy_int(struct atl1_adapter *adapter) -{ - u16 phy_data; - unsigned long flags; - - spin_lock_irqsave(&adapter->lock, flags); - atl1_read_phy_reg(&adapter->hw, 19, &phy_data); - spin_unlock_irqrestore(&adapter->lock, flags); -} - -static void atl1_inc_smb(struct atl1_adapter *adapter) -{ - struct stats_msg_block *smb = adapter->smb.smb; - - /* Fill out the OS statistics structure */ - adapter->soft_stats.rx_packets += smb->rx_ok; - adapter->soft_stats.tx_packets += smb->tx_ok; - adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; - adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; - adapter->soft_stats.multicast += smb->rx_mcast; - adapter->soft_stats.collisions += (smb->tx_1_col + - smb->tx_2_col * 2 + - smb->tx_late_col + - smb->tx_abort_col * - adapter->hw.max_retry); - - /* Rx Errors */ - adapter->soft_stats.rx_errors += (smb->rx_frag + - smb->rx_fcs_err + - smb->rx_len_err + - smb->rx_sz_ov + - smb->rx_rxf_ov + - smb->rx_rrd_ov + smb->rx_align_err); - adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; - adapter->soft_stats.rx_length_errors += smb->rx_len_err; - adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; - adapter->soft_stats.rx_frame_errors += smb->rx_align_err; - adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov + - smb->rx_rxf_ov); - - adapter->soft_stats.rx_pause += smb->rx_pause; - adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; - adapter->soft_stats.rx_trunc += smb->rx_sz_ov; - - /* Tx Errors */ - adapter->soft_stats.tx_errors += (smb->tx_late_col + - smb->tx_abort_col + - smb->tx_underrun + smb->tx_trunc); - adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; - adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; - adapter->soft_stats.tx_window_errors += smb->tx_late_col; - - adapter->soft_stats.excecol += smb->tx_abort_col; - adapter->soft_stats.deffer += smb->tx_defer; - adapter->soft_stats.scc += smb->tx_1_col; - adapter->soft_stats.mcc += smb->tx_2_col; - adapter->soft_stats.latecol += smb->tx_late_col; - adapter->soft_stats.tx_underun += smb->tx_underrun; - adapter->soft_stats.tx_trunc += smb->tx_trunc; - adapter->soft_stats.tx_pause += smb->tx_pause; - - adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets; - adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets; - adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes; - adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes; - adapter->net_stats.multicast = adapter->soft_stats.multicast; - adapter->net_stats.collisions = adapter->soft_stats.collisions; - adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors; - adapter->net_stats.rx_over_errors = - adapter->soft_stats.rx_missed_errors; - adapter->net_stats.rx_length_errors = - adapter->soft_stats.rx_length_errors; - adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; - adapter->net_stats.rx_frame_errors = - adapter->soft_stats.rx_frame_errors; - 
adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; - adapter->net_stats.rx_missed_errors = - adapter->soft_stats.rx_missed_errors; - adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors; - adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; - adapter->net_stats.tx_aborted_errors = - adapter->soft_stats.tx_aborted_errors; - adapter->net_stats.tx_window_errors = - adapter->soft_stats.tx_window_errors; - adapter->net_stats.tx_carrier_errors = - adapter->soft_stats.tx_carrier_errors; -} - -static void atl1_rx_checksum(struct atl1_adapter *adapter, - struct rx_return_desc *rrd, - struct sk_buff *skb) +void atl1_init_ring_ptrs(struct atl1_adapter *adapter) { - skb->ip_summed = CHECKSUM_NONE; - - if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { - if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | - ERR_FLAG_CODE | ERR_FLAG_OV)) { - adapter->hw_csum_err++; - dev_dbg(&adapter->pdev->dev, "rx checksum error\n"); - return; - } - } + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; - /* not IPv4 */ - if (!(rrd->pkt_flg & PACKET_FLAG_IPV4)) - /* checksum is invalid, but it's not an IPv4 pkt, so ok */ - return; + atomic_set(&tpd_ring->next_to_use, 0); + atomic_set(&tpd_ring->next_to_clean, 0); - /* IPv4 packet */ - if (likely(!(rrd->err_flg & - (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - adapter->hw_csum_good++; - return; - } + rfd_ring->next_to_clean = 0; + atomic_set(&rfd_ring->next_to_use, 0); - /* IPv4, but hardware thinks its checksum is wrong */ - dev_dbg(&adapter->pdev->dev, - "hw csum wrong, pkt_flag:%x, err_flag:%x\n", - rrd->pkt_flg, rrd->err_flg); - skb->ip_summed = CHECKSUM_COMPLETE; - skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum); - adapter->hw_csum_err++; - return; + rrd_ring->next_to_use = 0; + atomic_set(&rrd_ring->next_to_clean, 0); } /* - * atl1_alloc_rx_buffers - Replace used receive buffers - * @adapter: address of board private structure + * atl1_clean_rx_ring - Free RFD Buffers + * @adapter: board private structure */ -static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) -{ - struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; - struct pci_dev *pdev = adapter->pdev; - struct page *page; - unsigned long offset; - struct atl1_buffer *buffer_info, *next_info; - struct sk_buff *skb; - u16 num_alloc = 0; - u16 rfd_next_to_use, next_next; - struct rx_free_desc *rfd_desc; - - next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use); - if (++next_next == rfd_ring->count) - next_next = 0; - buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; - next_info = &rfd_ring->buffer_info[next_next]; - - while (!buffer_info->alloced && !next_info->alloced) { - if (buffer_info->skb) { - buffer_info->alloced = 1; - goto next; - } - - rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); - - skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN); - if (unlikely(!skb)) { /* Better luck next round */ - adapter->net_stats.rx_dropped++; - break; - } - - /* - * Make buffer alignment 2 beyond a 16 byte boundary - * this will result in a 16 byte aligned IP header after - * the 14 byte MAC header is removed - */ - skb_reserve(skb, NET_IP_ALIGN); - - buffer_info->alloced = 1; - buffer_info->skb = skb; - buffer_info->length = (u16) adapter->rx_buffer_len; - page = virt_to_page(skb->data); - offset = (unsigned long)skb->data & ~PAGE_MASK; - buffer_info->dma = pci_map_page(pdev, page, 
offset, - adapter->rx_buffer_len, - PCI_DMA_FROMDEVICE); - rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); - rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len); - rfd_desc->coalese = 0; - -next: - rfd_next_to_use = next_next; - if (unlikely(++next_next == rfd_ring->count)) - next_next = 0; - - buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; - next_info = &rfd_ring->buffer_info[next_next]; - num_alloc++; - } - - if (num_alloc) { - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); - atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use); - } - return num_alloc; -} - -static void atl1_intr_rx(struct atl1_adapter *adapter) +static void atl1_clean_rx_ring(struct atl1_adapter *adapter) { - int i, count; - u16 length; - u16 rrd_next_to_clean; - u32 value; struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; struct atl1_buffer *buffer_info; - struct rx_return_desc *rrd; - struct sk_buff *skb; - - count = 0; - - rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); - - while (1) { - rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); - i = 1; - if (likely(rrd->xsz.valid)) { /* packet valid */ -chk_rrd: - /* check rrd status */ - if (likely(rrd->num_buf == 1)) - goto rrd_ok; - - /* rrd seems to be bad */ - if (unlikely(i-- > 0)) { - /* rrd may not be DMAed completely */ - dev_dbg(&adapter->pdev->dev, - "incomplete RRD DMA transfer\n"); - udelay(1); - goto chk_rrd; - } - /* bad rrd */ - dev_dbg(&adapter->pdev->dev, "bad RRD\n"); - /* see if update RFD index */ - if (rrd->num_buf > 1) { - u16 num_buf; - num_buf = - (rrd->xsz.xsum_sz.pkt_size + - adapter->rx_buffer_len - - 1) / adapter->rx_buffer_len; - if (rrd->num_buf == num_buf) { - /* clean alloc flag for bad rrd */ - while (rfd_ring->next_to_clean != - (rrd->buf_indx + num_buf)) { - rfd_ring->buffer_info[rfd_ring-> - next_to_clean].alloced = 0; - if (++rfd_ring->next_to_clean == - rfd_ring->count) { - rfd_ring-> - next_to_clean = 0; - } - } - } - } - - /* update rrd */ - rrd->xsz.valid = 0; - if (++rrd_next_to_clean == rrd_ring->count) - rrd_next_to_clean = 0; - count++; - continue; - } else { /* current rrd still not be updated */ + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; - break; - } -rrd_ok: - /* clean alloc flag for bad rrd */ - while (rfd_ring->next_to_clean != rrd->buf_indx) { - rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = - 0; - if (++rfd_ring->next_to_clean == rfd_ring->count) - rfd_ring->next_to_clean = 0; + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rfd_ring->count; i++) { + buffer_info = &rfd_ring->buffer_info[i]; + if (buffer_info->dma) { + pci_unmap_page(pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; } - - buffer_info = &rfd_ring->buffer_info[rrd->buf_indx]; - if (++rfd_ring->next_to_clean == rfd_ring->count) - rfd_ring->next_to_clean = 0; - - /* update rrd next to clean */ - if (++rrd_next_to_clean == rrd_ring->count) - rrd_next_to_clean = 0; - count++; - - if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { - if (!(rrd->err_flg & - (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM - | ERR_FLAG_LEN))) { - /* packet error, don't need upstream */ - buffer_info->alloced = 0; - rrd->xsz.valid = 0; - continue; - } + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; } - - /* Good Receive */ - 
pci_unmap_page(adapter->pdev, buffer_info->dma, - buffer_info->length, PCI_DMA_FROMDEVICE); - skb = buffer_info->skb; - length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); - - skb_put(skb, length - ETHERNET_FCS_SIZE); - - /* Receive Checksum Offload */ - atl1_rx_checksum(adapter, rrd, skb); - skb->protocol = eth_type_trans(skb, adapter->netdev); - - if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) { - u16 vlan_tag = (rrd->vlan_tag >> 4) | - ((rrd->vlan_tag & 7) << 13) | - ((rrd->vlan_tag & 8) << 9); - vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag); - } else - netif_rx(skb); - - /* let protocol layer free skb */ - buffer_info->skb = NULL; - buffer_info->alloced = 0; - rrd->xsz.valid = 0; - - adapter->netdev->last_rx = jiffies; } - atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean); - - atl1_alloc_rx_buffers(adapter); + size = sizeof(struct atl1_buffer) * rfd_ring->count; + memset(rfd_ring->buffer_info, 0, size); - /* update mailbox ? */ - if (count) { - u32 tpd_next_to_use; - u32 rfd_next_to_use; - u32 rrd_next_to_clean; + /* Zero out the descriptor ring */ + memset(rfd_ring->desc, 0, rfd_ring->size); - spin_lock(&adapter->mb_lock); + rfd_ring->next_to_clean = 0; + atomic_set(&rfd_ring->next_to_use, 0); - tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); - rfd_next_to_use = - atomic_read(&adapter->rfd_ring.next_to_use); - rrd_next_to_clean = - atomic_read(&adapter->rrd_ring.next_to_clean); - value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << - MB_RFD_PROD_INDX_SHIFT) | - ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << - MB_RRD_CONS_INDX_SHIFT) | - ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << - MB_TPD_PROD_INDX_SHIFT); - iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); - spin_unlock(&adapter->mb_lock); - } + rrd_ring->next_to_use = 0; + atomic_set(&rrd_ring->next_to_clean, 0); } -static void atl1_intr_tx(struct atl1_adapter *adapter) +/* + * atl1_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + */ +static void atl1_clean_tx_ring(struct atl1_adapter *adapter) { struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_buffer *buffer_info; - u16 sw_tpd_next_to_clean; - u16 cmb_tpd_next_to_clean; - - sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean); - cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); - - while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) { - struct tx_packet_desc *tpd; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; - tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean); - buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean]; + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tpd_ring->count; i++) { + buffer_info = &tpd_ring->buffer_info[i]; if (buffer_info->dma) { - pci_unmap_page(adapter->pdev, buffer_info->dma, - buffer_info->length, PCI_DMA_TODEVICE); + pci_unmap_page(pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_TODEVICE); buffer_info->dma = 0; } + } + for (i = 0; i < tpd_ring->count; i++) { + buffer_info = &tpd_ring->buffer_info[i]; if (buffer_info->skb) { - dev_kfree_skb_irq(buffer_info->skb); + dev_kfree_skb_any(buffer_info->skb); buffer_info->skb = NULL; } - tpd->buffer_addr = 0; - tpd->desc.data = 0; - - if (++sw_tpd_next_to_clean == tpd_ring->count) - sw_tpd_next_to_clean = 0; - } - atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean); - - if (netif_queue_stopped(adapter->netdev) - && netif_carrier_ok(adapter->netdev)) - netif_wake_queue(adapter->netdev); -} - -static void atl1_check_for_link(struct atl1_adapter 
*adapter) -{ - struct net_device *netdev = adapter->netdev; - u16 phy_data = 0; - - spin_lock(&adapter->lock); - adapter->phy_timer_pending = false; - atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); - atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); - spin_unlock(&adapter->lock); - - /* notify upper layer link down ASAP */ - if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */ - if (netif_carrier_ok(netdev)) { /* old link state: Up */ - dev_info(&adapter->pdev->dev, "%s link is down\n", - netdev->name); - adapter->link_speed = SPEED_0; - netif_carrier_off(netdev); - netif_stop_queue(netdev); - } } - schedule_work(&adapter->link_chg_task); -} -/* - * atl1_intr - Interrupt Handler - * @irq: interrupt number - * @data: pointer to a network interface device structure - * @pt_regs: CPU registers structure - */ -static irqreturn_t atl1_intr(int irq, void *data) -{ - /*struct atl1_adapter *adapter = ((struct net_device *)data)->priv;*/ - struct atl1_adapter *adapter = netdev_priv(data); - u32 status; - u8 update_rx; - int max_ints = 10; - - status = adapter->cmb.cmb->int_stats; - if (!status) - return IRQ_NONE; - - update_rx = 0; - - do { - /* clear CMB interrupt status at once */ - adapter->cmb.cmb->int_stats = 0; - - if (status & ISR_GPHY) /* clear phy status */ - atl1_clear_phy_int(adapter); - - /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ - iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); - - /* check if SMB intr */ - if (status & ISR_SMB) - atl1_inc_smb(adapter); - - /* check if PCIE PHY Link down */ - if (status & ISR_PHY_LINKDOWN) { - dev_dbg(&adapter->pdev->dev, "pcie phy link down %x\n", - status); - if (netif_running(adapter->netdev)) { /* reset MAC */ - iowrite32(0, adapter->hw.hw_addr + REG_IMR); - schedule_work(&adapter->pcie_dma_to_rst_task); - return IRQ_HANDLED; - } - } - - /* check if DMA read/write error ? */ - if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { - dev_dbg(&adapter->pdev->dev, - "pcie DMA r/w error (status = 0x%x)\n", - status); - iowrite32(0, adapter->hw.hw_addr + REG_IMR); - schedule_work(&adapter->pcie_dma_to_rst_task); - return IRQ_HANDLED; - } - - /* link event */ - if (status & ISR_GPHY) { - adapter->soft_stats.tx_carrier_errors++; - atl1_check_for_link(adapter); - } - - /* transmit event */ - if (status & ISR_CMB_TX) - atl1_intr_tx(adapter); - - /* rx exception */ - if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | - ISR_RRD_OV | ISR_HOST_RFD_UNRUN | - ISR_HOST_RRD_OV | ISR_CMB_RX))) { - if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | - ISR_RRD_OV | ISR_HOST_RFD_UNRUN | - ISR_HOST_RRD_OV)) - dev_dbg(&adapter->pdev->dev, - "rx exception, ISR = 0x%x\n", status); - atl1_intr_rx(adapter); - } - - if (--max_ints < 0) - break; + size = sizeof(struct atl1_buffer) * tpd_ring->count; + memset(tpd_ring->buffer_info, 0, size); - } while ((status = adapter->cmb.cmb->int_stats)); + /* Zero out the descriptor ring */ + memset(tpd_ring->desc, 0, tpd_ring->size); - /* re-enable Interrupt */ - iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); - return IRQ_HANDLED; + atomic_set(&tpd_ring->next_to_use, 0); + atomic_set(&tpd_ring->next_to_clean, 0); } /* - * atl1_set_multi - Multicast and Promiscuous mode set - * @netdev: network interface device structure + * atl1_free_ring_resources - Free Tx / RX descriptor Resources + * @adapter: board private structure * - * The set_multi entry point is called whenever the multicast address - * list or the network interface flags are updated. 
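
The interrupt handler being moved above has an unusual shape: the pending causes live in the DMA-updated coalescing message block rather than in a device register, so the handler acks by zeroing the word, re-reads it after each pass, and caps the number of passes with max_ints. The control flow reduced to a compilable skeleton (a volatile variable models the DMA-written status word; illustrative only):

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t cmb_int_stats = 0x1;   /* pretend a cause is pending */

static int service_irq(void)
{
        uint32_t status = cmb_int_stats;
        int max_ints = 10;

        if (!status)
                return 0;                       /* not ours: IRQ_NONE */

        do {
                cmb_int_stats = 0;              /* ack all causes at once */
                /* ... dispatch on status bits: SMB, link, tx, rx ... */
                if (--max_ints < 0)
                        break;                  /* bound time in the handler */
        } while ((status = cmb_int_stats));     /* new causes may have landed */

        return 1;                               /* IRQ_HANDLED */
}

int main(void)
{
        printf("handled: %d\n", service_irq());
        return 0;
}
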
This routine is - * responsible for configuring the hardware for proper multicast, - * promiscuous mode, and all-multi behavior. + * Free all transmit software resources */ -static void atl1_set_multi(struct net_device *netdev) +void atl1_free_ring_resources(struct atl1_adapter *adapter) { - struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_hw *hw = &adapter->hw; - struct dev_mc_list *mc_ptr; - u32 rctl; - u32 hash_value; + struct pci_dev *pdev = adapter->pdev; + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_ring_header *ring_header = &adapter->ring_header; - /* Check for Promiscuous and All Multicast modes */ - rctl = ioread32(hw->hw_addr + REG_MAC_CTRL); - if (netdev->flags & IFF_PROMISC) - rctl |= MAC_CTRL_PROMIS_EN; - else if (netdev->flags & IFF_ALLMULTI) { - rctl |= MAC_CTRL_MC_ALL_EN; - rctl &= ~MAC_CTRL_PROMIS_EN; - } else - rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); + atl1_clean_tx_ring(adapter); + atl1_clean_rx_ring(adapter); - iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL); + kfree(tpd_ring->buffer_info); + pci_free_consistent(pdev, ring_header->size, ring_header->desc, + ring_header->dma); - /* clear the old settings from the multicast hash table */ - iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); - iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); + tpd_ring->buffer_info = NULL; + tpd_ring->desc = NULL; + tpd_ring->dma = 0; - /* compute mc addresses' hash value ,and put it into hash table */ - for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { - hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr); - atl1_hash_set(hw, hash_value); - } + rfd_ring->buffer_info = NULL; + rfd_ring->desc = NULL; + rfd_ring->dma = 0; + + rrd_ring->desc = NULL; + rrd_ring->dma = 0; } static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) @@ -855,6 +481,31 @@ static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) iowrite32(value, hw->hw_addr + REG_MAC_CTRL); } +/* + * atl1_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int atl1_set_mac(struct net_device *netdev, void *p) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (netif_running(netdev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); + + atl1_set_mac_addr(&adapter->hw); + return 0; +} + static u32 atl1_check_link(struct atl1_adapter *adapter) { struct atl1_hw *hw = &adapter->hw; @@ -962,6 +613,103 @@ static u32 atl1_check_link(struct atl1_adapter *adapter) return ATL1_SUCCESS; } +static void atl1_check_for_link(struct atl1_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 phy_data = 0; + + spin_lock(&adapter->lock); + adapter->phy_timer_pending = false; + atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + spin_unlock(&adapter->lock); + + /* notify upper layer link down ASAP */ + if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */ + if (netif_carrier_ok(netdev)) { /* old link state: Up */ + dev_info(&adapter->pdev->dev, "%s link is down\n", + netdev->name); + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + 
netif_stop_queue(netdev); + } + } + schedule_work(&adapter->link_chg_task); +} + +/* + * atl1_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. + */ +static void atl1_set_multi(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + struct dev_mc_list *mc_ptr; + u32 rctl; + u32 hash_value; + + /* Check for Promiscuous and All Multicast modes */ + rctl = ioread32(hw->hw_addr + REG_MAC_CTRL); + if (netdev->flags & IFF_PROMISC) + rctl |= MAC_CTRL_PROMIS_EN; + else if (netdev->flags & IFF_ALLMULTI) { + rctl |= MAC_CTRL_MC_ALL_EN; + rctl &= ~MAC_CTRL_PROMIS_EN; + } else + rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); + + iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL); + + /* clear the old settings from the multicast hash table */ + iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); + iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); + + /* compute mc addresses' hash value ,and put it into hash table */ + for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { + hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr); + atl1_hash_set(hw, hash_value); + } +} + +/* + * atl1_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int atl1_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + int old_mtu = netdev->mtu; + int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; + + if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); + return -EINVAL; + } + + adapter->hw.max_frame_size = max_frame; + adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; + adapter->rx_buffer_len = (max_frame + 7) & ~7; + adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; + + netdev->mtu = new_mtu; + if ((old_mtu != new_mtu) && netif_running(netdev)) { + atl1_down(adapter); + atl1_up(adapter); + } + + return 0; +} + static void set_flow_ctrl_old(struct atl1_adapter *adapter) { u32 hi, lo, value; @@ -974,7 +722,7 @@ static void set_flow_ctrl_old(struct atl1_adapter *adapter) lo = value * 7 / 8; value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | - ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); + ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH); /* RRD Flow Control */ @@ -984,7 +732,7 @@ static void set_flow_ctrl_old(struct atl1_adapter *adapter) if (lo < 2) lo = 2; value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | - ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); + ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH); } @@ -1001,7 +749,7 @@ static void set_flow_ctrl_new(struct atl1_hw *hw) if (hi < lo) hi = lo + 16; value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | - ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); + ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << 
RXQ_RXF_PAUSE_TH_LO_SHIFT); iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH); /* RRD Flow Control */ @@ -1013,7 +761,7 @@ static void set_flow_ctrl_new(struct atl1_hw *hw) if (hi < lo) hi = lo + 3; value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | - ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); + ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); } @@ -1062,7 +810,8 @@ static u32 atl1_configure(struct atl1_adapter *adapter) value <<= 16; value += adapter->rfd_ring.count; iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE); - iowrite32(adapter->tpd_ring.count, hw->hw_addr + REG_DESC_TPD_RING_SIZE); + iowrite32(adapter->tpd_ring.count, hw->hw_addr + + REG_DESC_TPD_RING_SIZE); /* Load Ptr */ iowrite32(1, hw->hw_addr + REG_LOAD_PTR); @@ -1070,31 +819,31 @@ static u32 atl1_configure(struct atl1_adapter *adapter) /* config Mailbox */ value = ((atomic_read(&adapter->tpd_ring.next_to_use) & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) | - ((atomic_read(&adapter->rrd_ring.next_to_clean) - & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | - ((atomic_read(&adapter->rfd_ring.next_to_use) - & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT); + ((atomic_read(&adapter->rrd_ring.next_to_clean) + & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | + ((atomic_read(&adapter->rfd_ring.next_to_use) + & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT); iowrite32(value, hw->hw_addr + REG_MAILBOX); /* config IPG/IFG */ value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK) << MAC_IPG_IFG_IPGT_SHIFT) | - (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) - << MAC_IPG_IFG_MIFG_SHIFT) | - (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) - << MAC_IPG_IFG_IPGR1_SHIFT) | - (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) - << MAC_IPG_IFG_IPGR2_SHIFT); + (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) + << MAC_IPG_IFG_MIFG_SHIFT) | + (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) + << MAC_IPG_IFG_IPGR1_SHIFT) | + (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) + << MAC_IPG_IFG_IPGR2_SHIFT); iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG); /* config Half-Duplex Control */ value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | - (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) - << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | - MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | - (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | - (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) - << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); + (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) + << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | + MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | + (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | + (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) + << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL); /* set Interrupt Moderator Timer */ @@ -1110,10 +859,10 @@ static u32 atl1_configure(struct atl1_adapter *adapter) /* jumbo size & rrd retirement timer */ value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) << RXQ_JMBOSZ_TH_SHIFT) | - (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK) - << RXQ_JMBO_LKAH_SHIFT) | - (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK) - << RXQ_RRD_TIMER_SHIFT); + (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK) + << RXQ_JMBO_LKAH_SHIFT) | + (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK) + << RXQ_RRD_TIMER_SHIFT); iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM); /* Flow Control */ @@ -1132,35 +881,36 @@ static u32 atl1_configure(struct atl1_adapter *adapter) /* 
config TXQ */
	value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
		<< TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
-		(((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
-		<< TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
-		(((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
-		<< TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN;
+		(((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
+		<< TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
+		(((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
+		<< TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE |
+		TXQ_CTRL_EN;
	iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);

	/* min tpd fetch gap & tx jumbo packet size threshold for task offload */
	value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
-		<< TX_JUMBO_TASK_TH_SHIFT) |
-		(((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
-		<< TX_TPD_MIN_IPG_SHIFT);
+		<< TX_JUMBO_TASK_TH_SHIFT) |
+		(((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
+		<< TX_TPD_MIN_IPG_SHIFT);
	iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);

	/* config RXQ */
	value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
-		<< RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
-		(((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
-		<< RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
-		(((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
-		<< RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) |
-		RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;
+		<< RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
+		(((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
+		<< RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
+		(((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
+		<< RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN |
+		RXQ_CTRL_EN;
	iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);

	/* config DMA Engine */
	value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
-		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
-		((((u32) hw->dmaw_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
-		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
-		DMA_CTRL_DMAR_EN | DMA_CTRL_DMAW_EN;
+		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
+		((((u32) hw->dmaw_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
+		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
+		DMA_CTRL_DMAW_EN;
	value |= (u32) hw->dma_ord;
	if (atl1_rcb_128 == hw->rcb_value)
		value |= DMA_CTRL_RCB_VALUE;
@@ -1190,56 +940,495 @@ static u32 atl1_configure(struct atl1_adapter *adapter)
}

/*
+ * atl1_pcie_patch - Patch for PCIE module
+ */
+static void atl1_pcie_patch(struct atl1_adapter *adapter)
+{
+	u32 value;
+
+	/* much vendor magic here */
+	value = 0x6500;
+	iowrite32(value, adapter->hw.hw_addr + 0x12FC);
+	/* pcie flow control mode change */
+	value = ioread32(adapter->hw.hw_addr + 0x1008);
+	value |= 0x8000;
+	iowrite32(value, adapter->hw.hw_addr + 0x1008);
+}
+
+/*
+ * On resume from ACPI sleep, some VIA motherboards are left with the
+ * Interrupt Disable bit (0x400) set in the PCI Command register, so
+ * interrupts stay masked. This function clears that bit again.
+ * Brackett, 2006/03/15 + */ +static void atl1_via_workaround(struct atl1_adapter *adapter) +{ + unsigned long value; + + value = ioread16(adapter->hw.hw_addr + PCI_COMMAND); + if (value & PCI_COMMAND_INTX_DISABLE) + value &= ~PCI_COMMAND_INTX_DISABLE; + iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND); +} + +/* + * atl1_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static void atl1_irq_enable(struct atl1_adapter *adapter) +{ + iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR); + ioread32(adapter->hw.hw_addr + REG_IMR); +} + +/* * atl1_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure */ static void atl1_irq_disable(struct atl1_adapter *adapter) { - atomic_inc(&adapter->irq_sem); iowrite32(0, adapter->hw.hw_addr + REG_IMR); ioread32(adapter->hw.hw_addr + REG_IMR); synchronize_irq(adapter->pdev->irq); } -static void atl1_vlan_rx_register(struct net_device *netdev, - struct vlan_group *grp) +static void atl1_clear_phy_int(struct atl1_adapter *adapter) { - struct atl1_adapter *adapter = netdev_priv(netdev); + u16 phy_data; unsigned long flags; - u32 ctrl; spin_lock_irqsave(&adapter->lock, flags); - /* atl1_irq_disable(adapter); */ - adapter->vlgrp = grp; + atl1_read_phy_reg(&adapter->hw, 19, &phy_data); + spin_unlock_irqrestore(&adapter->lock, flags); +} - if (grp) { - /* enable VLAN tag insert/strip */ - ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); - ctrl |= MAC_CTRL_RMV_VLAN; - iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); - } else { - /* disable VLAN tag insert/strip */ - ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); - ctrl &= ~MAC_CTRL_RMV_VLAN; - iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); +static void atl1_inc_smb(struct atl1_adapter *adapter) +{ + struct stats_msg_block *smb = adapter->smb.smb; + + /* Fill out the OS statistics structure */ + adapter->soft_stats.rx_packets += smb->rx_ok; + adapter->soft_stats.tx_packets += smb->tx_ok; + adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; + adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; + adapter->soft_stats.multicast += smb->rx_mcast; + adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 + + smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry); + + /* Rx Errors */ + adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err + + smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov + + smb->rx_rrd_ov + smb->rx_align_err); + adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; + adapter->soft_stats.rx_length_errors += smb->rx_len_err; + adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; + adapter->soft_stats.rx_frame_errors += smb->rx_align_err; + adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov + + smb->rx_rxf_ov); + + adapter->soft_stats.rx_pause += smb->rx_pause; + adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; + adapter->soft_stats.rx_trunc += smb->rx_sz_ov; + + /* Tx Errors */ + adapter->soft_stats.tx_errors += (smb->tx_late_col + + smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc); + adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; + adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; + adapter->soft_stats.tx_window_errors += smb->tx_late_col; + + adapter->soft_stats.excecol += smb->tx_abort_col; + adapter->soft_stats.deffer += smb->tx_defer; + adapter->soft_stats.scc += smb->tx_1_col; + adapter->soft_stats.mcc += smb->tx_2_col; + adapter->soft_stats.latecol += smb->tx_late_col; + adapter->soft_stats.tx_underun += smb->tx_underrun; + 
adapter->soft_stats.tx_trunc += smb->tx_trunc; + adapter->soft_stats.tx_pause += smb->tx_pause; + + adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets; + adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets; + adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes; + adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes; + adapter->net_stats.multicast = adapter->soft_stats.multicast; + adapter->net_stats.collisions = adapter->soft_stats.collisions; + adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors; + adapter->net_stats.rx_over_errors = + adapter->soft_stats.rx_missed_errors; + adapter->net_stats.rx_length_errors = + adapter->soft_stats.rx_length_errors; + adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; + adapter->net_stats.rx_frame_errors = + adapter->soft_stats.rx_frame_errors; + adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; + adapter->net_stats.rx_missed_errors = + adapter->soft_stats.rx_missed_errors; + adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors; + adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; + adapter->net_stats.tx_aborted_errors = + adapter->soft_stats.tx_aborted_errors; + adapter->net_stats.tx_window_errors = + adapter->soft_stats.tx_window_errors; + adapter->net_stats.tx_carrier_errors = + adapter->soft_stats.tx_carrier_errors; +} + +/* + * atl1_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. + */ +static struct net_device_stats *atl1_get_stats(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + return &adapter->net_stats; +} + +static void atl1_update_mailbox(struct atl1_adapter *adapter) +{ + unsigned long flags; + u32 tpd_next_to_use; + u32 rfd_next_to_use; + u32 rrd_next_to_clean; + u32 value; + + spin_lock_irqsave(&adapter->mb_lock, flags); + + tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); + rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use); + rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean); + + value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << + MB_RFD_PROD_INDX_SHIFT) | + ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << + MB_RRD_CONS_INDX_SHIFT) | + ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << + MB_TPD_PROD_INDX_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); + + spin_unlock_irqrestore(&adapter->mb_lock, flags); +} + +static void atl1_clean_alloc_flag(struct atl1_adapter *adapter, + struct rx_return_desc *rrd, u16 offset) +{ + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + + while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) { + rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0; + if (++rfd_ring->next_to_clean == rfd_ring->count) { + rfd_ring->next_to_clean = 0; + } } +} - /* atl1_irq_enable(adapter); */ - spin_unlock_irqrestore(&adapter->lock, flags); +static void atl1_update_rfd_index(struct atl1_adapter *adapter, + struct rx_return_desc *rrd) +{ + u16 num_buf; + + num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) / + adapter->rx_buffer_len; + if (rrd->num_buf == num_buf) + /* clean alloc flag for bad rrd */ + atl1_clean_alloc_flag(adapter, rrd, num_buf); } -static void atl1_restore_vlan(struct atl1_adapter *adapter) +static void atl1_rx_checksum(struct atl1_adapter *adapter, + struct rx_return_desc *rrd, struct sk_buff *skb) { - 
atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp); + struct pci_dev *pdev = adapter->pdev; + + skb->ip_summed = CHECKSUM_NONE; + + if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { + if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | + ERR_FLAG_CODE | ERR_FLAG_OV)) { + adapter->hw_csum_err++; + dev_printk(KERN_DEBUG, &pdev->dev, + "rx checksum error\n"); + return; + } + } + + /* not IPv4 */ + if (!(rrd->pkt_flg & PACKET_FLAG_IPV4)) + /* checksum is invalid, but it's not an IPv4 pkt, so ok */ + return; + + /* IPv4 packet */ + if (likely(!(rrd->err_flg & + (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + adapter->hw_csum_good++; + return; + } + + /* IPv4, but hardware thinks its checksum is wrong */ + dev_printk(KERN_DEBUG, &pdev->dev, + "hw csum wrong, pkt_flag:%x, err_flag:%x\n", + rrd->pkt_flg, rrd->err_flg); + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum); + adapter->hw_csum_err++; + return; +} + +/* + * atl1_alloc_rx_buffers - Replace used receive buffers + * @adapter: address of board private structure + */ +static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) +{ + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct pci_dev *pdev = adapter->pdev; + struct page *page; + unsigned long offset; + struct atl1_buffer *buffer_info, *next_info; + struct sk_buff *skb; + u16 num_alloc = 0; + u16 rfd_next_to_use, next_next; + struct rx_free_desc *rfd_desc; + + next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use); + if (++next_next == rfd_ring->count) + next_next = 0; + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; + + while (!buffer_info->alloced && !next_info->alloced) { + if (buffer_info->skb) { + buffer_info->alloced = 1; + goto next; + } + + rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); + + skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN); + if (unlikely(!skb)) { /* Better luck next round */ + adapter->net_stats.rx_dropped++; + break; + } + + /* + * Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); + + buffer_info->alloced = 1; + buffer_info->skb = skb; + buffer_info->length = (u16) adapter->rx_buffer_len; + page = virt_to_page(skb->data); + offset = (unsigned long)skb->data & ~PAGE_MASK; + buffer_info->dma = pci_map_page(pdev, page, offset, + adapter->rx_buffer_len, + PCI_DMA_FROMDEVICE); + rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len); + rfd_desc->coalese = 0; + +next: + rfd_next_to_use = next_next; + if (unlikely(++next_next == rfd_ring->count)) + next_next = 0; + + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; + num_alloc++; + } + + if (num_alloc) { + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
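+ * Without the barrier the device could observe the new + * next_to_use index before the descriptor writes above have + * become visible in memory.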
+ */ + wmb(); + atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use); + } + return num_alloc; +} + +static void atl1_intr_rx(struct atl1_adapter *adapter) +{ + int i, count; + u16 length; + u16 rrd_next_to_clean; + u32 value; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_buffer *buffer_info; + struct rx_return_desc *rrd; + struct sk_buff *skb; + + count = 0; + + rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); + + while (1) { + rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); + i = 1; + if (likely(rrd->xsz.valid)) { /* packet valid */ +chk_rrd: + /* check rrd status */ + if (likely(rrd->num_buf == 1)) + goto rrd_ok; + + /* rrd seems to be bad */ + if (unlikely(i-- > 0)) { + /* rrd may not be DMAed completely */ + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "incomplete RRD DMA transfer\n"); + udelay(1); + goto chk_rrd; + } + /* bad rrd */ + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "bad RRD\n"); + /* see if the RFD index needs updating */ + if (rrd->num_buf > 1) + atl1_update_rfd_index(adapter, rrd); + + /* update rrd */ + rrd->xsz.valid = 0; + if (++rrd_next_to_clean == rrd_ring->count) + rrd_next_to_clean = 0; + count++; + continue; + } else { /* the current rrd has not been updated yet */ + + break; + } +rrd_ok: + /* clear the alloc flag for the consumed rfd */ + atl1_clean_alloc_flag(adapter, rrd, 0); + + buffer_info = &rfd_ring->buffer_info[rrd->buf_indx]; + if (++rfd_ring->next_to_clean == rfd_ring->count) + rfd_ring->next_to_clean = 0; + + /* update rrd next to clean */ + if (++rrd_next_to_clean == rrd_ring->count) + rrd_next_to_clean = 0; + count++; + + if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { + if (!(rrd->err_flg & + (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM + | ERR_FLAG_LEN))) { + /* packet error, no need to pass it upstream */ + buffer_info->alloced = 0; + rrd->xsz.valid = 0; + continue; + } + } + + /* Good Receive */ + pci_unmap_page(adapter->pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_FROMDEVICE); + skb = buffer_info->skb; + length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); + + skb_put(skb, length - ETHERNET_FCS_SIZE); + + /* Receive Checksum Offload */ + atl1_rx_checksum(adapter, rrd, skb); + skb->protocol = eth_type_trans(skb, adapter->netdev); + + if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) { + u16 vlan_tag = (rrd->vlan_tag >> 4) | + ((rrd->vlan_tag & 7) << 13) | + ((rrd->vlan_tag & 8) << 9); + vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag); + } else + netif_rx(skb); + + /* let the protocol layer free the skb */ + buffer_info->skb = NULL; + buffer_info->alloced = 0; + rrd->xsz.valid = 0; + + adapter->netdev->last_rx = jiffies; + } + + atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean); + + atl1_alloc_rx_buffers(adapter); + + /* update the mailbox?
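+ * Done inline with plain spin_lock rather than via + * atl1_update_mailbox(): this path runs from the interrupt + * handler, so the irqsave variant is unnecessary.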
*/ + if (count) { + u32 tpd_next_to_use; + u32 rfd_next_to_use; + u32 rrd_next_to_clean; + + spin_lock(&adapter->mb_lock); + + tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); + rfd_next_to_use = + atomic_read(&adapter->rfd_ring.next_to_use); + rrd_next_to_clean = + atomic_read(&adapter->rrd_ring.next_to_clean); + value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << + MB_RFD_PROD_INDX_SHIFT) | + ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << + MB_RRD_CONS_INDX_SHIFT) | + ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << + MB_TPD_PROD_INDX_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); + spin_unlock(&adapter->mb_lock); + } +} + +static void atl1_intr_tx(struct atl1_adapter *adapter) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_buffer *buffer_info; + u16 sw_tpd_next_to_clean; + u16 cmb_tpd_next_to_clean; + + sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean); + cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); + + while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) { + struct tx_packet_desc *tpd; + + tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean); + buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean]; + if (buffer_info->dma) { + pci_unmap_page(adapter->pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_TODEVICE); + buffer_info->dma = 0; + } + + if (buffer_info->skb) { + dev_kfree_skb_irq(buffer_info->skb); + buffer_info->skb = NULL; + } + tpd->buffer_addr = 0; + tpd->desc.data = 0; + + if (++sw_tpd_next_to_clean == tpd_ring->count) + sw_tpd_next_to_clean = 0; + } + atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean); + + if (netif_queue_stopped(adapter->netdev) + && netif_carrier_ok(adapter->netdev)) + netif_wake_queue(adapter->netdev); } static u16 tpd_avail(struct atl1_tpd_ring *tpd_ring) { u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); u16 next_to_use = atomic_read(&tpd_ring->next_to_use); - return ((next_to_clean > - next_to_use) ? next_to_clean - next_to_use - - 1 : tpd_ring->count + next_to_clean - next_to_use - 1); + return ((next_to_clean > next_to_use) ? 
+ next_to_clean - next_to_use - 1 : + tpd_ring->count + next_to_clean - next_to_use - 1); } static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, @@ -1262,9 +1451,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, iph->tot_len = 0; iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); + iph->daddr, 0, IPPROTO_TCP, 0); ipofst = skb_network_offset(skb); if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */ tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; @@ -1272,7 +1459,8 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, tso->tsopl |= (iph->ihl & CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT; tso->tsopl |= (tcp_hdrlen(skb) & - TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT; + TSO_PARAM_TCPHDRLEN_MASK) << + TSO_PARAM_TCPHDRLEN_SHIFT; tso->tsopl |= (skb_shinfo(skb)->gso_size & TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT; tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT; @@ -1285,7 +1473,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, } static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, - struct csum_param *csum) + struct csum_param *csum) { u8 css, cso; @@ -1293,7 +1481,7 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, cso = skb_transport_offset(skb); css = cso + skb->csum_offset; if (unlikely(cso & 0x1)) { - dev_dbg(&adapter->pdev->dev, + dev_printk(KERN_DEBUG, &adapter->pdev->dev, "payload offset not an even number\n"); return -1; } @@ -1308,8 +1496,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, return true; } -static void atl1_tx_map(struct atl1_adapter *adapter, - struct sk_buff *skb, bool tcp_seg) +static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, + bool tcp_seg) { /* We enter this function holding a spinlock. */ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; @@ -1346,26 +1534,25 @@ static void atl1_tx_map(struct atl1_adapter *adapter, if (first_buf_len > proto_hdr_len) { len12 = first_buf_len - proto_hdr_len; - m = (len12 + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; + m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; for (i = 0; i < m; i++) { buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; buffer_info->skb = NULL; buffer_info->length = - (MAX_TX_BUF_LEN >= - len12) ? MAX_TX_BUF_LEN : len12; + (ATL1_MAX_TX_BUF_LEN >= + len12) ? 
ATL1_MAX_TX_BUF_LEN : len12; len12 -= buffer_info->length; page = virt_to_page(skb->data + - (proto_hdr_len + - i * MAX_TX_BUF_LEN)); + (proto_hdr_len + + i * ATL1_MAX_TX_BUF_LEN)); offset = (unsigned long)(skb->data + - (proto_hdr_len + - i * MAX_TX_BUF_LEN)) & - ~PAGE_MASK; - buffer_info->dma = - pci_map_page(adapter->pdev, page, offset, - buffer_info->length, - PCI_DMA_TODEVICE); + (proto_hdr_len + + i * ATL1_MAX_TX_BUF_LEN)) & ~PAGE_MASK; + buffer_info->dma = pci_map_page(adapter->pdev, + page, offset, buffer_info->length, + PCI_DMA_TODEVICE); if (++tpd_next_to_use == tpd_ring->count) tpd_next_to_use = 0; } @@ -1376,8 +1563,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, page = virt_to_page(skb->data); offset = (unsigned long)skb->data & ~PAGE_MASK; buffer_info->dma = pci_map_page(adapter->pdev, page, - offset, first_buf_len, - PCI_DMA_TODEVICE); + offset, first_buf_len, PCI_DMA_TODEVICE); if (++tpd_next_to_use == tpd_ring->count) tpd_next_to_use = 0; } @@ -1389,19 +1575,19 @@ static void atl1_tx_map(struct atl1_adapter *adapter, frag = &skb_shinfo(skb)->frags[f]; lenf = frag->size; - m = (lenf + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; + m = (lenf + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; for (i = 0; i < m; i++) { buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; if (unlikely(buffer_info->skb)) BUG(); buffer_info->skb = NULL; - buffer_info->length = - (lenf > MAX_TX_BUF_LEN) ? MAX_TX_BUF_LEN : lenf; + buffer_info->length = (lenf > ATL1_MAX_TX_BUF_LEN) ? + ATL1_MAX_TX_BUF_LEN : lenf; lenf -= buffer_info->length; - buffer_info->dma = - pci_map_page(adapter->pdev, frag->page, - frag->page_offset + i * MAX_TX_BUF_LEN, - buffer_info->length, PCI_DMA_TODEVICE); + buffer_info->dma = pci_map_page(adapter->pdev, + frag->page, + frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN), + buffer_info->length, PCI_DMA_TODEVICE); if (++tpd_next_to_use == tpd_ring->count) tpd_next_to_use = 0; @@ -1413,7 +1599,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, } static void atl1_tx_queue(struct atl1_adapter *adapter, int count, - union tpd_descr *descr) + union tpd_descr *descr) { /* We enter this function holding a spinlock. 
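 * The caller, atl1_xmit_frame(), acquires adapter->lock before * invoking atl1_tx_map() and atl1_tx_queue() and releases it * once the descriptors are queued.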
*/ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; @@ -1457,31 +1643,6 @@ static void atl1_tx_queue(struct atl1_adapter *adapter, int count, atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use); } -static void atl1_update_mailbox(struct atl1_adapter *adapter) -{ - unsigned long flags; - u32 tpd_next_to_use; - u32 rfd_next_to_use; - u32 rrd_next_to_clean; - u32 value; - - spin_lock_irqsave(&adapter->mb_lock, flags); - - tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); - rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use); - rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean); - - value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << - MB_RFD_PROD_INDX_SHIFT) | - ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << - MB_RRD_CONS_INDX_SHIFT) | - ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << - MB_TPD_PROD_INDX_SHIFT); - iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); - - spin_unlock_irqrestore(&adapter->mb_lock, flags); -} - static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct atl1_adapter *adapter = netdev_priv(netdev); @@ -1517,8 +1678,8 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) for (f = 0; f < nr_frags; f++) { frag_size = skb_shinfo(skb)->frags[f].size; if (frag_size) - count += - (frag_size + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; + count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; } /* mss will be nonzero if we're doing segment offload (TSO/GSO) */ @@ -1534,7 +1695,8 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) /* need additional TPD ? */ if (proto_hdr_len != len) count += (len - proto_hdr_len + - MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; } } @@ -1542,7 +1704,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if (!spin_trylock(&adapter->lock)) { /* Can't get lock - tell upper layer to requeue */ local_irq_restore(flags); - dev_dbg(&adapter->pdev->dev, "tx locked\n"); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n"); return NETDEV_TX_LOCKED; } @@ -1550,7 +1712,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) /* not enough descriptors */ netif_stop_queue(netdev); spin_unlock_irqrestore(&adapter->lock, flags); - dev_dbg(&adapter->pdev->dev, "tx busy\n"); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n"); return NETDEV_TX_BUSY; } @@ -1592,131 +1754,208 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) } /* - * atl1_get_stats - Get System Network Statistics - * @netdev: network interface device structure - * - * Returns the address of the device statistics structure. - * The statistics are actually updated from the timer callback. 
+ * atl1_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + * @pt_regs: CPU registers structure */ -static struct net_device_stats *atl1_get_stats(struct net_device *netdev) +static irqreturn_t atl1_intr(int irq, void *data) { - struct atl1_adapter *adapter = netdev_priv(netdev); - return &adapter->net_stats; -} + struct atl1_adapter *adapter = netdev_priv(data); + u32 status; + u8 update_rx; + int max_ints = 10; -/* - * atl1_clean_rx_ring - Free RFD Buffers - * @adapter: board private structure - */ -static void atl1_clean_rx_ring(struct atl1_adapter *adapter) -{ - struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; - struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; - struct atl1_buffer *buffer_info; - struct pci_dev *pdev = adapter->pdev; - unsigned long size; - unsigned int i; + status = adapter->cmb.cmb->int_stats; + if (!status) + return IRQ_NONE; - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rfd_ring->count; i++) { - buffer_info = &rfd_ring->buffer_info[i]; - if (buffer_info->dma) { - pci_unmap_page(pdev, - buffer_info->dma, - buffer_info->length, - PCI_DMA_FROMDEVICE); - buffer_info->dma = 0; + update_rx = 0; + + do { + /* clear CMB interrupt status at once */ + adapter->cmb.cmb->int_stats = 0; + + if (status & ISR_GPHY) /* clear phy status */ + atl1_clear_phy_int(adapter); + + /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ + iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); + + /* check if SMB intr */ + if (status & ISR_SMB) + atl1_inc_smb(adapter); + + /* check if PCIE PHY Link down */ + if (status & ISR_PHY_LINKDOWN) { + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "pcie phy link down %x\n", status); + if (netif_running(adapter->netdev)) { /* reset MAC */ + iowrite32(0, adapter->hw.hw_addr + REG_IMR); + schedule_work(&adapter->pcie_dma_to_rst_task); + return IRQ_HANDLED; + } } - if (buffer_info->skb) { - dev_kfree_skb(buffer_info->skb); - buffer_info->skb = NULL; + + /* check if DMA read/write error ? 
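+ * A DMAR/DMAW timeout cannot simply be cleared: mask all + * interrupts and let pcie_dma_to_rst_task reset the MAC.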
*/ + if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "pcie DMA r/w error (status = 0x%x)\n", + status); + iowrite32(0, adapter->hw.hw_addr + REG_IMR); + schedule_work(&adapter->pcie_dma_to_rst_task); + return IRQ_HANDLED; } - } - size = sizeof(struct atl1_buffer) * rfd_ring->count; - memset(rfd_ring->buffer_info, 0, size); + /* link event */ + if (status & ISR_GPHY) { + adapter->soft_stats.tx_carrier_errors++; + atl1_check_for_link(adapter); + } - /* Zero out the descriptor ring */ - memset(rfd_ring->desc, 0, rfd_ring->size); + /* transmit event */ + if (status & ISR_CMB_TX) + atl1_intr_tx(adapter); - rfd_ring->next_to_clean = 0; - atomic_set(&rfd_ring->next_to_use, 0); + /* rx exception */ + if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | + ISR_RRD_OV | ISR_HOST_RFD_UNRUN | + ISR_HOST_RRD_OV | ISR_CMB_RX))) { + if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | + ISR_RRD_OV | ISR_HOST_RFD_UNRUN | + ISR_HOST_RRD_OV)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "rx exception, ISR = 0x%x\n", status); + atl1_intr_rx(adapter); + } - rrd_ring->next_to_use = 0; - atomic_set(&rrd_ring->next_to_clean, 0); + if (--max_ints < 0) + break; + + } while ((status = adapter->cmb.cmb->int_stats)); + + /* re-enable Interrupt */ + iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); + return IRQ_HANDLED; } /* - * atl1_clean_tx_ring - Free Tx Buffers - * @adapter: board private structure + * atl1_watchdog - Timer Call-back + * @data: pointer to netdev cast into an unsigned long */ -static void atl1_clean_tx_ring(struct atl1_adapter *adapter) +static void atl1_watchdog(unsigned long data) { - struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; - struct atl1_buffer *buffer_info; - struct pci_dev *pdev = adapter->pdev; - unsigned long size; - unsigned int i; + struct atl1_adapter *adapter = (struct atl1_adapter *)data; - /* Free all the Tx ring sk_buffs */ - for (i = 0; i < tpd_ring->count; i++) { - buffer_info = &tpd_ring->buffer_info[i]; - if (buffer_info->dma) { - pci_unmap_page(pdev, buffer_info->dma, - buffer_info->length, PCI_DMA_TODEVICE); - buffer_info->dma = 0; - } - } + /* Reset the timer */ + mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); +} - for (i = 0; i < tpd_ring->count; i++) { - buffer_info = &tpd_ring->buffer_info[i]; - if (buffer_info->skb) { - dev_kfree_skb_any(buffer_info->skb); - buffer_info->skb = NULL; - } - } +/* + * atl1_phy_config - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + */ +static void atl1_phy_config(unsigned long data) +{ + struct atl1_adapter *adapter = (struct atl1_adapter *)data; + struct atl1_hw *hw = &adapter->hw; + unsigned long flags; - size = sizeof(struct atl1_buffer) * tpd_ring->count; - memset(tpd_ring->buffer_info, 0, size); + spin_lock_irqsave(&adapter->lock, flags); + adapter->phy_timer_pending = false; + atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); + atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg); + atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN); + spin_unlock_irqrestore(&adapter->lock, flags); +} - /* Zero out the descriptor ring */ - memset(tpd_ring->desc, 0, tpd_ring->size); +/* + * atl1_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + */ +static void atl1_tx_timeout(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->tx_timeout_task); +} - 
atomic_set(&tpd_ring->next_to_use, 0); - atomic_set(&tpd_ring->next_to_clean, 0); +/* + * Orphaned vendor comment left intact here: + * <vendor comment> + * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT + * will assert. We do soft reset <0x1400=1> according + * with the SPEC. BUT, it seemes that PCIE or DMA + * state-machine will not be reset. DMAR_TO_INT will + * assert again and again. + * </vendor comment> + */ +static void atl1_tx_timeout_task(struct work_struct *work) +{ + struct atl1_adapter *adapter = + container_of(work, struct atl1_adapter, tx_timeout_task); + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + atl1_down(adapter); + atl1_up(adapter); + netif_device_attach(netdev); } /* - * atl1_free_ring_resources - Free Tx / RX descriptor Resources - * @adapter: board private structure - * - * Free all transmit software resources + * atl1_link_chg_task - deal with link change event Out of interrupt context */ -void atl1_free_ring_resources(struct atl1_adapter *adapter) +static void atl1_link_chg_task(struct work_struct *work) { - struct pci_dev *pdev = adapter->pdev; - struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; - struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; - struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; - struct atl1_ring_header *ring_header = &adapter->ring_header; + struct atl1_adapter *adapter = + container_of(work, struct atl1_adapter, link_chg_task); + unsigned long flags; - atl1_clean_tx_ring(adapter); - atl1_clean_rx_ring(adapter); + spin_lock_irqsave(&adapter->lock, flags); + atl1_check_link(adapter); + spin_unlock_irqrestore(&adapter->lock, flags); +} - kfree(tpd_ring->buffer_info); - pci_free_consistent(pdev, ring_header->size, ring_header->desc, - ring_header->dma); +static void atl1_vlan_rx_register(struct net_device *netdev, + struct vlan_group *grp) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + unsigned long flags; + u32 ctrl; - tpd_ring->buffer_info = NULL; - tpd_ring->desc = NULL; - tpd_ring->dma = 0; + spin_lock_irqsave(&adapter->lock, flags); + /* atl1_irq_disable(adapter); */ + adapter->vlgrp = grp; - rfd_ring->buffer_info = NULL; - rfd_ring->desc = NULL; - rfd_ring->dma = 0; + if (grp) { + /* enable VLAN tag insert/strip */ + ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); + ctrl |= MAC_CTRL_RMV_VLAN; + iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); + } else { + /* disable VLAN tag insert/strip */ + ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); + ctrl &= ~MAC_CTRL_RMV_VLAN; + iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); + } - rrd_ring->desc = NULL; - rrd_ring->dma = 0; + /* atl1_irq_enable(adapter); */ + spin_unlock_irqrestore(&adapter->lock, flags); +} + +static void atl1_restore_vlan(struct atl1_adapter *adapter) +{ + atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp); +} + +int atl1_reset(struct atl1_adapter *adapter) +{ + int ret; + + ret = atl1_reset_hw(&adapter->hw); + if (ret != ATL1_SUCCESS) + return ret; + return atl1_init_hw(&adapter->hw); } s32 atl1_up(struct atl1_adapter *adapter) @@ -1727,6 +1966,7 @@ s32 atl1_up(struct atl1_adapter *adapter) /* hardware has been reset, we need to reload some things */ atl1_set_multi(netdev); + atl1_init_ring_ptrs(adapter); atl1_restore_vlan(adapter); err = atl1_alloc_rx_buffers(adapter); if (unlikely(!err)) /* no RX BUFFER allocated */ @@ -1754,11 +1994,6 @@ s32 atl1_up(struct atl1_adapter *adapter) atl1_check_link(adapter); return 0; - /* FIXME: unreachable code! 
-- CHS */ - /* free irq disable any interrupt */ - iowrite32(0, adapter->hw.hw_addr + REG_IMR); - free_irq(adapter->pdev->irq, netdev); - err_up: pci_disable_msi(adapter->pdev); /* free rx_buffers */ @@ -1790,172 +2025,6 @@ void atl1_down(struct atl1_adapter *adapter) } /* - * atl1_change_mtu - Change the Maximum Transfer Unit - * @netdev: network interface device structure - * @new_mtu: new value for maximum frame size - * - * Returns 0 on success, negative on failure - */ -static int atl1_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - int old_mtu = netdev->mtu; - int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; - - if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || - (max_frame > MAX_JUMBO_FRAME_SIZE)) { - dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); - return -EINVAL; - } - - adapter->hw.max_frame_size = max_frame; - adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; - adapter->rx_buffer_len = (max_frame + 7) & ~7; - adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; - - netdev->mtu = new_mtu; - if ((old_mtu != new_mtu) && netif_running(netdev)) { - atl1_down(adapter); - atl1_up(adapter); - } - - return 0; -} - -/* - * atl1_set_mac - Change the Ethernet Address of the NIC - * @netdev: network interface device structure - * @p: pointer to an address structure - * - * Returns 0 on success, negative on failure - */ -static int atl1_set_mac(struct net_device *netdev, void *p) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - struct sockaddr *addr = p; - - if (netif_running(netdev)) - return -EBUSY; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); - - atl1_set_mac_addr(&adapter->hw); - return 0; -} - -/* - * atl1_watchdog - Timer Call-back - * @data: pointer to netdev cast into an unsigned long - */ -static void atl1_watchdog(unsigned long data) -{ - struct atl1_adapter *adapter = (struct atl1_adapter *)data; - - /* Reset the timer */ - mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); -} - -static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - u16 result; - - atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result); - - return result; -} - -static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, int val) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - - atl1_write_phy_reg(&adapter->hw, reg_num, val); -} - -/* - * atl1_mii_ioctl - - * @netdev: - * @ifreq: - * @cmd: - */ -static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - struct atl1_adapter *adapter = netdev_priv(netdev); - unsigned long flags; - int retval; - - if (!netif_running(netdev)) - return -EINVAL; - - spin_lock_irqsave(&adapter->lock, flags); - retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); - spin_unlock_irqrestore(&adapter->lock, flags); - - return retval; -} - -/* - * atl1_ioctl - - * @netdev: - * @ifreq: - * @cmd: - */ -static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - switch (cmd) { - case SIOCGMIIPHY: - case SIOCGMIIREG: - case SIOCSMIIREG: - return atl1_mii_ioctl(netdev, ifr, cmd); - default: - return -EOPNOTSUPP; - } -} - -/* - * atl1_tx_timeout - Respond to a Tx Hang - * @netdev: network interface device structure - */ -static void atl1_tx_timeout(struct net_device *netdev) -{ - 
struct atl1_adapter *adapter = netdev_priv(netdev); - /* Do the reset outside of interrupt context */ - schedule_work(&adapter->tx_timeout_task); -} - -/* - * atl1_phy_config - Timer Call-back - * @data: pointer to netdev cast into an unsigned long - */ -static void atl1_phy_config(unsigned long data) -{ - struct atl1_adapter *adapter = (struct atl1_adapter *)data; - struct atl1_hw *hw = &adapter->hw; - unsigned long flags; - - spin_lock_irqsave(&adapter->lock, flags); - adapter->phy_timer_pending = false; - atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); - atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg); - atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN); - spin_unlock_irqrestore(&adapter->lock, flags); -} - -int atl1_reset(struct atl1_adapter *adapter) -{ - int ret; - - ret = atl1_reset_hw(&adapter->hw); - if (ret != ATL1_SUCCESS) - return ret; - return atl1_init_hw(&adapter->hw); -} - -/* * atl1_open - Called when a network interface is made active * @netdev: network interface device structure * @@ -2007,77 +2076,113 @@ static int atl1_close(struct net_device *netdev) return 0; } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void atl1_poll_controller(struct net_device *netdev) -{ - disable_irq(netdev->irq); - atl1_intr(netdev->irq, netdev); - enable_irq(netdev->irq); -} -#endif - -/* - * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT - * will assert. We do soft reset <0x1400=1> according - * with the SPEC. BUT, it seemes that PCIE or DMA - * state-machine will not be reset. DMAR_TO_INT will - * assert again and again. - */ -static void atl1_tx_timeout_task(struct work_struct *work) +#ifdef CONFIG_PM +static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) { - struct atl1_adapter *adapter = - container_of(work, struct atl1_adapter, tx_timeout_task); - struct net_device *netdev = adapter->netdev; + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + u32 ctrl = 0; + u32 wufc = adapter->wol; netif_device_detach(netdev); - atl1_down(adapter); - atl1_up(adapter); - netif_device_attach(netdev); -} + if (netif_running(netdev)) + atl1_down(adapter); -/* - * atl1_link_chg_task - deal with link change event Out of interrupt context - */ -static void atl1_link_chg_task(struct work_struct *work) -{ - struct atl1_adapter *adapter = - container_of(work, struct atl1_adapter, link_chg_task); - unsigned long flags; + atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); + atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); + if (ctrl & BMSR_LSTATUS) + wufc &= ~ATL1_WUFC_LNKC; - spin_lock_irqsave(&adapter->lock, flags); - atl1_check_link(adapter); - spin_unlock_irqrestore(&adapter->lock, flags); + /* reduce speed to 10/100M */ + if (wufc) { + atl1_phy_enter_power_saving(hw); + /* on resume, let the driver re-setup the link */ + hw->phy_configured = false; + atl1_set_mac_addr(hw); + atl1_set_multi(netdev); + + ctrl = 0; + /* turn on magic packet wol */ + if (wufc & ATL1_WUFC_MAG) + ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; + + /* turn on link change WOL */ + if (wufc & ATL1_WUFC_LNKC) + ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); + iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); + + /* turn on all-multi mode if wake on multicast is enabled */ + ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL); + ctrl &= ~MAC_CTRL_DBG; + ctrl &= ~MAC_CTRL_PROMIS_EN; + if (wufc & ATL1_WUFC_MC) + ctrl |= MAC_CTRL_MC_ALL_EN; + else + ctrl &= ~MAC_CTRL_MC_ALL_EN; + + /* turn on broadcast mode if
wake on-BC is enabled */ + if (wufc & ATL1_WUFC_BC) + ctrl |= MAC_CTRL_BC_EN; + else + ctrl &= ~MAC_CTRL_BC_EN; + + /* enable RX */ + ctrl |= MAC_CTRL_RX_EN; + iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); + pci_enable_wake(pdev, PCI_D3hot, 1); + pci_enable_wake(pdev, PCI_D3cold, 1); + } else { + iowrite32(0, hw->hw_addr + REG_WOL_CTRL); + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + } + + pci_save_state(pdev); + pci_disable_device(pdev); + + pci_set_power_state(pdev, PCI_D3hot); + + return 0; } -/* - * atl1_pcie_patch - Patch for PCIE module - */ -static void atl1_pcie_patch(struct atl1_adapter *adapter) +static int atl1_resume(struct pci_dev *pdev) { - u32 value; - value = 0x6500; - iowrite32(value, adapter->hw.hw_addr + 0x12FC); - /* pcie flow control mode change */ - value = ioread32(adapter->hw.hw_addr + 0x1008); - value |= 0x8000; - iowrite32(value, adapter->hw.hw_addr + 0x1008); + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1_adapter *adapter = netdev_priv(netdev); + u32 ret_val; + + pci_set_power_state(pdev, 0); + pci_restore_state(pdev); + + ret_val = pci_enable_device(pdev); + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); + atl1_reset(adapter); + + if (netif_running(netdev)) + atl1_up(adapter); + netif_device_attach(netdev); + + atl1_via_workaround(adapter); + + return 0; } +#else +#define atl1_suspend NULL +#define atl1_resume NULL +#endif -/* - * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400 - * on PCI Command register is disable. - * The function enable this bit. - * Brackett, 2006/03/15 - */ -static void atl1_via_workaround(struct atl1_adapter *adapter) +#ifdef CONFIG_NET_POLL_CONTROLLER +static void atl1_poll_controller(struct net_device *netdev) { - unsigned long value; - - value = ioread16(adapter->hw.hw_addr + PCI_COMMAND); - if (value & PCI_COMMAND_INTX_DISABLE) - value &= ~PCI_COMMAND_INTX_DISABLE; - iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND); + disable_irq(netdev->irq); + atl1_intr(netdev->irq, netdev); + enable_irq(netdev->irq); } +#endif /* * atl1_probe - Device Initialization Routine @@ -2091,7 +2196,7 @@ static void atl1_via_workaround(struct atl1_adapter *adapter) * and a hardware reset occur. */ static int __devinit atl1_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) + const struct pci_device_id *ent) { struct net_device *netdev; struct atl1_adapter *adapter; @@ -2145,7 +2250,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev, } /* get device revision number */ adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + - (REG_MASTER_CTRL + 2)); + (REG_MASTER_CTRL + 2)); dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION); /* set default ring resource counts */ @@ -2298,7 +2403,8 @@ static void __devexit atl1_remove(struct pci_dev *pdev) * address, we need to save the permanent one. 
*/ if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) { - memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN); + memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, + ETH_ALEN); atl1_set_mac_addr(&adapter->hw); } @@ -2310,112 +2416,11 @@ static void __devexit atl1_remove(struct pci_dev *pdev) pci_disable_device(pdev); } -#ifdef CONFIG_PM -static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct atl1_adapter *adapter = netdev_priv(netdev); - struct atl1_hw *hw = &adapter->hw; - u32 ctrl = 0; - u32 wufc = adapter->wol; - - netif_device_detach(netdev); - if (netif_running(netdev)) - atl1_down(adapter); - - atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); - atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); - if (ctrl & BMSR_LSTATUS) - wufc &= ~ATL1_WUFC_LNKC; - - /* reduce speed to 10/100M */ - if (wufc) { - atl1_phy_enter_power_saving(hw); - /* if resume, let driver to re- setup link */ - hw->phy_configured = false; - atl1_set_mac_addr(hw); - atl1_set_multi(netdev); - - ctrl = 0; - /* turn on magic packet wol */ - if (wufc & ATL1_WUFC_MAG) - ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; - - /* turn on Link change WOL */ - if (wufc & ATL1_WUFC_LNKC) - ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); - iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); - - /* turn on all-multi mode if wake on multicast is enabled */ - ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL); - ctrl &= ~MAC_CTRL_DBG; - ctrl &= ~MAC_CTRL_PROMIS_EN; - if (wufc & ATL1_WUFC_MC) - ctrl |= MAC_CTRL_MC_ALL_EN; - else - ctrl &= ~MAC_CTRL_MC_ALL_EN; - - /* turn on broadcast mode if wake on-BC is enabled */ - if (wufc & ATL1_WUFC_BC) - ctrl |= MAC_CTRL_BC_EN; - else - ctrl &= ~MAC_CTRL_BC_EN; - - /* enable RX */ - ctrl |= MAC_CTRL_RX_EN; - iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); - pci_enable_wake(pdev, PCI_D3hot, 1); - pci_enable_wake(pdev, PCI_D3cold, 1); /* 4 == D3 cold */ - } else { - iowrite32(0, hw->hw_addr + REG_WOL_CTRL); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); /* 4 == D3 cold */ - } - - pci_save_state(pdev); - pci_disable_device(pdev); - - pci_set_power_state(pdev, PCI_D3hot); - - return 0; -} - -static int atl1_resume(struct pci_dev *pdev) -{ - struct net_device *netdev = pci_get_drvdata(pdev); - struct atl1_adapter *adapter = netdev_priv(netdev); - u32 ret_val; - - pci_set_power_state(pdev, 0); - pci_restore_state(pdev); - - ret_val = pci_enable_device(pdev); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - - iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); - atl1_reset(adapter); - - if (netif_running(netdev)) - atl1_up(adapter); - netif_device_attach(netdev); - - atl1_via_workaround(adapter); - - return 0; -} -#else -#define atl1_suspend NULL -#define atl1_resume NULL -#endif - static struct pci_driver atl1_driver = { .name = atl1_driver_name, .id_table = atl1_pci_tbl, .probe = atl1_probe, .remove = __devexit_p(atl1_remove), - /* Power Managment Hooks */ - /* probably broken right now -- CHS */ .suspend = atl1_suspend, .resume = atl1_resume }; diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index c27cfcef45fa..e86b3691765b 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -1205,8 +1205,8 @@ static int au1000_rx(struct net_device *dev) continue; } skb_reserve(skb, 2); /* 16 byte IP header align */ - eth_copy_and_sum(skb, - (unsigned char *)pDB->vaddr, frmlen, 0); + skb_copy_to_linear_data(skb, + (unsigned char 
*)pDB->vaddr, frmlen); skb_put(skb, frmlen); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); /* pass the packet to upper layers */ diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c index d19874bf0706..1d882360b34d 100644 --- a/drivers/net/ax88796.c +++ b/drivers/net/ax88796.c @@ -459,7 +459,7 @@ static int ax_open(struct net_device *dev) struct ei_device *ei_local = netdev_priv(dev); int ret; - dev_dbg(ax->dev, "%s: open\n", dev->name); + dev_dbg(&ax->dev->dev, "%s: open\n", dev->name); ret = request_irq(dev->irq, ax_ei_interrupt, 0, dev->name, dev); if (ret) @@ -492,7 +492,7 @@ static int ax_close(struct net_device *dev) struct ax_device *ax = to_ax_dev(dev); struct ei_device *ei_local = netdev_priv(dev); - dev_dbg(ax->dev, "%s: close\n", dev->name); + dev_dbg(&ax->dev->dev, "%s: close\n", dev->name); /* turn the phy off */ diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 96fb0ec905a7..37f1b6ff5c12 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c @@ -1519,14 +1519,13 @@ static void b44_setup_pseudo_magicp(struct b44 *bp) u8 *pwol_pattern; u8 pwol_mask[B44_PMASK_SIZE]; - pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL); + pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL); if (!pwol_pattern) { printk(KERN_ERR PFX "Memory not available for WOL\n"); return; } /* Ipv4 magic packet pattern - pattern 0.*/ - memset(pwol_pattern, 0, B44_PATTERN_SIZE); memset(pwol_mask, 0, B44_PMASK_SIZE); plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, B44_ETHIPV4UDP_HLEN); diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c new file mode 100644 index 000000000000..9a08d656f1ce --- /dev/null +++ b/drivers/net/bfin_mac.c @@ -0,0 +1,1009 @@ +/* + * File: drivers/net/bfin_mac.c + * Based on: + * Maintainer: + * Bryan Wu <bryan.wu@analog.com> + * + * Original author: + * Luke Yang <luke.yang@analog.com> + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software ; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation ; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY ; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program ; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/errno.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/crc32.h> +#include <linux/device.h> +#include <linux/spinlock.h> +#include <linux/ethtool.h> +#include <linux/mii.h> + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> + +#include <linux/platform_device.h> + +#include <asm/dma.h> +#include <linux/dma-mapping.h> + +#include <asm/blackfin.h> +#include <asm/cacheflush.h> +#include <asm/portmux.h> + +#include "bfin_mac.h" + +#define DRV_NAME "bfin_mac" +#define DRV_VERSION "1.1" +#define DRV_AUTHOR "Bryan Wu, Luke Yang" +#define DRV_DESC "Blackfin BF53[67] on-chip Ethernet MAC driver" + +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION(DRV_DESC); + +#if defined(CONFIG_BFIN_MAC_USE_L1) +# define bfin_mac_alloc(dma_handle, size) l1_data_sram_zalloc(size) +# define bfin_mac_free(dma_handle, ptr) l1_data_sram_free(ptr) +#else +# define bfin_mac_alloc(dma_handle, size) \ + dma_alloc_coherent(NULL, size, dma_handle, GFP_KERNEL) +# define bfin_mac_free(dma_handle, ptr) \ + dma_free_coherent(NULL, sizeof(*ptr), ptr, dma_handle) +#endif + +#define PKT_BUF_SZ 1580 + +#define MAX_TIMEOUT_CNT 500 + +/* pointers to maintain the transmit and receive lists */ +static struct net_dma_desc_tx *tx_list_head; +static struct net_dma_desc_tx *tx_list_tail; +static struct net_dma_desc_rx *rx_list_head; +static struct net_dma_desc_rx *rx_list_tail; +static struct net_dma_desc_rx *current_rx_ptr; +static struct net_dma_desc_tx *current_tx_ptr; +static struct net_dma_desc_tx *tx_desc; +static struct net_dma_desc_rx *rx_desc; + +static void desc_list_free(void) +{ + struct net_dma_desc_rx *r; + struct net_dma_desc_tx *t; + int i; +#if !defined(CONFIG_BFIN_MAC_USE_L1) + dma_addr_t dma_handle = 0; +#endif + + if (tx_desc) { + t = tx_list_head; + for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) { + if (t) { + if (t->skb) { + dev_kfree_skb(t->skb); + t->skb = NULL; + } + t = t->next; + } + } + bfin_mac_free(dma_handle, tx_desc); + } + + if (rx_desc) { + r = rx_list_head; + for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) { + if (r) { + if (r->skb) { + dev_kfree_skb(r->skb); + r->skb = NULL; + } + r = r->next; + } + } + bfin_mac_free(dma_handle, rx_desc); + } +} + +static int desc_list_init(void) +{ + int i; + struct sk_buff *new_skb; +#if !defined(CONFIG_BFIN_MAC_USE_L1) + /* + * This dma_handle is useless in Blackfin dma_alloc_coherent(). + * The real dma handle is the return value of dma_alloc_coherent().
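+ * The local variable exists only so there is something to + * pass through the bfin_mac_alloc() wrapper.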
+ */ + dma_addr_t dma_handle; +#endif + + tx_desc = bfin_mac_alloc(&dma_handle, + sizeof(struct net_dma_desc_tx) * + CONFIG_BFIN_TX_DESC_NUM); + if (tx_desc == NULL) + goto init_error; + + rx_desc = bfin_mac_alloc(&dma_handle, + sizeof(struct net_dma_desc_rx) * + CONFIG_BFIN_RX_DESC_NUM); + if (rx_desc == NULL) + goto init_error; + + /* init tx_list */ + tx_list_head = tx_list_tail = tx_desc; + + for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) { + struct net_dma_desc_tx *t = tx_desc + i; + struct dma_descriptor *a = &(t->desc_a); + struct dma_descriptor *b = &(t->desc_b); + + /* + * disable DMA + * read from memory WNR = 0 + * wordsize is 32 bits + * 6 half words is desc size + * large desc flow + */ + a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE; + a->start_addr = (unsigned long)t->packet; + a->x_count = 0; + a->next_dma_desc = b; + + /* + * enabled DMA + * write to memory WNR = 1 + * wordsize is 32 bits + * disable interrupt + * 6 half words is desc size + * large desc flow + */ + b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE; + b->start_addr = (unsigned long)(&(t->status)); + b->x_count = 0; + + t->skb = NULL; + tx_list_tail->desc_b.next_dma_desc = a; + tx_list_tail->next = t; + tx_list_tail = t; + } + tx_list_tail->next = tx_list_head; /* tx_list is a circle */ + tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a); + current_tx_ptr = tx_list_head; + + /* init rx_list */ + rx_list_head = rx_list_tail = rx_desc; + + for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) { + struct net_dma_desc_rx *r = rx_desc + i; + struct dma_descriptor *a = &(r->desc_a); + struct dma_descriptor *b = &(r->desc_b); + + /* allocate a new skb for next time receive */ + new_skb = dev_alloc_skb(PKT_BUF_SZ + 2); + if (!new_skb) { + printk(KERN_NOTICE DRV_NAME + ": init: low on mem - packet dropped\n"); + goto init_error; + } + skb_reserve(new_skb, 2); + r->skb = new_skb; + + /* + * enabled DMA + * write to memory WNR = 1 + * wordsize is 32 bits + * disable interrupt + * 6 half words is desc size + * large desc flow + */ + a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE; + /* since RXDWA is enabled */ + a->start_addr = (unsigned long)new_skb->data - 2; + a->x_count = 0; + a->next_dma_desc = b; + + /* + * enabled DMA + * write to memory WNR = 1 + * wordsize is 32 bits + * enable interrupt + * 6 half words is desc size + * large desc flow + */ + b->config = DMAEN | WNR | WDSIZE_32 | DI_EN | + NDSIZE_6 | DMAFLOW_LARGE; + b->start_addr = (unsigned long)(&(r->status)); + b->x_count = 0; + + rx_list_tail->desc_b.next_dma_desc = a; + rx_list_tail->next = r; + rx_list_tail = r; + } + rx_list_tail->next = rx_list_head; /* rx_list is a circle */ + rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a); + current_rx_ptr = rx_list_head; + + return 0; + +init_error: + desc_list_free(); + printk(KERN_ERR DRV_NAME ": kmalloc failed\n"); + return -ENOMEM; +} + + +/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/ + +/* Set FER regs to MUX in Ethernet pins */ +static int setup_pin_mux(int action) +{ +#if defined(CONFIG_BFIN_MAC_RMII) + u16 pin_req[] = P_RMII0; +#else + u16 pin_req[] = P_MII0; +#endif + + if (action) { + if (peripheral_request_list(pin_req, DRV_NAME)) { + printk(KERN_ERR DRV_NAME + ": Requesting Peripherals failed\n"); + return -EFAULT; + } + } else + peripheral_free_list(pin_req); + + return 0; +} + +/* Wait until the previous MDC/MDIO transaction has completed */ +static void poll_mdc_done(void) +{ + int timeout_cnt = MAX_TIMEOUT_CNT; + + /* 
poll the STABUSY bit */ + while ((bfin_read_EMAC_STAADD()) & STABUSY) { + mdelay(10); + if (timeout_cnt-- < 0) { + printk(KERN_ERR DRV_NAME + ": timed out waiting for the MDC/MDIO transaction to complete\n"); + break; + } + } +} + +/* Read an off-chip register in a PHY through the MDC/MDIO port */ +static u16 read_phy_reg(u16 PHYAddr, u16 RegAddr) +{ + poll_mdc_done(); + /* read mode */ + bfin_write_EMAC_STAADD(SET_PHYAD(PHYAddr) | + SET_REGAD(RegAddr) | + STABUSY); + poll_mdc_done(); + + return (u16) bfin_read_EMAC_STADAT(); +} + +/* Write an off-chip register in a PHY through the MDC/MDIO port */ +static void raw_write_phy_reg(u16 PHYAddr, u16 RegAddr, u32 Data) +{ + bfin_write_EMAC_STADAT(Data); + + /* write mode */ + bfin_write_EMAC_STAADD(SET_PHYAD(PHYAddr) | + SET_REGAD(RegAddr) | + STAOP | + STABUSY); + + poll_mdc_done(); +} + +static void write_phy_reg(u16 PHYAddr, u16 RegAddr, u32 Data) +{ + poll_mdc_done(); + raw_write_phy_reg(PHYAddr, RegAddr, Data); +} + +/* set up the phy */ +static void bf537mac_setphy(struct net_device *dev) +{ + u16 phydat; + struct bf537mac_local *lp = netdev_priv(dev); + + /* Program PHY registers */ + pr_debug("start setting up phy\n"); + + /* issue a reset */ + raw_write_phy_reg(lp->PhyAddr, PHYREG_MODECTL, 0x8000); + + /* wait half a second */ + msleep(500); + + phydat = read_phy_reg(lp->PhyAddr, PHYREG_MODECTL); + + /* advertise flow control supported */ + phydat = read_phy_reg(lp->PhyAddr, PHYREG_ANAR); + phydat |= (1 << 10); + write_phy_reg(lp->PhyAddr, PHYREG_ANAR, phydat); + + phydat = 0; + if (lp->Negotiate) + phydat |= 0x1000; /* enable auto negotiation */ + else { + if (lp->FullDuplex) + phydat |= (1 << 8); /* full duplex */ + else + phydat &= (~(1 << 8)); /* half duplex */ + + if (!lp->Port10) + phydat |= (1 << 13); /* 100 Mbps */ + else + phydat &= (~(1 << 13)); /* 10 Mbps */ + } + + if (lp->Loopback) + phydat |= (1 << 14); /* enable TX->RX loopback */ + + write_phy_reg(lp->PhyAddr, PHYREG_MODECTL, phydat); + msleep(500); + + phydat = read_phy_reg(lp->PhyAddr, PHYREG_MODECTL); + /* check for SMSC PHY */ + if ((read_phy_reg(lp->PhyAddr, PHYREG_PHYID1) == 0x7) && + ((read_phy_reg(lp->PhyAddr, PHYREG_PHYID2) & 0xfff0) == 0xC0A0)) { + /* + * we have an SMSC PHY, so request an interrupt + * on the link-down condition + */ + + /* enable interrupts */ + write_phy_reg(lp->PhyAddr, 30, 0x0ff); + } +} + +/**************************************************************************/ +void setup_system_regs(struct net_device *dev) +{ + int phyaddr; + unsigned short sysctl, phydat; + u32 opmode; + struct bf537mac_local *lp = netdev_priv(dev); + int count = 0; + + phyaddr = lp->PhyAddr; + + /* Enable PHY output */ + if (!(bfin_read_VR_CTL() & PHYCLKOE)) + bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE); + + /* MDC = 2.5 MHz */ + sysctl = SET_MDCDIV(24); + /* Odd word alignment for Receive Frame DMA word */ + /* Configure checksum support and receive frame word alignment */ +#if defined(BFIN_MAC_CSUM_OFFLOAD) + sysctl |= RXDWA | RXCKS; +#else + sysctl |= RXDWA; +#endif + bfin_write_EMAC_SYSCTL(sysctl); + /* auto negotiation on */ + /* full duplex */ + /* 100 Mbps */ + phydat = PHY_ANEG_EN | PHY_DUPLEX | PHY_SPD_SET; + write_phy_reg(phyaddr, PHYREG_MODECTL, phydat); + + /* test if full duplex is supported */ + do { + msleep(100); + phydat = read_phy_reg(phyaddr, PHYREG_MODESTAT); + if (count > 30) { + printk(KERN_NOTICE DRV_NAME ": Link is down\n"); + printk(KERN_NOTICE DRV_NAME + ": please check your network connection\n"); + break; + } + count++; + } while (!(phydat & 0x0004)); + + phydat
= read_phy_reg(phyaddr, PHYREG_ANLPAR); + + if ((phydat & 0x0100) || (phydat & 0x0040)) { + opmode = FDMODE; + } else { + opmode = 0; + printk(KERN_INFO DRV_NAME + ": Network is set to half duplex\n"); + } + +#if defined(CONFIG_BFIN_MAC_RMII) + opmode |= RMII; /* For Now only 100MBit are supported */ +#endif + + bfin_write_EMAC_OPMODE(opmode); + + bfin_write_EMAC_MMC_CTL(RSTC | CROLL); + + /* Initialize the TX DMA channel registers */ + bfin_write_DMA2_X_COUNT(0); + bfin_write_DMA2_X_MODIFY(4); + bfin_write_DMA2_Y_COUNT(0); + bfin_write_DMA2_Y_MODIFY(0); + + /* Initialize the RX DMA channel registers */ + bfin_write_DMA1_X_COUNT(0); + bfin_write_DMA1_X_MODIFY(4); + bfin_write_DMA1_Y_COUNT(0); + bfin_write_DMA1_Y_MODIFY(0); +} + +void setup_mac_addr(u8 * mac_addr) +{ + u32 addr_low = le32_to_cpu(*(__le32 *) & mac_addr[0]); + u16 addr_hi = le16_to_cpu(*(__le16 *) & mac_addr[4]); + + /* this depends on a little-endian machine */ + bfin_write_EMAC_ADDRLO(addr_low); + bfin_write_EMAC_ADDRHI(addr_hi); +} + +static void adjust_tx_list(void) +{ + int timeout_cnt = MAX_TIMEOUT_CNT; + + if (tx_list_head->status.status_word != 0 + && current_tx_ptr != tx_list_head) { + goto adjust_head; /* released something, just return; */ + } + + /* + * if nothing released, check wait condition + * current's next can not be the head, + * otherwise the dma will not stop as we want + */ + if (current_tx_ptr->next->next == tx_list_head) { + while (tx_list_head->status.status_word == 0) { + mdelay(10); + if (tx_list_head->status.status_word != 0 + || !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) { + goto adjust_head; + } + if (timeout_cnt-- < 0) { + printk(KERN_ERR DRV_NAME + ": wait for adjust tx list head timeout\n"); + break; + } + } + if (tx_list_head->status.status_word != 0) { + goto adjust_head; + } + } + + return; + +adjust_head: + do { + tx_list_head->desc_a.config &= ~DMAEN; + tx_list_head->status.status_word = 0; + if (tx_list_head->skb) { + dev_kfree_skb(tx_list_head->skb); + tx_list_head->skb = NULL; + } else { + printk(KERN_ERR DRV_NAME + ": no sk_buff in a transmitted frame!\n"); + } + tx_list_head = tx_list_head->next; + } while (tx_list_head->status.status_word != 0 + && current_tx_ptr != tx_list_head); + return; + +} + +static int bf537mac_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct bf537mac_local *lp = netdev_priv(dev); + unsigned int data; + + current_tx_ptr->skb = skb; + + /* + * Is skb->data always 16-bit aligned? + * Do we need to memcpy((char *)(tail->packet + 2), skb->data, len)? + */ + if ((((unsigned int)(skb->data)) & 0x02) == 2) { + /* move skb->data to current_tx_ptr payload */ + data = (unsigned int)(skb->data) - 2; + *((unsigned short *)data) = (unsigned short)(skb->len); + current_tx_ptr->desc_a.start_addr = (unsigned long)data; + /* this is important! 
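+ * The DMA engine fetches the frame directly from memory, so + * the length word and payload must be flushed from the data + * cache before the transfer is enabled.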
*/ + blackfin_dcache_flush_range(data, (data + (skb->len)) + 2); + + } else { + *((unsigned short *)(current_tx_ptr->packet)) = + (unsigned short)(skb->len); + memcpy((char *)(current_tx_ptr->packet + 2), skb->data, + (skb->len)); + current_tx_ptr->desc_a.start_addr = + (unsigned long)current_tx_ptr->packet; + if (current_tx_ptr->status.status_word != 0) + current_tx_ptr->status.status_word = 0; + blackfin_dcache_flush_range((unsigned int)current_tx_ptr-> + packet, + (unsigned int)(current_tx_ptr-> + packet + skb->len) + + 2); + } + + /* enable this packet's dma */ + current_tx_ptr->desc_a.config |= DMAEN; + + /* tx dma is running, just return */ + if (bfin_read_DMA2_IRQ_STATUS() & 0x08) + goto out; + + /* tx dma is not running */ + bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr->desc_a)); + /* dma enabled, read from memory, size is 6 */ + bfin_write_DMA2_CONFIG(current_tx_ptr->desc_a.config); + /* Turn on the EMAC tx */ + bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE); + +out: + adjust_tx_list(); + current_tx_ptr = current_tx_ptr->next; + dev->trans_start = jiffies; + lp->stats.tx_packets++; + lp->stats.tx_bytes += (skb->len); + return 0; +} + +static void bf537mac_rx(struct net_device *dev) +{ + struct sk_buff *skb, *new_skb; + struct bf537mac_local *lp = netdev_priv(dev); + unsigned short len; + + /* allocate a new skb for next time receive */ + skb = current_rx_ptr->skb; + new_skb = dev_alloc_skb(PKT_BUF_SZ + 2); + if (!new_skb) { + printk(KERN_NOTICE DRV_NAME + ": rx: low on mem - packet dropped\n"); + lp->stats.rx_dropped++; + goto out; + } + /* reserve 2 bytes for RXDWA padding */ + skb_reserve(new_skb, 2); + current_rx_ptr->skb = new_skb; + current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2; + + len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN); + skb_put(skb, len); + blackfin_dcache_invalidate_range((unsigned long)skb->head, + (unsigned long)skb->tail); + + dev->last_rx = jiffies; + skb->dev = dev; + skb->protocol = eth_type_trans(skb, dev); +#if defined(BFIN_MAC_CSUM_OFFLOAD) + skb->csum = current_rx_ptr->status.ip_payload_csum; + skb->ip_summed = CHECKSUM_PARTIAL; +#endif + + netif_rx(skb); + lp->stats.rx_packets++; + lp->stats.rx_bytes += len; + current_rx_ptr->status.status_word = 0x00000000; + current_rx_ptr = current_rx_ptr->next; + +out: + return; +} + +/* interrupt routine to handle rx and error signal */ +static irqreturn_t bf537mac_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + int number = 0; + +get_one_packet: + if (current_rx_ptr->status.status_word == 0) { + /* no more new packet received */ + if (number == 0) { + if (current_rx_ptr->next->status.status_word != 0) { + current_rx_ptr = current_rx_ptr->next; + goto real_rx; + } + } + bfin_write_DMA1_IRQ_STATUS(bfin_read_DMA1_IRQ_STATUS() | + DMA_DONE | DMA_ERR); + return IRQ_HANDLED; + } + +real_rx: + bf537mac_rx(dev); + number++; + goto get_one_packet; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void bf537mac_poll(struct net_device *dev) +{ + disable_irq(IRQ_MAC_RX); + bf537mac_interrupt(IRQ_MAC_RX, dev); + enable_irq(IRQ_MAC_RX); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static void bf537mac_reset(void) +{ + unsigned int opmode; + + opmode = bfin_read_EMAC_OPMODE(); + opmode &= (~RE); + opmode &= (~TE); + /* Turn off the EMAC */ + bfin_write_EMAC_OPMODE(opmode); +} + +/* + * Enable Interrupts, Receive, and Transmit + */ +static int bf537mac_enable(struct net_device *dev) +{ + u32 opmode; + + pr_debug("%s: %s\n", dev->name, __FUNCTION__); 
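+ + /* From here on the circular descriptor list built in + * desc_list_init() drives the receive DMA channel. */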
+ + /* Set RX DMA */ + bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a)); + bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config); + + /* Wait until MII is done */ + poll_mdc_done(); + + /* We enable only RX here */ + /* ASTP : Enable Automatic Pad Stripping + PR : Promiscuous Mode for test + PSF : Receive frames with total length less than 64 bytes. + FDMODE : Full Duplex Mode + LB : Internal Loopback for test + RE : Receiver Enable */ + opmode = bfin_read_EMAC_OPMODE(); + if (opmode & FDMODE) + opmode |= PSF; + else + opmode |= DRO | DC | PSF; + opmode |= RE; + +#if defined(CONFIG_BFIN_MAC_RMII) + opmode |= RMII; /* For now, only 100 Mbit is supported */ +#ifdef CONFIG_BF_REV_0_2 + opmode |= TE; +#endif +#endif + /* Turn on the EMAC rx */ + bfin_write_EMAC_OPMODE(opmode); + + return 0; +} + +/* Our watchdog timed out. Called by the networking layer */ +static void bf537mac_timeout(struct net_device *dev) +{ + pr_debug("%s: %s\n", dev->name, __FUNCTION__); + + bf537mac_reset(); + + /* reset the tx queue */ + tx_list_tail = tx_list_head->next; + + bf537mac_enable(dev); + + /* We can accept TX packets again */ + dev->trans_start = jiffies; + netif_wake_queue(dev); +} + +/* + * Get the current statistics. + * This may be called with the card open or closed. + */ +static struct net_device_stats *bf537mac_query_statistics(struct net_device + *dev) +{ + struct bf537mac_local *lp = netdev_priv(dev); + + pr_debug("%s: %s\n", dev->name, __FUNCTION__); + + return &lp->stats; +} + +/* + * This routine will, depending on the values passed to it, + * either make the device accept multicast packets, put it into + * promiscuous mode (for tcpdump and cousins), or accept + * a select set of multicast packets (see the sketch below) + */ +static void bf537mac_set_multicast_list(struct net_device *dev) +{ + u32 sysctl; + + if (dev->flags & IFF_PROMISC) { + printk(KERN_INFO "%s: set to promisc mode\n", dev->name); + sysctl = bfin_read_EMAC_OPMODE(); + sysctl |= RAF; + bfin_write_EMAC_OPMODE(sysctl); + } else if (dev->flags & IFF_ALLMULTI || dev->mc_count) { + /* accept all multicast */ + sysctl = bfin_read_EMAC_OPMODE(); + sysctl |= PAM; + bfin_write_EMAC_OPMODE(sysctl); + } else { + /* clear promisc or multicast mode */ + sysctl = bfin_read_EMAC_OPMODE(); + sysctl &= ~(RAF | PAM); + bfin_write_EMAC_OPMODE(sysctl); + } +} + +/* + * this puts the device in an inactive state + */ +static void bf537mac_shutdown(struct net_device *dev) +{ + /* Turn off the EMAC */ + bfin_write_EMAC_OPMODE(0x00000000); + /* Turn off the EMAC RX DMA */ + bfin_write_DMA1_CONFIG(0x0000); + bfin_write_DMA2_CONFIG(0x0000); +} + +/* + * Open and Initialize the interface + * + * Set up everything, reset the card, etc. + */ +static int bf537mac_open(struct net_device *dev) +{ + pr_debug("%s: %s\n", dev->name, __FUNCTION__); + + /* + * Check that the address is valid. If it's not, refuse + * to bring the device up. The user must specify an + * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx + */ + if (!is_valid_ether_addr(dev->dev_addr)) { + printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n"); + return -EINVAL; + } + + /* initialize the rx and tx descriptor lists */ + desc_list_init(); + + bf537mac_setphy(dev); + setup_system_regs(dev); + bf537mac_reset(); + bf537mac_enable(dev); + + pr_debug("hardware init finished\n"); + netif_start_queue(dev); + netif_carrier_on(dev); + + return 0; +}
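The receive-filter policy in bf537mac_set_multicast_list() above reduces to a pure function of the interface flags and the multicast list length. A minimal stand-alone sketch of that three-way decision; the flag and OPMODE bit values below are illustrative stand-ins, not the real Blackfin or netdevice constants:

#include <stdio.h>

/* Illustrative stand-ins; the real values come from the Blackfin EMAC
 * and netdevice headers. */
#define RAF          0x00000020u /* receive all frames (promiscuous) */
#define PAM          0x00000200u /* pass all multicast */
#define IFF_PROMISC  0x100u
#define IFF_ALLMULTI 0x200u

/* Same decision as bf537mac_set_multicast_list(): promiscuous beats
 * all-multicast, which beats the default filtered mode. */
static unsigned int rx_filter_bits(unsigned int if_flags, int mc_count)
{
    if (if_flags & IFF_PROMISC)
        return RAF;
    if ((if_flags & IFF_ALLMULTI) || mc_count)
        return PAM;
    return 0; /* clear both RAF and PAM */
}

int main(void)
{
    printf("promisc:  %#x\n", rx_filter_bits(IFF_PROMISC, 0));
    printf("allmulti: %#x\n", rx_filter_bits(IFF_ALLMULTI, 0));
    printf("mc list:  %#x\n", rx_filter_bits(0, 3));
    printf("unicast:  %#x\n", rx_filter_bits(0, 0));
    return 0;
}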
+ +/* + * + * this makes the board clean up everything that it can + * and not talk to the outside world. Caused by + * an 'ifconfig ethX down' + */ +static int bf537mac_close(struct net_device *dev) +{ + pr_debug("%s: %s\n", dev->name, __FUNCTION__); + + netif_stop_queue(dev); + netif_carrier_off(dev); + + /* clear everything */ + bf537mac_shutdown(dev); + + /* free the rx/tx buffers */ + desc_list_free(); + + return 0; +} + +static int __init bf537mac_probe(struct net_device *dev) +{ + struct bf537mac_local *lp = netdev_priv(dev); + int retval; + + /* Grab the MAC address from the EMAC address registers */ + *(__le32 *) (&(dev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO()); + *(__le16 *) (&(dev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI()); + + /* probe mac */ + /* TODO: how to probe? which register is the revision register? */ + bfin_write_EMAC_ADDRLO(0x12345678); + if (bfin_read_EMAC_ADDRLO() != 0x12345678) { + pr_debug("can't detect bf537 mac!\n"); + retval = -ENODEV; + goto err_out; + } + + /* set the GPIO pins to Ethernet mode */ + retval = setup_pin_mux(1); + + if (retval) + return retval; + + /* Is it valid? (Did the bootloader initialize it?) */ + if (!is_valid_ether_addr(dev->dev_addr)) { + /* Grab the MAC from the board somehow; this is done in + arch/blackfin/mach-bf537/boards/eth_mac.c */ + get_bf537_ether_addr(dev->dev_addr); + } + + /* If still not valid, get a random one */ + if (!is_valid_ether_addr(dev->dev_addr)) { + random_ether_addr(dev->dev_addr); + } + + setup_mac_addr(dev->dev_addr); + + /* Fill in the fields of the device structure with ethernet values. */ + ether_setup(dev); + + dev->open = bf537mac_open; + dev->stop = bf537mac_close; + dev->hard_start_xmit = bf537mac_hard_start_xmit; + dev->tx_timeout = bf537mac_timeout; + dev->get_stats = bf537mac_query_statistics; + dev->set_multicast_list = bf537mac_set_multicast_list; +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = bf537mac_poll; +#endif + + /* fill in some of the fields */ + lp->version = 1; + lp->PhyAddr = 0x01; + lp->CLKIN = 25; + lp->FullDuplex = 0; + lp->Negotiate = 1; + lp->FlowControl = 0; + spin_lock_init(&lp->lock); + + /* now, enable interrupts */ + /* register irq handler */ + if (request_irq + (IRQ_MAC_RX, bf537mac_interrupt, IRQF_DISABLED | IRQF_SHARED, + "BFIN537_MAC_RX", dev)) { + printk(KERN_WARNING DRV_NAME + ": Unable to attach Blackfin MAC RX interrupt\n"); + return -EBUSY; + } + + /* Enable PHY output early */ + if (!(bfin_read_VR_CTL() & PHYCLKOE)) + bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE); + + retval = register_netdev(dev); + if (retval == 0) { + /* now, print out the card info, in a short format. */ + printk(KERN_INFO "%s: Version %s, %s\n", + DRV_NAME, DRV_VERSION, DRV_DESC); + } + +err_out: + return retval; +}
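The two reads at the top of bf537mac_probe() pack the six-byte station address into dev_addr little-endian: the first four octets come from EMAC_ADDRLO, the last two from EMAC_ADDRHI. A stand-alone model of that layout (the register values are made up for illustration):

#include <stdio.h>

/* Model of how the station address is recovered from the EMAC
 * ADDRLO/ADDRHI registers: four octets in the 32-bit low register,
 * two in the high one, stored little endian. */
static void mac_from_regs(unsigned int addrlo, unsigned short addrhi,
                          unsigned char mac[6])
{
    int i;

    for (i = 0; i < 4; i++)
        mac[i] = (addrlo >> (8 * i)) & 0xff;
    mac[4] = addrhi & 0xff;
    mac[5] = (addrhi >> 8) & 0xff;
}

int main(void)
{
    unsigned char mac[6];

    mac_from_regs(0x78563412, 0xbc9a, mac); /* illustrative values */
    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    return 0; /* prints 12:34:56:78:9a:bc */
}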
+ +static int bfin_mac_probe(struct platform_device *pdev) +{ + struct net_device *ndev; + + ndev = alloc_etherdev(sizeof(struct bf537mac_local)); + if (!ndev) { + printk(KERN_WARNING DRV_NAME ": could not allocate device\n"); + return -ENOMEM; + } + + SET_MODULE_OWNER(ndev); + SET_NETDEV_DEV(ndev, &pdev->dev); + + platform_set_drvdata(pdev, ndev); + + if (bf537mac_probe(ndev) != 0) { + platform_set_drvdata(pdev, NULL); + free_netdev(ndev); + printk(KERN_WARNING DRV_NAME ": not found\n"); + return -ENODEV; + } + + return 0; +} + +static int bfin_mac_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + + unregister_netdev(ndev); + + free_irq(IRQ_MAC_RX, ndev); + + free_netdev(ndev); + + setup_pin_mux(0); + + return 0; +} + +static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t state) +{ + return 0; +} + +static int bfin_mac_resume(struct platform_device *pdev) +{ + return 0; +} + +static struct platform_driver bfin_mac_driver = { + .probe = bfin_mac_probe, + .remove = bfin_mac_remove, + .resume = bfin_mac_resume, + .suspend = bfin_mac_suspend, + .driver = { + .name = DRV_NAME, + }, +}; + +static int __init bfin_mac_init(void) +{ + return platform_driver_register(&bfin_mac_driver); +} + +module_init(bfin_mac_init); + +static void __exit bfin_mac_cleanup(void) +{ + platform_driver_unregister(&bfin_mac_driver); +} + +module_exit(bfin_mac_cleanup);
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h new file mode 100644
index 000000000000..af87189b85fa
--- /dev/null
+++ b/drivers/net/bfin_mac.h
@@ -0,0 +1,132 @@ +/* + * File: drivers/net/bfin_mac.h + * Based on: + * Maintainer: + * Bryan Wu <bryan.wu@analog.com> + * + * Original author: + * Luke Yang <luke.yang@analog.com> + * + * Created: + * Description: + * + * Modified: + * Copyright 2004-2006 Analog Devices Inc. + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software ; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation ; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY ; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program ; see the file COPYING. + * If not, write to the Free Software Foundation, + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */
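The register map below is enough to bring the PHY out of reset and start autonegotiation. A sketch of that one write, assuming a hypothetical mdio_write() accessor; the driver's real MDIO helpers are not part of this header:

/* Hypothetical MDIO accessor -- a stand-in for the driver's real
 * helper, which is not shown in this patch. */
extern void mdio_write(int phy_addr, int reg, unsigned short val);

#define PHYREG_MODECTL 0x0000
#define PHY_RESET      0x8000
#define PHY_ANEG_EN    0x1000

/* Reset the PHY and enable autonegotiation in one shot: both bits go
 * into the mode control register at address 0. */
static void phy_restart_aneg(int phy_addr)
{
    mdio_write(phy_addr, PHYREG_MODECTL, PHY_RESET | PHY_ANEG_EN);
}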
+ +/* + * PHY REGISTER NAMES + */ +#define PHYREG_MODECTL 0x0000 +#define PHYREG_MODESTAT 0x0001 +#define PHYREG_PHYID1 0x0002 +#define PHYREG_PHYID2 0x0003 +#define PHYREG_ANAR 0x0004 +#define PHYREG_ANLPAR 0x0005 +#define PHYREG_ANER 0x0006 +#define PHYREG_NSR 0x0010 +#define PHYREG_LBREMR 0x0011 +#define PHYREG_REC 0x0012 +#define PHYREG_10CFG 0x0013 +#define PHYREG_PHY1_1 0x0014 +#define PHYREG_PHY1_2 0x0015 +#define PHYREG_PHY2 0x0016 +#define PHYREG_TW_1 0x0017 +#define PHYREG_TW_2 0x0018 +#define PHYREG_TEST 0x0019 + +#define PHY_RESET 0x8000 +#define PHY_ANEG_EN 0x1000 +#define PHY_DUPLEX 0x0100 +#define PHY_SPD_SET 0x2000 + +#define BFIN_MAC_CSUM_OFFLOAD + +struct dma_descriptor { + struct dma_descriptor *next_dma_desc; + unsigned long start_addr; + unsigned short config; + unsigned short x_count; +}; + +struct status_area_rx { +#if defined(BFIN_MAC_CSUM_OFFLOAD) + unsigned short ip_hdr_csum; /* IP header checksum */ + /* IP payload (UDP, TCP, or other) checksum */ + unsigned short ip_payload_csum; +#endif + unsigned long status_word; /* the frame status word */ +}; + +struct status_area_tx { + unsigned long status_word; /* the frame status word */ +}; + +/* use two descriptors for a packet */ +struct net_dma_desc_rx { + struct net_dma_desc_rx *next; + struct sk_buff *skb; + struct dma_descriptor desc_a; + struct dma_descriptor desc_b; + struct status_area_rx status; +}; + +/* use two descriptors for a packet */ +struct net_dma_desc_tx { + struct net_dma_desc_tx *next; + struct sk_buff *skb; + struct dma_descriptor desc_a; + struct dma_descriptor desc_b; + unsigned char packet[1560]; + struct status_area_tx status; +}; + +struct bf537mac_local { + /* + * these are things that the kernel wants me to keep, so users + * can find out semi-useless statistics of how well the card is + * performing + */ + struct net_device_stats stats; + + int version; + + int FlowEnabled; /* record if data flow is active */ + int EtherIntIVG; /* IVG for the ethernet interrupt */ + int RXIVG; /* IVG for the RX completion */ + int TXIVG; /* IVG for the TX completion */ + int PhyAddr; /* PHY address */ + int OpMode; /* set these bits in the OPMODE regs */ + int Port10; /* set port speed to 10 Mbit/s */ + int GenChksums; /* IP checksums to be calculated */ + int NoRcveLnth; /* don't insert recv length at start of buffer */ + int StripPads; /* remove trailing pad bytes */ + int FullDuplex; /* set full duplex mode */ + int Negotiate; /* enable auto negotiation */ + int Loopback; /* loopback at the PHY */ + int Cache; /* Buffers may be cached */ + int FlowControl; /* flow control active */ + int CLKIN; /* clock in value in MHz */ + unsigned short IntMask; /* interrupt mask */ + unsigned char Mac[6]; /* MAC address of the board */ + spinlock_t lock; +}; + +extern void get_bf537_ether_addr(char *addr);
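The RX machinery above hangs one sk_buff off each net_dma_desc_rx and advances around the ring until it meets a descriptor whose status word is still zero, exactly as bf537mac_interrupt() does earlier in this patch. A simplified stand-alone model of that traversal, with plain buffers standing in for sk_buffs:

#include <stdio.h>
#include <string.h>

#define RING_LEN 4

/* Simplified mirror of struct net_dma_desc_rx: just the link, the
 * status word, and a payload buffer in place of the sk_buff. */
struct rx_desc {
    struct rx_desc *next;
    unsigned long status_word; /* nonzero once DMA has completed */
    unsigned char buf[64];
};

static struct rx_desc ring[RING_LEN];
static struct rx_desc *current_rx;

static void ring_init(void)
{
    int i;

    for (i = 0; i < RING_LEN; i++)
        ring[i].next = &ring[(i + 1) % RING_LEN];
    current_rx = &ring[0];
}

/* Walk the ring the way the RX interrupt does: consume completed
 * descriptors, clear their status, stop at the first idle one. */
static void rx_poll(void)
{
    while (current_rx->status_word != 0) {
        printf("consumed packet: %s\n", (char *)current_rx->buf);
        current_rx->status_word = 0; /* hand back to the DMA engine */
        current_rx = current_rx->next;
    }
}

int main(void)
{
    ring_init();
    strcpy((char *)ring[0].buf, "pkt0"); /* pretend DMA completed */
    ring[0].status_word = 1;
    strcpy((char *)ring[1].buf, "pkt1");
    ring[1].status_word = 1;
    rx_poll();
    return 0;
}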
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index ce3ed67a878e..a729da061bbb 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -54,8 +54,8 @@ #define DRV_MODULE_NAME "bnx2" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "1.5.11" -#define DRV_MODULE_RELDATE "June 4, 2007" +#define DRV_MODULE_VERSION "1.6.3" +#define DRV_MODULE_RELDATE "July 16, 2007" #define RUN_AT(x) (jiffies + (x))
@@ -126,91 +126,102 @@ static struct pci_device_id bnx2_pci_tbl[] = { static struct flash_spec flash_table[] = { +#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE) +#define NONBUFFERED_FLAGS (BNX2_NV_WREN) /* Slow EEPROM */ {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, - 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, + BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, "EEPROM - slow"}, /* Expansion entry 0001 */ {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 0001"}, /* Saifun SA25F010 (non-buffered flash) */ /* strap, cfg1, & write1 need updates */ {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, "Non-buffered flash (128kB)"}, /* Saifun SA25F020 (non-buffered flash) */ /* strap, cfg1, & write1 need updates */ {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4, "Non-buffered flash (256kB)"}, /* Expansion entry 0100 */ {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 0100"}, /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, - 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, "Entry 0101: ST M45PE10 (128kB non-bufferred)"}, /* Entry 0110: ST M45PE20 (non-buffered flash)*/ {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, - 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, "Entry 0110: ST M45PE20 (256kB non-bufferred)"}, /* Saifun SA25F005 (non-buffered flash) */ /* strap, cfg1, & write1 need updates */ {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, "Non-buffered flash (64kB)"}, /* Fast EEPROM */ {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, - 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, + BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, "EEPROM - fast"}, /* Expansion entry 1001 */ {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1001"}, /* Expansion entry 1010 */ {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1010"}, /* ATMEL AT45DB011B (buffered flash) */ {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400, - 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, + BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, "Buffered flash (128kB)"}, /* Expansion entry 
1100 */ {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1100"}, /* Expansion entry 1101 */ {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, - 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, SAIFUN_FLASH_BYTE_ADDR_MASK, 0, "Entry 1101"}, /* Ateml Expansion entry 1110 */ {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400, - 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, + BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, BUFFERED_FLASH_BYTE_ADDR_MASK, 0, "Entry 1110 (Atmel)"}, /* ATMEL AT45DB021B (buffered flash) */ {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, - 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, + BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, "Buffered flash (256kB)"}, }; +static struct flash_spec flash_5709 = { + .flags = BNX2_NV_BUFFERED, + .page_bits = BCM5709_FLASH_PAGE_BITS, + .page_size = BCM5709_FLASH_PAGE_SIZE, + .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK, + .total_size = BUFFERED_FLASH_TOTAL_SIZE*2, + .name = "5709 Buffered flash (256kB)", +}; + MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); static inline u32 bnx2_tx_avail(struct bnx2 *bp) @@ -550,6 +561,9 @@ bnx2_report_fw_link(struct bnx2 *bp) { u32 fw_link_status = 0; + if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) + return; + if (bp->link_up) { u32 bmsr; @@ -601,12 +615,21 @@ bnx2_report_fw_link(struct bnx2 *bp) REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status); } +static char * +bnx2_xceiver_str(struct bnx2 *bp) +{ + return ((bp->phy_port == PORT_FIBRE) ? "SerDes" : + ((bp->phy_flags & PHY_SERDES_FLAG) ? 
"Remote Copper" : + "Copper")); +} + static void bnx2_report_link(struct bnx2 *bp) { if (bp->link_up) { netif_carrier_on(bp->dev); - printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name); + printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name, + bnx2_xceiver_str(bp)); printk("%d Mbps ", bp->line_speed); @@ -630,7 +653,8 @@ bnx2_report_link(struct bnx2 *bp) } else { netif_carrier_off(bp->dev); - printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name); + printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name, + bnx2_xceiver_str(bp)); } bnx2_report_fw_link(bp); @@ -1100,6 +1124,9 @@ bnx2_set_link(struct bnx2 *bp) return 0; } + if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) + return 0; + link_up = bp->link_up; bnx2_enable_bmsr1(bp); @@ -1210,12 +1237,74 @@ bnx2_phy_get_pause_adv(struct bnx2 *bp) return adv; } +static int bnx2_fw_sync(struct bnx2 *, u32, int); + static int -bnx2_setup_serdes_phy(struct bnx2 *bp) +bnx2_setup_remote_phy(struct bnx2 *bp, u8 port) +{ + u32 speed_arg = 0, pause_adv; + + pause_adv = bnx2_phy_get_pause_adv(bp); + + if (bp->autoneg & AUTONEG_SPEED) { + speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG; + if (bp->advertising & ADVERTISED_10baseT_Half) + speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF; + if (bp->advertising & ADVERTISED_10baseT_Full) + speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL; + if (bp->advertising & ADVERTISED_100baseT_Half) + speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF; + if (bp->advertising & ADVERTISED_100baseT_Full) + speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL; + if (bp->advertising & ADVERTISED_1000baseT_Full) + speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL; + if (bp->advertising & ADVERTISED_2500baseX_Full) + speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL; + } else { + if (bp->req_line_speed == SPEED_2500) + speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL; + else if (bp->req_line_speed == SPEED_1000) + speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL; + else if (bp->req_line_speed == SPEED_100) { + if (bp->req_duplex == DUPLEX_FULL) + speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL; + else + speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF; + } else if (bp->req_line_speed == SPEED_10) { + if (bp->req_duplex == DUPLEX_FULL) + speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL; + else + speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF; + } + } + + if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP)) + speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE; + if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM)) + speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE; + + if (port == PORT_TP) + speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE | + BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED; + + REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg); + + spin_unlock_bh(&bp->phy_lock); + bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0); + spin_lock_bh(&bp->phy_lock); + + return 0; +} + +static int +bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port) { u32 adv, bmcr; u32 new_adv = 0; + if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) + return (bnx2_setup_remote_phy(bp, port)); + if (!(bp->autoneg & AUTONEG_SPEED)) { u32 new_bmcr; int force_link_down = 0; @@ -1323,7 +1412,9 @@ bnx2_setup_serdes_phy(struct bnx2 *bp) } #define ETHTOOL_ALL_FIBRE_SPEED \ - (ADVERTISED_1000baseT_Full) + (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? 
\ + (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\ + (ADVERTISED_1000baseT_Full) #define ETHTOOL_ALL_COPPER_SPEED \ (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \ @@ -1335,6 +1426,188 @@ bnx2_setup_serdes_phy(struct bnx2 *bp) #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL) +static void +bnx2_set_default_remote_link(struct bnx2 *bp) +{ + u32 link; + + if (bp->phy_port == PORT_TP) + link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK); + else + link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK); + + if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) { + bp->req_line_speed = 0; + bp->autoneg |= AUTONEG_SPEED; + bp->advertising = ADVERTISED_Autoneg; + if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF) + bp->advertising |= ADVERTISED_10baseT_Half; + if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL) + bp->advertising |= ADVERTISED_10baseT_Full; + if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF) + bp->advertising |= ADVERTISED_100baseT_Half; + if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL) + bp->advertising |= ADVERTISED_100baseT_Full; + if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL) + bp->advertising |= ADVERTISED_1000baseT_Full; + if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL) + bp->advertising |= ADVERTISED_2500baseX_Full; + } else { + bp->autoneg = 0; + bp->advertising = 0; + bp->req_duplex = DUPLEX_FULL; + if (link & BNX2_NETLINK_SET_LINK_SPEED_10) { + bp->req_line_speed = SPEED_10; + if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF) + bp->req_duplex = DUPLEX_HALF; + } + if (link & BNX2_NETLINK_SET_LINK_SPEED_100) { + bp->req_line_speed = SPEED_100; + if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF) + bp->req_duplex = DUPLEX_HALF; + } + if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL) + bp->req_line_speed = SPEED_1000; + if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL) + bp->req_line_speed = SPEED_2500; + } +} + +static void +bnx2_set_default_link(struct bnx2 *bp) +{ + if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) + return bnx2_set_default_remote_link(bp); + + bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL; + bp->req_line_speed = 0; + if (bp->phy_flags & PHY_SERDES_FLAG) { + u32 reg; + + bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg; + + reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG); + reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK; + if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) { + bp->autoneg = 0; + bp->req_line_speed = bp->line_speed = SPEED_1000; + bp->req_duplex = DUPLEX_FULL; + } + } else + bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg; +} + +static void +bnx2_send_heart_beat(struct bnx2 *bp) +{ + u32 msg; + u32 addr; + + spin_lock(&bp->indirect_lock); + msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK); + addr = bp->shmem_base + BNX2_DRV_PULSE_MB; + REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr); + REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg); + spin_unlock(&bp->indirect_lock); +} + +static void +bnx2_remote_phy_event(struct bnx2 *bp) +{ + u32 msg; + u8 link_up = bp->link_up; + u8 old_port; + + msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS); + + if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED) + bnx2_send_heart_beat(bp); + + msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED; + + if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN) + bp->link_up = 0; + else { + u32 speed; + + bp->link_up = 1; + speed = msg & BNX2_LINK_STATUS_SPEED_MASK; + bp->duplex = DUPLEX_FULL; + switch (speed) { + case BNX2_LINK_STATUS_10HALF: + bp->duplex = DUPLEX_HALF; + case 
BNX2_LINK_STATUS_10FULL: + bp->line_speed = SPEED_10; + break; + case BNX2_LINK_STATUS_100HALF: + bp->duplex = DUPLEX_HALF; + case BNX2_LINK_STATUS_100BASE_T4: + case BNX2_LINK_STATUS_100FULL: + bp->line_speed = SPEED_100; + break; + case BNX2_LINK_STATUS_1000HALF: + bp->duplex = DUPLEX_HALF; + case BNX2_LINK_STATUS_1000FULL: + bp->line_speed = SPEED_1000; + break; + case BNX2_LINK_STATUS_2500HALF: + bp->duplex = DUPLEX_HALF; + case BNX2_LINK_STATUS_2500FULL: + bp->line_speed = SPEED_2500; + break; + default: + bp->line_speed = 0; + break; + } + + spin_lock(&bp->phy_lock); + bp->flow_ctrl = 0; + if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != + (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) { + if (bp->duplex == DUPLEX_FULL) + bp->flow_ctrl = bp->req_flow_ctrl; + } else { + if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED) + bp->flow_ctrl |= FLOW_CTRL_TX; + if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED) + bp->flow_ctrl |= FLOW_CTRL_RX; + } + + old_port = bp->phy_port; + if (msg & BNX2_LINK_STATUS_SERDES_LINK) + bp->phy_port = PORT_FIBRE; + else + bp->phy_port = PORT_TP; + + if (old_port != bp->phy_port) + bnx2_set_default_link(bp); + + spin_unlock(&bp->phy_lock); + } + if (bp->link_up != link_up) + bnx2_report_link(bp); + + bnx2_set_mac_link(bp); +} + +static int +bnx2_set_remote_link(struct bnx2 *bp) +{ + u32 evt_code; + + evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB); + switch (evt_code) { + case BNX2_FW_EVT_CODE_LINK_EVENT: + bnx2_remote_phy_event(bp); + break; + case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT: + default: + bnx2_send_heart_beat(bp); + break; + } + return 0; +} + static int bnx2_setup_copper_phy(struct bnx2 *bp) { @@ -1433,13 +1706,13 @@ bnx2_setup_copper_phy(struct bnx2 *bp) } static int -bnx2_setup_phy(struct bnx2 *bp) +bnx2_setup_phy(struct bnx2 *bp, u8 port) { if (bp->loopback == MAC_LOOPBACK) return 0; if (bp->phy_flags & PHY_SERDES_FLAG) { - return (bnx2_setup_serdes_phy(bp)); + return (bnx2_setup_serdes_phy(bp, port)); } else { return (bnx2_setup_copper_phy(bp)); @@ -1659,6 +1932,9 @@ bnx2_init_phy(struct bnx2 *bp) REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); + if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) + goto setup_phy; + bnx2_read_phy(bp, MII_PHYSID1, &val); bp->phy_id = val << 16; bnx2_read_phy(bp, MII_PHYSID2, &val); @@ -1676,7 +1952,9 @@ bnx2_init_phy(struct bnx2 *bp) rc = bnx2_init_copper_phy(bp); } - bnx2_setup_phy(bp); +setup_phy: + if (!rc) + rc = bnx2_setup_phy(bp, bp->phy_port); return rc; } @@ -1984,6 +2262,9 @@ bnx2_phy_int(struct bnx2 *bp) bnx2_set_link(bp); spin_unlock(&bp->phy_lock); } + if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT)) + bnx2_set_remote_link(bp); + } static void @@ -2297,6 +2578,7 @@ bnx2_interrupt(int irq, void *dev_instance) { struct net_device *dev = dev_instance; struct bnx2 *bp = netdev_priv(dev); + struct status_block *sblk = bp->status_blk; /* When using INTx, it is possible for the interrupt to arrive * at the CPU before the status block posted prior to the @@ -2304,7 +2586,7 @@ bnx2_interrupt(int irq, void *dev_instance) * When using MSI, the MSI message will always complete after * the status block write. 
*/ - if ((bp->status_blk->status_idx == bp->last_status_idx) && + if ((sblk->status_idx == bp->last_status_idx) && (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) & BNX2_PCICFG_MISC_STATUS_INTA_VALUE)) return IRQ_NONE; @@ -2313,16 +2595,25 @@ bnx2_interrupt(int irq, void *dev_instance) BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | BNX2_PCICFG_INT_ACK_CMD_MASK_INT); + /* Read back to deassert IRQ immediately to avoid too many + * spurious interrupts. + */ + REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD); + /* Return here if interrupt is shared and is disabled. */ if (unlikely(atomic_read(&bp->intr_sem) != 0)) return IRQ_HANDLED; - netif_rx_schedule(dev); + if (netif_rx_schedule_prep(dev)) { + bp->last_status_idx = sblk->status_idx; + __netif_rx_schedule(dev); + } return IRQ_HANDLED; } -#define STATUS_ATTN_EVENTS STATUS_ATTN_BITS_LINK_STATE +#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \ + STATUS_ATTN_BITS_TIMER_ABORT) static inline int bnx2_has_work(struct bnx2 *bp) @@ -3009,7 +3300,7 @@ bnx2_enable_nvram_write(struct bnx2 *bp) val = REG_RD(bp, BNX2_MISC_CFG); REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI); - if (!bp->flash_info->buffered) { + if (bp->flash_info->flags & BNX2_NV_WREN) { int j; REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); @@ -3069,7 +3360,7 @@ bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset) u32 cmd; int j; - if (bp->flash_info->buffered) + if (bp->flash_info->flags & BNX2_NV_BUFFERED) /* Buffered flash, no erase needed */ return 0; @@ -3112,8 +3403,8 @@ bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags) /* Build the command word. */ cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags; - /* Calculate an offset of a buffered flash. */ - if (bp->flash_info->buffered) { + /* Calculate an offset of a buffered flash, not needed for 5709. */ + if (bp->flash_info->flags & BNX2_NV_TRANSLATE) { offset = ((offset / bp->flash_info->page_size) << bp->flash_info->page_bits) + (offset % bp->flash_info->page_size); @@ -3159,8 +3450,8 @@ bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags) /* Build the command word. */ cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags; - /* Calculate an offset of a buffered flash. */ - if (bp->flash_info->buffered) { + /* Calculate an offset of a buffered flash, not needed for 5709. */ + if (bp->flash_info->flags & BNX2_NV_TRANSLATE) { offset = ((offset / bp->flash_info->page_size) << bp->flash_info->page_bits) + (offset % bp->flash_info->page_size); @@ -3198,15 +3489,19 @@ static int bnx2_init_nvram(struct bnx2 *bp) { u32 val; - int j, entry_count, rc; + int j, entry_count, rc = 0; struct flash_spec *flash; + if (CHIP_NUM(bp) == CHIP_NUM_5709) { + bp->flash_info = &flash_5709; + goto get_flash_size; + } + /* Determine the selected interface. 
*/ val = REG_RD(bp, BNX2_NVM_CFG1); entry_count = sizeof(flash_table) / sizeof(struct flash_spec); - rc = 0; if (val & 0x40000000) { /* Flash interface has been reconfigured */ @@ -3262,6 +3557,7 @@ bnx2_init_nvram(struct bnx2 *bp) return -ENODEV; } +get_flash_size: val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2); val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK; if (val) @@ -3426,7 +3722,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, buf = align_buf; } - if (bp->flash_info->buffered == 0) { + if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { flash_buffer = kmalloc(264, GFP_KERNEL); if (flash_buffer == NULL) { rc = -ENOMEM; @@ -3459,7 +3755,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, bnx2_enable_nvram_access(bp); cmd_flags = BNX2_NVM_COMMAND_FIRST; - if (bp->flash_info->buffered == 0) { + if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { int j; /* Read the whole page into the buffer @@ -3487,7 +3783,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, /* Loop to write back the buffer data from page_start to * data_start */ i = 0; - if (bp->flash_info->buffered == 0) { + if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { /* Erase the page */ if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0) goto nvram_write_end; @@ -3511,7 +3807,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, /* Loop to write the new data from data_start to data_end */ for (addr = data_start; addr < data_end; addr += 4, i += 4) { if ((addr == page_end - 4) || - ((bp->flash_info->buffered) && + ((bp->flash_info->flags & BNX2_NV_BUFFERED) && (addr == data_end - 4))) { cmd_flags |= BNX2_NVM_COMMAND_LAST; @@ -3528,7 +3824,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, /* Loop to write back the buffer data from data_end * to page_end */ - if (bp->flash_info->buffered == 0) { + if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { for (addr = data_end; addr < page_end; addr += 4, i += 4) { @@ -3562,6 +3858,36 @@ nvram_write_end: return rc; } +static void +bnx2_init_remote_phy(struct bnx2 *bp) +{ + u32 val; + + bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG; + if (!(bp->phy_flags & PHY_SERDES_FLAG)) + return; + + val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB); + if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE) + return; + + if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) { + if (netif_running(bp->dev)) { + val = BNX2_DRV_ACK_CAP_SIGNATURE | + BNX2_FW_CAP_REMOTE_PHY_CAPABLE; + REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB, + val); + } + bp->phy_flags |= REMOTE_PHY_CAP_FLAG; + + val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS); + if (val & BNX2_LINK_STATUS_SERDES_LINK) + bp->phy_port = PORT_FIBRE; + else + bp->phy_port = PORT_TP; + } +} + static int bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) { @@ -3642,6 +3968,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) if (rc) return rc; + spin_lock_bh(&bp->phy_lock); + bnx2_init_remote_phy(bp); + if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) + bnx2_set_default_remote_link(bp); + spin_unlock_bh(&bp->phy_lock); + if (CHIP_ID(bp) == CHIP_ID_5706_A0) { /* Adjust the voltage regular to two steps lower. The default * of this register is 0x0000000e. 
*/ @@ -3791,7 +4123,7 @@ bnx2_init_chip(struct bnx2 *bp) if (CHIP_NUM(bp) == CHIP_NUM_5708) REG_WR(bp, BNX2_HC_STATS_TICKS, 0); else - REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00); + REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks); REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ if (CHIP_ID(bp) == CHIP_ID_5706_A1) @@ -3811,10 +4143,6 @@ bnx2_init_chip(struct bnx2 *bp) REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS); - if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) & - BNX2_PORT_FEATURE_ASF_ENABLED) - bp->flags |= ASF_ENABLE_FLAG; - /* Initialize the receive filter. */ bnx2_set_rx_mode(bp->dev); @@ -3826,7 +4154,7 @@ bnx2_init_chip(struct bnx2 *bp) rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET, 0); - REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff); + REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT); REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS); udelay(20); @@ -4069,8 +4397,8 @@ bnx2_init_nic(struct bnx2 *bp) spin_lock_bh(&bp->phy_lock); bnx2_init_phy(bp); - spin_unlock_bh(&bp->phy_lock); bnx2_set_link(bp); + spin_unlock_bh(&bp->phy_lock); return 0; } @@ -4600,6 +4928,9 @@ bnx2_5706_serdes_timer(struct bnx2 *bp) static void bnx2_5708_serdes_timer(struct bnx2 *bp) { + if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) + return; + if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) { bp->serdes_an_pending = 0; return; @@ -4631,7 +4962,6 @@ static void bnx2_timer(unsigned long data) { struct bnx2 *bp = (struct bnx2 *) data; - u32 msg; if (!netif_running(bp->dev)) return; @@ -4639,8 +4969,7 @@ bnx2_timer(unsigned long data) if (atomic_read(&bp->intr_sem) != 0) goto bnx2_restart_timer; - msg = (u32) ++bp->fw_drv_pulse_wr_seq; - REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg); + bnx2_send_heart_beat(bp); bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT); @@ -5083,17 +5412,25 @@ static int bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct bnx2 *bp = netdev_priv(dev); + int support_serdes = 0, support_copper = 0; cmd->supported = SUPPORTED_Autoneg; - if (bp->phy_flags & PHY_SERDES_FLAG) { + if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) { + support_serdes = 1; + support_copper = 1; + } else if (bp->phy_port == PORT_FIBRE) + support_serdes = 1; + else + support_copper = 1; + + if (support_serdes) { cmd->supported |= SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE; if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) cmd->supported |= SUPPORTED_2500baseX_Full; - cmd->port = PORT_FIBRE; } - else { + if (support_copper) { cmd->supported |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | @@ -5101,9 +5438,10 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) SUPPORTED_1000baseT_Full | SUPPORTED_TP; - cmd->port = PORT_TP; } + spin_lock_bh(&bp->phy_lock); + cmd->port = bp->phy_port; cmd->advertising = bp->advertising; if (bp->autoneg & AUTONEG_SPEED) { @@ -5121,6 +5459,7 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->speed = -1; cmd->duplex = -1; } + spin_unlock_bh(&bp->phy_lock); cmd->transceiver = XCVR_INTERNAL; cmd->phy_address = bp->phy_addr; @@ -5136,6 +5475,15 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) u8 req_duplex = bp->req_duplex; u16 req_line_speed = bp->req_line_speed; u32 advertising = bp->advertising; + int err = -EINVAL; + + spin_lock_bh(&bp->phy_lock); + + if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE) + goto err_out_unlock; + + if (cmd->port != bp->phy_port && !(bp->phy_flags & 
REMOTE_PHY_CAP_FLAG)) + goto err_out_unlock; if (cmd->autoneg == AUTONEG_ENABLE) { autoneg |= AUTONEG_SPEED; @@ -5148,44 +5496,41 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) (cmd->advertising == ADVERTISED_100baseT_Half) || (cmd->advertising == ADVERTISED_100baseT_Full)) { - if (bp->phy_flags & PHY_SERDES_FLAG) - return -EINVAL; + if (cmd->port == PORT_FIBRE) + goto err_out_unlock; advertising = cmd->advertising; } else if (cmd->advertising == ADVERTISED_2500baseX_Full) { - if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) - return -EINVAL; - } else if (cmd->advertising == ADVERTISED_1000baseT_Full) { + if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) || + (cmd->port == PORT_TP)) + goto err_out_unlock; + } else if (cmd->advertising == ADVERTISED_1000baseT_Full) advertising = cmd->advertising; - } - else if (cmd->advertising == ADVERTISED_1000baseT_Half) { - return -EINVAL; - } + else if (cmd->advertising == ADVERTISED_1000baseT_Half) + goto err_out_unlock; else { - if (bp->phy_flags & PHY_SERDES_FLAG) { + if (cmd->port == PORT_FIBRE) advertising = ETHTOOL_ALL_FIBRE_SPEED; - } - else { + else advertising = ETHTOOL_ALL_COPPER_SPEED; - } } advertising |= ADVERTISED_Autoneg; } else { - if (bp->phy_flags & PHY_SERDES_FLAG) { + if (cmd->port == PORT_FIBRE) { if ((cmd->speed != SPEED_1000 && cmd->speed != SPEED_2500) || (cmd->duplex != DUPLEX_FULL)) - return -EINVAL; + goto err_out_unlock; if (cmd->speed == SPEED_2500 && !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)) - return -EINVAL; - } - else if (cmd->speed == SPEED_1000) { - return -EINVAL; + goto err_out_unlock; } + else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500) + goto err_out_unlock; + autoneg &= ~AUTONEG_SPEED; req_line_speed = cmd->speed; req_duplex = cmd->duplex; @@ -5197,13 +5542,12 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) bp->req_line_speed = req_line_speed; bp->req_duplex = req_duplex; - spin_lock_bh(&bp->phy_lock); - - bnx2_setup_phy(bp); + err = bnx2_setup_phy(bp, cmd->port); +err_out_unlock: spin_unlock_bh(&bp->phy_lock); - return 0; + return err; } static void @@ -5214,11 +5558,7 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) strcpy(info->driver, DRV_MODULE_NAME); strcpy(info->version, DRV_MODULE_VERSION); strcpy(info->bus_info, pci_name(bp->pdev)); - info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0'; - info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0'; - info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0'; - info->fw_version[1] = info->fw_version[3] = '.'; - info->fw_version[5] = 0; + strcpy(info->fw_version, bp->fw_version); } #define BNX2_REGDUMP_LEN (32 * 1024) @@ -5330,6 +5670,14 @@ bnx2_nway_reset(struct net_device *dev) spin_lock_bh(&bp->phy_lock); + if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) { + int rc; + + rc = bnx2_setup_remote_phy(bp, bp->phy_port); + spin_unlock_bh(&bp->phy_lock); + return rc; + } + /* Force a link down visible on the other side */ if (bp->phy_flags & PHY_SERDES_FLAG) { bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK); @@ -5450,8 +5798,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC) bp->stats_ticks = USEC_PER_SEC; } - if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00; - bp->stats_ticks &= 0xffff00; + if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS) + bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS; + bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS; if (netif_running(bp->dev)) { 
bnx2_netif_stop(bp);
@@ -5543,7 +5892,7 @@ bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) spin_lock_bh(&bp->phy_lock); - bnx2_setup_phy(bp); + bnx2_setup_phy(bp, bp->phy_port); spin_unlock_bh(&bp->phy_lock);
@@ -5882,7 +6231,7 @@ bnx2_set_tx_csum(struct net_device *dev, u32 data) struct bnx2 *bp = netdev_priv(dev); if (CHIP_NUM(bp) == CHIP_NUM_5709) - return (ethtool_op_set_tx_hw_csum(dev, data)); + return (ethtool_op_set_tx_ipv6_csum(dev, data)); else return (ethtool_op_set_tx_csum(dev, data)); }
@@ -5939,6 +6288,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIREG: { u32 mii_regval; + if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) + return -EOPNOTSUPP; + if (!netif_running(dev)) return -EAGAIN;
@@ -5955,6 +6307,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (!capable(CAP_NET_ADMIN)) return -EPERM; + if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) + return -EOPNOTSUPP; + if (!netif_running(dev)) return -EAGAIN;
@@ -6116,7 +6471,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) { struct bnx2 *bp; unsigned long mem_len; - int rc; + int rc, i, j; u32 reg; u64 dma_mask, persist_dma_mask;
@@ -6273,7 +6628,47 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) goto err_out_unmap; } - bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV); + reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV); + for (i = 0, j = 0; i < 3; i++) { + u8 num, k, skip0; + + num = (u8) (reg >> (24 - (i * 8))); + for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) { + if (num >= k || !skip0 || k == 1) { + bp->fw_version[j++] = (num / k) + '0'; + skip0 = 0; + } + } + if (i != 2) + bp->fw_version[j++] = '.'; + } + if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) & + BNX2_PORT_FEATURE_ASF_ENABLED) { + bp->flags |= ASF_ENABLE_FLAG; + + for (i = 0; i < 30; i++) { + reg = REG_RD_IND(bp, bp->shmem_base + + BNX2_BC_STATE_CONDITION); + if (reg & BNX2_CONDITION_MFW_RUN_MASK) + break; + msleep(10); + } + } + reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION); + reg &= BNX2_CONDITION_MFW_RUN_MASK; + if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN && + reg != BNX2_CONDITION_MFW_RUN_NONE) { + int i; + u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR); + + bp->fw_version[j++] = ' '; + for (i = 0; i < 3; i++) { + reg = REG_RD_IND(bp, addr + i * 4); + reg = swab32(reg); + memcpy(&bp->fw_version[j], &reg, 4); + j += 4; + } + } reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER); bp->mac_addr[0] = (u8) (reg >> 8);
@@ -6302,7 +6697,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->rx_ticks_int = 18; bp->rx_ticks = 18; - bp->stats_ticks = 1000000 & 0xffff00; + bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS; bp->timer_interval = HZ; bp->current_interval = HZ;
@@ -6315,7 +6710,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) bp->phy_flags |= PHY_SERDES_FLAG; + bp->phy_port = PORT_TP; if (bp->phy_flags & PHY_SERDES_FLAG) { + bp->phy_port = PORT_FIBRE; bp->flags |= NO_WOL_FLAG; if (CHIP_NUM(bp) != CHIP_NUM_5706) { bp->phy_addr = 2;
@@ -6324,6 +6721,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G) bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG; } + bnx2_init_remote_phy(bp); + } else if (CHIP_NUM(bp) == CHIP_NUM_5706 || CHIP_NUM(bp) == CHIP_NUM_5708) bp->phy_flags |= PHY_CRC_FIX_FLAG;
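The new bootcode-version code above peels decimal digits out of each of the three high bytes of BNX2_DEV_INFO_BC_REV, suppressing leading zeros within each field. The same digit extraction as a stand-alone program (the test value is made up):

#include <stdio.h>

/* Stand-alone version of the loop in bnx2_init_board(): each of the
 * top three bytes of the bootcode revision word becomes one dotted
 * decimal field, with leading zeros suppressed. */
static void bc_rev_to_string(unsigned int reg, char *buf)
{
    int i, j = 0;

    for (i = 0; i < 3; i++) {
        unsigned char num = (unsigned char)(reg >> (24 - (i * 8)));
        unsigned char k, skip0;

        for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
            if (num >= k || !skip0 || k == 1) {
                buf[j++] = (num / k) + '0';
                skip0 = 0;
            }
        }
        if (i != 2)
            buf[j++] = '.';
    }
    buf[j] = '\0';
}

int main(void)
{
    char buf[16];

    bc_rev_to_string(0x01090300, buf); /* illustrative value */
    printf("%s\n", buf);               /* prints "1.9.3" */
    return 0;
}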
@@ -6363,10 +6762,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, amd_8132))) { - u8 rev; - pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev); - if (rev >= 0x10 && rev <= 0x13) { + if (amd_8132->revision >= 0x10 && + amd_8132->revision <= 0x13) { disable_msi = 1; pci_dev_put(amd_8132); break;
@@ -6374,23 +6772,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) } } - bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL; - bp->req_line_speed = 0; - if (bp->phy_flags & PHY_SERDES_FLAG) { - bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg; - - reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG); - reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK; - if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) { - bp->autoneg = 0; - bp->req_line_speed = bp->line_speed = SPEED_1000; - bp->req_duplex = DUPLEX_FULL; - } - } - else { - bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg; - } - + bnx2_set_default_link(bp); bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; init_timer(&bp->timer);
@@ -6490,10 +6872,10 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) memcpy(dev->perm_addr, bp->mac_addr, 6); bp->name = board_info[ent->driver_data].name; + dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; if (CHIP_NUM(bp) == CHIP_NUM_5709) - dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; - else - dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; + dev->features |= NETIF_F_IPV6_CSUM; + #ifdef BCM_VLAN dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; #endif
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 49a5de253b17..102adfe1e923 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6338,6 +6338,8 @@ struct l2_fhdr { #define RX_COPY_THRESH 92 +#define BNX2_MISC_ENABLE_DEFAULT 0x7ffffff + #define DMA_READ_CHANS 5 #define DMA_WRITE_CHANS 3
@@ -6431,6 +6433,11 @@ struct sw_bd { #define ST_MICRO_FLASH_PAGE_SIZE 256 #define ST_MICRO_FLASH_BASE_TOTAL_SIZE 65536 +#define BCM5709_FLASH_PAGE_BITS 8 +#define BCM5709_FLASH_PHY_PAGE_SIZE (1 << BCM5709_FLASH_PAGE_BITS) +#define BCM5709_FLASH_BYTE_ADDR_MASK (BCM5709_FLASH_PHY_PAGE_SIZE-1) +#define BCM5709_FLASH_PAGE_SIZE 256 + #define NVRAM_TIMEOUT_COUNT 30000
@@ -6447,7 +6454,10 @@ struct flash_spec { u32 config2; u32 config3; u32 write1; - u32 buffered; + u32 flags; +#define BNX2_NV_BUFFERED 0x00000001 +#define BNX2_NV_TRANSLATE 0x00000002 +#define BNX2_NV_WREN 0x00000004 u32 page_bits; u32 page_size; u32 addr_mask;
@@ -6537,6 +6547,7 @@ struct bnx2 { #define PHY_INT_MODE_AUTO_POLLING_FLAG 0x100 #define PHY_INT_MODE_LINK_READY_FLAG 0x200 #define PHY_DIS_EARLY_DAC_FLAG 0x400 +#define REMOTE_PHY_CAP_FLAG 0x800 u32 mii_bmcr; u32 mii_bmsr;
@@ -6625,6 +6636,7 @@ struct bnx2 { u16 req_line_speed; u8 req_duplex; + u8 phy_port; u8 link_up; u16 line_speed;
@@ -6656,7 +6668,7 @@ struct bnx2 { u32 shmem_base; - u32 fw_ver; + char fw_version[32]; int pm_cap; int pcix_cap;
@@ -6770,7 +6782,7 @@ struct fw_info { * the firmware has timed out, the driver will assume there is no firmware * running and there won't be any firmware-driver synchronization during a * driver reset. 
*/ -#define FW_ACK_TIME_OUT_MS 100 +#define FW_ACK_TIME_OUT_MS 1000 #define BNX2_DRV_RESET_SIGNATURE 0x00000000 @@ -6788,6 +6800,7 @@ struct fw_info { #define BNX2_DRV_MSG_CODE_DIAG 0x07000000 #define BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL 0x09000000 #define BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN 0x0b000000 +#define BNX2_DRV_MSG_CODE_CMD_SET_LINK 0x10000000 #define BNX2_DRV_MSG_DATA 0x00ff0000 #define BNX2_DRV_MSG_DATA_WAIT0 0x00010000 @@ -6836,6 +6849,7 @@ struct fw_info { #define BNX2_LINK_STATUS_SERDES_LINK (1<<20) #define BNX2_LINK_STATUS_PARTNER_AD_2500FULL (1<<21) #define BNX2_LINK_STATUS_PARTNER_AD_2500HALF (1<<22) +#define BNX2_LINK_STATUS_HEART_BEAT_EXPIRED (1<<31) #define BNX2_DRV_PULSE_MB 0x00000010 #define BNX2_DRV_PULSE_SEQ_MASK 0x00007fff @@ -6845,6 +6859,30 @@ struct fw_info { * This is used for debugging. */ #define BNX2_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE 0x00080000 +#define BNX2_DRV_MB_ARG0 0x00000014 +#define BNX2_NETLINK_SET_LINK_SPEED_10HALF (1<<0) +#define BNX2_NETLINK_SET_LINK_SPEED_10FULL (1<<1) +#define BNX2_NETLINK_SET_LINK_SPEED_10 \ + (BNX2_NETLINK_SET_LINK_SPEED_10HALF | \ + BNX2_NETLINK_SET_LINK_SPEED_10FULL) +#define BNX2_NETLINK_SET_LINK_SPEED_100HALF (1<<2) +#define BNX2_NETLINK_SET_LINK_SPEED_100FULL (1<<3) +#define BNX2_NETLINK_SET_LINK_SPEED_100 \ + (BNX2_NETLINK_SET_LINK_SPEED_100HALF | \ + BNX2_NETLINK_SET_LINK_SPEED_100FULL) +#define BNX2_NETLINK_SET_LINK_SPEED_1GHALF (1<<4) +#define BNX2_NETLINK_SET_LINK_SPEED_1GFULL (1<<5) +#define BNX2_NETLINK_SET_LINK_SPEED_2G5HALF (1<<6) +#define BNX2_NETLINK_SET_LINK_SPEED_2G5FULL (1<<7) +#define BNX2_NETLINK_SET_LINK_SPEED_10GHALF (1<<8) +#define BNX2_NETLINK_SET_LINK_SPEED_10GFULL (1<<9) +#define BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG (1<<10) +#define BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE (1<<11) +#define BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE (1<<12) +#define BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE (1<<13) +#define BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED (1<<14) +#define BNX2_NETLINK_SET_LINK_PHY_RESET (1<<15) + #define BNX2_DEV_INFO_SIGNATURE 0x00000020 #define BNX2_DEV_INFO_SIGNATURE_MAGIC 0x44564900 #define BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK 0xffffff00 @@ -7006,6 +7044,8 @@ struct fw_info { #define BNX2_PORT_FEATURE_MBA_VLAN_TAG_MASK 0xffff #define BNX2_PORT_FEATURE_MBA_VLAN_ENABLE 0x10000 +#define BNX2_MFW_VER_PTR 0x00000014c + #define BNX2_BC_STATE_RESET_TYPE 0x000001c0 #define BNX2_BC_STATE_RESET_TYPE_SIG 0x00005254 #define BNX2_BC_STATE_RESET_TYPE_SIG_MASK 0x0000ffff @@ -7059,12 +7099,42 @@ struct fw_info { #define BNX2_BC_STATE_ERR_NO_RXP (BNX2_BC_STATE_SIGN | 0x0600) #define BNX2_BC_STATE_ERR_TOO_MANY_RBUF (BNX2_BC_STATE_SIGN | 0x0700) +#define BNX2_BC_STATE_CONDITION 0x000001c8 +#define BNX2_CONDITION_MFW_RUN_UNKNOWN 0x00000000 +#define BNX2_CONDITION_MFW_RUN_IPMI 0x00002000 +#define BNX2_CONDITION_MFW_RUN_UMP 0x00004000 +#define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000 +#define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000 +#define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000 + #define BNX2_BC_STATE_DEBUG_CMD 0x1dc #define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000 #define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE_MASK 0xffff0000 #define BNX2_BC_STATE_BC_DBG_CMD_LOOP_CNT_MASK 0xffff #define BNX2_BC_STATE_BC_DBG_CMD_LOOP_INFINITE 0xffff +#define BNX2_FW_EVT_CODE_MB 0x354 +#define BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT 0x00000000 +#define BNX2_FW_EVT_CODE_LINK_EVENT 0x00000001 + +#define BNX2_DRV_ACK_CAP_MB 0x364 +#define BNX2_DRV_ACK_CAP_SIGNATURE 0x35450000 +#define BNX2_CAPABILITY_SIGNATURE_MASK 0xFFFF0000 + +#define BNX2_FW_CAP_MB 
0x368 +#define BNX2_FW_CAP_SIGNATURE 0xaa550000 +#define BNX2_FW_ACK_DRV_SIGNATURE 0x52500000 +#define BNX2_FW_CAP_SIGNATURE_MASK 0xffff0000 +#define BNX2_FW_CAP_REMOTE_PHY_CAPABLE 0x00000001 +#define BNX2_FW_CAP_REMOTE_PHY_PRESENT 0x00000002 + +#define BNX2_RPHY_SIGNATURE 0x36c +#define BNX2_RPHY_LOAD_SIGNATURE 0x5a5a5a5a + +#define BNX2_RPHY_FLAGS 0x370 +#define BNX2_RPHY_SERDES_LINK 0x374 +#define BNX2_RPHY_COPPER_LINK 0x378 + #define HOST_VIEW_SHMEM_BASE 0x167c00 #endif
diff --git a/drivers/net/bsd_comp.c b/drivers/net/bsd_comp.c
index 7845eaf6f29f..202d4a4ef751 100644
--- a/drivers/net/bsd_comp.c
+++ b/drivers/net/bsd_comp.c
@@ -395,14 +395,13 @@ static void *bsd_alloc (unsigned char *options, int opt_len, int decomp) * Allocate the main control structure for this instance. */ maxmaxcode = MAXCODE(bits); - db = kmalloc(sizeof (struct bsd_db), + db = kzalloc(sizeof (struct bsd_db), GFP_KERNEL); if (!db) { return NULL; } - memset (db, 0, sizeof(struct bsd_db)); /* * Allocate space for the dictionary. This may be more than one page in * length.
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 59b9943b077d..f6e4030c73d1 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -3422,21 +3422,19 @@ done: static void cas_check_pci_invariants(struct cas *cp) { struct pci_dev *pdev = cp->pdev; - u8 rev; cp->cas_flags = 0; - pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); if ((pdev->vendor == PCI_VENDOR_ID_SUN) && (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) { - if (rev >= CAS_ID_REVPLUS) + if (pdev->revision >= CAS_ID_REVPLUS) cp->cas_flags |= CAS_FLAG_REG_PLUS; - if (rev < CAS_ID_REVPLUS02u) + if (pdev->revision < CAS_ID_REVPLUS02u) cp->cas_flags |= CAS_FLAG_TARGET_ABORT; /* Original Cassini supports HW CSUM, but it's not * enabled by default as it can trigger TX hangs. */ - if (rev < CAS_ID_REV2) + if (pdev->revision < CAS_ID_REV2) cp->cas_flags |= CAS_FLAG_NO_HW_CSUM; } else { /* Only sun has original cassini chips. */
@@ -4919,13 +4917,13 @@ static int __devinit cas_init_one(struct pci_dev *pdev, pci_cmd &= ~PCI_COMMAND_SERR; pci_cmd |= PCI_COMMAND_PARITY; pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); - if (pci_set_mwi(pdev)) + if (pci_try_set_mwi(pdev)) printk(KERN_WARNING PFX "Could not enable MWI for %s\n", pci_name(pdev)); /* * On some architectures, the default cache line size set - * by pci_set_mwi reduces perforamnce. We have to increase + * by pci_try_set_mwi reduces performance. We have to increase * it for this case. To start, we'll print some configuration * data. */
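Several hunks in this merge repeat one substitution: drop the locally cached pci_read_config_byte(..., PCI_REVISION_ID, ...) and test the revision the PCI core has already cached in struct pci_dev. A minimal sketch of the resulting pattern; the flag name and revision cutoff below are invented for illustration:

#include <linux/pci.h>

#define MY_FLAG_REG_PLUS 0x1 /* invented feature flag */

/* Gate a feature on the chip revision without touching config space:
 * pdev->revision was filled in by the PCI core at enumeration time. */
static unsigned long my_check_revision(struct pci_dev *pdev)
{
    unsigned long flags = 0;

    if (pdev->revision >= 0x10) /* invented cutoff */
        flags |= MY_FLAG_REG_PLUS;
    return flags;
}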
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index 8eddd23a3a51..eb508bf8022a 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -39,6 +39,6 @@ /* Firmware version */ #define FW_VERSION_MAJOR 4 -#define FW_VERSION_MINOR 1 +#define FW_VERSION_MINOR 3 #define FW_VERSION_MICRO 0 #endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 74ec64a1625d..04e3710c9082 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -250,7 +250,6 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) np->an_enable = 1; mii_set_media (dev); } - pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id); err = register_netdev (dev); if (err)
@@ -866,9 +865,9 @@ receive_packet (struct net_device *dev) PCI_DMA_FROMDEVICE); /* 16 byte align the IP header */ skb_reserve (skb, 2); - eth_copy_and_sum (skb, + skb_copy_to_linear_data (skb, np->rx_skbuff[entry]->data, - pkt_len, 0); + pkt_len); skb_put (skb, pkt_len); pci_dma_sync_single_for_device(np->pdev, desc->fraginfo &
@@ -879,7 +878,7 @@ receive_packet (struct net_device *dev) skb->protocol = eth_type_trans (skb, dev); #if 0 /* Checksum done by hw, but csum value unavailable. */ - if (np->pci_rev_id >= 0x0c && + if (np->pdev->pci_rev_id >= 0x0c && !(frame_status & (TCPError | UDPError | IPError))) { skb->ip_summed = CHECKSUM_UNNECESSARY; }
diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h
index 814c449c359f..e443065a452e 100644
--- a/drivers/net/dl2k.h
+++ b/drivers/net/dl2k.h
@@ -668,7 +668,6 @@ struct netdev_private { unsigned int rx_flow:1; /* Rx flow control enable */ unsigned int phy_media:1; /* 1: fiber, 0: copper */ unsigned int link_status:1; /* Current link status */ - unsigned char pci_rev_id; /* PCI revision ID */ struct netdev_desc *last_tx; /* Last Tx descriptor used. */ unsigned long cur_rx, old_rx; /* Producer/consumer ring indices */ unsigned long cur_tx, old_tx;
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 264fa0e2e075..c3de81bf090a 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -104,6 +104,18 @@ #define PRINTK(args...) printk(KERN_DEBUG args) #endif +#ifdef CONFIG_BLACKFIN +#define readsb insb +#define readsw insw +#define readsl insl +#define writesb outsb +#define writesw outsw +#define writesl outsl +#define DM9000_IRQ_FLAGS (IRQF_SHARED | IRQF_TRIGGER_HIGH) +#else +#define DM9000_IRQ_FLAGS IRQF_SHARED +#endif + /* * Transmit timeout, default 5 seconds. 
*/ @@ -431,6 +443,9 @@ dm9000_probe(struct platform_device *pdev) db->io_addr = (void __iomem *)base; db->io_data = (void __iomem *)(base + 4); + /* ensure at least we have a default set of IO routines */ + dm9000_set_io(db, 2); + } else { db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); @@ -614,7 +629,7 @@ dm9000_open(struct net_device *dev) PRINTK2("entering dm9000_open\n"); - if (request_irq(dev->irq, &dm9000_interrupt, IRQF_SHARED, dev->name, dev)) + if (request_irq(dev->irq, &dm9000_interrupt, DM9000_IRQ_FLAGS, dev->name, dev)) return -EAGAIN; /* Initialize DM9000 board */ diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 60673bc292c0..756a6bcb038d 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -34,11 +34,12 @@ #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/moduleparam.h> +#include <linux/rtnetlink.h> +#include <net/rtnetlink.h> static int numdummies = 1; static int dummy_xmit(struct sk_buff *skb, struct net_device *dev); -static struct net_device_stats *dummy_get_stats(struct net_device *dev); static int dummy_set_address(struct net_device *dev, void *p) { @@ -56,13 +57,13 @@ static void set_multicast_list(struct net_device *dev) { } -static void __init dummy_setup(struct net_device *dev) +static void dummy_setup(struct net_device *dev) { /* Initialize the device structure. */ - dev->get_stats = dummy_get_stats; dev->hard_start_xmit = dummy_xmit; dev->set_multicast_list = set_multicast_list; dev->set_mac_address = dummy_set_address; + dev->destructor = free_netdev; /* Fill in device structure with ethernet-generic values. */ ether_setup(dev); @@ -76,77 +77,80 @@ static void __init dummy_setup(struct net_device *dev) static int dummy_xmit(struct sk_buff *skb, struct net_device *dev) { - struct net_device_stats *stats = netdev_priv(dev); - - stats->tx_packets++; - stats->tx_bytes+=skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; dev_kfree_skb(skb); return 0; } -static struct net_device_stats *dummy_get_stats(struct net_device *dev) +static int dummy_validate(struct nlattr *tb[], struct nlattr *data[]) { - return netdev_priv(dev); + if (tb[IFLA_ADDRESS]) { + if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) + return -EINVAL; + if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) + return -EADDRNOTAVAIL; + } + return 0; } -static struct net_device **dummies; +static struct rtnl_link_ops dummy_link_ops __read_mostly = { + .kind = "dummy", + .setup = dummy_setup, + .validate = dummy_validate, +}; /* Number of dummy devices to be set up by this module. 
*/ module_param(numdummies, int, 0); MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices"); -static int __init dummy_init_one(int index) +static int __init dummy_init_one(void) { struct net_device *dev_dummy; int err; - dev_dummy = alloc_netdev(sizeof(struct net_device_stats), - "dummy%d", dummy_setup); - + dev_dummy = alloc_netdev(0, "dummy%d", dummy_setup); if (!dev_dummy) return -ENOMEM; - if ((err = register_netdev(dev_dummy))) { - free_netdev(dev_dummy); - dev_dummy = NULL; - } else { - dummies[index] = dev_dummy; - } + err = dev_alloc_name(dev_dummy, dev_dummy->name); + if (err < 0) + goto err; - return err; -} + dev_dummy->rtnl_link_ops = &dummy_link_ops; + err = register_netdevice(dev_dummy); + if (err < 0) + goto err; + return 0; -static void dummy_free_one(int index) -{ - unregister_netdev(dummies[index]); - free_netdev(dummies[index]); +err: + free_netdev(dev_dummy); + return err; } static int __init dummy_init_module(void) { int i, err = 0; - dummies = kmalloc(numdummies * sizeof(void *), GFP_KERNEL); - if (!dummies) - return -ENOMEM; + + rtnl_lock(); + err = __rtnl_link_register(&dummy_link_ops); + for (i = 0; i < numdummies && !err; i++) - err = dummy_init_one(i); - if (err) { - i--; - while (--i >= 0) - dummy_free_one(i); - } + err = dummy_init_one(); + if (err < 0) + __rtnl_link_unregister(&dummy_link_ops); + rtnl_unlock(); + return err; } static void __exit dummy_cleanup_module(void) { - int i; - for (i = 0; i < numdummies; i++) - dummy_free_one(i); - kfree(dummies); + rtnl_link_unregister(&dummy_link_ops); } module_init(dummy_init_module); module_exit(dummy_cleanup_module); MODULE_LICENSE("GPL"); +MODULE_ALIAS_RTNL_LINK("dummy"); diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 74ea6373c7cd..6b6401e9304e 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -583,7 +583,6 @@ struct nic { u32 rx_tco_frames; u32 rx_over_length_errors; - u8 rev_id; u16 leds; u16 eeprom_wc; u16 eeprom[256]; @@ -937,9 +936,8 @@ static void e100_get_defaults(struct nic *nic) struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; - pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id); /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ - nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->rev_id; + nic->mac = (nic->flags & ich) ? 
mac_82559_D101M : nic->pdev->revision; if(nic->mac == mac_unknown) nic->mac = mac_82557_D100_A; @@ -1279,7 +1277,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb if (nic->flags & ich) goto noloaducode; - /* Search for ucode match against h/w rev_id */ + /* Search for ucode match against h/w revision */ for (opts = ucode_opts; opts->mac; opts++) { int i; u32 *ucode = opts->ucode; @@ -2238,7 +2236,7 @@ static void e100_get_regs(struct net_device *netdev, u32 *buff = p; int i; - regs->version = (1 << 24) | nic->rev_id; + regs->version = (1 << 24) | nic->pdev->revision; buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | ioread8(&nic->csr->scb.cmd_lo) << 16 | ioread16(&nic->csr->scb.status); diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index cf8af928a69c..f48b659e0c2b 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -1266,8 +1266,7 @@ e1000_sw_init(struct e1000_adapter *adapter) hw->device_id = pdev->device; hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_id = pdev->subsystem_device; - - pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + hw->revision_id = pdev->revision; pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c index 9800341956a2..3c54014acece 100644 --- a/drivers/net/eepro100.c +++ b/drivers/net/eepro100.c @@ -1801,7 +1801,7 @@ speedo_rx(struct net_device *dev) #if 1 || USE_IP_CSUM /* Packet is in one chunk -- we can copy + cksum. */ - eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0); + skb_copy_to_linear_data(skb, sp->rx_skbuff[entry]->data, pkt_len); skb_put(skb, pkt_len); #else skb_copy_from_linear_data(sp->rx_skbuff[entry], @@ -2292,10 +2292,15 @@ static int eepro100_resume(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata (pdev); struct speedo_private *sp = netdev_priv(dev); void __iomem *ioaddr = sp->regs; + int rc; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - pci_enable_device(pdev); + + rc = pci_enable_device(pdev); + if (rc) + return rc; + pci_set_master(pdev); if (!netif_running(dev)) diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index f03f070451de..489c8b260dd8 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h @@ -39,13 +39,13 @@ #include <asm/io.h> #define DRV_NAME "ehea" -#define DRV_VERSION "EHEA_0067" +#define DRV_VERSION "EHEA_0071" -/* EHEA capability flags */ +/* eHEA capability flags */ #define DLPAR_PORT_ADD_REM 1 -#define DLPAR_MEM_ADD 2 -#define DLPAR_MEM_REM 4 -#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM) +#define DLPAR_MEM_ADD 2 +#define DLPAR_MEM_REM 4 +#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM) #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \ | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) @@ -113,6 +113,8 @@ /* Memory Regions */ #define EHEA_MR_ACC_CTRL 0x00800000 +#define EHEA_BUSMAP_START 0x8000000000000000ULL + #define EHEA_WATCH_DOG_TIMEOUT 10*HZ /* utility functions */ @@ -186,6 +188,12 @@ struct h_epas { set to 0 if unused */ }; +struct ehea_busmap { + unsigned int entries; /* total number of entries */ + unsigned int valid_sections; /* number of valid sections */ + u64 *vaddr; +}; + struct ehea_qp; struct ehea_cq; struct ehea_eq; @@ -382,6 +390,8 @@ struct ehea_adapter { struct ehea_mr mr; u32 pd; /* protection domain */ u64 max_mc_mac; /* max number of multicast mac addresses */ + int active_ports; + struct list_head list; }; @@ -431,6 +441,9 @@ struct port_res_cfg { 
int max_entries_rq3; }; +enum ehea_flag_bits { + __EHEA_STOP_XFER +}; void ehea_set_ethtool_ops(struct net_device *netdev); int ehea_sense_port_attr(struct ehea_port *port); diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 383144db4d18..4c70a9301c1b 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -79,6 +79,11 @@ MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue " MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 1 "); static int port_name_cnt = 0; +static LIST_HEAD(adapter_list); +u64 ehea_driver_flags = 0; +struct workqueue_struct *ehea_driver_wq; +struct work_struct ehea_rereg_mr_task; + static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev, const struct of_device_id *id); @@ -238,13 +243,17 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr, rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type) | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index); rwqe->sg_list[0].l_key = pr->recv_mr.lkey; - rwqe->sg_list[0].vaddr = (u64)skb->data; + rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); rwqe->sg_list[0].len = packet_size; rwqe->data_segments = 1; index++; index &= max_index_mask; + + if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) + goto out; } + q_skba->index = index; /* Ring doorbell */ @@ -253,7 +262,7 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr, ehea_update_rq2a(pr->qp, i); else ehea_update_rq3a(pr->qp, i); - +out: return ret; } @@ -457,6 +466,8 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, cqe->vlan_tag); else netif_receive_skb(skb); + + dev->last_rx = jiffies; } else { pr->p_stats.poll_receive_errors++; port_reset = ehea_treat_poll_error(pr, rq, cqe, @@ -1321,7 +1332,7 @@ static void write_swqe2_TSO(struct sk_buff *skb, sg1entry->len = skb_data_size - headersize; tmp_addr = (u64)(skb->data + headersize); - sg1entry->vaddr = tmp_addr; + sg1entry->vaddr = ehea_map_vaddr(tmp_addr); swqe->descriptors++; } } else @@ -1352,7 +1363,7 @@ static void write_swqe2_nonTSO(struct sk_buff *skb, sg1entry->l_key = lkey; sg1entry->len = skb_data_size - SWQE2_MAX_IMM; tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM); - sg1entry->vaddr = tmp_addr; + sg1entry->vaddr = ehea_map_vaddr(tmp_addr); swqe->descriptors++; } } else { @@ -1391,7 +1402,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev, sg1entry->len = frag->size; tmp_addr = (u64)(page_address(frag->page) + frag->page_offset); - sg1entry->vaddr = tmp_addr; + sg1entry->vaddr = ehea_map_vaddr(tmp_addr); swqe->descriptors++; sg1entry_contains_frag_data = 1; } @@ -1406,7 +1417,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev, tmp_addr = (u64)(page_address(frag->page) + frag->page_offset); - sgentry->vaddr = tmp_addr; + sgentry->vaddr = ehea_map_vaddr(tmp_addr); swqe->descriptors++; } } @@ -1424,7 +1435,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid) port->logical_port_id, reg_type, port->mac_addr, 0, hcallid); if (hret != H_SUCCESS) { - ehea_error("reg_dereg_bcmc failed (tagged)"); + ehea_error("%sregistering bc address failed (tagged)", + hcallid == H_REG_BCMC ? 
"" : "de"); ret = -EIO; goto out_herr; } @@ -1435,7 +1447,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid) port->logical_port_id, reg_type, port->mac_addr, 0, hcallid); if (hret != H_SUCCESS) { - ehea_error("reg_dereg_bcmc failed (vlan)"); + ehea_error("%sregistering bc address failed (vlan)", + hcallid == H_REG_BCMC ? "" : "de"); ret = -EIO; } out_herr: @@ -1878,6 +1891,9 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) ehea_dump(swqe, 512, "swqe"); } + if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) + goto out; + ehea_post_swqe(pr->qp, swqe); pr->tx_packets++; @@ -1892,7 +1908,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) } dev->trans_start = jiffies; spin_unlock(&pr->xmit_lock); - +out: return NETDEV_TX_OK; } @@ -2158,7 +2174,6 @@ static int ehea_up(struct net_device *dev) { int ret, i; struct ehea_port *port = netdev_priv(dev); - u64 mac_addr = 0; if (port->state == EHEA_PORT_UP) return 0; @@ -2177,18 +2192,10 @@ static int ehea_up(struct net_device *dev) goto out_clean_pr; } - ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); - if (ret) { - ret = -EIO; - ehea_error("out_clean_pr"); - goto out_clean_pr; - } - mac_addr = (*(u64*)dev->dev_addr) >> 16; - ret = ehea_reg_interrupts(dev); if (ret) { - ehea_error("out_dereg_bc"); - goto out_dereg_bc; + ehea_error("reg_interrupts failed. ret:%d", ret); + goto out_clean_pr; } for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { @@ -2214,12 +2221,12 @@ static int ehea_up(struct net_device *dev) out_free_irqs: ehea_free_interrupts(dev); -out_dereg_bc: - ehea_broadcast_reg_helper(port, H_DEREG_BCMC); - out_clean_pr: ehea_clean_all_portres(port); out: + if (ret) + ehea_info("Failed starting %s. ret=%i", dev->name, ret); + return ret; } @@ -2258,9 +2265,13 @@ static int ehea_down(struct net_device *dev) &port->port_res[i].d_netdev->state)) msleep(1); - ehea_broadcast_reg_helper(port, H_DEREG_BCMC); - ret = ehea_clean_all_portres(port); port->state = EHEA_PORT_DOWN; + + ret = ehea_clean_all_portres(port); + if (ret) + ehea_info("Failed freeing resources for %s. ret=%i", + dev->name, ret); + return ret; } @@ -2292,15 +2303,11 @@ static void ehea_reset_port(struct work_struct *work) netif_stop_queue(dev); netif_poll_disable(dev); - ret = ehea_down(dev); - if (ret) - ehea_error("ehea_down failed. 
not all resources are freed"); + ehea_down(dev); ret = ehea_up(dev); - if (ret) { - ehea_error("Reset device %s failed: ret=%d", dev->name, ret); + if (ret) goto out; - } if (netif_msg_timer(port)) ehea_info("Device %s resetted successfully", dev->name); @@ -2312,6 +2319,88 @@ out: return; } +static void ehea_rereg_mrs(struct work_struct *work) +{ + int ret, i; + struct ehea_adapter *adapter; + + ehea_info("LPAR memory enlarged - re-initializing driver"); + + list_for_each_entry(adapter, &adapter_list, list) + if (adapter->active_ports) { + /* Shutdown all ports */ + for (i = 0; i < EHEA_MAX_PORTS; i++) { + struct ehea_port *port = adapter->port[i]; + + if (port) { + struct net_device *dev = port->netdev; + + if (dev->flags & IFF_UP) { + ehea_info("stopping %s", + dev->name); + down(&port->port_lock); + netif_stop_queue(dev); + netif_poll_disable(dev); + ehea_down(dev); + up(&port->port_lock); + } + } + } + + /* Unregister old memory region */ + ret = ehea_rem_mr(&adapter->mr); + if (ret) { + ehea_error("unregister MR failed - driver" + " inoperable!"); + goto out; + } + } + + ehea_destroy_busmap(); + + ret = ehea_create_busmap(); + if (ret) + goto out; + + clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags); + + list_for_each_entry(adapter, &adapter_list, list) + if (adapter->active_ports) { + /* Register new memory region */ + ret = ehea_reg_kernel_mr(adapter, &adapter->mr); + if (ret) { + ehea_error("register MR failed - driver" + " inoperable!"); + goto out; + } + + /* Restart all ports */ + for (i = 0; i < EHEA_MAX_PORTS; i++) { + struct ehea_port *port = adapter->port[i]; + + if (port) { + struct net_device *dev = port->netdev; + + if (dev->flags & IFF_UP) { + ehea_info("restarting %s", + dev->name); + down(&port->port_lock); + + ret = ehea_up(dev); + if (!ret) { + netif_poll_enable(dev); + netif_wake_queue(dev); + } + + up(&port->port_lock); + } + } + } + } +out: + return; +} + static void ehea_tx_watchdog(struct net_device *dev) { struct ehea_port *port = netdev_priv(dev); @@ -2557,12 +2646,18 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, INIT_WORK(&port->reset_task, ehea_reset_port); + ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); + if (ret) { + ret = -EIO; + goto out_unreg_port; + } + ehea_set_ethtool_ops(dev); ret = register_netdev(dev); if (ret) { ehea_error("register_netdev failed. ret=%d", ret); - goto out_unreg_port; + goto out_dereg_bc; } ret = ehea_get_jumboframe_status(port, &jumbo); @@ -2573,8 +2668,13 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, ehea_info("%s: Jumbo frames are %sabled", dev->name, jumbo == 1 ? 
"en" : "dis"); + adapter->active_ports++; + return port; +out_dereg_bc: + ehea_broadcast_reg_helper(port, H_DEREG_BCMC); + out_unreg_port: ehea_unregister_port(port); @@ -2594,8 +2694,10 @@ static void ehea_shutdown_single_port(struct ehea_port *port) { unregister_netdev(port->netdev); ehea_unregister_port(port); + ehea_broadcast_reg_helper(port, H_DEREG_BCMC); kfree(port->mc_list); free_netdev(port->netdev); + port->adapter->active_ports--; } static int ehea_setup_ports(struct ehea_adapter *adapter) @@ -2788,6 +2890,8 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev, goto out; } + list_add(&adapter->list, &adapter_list); + adapter->ebus_dev = dev; adapter_handle = of_get_property(dev->ofdev.node, "ibm,hea-handle", @@ -2891,7 +2995,10 @@ static int __devexit ehea_remove(struct ibmebus_dev *dev) ehea_destroy_eq(adapter->neq); ehea_remove_adapter_mr(adapter); + list_del(&adapter->list); + kfree(adapter); + return 0; } @@ -2939,9 +3046,18 @@ int __init ehea_module_init(void) printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION); + ehea_driver_wq = create_workqueue("ehea_driver_wq"); + + INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs); + ret = check_module_parm(); if (ret) goto out; + + ret = ehea_create_busmap(); + if (ret) + goto out; + ret = ibmebus_register_driver(&ehea_driver); if (ret) { ehea_error("failed registering eHEA device driver on ebus"); @@ -2965,6 +3081,7 @@ static void __exit ehea_module_exit(void) { driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); ibmebus_unregister_driver(&ehea_driver); + ehea_destroy_busmap(); } module_init(ehea_module_init); diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h index d17a45a7e717..89b63531ff26 100644 --- a/drivers/net/ehea/ehea_phyp.h +++ b/drivers/net/ehea/ehea_phyp.h @@ -60,6 +60,9 @@ static inline u32 get_longbusy_msecs(int long_busy_ret_code) } } +/* Number of pages which can be registered at once by H_REGISTER_HEA_RPAGES */ +#define EHEA_MAX_RPAGE 512 + /* Notification Event Queue (NEQ) Entry bit masks */ #define NEQE_EVENT_CODE EHEA_BMASK_IBM(2, 7) #define NEQE_PORTNUM EHEA_BMASK_IBM(32, 47) diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c index 29eaa46948b0..a36fa6c23fdf 100644 --- a/drivers/net/ehea/ehea_qmr.c +++ b/drivers/net/ehea/ehea_qmr.c @@ -31,6 +31,13 @@ #include "ehea_phyp.h" #include "ehea_qmr.h" + +struct ehea_busmap ehea_bmap = { 0, 0, NULL }; +extern u64 ehea_driver_flags; +extern struct workqueue_struct *ehea_driver_wq; +extern struct work_struct ehea_rereg_mr_task; + + static void *hw_qpageit_get_inc(struct hw_queue *queue) { void *retvalue = hw_qeit_get(queue); @@ -547,18 +554,84 @@ int ehea_destroy_qp(struct ehea_qp *qp) return 0; } +int ehea_create_busmap( void ) +{ + u64 vaddr = EHEA_BUSMAP_START; + unsigned long abs_max_pfn = 0; + unsigned long sec_max_pfn; + int i; + + /* + * Sections are not in ascending order -> Loop over all sections and + * find the highest PFN to compute the required map size. 
+ */ + ehea_bmap.valid_sections = 0; + + for (i = 0; i < NR_MEM_SECTIONS; i++) + if (valid_section_nr(i)) { + sec_max_pfn = section_nr_to_pfn(i); + if (sec_max_pfn > abs_max_pfn) + abs_max_pfn = sec_max_pfn; + ehea_bmap.valid_sections++; + } + + ehea_bmap.entries = abs_max_pfn / EHEA_PAGES_PER_SECTION + 1; + ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr)); + + if (!ehea_bmap.vaddr) + return -ENOMEM; + + for (i = 0 ; i < ehea_bmap.entries; i++) { + unsigned long pfn = section_nr_to_pfn(i); + + if (pfn_valid(pfn)) { + ehea_bmap.vaddr[i] = vaddr; + vaddr += EHEA_SECTSIZE; + } else + ehea_bmap.vaddr[i] = 0; + } + + return 0; +} + +void ehea_destroy_busmap( void ) +{ + vfree(ehea_bmap.vaddr); +} + +u64 ehea_map_vaddr(void *caddr) +{ + u64 mapped_addr; + unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS; + + if (likely(index < ehea_bmap.entries)) { + mapped_addr = ehea_bmap.vaddr[index]; + if (likely(mapped_addr)) + mapped_addr |= (((unsigned long)caddr) + & (EHEA_SECTSIZE - 1)); + else + mapped_addr = -1; + } else + mapped_addr = -1; + + if (unlikely(mapped_addr == -1)) + if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags)) + queue_work(ehea_driver_wq, &ehea_rereg_mr_task); + + return mapped_addr; +} + int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) { - int i, k, ret; - u64 hret, pt_abs, start, end, nr_pages; - u32 acc_ctrl = EHEA_MR_ACC_CTRL; + int ret; u64 *pt; + void *pg; + u64 hret, pt_abs, i, j, m, mr_len; + u32 acc_ctrl = EHEA_MR_ACC_CTRL; - start = KERNELBASE; - end = (u64)high_memory; - nr_pages = (end - start) / EHEA_PAGESIZE; + mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE; - pt = kzalloc(PAGE_SIZE, GFP_KERNEL); + pt = kzalloc(EHEA_MAX_RPAGE * sizeof(u64), GFP_KERNEL); if (!pt) { ehea_error("no mem"); ret = -ENOMEM; @@ -566,7 +639,8 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) } pt_abs = virt_to_abs(pt); - hret = ehea_h_alloc_resource_mr(adapter->handle, start, end - start, + hret = ehea_h_alloc_resource_mr(adapter->handle, + EHEA_BUSMAP_START, mr_len, acc_ctrl, adapter->pd, &mr->handle, &mr->lkey); if (hret != H_SUCCESS) { @@ -575,49 +649,43 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) goto out; } - mr->vaddr = KERNELBASE; - k = 0; - - while (nr_pages > 0) { - if (nr_pages > 1) { - u64 num_pages = min(nr_pages, (u64)512); - for (i = 0; i < num_pages; i++) - pt[i] = virt_to_abs((void*)(((u64)start) + - ((k++) * - EHEA_PAGESIZE))); - - hret = ehea_h_register_rpage_mr(adapter->handle, - mr->handle, 0, - 0, (u64)pt_abs, - num_pages); - nr_pages -= num_pages; - } else { - u64 abs_adr = virt_to_abs((void*)(((u64)start) + - (k * EHEA_PAGESIZE))); - - hret = ehea_h_register_rpage_mr(adapter->handle, - mr->handle, 0, - 0, abs_adr,1); - nr_pages--; - } - - if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) { - ehea_h_free_resource(adapter->handle, - mr->handle, FORCE_FREE); - ehea_error("register_rpage_mr failed"); - ret = -EIO; - goto out; + for (i = 0 ; i < ehea_bmap.entries; i++) + if (ehea_bmap.vaddr[i]) { + void *sectbase = __va(i << SECTION_SIZE_BITS); + unsigned long k = 0; + + for (j = 0; j < (PAGES_PER_SECTION / EHEA_MAX_RPAGE); + j++) { + + for (m = 0; m < EHEA_MAX_RPAGE; m++) { + pg = sectbase + ((k++) * EHEA_PAGESIZE); + pt[m] = virt_to_abs(pg); + } + + hret = ehea_h_register_rpage_mr(adapter->handle, + mr->handle, + 0, 0, pt_abs, + EHEA_MAX_RPAGE); + if ((hret != H_SUCCESS) + && (hret != H_PAGE_REGISTERED)) { + ehea_h_free_resource(adapter->handle, + 
mr->handle, + FORCE_FREE); + ehea_error("register_rpage_mr failed"); + ret = -EIO; + goto out; + } + } } - } if (hret != H_SUCCESS) { - ehea_h_free_resource(adapter->handle, mr->handle, - FORCE_FREE); - ehea_error("register_rpage failed for last page"); + ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); + ehea_error("registering mr failed"); ret = -EIO; goto out; } + mr->vaddr = EHEA_BUSMAP_START; mr->adapter = adapter; ret = 0; out: diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h index c0eb3e03a102..b71f8452a5e3 100644 --- a/drivers/net/ehea/ehea_qmr.h +++ b/drivers/net/ehea/ehea_qmr.h @@ -36,8 +36,14 @@ * page size of ehea hardware queues */ -#define EHEA_PAGESHIFT 12 -#define EHEA_PAGESIZE 4096UL +#define EHEA_PAGESHIFT 12 +#define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT) +#define EHEA_SECTSIZE (1UL << 24) +#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> PAGE_SHIFT) + +#if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE +#error eHEA module can't work if kernel sectionsize < ehea sectionsize +#endif /* Some abbreviations used here: * @@ -372,4 +378,8 @@ int ehea_rem_mr(struct ehea_mr *mr); void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle); +int ehea_create_busmap( void ); +void ehea_destroy_busmap( void ); +u64 ehea_map_vaddr(void *caddr); + #endif /* __EHEA_QMR_H__ */ diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c index 5e517946f46a..119778401e48 100644 --- a/drivers/net/epic100.c +++ b/drivers/net/epic100.c @@ -1201,7 +1201,7 @@ static int epic_rx(struct net_device *dev, int budget) ep->rx_ring[entry].bufaddr, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); - eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0); + skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len); skb_put(skb, pkt_len); pci_dma_sync_single_for_device(ep->pci_dev, ep->rx_ring[entry].bufaddr, diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c index abe9b089c610..ff9f177d7157 100644 --- a/drivers/net/fealnx.c +++ b/drivers/net/fealnx.c @@ -1727,8 +1727,8 @@ static int netdev_rx(struct net_device *dev) /* Call copy + cksum if available. */ #if ! 
defined(__alpha__) - eth_copy_and_sum(skb, - np->cur_rx->skbuff->data, pkt_len, 0); + skb_copy_to_linear_data(skb, + np->cur_rx->skbuff->data, pkt_len); skb_put(skb, pkt_len); #else memcpy(skb_put(skb, pkt_len), diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 255b09124e11..03023dd17829 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -648,7 +648,7 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { fep->stats.rx_dropped++; } else { skb_put(skb,pkt_len-4); /* Make room */ - eth_copy_and_sum(skb, data, pkt_len-4, 0); + skb_copy_to_linear_data(skb, data, pkt_len-4); skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); } diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 42ba1c012ee2..6d1d50a19783 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -550,6 +550,8 @@ union ring_type { /* PHY defines */ #define PHY_OUI_MARVELL 0x5043 #define PHY_OUI_CICADA 0x03f1 +#define PHY_OUI_VITESSE 0x01c1 +#define PHY_OUI_REALTEK 0x01c1 #define PHYID1_OUI_MASK 0x03ff #define PHYID1_OUI_SHFT 6 #define PHYID2_OUI_MASK 0xfc00 @@ -557,12 +559,36 @@ union ring_type { #define PHYID2_MODEL_MASK 0x03f0 #define PHY_MODEL_MARVELL_E3016 0x220 #define PHY_MARVELL_E3016_INITMASK 0x0300 -#define PHY_INIT1 0x0f000 -#define PHY_INIT2 0x0e00 -#define PHY_INIT3 0x01000 -#define PHY_INIT4 0x0200 -#define PHY_INIT5 0x0004 -#define PHY_INIT6 0x02000 +#define PHY_CICADA_INIT1 0x0f000 +#define PHY_CICADA_INIT2 0x0e00 +#define PHY_CICADA_INIT3 0x01000 +#define PHY_CICADA_INIT4 0x0200 +#define PHY_CICADA_INIT5 0x0004 +#define PHY_CICADA_INIT6 0x02000 +#define PHY_VITESSE_INIT_REG1 0x1f +#define PHY_VITESSE_INIT_REG2 0x10 +#define PHY_VITESSE_INIT_REG3 0x11 +#define PHY_VITESSE_INIT_REG4 0x12 +#define PHY_VITESSE_INIT_MSK1 0xc +#define PHY_VITESSE_INIT_MSK2 0x0180 +#define PHY_VITESSE_INIT1 0x52b5 +#define PHY_VITESSE_INIT2 0xaf8a +#define PHY_VITESSE_INIT3 0x8 +#define PHY_VITESSE_INIT4 0x8f8a +#define PHY_VITESSE_INIT5 0xaf86 +#define PHY_VITESSE_INIT6 0x8f86 +#define PHY_VITESSE_INIT7 0xaf82 +#define PHY_VITESSE_INIT8 0x0100 +#define PHY_VITESSE_INIT9 0x8f82 +#define PHY_VITESSE_INIT10 0x0 +#define PHY_REALTEK_INIT_REG1 0x1f +#define PHY_REALTEK_INIT_REG2 0x19 +#define PHY_REALTEK_INIT_REG3 0x13 +#define PHY_REALTEK_INIT1 0x0000 +#define PHY_REALTEK_INIT2 0x8e00 +#define PHY_REALTEK_INIT3 0x0001 +#define PHY_REALTEK_INIT4 0xad17 + #define PHY_GIGABIT 0x0100 #define PHY_TIMEOUT 0x1 @@ -1096,6 +1122,28 @@ static int phy_init(struct net_device *dev) return PHY_ERROR; } } + if (np->phy_oui == PHY_OUI_REALTEK) { + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + } /* set advertise register */ reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); @@ -1141,14 +1189,14 @@ static int 
phy_init(struct net_device *dev) /* phy vendor specific configuration */ if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) { phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); - phy_reserved &= ~(PHY_INIT1 | PHY_INIT2); - phy_reserved |= (PHY_INIT3 | PHY_INIT4); + phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2); + phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4); if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) { printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); return PHY_ERROR; } phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); - phy_reserved |= PHY_INIT5; + phy_reserved |= PHY_CICADA_INIT5; if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) { printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); return PHY_ERROR; @@ -1156,12 +1204,106 @@ static int phy_init(struct net_device *dev) } if (np->phy_oui == PHY_OUI_CICADA) { phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ); - phy_reserved |= PHY_INIT6; + phy_reserved |= PHY_CICADA_INIT6; if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) { printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); return PHY_ERROR; } } + if (np->phy_oui == PHY_OUI_VITESSE) { + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); + phy_reserved &= ~PHY_VITESSE_INIT_MSK1; + phy_reserved |= PHY_VITESSE_INIT3; + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); + phy_reserved &= ~PHY_VITESSE_INIT_MSK1; + phy_reserved |= PHY_VITESSE_INIT3; + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { + printk(KERN_INFO 
"%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); + phy_reserved &= ~PHY_VITESSE_INIT_MSK2; + phy_reserved |= PHY_VITESSE_INIT8; + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + } + if (np->phy_oui == PHY_OUI_REALTEK) { + /* reset could have cleared these out, set them back */ + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + } + /* some phys clear out pause advertisment on reset, set it back */ mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); @@ -4995,12 +5137,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i goto out_unmap; np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; } - np->rx_skb = kmalloc(sizeof(struct nv_skb_map) * np->rx_ring_size, GFP_KERNEL); - np->tx_skb = kmalloc(sizeof(struct nv_skb_map) * np->tx_ring_size, GFP_KERNEL); + np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); + np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); if (!np->rx_skb || !np->tx_skb) goto out_freering; - memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); - memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); dev->open = nv_open; dev->stop = nv_close; @@ -5084,15 +5224,13 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i np->wolenabled = 0; if (id->driver_data & DEV_HAS_POWER_CNTRL) { - u8 revision_id; - pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id); /* take phy and nic out of low power mode */ powerstate = readl(base + NvRegPowerState2); powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK; if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 || id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) && - revision_id >= 0xA3) + pci_dev->revision >= 0xA3) powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3; writel(powerstate, base + NvRegPowerState2); } diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index d7a1a58de766..f92690555dd9 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -420,8 +420,18 @@ static phy_interface_t gfar_get_interface(struct net_device *dev) if (ecntrl & ECNTRL_REDUCED_MODE) { if (ecntrl & ECNTRL_REDUCED_MII_MODE) return PHY_INTERFACE_MODE_RMII; - else + else { + phy_interface_t 
interface = priv->einfo->interface; + + /* + * This isn't autodetected right now, so it must + * be set by the device tree or platform code. + */ + if (interface == PHY_INTERFACE_MODE_RGMII_ID) + return PHY_INTERFACE_MODE_RGMII_ID; + return PHY_INTERFACE_MODE_RGMII; + } } if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c index 5dd34a1a7b89..ac3596f45dd8 100644 --- a/drivers/net/gianfar_mii.c +++ b/drivers/net/gianfar_mii.c @@ -31,7 +31,6 @@ #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> -#include <asm/ocp.h> #include <linux/crc32.h> #include <linux/mii.h> #include <linux/phy.h> diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c index 2521b111b3a5..15254dc7876a 100644 --- a/drivers/net/hamachi.c +++ b/drivers/net/hamachi.c @@ -1575,8 +1575,8 @@ static int hamachi_rx(struct net_device *dev) PCI_DMA_FROMDEVICE); /* Call copy + cksum if available. */ #if 1 || USE_IP_COPYSUM - eth_copy_and_sum(skb, - hmp->rx_skbuff[entry]->data, pkt_len, 0); + skb_copy_to_linear_data(skb, + hmp->rx_skbuff[entry]->data, pkt_len); skb_put(skb, pkt_len); #else memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 84aa2117c0ee..355c6cf3d112 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -320,7 +320,7 @@ static int eppconfig(struct baycom_state *bc) sprintf(portarg, "%ld", bc->pdev->port->base); printk(KERN_DEBUG "%s: %s -s -p %s -m %s\n", bc_drvname, eppconfig_path, portarg, modearg); - return call_usermodehelper(eppconfig_path, argv, envp, 1); + return call_usermodehelper(eppconfig_path, argv, envp, UMH_WAIT_PROC); } /* ---------------------------------------------------------------------- */ diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index 3be8c5047599..205f09672492 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -453,8 +453,8 @@ static int __init setup_adapter(int card_base, int type, int n) int scc_base = card_base + hw[type].scc_offset; char *chipnames[] = CHIPNAMES; - /* Allocate memory */ - info = kmalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA); + /* Initialize what is necessary for write_scc and write_scc_data */ + info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA); if (!info) { printk(KERN_ERR "dmascc: " "could not allocate memory for %s at %#3x\n", @@ -462,8 +462,6 @@ static int __init setup_adapter(int card_base, int type, int n) goto out; } - /* Initialize what is necessary for write_scc and write_scc_data */ - memset(info, 0, sizeof(struct scc_info)); info->dev[0] = alloc_netdev(0, "", dev_setup); if (!info->dev[0]) { diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index 6ec3d500f334..d96eb7229548 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c @@ -1337,7 +1337,7 @@ const char * buf, size_t count) #define ATTR(_name, _mode) \ struct attribute veth_##_name##_attr = { \ - .name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE \ + .name = __stringify(_name), .mode = _mode, \ }; static ATTR(active, 0644); diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 07b4c0d7a75c..f5c3598e59af 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -136,13 +136,14 @@ resched: } -static void __init ifb_setup(struct net_device *dev) +static void ifb_setup(struct net_device *dev) { /* Initialize the device structure. 
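 * Setting dev->destructor below lets the rtnl_link core release the
 * device with free_netdev() once it is unregistered.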
*/ dev->get_stats = ifb_get_stats; dev->hard_start_xmit = ifb_xmit; dev->open = &ifb_open; dev->stop = &ifb_close; + dev->destructor = free_netdev; /* Fill in device structure with ethernet-generic values. */ ether_setup(dev); @@ -197,12 +198,6 @@ static struct net_device_stats *ifb_get_stats(struct net_device *dev) return stats; } -static struct net_device **ifbs; - -/* Number of ifb devices to be set up by this module. */ -module_param(numifbs, int, 0); -MODULE_PARM_DESC(numifbs, "Number of ifb devices"); - static int ifb_close(struct net_device *dev) { struct ifb_private *dp = netdev_priv(dev); @@ -226,6 +221,28 @@ static int ifb_open(struct net_device *dev) return 0; } +static int ifb_validate(struct nlattr *tb[], struct nlattr *data[]) +{ + if (tb[IFLA_ADDRESS]) { + if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) + return -EINVAL; + if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) + return -EADDRNOTAVAIL; + } + return 0; +} + +static struct rtnl_link_ops ifb_link_ops __read_mostly = { + .kind = "ifb", + .priv_size = sizeof(struct ifb_private), + .setup = ifb_setup, + .validate = ifb_validate, +}; + +/* Number of ifb devices to be set up by this module. */ +module_param(numifbs, int, 0); +MODULE_PARM_DESC(numifbs, "Number of ifb devices"); + static int __init ifb_init_one(int index) { struct net_device *dev_ifb; @@ -237,49 +254,44 @@ static int __init ifb_init_one(int index) if (!dev_ifb) return -ENOMEM; - if ((err = register_netdev(dev_ifb))) { - free_netdev(dev_ifb); - dev_ifb = NULL; - } else { - ifbs[index] = dev_ifb; - } + err = dev_alloc_name(dev_ifb, dev_ifb->name); + if (err < 0) + goto err; - return err; -} + dev_ifb->rtnl_link_ops = &ifb_link_ops; + err = register_netdevice(dev_ifb); + if (err < 0) + goto err; + return 0; -static void ifb_free_one(int index) -{ - unregister_netdev(ifbs[index]); - free_netdev(ifbs[index]); +err: + free_netdev(dev_ifb); + return err; } static int __init ifb_init_module(void) { - int i, err = 0; - ifbs = kmalloc(numifbs * sizeof(void *), GFP_KERNEL); - if (!ifbs) - return -ENOMEM; + int i, err; + + rtnl_lock(); + err = __rtnl_link_register(&ifb_link_ops); + for (i = 0; i < numifbs && !err; i++) err = ifb_init_one(i); - if (err) { - i--; - while (--i >= 0) - ifb_free_one(i); - } + if (err) + __rtnl_link_unregister(&ifb_link_ops); + rtnl_unlock(); return err; } static void __exit ifb_cleanup_module(void) { - int i; - - for (i = 0; i < numifbs; i++) - ifb_free_one(i); - kfree(ifbs); + rtnl_link_unregister(&ifb_link_ops); } module_init(ifb_init_module); module_exit(ifb_cleanup_module); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jamal Hadi Salim"); +MODULE_ALIAS_RTNL_LINK("ifb"); diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig index 829da9a1d113..2098d0af8ff5 100644 --- a/drivers/net/irda/Kconfig +++ b/drivers/net/irda/Kconfig @@ -155,6 +155,15 @@ config KINGSUN_DONGLE To compile it as a module, choose M here: the module will be called kingsun-sir. +config EP7211_DONGLE + tristate "EP7211 I/R support" + depends on IRTTY_SIR && ARCH_EP7211 && IRDA && EXPERIMENTAL + help + Say Y here if you want to build support for the Cirrus logic + EP7211 chipset's infrared module. + + + comment "Old SIR device drivers" config IRPORT_SIR @@ -355,7 +364,7 @@ config WINBOND_FIR config TOSHIBA_FIR tristate "Toshiba Type-O IR Port" - depends on IRDA && PCI && !64BIT + depends on IRDA && PCI && !64BIT && VIRT_TO_BUS help Say Y here if you want to build support for the Toshiba Type-O IR and Donau oboe chipsets. 
These chipsets are used by the Toshiba diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile index 233a2f923730..2808ef5c7b79 100644 --- a/drivers/net/irda/Makefile +++ b/drivers/net/irda/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o +obj-$(CONFIG_EP7211_DONGLE) += ep7211-sir.o obj-$(CONFIG_KINGSUN_DONGLE) += kingsun-sir.o # The SIR helper module diff --git a/drivers/net/irda/ep7211-sir.c b/drivers/net/irda/ep7211-sir.c new file mode 100644 index 000000000000..831572429bb9 --- /dev/null +++ b/drivers/net/irda/ep7211-sir.c @@ -0,0 +1,89 @@ +/* + * IR port driver for the Cirrus Logic EP7211 processor. + * + * Copyright 2001, Blue Mug Inc. All rights reserved. + * Copyright 2007, Samuel Ortiz <samuel@sortiz.org> + */ +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/tty.h> +#include <linux/init.h> +#include <linux/spinlock.h> + +#include <net/irda/irda.h> +#include <net/irda/irda_device.h> + +#include <asm/io.h> +#include <asm/hardware.h> + +#include "sir-dev.h" + +#define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */ +#define MAX_DELAY 10000 /* 1 ms */ + +static int ep7211_open(struct sir_dev *dev); +static int ep7211_close(struct sir_dev *dev); +static int ep7211_change_speed(struct sir_dev *dev, unsigned speed); +static int ep7211_reset(struct sir_dev *dev); + +static struct dongle_driver ep7211 = { + .owner = THIS_MODULE, + .driver_name = "EP7211 IR driver", + .type = IRDA_EP7211_DONGLE, + .open = ep7211_open, + .close = ep7211_close, + .reset = ep7211_reset, + .set_speed = ep7211_change_speed, +}; + +static int __init ep7211_sir_init(void) +{ + return irda_register_dongle(&ep7211); +} + +static void __exit ep7211_sir_cleanup(void) +{ + irda_unregister_dongle(&ep7211); +} + +static int ep7211_open(struct sir_dev *dev) +{ + unsigned int syscon; + + /* Turn on the SIR encoder. */ + syscon = clps_readl(SYSCON1); + syscon |= SYSCON1_SIREN; + clps_writel(syscon, SYSCON1); + + return 0; +} + +static int ep7211_close(struct sir_dev *dev) +{ + unsigned int syscon; + + /* Turn off the SIR encoder. 
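+ * (clear SYSCON1_SIREN, undoing what ep7211_open did above).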
*/ + syscon = clps_readl(SYSCON1); + syscon &= ~SYSCON1_SIREN; + clps_writel(syscon, SYSCON1); + + return 0; +} + +static int ep7211_change_speed(struct sir_dev *dev, unsigned speed) +{ + return 0; +} + +static int ep7211_reset(struct sir_dev *dev) +{ + return 0; +} + +MODULE_AUTHOR("Samuel Ortiz <samuel@sortiz.org>"); +MODULE_DESCRIPTION("EP7211 IR dongle driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("irda-dongle-13"); /* IRDA_EP7211_DONGLE */ + +module_init(ep7211_sir_init); +module_exit(ep7211_sir_cleanup); diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c index 3078c419cb02..20732458f5ac 100644 --- a/drivers/net/irda/irport.c +++ b/drivers/net/irda/irport.c @@ -164,14 +164,13 @@ irport_open(int i, unsigned int iobase, unsigned int irq) /* Allocate memory if needed */ if (self->tx_buff.truesize > 0) { - self->tx_buff.head = kmalloc(self->tx_buff.truesize, + self->tx_buff.head = kzalloc(self->tx_buff.truesize, GFP_KERNEL); if (self->tx_buff.head == NULL) { IRDA_ERROR("%s(), can't allocate memory for " "transmit buffer!\n", __FUNCTION__); goto err_out4; } - memset(self->tx_buff.head, 0, self->tx_buff.truesize); } self->tx_buff.data = self->tx_buff.head; diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c index ad1857364d51..6f5f697ec9f8 100644 --- a/drivers/net/irda/irtty-sir.c +++ b/drivers/net/irda/irtty-sir.c @@ -505,10 +505,9 @@ static int irtty_open(struct tty_struct *tty) } /* allocate private device info block */ - priv = kmalloc(sizeof(*priv), GFP_KERNEL); + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) goto out_put; - memset(priv, 0, sizeof(*priv)); priv->magic = IRTTY_MAGIC; priv->tty = tty; diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c index 217429122e79..bdd5c979bead 100644 --- a/drivers/net/irda/kingsun-sir.c +++ b/drivers/net/irda/kingsun-sir.c @@ -4,7 +4,7 @@ * Version: 0.1.1 * Description: Irda KingSun/DonShine USB Dongle * Status: Experimental -* Author: Alex Villac�s Lasso <a_villacis@palosanto.com> +* Author: Alex VillacÃs Lasso <a_villacis@palosanto.com> * * Based on stir4200 and mcs7780 drivers, with (strange?) 
differences * @@ -652,6 +652,6 @@ static void __exit kingsun_cleanup(void) } module_exit(kingsun_cleanup); -MODULE_AUTHOR("Alex Villac�s Lasso <a_villacis@palosanto.com>"); +MODULE_AUTHOR("Alex VillacÃs Lasso <a_villacis@palosanto.com>"); MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun/DonShine"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index bf78ef1120ad..0538ca9ce058 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c @@ -44,6 +44,7 @@ MODULE_LICENSE("GPL"); #include <linux/time.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> +#include <linux/mutex.h> #include <asm/uaccess.h> #include <asm/byteorder.h> @@ -1660,8 +1661,8 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id) idev = ndev->priv; spin_lock_init(&idev->lock); - init_MUTEX(&idev->sem); - down(&idev->sem); + mutex_init(&idev->mtx); + mutex_lock(&idev->mtx); idev->pdev = pdev; if (vlsi_irda_init(ndev) < 0) @@ -1689,12 +1690,12 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id) IRDA_MESSAGE("%s: registered device %s\n", drivername, ndev->name); pci_set_drvdata(pdev, ndev); - up(&idev->sem); + mutex_unlock(&idev->mtx); return 0; out_freedev: - up(&idev->sem); + mutex_unlock(&idev->mtx); free_netdev(ndev); out_disable: pci_disable_device(pdev); @@ -1716,12 +1717,12 @@ static void __devexit vlsi_irda_remove(struct pci_dev *pdev) unregister_netdev(ndev); idev = ndev->priv; - down(&idev->sem); + mutex_lock(&idev->mtx); if (idev->proc_entry) { remove_proc_entry(ndev->name, vlsi_proc_root); idev->proc_entry = NULL; } - up(&idev->sem); + mutex_unlock(&idev->mtx); free_netdev(ndev); @@ -1751,7 +1752,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state) return 0; } idev = ndev->priv; - down(&idev->sem); + mutex_lock(&idev->mtx); if (pdev->current_state != 0) { /* already suspended */ if (state.event > pdev->current_state) { /* simply go deeper */ pci_set_power_state(pdev, pci_choose_state(pdev, state)); @@ -1759,7 +1760,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state) } else IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __FUNCTION__, pci_name(pdev), pdev->current_state, state.event); - up(&idev->sem); + mutex_unlock(&idev->mtx); return 0; } @@ -1775,7 +1776,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state) pci_set_power_state(pdev, pci_choose_state(pdev, state)); pdev->current_state = state.event; idev->resume_ok = 1; - up(&idev->sem); + mutex_unlock(&idev->mtx); return 0; } @@ -1790,9 +1791,9 @@ static int vlsi_irda_resume(struct pci_dev *pdev) return 0; } idev = ndev->priv; - down(&idev->sem); + mutex_lock(&idev->mtx); if (pdev->current_state == 0) { - up(&idev->sem); + mutex_unlock(&idev->mtx); IRDA_WARNING("%s - %s: already resumed\n", __FUNCTION__, pci_name(pdev)); return 0; @@ -1814,7 +1815,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev) * device and independently resume_ok should catch any garbage config. 
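 * (idev->mtx, which replaces the old semaphore throughout this file,
 * keeps suspend/resume serialized against probe and remove).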
*/ IRDA_WARNING("%s - hm, nothing to resume?\n", __FUNCTION__); - up(&idev->sem); + mutex_unlock(&idev->mtx); return 0; } @@ -1824,7 +1825,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev) netif_device_attach(ndev); } idev->resume_ok = 0; - up(&idev->sem); + mutex_unlock(&idev->mtx); return 0; } diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h index 2d3b773d8e35..ca12a6096419 100644 --- a/drivers/net/irda/vlsi_ir.h +++ b/drivers/net/irda/vlsi_ir.h @@ -728,7 +728,7 @@ typedef struct vlsi_irda_dev { struct timeval last_rx; spinlock_t lock; - struct semaphore sem; + struct mutex mtx; u8 resume_ok; struct proc_dir_entry *proc_entry; diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c index 347d50cd77d4..0433c41f9029 100644 --- a/drivers/net/iseries_veth.c +++ b/drivers/net/iseries_veth.c @@ -822,10 +822,9 @@ static int veth_init_connection(u8 rlp) || ! HvLpConfig_doLpsCommunicateOnVirtualLan(this_lp, rlp) ) return 0; - cnx = kmalloc(sizeof(*cnx), GFP_KERNEL); + cnx = kzalloc(sizeof(*cnx), GFP_KERNEL); if (! cnx) return -ENOMEM; - memset(cnx, 0, sizeof(*cnx)); cnx->remote_lp = rlp; spin_lock_init(&cnx->lock); @@ -852,14 +851,13 @@ static int veth_init_connection(u8 rlp) if (rc != 0) return rc; - msgs = kmalloc(VETH_NUMBUFFERS * sizeof(struct veth_msg), GFP_KERNEL); + msgs = kcalloc(VETH_NUMBUFFERS, sizeof(struct veth_msg), GFP_KERNEL); if (! msgs) { veth_error("Can't allocate buffers for LPAR %d.\n", rlp); return -ENOMEM; } cnx->msgs = msgs; - memset(msgs, 0, VETH_NUMBUFFERS * sizeof(struct veth_msg)); for (i = 0; i < VETH_NUMBUFFERS; i++) { msgs[i].token = i; diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c index d5f694fc4a21..d9ce1aef148a 100644 --- a/drivers/net/ixp2000/ixpdev.c +++ b/drivers/net/ixp2000/ixpdev.c @@ -111,7 +111,7 @@ static int ixpdev_rx(struct net_device *dev, int *budget) skb = dev_alloc_skb(desc->pkt_length + 2); if (likely(skb != NULL)) { skb_reserve(skb, 2); - eth_copy_and_sum(skb, buf, desc->pkt_length, 0); + skb_copy_to_linear_data(skb, buf, desc->pkt_length); skb_put(skb, desc->pkt_length); skb->protocol = eth_type_trans(skb, nds[desc->channel]); diff --git a/drivers/net/lance.c b/drivers/net/lance.c index 0fe96c85828b..a4e5fab12628 100644 --- a/drivers/net/lance.c +++ b/drivers/net/lance.c @@ -533,11 +533,10 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int dev->base_addr = ioaddr; /* Make certain the data structures used by the LANCE are aligned and DMAble. */ - lp = kmalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL); + lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL); if(lp==NULL) return -ENODEV; if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp); - memset(lp, 0, sizeof(*lp)); dev->priv = lp; lp->name = chipname; lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE, @@ -1186,9 +1185,9 @@ lance_rx(struct net_device *dev) } skb_reserve(skb,2); /* 16 byte align */ skb_put(skb,pkt_len); /* Make room */ - eth_copy_and_sum(skb, + skb_copy_to_linear_data(skb, (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)), - pkt_len,0); + pkt_len); skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; diff --git a/drivers/net/lguest_net.c b/drivers/net/lguest_net.c new file mode 100644 index 000000000000..112778652f7d --- /dev/null +++ b/drivers/net/lguest_net.c @@ -0,0 +1,354 @@ +/* A simple network driver for lguest. 
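+ * Guests on the same virtual LAN share pages of struct lguest_net
+ * slots; packets are copied from peer to peer with lguest_send_dma()
+ * instead of touching real hardware.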
+ * + * Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +//#define DEBUG +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/module.h> +#include <linux/mm_types.h> +#include <linux/io.h> +#include <linux/lguest_bus.h> + +#define SHARED_SIZE PAGE_SIZE +#define MAX_LANS 4 +#define NUM_SKBS 8 + +struct lguestnet_info +{ + /* The shared page(s). */ + struct lguest_net *peer; + unsigned long peer_phys; + unsigned long mapsize; + + /* The lguest_device I come from */ + struct lguest_device *lgdev; + + /* My peerid. */ + unsigned int me; + + /* Receive queue. */ + struct sk_buff *skb[NUM_SKBS]; + struct lguest_dma dma[NUM_SKBS]; +}; + +/* How many bytes left in this page. */ +static unsigned int rest_of_page(void *data) +{ + return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE); +} + +/* Simple convention: offset 4 * peernum. */ +static unsigned long peer_key(struct lguestnet_info *info, unsigned peernum) +{ + return info->peer_phys + 4 * peernum; +} + +static void skb_to_dma(const struct sk_buff *skb, unsigned int headlen, + struct lguest_dma *dma) +{ + unsigned int i, seg; + + for (i = seg = 0; i < headlen; seg++, i += rest_of_page(skb->data+i)) { + dma->addr[seg] = virt_to_phys(skb->data + i); + dma->len[seg] = min((unsigned)(headlen - i), + rest_of_page(skb->data + i)); + } + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, seg++) { + const skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + /* Should not happen with MTU less than 64k - 2 * PAGE_SIZE. */ + if (seg == LGUEST_MAX_DMA_SECTIONS) { + printk("Woah dude! Megapacket!\n"); + break; + } + dma->addr[seg] = page_to_phys(f->page) + f->page_offset; + dma->len[seg] = f->size; + } + if (seg < LGUEST_MAX_DMA_SECTIONS) + dma->len[seg] = 0; +} + +/* We overload multicast bit to show promiscuous mode. */ +#define PROMISC_BIT 0x01 + +static void lguestnet_set_multicast(struct net_device *dev) +{ + struct lguestnet_info *info = netdev_priv(dev); + + if ((dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) || dev->mc_count) + info->peer[info->me].mac[0] |= PROMISC_BIT; + else + info->peer[info->me].mac[0] &= ~PROMISC_BIT; +} + +static int promisc(struct lguestnet_info *info, unsigned int peer) +{ + return info->peer[peer].mac[0] & PROMISC_BIT; +} + +static int mac_eq(const unsigned char mac[ETH_ALEN], + struct lguestnet_info *info, unsigned int peer) +{ + /* Ignore multicast bit, which peer turns on to mean promisc. 
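+ * Masking it out means a promiscuous peer still matches its own
+ * unicast address here.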
*/ + if ((info->peer[peer].mac[0] & (~PROMISC_BIT)) != mac[0]) + return 0; + return memcmp(mac+1, info->peer[peer].mac+1, ETH_ALEN-1) == 0; +} + +static void transfer_packet(struct net_device *dev, + struct sk_buff *skb, + unsigned int peernum) +{ + struct lguestnet_info *info = netdev_priv(dev); + struct lguest_dma dma; + + skb_to_dma(skb, skb_headlen(skb), &dma); + pr_debug("xfer length %04x (%u)\n", htons(skb->len), skb->len); + + lguest_send_dma(peer_key(info, peernum), &dma); + if (dma.used_len != skb->len) { + dev->stats.tx_carrier_errors++; + pr_debug("Bad xfer to peer %i: %i of %i (dma %p/%i)\n", + peernum, dma.used_len, skb->len, + (void *)dma.addr[0], dma.len[0]); + } else { + dev->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + } +} + +static int unused_peer(const struct lguest_net peer[], unsigned int num) +{ + return peer[num].mac[0] == 0; +} + +static int lguestnet_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + unsigned int i; + int broadcast; + struct lguestnet_info *info = netdev_priv(dev); + const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; + + pr_debug("%s: xmit %02x:%02x:%02x:%02x:%02x:%02x\n", + dev->name, dest[0],dest[1],dest[2],dest[3],dest[4],dest[5]); + + broadcast = is_multicast_ether_addr(dest); + for (i = 0; i < info->mapsize/sizeof(struct lguest_net); i++) { + if (i == info->me || unused_peer(info->peer, i)) + continue; + + if (!broadcast && !promisc(info, i) && !mac_eq(dest, info, i)) + continue; + + pr_debug("lguestnet %s: sending from %i to %i\n", + dev->name, info->me, i); + transfer_packet(dev, skb, i); + } + dev_kfree_skb(skb); + return 0; +} + +/* Find a new skb to put in this slot in shared mem. */ +static int fill_slot(struct net_device *dev, unsigned int slot) +{ + struct lguestnet_info *info = netdev_priv(dev); + /* Try to create and register a new one. */ + info->skb[slot] = netdev_alloc_skb(dev, ETH_HLEN + ETH_DATA_LEN); + if (!info->skb[slot]) { + printk("%s: could not fill slot %i\n", dev->name, slot); + return -ENOMEM; + } + + skb_to_dma(info->skb[slot], ETH_HLEN + ETH_DATA_LEN, &info->dma[slot]); + wmb(); + /* Now we tell hypervisor it can use the slot. */ + info->dma[slot].used_len = 0; + return 0; +} + +static irqreturn_t lguestnet_rcv(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct lguestnet_info *info = netdev_priv(dev); + unsigned int i, done = 0; + + for (i = 0; i < ARRAY_SIZE(info->dma); i++) { + unsigned int length; + struct sk_buff *skb; + + length = info->dma[i].used_len; + if (length == 0) + continue; + + done++; + skb = info->skb[i]; + fill_slot(dev, i); + + if (length < ETH_HLEN || length > ETH_HLEN + ETH_DATA_LEN) { + pr_debug(KERN_WARNING "%s: unbelievable skb len: %i\n", + dev->name, length); + dev_kfree_skb(skb); + continue; + } + + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, dev); + /* This is a reliable transport. */ + if (dev->features & NETIF_F_NO_CSUM) + skb->ip_summed = CHECKSUM_UNNECESSARY; + pr_debug("Receiving skb proto 0x%04x len %i type %i\n", + ntohs(skb->protocol), skb->len, skb->pkt_type); + + dev->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + netif_rx(skb); + } + return done ? 
IRQ_HANDLED : IRQ_NONE; +} + +static int lguestnet_open(struct net_device *dev) +{ + int i; + struct lguestnet_info *info = netdev_priv(dev); + + /* Set up our MAC address */ + memcpy(info->peer[info->me].mac, dev->dev_addr, ETH_ALEN); + + /* Turn on promisc mode if needed */ + lguestnet_set_multicast(dev); + + for (i = 0; i < ARRAY_SIZE(info->dma); i++) { + if (fill_slot(dev, i) != 0) + goto cleanup; + } + if (lguest_bind_dma(peer_key(info,info->me), info->dma, + NUM_SKBS, lgdev_irq(info->lgdev)) != 0) + goto cleanup; + return 0; + +cleanup: + while (--i >= 0) + dev_kfree_skb(info->skb[i]); + return -ENOMEM; +} + +static int lguestnet_close(struct net_device *dev) +{ + unsigned int i; + struct lguestnet_info *info = netdev_priv(dev); + + /* Clear all trace: others might deliver packets, we'll ignore it. */ + memset(&info->peer[info->me], 0, sizeof(info->peer[info->me])); + + /* Deregister sg lists. */ + lguest_unbind_dma(peer_key(info, info->me), info->dma); + for (i = 0; i < ARRAY_SIZE(info->dma); i++) + dev_kfree_skb(info->skb[i]); + return 0; +} + +static int lguestnet_probe(struct lguest_device *lgdev) +{ + int err, irqf = IRQF_SHARED; + struct net_device *dev; + struct lguestnet_info *info; + struct lguest_device_desc *desc = &lguest_devices[lgdev->index]; + + pr_debug("lguest_net: probing for device %i\n", lgdev->index); + + dev = alloc_etherdev(sizeof(struct lguestnet_info)); + if (!dev) + return -ENOMEM; + + SET_MODULE_OWNER(dev); + + /* Ethernet defaults with some changes */ + ether_setup(dev); + dev->set_mac_address = NULL; + + dev->dev_addr[0] = 0x02; /* set local assignment bit (IEEE802) */ + dev->dev_addr[1] = 0x00; + memcpy(&dev->dev_addr[2], &lguest_data.guestid, 2); + dev->dev_addr[4] = 0x00; + dev->dev_addr[5] = 0x00; + + dev->open = lguestnet_open; + dev->stop = lguestnet_close; + dev->hard_start_xmit = lguestnet_start_xmit; + + /* Turning on/off promisc will call dev->set_multicast_list. + * We don't actually support multicast yet */ + dev->set_multicast_list = lguestnet_set_multicast; + SET_NETDEV_DEV(dev, &lgdev->dev); + if (desc->features & LGUEST_NET_F_NOCSUM) + dev->features = NETIF_F_SG|NETIF_F_NO_CSUM; + + info = netdev_priv(dev); + info->mapsize = PAGE_SIZE * desc->num_pages; + info->peer_phys = ((unsigned long)desc->pfn << PAGE_SHIFT); + info->lgdev = lgdev; + info->peer = lguest_map(info->peer_phys, desc->num_pages); + if (!info->peer) { + err = -ENOMEM; + goto free; + } + + /* This stores our peerid (upper bits reserved for future). 
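+ * Masking the feature word with (mapsize-1) throws away those
+ * reserved upper bits and leaves just our slot number in the shared
+ * peer table.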
*/ + info->me = (desc->features & (info->mapsize-1)); + + err = register_netdev(dev); + if (err) { + pr_debug("lguestnet: registering device failed\n"); + goto unmap; + } + + if (lguest_devices[lgdev->index].features & LGUEST_DEVICE_F_RANDOMNESS) + irqf |= IRQF_SAMPLE_RANDOM; + if (request_irq(lgdev_irq(lgdev), lguestnet_rcv, irqf, "lguestnet", + dev) != 0) { + pr_debug("lguestnet: cannot get irq %i\n", lgdev_irq(lgdev)); + goto unregister; + } + + pr_debug("lguestnet: registered device %s\n", dev->name); + lgdev->private = dev; + return 0; + +unregister: + unregister_netdev(dev); +unmap: + lguest_unmap(info->peer); +free: + free_netdev(dev); + return err; +} + +static struct lguest_driver lguestnet_drv = { + .name = "lguestnet", + .owner = THIS_MODULE, + .device_type = LGUEST_DEVICE_T_NET, + .probe = lguestnet_probe, +}; + +static __init int lguestnet_init(void) +{ + return register_lguest_driver(&lguestnet_drv); +} +module_init(lguestnet_init); + +MODULE_DESCRIPTION("Lguest network driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c index 26a3b45a4a34..62c1c6262feb 100644 --- a/drivers/net/mac89x0.c +++ b/drivers/net/mac89x0.c @@ -608,7 +608,7 @@ module_param(debug, int, 0); MODULE_PARM_DESC(debug, "CS89[02]0 debug level (0-5)"); MODULE_LICENSE("GPL"); -int +int __init init_module(void) { net_debug = debug; diff --git a/drivers/net/macb.c b/drivers/net/macb.c index 0e04f7ac3f2e..a4bb0264180a 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c @@ -17,13 +17,12 @@ #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> -#include <linux/mii.h> -#include <linux/mutex.h> #include <linux/dma-mapping.h> -#include <linux/ethtool.h> #include <linux/platform_device.h> +#include <linux/phy.h> #include <asm/arch/board.h> +#include <asm/arch/cpu.h> #include "macb.h" @@ -85,172 +84,202 @@ static void __init macb_get_hwaddr(struct macb *bp) memcpy(bp->dev->dev_addr, addr, sizeof(addr)); } -static void macb_enable_mdio(struct macb *bp) +static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { - unsigned long flags; - u32 reg; - - spin_lock_irqsave(&bp->lock, flags); - reg = macb_readl(bp, NCR); - reg |= MACB_BIT(MPE); - macb_writel(bp, NCR, reg); - macb_writel(bp, IER, MACB_BIT(MFD)); - spin_unlock_irqrestore(&bp->lock, flags); -} - -static void macb_disable_mdio(struct macb *bp) -{ - unsigned long flags; - u32 reg; - - spin_lock_irqsave(&bp->lock, flags); - reg = macb_readl(bp, NCR); - reg &= ~MACB_BIT(MPE); - macb_writel(bp, NCR, reg); - macb_writel(bp, IDR, MACB_BIT(MFD)); - spin_unlock_irqrestore(&bp->lock, flags); -} - -static int macb_mdio_read(struct net_device *dev, int phy_id, int location) -{ - struct macb *bp = netdev_priv(dev); + struct macb *bp = bus->priv; int value; - mutex_lock(&bp->mdio_mutex); - - macb_enable_mdio(bp); macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) | MACB_BF(RW, MACB_MAN_READ) - | MACB_BF(PHYA, phy_id) - | MACB_BF(REGA, location) + | MACB_BF(PHYA, mii_id) + | MACB_BF(REGA, regnum) | MACB_BF(CODE, MACB_MAN_CODE))); - wait_for_completion(&bp->mdio_complete); + /* wait for end of transfer */ + while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) + cpu_relax(); value = MACB_BFEXT(DATA, macb_readl(bp, MAN)); - macb_disable_mdio(bp); - mutex_unlock(&bp->mdio_mutex); return value; } -static void macb_mdio_write(struct net_device *dev, int phy_id, - int location, int val) +static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + u16 value) { - struct macb *bp = 
netdev_priv(dev); - - dev_dbg(&bp->pdev->dev, "mdio_write %02x:%02x <- %04x\n", - phy_id, location, val); - - mutex_lock(&bp->mdio_mutex); - macb_enable_mdio(bp); + struct macb *bp = bus->priv; macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) | MACB_BF(RW, MACB_MAN_WRITE) - | MACB_BF(PHYA, phy_id) - | MACB_BF(REGA, location) + | MACB_BF(PHYA, mii_id) + | MACB_BF(REGA, regnum) | MACB_BF(CODE, MACB_MAN_CODE) - | MACB_BF(DATA, val))); + | MACB_BF(DATA, value))); - wait_for_completion(&bp->mdio_complete); + /* wait for end of transfer */ + while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) + cpu_relax(); - macb_disable_mdio(bp); - mutex_unlock(&bp->mdio_mutex); + return 0; } -static int macb_phy_probe(struct macb *bp) +static int macb_mdio_reset(struct mii_bus *bus) { - int phy_address; - u16 phyid1, phyid2; + return 0; +} - for (phy_address = 0; phy_address < 32; phy_address++) { - phyid1 = macb_mdio_read(bp->dev, phy_address, MII_PHYSID1); - phyid2 = macb_mdio_read(bp->dev, phy_address, MII_PHYSID2); +static void macb_handle_link_change(struct net_device *dev) +{ + struct macb *bp = netdev_priv(dev); + struct phy_device *phydev = bp->phy_dev; + unsigned long flags; - if (phyid1 != 0xffff && phyid1 != 0x0000 - && phyid2 != 0xffff && phyid2 != 0x0000) - break; + int status_change = 0; + + spin_lock_irqsave(&bp->lock, flags); + + if (phydev->link) { + if ((bp->speed != phydev->speed) || + (bp->duplex != phydev->duplex)) { + u32 reg; + + reg = macb_readl(bp, NCFGR); + reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); + + if (phydev->duplex) + reg |= MACB_BIT(FD); + if (phydev->speed) + reg |= MACB_BIT(SPD); + + macb_writel(bp, NCFGR, reg); + + bp->speed = phydev->speed; + bp->duplex = phydev->duplex; + status_change = 1; + } } - if (phy_address == 32) - return -ENODEV; + if (phydev->link != bp->link) { + if (phydev->link) + netif_schedule(dev); + else { + bp->speed = 0; + bp->duplex = -1; + } + bp->link = phydev->link; - dev_info(&bp->pdev->dev, - "detected PHY at address %d (ID %04x:%04x)\n", - phy_address, phyid1, phyid2); + status_change = 1; + } - bp->mii.phy_id = phy_address; - return 0; + spin_unlock_irqrestore(&bp->lock, flags); + + if (status_change) { + if (phydev->link) + printk(KERN_INFO "%s: link up (%d/%s)\n", + dev->name, phydev->speed, + DUPLEX_FULL == phydev->duplex ? "Full":"Half"); + else + printk(KERN_INFO "%s: link down\n", dev->name); + } } -static void macb_set_media(struct macb *bp, int media) +/* based on au1000_eth. 
c*/ +static int macb_mii_probe(struct net_device *dev) { - u32 reg; + struct macb *bp = netdev_priv(dev); + struct phy_device *phydev = NULL; + struct eth_platform_data *pdata; + int phy_addr; - spin_lock_irq(&bp->lock); - reg = macb_readl(bp, NCFGR); - reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); - if (media & (ADVERTISE_100HALF | ADVERTISE_100FULL)) - reg |= MACB_BIT(SPD); - if (media & ADVERTISE_FULL) - reg |= MACB_BIT(FD); - macb_writel(bp, NCFGR, reg); - spin_unlock_irq(&bp->lock); + /* find the first phy */ + for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { + if (bp->mii_bus.phy_map[phy_addr]) { + phydev = bp->mii_bus.phy_map[phy_addr]; + break; + } + } + + if (!phydev) { + printk (KERN_ERR "%s: no PHY found\n", dev->name); + return -1; + } + + pdata = bp->pdev->dev.platform_data; + /* TODO : add pin_irq */ + + /* attach the mac to the phy */ + if (pdata && pdata->is_rmii) { + phydev = phy_connect(dev, phydev->dev.bus_id, + &macb_handle_link_change, 0, PHY_INTERFACE_MODE_RMII); + } else { + phydev = phy_connect(dev, phydev->dev.bus_id, + &macb_handle_link_change, 0, PHY_INTERFACE_MODE_MII); + } + + if (IS_ERR(phydev)) { + printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); + return PTR_ERR(phydev); + } + + /* mask with MAC supported features */ + phydev->supported &= PHY_BASIC_FEATURES; + + phydev->advertising = phydev->supported; + + bp->link = 0; + bp->speed = 0; + bp->duplex = -1; + bp->phy_dev = phydev; + + return 0; } -static void macb_check_media(struct macb *bp, int ok_to_print, int init_media) +static int macb_mii_init(struct macb *bp) { - struct mii_if_info *mii = &bp->mii; - unsigned int old_carrier, new_carrier; - int advertise, lpa, media, duplex; + struct eth_platform_data *pdata; + int err = -ENXIO, i; - /* if forced media, go no further */ - if (mii->force_media) - return; + /* Enable managment port */ + macb_writel(bp, NCR, MACB_BIT(MPE)); - /* check current and old link status */ - old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0; - new_carrier = (unsigned int) mii_link_ok(mii); + bp->mii_bus.name = "MACB_mii_bus", + bp->mii_bus.read = &macb_mdio_read, + bp->mii_bus.write = &macb_mdio_write, + bp->mii_bus.reset = &macb_mdio_reset, + bp->mii_bus.id = bp->pdev->id, + bp->mii_bus.priv = bp, + bp->mii_bus.dev = &bp->dev->dev; + pdata = bp->pdev->dev.platform_data; - /* if carrier state did not change, assume nothing else did */ - if (!init_media && old_carrier == new_carrier) - return; + if (pdata) + bp->mii_bus.phy_mask = pdata->phy_mask; - /* no carrier, nothing much to do */ - if (!new_carrier) { - netif_carrier_off(mii->dev); - printk(KERN_INFO "%s: link down\n", mii->dev->name); - return; + bp->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); + if (!bp->mii_bus.irq) { + err = -ENOMEM; + goto err_out; } - /* - * we have carrier, see who's on the other end - */ - netif_carrier_on(mii->dev); + for (i = 0; i < PHY_MAX_ADDR; i++) + bp->mii_bus.irq[i] = PHY_POLL; - /* get MII advertise and LPA values */ - if (!init_media && mii->advertising) { - advertise = mii->advertising; - } else { - advertise = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE); - mii->advertising = advertise; - } - lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA); + platform_set_drvdata(bp->dev, &bp->mii_bus); - /* figure out media and duplex from advertise and LPA values */ - media = mii_nway_result(lpa & advertise); - duplex = (media & ADVERTISE_FULL) ? 
1 : 0; + if (mdiobus_register(&bp->mii_bus)) + goto err_out_free_mdio_irq; - if (ok_to_print) - printk(KERN_INFO "%s: link up, %sMbps, %s-duplex, lpa 0x%04X\n", - mii->dev->name, - media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? "100" : "10", - duplex ? "full" : "half", lpa); + if (macb_mii_probe(bp->dev) != 0) { + goto err_out_unregister_bus; + } - mii->full_duplex = duplex; + return 0; - /* Let the MAC know about the new link state */ - macb_set_media(bp, media); +err_out_unregister_bus: + mdiobus_unregister(&bp->mii_bus); +err_out_free_mdio_irq: + kfree(bp->mii_bus.irq); +err_out: + return err; } static void macb_update_stats(struct macb *bp) @@ -265,16 +294,6 @@ static void macb_update_stats(struct macb *bp) *p += __raw_readl(reg); } -static void macb_periodic_task(struct work_struct *work) -{ - struct macb *bp = container_of(work, struct macb, periodic_task.work); - - macb_update_stats(bp); - macb_check_media(bp, 1, 0); - - schedule_delayed_work(&bp->periodic_task, HZ); -} - static void macb_tx(struct macb *bp) { unsigned int tail; @@ -519,9 +538,6 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) spin_lock(&bp->lock); while (status) { - if (status & MACB_BIT(MFD)) - complete(&bp->mdio_complete); - /* close possible race with dev_close */ if (unlikely(!netif_running(dev))) { macb_writel(bp, IDR, ~0UL); @@ -535,7 +551,8 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) * until we have processed the buffers */ macb_writel(bp, IDR, MACB_RX_INT_FLAGS); - dev_dbg(&bp->pdev->dev, "scheduling RX softirq\n"); + dev_dbg(&bp->pdev->dev, + "scheduling RX softirq\n"); __netif_rx_schedule(dev); } } @@ -765,7 +782,7 @@ static void macb_init_hw(struct macb *bp) macb_writel(bp, TBQP, bp->tx_ring_dma); /* Enable TX and RX */ - macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE)); + macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); /* Enable interrupts */ macb_writel(bp, IER, (MACB_BIT(RCOMP) @@ -776,18 +793,126 @@ static void macb_init_hw(struct macb *bp) | MACB_BIT(TCOMP) | MACB_BIT(ISR_ROVR) | MACB_BIT(HRESP))); + } -static void macb_init_phy(struct net_device *dev) +/* + * The hash address register is 64 bits long and takes up two + * locations in the memory map. The least significant bits are stored + * in EMAC_HSL and the most significant bits in EMAC_HSH. + * + * The unicast hash enable and the multicast hash enable bits in the + * network configuration register enable the reception of hash matched + * frames. The destination address is reduced to a 6 bit index into + * the 64 bit hash register using the following hash function. The + * hash function is an exclusive or of every sixth bit of the + * destination address. + * + * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] + * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] + * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] + * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] + * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] + * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] + * + * da[0] represents the least significant bit of the first byte + * received, that is, the multicast/unicast indicator, and da[47] + * represents the most significant bit of the last byte received. 
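+ * Equivalently, the 48-bit address can be viewed as eight consecutive
+ * 6-bit words which are XORed together to form the index; the
+ * broadcast address ff:ff:ff:ff:ff:ff, for instance, XORs eight
+ * all-ones words and therefore hashes to index 0.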
If + * the hash index, hi[n], points to a bit that is set in the hash + * register then the frame will be matched according to whether the + * frame is multicast or unicast. A multicast match will be signalled + * if the multicast hash enable bit is set, da[0] is 1 and the hash + * index points to a bit set in the hash register. A unicast match + * will be signalled if the unicast hash enable bit is set, da[0] is 0 + * and the hash index points to a bit set in the hash register. To + * receive all multicast frames, the hash register should be set with + * all ones and the multicast hash enable bit should be set in the + * network configuration register. + */ + +static inline int hash_bit_value(int bitnr, __u8 *addr) { + if (addr[bitnr / 8] & (1 << (bitnr % 8))) + return 1; + return 0; +} + +/* + * Return the hash index value for the specified address. + */ +static int hash_get_index(__u8 *addr) +{ + int i, j, bitval; + int hash_index = 0; + + for (j = 0; j < 6; j++) { + for (i = 0, bitval = 0; i < 8; i++) + bitval ^= hash_bit_value(i*6 + j, addr); + + hash_index |= (bitval << j); + } + + return hash_index; +} + +/* + * Add multicast addresses to the internal multicast-hash table. + */ +static void macb_sethashtable(struct net_device *dev) +{ + struct dev_mc_list *curr; + unsigned long mc_filter[2]; + unsigned int i, bitnr; + struct macb *bp = netdev_priv(dev); + + mc_filter[0] = mc_filter[1] = 0; + + curr = dev->mc_list; + for (i = 0; i < dev->mc_count; i++, curr = curr->next) { + if (!curr) break; /* unexpected end of list */ + + bitnr = hash_get_index(curr->dmi_addr); + mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); + } + + macb_writel(bp, HRB, mc_filter[0]); + macb_writel(bp, HRT, mc_filter[1]); +} + +/* + * Enable/Disable promiscuous and multicast modes. 
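+ * Promiscuous mode is the CAF (copy all frames) bit in NCFGR.  For
+ * IFF_ALLMULTI the hash register is written with all ones so that
+ * every multicast address matches; otherwise a hash is built from
+ * dev->mc_list by macb_sethashtable().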
+ */ +static void macb_set_rx_mode(struct net_device *dev) +{ + unsigned long cfg; struct macb *bp = netdev_priv(dev); - /* Set some reasonable default settings */ - macb_mdio_write(dev, bp->mii.phy_id, MII_ADVERTISE, - ADVERTISE_CSMA | ADVERTISE_ALL); - macb_mdio_write(dev, bp->mii.phy_id, MII_BMCR, - (BMCR_SPEED100 | BMCR_ANENABLE - | BMCR_ANRESTART | BMCR_FULLDPLX)); + cfg = macb_readl(bp, NCFGR); + + if (dev->flags & IFF_PROMISC) + /* Enable promiscuous mode */ + cfg |= MACB_BIT(CAF); + else if (dev->flags & (~IFF_PROMISC)) + /* Disable promiscuous mode */ + cfg &= ~MACB_BIT(CAF); + + if (dev->flags & IFF_ALLMULTI) { + /* Enable all multicast mode */ + macb_writel(bp, HRB, -1); + macb_writel(bp, HRT, -1); + cfg |= MACB_BIT(NCFGR_MTI); + } else if (dev->mc_count > 0) { + /* Enable specific multicasts */ + macb_sethashtable(dev); + cfg |= MACB_BIT(NCFGR_MTI); + } else if (dev->flags & (~IFF_ALLMULTI)) { + /* Disable all multicast mode */ + macb_writel(bp, HRB, 0); + macb_writel(bp, HRT, 0); + cfg &= ~MACB_BIT(NCFGR_MTI); + } + + macb_writel(bp, NCFGR, cfg); } static int macb_open(struct net_device *dev) @@ -797,6 +922,10 @@ static int macb_open(struct net_device *dev) dev_dbg(&bp->pdev->dev, "open\n"); + /* if the phy is not yet register, retry later*/ + if (!bp->phy_dev) + return -EAGAIN; + if (!is_valid_ether_addr(dev->dev_addr)) return -EADDRNOTAVAIL; @@ -810,12 +939,11 @@ static int macb_open(struct net_device *dev) macb_init_rings(bp); macb_init_hw(bp); - macb_init_phy(dev); - macb_check_media(bp, 1, 1); - netif_start_queue(dev); + /* schedule a link state check */ + phy_start(bp->phy_dev); - schedule_delayed_work(&bp->periodic_task, HZ); + netif_start_queue(dev); return 0; } @@ -825,10 +953,11 @@ static int macb_close(struct net_device *dev) struct macb *bp = netdev_priv(dev); unsigned long flags; - cancel_rearming_delayed_work(&bp->periodic_task); - netif_stop_queue(dev); + if (bp->phy_dev) + phy_stop(bp->phy_dev); + spin_lock_irqsave(&bp->lock, flags); macb_reset_hw(bp); netif_carrier_off(dev); @@ -845,6 +974,9 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev) struct net_device_stats *nstat = &bp->stats; struct macb_stats *hwstat = &bp->hw_stats; + /* read stats from hardware */ + macb_update_stats(bp); + /* Convert HW stats into netdevice stats */ nstat->rx_errors = (hwstat->rx_fcs_errors + hwstat->rx_align_errors + @@ -882,18 +1014,27 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev) static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct macb *bp = netdev_priv(dev); + struct phy_device *phydev = bp->phy_dev; - return mii_ethtool_gset(&bp->mii, cmd); + if (!phydev) + return -ENODEV; + + return phy_ethtool_gset(phydev, cmd); } static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct macb *bp = netdev_priv(dev); + struct phy_device *phydev = bp->phy_dev; + + if (!phydev) + return -ENODEV; - return mii_ethtool_sset(&bp->mii, cmd); + return phy_ethtool_sset(phydev, cmd); } -static void macb_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +static void macb_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) { struct macb *bp = netdev_priv(dev); @@ -902,104 +1043,34 @@ static void macb_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *inf strcpy(info->bus_info, bp->pdev->dev.bus_id); } -static int macb_nway_reset(struct net_device *dev) -{ - struct macb *bp = netdev_priv(dev); - return mii_nway_restart(&bp->mii); -} - static struct 
ethtool_ops macb_ethtool_ops = { .get_settings = macb_get_settings, .set_settings = macb_set_settings, .get_drvinfo = macb_get_drvinfo, - .nway_reset = macb_nway_reset, .get_link = ethtool_op_get_link, }; static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct macb *bp = netdev_priv(dev); + struct phy_device *phydev = bp->phy_dev; if (!netif_running(dev)) return -EINVAL; - return generic_mii_ioctl(&bp->mii, if_mii(rq), cmd, NULL); -} - -static ssize_t macb_mii_show(const struct device *_dev, char *buf, - unsigned long addr) -{ - struct net_device *dev = to_net_dev(_dev); - struct macb *bp = netdev_priv(dev); - ssize_t ret = -EINVAL; - - if (netif_running(dev)) { - int value; - value = macb_mdio_read(dev, bp->mii.phy_id, addr); - ret = sprintf(buf, "0x%04x\n", (uint16_t)value); - } - - return ret; -} - -#define MII_ENTRY(name, addr) \ -static ssize_t show_##name(struct device *_dev, \ - struct device_attribute *attr, \ - char *buf) \ -{ \ - return macb_mii_show(_dev, buf, addr); \ -} \ -static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) - -MII_ENTRY(bmcr, MII_BMCR); -MII_ENTRY(bmsr, MII_BMSR); -MII_ENTRY(physid1, MII_PHYSID1); -MII_ENTRY(physid2, MII_PHYSID2); -MII_ENTRY(advertise, MII_ADVERTISE); -MII_ENTRY(lpa, MII_LPA); -MII_ENTRY(expansion, MII_EXPANSION); - -static struct attribute *macb_mii_attrs[] = { - &dev_attr_bmcr.attr, - &dev_attr_bmsr.attr, - &dev_attr_physid1.attr, - &dev_attr_physid2.attr, - &dev_attr_advertise.attr, - &dev_attr_lpa.attr, - &dev_attr_expansion.attr, - NULL, -}; - -static struct attribute_group macb_mii_group = { - .name = "mii", - .attrs = macb_mii_attrs, -}; - -static void macb_unregister_sysfs(struct net_device *net) -{ - struct device *_dev = &net->dev; + if (!phydev) + return -ENODEV; - sysfs_remove_group(&_dev->kobj, &macb_mii_group); + return phy_mii_ioctl(phydev, if_mii(rq), cmd); } -static int macb_register_sysfs(struct net_device *net) -{ - struct device *_dev = &net->dev; - int ret; - - ret = sysfs_create_group(&_dev->kobj, &macb_mii_group); - if (ret) - printk(KERN_WARNING - "%s: sysfs mii attribute registration failed: %d\n", - net->name, ret); - return ret; -} static int __devinit macb_probe(struct platform_device *pdev) { struct eth_platform_data *pdata; struct resource *regs; struct net_device *dev; struct macb *bp; + struct phy_device *phydev; unsigned long pclk_hz; u32 config; int err = -ENXIO; @@ -1073,6 +1144,7 @@ static int __devinit macb_probe(struct platform_device *pdev) dev->stop = macb_close; dev->hard_start_xmit = macb_start_xmit; dev->get_stats = macb_get_stats; + dev->set_multicast_list = macb_set_rx_mode; dev->do_ioctl = macb_ioctl; dev->poll = macb_poll; dev->weight = 64; @@ -1080,10 +1152,6 @@ static int __devinit macb_probe(struct platform_device *pdev) dev->base_addr = regs->start; - INIT_DELAYED_WORK(&bp->periodic_task, macb_periodic_task); - mutex_init(&bp->mdio_mutex); - init_completion(&bp->mdio_complete); - /* Set MII management clock divider */ pclk_hz = clk_get_rate(bp->pclk); if (pclk_hz <= 20000000) @@ -1096,20 +1164,9 @@ static int __devinit macb_probe(struct platform_device *pdev) config = MACB_BF(CLK, MACB_CLK_DIV64); macb_writel(bp, NCFGR, config); - bp->mii.dev = dev; - bp->mii.mdio_read = macb_mdio_read; - bp->mii.mdio_write = macb_mdio_write; - bp->mii.phy_id_mask = 0x1f; - bp->mii.reg_num_mask = 0x1f; - macb_get_hwaddr(bp); - err = macb_phy_probe(bp); - if (err) { - dev_err(&pdev->dev, "Failed to detect PHY, aborting.\n"); - goto err_out_free_irq; - } - pdata = 
pdev->dev.platform_data; + if (pdata && pdata->is_rmii) #if defined(CONFIG_ARCH_AT91) macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)) ); @@ -1131,9 +1188,11 @@ static int __devinit macb_probe(struct platform_device *pdev) goto err_out_free_irq; } - platform_set_drvdata(pdev, dev); + if (macb_mii_init(bp) != 0) { + goto err_out_unregister_netdev; + } - macb_register_sysfs(dev); + platform_set_drvdata(pdev, dev); printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d " "(%02x:%02x:%02x:%02x:%02x:%02x)\n", @@ -1141,8 +1200,15 @@ static int __devinit macb_probe(struct platform_device *pdev) dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + phydev = bp->phy_dev; + printk(KERN_INFO "%s: attached PHY driver [%s] " + "(mii_bus:phy_addr=%s, irq=%d)\n", + dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq); + return 0; +err_out_unregister_netdev: + unregister_netdev(dev); err_out_free_irq: free_irq(dev->irq, dev); err_out_iounmap: @@ -1153,7 +1219,9 @@ err_out_disable_clocks: clk_put(bp->hclk); #endif clk_disable(bp->pclk); +#ifndef CONFIG_ARCH_AT91 err_out_put_pclk: +#endif clk_put(bp->pclk); err_out_free_dev: free_netdev(dev); @@ -1171,7 +1239,8 @@ static int __devexit macb_remove(struct platform_device *pdev) if (dev) { bp = netdev_priv(dev); - macb_unregister_sysfs(dev); + mdiobus_unregister(&bp->mii_bus); + kfree(bp->mii_bus.irq); unregister_netdev(dev); free_irq(dev->irq, dev); iounmap(bp->regs); diff --git a/drivers/net/macb.h b/drivers/net/macb.h index b3bb2182edd1..4e3283ebd97c 100644 --- a/drivers/net/macb.h +++ b/drivers/net/macb.h @@ -383,11 +383,11 @@ struct macb { unsigned int rx_pending, tx_pending; - struct delayed_work periodic_task; - - struct mutex mdio_mutex; - struct completion mdio_complete; - struct mii_if_info mii; + struct mii_bus mii_bus; + struct phy_device *phy_dev; + unsigned int link; + unsigned int speed; + unsigned int duplex; }; #endif /* _MACB_H */ diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c new file mode 100644 index 000000000000..dc74d006e01f --- /dev/null +++ b/drivers/net/macvlan.c @@ -0,0 +1,496 @@ +/* + * Copyright (c) 2007 Patrick McHardy <kaber@trash.net> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. 
+ * + * The code this is based on carried the following copyright notice: + * --- + * (C) Copyright 2001-2006 + * Alex Zeffertt, Cambridge Broadband Ltd, ajz@cambridgebroadband.com + * Re-worked by Ben Greear <greearb@candelatech.com> + * --- + */ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/list.h> +#include <linux/notifier.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/if_arp.h> +#include <linux/if_link.h> +#include <linux/if_macvlan.h> +#include <net/rtnetlink.h> + +#define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE) + +struct macvlan_port { + struct net_device *dev; + struct hlist_head vlan_hash[MACVLAN_HASH_SIZE]; + struct list_head vlans; +}; + +struct macvlan_dev { + struct net_device *dev; + struct list_head list; + struct hlist_node hlist; + struct macvlan_port *port; + struct net_device *lowerdev; +}; + + +static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port, + const unsigned char *addr) +{ + struct macvlan_dev *vlan; + struct hlist_node *n; + + hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[addr[5]], hlist) { + if (!compare_ether_addr(vlan->dev->dev_addr, addr)) + return vlan; + } + return NULL; +} + +static void macvlan_broadcast(struct sk_buff *skb, + const struct macvlan_port *port) +{ + const struct ethhdr *eth = eth_hdr(skb); + const struct macvlan_dev *vlan; + struct hlist_node *n; + struct net_device *dev; + struct sk_buff *nskb; + unsigned int i; + + for (i = 0; i < MACVLAN_HASH_SIZE; i++) { + hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) { + dev = vlan->dev; + if (unlikely(!(dev->flags & IFF_UP))) + continue; + + nskb = skb_clone(skb, GFP_ATOMIC); + if (nskb == NULL) { + dev->stats.rx_errors++; + dev->stats.rx_dropped++; + continue; + } + + dev->stats.rx_bytes += skb->len + ETH_HLEN; + dev->stats.rx_packets++; + dev->stats.multicast++; + dev->last_rx = jiffies; + + nskb->dev = dev; + if (!compare_ether_addr(eth->h_dest, dev->broadcast)) + nskb->pkt_type = PACKET_BROADCAST; + else + nskb->pkt_type = PACKET_MULTICAST; + + netif_rx(nskb); + } + } +} + +/* called under rcu_read_lock() from netif_receive_skb */ +static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb) +{ + const struct ethhdr *eth = eth_hdr(skb); + const struct macvlan_port *port; + const struct macvlan_dev *vlan; + struct net_device *dev; + + port = rcu_dereference(skb->dev->macvlan_port); + if (port == NULL) + return skb; + + if (is_multicast_ether_addr(eth->h_dest)) { + macvlan_broadcast(skb, port); + return skb; + } + + vlan = macvlan_hash_lookup(port, eth->h_dest); + if (vlan == NULL) + return skb; + + dev = vlan->dev; + if (unlikely(!(dev->flags & IFF_UP))) { + kfree_skb(skb); + return NULL; + } + + skb = skb_share_check(skb, GFP_ATOMIC); + if (skb == NULL) { + dev->stats.rx_errors++; + dev->stats.rx_dropped++; + return NULL; + } + + dev->stats.rx_bytes += skb->len + ETH_HLEN; + dev->stats.rx_packets++; + dev->last_rx = jiffies; + + skb->dev = dev; + skb->pkt_type = PACKET_HOST; + + netif_rx(skb); + return NULL; +} + +static int macvlan_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + const struct macvlan_dev *vlan = netdev_priv(dev); + unsigned int len = skb->len; + int ret; + + skb->dev = vlan->lowerdev; + ret = dev_queue_xmit(skb); + + if (likely(ret == NET_XMIT_SUCCESS)) { + dev->stats.tx_packets++; + 
dev->stats.tx_bytes += len; + } else { + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; + } + return NETDEV_TX_OK; +} + +static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, void *daddr, void *saddr, + unsigned len) +{ + const struct macvlan_dev *vlan = netdev_priv(dev); + struct net_device *lowerdev = vlan->lowerdev; + + return lowerdev->hard_header(skb, lowerdev, type, daddr, + saddr ? : dev->dev_addr, len); +} + +static int macvlan_open(struct net_device *dev) +{ + struct macvlan_dev *vlan = netdev_priv(dev); + struct macvlan_port *port = vlan->port; + struct net_device *lowerdev = vlan->lowerdev; + int err; + + err = dev_unicast_add(lowerdev, dev->dev_addr, ETH_ALEN); + if (err < 0) + return err; + if (dev->flags & IFF_ALLMULTI) + dev_set_allmulti(lowerdev, 1); + + hlist_add_head_rcu(&vlan->hlist, &port->vlan_hash[dev->dev_addr[5]]); + return 0; +} + +static int macvlan_stop(struct net_device *dev) +{ + struct macvlan_dev *vlan = netdev_priv(dev); + struct net_device *lowerdev = vlan->lowerdev; + + dev_mc_unsync(lowerdev, dev); + if (dev->flags & IFF_ALLMULTI) + dev_set_allmulti(lowerdev, -1); + + dev_unicast_delete(lowerdev, dev->dev_addr, ETH_ALEN); + + hlist_del_rcu(&vlan->hlist); + synchronize_rcu(); + return 0; +} + +static void macvlan_change_rx_flags(struct net_device *dev, int change) +{ + struct macvlan_dev *vlan = netdev_priv(dev); + struct net_device *lowerdev = vlan->lowerdev; + + if (change & IFF_ALLMULTI) + dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); +} + +static void macvlan_set_multicast_list(struct net_device *dev) +{ + struct macvlan_dev *vlan = netdev_priv(dev); + + dev_mc_sync(vlan->lowerdev, dev); +} + +static int macvlan_change_mtu(struct net_device *dev, int new_mtu) +{ + struct macvlan_dev *vlan = netdev_priv(dev); + + if (new_mtu < 68 || vlan->lowerdev->mtu < new_mtu) + return -EINVAL; + dev->mtu = new_mtu; + return 0; +} + +/* + * macvlan network devices have devices nesting below it and are a special + * "super class" of normal network devices; split their locks off into a + * separate class since they always nest. 
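+ * Without a separate class, transmitting on a macvlan would take its
+ * own _xmit_lock and then recurse into dev_queue_xmit() for the lower
+ * device, acquiring a second lock of the same class and triggering a
+ * false lockdep deadlock report.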
+ */ +static struct lock_class_key macvlan_netdev_xmit_lock_key; + +#define MACVLAN_FEATURES \ + (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ + NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ + NETIF_F_TSO_ECN | NETIF_F_TSO6) + +#define MACVLAN_STATE_MASK \ + ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) + +static int macvlan_init(struct net_device *dev) +{ + struct macvlan_dev *vlan = netdev_priv(dev); + const struct net_device *lowerdev = vlan->lowerdev; + + dev->state = (dev->state & ~MACVLAN_STATE_MASK) | + (lowerdev->state & MACVLAN_STATE_MASK); + dev->features = lowerdev->features & MACVLAN_FEATURES; + dev->iflink = lowerdev->ifindex; + + lockdep_set_class(&dev->_xmit_lock, &macvlan_netdev_xmit_lock_key); + return 0; +} + +static void macvlan_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + snprintf(drvinfo->driver, 32, "macvlan"); + snprintf(drvinfo->version, 32, "0.1"); +} + +static u32 macvlan_ethtool_get_rx_csum(struct net_device *dev) +{ + const struct macvlan_dev *vlan = netdev_priv(dev); + struct net_device *lowerdev = vlan->lowerdev; + + if (lowerdev->ethtool_ops->get_rx_csum == NULL) + return 0; + return lowerdev->ethtool_ops->get_rx_csum(lowerdev); +} + +static const struct ethtool_ops macvlan_ethtool_ops = { + .get_link = ethtool_op_get_link, + .get_rx_csum = macvlan_ethtool_get_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .get_tso = ethtool_op_get_tso, + .get_ufo = ethtool_op_get_ufo, + .get_sg = ethtool_op_get_sg, + .get_drvinfo = macvlan_ethtool_get_drvinfo, +}; + +static void macvlan_setup(struct net_device *dev) +{ + ether_setup(dev); + + dev->init = macvlan_init; + dev->open = macvlan_open; + dev->stop = macvlan_stop; + dev->change_mtu = macvlan_change_mtu; + dev->change_rx_flags = macvlan_change_rx_flags; + dev->set_multicast_list = macvlan_set_multicast_list; + dev->hard_header = macvlan_hard_header; + dev->hard_start_xmit = macvlan_hard_start_xmit; + dev->destructor = free_netdev; + dev->ethtool_ops = &macvlan_ethtool_ops; + dev->tx_queue_len = 0; +} + +static int macvlan_port_create(struct net_device *dev) +{ + struct macvlan_port *port; + unsigned int i; + + if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) + return -EINVAL; + + port = kzalloc(sizeof(*port), GFP_KERNEL); + if (port == NULL) + return -ENOMEM; + + port->dev = dev; + INIT_LIST_HEAD(&port->vlans); + for (i = 0; i < MACVLAN_HASH_SIZE; i++) + INIT_HLIST_HEAD(&port->vlan_hash[i]); + rcu_assign_pointer(dev->macvlan_port, port); + return 0; +} + +static void macvlan_port_destroy(struct net_device *dev) +{ + struct macvlan_port *port = dev->macvlan_port; + + rcu_assign_pointer(dev->macvlan_port, NULL); + synchronize_rcu(); + kfree(port); +} + +static void macvlan_transfer_operstate(struct net_device *dev) +{ + struct macvlan_dev *vlan = netdev_priv(dev); + const struct net_device *lowerdev = vlan->lowerdev; + + if (lowerdev->operstate == IF_OPER_DORMANT) + netif_dormant_on(dev); + else + netif_dormant_off(dev); + + if (netif_carrier_ok(lowerdev)) { + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); + } else { + if (netif_carrier_ok(lowerdev)) + netif_carrier_off(dev); + } +} + +static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) +{ + if (tb[IFLA_ADDRESS]) { + if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) + return -EINVAL; + if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) + return -EADDRNOTAVAIL; + } + return 0; +} + +static int macvlan_newlink(struct net_device *dev, + 
struct nlattr *tb[], struct nlattr *data[]) +{ + struct macvlan_dev *vlan = netdev_priv(dev); + struct macvlan_port *port; + struct net_device *lowerdev; + int err; + + if (!tb[IFLA_LINK]) + return -EINVAL; + + lowerdev = __dev_get_by_index(nla_get_u32(tb[IFLA_LINK])); + if (lowerdev == NULL) + return -ENODEV; + + if (!tb[IFLA_MTU]) + dev->mtu = lowerdev->mtu; + else if (dev->mtu > lowerdev->mtu) + return -EINVAL; + + if (!tb[IFLA_ADDRESS]) + random_ether_addr(dev->dev_addr); + + if (lowerdev->macvlan_port == NULL) { + err = macvlan_port_create(lowerdev); + if (err < 0) + return err; + } + port = lowerdev->macvlan_port; + + vlan->lowerdev = lowerdev; + vlan->dev = dev; + vlan->port = port; + + err = register_netdevice(dev); + if (err < 0) + return err; + + list_add_tail(&vlan->list, &port->vlans); + macvlan_transfer_operstate(dev); + return 0; +} + +static void macvlan_dellink(struct net_device *dev) +{ + struct macvlan_dev *vlan = netdev_priv(dev); + struct macvlan_port *port = vlan->port; + + list_del(&vlan->list); + unregister_netdevice(dev); + + if (list_empty(&port->vlans)) + macvlan_port_destroy(dev); +} + +static struct rtnl_link_ops macvlan_link_ops __read_mostly = { + .kind = "macvlan", + .priv_size = sizeof(struct macvlan_dev), + .setup = macvlan_setup, + .validate = macvlan_validate, + .newlink = macvlan_newlink, + .dellink = macvlan_dellink, +}; + +static int macvlan_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = ptr; + struct macvlan_dev *vlan, *next; + struct macvlan_port *port; + + port = dev->macvlan_port; + if (port == NULL) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_CHANGE: + list_for_each_entry(vlan, &port->vlans, list) + macvlan_transfer_operstate(vlan->dev); + break; + case NETDEV_FEAT_CHANGE: + list_for_each_entry(vlan, &port->vlans, list) { + vlan->dev->features = dev->features & MACVLAN_FEATURES; + netdev_features_change(vlan->dev); + } + break; + case NETDEV_UNREGISTER: + list_for_each_entry_safe(vlan, next, &port->vlans, list) + macvlan_dellink(vlan->dev); + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block macvlan_notifier_block __read_mostly = { + .notifier_call = macvlan_device_event, +}; + +static int __init macvlan_init_module(void) +{ + int err; + + register_netdevice_notifier(&macvlan_notifier_block); + macvlan_handle_frame_hook = macvlan_handle_frame; + + err = rtnl_link_register(&macvlan_link_ops); + if (err < 0) + goto err1; + return 0; +err1: + macvlan_handle_frame_hook = macvlan_handle_frame; + unregister_netdevice_notifier(&macvlan_notifier_block); + return err; +} + +static void __exit macvlan_cleanup_module(void) +{ + rtnl_link_unregister(&macvlan_link_ops); + macvlan_handle_frame_hook = NULL; + unregister_netdevice_notifier(&macvlan_notifier_block); +} + +module_init(macvlan_init_module); +module_exit(macvlan_cleanup_module); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); +MODULE_DESCRIPTION("Driver for MAC address based VLANs"); +MODULE_ALIAS_RTNL_LINK("macvlan"); diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c index 1bb088aeaf71..6b32ec94b3a8 100644 --- a/drivers/net/mlx4/catas.c +++ b/drivers/net/mlx4/catas.c @@ -30,41 +30,133 @@ * SOFTWARE. 
*/ +#include <linux/workqueue.h> + #include "mlx4.h" -void mlx4_handle_catas_err(struct mlx4_dev *dev) +enum { + MLX4_CATAS_POLL_INTERVAL = 5 * HZ, +}; + +static DEFINE_SPINLOCK(catas_lock); + +static LIST_HEAD(catas_list); +static struct workqueue_struct *catas_wq; +static struct work_struct catas_work; + +static int internal_err_reset = 1; +module_param(internal_err_reset, int, 0644); +MODULE_PARM_DESC(internal_err_reset, + "Reset device on internal errors if non-zero (default 1)"); + +static void dump_err_buf(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int i; - mlx4_err(dev, "Catastrophic error detected:\n"); + mlx4_err(dev, "Internal error detected:\n"); for (i = 0; i < priv->fw.catas_size; ++i) mlx4_err(dev, " buf[%02x]: %08x\n", i, swab32(readl(priv->catas_err.map + i))); +} - mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0); +static void poll_catas(unsigned long dev_ptr) +{ + struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr; + struct mlx4_priv *priv = mlx4_priv(dev); + + if (readl(priv->catas_err.map)) { + dump_err_buf(dev); + + mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0); + + if (internal_err_reset) { + spin_lock(&catas_lock); + list_add(&priv->catas_err.list, &catas_list); + spin_unlock(&catas_lock); + + queue_work(catas_wq, &catas_work); + } + } else + mod_timer(&priv->catas_err.timer, + round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL)); } -void mlx4_map_catas_buf(struct mlx4_dev *dev) +static void catas_reset(struct work_struct *work) +{ + struct mlx4_priv *priv, *tmppriv; + struct mlx4_dev *dev; + + LIST_HEAD(tlist); + int ret; + + spin_lock_irq(&catas_lock); + list_splice_init(&catas_list, &tlist); + spin_unlock_irq(&catas_lock); + + list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) { + ret = mlx4_restart_one(priv->dev.pdev); + dev = &priv->dev; + if (ret) + mlx4_err(dev, "Reset failed (%d)\n", ret); + else + mlx4_dbg(dev, "Reset succeeded\n"); + } +} + +void mlx4_start_catas_poll(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); unsigned long addr; + INIT_LIST_HEAD(&priv->catas_err.list); + init_timer(&priv->catas_err.timer); + priv->catas_err.map = NULL; + addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) + priv->fw.catas_offset; priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4); - if (!priv->catas_err.map) - mlx4_warn(dev, "Failed to map catastrophic error buffer at 0x%lx\n", + if (!priv->catas_err.map) { + mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n", addr); + return; + } + priv->catas_err.timer.data = (unsigned long) dev; + priv->catas_err.timer.function = poll_catas; + priv->catas_err.timer.expires = + round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL); + add_timer(&priv->catas_err.timer); } -void mlx4_unmap_catas_buf(struct mlx4_dev *dev) +void mlx4_stop_catas_poll(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); + del_timer_sync(&priv->catas_err.timer); + if (priv->catas_err.map) iounmap(priv->catas_err.map); + + spin_lock_irq(&catas_lock); + list_del(&priv->catas_err.list); + spin_unlock_irq(&catas_lock); +} + +int __init mlx4_catas_init(void) +{ + INIT_WORK(&catas_work, catas_reset); + + catas_wq = create_singlethread_workqueue("mlx4_err"); + if (!catas_wq) + return -ENOMEM; + + return 0; +} + +void mlx4_catas_cleanup(void) +{ + destroy_workqueue(catas_wq); } diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c index c1f81a993f5d..a9f31753661a 100644 --- a/drivers/net/mlx4/cmd.c +++ b/drivers/net/mlx4/cmd.c @@ -246,8 
+246,6 @@ void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param) context->result = mlx4_status_to_errno(status); context->out_param = out_param; - context->token += priv->cmd.token_mask + 1; - complete(&context->done); } @@ -264,6 +262,7 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, spin_lock(&cmd->context_lock); BUG_ON(cmd->free_head < 0); context = &cmd->context[cmd->free_head]; + context->token += cmd->token_mask + 1; cmd->free_head = context->next; spin_unlock(&cmd->context_lock); diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c index 27a82cecd693..2095c843fa15 100644 --- a/drivers/net/mlx4/eq.c +++ b/drivers/net/mlx4/eq.c @@ -89,14 +89,12 @@ struct mlx4_eq_context { (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \ (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \ - (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR) | \ (1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \ (1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \ (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \ (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ (1ull << MLX4_EVENT_TYPE_CMD)) -#define MLX4_CATAS_EVENT_MASK (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR) struct mlx4_eqe { u8 reserved1; @@ -264,7 +262,7 @@ static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr) writel(priv->eq_table.clr_mask, priv->eq_table.clr_int); - for (i = 0; i < MLX4_EQ_CATAS; ++i) + for (i = 0; i < MLX4_NUM_EQ; ++i) work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]); return IRQ_RETVAL(work); @@ -281,14 +279,6 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr) return IRQ_HANDLED; } -static irqreturn_t mlx4_catas_interrupt(int irq, void *dev_ptr) -{ - mlx4_handle_catas_err(dev_ptr); - - /* MSI-X vectors always belong to us */ - return IRQ_HANDLED; -} - static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap, int eq_num) { @@ -490,11 +480,9 @@ static void mlx4_free_irqs(struct mlx4_dev *dev) if (eq_table->have_irq) free_irq(dev->pdev->irq, dev); - for (i = 0; i < MLX4_EQ_CATAS; ++i) + for (i = 0; i < MLX4_NUM_EQ; ++i) if (eq_table->eq[i].have_irq) free_irq(eq_table->eq[i].irq, eq_table->eq + i); - if (eq_table->eq[MLX4_EQ_CATAS].have_irq) - free_irq(eq_table->eq[MLX4_EQ_CATAS].irq, dev); } static int __devinit mlx4_map_clr_int(struct mlx4_dev *dev) @@ -598,32 +586,19 @@ int __devinit mlx4_init_eq_table(struct mlx4_dev *dev) if (dev->flags & MLX4_FLAG_MSI_X) { static const char *eq_name[] = { [MLX4_EQ_COMP] = DRV_NAME " (comp)", - [MLX4_EQ_ASYNC] = DRV_NAME " (async)", - [MLX4_EQ_CATAS] = DRV_NAME " (catas)" + [MLX4_EQ_ASYNC] = DRV_NAME " (async)" }; - err = mlx4_create_eq(dev, 1, MLX4_EQ_CATAS, - &priv->eq_table.eq[MLX4_EQ_CATAS]); - if (err) - goto err_out_async; - - for (i = 0; i < MLX4_EQ_CATAS; ++i) { + for (i = 0; i < MLX4_NUM_EQ; ++i) { err = request_irq(priv->eq_table.eq[i].irq, mlx4_msi_x_interrupt, 0, eq_name[i], priv->eq_table.eq + i); if (err) - goto err_out_catas; + goto err_out_async; priv->eq_table.eq[i].have_irq = 1; } - err = request_irq(priv->eq_table.eq[MLX4_EQ_CATAS].irq, - mlx4_catas_interrupt, 0, - eq_name[MLX4_EQ_CATAS], dev); - if (err) - goto err_out_catas; - - priv->eq_table.eq[MLX4_EQ_CATAS].have_irq = 1; } else { err = request_irq(dev->pdev->irq, mlx4_interrupt, IRQF_SHARED, DRV_NAME, dev); @@ -639,22 +614,11 @@ int __devinit mlx4_init_eq_table(struct mlx4_dev *dev) mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err); - for (i = 0; i 
< MLX4_EQ_CATAS; ++i) + for (i = 0; i < MLX4_NUM_EQ; ++i) eq_set_ci(&priv->eq_table.eq[i], 1); - if (dev->flags & MLX4_FLAG_MSI_X) { - err = mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 0, - priv->eq_table.eq[MLX4_EQ_CATAS].eqn); - if (err) - mlx4_warn(dev, "MAP_EQ for catas EQ %d failed (%d)\n", - priv->eq_table.eq[MLX4_EQ_CATAS].eqn, err); - } - return 0; -err_out_catas: - mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]); - err_out_async: mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]); @@ -675,19 +639,13 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); int i; - if (dev->flags & MLX4_FLAG_MSI_X) - mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 1, - priv->eq_table.eq[MLX4_EQ_CATAS].eqn); - mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1, priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); mlx4_free_irqs(dev); - for (i = 0; i < MLX4_EQ_CATAS; ++i) + for (i = 0; i < MLX4_NUM_EQ; ++i) mlx4_free_eq(dev, &priv->eq_table.eq[i]); - if (dev->flags & MLX4_FLAG_MSI_X) - mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]); mlx4_unmap_clr_int(dev); diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c index d2b065351e45..c45cbe43a0c4 100644 --- a/drivers/net/mlx4/fw.c +++ b/drivers/net/mlx4/fw.c @@ -138,6 +138,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35 #define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36 #define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37 +#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38 #define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b #define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f @@ -220,6 +221,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->local_ca_ack_delay = field & 0x1f; MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); dev_cap->num_ports = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET); + dev_cap->max_msg_sz = 1 << (field & 0x1f); MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); dev_cap->stat_rate_support = stat_rate; MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h index 296254ac27c1..7e1dd9e25cfb 100644 --- a/drivers/net/mlx4/fw.h +++ b/drivers/net/mlx4/fw.h @@ -60,6 +60,7 @@ struct mlx4_dev_cap { int max_rdma_global; int local_ca_ack_delay; int num_ports; + u32 max_msg_sz; int max_mtu[MLX4_MAX_PORTS + 1]; int max_port_width[MLX4_MAX_PORTS + 1]; int max_vl[MLX4_MAX_PORTS + 1]; diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c index 9ae951bf6aa6..be5d9e90ccf2 100644 --- a/drivers/net/mlx4/intf.c +++ b/drivers/net/mlx4/intf.c @@ -142,6 +142,7 @@ int mlx4_register_device(struct mlx4_dev *dev) mlx4_add_device(intf, priv); mutex_unlock(&intf_mutex); + mlx4_start_catas_poll(dev); return 0; } @@ -151,6 +152,7 @@ void mlx4_unregister_device(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_interface *intf; + mlx4_stop_catas_poll(dev); mutex_lock(&intf_mutex); list_for_each_entry(intf, &intf_list, list) diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index c3da2a2f5431..4dc9dc19b716 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c @@ -78,7 +78,7 @@ static const char mlx4_version[] __devinitdata = static struct mlx4_profile default_profile = { .num_qp = 1 << 16, .num_srq = 1 << 16, - .rdmarc_per_qp = 4, + .rdmarc_per_qp = 1 << 4, .num_cq = 1 << 16, .num_mcg = 1 << 13, .num_mpt = 1 << 17, @@ -154,6 +154,7 @@ static int __devinit 
mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev dev->caps.reserved_uars = dev_cap->reserved_uars; dev->caps.reserved_pds = dev_cap->reserved_pds; dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz; + dev->caps.max_msg_sz = dev_cap->max_msg_sz; dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); dev->caps.flags = dev_cap->flags; dev->caps.stat_rate_support = dev_cap->stat_rate_support; @@ -582,13 +583,11 @@ static int __devinit mlx4_setup_hca(struct mlx4_dev *dev) goto err_pd_table_free; } - mlx4_map_catas_buf(dev); - err = mlx4_init_eq_table(dev); if (err) { mlx4_err(dev, "Failed to initialize " "event queue table, aborting.\n"); - goto err_catas_buf; + goto err_mr_table_free; } err = mlx4_cmd_use_events(dev); @@ -658,8 +657,7 @@ err_cmd_poll: err_eq_table_free: mlx4_cleanup_eq_table(dev); -err_catas_buf: - mlx4_unmap_catas_buf(dev); +err_mr_table_free: mlx4_cleanup_mr_table(dev); err_pd_table_free: @@ -835,9 +833,6 @@ err_cleanup: mlx4_cleanup_cq_table(dev); mlx4_cmd_use_polling(dev); mlx4_cleanup_eq_table(dev); - - mlx4_unmap_catas_buf(dev); - mlx4_cleanup_mr_table(dev); mlx4_cleanup_pd_table(dev); mlx4_cleanup_uar_table(dev); @@ -884,9 +879,6 @@ static void __devexit mlx4_remove_one(struct pci_dev *pdev) mlx4_cleanup_cq_table(dev); mlx4_cmd_use_polling(dev); mlx4_cleanup_eq_table(dev); - - mlx4_unmap_catas_buf(dev); - mlx4_cleanup_mr_table(dev); mlx4_cleanup_pd_table(dev); @@ -907,6 +899,12 @@ static void __devexit mlx4_remove_one(struct pci_dev *pdev) } } +int mlx4_restart_one(struct pci_dev *pdev) +{ + mlx4_remove_one(pdev); + return mlx4_init_one(pdev, NULL); +} + static struct pci_device_id mlx4_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */ { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */ @@ -929,6 +927,10 @@ static int __init mlx4_init(void) { int ret; + ret = mlx4_catas_init(); + if (ret) + return ret; + ret = pci_register_driver(&mlx4_driver); return ret < 0 ? 
ret : 0; } @@ -936,6 +938,7 @@ static int __init mlx4_init(void) static void __exit mlx4_cleanup(void) { pci_unregister_driver(&mlx4_driver); + mlx4_catas_cleanup(); } module_init(mlx4_init); diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index 3d3b6d24d8d3..be304a7c2c91 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h @@ -37,7 +37,9 @@ #ifndef MLX4_H #define MLX4_H +#include <linux/mutex.h> #include <linux/radix-tree.h> +#include <linux/timer.h> #include <linux/mlx4/device.h> #include <linux/mlx4/doorbell.h> @@ -66,7 +68,6 @@ enum { enum { MLX4_EQ_ASYNC, MLX4_EQ_COMP, - MLX4_EQ_CATAS, MLX4_NUM_EQ }; @@ -247,7 +248,8 @@ struct mlx4_mcg_table { struct mlx4_catas_err { u32 __iomem *map; - int size; + struct timer_list timer; + struct list_head list; }; struct mlx4_priv { @@ -310,9 +312,11 @@ void mlx4_cleanup_qp_table(struct mlx4_dev *dev); void mlx4_cleanup_srq_table(struct mlx4_dev *dev); void mlx4_cleanup_mcg_table(struct mlx4_dev *dev); -void mlx4_map_catas_buf(struct mlx4_dev *dev); -void mlx4_unmap_catas_buf(struct mlx4_dev *dev); - +void mlx4_start_catas_poll(struct mlx4_dev *dev); +void mlx4_stop_catas_poll(struct mlx4_dev *dev); +int mlx4_catas_init(void); +void mlx4_catas_cleanup(void); +int mlx4_restart_one(struct pci_dev *pdev); int mlx4_register_device(struct mlx4_dev *dev); void mlx4_unregister_device(struct mlx4_dev *dev); void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type, diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c index 492cfaaaa75c..19b48c71cf7f 100644 --- a/drivers/net/mlx4/qp.c +++ b/drivers/net/mlx4/qp.c @@ -277,3 +277,24 @@ void mlx4_cleanup_qp_table(struct mlx4_dev *dev) mlx4_CONF_SPECIAL_QP(dev, 0); mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap); } + +int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, + struct mlx4_qp_context *context) +{ + struct mlx4_cmd_mailbox *mailbox; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0, + MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A); + if (!err) + memcpy(context, mailbox->buf + 8, sizeof *context); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_qp_query); + diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c index 2134f83aed87..b061c86d6839 100644 --- a/drivers/net/mlx4/srq.c +++ b/drivers/net/mlx4/srq.c @@ -102,6 +102,13 @@ static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark) MLX4_CMD_TIME_CLASS_B); } +static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int srq_num) +{ + return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ, + MLX4_CMD_TIME_CLASS_A); +} + int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq) { @@ -205,6 +212,29 @@ int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark } EXPORT_SYMBOL_GPL(mlx4_srq_arm); +int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_srq_context *srq_context; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + srq_context = mailbox->buf; + + err = mlx4_QUERY_SRQ(dev, mailbox, srq->srqn); + if (err) + goto err_out; + *limit_watermark = srq_context->limit_watermark; + +err_out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_srq_query); + 
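+
+/*
+ * Illustrative sketch only, not part of this patch: a consumer
+ * holding a valid mlx4_dev and mlx4_srq might read back the current
+ * limit watermark and re-arm the SRQ with it.  check_srq_limit() is
+ * a hypothetical name; mlx4_srq_query() and mlx4_srq_arm() are the
+ * exported helpers above.
+ */
+static int check_srq_limit(struct mlx4_dev *dev, struct mlx4_srq *srq)
+{
+	int watermark;
+	int err;
+
+	err = mlx4_srq_query(dev, srq, &watermark);
+	if (err)
+		return err;
+
+	/* Arm the SRQ so a limit event fires at the same watermark. */
+	return mlx4_srq_arm(dev, srq, watermark);
+}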
int __devinit mlx4_init_srq_table(struct mlx4_dev *dev) { struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index e1732c164a40..deca65330b0f 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -1060,7 +1060,6 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) struct myri10ge_tx_buf *tx = &mgp->tx; struct sk_buff *skb; int idx, len; - int limit = 0; while (tx->pkt_done != mcp_index) { idx = tx->done & tx->mask; @@ -1091,11 +1090,6 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) bus), len, PCI_DMA_TODEVICE); } - - /* limit potential for livelock by only handling - * 2 full tx rings per call */ - if (unlikely(++limit > 2 * tx->mask)) - break; } /* start the queue if we've stopped it */ if (netif_queue_stopped(mgp->dev) diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index 460a08718c69..6bb48ba80964 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c @@ -671,7 +671,7 @@ static ssize_t natsemi_show_##_name(struct device *dev, \ #define NATSEMI_CREATE_FILE(_dev, _name) \ device_create_file(&_dev->dev, &dev_attr_##_name) #define NATSEMI_REMOVE_FILE(_dev, _name) \ - device_create_file(&_dev->dev, &dev_attr_##_name) + device_remove_file(&_dev->dev, &dev_attr_##_name) NATSEMI_ATTR(dspcfg_workaround); @@ -2357,8 +2357,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do) np->rx_dma[entry], buflen, PCI_DMA_FROMDEVICE); - eth_copy_and_sum(skb, - np->rx_skbuff[entry]->data, pkt_len, 0); + skb_copy_to_linear_data(skb, + np->rx_skbuff[entry]->data, pkt_len); skb_put(skb, pkt_len); pci_dma_sync_single_for_device(np->pci_dev, np->rx_dma[entry], diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c index 995c0a5d4066..cfdeaf7aa163 100644 --- a/drivers/net/ne2k-pci.c +++ b/drivers/net/ne2k-pci.c @@ -669,10 +669,15 @@ static int ne2k_pci_suspend (struct pci_dev *pdev, pm_message_t state) static int ne2k_pci_resume (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata (pdev); + int rc; pci_set_power_state(pdev, 0); pci_restore_state(pdev); - pci_enable_device(pdev); + + rc = pci_enable_device(pdev); + if (rc) + return rc; + NS8390_init(dev, 1); netif_device_attach(dev); diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 56f8197b953b..b703ccfe040b 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -54,8 +54,6 @@ static char netxen_nic_driver_string[] = "NetXen Network Driver version " #define NETXEN_ADAPTER_UP_MAGIC 777 #define NETXEN_NIC_PEG_TUNE 0 -u8 nx_p2_id = NX_P2_C0; - #define DMA_32BIT_MASK 0x00000000ffffffffULL #define DMA_35BIT_MASK 0x00000007ffffffffULL @@ -307,8 +305,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_disable_pdev; pci_set_master(pdev); - pci_read_config_byte(pdev, PCI_REVISION_ID, &nx_p2_id); - if (nx_p2_id == NX_P2_C1 && + if (pdev->revision == NX_P2_C1 && (pci_set_dma_mask(pdev, DMA_35BIT_MASK) == 0) && (pci_set_consistent_dma_mask(pdev, DMA_35BIT_MASK) == 0)) { pci_using_dac = 1; @@ -552,7 +549,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task); adapter->ahw.pdev = pdev; adapter->proc_cmd_buf_counter = 0; - adapter->ahw.revision_id = nx_p2_id; + adapter->ahw.revision_id = pdev->revision; /* make sure Window == 1 */ 
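 /* (CRB registers are reached through a switchable access window;
  * the probe code below assumes window 1 is selected) */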
netxen_nic_pci_change_crbwindow(adapter, 1); diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c index 3d5b4232f65f..22a3b3dc7d89 100644 --- a/drivers/net/ni5010.c +++ b/drivers/net/ni5010.c @@ -670,14 +670,10 @@ static void ni5010_set_multicast_list(struct net_device *dev) PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name)); - if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI) { + if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) { dev->flags |= IFF_PROMISC; outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */ PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name)); - } else if (dev->mc_list) { - /* Sorry, multicast not supported */ - PRINTK((KERN_DEBUG "%s: No multicast, entering broadcast mode\n", dev->name)); - outb(RMD_BROADCAST, EDLC_RMODE); } else { PRINTK((KERN_DEBUG "%s: Entering broadcast mode\n", dev->name)); outb(RMD_BROADCAST, EDLC_RMODE); /* Disable promiscuous mode, use normal mode */ diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c index 8dbd6d1900b5..5e7999db2096 100644 --- a/drivers/net/ni52.c +++ b/drivers/net/ni52.c @@ -936,7 +936,7 @@ static void ni52_rcv_int(struct net_device *dev) { skb_reserve(skb,2); skb_put(skb,totlen); - eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0); + skb_copy_to_linear_data(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen); skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c index 3818edf0ac18..4ef5fe345191 100644 --- a/drivers/net/ni65.c +++ b/drivers/net/ni65.c @@ -1096,7 +1096,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0) #ifdef RCV_VIA_SKB if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) { skb_put(skb,len); - eth_copy_and_sum(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len,0); + skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len); } else { struct sk_buff *skb1 = p->recv_skb[p->rmdnum]; @@ -1108,7 +1108,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0) } #else skb_put(skb,len); - eth_copy_and_sum(skb, (unsigned char *) p->recvbounce[p->rmdnum],len,0); + skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len); #endif p->stats.rx_packets++; p->stats.rx_bytes += len; diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index 104aab3c957f..ea80e6cb3dec 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c @@ -1582,7 +1582,7 @@ static void ns83820_set_multicast(struct net_device *ndev) else and_mask &= ~(RFCR_AAU | RFCR_AAM); - if (ndev->flags & IFF_ALLMULTI) + if (ndev->flags & IFF_ALLMULTI || ndev->mc_count) or_mask |= RFCR_AAM; else and_mask &= ~RFCR_AAM; diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c index df8998b4f37e..3cdbe118200b 100644 --- a/drivers/net/pci-skeleton.c +++ b/drivers/net/pci-skeleton.c @@ -1567,7 +1567,7 @@ static void netdrv_rx_interrupt (struct net_device *dev, if (skb) { skb_reserve (skb, 2); /* 16 byte align the IP fields. 
*/ - eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0); + skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size); skb_put (skb, pkt_size); skb->protocol = eth_type_trans (skb, dev); diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c index 0d1c7a41c9c6..ea9414c4d900 100644 --- a/drivers/net/pcmcia/com20020_cs.c +++ b/drivers/net/pcmcia/com20020_cs.c @@ -147,7 +147,7 @@ static int com20020_probe(struct pcmcia_device *p_dev) DEBUG(0, "com20020_attach()\n"); /* Create new network device */ - info = kmalloc(sizeof(struct com20020_dev_t), GFP_KERNEL); + info = kzalloc(sizeof(struct com20020_dev_t), GFP_KERNEL); if (!info) goto fail_alloc_info; @@ -155,7 +155,6 @@ static int com20020_probe(struct pcmcia_device *p_dev) if (!dev) goto fail_alloc_dev; - memset(info, 0, sizeof(struct com20020_dev_t)); lp = dev->priv; lp->timeout = timeout; lp->backplane = backplane; diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c index 4ecb8ca5a992..4eafa4f42cff 100644 --- a/drivers/net/pcmcia/ibmtr_cs.c +++ b/drivers/net/pcmcia/ibmtr_cs.c @@ -146,9 +146,8 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link) DEBUG(0, "ibmtr_attach()\n"); /* Create new token-ring device */ - info = kmalloc(sizeof(*info), GFP_KERNEL); + info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; - memset(info,0,sizeof(*info)); dev = alloc_trdev(sizeof(struct tok_info)); if (!dev) { kfree(info); diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 9c171a7390e2..465485a3fbc6 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c @@ -1235,9 +1235,9 @@ static void pcnet32_rx_entry(struct net_device *dev, lp->rx_dma_addr[entry], pkt_len, PCI_DMA_FROMDEVICE); - eth_copy_and_sum(skb, + skb_copy_to_linear_data(skb, (unsigned char *)(lp->rx_skbuff[entry]->data), - pkt_len, 0); + pkt_len); pci_dma_sync_single_for_device(lp->pci_dev, lp->rx_dma_addr[entry], pkt_len, diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 596222b260d6..6a5385647911 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -21,6 +21,10 @@ /* Vitesse Extended Control Register 1 */ #define MII_VSC8244_EXT_CON1 0x17 #define MII_VSC8244_EXTCON1_INIT 0x0000 +#define MII_VSC8244_EXTCON1_TX_SKEW_MASK 0x0c00 +#define MII_VSC8244_EXTCON1_RX_SKEW_MASK 0x0300 +#define MII_VSC8244_EXTCON1_TX_SKEW 0x0800 +#define MII_VSC8244_EXTCON1_RX_SKEW 0x0200 /* Vitesse Interrupt Mask Register */ #define MII_VSC8244_IMASK 0x19 @@ -39,7 +43,7 @@ /* Vitesse Auxiliary Control/Status Register */ #define MII_VSC8244_AUX_CONSTAT 0x1c -#define MII_VSC8244_AUXCONSTAT_INIT 0x0004 +#define MII_VSC8244_AUXCONSTAT_INIT 0x0000 #define MII_VSC8244_AUXCONSTAT_DUPLEX 0x0020 #define MII_VSC8244_AUXCONSTAT_SPEED 0x0018 #define MII_VSC8244_AUXCONSTAT_GBIT 0x0010 @@ -51,6 +55,7 @@ MODULE_LICENSE("GPL"); static int vsc824x_config_init(struct phy_device *phydev) { + int extcon; int err; err = phy_write(phydev, MII_VSC8244_AUX_CONSTAT, @@ -58,14 +63,34 @@ static int vsc824x_config_init(struct phy_device *phydev) if (err < 0) return err; - err = phy_write(phydev, MII_VSC8244_EXT_CON1, - MII_VSC8244_EXTCON1_INIT); + extcon = phy_read(phydev, MII_VSC8244_EXT_CON1); + + if (extcon < 0) + return err; + + extcon &= ~(MII_VSC8244_EXTCON1_TX_SKEW_MASK | + MII_VSC8244_EXTCON1_RX_SKEW_MASK); + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) + extcon |= (MII_VSC8244_EXTCON1_TX_SKEW | + MII_VSC8244_EXTCON1_RX_SKEW); + + err = phy_write(phydev, 
MII_VSC8244_EXT_CON1, extcon); + return err; } static int vsc824x_ack_interrupt(struct phy_device *phydev) { - int err = phy_read(phydev, MII_VSC8244_ISTAT); + int err = 0; + + /* + * Don't bother to ACK the interrupts if interrupts + * are disabled. The 824x cannot clear the interrupts + * if they are disabled. + */ + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + err = phy_read(phydev, MII_VSC8244_ISTAT); return (err < 0) ? err : 0; } @@ -77,8 +102,19 @@ static int vsc824x_config_intr(struct phy_device *phydev) if (phydev->interrupts == PHY_INTERRUPT_ENABLED) err = phy_write(phydev, MII_VSC8244_IMASK, MII_VSC8244_IMASK_MASK); - else + else { + /* + * The Vitesse PHY cannot clear the interrupt + * once it has disabled them, so we clear them first + */ + err = phy_read(phydev, MII_VSC8244_ISTAT); + + if (err) + return err; + err = phy_write(phydev, MII_VSC8244_IMASK, 0); + } + return err; } diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c index caabbc408c34..27f5b904f48e 100644 --- a/drivers/net/ppp_async.c +++ b/drivers/net/ppp_async.c @@ -159,12 +159,11 @@ ppp_asynctty_open(struct tty_struct *tty) int err; err = -ENOMEM; - ap = kmalloc(sizeof(*ap), GFP_KERNEL); + ap = kzalloc(sizeof(*ap), GFP_KERNEL); if (ap == 0) goto out; /* initialize the asyncppp structure */ - memset(ap, 0, sizeof(*ap)); ap->tty = tty; ap->mru = PPP_MRU; spin_lock_init(&ap->xmit_lock); diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c index 72c8d6628f58..eb98b661efba 100644 --- a/drivers/net/ppp_deflate.c +++ b/drivers/net/ppp_deflate.c @@ -121,12 +121,11 @@ static void *z_comp_alloc(unsigned char *options, int opt_len) if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE) return NULL; - state = kmalloc(sizeof(*state), + state = kzalloc(sizeof(*state), GFP_KERNEL); if (state == NULL) return NULL; - memset (state, 0, sizeof (struct ppp_deflate_state)); state->strm.next_in = NULL; state->w_size = w_size; state->strm.workspace = vmalloc(zlib_deflate_workspacesize()); @@ -341,11 +340,10 @@ static void *z_decomp_alloc(unsigned char *options, int opt_len) if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE) return NULL; - state = kmalloc(sizeof(*state), GFP_KERNEL); + state = kzalloc(sizeof(*state), GFP_KERNEL); if (state == NULL) return NULL; - memset (state, 0, sizeof (struct ppp_deflate_state)); state->w_size = w_size; state->strm.next_out = NULL; state->strm.workspace = kmalloc(zlib_inflate_workspacesize(), diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 3ef0092dc09c..ef3325b69233 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -2684,8 +2684,7 @@ static void __exit ppp_cleanup(void) if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count)) printk(KERN_ERR "PPP: removing module but units remain!\n"); cardmap_destroy(&all_ppp_units); - if (unregister_chrdev(PPP_MAJOR, "ppp") != 0) - printk(KERN_ERR "PPP: failed to unregister PPP device\n"); + unregister_chrdev(PPP_MAJOR, "ppp"); device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); class_destroy(ppp_class); } diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c index d5bdd2574659..f79cf87a2bff 100644 --- a/drivers/net/ppp_mppe.c +++ b/drivers/net/ppp_mppe.c @@ -200,11 +200,10 @@ static void *mppe_alloc(unsigned char *options, int optlen) || options[0] != CI_MPPE || options[1] != CILEN_MPPE) goto out; - state = kmalloc(sizeof(*state), GFP_KERNEL); + state = kzalloc(sizeof(*state), GFP_KERNEL); if (state == NULL) goto out; - memset(state, 0, sizeof(*state)); 
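The run of allocator hunks here (ppp_async and ppp_deflate above, the mppe and synctty changes that follow) is a mechanical conversion: kmalloc() followed by memset(..., 0, ...) becomes a single kzalloc(), which is shorter and leaves no window in which the structure holds stale memory. The userspace equivalent of the two shapes, with an illustrative struct:

#include <stdlib.h>
#include <string.h>

struct state { int w_size; void *workspace; };

/* Old shape: two steps, and a gap where *s holds garbage. */
static struct state *alloc_old(void)
{
	struct state *s = malloc(sizeof(*s));
	if (s == NULL)
		return NULL;
	memset(s, 0, sizeof(*s));
	return s;
}

/* New shape: zeroed at allocation, one call, no gap. */
static struct state *alloc_new(void)
{
	return calloc(1, sizeof(struct state));
}

int main(void)
{
	free(alloc_old());
	free(alloc_new());
	return 0;
}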
state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(state->arc4)) { diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c index 5918fab38349..ce64032a465a 100644 --- a/drivers/net/ppp_synctty.c +++ b/drivers/net/ppp_synctty.c @@ -207,13 +207,12 @@ ppp_sync_open(struct tty_struct *tty) struct syncppp *ap; int err; - ap = kmalloc(sizeof(*ap), GFP_KERNEL); + ap = kzalloc(sizeof(*ap), GFP_KERNEL); err = -ENOMEM; if (ap == 0) goto out; /* initialize the syncppp structure */ - memset(ap, 0, sizeof(*ap)); ap->tty = tty; ap->mru = PPP_MRU; spin_lock_init(&ap->xmit_lock); diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c new file mode 100644 index 000000000000..f87176055d0e --- /dev/null +++ b/drivers/net/pppol2tp.c @@ -0,0 +1,2496 @@ +/***************************************************************************** + * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets + * + * PPPoX --- Generic PPP encapsulation socket family + * PPPoL2TP --- PPP over L2TP (RFC 2661) + * + * Version: 1.0.0 + * + * Authors: Martijn van Oosterhout <kleptog@svana.org> + * James Chapman (jchapman@katalix.com) + * Contributors: + * Michal Ostrowski <mostrows@speakeasy.net> + * Arnaldo Carvalho de Melo <acme@xconectiva.com.br> + * David S. Miller (davem@redhat.com) + * + * License: + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +/* This driver handles only L2TP data frames; control frames are handled by a + * userspace application. + * + * To send data in an L2TP session, userspace opens a PPPoL2TP socket and + * attaches it to a bound UDP socket with local tunnel_id / session_id and + * peer tunnel_id / session_id set. Data can then be sent or received using + * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket + * can be read or modified using ioctl() or [gs]etsockopt() calls. + * + * When a PPPoL2TP socket is connected with local and peer session_id values + * zero, the socket is treated as a special tunnel management socket. + * + * Here's example userspace code to create a socket for sending/receiving data + * over an L2TP session:- + * + * struct sockaddr_pppol2tp sax; + * int fd; + * int session_fd; + * + * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP); + * + * sax.sa_family = AF_PPPOX; + * sax.sa_protocol = PX_PROTO_OL2TP; + * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket + * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr; + * sax.pppol2tp.addr.sin_port = addr->sin_port; + * sax.pppol2tp.addr.sin_family = AF_INET; + * sax.pppol2tp.s_tunnel = tunnel_id; + * sax.pppol2tp.s_session = session_id; + * sax.pppol2tp.d_tunnel = peer_tunnel_id; + * sax.pppol2tp.d_session = peer_session_id; + * + * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax)); + * + * A pppd plugin that allows PPP traffic to be carried over L2TP using + * this driver is available from the OpenL2TP project at + * http://openl2tp.sourceforge.net. 
+ */ + +#include <linux/module.h> +#include <linux/version.h> +#include <linux/string.h> +#include <linux/list.h> +#include <asm/uaccess.h> + +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/kthread.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/jiffies.h> + +#include <linux/netdevice.h> +#include <linux/net.h> +#include <linux/inetdevice.h> +#include <linux/skbuff.h> +#include <linux/init.h> +#include <linux/ip.h> +#include <linux/udp.h> +#include <linux/if_pppox.h> +#include <linux/if_pppol2tp.h> +#include <net/sock.h> +#include <linux/ppp_channel.h> +#include <linux/ppp_defs.h> +#include <linux/if_ppp.h> +#include <linux/file.h> +#include <linux/hash.h> +#include <linux/sort.h> +#include <linux/proc_fs.h> +#include <net/dst.h> +#include <net/ip.h> +#include <net/udp.h> +#include <net/xfrm.h> + +#include <asm/byteorder.h> +#include <asm/atomic.h> + + +#define PPPOL2TP_DRV_VERSION "V1.0" + +/* L2TP header constants */ +#define L2TP_HDRFLAG_T 0x8000 +#define L2TP_HDRFLAG_L 0x4000 +#define L2TP_HDRFLAG_S 0x0800 +#define L2TP_HDRFLAG_O 0x0200 +#define L2TP_HDRFLAG_P 0x0100 + +#define L2TP_HDR_VER_MASK 0x000F +#define L2TP_HDR_VER 0x0002 + +/* Space for UDP, L2TP and PPP headers */ +#define PPPOL2TP_HEADER_OVERHEAD 40 + +/* Just some random numbers */ +#define L2TP_TUNNEL_MAGIC 0x42114DDA +#define L2TP_SESSION_MAGIC 0x0C04EB7D + +#define PPPOL2TP_HASH_BITS 4 +#define PPPOL2TP_HASH_SIZE (1 << PPPOL2TP_HASH_BITS) + +/* Default trace flags */ +#define PPPOL2TP_DEFAULT_DEBUG_FLAGS 0 + +#define PRINTK(_mask, _type, _lvl, _fmt, args...) \ + do { \ + if ((_mask) & (_type)) \ + printk(_lvl "PPPOL2TP: " _fmt, ##args); \ + } while(0) + +/* Number of bytes to build transmit L2TP headers. + * Unfortunately the size is different depending on whether sequence numbers + * are enabled. + */ +#define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10 +#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6 + +struct pppol2tp_tunnel; + +/* Describes a session. It is the sk_user_data field in the PPPoL2TP + * socket. Contains information to determine incoming packets and transmit + * outgoing ones. + */ +struct pppol2tp_session +{ + int magic; /* should be + * L2TP_SESSION_MAGIC */ + int owner; /* pid that opened the socket */ + + struct sock *sock; /* Pointer to the session + * PPPoX socket */ + struct sock *tunnel_sock; /* Pointer to the tunnel UDP + * socket */ + + struct pppol2tp_addr tunnel_addr; /* Description of tunnel */ + + struct pppol2tp_tunnel *tunnel; /* back pointer to tunnel + * context */ + + char name[20]; /* "sess xxxxx/yyyyy", where + * x=tunnel_id, y=session_id */ + int mtu; + int mru; + int flags; /* accessed by PPPIOCGFLAGS. + * Unused. */ + unsigned recv_seq:1; /* expect receive packets with + * sequence numbers? */ + unsigned send_seq:1; /* send packets with sequence + * numbers? */ + unsigned lns_mode:1; /* behave as LNS? LAC enables + * sequence numbers under + * control of LNS. */ + int debug; /* bitmask of debug message + * categories */ + int reorder_timeout; /* configured reorder timeout + * (in jiffies) */ + u16 nr; /* session NR state (receive) */ + u16 ns; /* session NR state (send) */ + struct sk_buff_head reorder_q; /* receive reorder queue */ + struct pppol2tp_ioc_stats stats; + struct hlist_node hlist; /* Hash list node */ +}; + +/* The sk_user_data field of the tunnel's UDP socket. 
It contains info to track + * all the associated sessions so incoming packets can be sorted out + */ +struct pppol2tp_tunnel +{ + int magic; /* Should be L2TP_TUNNEL_MAGIC */ + rwlock_t hlist_lock; /* protect session_hlist */ + struct hlist_head session_hlist[PPPOL2TP_HASH_SIZE]; + /* hashed list of sessions, + * hashed by id */ + int debug; /* bitmask of debug message + * categories */ + char name[12]; /* "tunl xxxxx" */ + struct pppol2tp_ioc_stats stats; + + void (*old_sk_destruct)(struct sock *); + + struct sock *sock; /* Parent socket */ + struct list_head list; /* Keep a list of all open + * prepared sockets */ + + atomic_t ref_count; +}; + +/* Private data stored for received packets in the skb. + */ +struct pppol2tp_skb_cb { + u16 ns; + u16 nr; + u16 has_seq; + u16 length; + unsigned long expires; +}; + +#define PPPOL2TP_SKB_CB(skb) ((struct pppol2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)]) + +static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb); +static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel); + +static atomic_t pppol2tp_tunnel_count; +static atomic_t pppol2tp_session_count; +static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL }; +static struct proto_ops pppol2tp_ops; +static LIST_HEAD(pppol2tp_tunnel_list); +static DEFINE_RWLOCK(pppol2tp_tunnel_list_lock); + +/* Helpers to obtain tunnel/session contexts from sockets. + */ +static inline struct pppol2tp_session *pppol2tp_sock_to_session(struct sock *sk) +{ + struct pppol2tp_session *session; + + if (sk == NULL) + return NULL; + + session = (struct pppol2tp_session *)(sk->sk_user_data); + if (session == NULL) + return NULL; + + BUG_ON(session->magic != L2TP_SESSION_MAGIC); + + return session; +} + +static inline struct pppol2tp_tunnel *pppol2tp_sock_to_tunnel(struct sock *sk) +{ + struct pppol2tp_tunnel *tunnel; + + if (sk == NULL) + return NULL; + + tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data); + if (tunnel == NULL) + return NULL; + + BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); + + return tunnel; +} + +/* Tunnel reference counts. Incremented per session that is added to + * the tunnel. + */ +static inline void pppol2tp_tunnel_inc_refcount(struct pppol2tp_tunnel *tunnel) +{ + atomic_inc(&tunnel->ref_count); +} + +static inline void pppol2tp_tunnel_dec_refcount(struct pppol2tp_tunnel *tunnel) +{ + if (atomic_dec_and_test(&tunnel->ref_count)) + pppol2tp_tunnel_free(tunnel); +} + +/* Session hash list. + * The session_id SHOULD be random according to RFC2661, but several + * L2TP implementations (Cisco and Microsoft) use incrementing + * session_ids. So we do a real hash on the session_id, rather than a + * simple bitmask. 
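To see the point of the comment above: a real hash spreads session_ids across the 16 buckets regardless of whatever pattern the peer's allocator uses, where a plain bitmask only works well for ids that already vary in their low bits. The short standalone demo below feeds incrementing ids through a multiplicative hash (standing in for the kernel's hash_long(); the constant is the usual Knuth multiplier, an assumption of this sketch) and counts how evenly the buckets fill:

#include <stdio.h>

#define HASH_BITS 4
#define HASH_SIZE (1 << HASH_BITS)

/* Multiplicative hash standing in for the kernel's hash_long(). */
static unsigned int hash_id(unsigned long v)
{
	return (unsigned int)((v * 2654435761UL) >> (32 - HASH_BITS)) % HASH_SIZE;
}

int main(void)
{
	int buckets[HASH_SIZE] = { 0 };
	unsigned long id;

	/* Sequential session ids, as Cisco/Microsoft L2TP stacks hand out. */
	for (id = 1; id <= 64; id++)
		buckets[hash_id(id)]++;

	for (id = 0; id < HASH_SIZE; id++)
		printf("bucket %2lu: %d\n", id, buckets[id]);
	return 0;
}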
+ */ +static inline struct hlist_head * +pppol2tp_session_id_hash(struct pppol2tp_tunnel *tunnel, u16 session_id) +{ + unsigned long hash_val = (unsigned long) session_id; + return &tunnel->session_hlist[hash_long(hash_val, PPPOL2TP_HASH_BITS)]; +} + +/* Lookup a session by id + */ +static struct pppol2tp_session * +pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id) +{ + struct hlist_head *session_list = + pppol2tp_session_id_hash(tunnel, session_id); + struct pppol2tp_session *session; + struct hlist_node *walk; + + read_lock(&tunnel->hlist_lock); + hlist_for_each_entry(session, walk, session_list, hlist) { + if (session->tunnel_addr.s_session == session_id) { + read_unlock(&tunnel->hlist_lock); + return session; + } + } + read_unlock(&tunnel->hlist_lock); + + return NULL; +} + +/* Lookup a tunnel by id + */ +static struct pppol2tp_tunnel *pppol2tp_tunnel_find(u16 tunnel_id) +{ + struct pppol2tp_tunnel *tunnel = NULL; + + read_lock(&pppol2tp_tunnel_list_lock); + list_for_each_entry(tunnel, &pppol2tp_tunnel_list, list) { + if (tunnel->stats.tunnel_id == tunnel_id) { + read_unlock(&pppol2tp_tunnel_list_lock); + return tunnel; + } + } + read_unlock(&pppol2tp_tunnel_list_lock); + + return NULL; +} + +/***************************************************************************** + * Receive data handling + *****************************************************************************/ + +/* Queue a skb in order. We come here only if the skb has an L2TP sequence + * number. + */ +static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_buff *skb) +{ + struct sk_buff *skbp; + u16 ns = PPPOL2TP_SKB_CB(skb)->ns; + + spin_lock(&session->reorder_q.lock); + skb_queue_walk(&session->reorder_q, skbp) { + if (PPPOL2TP_SKB_CB(skbp)->ns > ns) { + __skb_insert(skb, skbp->prev, skbp, &session->reorder_q); + PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, + "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", + session->name, ns, PPPOL2TP_SKB_CB(skbp)->ns, + skb_queue_len(&session->reorder_q)); + session->stats.rx_oos_packets++; + goto out; + } + } + + __skb_queue_tail(&session->reorder_q, skb); + +out: + spin_unlock(&session->reorder_q.lock); +} + +/* Dequeue a single skb. + */ +static void pppol2tp_recv_dequeue_skb(struct pppol2tp_session *session, struct sk_buff *skb) +{ + struct pppol2tp_tunnel *tunnel = session->tunnel; + int length = PPPOL2TP_SKB_CB(skb)->length; + struct sock *session_sock = NULL; + + /* We're about to requeue the skb, so unlink it and return resources + * to its current owner (a socket receive buffer). + */ + skb_unlink(skb, &session->reorder_q); + skb_orphan(skb); + + tunnel->stats.rx_packets++; + tunnel->stats.rx_bytes += length; + session->stats.rx_packets++; + session->stats.rx_bytes += length; + + if (PPPOL2TP_SKB_CB(skb)->has_seq) { + /* Bump our Nr */ + session->nr++; + PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, + "%s: updated nr to %hu\n", session->name, session->nr); + } + + /* If the socket is bound, send it in to PPP's input queue. Otherwise + * queue it on the session socket. + */ + session_sock = session->sock; + if (session_sock->sk_state & PPPOX_BOUND) { + struct pppox_sock *po; + PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, + "%s: recv %d byte data frame, passing to ppp\n", + session->name, length); + + /* We need to forget all info related to the L2TP packet + * gathered in the skb as we are going to reuse the same + * skb for the inner packet. 
+ * Namely we need to: + * - reset xfrm (IPSec) information as it applies to + * the outer L2TP packet and not to the inner one + * - release the dst to force a route lookup on the inner + * IP packet since skb->dst currently points to the dst + * of the UDP tunnel + * - reset netfilter information as it doesn't apply + * to the inner packet either + */ + secpath_reset(skb); + dst_release(skb->dst); + skb->dst = NULL; + nf_reset(skb); + + po = pppox_sk(session_sock); + ppp_input(&po->chan, skb); + } else { + PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO, + "%s: socket not bound\n", session->name); + + /* Not bound. Nothing we can do, so discard. */ + session->stats.rx_errors++; + kfree_skb(skb); + } + + sock_put(session->sock); +} + +/* Dequeue skbs from the session's reorder_q, subject to packet order. + * Skbs that have been in the queue for too long are simply discarded. + */ +static void pppol2tp_recv_dequeue(struct pppol2tp_session *session) +{ + struct sk_buff *skb; + struct sk_buff *tmp; + + /* If the pkt at the head of the queue has the nr that we + * expect to send up next, dequeue it and any other + * in-sequence packets behind it. + */ + spin_lock(&session->reorder_q.lock); + skb_queue_walk_safe(&session->reorder_q, skb, tmp) { + if (time_after(jiffies, PPPOL2TP_SKB_CB(skb)->expires)) { + session->stats.rx_seq_discards++; + session->stats.rx_errors++; + PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, + "%s: oos pkt %hu len %d discarded (too old), " + "waiting for %hu, reorder_q_len=%d\n", + session->name, PPPOL2TP_SKB_CB(skb)->ns, + PPPOL2TP_SKB_CB(skb)->length, session->nr, + skb_queue_len(&session->reorder_q)); + __skb_unlink(skb, &session->reorder_q); + kfree_skb(skb); + continue; + } + + if (PPPOL2TP_SKB_CB(skb)->has_seq) { + if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) { + PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, + "%s: holding oos pkt %hu len %d, " + "waiting for %hu, reorder_q_len=%d\n", + session->name, PPPOL2TP_SKB_CB(skb)->ns, + PPPOL2TP_SKB_CB(skb)->length, session->nr, + skb_queue_len(&session->reorder_q)); + goto out; + } + } + spin_unlock(&session->reorder_q.lock); + pppol2tp_recv_dequeue_skb(session, skb); + spin_lock(&session->reorder_q.lock); + } + +out: + spin_unlock(&session->reorder_q.lock); +} + +/* Internal receive frame. Do the real work of receiving an L2TP data frame + * here. The skb is not on a list when we get here. + * Returns 0 if the packet was a data packet and was successfully passed on. + * Returns 1 if the packet was not a good data packet and could not be + * forwarded. All such packets are passed up to userspace to deal with. + */ +static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb) +{ + struct pppol2tp_session *session = NULL; + struct pppol2tp_tunnel *tunnel; + unsigned char *ptr; + u16 hdrflags; + u16 tunnel_id, session_id; + int length; + struct udphdr *uh; + + tunnel = pppol2tp_sock_to_tunnel(sock); + if (tunnel == NULL) + goto error; + + /* Short packet? 
*/ + if (skb->len < sizeof(struct udphdr)) { + PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO, + "%s: recv short packet (len=%d)\n", tunnel->name, skb->len); + goto error; + } + + /* Point to L2TP header */ + ptr = skb->data + sizeof(struct udphdr); + + /* Get L2TP header flags */ + hdrflags = ntohs(*(__be16*)ptr); + + /* Trace packet contents, if enabled */ + if (tunnel->debug & PPPOL2TP_MSG_DATA) { + printk(KERN_DEBUG "%s: recv: ", tunnel->name); + + for (length = 0; length < 16; length++) + printk(" %02X", ptr[length]); + printk("\n"); + } + + /* Get length of L2TP packet */ + uh = (struct udphdr *) skb_transport_header(skb); + length = ntohs(uh->len) - sizeof(struct udphdr); + + /* Too short? */ + if (length < 12) { + PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO, + "%s: recv short L2TP packet (len=%d)\n", tunnel->name, length); + goto error; + } + + /* If type is control packet, it is handled by userspace. */ + if (hdrflags & L2TP_HDRFLAG_T) { + PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, + "%s: recv control packet, len=%d\n", tunnel->name, length); + goto error; + } + + /* Skip flags */ + ptr += 2; + + /* If length is present, skip it */ + if (hdrflags & L2TP_HDRFLAG_L) + ptr += 2; + + /* Extract tunnel and session ID */ + tunnel_id = ntohs(*(__be16 *) ptr); + ptr += 2; + session_id = ntohs(*(__be16 *) ptr); + ptr += 2; + + /* Find the session context */ + session = pppol2tp_session_find(tunnel, session_id); + if (!session) { + /* Not found? Pass to userspace to deal with */ + PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO, + "%s: no socket found (%hu/%hu). Passing up.\n", + tunnel->name, tunnel_id, session_id); + goto error; + } + sock_hold(session->sock); + + /* The ref count on the socket was increased by the above call since + * we now hold a pointer to the session. Take care to do sock_put() + * when exiting this function from now on... + */ + + /* Handle the optional sequence numbers. If we are the LAC, + * enable/disable sequence numbers under the control of the LNS. If + * no sequence numbers present but we were expecting them, discard + * frame. + */ + if (hdrflags & L2TP_HDRFLAG_S) { + u16 ns, nr; + ns = ntohs(*(__be16 *) ptr); + ptr += 2; + nr = ntohs(*(__be16 *) ptr); + ptr += 2; + + /* Received a packet with sequence numbers. If we're the LNS, + * check if we are sending sequence numbers and if not, + * configure it so. + */ + if ((!session->lns_mode) && (!session->send_seq)) { + PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO, + "%s: requested to enable seq numbers by LNS\n", + session->name); + session->send_seq = -1; + } + + /* Store L2TP info in the skb */ + PPPOL2TP_SKB_CB(skb)->ns = ns; + PPPOL2TP_SKB_CB(skb)->nr = nr; + PPPOL2TP_SKB_CB(skb)->has_seq = 1; + + PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, + "%s: recv data ns=%hu, nr=%hu, session nr=%hu\n", + session->name, ns, nr, session->nr); + } else { + /* No sequence numbers. + * If user has configured mandatory sequence numbers, discard. + */ + if (session->recv_seq) { + PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING, + "%s: recv data has no seq numbers when required. " + "Discarding\n", session->name); + session->stats.rx_seq_discards++; + session->stats.rx_errors++; + goto discard; + } + + /* If we're the LAC and we're sending sequence numbers, the + * LNS has requested that we no longer send sequence numbers. + * If we're the LNS and we're sending sequence numbers, the + * LAC is broken. Discard the frame. + */ + if ((!session->lns_mode) && (session->send_seq)) { + PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO, + "%s: requested to disable seq numbers by LNS\n", + session->name); + session->send_seq = 0; + } else if (session->send_seq) { + PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING, + "%s: recv data has no seq numbers when required. " + "Discarding\n", session->name); + session->stats.rx_seq_discards++; + session->stats.rx_errors++; + goto discard; + } + + /* Store L2TP info in the skb */ + PPPOL2TP_SKB_CB(skb)->has_seq = 0; + } + + /* If offset bit set, skip it. */ + if (hdrflags & L2TP_HDRFLAG_O) + ptr += 2 + ntohs(*(__be16 *) ptr); + + skb_pull(skb, ptr - skb->data); + + /* Skip PPP header, if present. In testing, Microsoft L2TP clients + * don't send the PPP header (PPP header compression enabled), but + * other clients can include the header. So we cope with both cases + * here. The PPP header is always FF03 when using L2TP. + * + * Note that skb->data[] isn't dereferenced from a u16 ptr here since + * the field may be unaligned. + */ + if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03)) + skb_pull(skb, 2); + + /* Prepare skb for adding to the session's reorder_q. Hold + * packets for max reorder_timeout or 1 second if not + * reordering. + */ + PPPOL2TP_SKB_CB(skb)->length = length; + PPPOL2TP_SKB_CB(skb)->expires = jiffies + + (session->reorder_timeout ? session->reorder_timeout : HZ); + + /* Add packet to the session's receive queue. Reordering is done here, if + * enabled. Saved L2TP protocol info is stored in skb->cb[]. + */ + if (PPPOL2TP_SKB_CB(skb)->has_seq) { + if (session->reorder_timeout != 0) { + /* Packet reordering enabled. Add skb to session's + * reorder queue, in order of ns. + */ + pppol2tp_recv_queue_skb(session, skb); + } else { + /* Packet reordering disabled. Discard out-of-sequence + * packets + */ + if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) { + session->stats.rx_seq_discards++; + session->stats.rx_errors++; + PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, + "%s: oos pkt %hu len %d discarded, " + "waiting for %hu, reorder_q_len=%d\n", + session->name, PPPOL2TP_SKB_CB(skb)->ns, + PPPOL2TP_SKB_CB(skb)->length, session->nr, + skb_queue_len(&session->reorder_q)); + goto discard; + } + skb_queue_tail(&session->reorder_q, skb); + } + } else { + /* No sequence numbers. Add the skb to the tail of the + * reorder queue. This ensures that it will be + * delivered after all previous sequenced skbs. + */ + skb_queue_tail(&session->reorder_q, skb); + } + + /* Try to dequeue as many skbs from reorder_q as we can. */ + pppol2tp_recv_dequeue(session); + + return 0; + +discard: + kfree_skb(skb); + sock_put(session->sock); + + return 0; + +error: + return 1; +} + +/* UDP encapsulation receive handler. See net/ipv4/udp.c. + * Return codes: + * 0 : success. + * <0: error + * >0: skb should be passed up to userspace as UDP. + */ +static int pppol2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) +{ + struct pppol2tp_tunnel *tunnel; + + tunnel = pppol2tp_sock_to_tunnel(sk); + if (tunnel == NULL) + goto pass_up; + + PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, + "%s: received %d bytes\n", tunnel->name, skb->len); + + if (pppol2tp_recv_core(sk, skb)) + goto pass_up; + + return 0; + +pass_up: + return 1; +} + +/* Receive message. This is the recvmsg for the PPPoL2TP socket.
+ */ +static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock, + struct msghdr *msg, size_t len, + int flags) +{ + int err; + struct sk_buff *skb; + struct sock *sk = sock->sk; + + err = -EIO; + if (sk->sk_state & PPPOX_BOUND) + goto end; + + msg->msg_namelen = 0; + + err = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, + flags & MSG_DONTWAIT, &err); + if (skb) { + err = memcpy_toiovec(msg->msg_iov, (unsigned char *) skb->data, + skb->len); + if (err < 0) + goto do_skb_free; + err = skb->len; + } +do_skb_free: + kfree_skb(skb); +end: + return err; +} + +/************************************************************************ + * Transmit handling + ***********************************************************************/ + +/* Tell how big L2TP headers are for a particular session. This + * depends on whether sequence numbers are being used. + */ +static inline int pppol2tp_l2tp_header_len(struct pppol2tp_session *session) +{ + if (session->send_seq) + return PPPOL2TP_L2TP_HDR_SIZE_SEQ; + + return PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; +} + +/* Build an L2TP header for the session into the buffer provided. + */ +static void pppol2tp_build_l2tp_header(struct pppol2tp_session *session, + void *buf) +{ + __be16 *bufp = buf; + u16 flags = L2TP_HDR_VER; + + if (session->send_seq) + flags |= L2TP_HDRFLAG_S; + + /* Setup L2TP header. + * FIXME: Can this ever be unaligned? Is direct dereferencing of + * 16-bit header fields safe here for all architectures? + */ + *bufp++ = htons(flags); + *bufp++ = htons(session->tunnel_addr.d_tunnel); + *bufp++ = htons(session->tunnel_addr.d_session); + if (session->send_seq) { + *bufp++ = htons(session->ns); + *bufp++ = 0; + session->ns++; + PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, + "%s: updated ns to %hu\n", session->name, session->ns); + } +} + +/* This is the sendmsg for the PPPoL2TP pppol2tp_session socket. We come here + * when a user application does a sendmsg() on the session socket. L2TP and + * PPP headers must be inserted into the user's data. + */ +static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, + size_t total_len) +{ + static const unsigned char ppph[2] = { 0xff, 0x03 }; + struct sock *sk = sock->sk; + struct inet_sock *inet; + __wsum csum = 0; + struct sk_buff *skb; + int error; + int hdr_len; + struct pppol2tp_session *session; + struct pppol2tp_tunnel *tunnel; + struct udphdr *uh; + unsigned int len; + + error = -ENOTCONN; + if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) + goto error; + + /* Get session and tunnel contexts */ + error = -EBADF; + session = pppol2tp_sock_to_session(sk); + if (session == NULL) + goto error; + + tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); + if (tunnel == NULL) + goto error; + + /* What header length is configured for this session? */ + hdr_len = pppol2tp_l2tp_header_len(session); + + /* Allocate a socket buffer */ + error = -ENOMEM; + skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) + + sizeof(struct udphdr) + hdr_len + + sizeof(ppph) + total_len, + 0, GFP_KERNEL); + if (!skb) + goto error; + + /* Reserve space for headers. 
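pppol2tp_build_l2tp_header() above writes the smallest legal data-message header: flags plus version, peer tunnel id, peer session id, and, only when send_seq is on, an Ns/Nr pair, which is exactly why PPPOL2TP_L2TP_HDR_SIZE_SEQ is 10 bytes and the no-sequence size is 6. The same layout as a self-contained sketch, with illustrative helper names and explicit big-endian stores in place of htons():

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define L2TP_HDR_VER   0x0002
#define L2TP_HDRFLAG_S 0x0800

static void put_be16(uint8_t *p, uint16_t v)
{
	p[0] = v >> 8;
	p[1] = v & 0xff;
}

/* Returns the header length: 6 bytes, or 10 when sequencing is on. */
static size_t build_data_header(uint8_t *buf, uint16_t peer_tunnel,
				uint16_t peer_session, int send_seq,
				uint16_t *ns)
{
	uint16_t flags = L2TP_HDR_VER;
	uint8_t *p = buf;

	if (send_seq)
		flags |= L2TP_HDRFLAG_S;

	put_be16(p, flags);        p += 2;
	put_be16(p, peer_tunnel);  p += 2;
	put_be16(p, peer_session); p += 2;
	if (send_seq) {
		put_be16(p, (*ns)++); p += 2;	/* Ns, then bump it */
		put_be16(p, 0);       p += 2;	/* Nr is unused on data */
	}
	return p - buf;
}

int main(void)
{
	uint8_t hdr[10];
	uint16_t ns = 0;
	size_t len = build_data_header(hdr, 7, 42, 1, &ns);

	printf("header is %zu bytes, flags %02x%02x\n", len, hdr[0], hdr[1]);
	return 0;
}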
*/ + skb_reserve(skb, NET_SKB_PAD); + skb_reset_network_header(skb); + skb_reserve(skb, sizeof(struct iphdr)); + skb_reset_transport_header(skb); + + /* Build UDP header */ + inet = inet_sk(session->tunnel_sock); + uh = (struct udphdr *) skb->data; + uh->source = inet->sport; + uh->dest = inet->dport; + uh->len = htons(hdr_len + sizeof(ppph) + total_len); + uh->check = 0; + skb_put(skb, sizeof(struct udphdr)); + + /* Build L2TP header */ + pppol2tp_build_l2tp_header(session, skb->data); + skb_put(skb, hdr_len); + + /* Add PPP header */ + skb->data[0] = ppph[0]; + skb->data[1] = ppph[1]; + skb_put(skb, 2); + + /* Copy user data into skb */ + error = memcpy_fromiovec(skb->data, m->msg_iov, total_len); + if (error < 0) { + kfree_skb(skb); + goto error; + } + skb_put(skb, total_len); + + /* Calculate UDP checksum if configured to do so */ + if (session->tunnel_sock->sk_no_check != UDP_CSUM_NOXMIT) + csum = udp_csum_outgoing(sk, skb); + + /* Debug */ + if (session->send_seq) + PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, + "%s: send %Zd bytes, ns=%hu\n", session->name, + total_len, session->ns - 1); + else + PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, + "%s: send %Zd bytes\n", session->name, total_len); + + if (session->debug & PPPOL2TP_MSG_DATA) { + int i; + unsigned char *datap = skb->data; + + printk(KERN_DEBUG "%s: xmit:", session->name); + for (i = 0; i < total_len; i++) { + printk(" %02X", *datap++); + if (i == 15) { + printk(" ..."); + break; + } + } + printk("\n"); + } + + /* Queue the packet to IP for output */ + len = skb->len; + error = ip_queue_xmit(skb, 1); + + /* Update stats */ + if (error >= 0) { + tunnel->stats.tx_packets++; + tunnel->stats.tx_bytes += len; + session->stats.tx_packets++; + session->stats.tx_bytes += len; + } else { + tunnel->stats.tx_errors++; + session->stats.tx_errors++; + } + +error: + return error; +} + +/* Transmit function called by generic PPP driver. Sends PPP frame + * over PPPoL2TP socket. + * + * This is almost the same as pppol2tp_sendmsg(), but rather than + * being called with a msghdr from userspace, it is called with a skb + * from the kernel. + * + * The supplied skb from ppp doesn't have enough headroom for the + * insertion of L2TP, UDP and IP headers so we need to allocate more + * headroom in the skb. This will create a cloned skb. But we must be + * careful in the error case because the caller will expect to free + * the skb it supplied, not our cloned skb. So we take care to always + * leave the original skb unfreed if we return an error. + */ +static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) +{ + static const u8 ppph[2] = { 0xff, 0x03 }; + struct sock *sk = (struct sock *) chan->private; + struct sock *sk_tun; + int hdr_len; + struct pppol2tp_session *session; + struct pppol2tp_tunnel *tunnel; + int rc; + int headroom; + int data_len = skb->len; + struct inet_sock *inet; + __wsum csum = 0; + struct sk_buff *skb2 = NULL; + struct udphdr *uh; + unsigned int len; + + if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) + goto abort; + + /* Get session and tunnel contexts from the socket */ + session = pppol2tp_sock_to_session(sk); + if (session == NULL) + goto abort; + + sk_tun = session->tunnel_sock; + if (sk_tun == NULL) + goto abort; + tunnel = pppol2tp_sock_to_tunnel(sk_tun); + if (tunnel == NULL) + goto abort; + + /* What header length is configured for this session? 
*/ + hdr_len = pppol2tp_l2tp_header_len(session); + + /* Check that there's enough headroom in the skb to insert IP, + * UDP and L2TP and PPP headers. If not enough, expand it to + * make room. Note that a new skb (or a clone) is + * allocated. If we return an error from this point on, make + * sure we free the new skb but do not free the original skb + * since that is done by the caller for the error case. + */ + headroom = NET_SKB_PAD + sizeof(struct iphdr) + + sizeof(struct udphdr) + hdr_len + sizeof(ppph); + if (skb_headroom(skb) < headroom) { + skb2 = skb_realloc_headroom(skb, headroom); + if (skb2 == NULL) + goto abort; + } else + skb2 = skb; + + /* Check that the socket has room */ + if (atomic_read(&sk_tun->sk_wmem_alloc) < sk_tun->sk_sndbuf) + skb_set_owner_w(skb2, sk_tun); + else + goto discard; + + /* Setup PPP header */ + skb_push(skb2, sizeof(ppph)); + skb2->data[0] = ppph[0]; + skb2->data[1] = ppph[1]; + + /* Setup L2TP header */ + skb_push(skb2, hdr_len); + pppol2tp_build_l2tp_header(session, skb2->data); + + /* Setup UDP header */ + inet = inet_sk(sk_tun); + skb_push(skb2, sizeof(struct udphdr)); + skb_reset_transport_header(skb2); + uh = (struct udphdr *) skb2->data; + uh->source = inet->sport; + uh->dest = inet->dport; + uh->len = htons(sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len); + uh->check = 0; + + /* Calculate UDP checksum if configured to do so */ + if (sk_tun->sk_no_check != UDP_CSUM_NOXMIT) + csum = udp_csum_outgoing(sk_tun, skb2); + + /* Debug */ + if (session->send_seq) + PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, + "%s: send %d bytes, ns=%hu\n", session->name, + data_len, session->ns - 1); + else + PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, + "%s: send %d bytes\n", session->name, data_len); + + if (session->debug & PPPOL2TP_MSG_DATA) { + int i; + unsigned char *datap = skb2->data; + + printk(KERN_DEBUG "%s: xmit:", session->name); + for (i = 0; i < data_len; i++) { + printk(" %02X", *datap++); + if (i == 31) { + printk(" ..."); + break; + } + } + printk("\n"); + } + + memset(&(IPCB(skb2)->opt), 0, sizeof(IPCB(skb2)->opt)); + IPCB(skb2)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | + IPSKB_REROUTED); + nf_reset(skb2); + + /* Get routing info from the tunnel socket */ + dst_release(skb2->dst); + skb2->dst = sk_dst_get(sk_tun); + + /* Queue the packet to IP for output */ + len = skb2->len; + rc = ip_queue_xmit(skb2, 1); + + /* Update stats */ + if (rc >= 0) { + tunnel->stats.tx_packets++; + tunnel->stats.tx_bytes += len; + session->stats.tx_packets++; + session->stats.tx_bytes += len; + } else { + tunnel->stats.tx_errors++; + session->stats.tx_errors++; + } + + /* Free the original skb */ + kfree_skb(skb); + + return 1; + +discard: + /* Free the new skb. Caller will free original skb. */ + if (skb2 != skb) + kfree_skb(skb2); +abort: + return 0; +} + +/***************************************************************************** + * Session (and tunnel control) socket create/destroy. + *****************************************************************************/ + +/* When the tunnel UDP socket is closed, all the attached sockets need to go + * too. 
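The transmit path above assembles the frame back to front: once enough headroom exists (skb_realloc_headroom() otherwise), it pushes the PPP bytes, then the L2TP header, then the UDP header, each landing immediately in front of the previous one. A tiny buffer with the same reserve/push discipline, purely illustrative and not the skb API:

#include <stdio.h>
#include <string.h>

struct minibuf {
	unsigned char data[128];
	size_t head;			/* current start of valid data */
	size_t len;
};

static void mb_init(struct minibuf *b, size_t headroom)
{
	b->head = headroom;		/* like skb_reserve() */
	b->len = 0;
}

static unsigned char *mb_push(struct minibuf *b, size_t n)
{
	b->head -= n;			/* like skb_push(): grow at the front */
	b->len += n;
	return b->data + b->head;
}

int main(void)
{
	struct minibuf b;
	static const unsigned char ppph[2] = { 0xff, 0x03 };

	mb_init(&b, 64);
	memcpy(mb_push(&b, 4), "data", 4);
	memcpy(mb_push(&b, sizeof(ppph)), ppph, sizeof(ppph));	/* PPP */
	memset(mb_push(&b, 6), 0, 6);				/* L2TP */
	memset(mb_push(&b, 8), 0, 8);				/* UDP */

	printf("frame: %zu bytes starting at offset %zu\n", b.len, b.head);
	return 0;
}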
+ */ +static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel) +{ + int hash; + struct hlist_node *walk; + struct hlist_node *tmp; + struct pppol2tp_session *session; + struct sock *sk; + + if (tunnel == NULL) + BUG(); + + PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, + "%s: closing all sessions...\n", tunnel->name); + + write_lock(&tunnel->hlist_lock); + for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) { +again: + hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { + session = hlist_entry(walk, struct pppol2tp_session, hlist); + + sk = session->sock; + + PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, + "%s: closing session\n", session->name); + + hlist_del_init(&session->hlist); + + /* Since we should hold the sock lock while + * doing any unbinding, we need to release the + * lock we're holding before taking that lock. + * Hold a reference to the sock so it doesn't + * disappear as we're jumping between locks. + */ + sock_hold(sk); + write_unlock(&tunnel->hlist_lock); + lock_sock(sk); + + if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { + pppox_unbind_sock(sk); + sk->sk_state = PPPOX_DEAD; + sk->sk_state_change(sk); + } + + /* Purge any queued data */ + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); + skb_queue_purge(&session->reorder_q); + + release_sock(sk); + sock_put(sk); + + /* Now restart from the beginning of this hash + * chain. We always remove a session from the + * list so we are guaranteed to make forward + * progress. + */ + write_lock(&tunnel->hlist_lock); + goto again; + } + } + write_unlock(&tunnel->hlist_lock); +} + +/* Really kill the tunnel. + * Come here only when all sessions have been cleared from the tunnel. + */ +static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel) +{ + /* Remove from socket list */ + write_lock(&pppol2tp_tunnel_list_lock); + list_del_init(&tunnel->list); + write_unlock(&pppol2tp_tunnel_list_lock); + + atomic_dec(&pppol2tp_tunnel_count); + kfree(tunnel); +} + +/* Tunnel UDP socket destruct hook. + * The tunnel context is deleted only when all session sockets have been + * closed. + */ +static void pppol2tp_tunnel_destruct(struct sock *sk) +{ + struct pppol2tp_tunnel *tunnel; + + tunnel = pppol2tp_sock_to_tunnel(sk); + if (tunnel == NULL) + goto end; + + PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, + "%s: closing...\n", tunnel->name); + + /* Close all sessions */ + pppol2tp_tunnel_closeall(tunnel); + + /* No longer an encapsulation socket. See net/ipv4/udp.c */ + (udp_sk(sk))->encap_type = 0; + (udp_sk(sk))->encap_rcv = NULL; + + /* Remove hooks into tunnel socket */ + tunnel->sock = NULL; + sk->sk_destruct = tunnel->old_sk_destruct; + sk->sk_user_data = NULL; + + /* Call original (UDP) socket destructor */ + if (sk->sk_destruct != NULL) + (*sk->sk_destruct)(sk); + + pppol2tp_tunnel_dec_refcount(tunnel); + +end: + return; +} + +/* Really kill the session socket. (Called from sock_put() if + * refcnt == 0.) + */ +static void pppol2tp_session_destruct(struct sock *sk) +{ + struct pppol2tp_session *session = NULL; + + if (sk->sk_user_data != NULL) { + struct pppol2tp_tunnel *tunnel; + + session = pppol2tp_sock_to_session(sk); + if (session == NULL) + goto out; + + /* Don't use pppol2tp_sock_to_tunnel() here to + * get the tunnel context because the tunnel + * socket might have already been closed (its + * sk->sk_user_data will be NULL) so use the + * session's private tunnel ptr instead.
+ */ + tunnel = session->tunnel; + if (tunnel != NULL) { + BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); + + /* If session_id is zero, this is a null + * session context, which was created for a + * socket that is being used only to manage + * tunnels. + */ + if (session->tunnel_addr.s_session != 0) { + /* Delete the session socket from the + * hash + */ + write_lock(&tunnel->hlist_lock); + hlist_del_init(&session->hlist); + write_unlock(&tunnel->hlist_lock); + + atomic_dec(&pppol2tp_session_count); + } + + /* This will delete the tunnel context if this + * is the last session on the tunnel. + */ + session->tunnel = NULL; + session->tunnel_sock = NULL; + pppol2tp_tunnel_dec_refcount(tunnel); + } + } + + kfree(session); +out: + return; +} + +/* Called when the PPPoX socket (session) is closed. + */ +static int pppol2tp_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + int error; + + if (!sk) + return 0; + + error = -EBADF; + lock_sock(sk); + if (sock_flag(sk, SOCK_DEAD) != 0) + goto error; + + pppox_unbind_sock(sk); + + /* Signal the death of the socket. */ + sk->sk_state = PPPOX_DEAD; + sock_orphan(sk); + sock->sk = NULL; + + /* Purge any queued data */ + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); + + release_sock(sk); + + /* This will delete the session context via + * pppol2tp_session_destruct() if the socket's refcnt drops to + * zero. + */ + sock_put(sk); + + return 0; + +error: + release_sock(sk); + return error; +} + +/* Internal function to prepare a tunnel (UDP) socket to have PPPoX + * sockets attached to it. + */ +static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id, + int *error) +{ + int err; + struct socket *sock = NULL; + struct sock *sk; + struct pppol2tp_tunnel *tunnel; + struct sock *ret = NULL; + + /* Get the tunnel UDP socket from the fd, which was opened by + * the userspace L2TP daemon. + */ + err = -EBADF; + sock = sockfd_lookup(fd, &err); + if (!sock) { + PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR, + "tunl %hu: sockfd_lookup(fd=%d) returned %d\n", + tunnel_id, fd, err); + goto err; + } + + /* Quick sanity checks */ + err = -ESOCKTNOSUPPORT; + if (sock->type != SOCK_DGRAM) { + PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR, + "tunl %hu: fd %d wrong type, got %d, expected %d\n", + tunnel_id, fd, sock->type, SOCK_DGRAM); + goto err; + } + err = -EAFNOSUPPORT; + if (sock->ops->family != AF_INET) { + PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR, + "tunl %hu: fd %d wrong family, got %d, expected %d\n", + tunnel_id, fd, sock->ops->family, AF_INET); + goto err; + } + + err = -ENOTCONN; + sk = sock->sk; + + /* Check if this socket has already been prepped */ + tunnel = (struct pppol2tp_tunnel *)sk->sk_user_data; + if (tunnel != NULL) { + /* User-data field already set */ + err = -EBUSY; + BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); + + /* This socket has already been prepped */ + ret = tunnel->sock; + goto out; + } + + /* This socket is available and needs prepping. Create a new tunnel + * context and init it. + */ + sk->sk_user_data = tunnel = kzalloc(sizeof(struct pppol2tp_tunnel), GFP_KERNEL); + if (sk->sk_user_data == NULL) { + err = -ENOMEM; + goto err; + } + + tunnel->magic = L2TP_TUNNEL_MAGIC; + sprintf(&tunnel->name[0], "tunl %hu", tunnel_id); + + tunnel->stats.tunnel_id = tunnel_id; + tunnel->debug = PPPOL2TP_DEFAULT_DEBUG_FLAGS; + + /* Hook on the tunnel socket destructor so that we can cleanup + * if the tunnel socket goes away. 
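prepare_tunnel_socket() is about to hook the UDP socket's sk_destruct, keeping the original pointer so the replacement can chain to it after tearing down the tunnel state, exactly as pppol2tp_tunnel_destruct() above does. The hook-and-chain step in isolation, using hypothetical toy types and plain function pointers:

#include <stdio.h>

struct toy_sock {
	void (*destruct)(struct toy_sock *);
	void *user_data;
};

struct tunnel {
	void (*old_destruct)(struct toy_sock *);
};

static void udp_destruct(struct toy_sock *sk)
{
	(void)sk;
	printf("original UDP destructor\n");
}

static void tunnel_destruct(struct toy_sock *sk)
{
	struct tunnel *t = sk->user_data;

	printf("tunnel teardown first\n");
	sk->user_data = NULL;
	sk->destruct = t->old_destruct;
	if (sk->destruct)
		sk->destruct(sk);	/* chain to the saved destructor */
}

int main(void)
{
	struct tunnel t;
	struct toy_sock sk = { udp_destruct, NULL };

	t.old_destruct = sk.destruct;	/* save, then override */
	sk.destruct = tunnel_destruct;
	sk.user_data = &t;

	sk.destruct(&sk);
	return 0;
}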
+ */ + tunnel->old_sk_destruct = sk->sk_destruct; + sk->sk_destruct = &pppol2tp_tunnel_destruct; + + tunnel->sock = sk; + sk->sk_allocation = GFP_ATOMIC; + + /* Misc init */ + rwlock_init(&tunnel->hlist_lock); + + /* Add tunnel to our list */ + INIT_LIST_HEAD(&tunnel->list); + write_lock(&pppol2tp_tunnel_list_lock); + list_add(&tunnel->list, &pppol2tp_tunnel_list); + write_unlock(&pppol2tp_tunnel_list_lock); + atomic_inc(&pppol2tp_tunnel_count); + + /* Bump the reference count. The tunnel context is deleted + * only when this drops to zero. + */ + pppol2tp_tunnel_inc_refcount(tunnel); + + /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ + (udp_sk(sk))->encap_type = UDP_ENCAP_L2TPINUDP; + (udp_sk(sk))->encap_rcv = pppol2tp_udp_encap_recv; + + ret = tunnel->sock; + + *error = 0; +out: + if (sock) + sockfd_put(sock); + + return ret; + +err: + *error = err; + goto out; +} + +static struct proto pppol2tp_sk_proto = { + .name = "PPPOL2TP", + .owner = THIS_MODULE, + .obj_size = sizeof(struct pppox_sock), +}; + +/* socket() handler. Initialize a new struct sock. + */ +static int pppol2tp_create(struct socket *sock) +{ + int error = -ENOMEM; + struct sock *sk; + + sk = sk_alloc(PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto, 1); + if (!sk) + goto out; + + sock_init_data(sock, sk); + + sock->state = SS_UNCONNECTED; + sock->ops = &pppol2tp_ops; + + sk->sk_backlog_rcv = pppol2tp_recv_core; + sk->sk_protocol = PX_PROTO_OL2TP; + sk->sk_family = PF_PPPOX; + sk->sk_state = PPPOX_NONE; + sk->sk_type = SOCK_STREAM; + sk->sk_destruct = pppol2tp_session_destruct; + + error = 0; + +out: + return error; +} + +/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket + */ +static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, + int sockaddr_len, int flags) +{ + struct sock *sk = sock->sk; + struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr; + struct pppox_sock *po = pppox_sk(sk); + struct sock *tunnel_sock = NULL; + struct pppol2tp_session *session = NULL; + struct pppol2tp_tunnel *tunnel; + struct dst_entry *dst; + int error = 0; + + lock_sock(sk); + + error = -EINVAL; + if (sp->sa_protocol != PX_PROTO_OL2TP) + goto end; + + /* Check for already bound sockets */ + error = -EBUSY; + if (sk->sk_state & PPPOX_CONNECTED) + goto end; + + /* We don't support rebinding anyway */ + error = -EALREADY; + if (sk->sk_user_data) + goto end; /* socket is already attached */ + + /* Don't bind if s_tunnel is 0 */ + error = -EINVAL; + if (sp->pppol2tp.s_tunnel == 0) + goto end; + + /* Special case: prepare tunnel socket if s_session and + * d_session is 0. Otherwise look up tunnel using supplied + * tunnel id. + */ + if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) { + tunnel_sock = pppol2tp_prepare_tunnel_socket(sp->pppol2tp.fd, + sp->pppol2tp.s_tunnel, + &error); + if (tunnel_sock == NULL) + goto end; + + tunnel = tunnel_sock->sk_user_data; + } else { + tunnel = pppol2tp_tunnel_find(sp->pppol2tp.s_tunnel); + + /* Error if we can't find the tunnel */ + error = -ENOENT; + if (tunnel == NULL) + goto end; + + tunnel_sock = tunnel->sock; + } + + /* Check that this session doesn't already exist */ + error = -EEXIST; + session = pppol2tp_session_find(tunnel, sp->pppol2tp.s_session); + if (session != NULL) + goto end; + + /* Allocate and initialize a new session context.
*/ + session = kzalloc(sizeof(struct pppol2tp_session), GFP_KERNEL); + if (session == NULL) { + error = -ENOMEM; + goto end; + } + + skb_queue_head_init(&session->reorder_q); + + session->magic = L2TP_SESSION_MAGIC; + session->owner = current->pid; + session->sock = sk; + session->tunnel = tunnel; + session->tunnel_sock = tunnel_sock; + session->tunnel_addr = sp->pppol2tp; + sprintf(&session->name[0], "sess %hu/%hu", + session->tunnel_addr.s_tunnel, + session->tunnel_addr.s_session); + + session->stats.tunnel_id = session->tunnel_addr.s_tunnel; + session->stats.session_id = session->tunnel_addr.s_session; + + INIT_HLIST_NODE(&session->hlist); + + /* Inherit debug options from tunnel */ + session->debug = tunnel->debug; + + /* Default MTU must allow space for UDP/L2TP/PPP + * headers. + */ + session->mtu = session->mru = 1500 - PPPOL2TP_HEADER_OVERHEAD; + + /* If PMTU discovery was enabled, use the MTU that was discovered */ + dst = sk_dst_get(sk); + if (dst != NULL) { + u32 pmtu = dst_mtu(__sk_dst_get(sk)); + if (pmtu != 0) + session->mtu = session->mru = pmtu - + PPPOL2TP_HEADER_OVERHEAD; + dst_release(dst); + } + + /* Special case: if source & dest session_id == 0x0000, this socket is + * being created to manage the tunnel. Don't add the session to the + * session hash list, just set up the internal context for use by + * ioctl() and sockopt() handlers. + */ + if ((session->tunnel_addr.s_session == 0) && + (session->tunnel_addr.d_session == 0)) { + error = 0; + sk->sk_user_data = session; + goto out_no_ppp; + } + + /* Get tunnel context from the tunnel socket */ + tunnel = pppol2tp_sock_to_tunnel(tunnel_sock); + if (tunnel == NULL) { + error = -EBADF; + goto end; + } + + /* Right now, because we don't have a way to push the incoming skb's + * straight through the UDP layer, the only header we need to worry + * about is the L2TP header. This size is different depending on + * whether sequence numbers are enabled for the data channel. + */ + po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; + + po->chan.private = sk; + po->chan.ops = &pppol2tp_chan_ops; + po->chan.mtu = session->mtu; + + error = ppp_register_channel(&po->chan); + if (error) + goto end; + + /* This is how we get the session context from the socket. */ + sk->sk_user_data = session; + + /* Add session to the tunnel's hash list */ + write_lock(&tunnel->hlist_lock); + hlist_add_head(&session->hlist, + pppol2tp_session_id_hash(tunnel, + session->tunnel_addr.s_session)); + write_unlock(&tunnel->hlist_lock); + + atomic_inc(&pppol2tp_session_count); + +out_no_ppp: + pppol2tp_tunnel_inc_refcount(tunnel); + sk->sk_state = PPPOX_CONNECTED; + PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, + "%s: created\n", session->name); + +end: + release_sock(sk); + + if (error != 0) + PRINTK(session ? session->debug : -1, PPPOL2TP_MSG_CONTROL, KERN_WARNING, + "%s: connect failed: %d\n", session->name, error); + + return error; +} + +/* getname() support. 
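The session MTU chosen in connect() above is simply the link MTU minus the worst-case encapsulation, PPPOL2TP_HEADER_OVERHEAD: 20 bytes IP + 8 UDP + 10 L2TP (with sequencing) + 2 PPP = 40, so a standard 1500-byte Ethernet path yields 1460, and a discovered PMTU is reduced the same way. The arithmetic, spelled out:

#include <stdio.h>

#define OVERHEAD (20 /* IP */ + 8 /* UDP */ + 10 /* L2TP w/ seq */ + 2 /* PPP */)

int main(void)
{
	int link_mtu = 1500;	/* or a discovered PMTU, when the route has one */

	printf("session mtu = %d\n", link_mtu - OVERHEAD);	/* 1460 */
	return 0;
}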
+ */ +static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, + int *usockaddr_len, int peer) +{ + int len = sizeof(struct sockaddr_pppol2tp); + struct sockaddr_pppol2tp sp; + int error = 0; + struct pppol2tp_session *session; + + error = -ENOTCONN; + if (sock->sk->sk_state != PPPOX_CONNECTED) + goto end; + + session = pppol2tp_sock_to_session(sock->sk); + if (session == NULL) { + error = -EBADF; + goto end; + } + + sp.sa_family = AF_PPPOX; + sp.sa_protocol = PX_PROTO_OL2TP; + memcpy(&sp.pppol2tp, &session->tunnel_addr, + sizeof(struct pppol2tp_addr)); + + memcpy(uaddr, &sp, len); + + *usockaddr_len = len; + + error = 0; + +end: + return error; +} + +/**************************************************************************** + * ioctl() handlers. + * + * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP + * sockets. However, in order to control kernel tunnel features, we allow + * userspace to create a special "tunnel" PPPoX socket which is used for + * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow + * the user application to issue L2TP setsockopt(), getsockopt() and ioctl() + * calls. + ****************************************************************************/ + +/* Session ioctl helper. + */ +static int pppol2tp_session_ioctl(struct pppol2tp_session *session, + unsigned int cmd, unsigned long arg) +{ + struct ifreq ifr; + int err = 0; + struct sock *sk = session->sock; + int val = (int) arg; + + PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG, + "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n", + session->name, cmd, arg); + + sock_hold(sk); + + switch (cmd) { + case SIOCGIFMTU: + err = -ENXIO; + if (!(sk->sk_state & PPPOX_CONNECTED)) + break; + + err = -EFAULT; + if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq))) + break; + ifr.ifr_mtu = session->mtu; + if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq))) + break; + + PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, + "%s: get mtu=%d\n", session->name, session->mtu); + err = 0; + break; + + case SIOCSIFMTU: + err = -ENXIO; + if (!(sk->sk_state & PPPOX_CONNECTED)) + break; + + err = -EFAULT; + if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq))) + break; + + session->mtu = ifr.ifr_mtu; + + PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, + "%s: set mtu=%d\n", session->name, session->mtu); + err = 0; + break; + + case PPPIOCGMRU: + err = -ENXIO; + if (!(sk->sk_state & PPPOX_CONNECTED)) + break; + + err = -EFAULT; + if (put_user(session->mru, (int __user *) arg)) + break; + + PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, + "%s: get mru=%d\n", session->name, session->mru); + err = 0; + break; + + case PPPIOCSMRU: + err = -ENXIO; + if (!(sk->sk_state & PPPOX_CONNECTED)) + break; + + err = -EFAULT; + if (get_user(val,(int __user *) arg)) + break; + + session->mru = val; + PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, + "%s: set mru=%d\n", session->name, session->mru); + err = 0; + break; + + case PPPIOCGFLAGS: + err = -EFAULT; + if (put_user(session->flags, (int __user *) arg)) + break; + + PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, + "%s: get flags=%d\n", session->name, session->flags); + err = 0; + break; + + case PPPIOCSFLAGS: + err = -EFAULT; + if (get_user(val, (int __user *) arg)) + break; + session->flags = val; + PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, + "%s: set flags=%d\n", session->name, session->flags); + err = 0; + break; + + case 
+
+/****************************************************************************
+ * ioctl() handlers.
+ *
+ * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
+ * sockets. However, in order to control kernel tunnel features, we allow
+ * userspace to create a special "tunnel" PPPoX socket which is used for
+ * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow
+ * the user application to issue L2TP setsockopt(), getsockopt() and ioctl()
+ * calls.
+ ****************************************************************************/
+
+/* Session ioctl helper.
+ */
+static int pppol2tp_session_ioctl(struct pppol2tp_session *session,
+				  unsigned int cmd, unsigned long arg)
+{
+	struct ifreq ifr;
+	int err = 0;
+	struct sock *sk = session->sock;
+	int val = (int) arg;
+
+	PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
+	       "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
+	       session->name, cmd, arg);
+
+	sock_hold(sk);
+
+	switch (cmd) {
+	case SIOCGIFMTU:
+		err = -ENXIO;
+		if (!(sk->sk_state & PPPOX_CONNECTED))
+			break;
+
+		err = -EFAULT;
+		if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
+			break;
+		ifr.ifr_mtu = session->mtu;
+		if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
+			break;
+
+		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: get mtu=%d\n", session->name, session->mtu);
+		err = 0;
+		break;
+
+	case SIOCSIFMTU:
+		err = -ENXIO;
+		if (!(sk->sk_state & PPPOX_CONNECTED))
+			break;
+
+		err = -EFAULT;
+		if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
+			break;
+
+		session->mtu = ifr.ifr_mtu;
+
+		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: set mtu=%d\n", session->name, session->mtu);
+		err = 0;
+		break;
+
+	case PPPIOCGMRU:
+		err = -ENXIO;
+		if (!(sk->sk_state & PPPOX_CONNECTED))
+			break;
+
+		err = -EFAULT;
+		if (put_user(session->mru, (int __user *) arg))
+			break;
+
+		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: get mru=%d\n", session->name, session->mru);
+		err = 0;
+		break;
+
+	case PPPIOCSMRU:
+		err = -ENXIO;
+		if (!(sk->sk_state & PPPOX_CONNECTED))
+			break;
+
+		err = -EFAULT;
+		if (get_user(val, (int __user *) arg))
+			break;
+
+		session->mru = val;
+		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: set mru=%d\n", session->name, session->mru);
+		err = 0;
+		break;
+
+	case PPPIOCGFLAGS:
+		err = -EFAULT;
+		if (put_user(session->flags, (int __user *) arg))
+			break;
+
+		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: get flags=%d\n", session->name, session->flags);
+		err = 0;
+		break;
+
+	case PPPIOCSFLAGS:
+		err = -EFAULT;
+		if (get_user(val, (int __user *) arg))
+			break;
+		session->flags = val;
+		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: set flags=%d\n", session->name, session->flags);
+		err = 0;
+		break;
+
+	case PPPIOCGL2TPSTATS:
+		err = -ENXIO;
+		if (!(sk->sk_state & PPPOX_CONNECTED))
+			break;
+
+		err = -EFAULT;
+		if (copy_to_user((void __user *) arg, &session->stats,
+				 sizeof(session->stats)))
+			break;
+		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: get L2TP stats\n", session->name);
+		err = 0;
+		break;
+
+	default:
+		err = -ENOSYS;
+		break;
+	}
+
+	sock_put(sk);
+
+	return err;
+}
+
+/* Tunnel ioctl helper.
+ *
+ * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
+ * specifies a session_id, the session ioctl handler is called. This allows an
+ * application to retrieve session stats via a tunnel socket.
+ */
+static int pppol2tp_tunnel_ioctl(struct pppol2tp_tunnel *tunnel,
+				 unsigned int cmd, unsigned long arg)
+{
+	int err = 0;
+	struct sock *sk = tunnel->sock;
+	struct pppol2tp_ioc_stats stats_req;
+
+	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
+	       "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", tunnel->name,
+	       cmd, arg);
+
+	sock_hold(sk);
+
+	switch (cmd) {
+	case PPPIOCGL2TPSTATS:
+		err = -ENXIO;
+		if (!(sk->sk_state & PPPOX_CONNECTED))
+			break;
+
+		if (copy_from_user(&stats_req, (void __user *) arg,
+				   sizeof(stats_req))) {
+			err = -EFAULT;
+			break;
+		}
+		if (stats_req.session_id != 0) {
+			/* resend to session ioctl handler */
+			struct pppol2tp_session *session =
+				pppol2tp_session_find(tunnel, stats_req.session_id);
+			if (session != NULL)
+				err = pppol2tp_session_ioctl(session, cmd, arg);
+			else
+				err = -EBADR;
+			break;
+		}
+#ifdef CONFIG_XFRM
+		tunnel->stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
+#endif
+		if (copy_to_user((void __user *) arg, &tunnel->stats,
+				 sizeof(tunnel->stats))) {
+			err = -EFAULT;
+			break;
+		}
+		PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: get L2TP stats\n", tunnel->name);
+		err = 0;
+		break;
+
+	default:
+		err = -ENOSYS;
+		break;
+	}
+
+	sock_put(sk);
+
+	return err;
+}
+
+/* Main ioctl() handler.
+ * Dispatch to tunnel or session helpers depending on the socket.
+ */
+static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
+			  unsigned long arg)
+{
+	struct sock *sk = sock->sk;
+	struct pppol2tp_session *session;
+	struct pppol2tp_tunnel *tunnel;
+	int err;
+
+	if (!sk)
+		return 0;
+
+	err = -EBADF;
+	if (sock_flag(sk, SOCK_DEAD) != 0)
+		goto end;
+
+	err = -ENOTCONN;
+	if ((sk->sk_user_data == NULL) ||
+	    (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
+		goto end;
+
+	/* Get session context from the socket */
+	err = -EBADF;
+	session = pppol2tp_sock_to_session(sk);
+	if (session == NULL)
+		goto end;
+
+	/* Special case: if session's session_id is zero, treat ioctl as a
+	 * tunnel ioctl
+	 */
+	if ((session->tunnel_addr.s_session == 0) &&
+	    (session->tunnel_addr.d_session == 0)) {
+		err = -EBADF;
+		tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
+		if (tunnel == NULL)
+			goto end;
+
+		err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
+		goto end;
+	}
+
+	err = pppol2tp_session_ioctl(session, cmd, arg);
+
+end:
+	return err;
+}
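The session_id redirect described in the tunnel ioctl comment above can be exercised from userspace as in this sketch (illustrative, not part of the patch; it assumes struct pppol2tp_ioc_stats and the PPPIOCGL2TPSTATS definition exported by <linux/if_pppol2tp.h>, and a hypothetical tunnel-management socket tunnel_mgmt_fd created with s_session == 0):

#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/if_pppox.h>
#include <linux/if_pppol2tp.h>

/* Fetch stats for one session through the tunnel socket; sid == 0
 * would return the tunnel's own stats instead.  tunnel_mgmt_fd is
 * hypothetical. */
static int l2tp_get_stats(int tunnel_mgmt_fd, uint16_t sid,
			  struct pppol2tp_ioc_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->session_id = sid;
	return ioctl(tunnel_mgmt_fd, PPPIOCGL2TPSTATS, stats);
}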
+
+/*****************************************************************************
+ * setsockopt() / getsockopt() support.
+ *
+ * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
+ * sockets. In order to control kernel tunnel features, we allow userspace to
+ * create a special "tunnel" PPPoX socket which is used for control only.
+ * Tunnel PPPoX sockets have session_id == 0 and simply allow the user
+ * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
+ *****************************************************************************/
+
+/* Tunnel setsockopt() helper.
+ */
+static int pppol2tp_tunnel_setsockopt(struct sock *sk,
+				      struct pppol2tp_tunnel *tunnel,
+				      int optname, int val)
+{
+	int err = 0;
+
+	switch (optname) {
+	case PPPOL2TP_SO_DEBUG:
+		tunnel->debug = val;
+		PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: set debug=%x\n", tunnel->name, tunnel->debug);
+		break;
+
+	default:
+		err = -ENOPROTOOPT;
+		break;
+	}
+
+	return err;
+}
+
+/* Session setsockopt helper.
+ */
+static int pppol2tp_session_setsockopt(struct sock *sk,
+				       struct pppol2tp_session *session,
+				       int optname, int val)
+{
+	int err = 0;
+
+	switch (optname) {
+	case PPPOL2TP_SO_RECVSEQ:
+		if ((val != 0) && (val != 1)) {
+			err = -EINVAL;
+			break;
+		}
+		session->recv_seq = val ? -1 : 0;
+		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: set recv_seq=%d\n", session->name,
+		       session->recv_seq);
+		break;
+
+	case PPPOL2TP_SO_SENDSEQ:
+		if ((val != 0) && (val != 1)) {
+			err = -EINVAL;
+			break;
+		}
+		session->send_seq = val ? -1 : 0;
+		{
+			struct sock *ssk = session->sock;
+			struct pppox_sock *po = pppox_sk(ssk);
+			po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
+				PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
+		}
+		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: set send_seq=%d\n", session->name, session->send_seq);
+		break;
+
+	case PPPOL2TP_SO_LNSMODE:
+		if ((val != 0) && (val != 1)) {
+			err = -EINVAL;
+			break;
+		}
+		session->lns_mode = val ? -1 : 0;
+		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: set lns_mode=%d\n", session->name,
+		       session->lns_mode);
+		break;
+
+	case PPPOL2TP_SO_DEBUG:
+		session->debug = val;
+		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: set debug=%x\n", session->name, session->debug);
+		break;
+
+	case PPPOL2TP_SO_REORDERTO:
+		session->reorder_timeout = msecs_to_jiffies(val);
+		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: set reorder_timeout=%d\n", session->name,
+		       session->reorder_timeout);
+		break;
+
+	default:
+		err = -ENOPROTOOPT;
+		break;
+	}
+
+	return err;
+}
+
+/* Main setsockopt() entry point.
+ * Does API checks, then calls either the tunnel or session setsockopt
+ * handler, according to whether the PPPoL2TP socket is for a regular
+ * session or the special tunnel type.
+ */
+static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
+			       char __user *optval, int optlen)
+{
+	struct sock *sk = sock->sk;
+	struct pppol2tp_session *session = sk->sk_user_data;
+	struct pppol2tp_tunnel *tunnel;
+	int val;
+	int err;
+
+	if (level != SOL_PPPOL2TP)
+		return udp_prot.setsockopt(sk, level, optname, optval, optlen);
+
+	if (optlen < sizeof(int))
+		return -EINVAL;
+
+	if (get_user(val, (int __user *)optval))
+		return -EFAULT;
+
+	err = -ENOTCONN;
+	if (sk->sk_user_data == NULL)
+		goto end;
+
+	/* Get session context from the socket */
+	err = -EBADF;
+	session = pppol2tp_sock_to_session(sk);
+	if (session == NULL)
+		goto end;
+
+	/* Special case: if session_id == 0x0000, treat as operation on tunnel
+	 */
+	if ((session->tunnel_addr.s_session == 0) &&
+	    (session->tunnel_addr.d_session == 0)) {
+		err = -EBADF;
+		tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
+		if (tunnel == NULL)
+			goto end;
+
+		err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
+	} else
+		err = pppol2tp_session_setsockopt(sk, session, optname, val);
+
+end:
+	return err;
+}
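As a usage illustration of the options handled above (a sketch, not part of the patch; session_fd is hypothetical):

#include <sys/socket.h>
#include <linux/if_pppox.h>
#include <linux/if_pppol2tp.h>

/* Enable data-channel sequence numbers on a session socket; the kernel
 * side also grows po->chan.hdrlen, as pppol2tp_session_setsockopt()
 * shows.  session_fd is hypothetical. */
static int l2tp_enable_send_seq(int session_fd)
{
	int on = 1;

	return setsockopt(session_fd, SOL_PPPOL2TP,
			  PPPOL2TP_SO_SENDSEQ, &on, sizeof(on));
}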
+
+/* Tunnel getsockopt helper. Called with sock locked.
+ */
+static int pppol2tp_tunnel_getsockopt(struct sock *sk,
+				      struct pppol2tp_tunnel *tunnel,
+				      int optname, int __user *val)
+{
+	int err = 0;
+
+	switch (optname) {
+	case PPPOL2TP_SO_DEBUG:
+		*val = tunnel->debug;
+		PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: get debug=%x\n", tunnel->name, tunnel->debug);
+		break;
+
+	default:
+		err = -ENOPROTOOPT;
+		break;
+	}
+
+	return err;
+}
+
+/* Session getsockopt helper. Called with sock locked.
+ */
+static int pppol2tp_session_getsockopt(struct sock *sk,
+				       struct pppol2tp_session *session,
+				       int optname, int __user *val)
+{
+	int err = 0;
+
+	switch (optname) {
+	case PPPOL2TP_SO_RECVSEQ:
+		*val = session->recv_seq;
+		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: get recv_seq=%d\n", session->name, *val);
+		break;
+
+	case PPPOL2TP_SO_SENDSEQ:
+		*val = session->send_seq;
+		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: get send_seq=%d\n", session->name, *val);
+		break;
+
+	case PPPOL2TP_SO_LNSMODE:
+		*val = session->lns_mode;
+		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: get lns_mode=%d\n", session->name, *val);
+		break;
+
+	case PPPOL2TP_SO_DEBUG:
+		*val = session->debug;
+		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: get debug=%d\n", session->name, *val);
+		break;
+
+	case PPPOL2TP_SO_REORDERTO:
+		*val = (int) jiffies_to_msecs(session->reorder_timeout);
+		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+		       "%s: get reorder_timeout=%d\n", session->name, *val);
+		break;
+
+	default:
+		err = -ENOPROTOOPT;
+	}
+
+	return err;
+}
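And the matching read-back direction (again an illustrative sketch, not part of the patch; session_fd is hypothetical):

#include <sys/socket.h>
#include <linux/if_pppox.h>
#include <linux/if_pppol2tp.h>

/* Read the session debug mask back via SOL_PPPOL2TP.  session_fd is
 * hypothetical. */
static int l2tp_get_debug(int session_fd, int *debug)
{
	socklen_t len = sizeof(*debug);

	return getsockopt(session_fd, SOL_PPPOL2TP,
			  PPPOL2TP_SO_DEBUG, debug, &len);
}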
+
+/* Main getsockopt() entry point.
+ * Does API checks, then calls either the tunnel or session getsockopt
+ * handler, according to whether the PPPoX socket is for a regular session
+ * or the special tunnel type.
+ */
+static int pppol2tp_getsockopt(struct socket *sock, int level,
+			       int optname, char __user *optval, int __user *optlen)
+{
+	struct sock *sk = sock->sk;
+	struct pppol2tp_session *session = sk->sk_user_data;
+	struct pppol2tp_tunnel *tunnel;
+	int val, len;
+	int err;
+
+	if (level != SOL_PPPOL2TP)
+		return udp_prot.getsockopt(sk, level, optname, optval, optlen);
+
+	if (get_user(len, (int __user *) optlen))
+		return -EFAULT;
+
+	if (len < 0)
+		return -EINVAL;
+
+	len = min_t(unsigned int, len, sizeof(int));
+
+	err = -ENOTCONN;
+	if (sk->sk_user_data == NULL)
+		goto end;
+
+	/* Get the session context */
+	err = -EBADF;
+	session = pppol2tp_sock_to_session(sk);
+	if (session == NULL)
+		goto end;
+
+	/* Special case: if session_id == 0x0000, treat as operation on tunnel */
+	if ((session->tunnel_addr.s_session == 0) &&
+	    (session->tunnel_addr.d_session == 0)) {
+		err = -EBADF;
+		tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
+		if (tunnel == NULL)
+			goto end;
+
+		err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
+	} else
+		err = pppol2tp_session_getsockopt(sk, session, optname, &val);
+
+	if (err)
+		goto end;
+
+	err = -EFAULT;
+	if (put_user(len, (int __user *) optlen))
+		goto end;
+
+	if (copy_to_user((void __user *) optval, &val, len))
+		goto end;
+
+	err = 0;
+end:
+	return err;
+}
+
+/*****************************************************************************
+ * /proc filesystem for debug
+ *****************************************************************************/
+
+#ifdef CONFIG_PROC_FS
+
+#include <linux/seq_file.h>
+
+struct pppol2tp_seq_data {
+	struct pppol2tp_tunnel *tunnel;		/* current tunnel */
+	struct pppol2tp_session *session;	/* NULL means get first session in tunnel */
+};
+
+static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr)
+{
+	struct pppol2tp_session *session = NULL;
+	struct hlist_node *walk;
+	int found = 0;
+	int next = 0;
+	int i;
+
+	read_lock(&tunnel->hlist_lock);
+	for (i = 0; i < PPPOL2TP_HASH_SIZE; i++) {
+		hlist_for_each_entry(session, walk, &tunnel->session_hlist[i], hlist) {
+			if (curr == NULL) {
+				found = 1;
+				goto out;
+			}
+			if (session == curr) {
+				next = 1;
+				continue;
+			}
+			if (next) {
+				found = 1;
+				goto out;
+			}
+		}
+	}
+out:
+	read_unlock(&tunnel->hlist_lock);
+	if (!found)
+		session = NULL;
+
+	return session;
+}
+
+static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_tunnel *curr)
+{
+	struct pppol2tp_tunnel *tunnel = NULL;
+
+	read_lock(&pppol2tp_tunnel_list_lock);
+	if (list_is_last(&curr->list, &pppol2tp_tunnel_list)) {
+		goto out;
+	}
+	tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list);
+out:
+	read_unlock(&pppol2tp_tunnel_list_lock);
+
+	return tunnel;
+}
+
+static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
+{
+	struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
+	loff_t pos = *offs;
+
+	if (!pos)
+		goto out;
+
+	BUG_ON(m->private == NULL);
+	pd = m->private;
+
+	if (pd->tunnel == NULL) {
+		if (!list_empty(&pppol2tp_tunnel_list))
+			pd->tunnel = list_entry(pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list);
+	} else {
+		pd->session = next_session(pd->tunnel, pd->session);
+		if (pd->session == NULL) {
+			pd->tunnel = next_tunnel(pd->tunnel);
+		}
+	}
+
+	/* NULL tunnel and session indicates end of list */
+	if ((pd->tunnel == NULL) && (pd->session == NULL))
+		pd = NULL;
+
+out:
+	return pd;
+}
+
+static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return NULL;
+}
+
+static void pppol2tp_seq_stop(struct
seq_file *p, void *v) +{ + /* nothing to do */ +} + +static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v) +{ + struct pppol2tp_tunnel *tunnel = v; + + seq_printf(m, "\nTUNNEL '%s', %c %d\n", + tunnel->name, + (tunnel == tunnel->sock->sk_user_data) ? 'Y':'N', + atomic_read(&tunnel->ref_count) - 1); + seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n", + tunnel->debug, + tunnel->stats.tx_packets, tunnel->stats.tx_bytes, + tunnel->stats.tx_errors, + tunnel->stats.rx_packets, tunnel->stats.rx_bytes, + tunnel->stats.rx_errors); +} + +static void pppol2tp_seq_session_show(struct seq_file *m, void *v) +{ + struct pppol2tp_session *session = v; + + seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> " + "%04X/%04X %d %c\n", + session->name, + ntohl(session->tunnel_addr.addr.sin_addr.s_addr), + ntohs(session->tunnel_addr.addr.sin_port), + session->tunnel_addr.s_tunnel, + session->tunnel_addr.s_session, + session->tunnel_addr.d_tunnel, + session->tunnel_addr.d_session, + session->sock->sk_state, + (session == session->sock->sk_user_data) ? + 'Y' : 'N'); + seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n", + session->mtu, session->mru, + session->recv_seq ? 'R' : '-', + session->send_seq ? 'S' : '-', + session->lns_mode ? "LNS" : "LAC", + session->debug, + jiffies_to_msecs(session->reorder_timeout)); + seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n", + session->nr, session->ns, + session->stats.tx_packets, + session->stats.tx_bytes, + session->stats.tx_errors, + session->stats.rx_packets, + session->stats.rx_bytes, + session->stats.rx_errors); +} + +static int pppol2tp_seq_show(struct seq_file *m, void *v) +{ + struct pppol2tp_seq_data *pd = v; + + /* display header on line 1 */ + if (v == SEQ_START_TOKEN) { + seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n"); + seq_puts(m, "TUNNEL name, user-data-ok session-count\n"); + seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n"); + seq_puts(m, " SESSION name, addr/port src-tid/sid " + "dest-tid/sid state user-data-ok\n"); + seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n"); + seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n"); + goto out; + } + + /* Show the tunnel or session context. + */ + if (pd->session == NULL) + pppol2tp_seq_tunnel_show(m, pd->tunnel); + else + pppol2tp_seq_session_show(m, pd->session); + +out: + return 0; +} + +static struct seq_operations pppol2tp_seq_ops = { + .start = pppol2tp_seq_start, + .next = pppol2tp_seq_next, + .stop = pppol2tp_seq_stop, + .show = pppol2tp_seq_show, +}; + +/* Called when our /proc file is opened. We allocate data for use when + * iterating our tunnel / session contexts and store it in the private + * data of the seq_file. + */ +static int pppol2tp_proc_open(struct inode *inode, struct file *file) +{ + struct seq_file *m; + struct pppol2tp_seq_data *pd; + int ret = 0; + + ret = seq_open(file, &pppol2tp_seq_ops); + if (ret < 0) + goto out; + + m = file->private_data; + + /* Allocate and fill our proc_data for access later */ + ret = -ENOMEM; + m->private = kzalloc(sizeof(struct pppol2tp_seq_data), GFP_KERNEL); + if (m->private == NULL) + goto out; + + pd = m->private; + ret = 0; + +out: + return ret; +} + +/* Called when /proc file access completes. 
+ */ +static int pppol2tp_proc_release(struct inode *inode, struct file *file) +{ + struct seq_file *m = (struct seq_file *)file->private_data; + + kfree(m->private); + m->private = NULL; + + return seq_release(inode, file); +} + +static struct file_operations pppol2tp_proc_fops = { + .owner = THIS_MODULE, + .open = pppol2tp_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = pppol2tp_proc_release, +}; + +static struct proc_dir_entry *pppol2tp_proc; + +#endif /* CONFIG_PROC_FS */ + +/***************************************************************************** + * Init and cleanup + *****************************************************************************/ + +static struct proto_ops pppol2tp_ops = { + .family = AF_PPPOX, + .owner = THIS_MODULE, + .release = pppol2tp_release, + .bind = sock_no_bind, + .connect = pppol2tp_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .getname = pppol2tp_getname, + .poll = datagram_poll, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .setsockopt = pppol2tp_setsockopt, + .getsockopt = pppol2tp_getsockopt, + .sendmsg = pppol2tp_sendmsg, + .recvmsg = pppol2tp_recvmsg, + .mmap = sock_no_mmap, + .ioctl = pppox_ioctl, +}; + +static struct pppox_proto pppol2tp_proto = { + .create = pppol2tp_create, + .ioctl = pppol2tp_ioctl +}; + +static int __init pppol2tp_init(void) +{ + int err; + + err = proto_register(&pppol2tp_sk_proto, 0); + if (err) + goto out; + err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto); + if (err) + goto out_unregister_pppol2tp_proto; + +#ifdef CONFIG_PROC_FS + pppol2tp_proc = create_proc_entry("pppol2tp", 0, proc_net); + if (!pppol2tp_proc) { + err = -ENOMEM; + goto out_unregister_pppox_proto; + } + pppol2tp_proc->proc_fops = &pppol2tp_proc_fops; +#endif /* CONFIG_PROC_FS */ + printk(KERN_INFO "PPPoL2TP kernel driver, %s\n", + PPPOL2TP_DRV_VERSION); + +out: + return err; + +out_unregister_pppox_proto: + unregister_pppox_proto(PX_PROTO_OL2TP); +out_unregister_pppol2tp_proto: + proto_unregister(&pppol2tp_sk_proto); + goto out; +} + +static void __exit pppol2tp_exit(void) +{ + unregister_pppox_proto(PX_PROTO_OL2TP); + +#ifdef CONFIG_PROC_FS + remove_proc_entry("pppol2tp", proc_net); +#endif + proto_unregister(&pppol2tp_sk_proto); +} + +module_init(pppol2tp_init); +module_exit(pppol2tp_exit); + +MODULE_AUTHOR("Martijn van Oosterhout <kleptog@svana.org>," + "James Chapman <jchapman@katalix.com>"); +MODULE_DESCRIPTION("PPP over L2TP over UDP"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(PPPOL2TP_DRV_VERSION); diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 982a9010c7a9..bb6896ae3151 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -2338,7 +2338,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, { struct skb_shared_info *info = skb_shinfo(skb); unsigned int cur_frag, entry; - struct TxDesc *txd; + struct TxDesc * uninitialized_var(txd); entry = tp->cur_tx; for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index fa29a403a247..afef6c0c59fe 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -796,12 +796,14 @@ static void free_shared_mem(struct s2io_nic *nic) struct mac_info *mac_control; struct config_param *config; int lst_size, lst_per_page; - struct net_device *dev = nic->dev; + struct net_device *dev; int page_num = 0; if (!nic) return; + dev = nic->dev; + mac_control = &nic->mac_control; config = &nic->config; @@ -1135,7 +1137,7 @@ static int 
init_nic(struct s2io_nic *nic) * SXE-008 TRANSMIT DMA ARBITRATION ISSUE. */ if ((nic->device_type == XFRAME_I_DEVICE) && - (get_xena_rev_id(nic->pdev) < 4)) + (nic->pdev->revision < 4)) writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable); val64 = readq(&bar0->tx_fifo_partition_0); @@ -1873,7 +1875,7 @@ static int verify_pcc_quiescent(struct s2io_nic *sp, int flag) herc = (sp->device_type == XFRAME_II_DEVICE); if (flag == FALSE) { - if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) { + if ((!herc && (sp->pdev->revision >= 4)) || herc) { if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE)) ret = 1; } else { @@ -1881,7 +1883,7 @@ static int verify_pcc_quiescent(struct s2io_nic *sp, int flag) ret = 1; } } else { - if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) { + if ((!herc && (sp->pdev->revision >= 4)) || herc) { if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) == ADAPTER_STATUS_RMAC_PCC_IDLE)) ret = 1; @@ -7076,23 +7078,6 @@ static void s2io_link(struct s2io_nic * sp, int link) } /** - * get_xena_rev_id - to identify revision ID of xena. - * @pdev : PCI Dev structure - * Description: - * Function to identify the Revision ID of xena. - * Return value: - * returns the revision ID of the device. - */ - -static int get_xena_rev_id(struct pci_dev *pdev) -{ - u8 id = 0; - int ret; - ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id); - return id; -} - -/** * s2io_init_pci -Initialization of PCI and PCI-X configuration registers . * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure. @@ -7550,7 +7535,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) s2io_vpd_read(sp); DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n"); DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name, - sp->product_name, get_xena_rev_id(sp->pdev)); + sp->product_name, pdev->revision); DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, s2io_driver_version); DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 58592780f519..3887fe63a908 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h @@ -1033,7 +1033,6 @@ static void s2io_set_link(struct work_struct *work); static int s2io_set_swapper(struct s2io_nic * sp); static void s2io_card_down(struct s2io_nic *nic); static int s2io_card_up(struct s2io_nic *nic); -static int get_xena_rev_id(struct pci_dev *pdev); static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit, int bit_state); static int s2io_add_isr(struct s2io_nic * sp); diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c index ad94358ece89..7dae4d404978 100644 --- a/drivers/net/saa9730.c +++ b/drivers/net/saa9730.c @@ -690,9 +690,9 @@ static int lan_saa9730_rx(struct net_device *dev) lp->stats.rx_packets++; skb_reserve(skb, 2); /* 16 byte align */ skb_put(skb, len); /* make room */ - eth_copy_and_sum(skb, + skb_copy_to_linear_data(skb, (unsigned char *) pData, - len, 0); + len); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; @@ -940,15 +940,14 @@ static void lan_saa9730_set_multicast(struct net_device *dev) CAM_CONTROL_GROUP_ACC | CAM_CONTROL_BROAD_ACC, &lp->lan_saa9730_regs->CamCtl); } else { - if (dev->flags & IFF_ALLMULTI) { + if (dev->flags & IFF_ALLMULTI || dev->mc_count) { /* accept all multicast packets */ - writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_GROUP_ACC | - CAM_CONTROL_BROAD_ACC, - &lp->lan_saa9730_regs->CamCtl); - } else { /* * Will handle the multicast stuff later. 
-carstenl */ + writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_GROUP_ACC | + CAM_CONTROL_BROAD_ACC, + &lp->lan_saa9730_regs->CamCtl); } } diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c index 2106becf6990..384b4685e977 100644 --- a/drivers/net/sgiseeq.c +++ b/drivers/net/sgiseeq.c @@ -320,7 +320,7 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp skb_put(skb, len); /* Copy out of kseg1 to avoid silly cache flush. */ - eth_copy_and_sum(skb, pkt_pointer + 2, len, 0); + skb_copy_to_linear_data(skb, pkt_pointer + 2, len); skb->protocol = eth_type_trans(skb, dev); /* We don't want to receive our own packets */ diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c index e886e8d7cfdf..4c3d98ff4cd4 100644 --- a/drivers/net/shaper.c +++ b/drivers/net/shaper.c @@ -600,10 +600,9 @@ static int __init shaper_init(void) return -ENODEV; alloc_size = sizeof(*dev) * shapers; - devs = kmalloc(alloc_size, GFP_KERNEL); + devs = kzalloc(alloc_size, GFP_KERNEL); if (!devs) return -ENOMEM; - memset(devs, 0, alloc_size); for (i = 0; i < shapers; i++) { diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index bc8de48da313..ec2ad9f0efa2 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c @@ -548,7 +548,7 @@ static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size, skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN); if (skb) { skb_reserve(skb, NET_IP_ALIGN); - eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0); + skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size); *sk_buff = skb; sis190_give_to_asic(desc, rx_buf_sz); ret = 0; diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index a2f32151559e..13f08a390e1f 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -692,6 +692,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) { struct sky2_port *sky2 = netdev_priv(hw->dev[port]); u16 reg; + u32 rx_reg; int i; const u8 *addr = hw->dev[port]->dev_addr; @@ -768,11 +769,11 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) /* Configure Rx MAC FIFO */ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); - reg = GMF_OPER_ON | GMF_RX_F_FL_ON; + rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON; if (hw->chip_id == CHIP_ID_YUKON_EX) - reg |= GMF_RX_OVER_ON; + rx_reg |= GMF_RX_OVER_ON; - sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), reg); + sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg); /* Flush Rx MAC FIFO on any flow control or error */ sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR); diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index 786d4b9c07ec..8b6478663a56 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c @@ -740,7 +740,7 @@ static int __devinit starfire_init_one(struct pci_dev *pdev, pci_set_master(pdev); /* enable MWI -- it vastly improves Rx performance on sparc64 */ - pci_set_mwi(pdev); + pci_try_set_mwi(pdev); #ifdef ZEROCOPY /* Starfire can do TCP/UDP checksumming */ @@ -1456,7 +1456,7 @@ static int __netdev_rx(struct net_device *dev, int *quota) pci_dma_sync_single_for_cpu(np->pci_dev, np->rx_info[entry].mapping, pkt_len, PCI_DMA_FROMDEVICE); - eth_copy_and_sum(skb, np->rx_info[entry].skb->data, pkt_len, 0); + skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len); pci_dma_sync_single_for_device(np->pci_dev, np->rx_info[entry].mapping, pkt_len, PCI_DMA_FROMDEVICE); diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c index a123ea87893b..b77ab6e8fd35 100644 --- a/drivers/net/sun3_82586.c +++ b/drivers/net/sun3_82586.c @@ -777,7 +777,7 
@@ static void sun3_82586_rcv_int(struct net_device *dev) { skb_reserve(skb,2); skb_put(skb,totlen); - eth_copy_and_sum(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen,0); + skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen); skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); p->stats.rx_packets++; diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c index 791e081fdc15..f1548c033327 100644 --- a/drivers/net/sun3lance.c +++ b/drivers/net/sun3lance.c @@ -853,10 +853,9 @@ static int lance_rx( struct net_device *dev ) skb_reserve( skb, 2 ); /* 16 byte align */ skb_put( skb, pkt_len ); /* Make room */ -// skb_copy_to_linear_data(skb, PKTBUF_ADDR(head), pkt_len); - eth_copy_and_sum(skb, + skb_copy_to_linear_data(skb, PKTBUF_ADDR(head), - pkt_len, 0); + pkt_len); skb->protocol = eth_type_trans( skb, dev ); netif_rx( skb ); diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c index 2ad8d58dee3b..b3e0158def4f 100644 --- a/drivers/net/sunbmac.c +++ b/drivers/net/sunbmac.c @@ -860,7 +860,7 @@ static void bigmac_rx(struct bigmac *bp) sbus_dma_sync_single_for_cpu(bp->bigmac_sdev, this->rx_addr, len, SBUS_DMA_FROMDEVICE); - eth_copy_and_sum(copy_skb, (unsigned char *)skb->data, len, 0); + skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len); sbus_dma_sync_single_for_device(bp->bigmac_sdev, this->rx_addr, len, SBUS_DMA_FROMDEVICE); diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c index e1f912d04043..af0c9831074c 100644 --- a/drivers/net/sundance.c +++ b/drivers/net/sundance.c @@ -397,7 +397,6 @@ struct netdev_private { unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */ struct pci_dev *pci_dev; void __iomem *base; - unsigned char pci_rev_id; }; /* The station address location in the EEPROM. 
*/ @@ -544,8 +543,6 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev, dev->change_mtu = &change_mtu; pci_set_drvdata(pdev, dev); - pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id); - i = register_netdev(dev); if (i) goto err_out_unmap_rx; @@ -828,7 +825,7 @@ static int netdev_open(struct net_device *dev) iowrite8(100, ioaddr + RxDMAPollPeriod); iowrite8(127, ioaddr + TxDMAPollPeriod); /* Fix DFE-580TX packet drop issue */ - if (np->pci_rev_id >= 0x14) + if (np->pci_dev->revision >= 0x14) iowrite8(0x01, ioaddr + DebugCtrl1); netif_start_queue(dev); @@ -1194,7 +1191,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) hw_frame_id = ioread8(ioaddr + TxFrameId); } - if (np->pci_rev_id >= 0x14) { + if (np->pci_dev->revision >= 0x14) { spin_lock(&np->lock); for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { int entry = np->dirty_tx % TX_RING_SIZE; @@ -1313,7 +1310,7 @@ static void rx_poll(unsigned long data) np->rx_buf_sz, PCI_DMA_FROMDEVICE); - eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0); + skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len); pci_dma_sync_single_for_device(np->pci_dev, desc->frag[0].addr, np->rx_buf_sz, diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index 15146a119230..8b35f13318ea 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c @@ -3095,12 +3095,8 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, #ifdef CONFIG_SPARC hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff); - if (hp->hm_revision == 0xff) { - unsigned char prev; - - pci_read_config_byte(pdev, PCI_REVISION_ID, &prev); - hp->hm_revision = 0xc0 | (prev & 0x0f); - } + if (hp->hm_revision == 0xff) + hp->hm_revision = 0xc0 | (pdev->revision & 0x0f); #else /* works with this on non-sparc hosts */ hp->hm_revision = 0x20; diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c index 42722530ab24..053b7cb0d944 100644 --- a/drivers/net/sunlance.c +++ b/drivers/net/sunlance.c @@ -549,9 +549,9 @@ static void lance_rx_dvma(struct net_device *dev) skb_reserve(skb, 2); /* 16 byte align */ skb_put(skb, len); /* make room */ - eth_copy_and_sum(skb, + skb_copy_to_linear_data(skb, (unsigned char *)&(ib->rx_buf [entry][0]), - len, 0); + len); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c index fa70e0b78af7..1b65ae8a1c7c 100644 --- a/drivers/net/sunqe.c +++ b/drivers/net/sunqe.c @@ -439,8 +439,8 @@ static void qe_rx(struct sunqe *qep) } else { skb_reserve(skb, 2); skb_put(skb, len); - eth_copy_and_sum(skb, (unsigned char *) this_qbuf, - len, 0); + skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf, + len); skb->protocol = eth_type_trans(skb, qep->dev); netif_rx(skb); qep->dev->last_rx = jiffies; diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c new file mode 100644 index 000000000000..61f98251feab --- /dev/null +++ b/drivers/net/sunvnet.c @@ -0,0 +1,1295 @@ +/* sunvnet.c: Sun LDOM Virtual Network Driver. + * + * Copyright (C) 2007 David S. 
Miller <davem@davemloft.net> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/etherdevice.h> +#include <linux/mutex.h> + +#include <asm/vio.h> +#include <asm/ldc.h> + +#include "sunvnet.h" + +#define DRV_MODULE_NAME "sunvnet" +#define PFX DRV_MODULE_NAME ": " +#define DRV_MODULE_VERSION "1.0" +#define DRV_MODULE_RELDATE "June 25, 2007" + +static char version[] __devinitdata = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; +MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); +MODULE_DESCRIPTION("Sun LDOM virtual network driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +/* Ordered from largest major to lowest */ +static struct vio_version vnet_versions[] = { + { .major = 1, .minor = 0 }, +}; + +static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr) +{ + return vio_dring_avail(dr, VNET_TX_RING_SIZE); +} + +static int vnet_handle_unknown(struct vnet_port *port, void *arg) +{ + struct vio_msg_tag *pkt = arg; + + printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n", + pkt->type, pkt->stype, pkt->stype_env, pkt->sid); + printk(KERN_ERR PFX "Resetting connection.\n"); + + ldc_disconnect(port->vio.lp); + + return -ECONNRESET; +} + +static int vnet_send_attr(struct vio_driver_state *vio) +{ + struct vnet_port *port = to_vnet_port(vio); + struct net_device *dev = port->vp->dev; + struct vio_net_attr_info pkt; + int i; + + memset(&pkt, 0, sizeof(pkt)); + pkt.tag.type = VIO_TYPE_CTRL; + pkt.tag.stype = VIO_SUBTYPE_INFO; + pkt.tag.stype_env = VIO_ATTR_INFO; + pkt.tag.sid = vio_send_sid(vio); + pkt.xfer_mode = VIO_DRING_MODE; + pkt.addr_type = VNET_ADDR_ETHERMAC; + pkt.ack_freq = 0; + for (i = 0; i < 6; i++) + pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); + pkt.mtu = ETH_FRAME_LEN; + + viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " + "ackfreq[%u] mtu[%llu]\n", + pkt.xfer_mode, pkt.addr_type, + (unsigned long long) pkt.addr, + pkt.ack_freq, + (unsigned long long) pkt.mtu); + + return vio_ldc_send(vio, &pkt, sizeof(pkt)); +} + +static int handle_attr_info(struct vio_driver_state *vio, + struct vio_net_attr_info *pkt) +{ + viodbg(HS, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] " + "ackfreq[%u] mtu[%llu]\n", + pkt->xfer_mode, pkt->addr_type, + (unsigned long long) pkt->addr, + pkt->ack_freq, + (unsigned long long) pkt->mtu); + + pkt->tag.sid = vio_send_sid(vio); + + if (pkt->xfer_mode != VIO_DRING_MODE || + pkt->addr_type != VNET_ADDR_ETHERMAC || + pkt->mtu != ETH_FRAME_LEN) { + viodbg(HS, "SEND NET ATTR NACK\n"); + + pkt->tag.stype = VIO_SUBTYPE_NACK; + + (void) vio_ldc_send(vio, pkt, sizeof(*pkt)); + + return -ECONNRESET; + } else { + viodbg(HS, "SEND NET ATTR ACK\n"); + + pkt->tag.stype = VIO_SUBTYPE_ACK; + + return vio_ldc_send(vio, pkt, sizeof(*pkt)); + } + +} + +static int handle_attr_ack(struct vio_driver_state *vio, + struct vio_net_attr_info *pkt) +{ + viodbg(HS, "GOT NET ATTR ACK\n"); + + return 0; +} + +static int handle_attr_nack(struct vio_driver_state *vio, + struct vio_net_attr_info *pkt) +{ + viodbg(HS, "GOT NET ATTR NACK\n"); + + return -ECONNRESET; +} + +static int vnet_handle_attr(struct vio_driver_state *vio, void *arg) +{ + struct vio_net_attr_info *pkt = arg; + + switch (pkt->tag.stype) { + case VIO_SUBTYPE_INFO: + return handle_attr_info(vio, pkt); + + case VIO_SUBTYPE_ACK: + return 
handle_attr_ack(vio, pkt); + + case VIO_SUBTYPE_NACK: + return handle_attr_nack(vio, pkt); + + default: + return -ECONNRESET; + } +} + +static void vnet_handshake_complete(struct vio_driver_state *vio) +{ + struct vio_dring_state *dr; + + dr = &vio->drings[VIO_DRIVER_RX_RING]; + dr->snd_nxt = dr->rcv_nxt = 1; + + dr = &vio->drings[VIO_DRIVER_TX_RING]; + dr->snd_nxt = dr->rcv_nxt = 1; +} + +/* The hypervisor interface that implements copying to/from imported + * memory from another domain requires that copies are done to 8-byte + * aligned buffers, and that the lengths of such copies are also 8-byte + * multiples. + * + * So we align skb->data to an 8-byte multiple and pad-out the data + * area so we can round the copy length up to the next multiple of + * 8 for the copy. + * + * The transmitter puts the actual start of the packet 6 bytes into + * the buffer it sends over, so that the IP headers after the ethernet + * header are aligned properly. These 6 bytes are not in the descriptor + * length, they are simply implied. This offset is represented using + * the VNET_PACKET_SKIP macro. + */ +static struct sk_buff *alloc_and_align_skb(struct net_device *dev, + unsigned int len) +{ + struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8); + unsigned long addr, off; + + if (unlikely(!skb)) + return NULL; + + addr = (unsigned long) skb->data; + off = ((addr + 7UL) & ~7UL) - addr; + if (off) + skb_reserve(skb, off); + + return skb; +} + +static int vnet_rx_one(struct vnet_port *port, unsigned int len, + struct ldc_trans_cookie *cookies, int ncookies) +{ + struct net_device *dev = port->vp->dev; + unsigned int copy_len; + struct sk_buff *skb; + int err; + + err = -EMSGSIZE; + if (unlikely(len < ETH_ZLEN || len > ETH_FRAME_LEN)) { + dev->stats.rx_length_errors++; + goto out_dropped; + } + + skb = alloc_and_align_skb(dev, len); + err = -ENOMEM; + if (unlikely(!skb)) { + dev->stats.rx_missed_errors++; + goto out_dropped; + } + + copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U; + skb_put(skb, copy_len); + err = ldc_copy(port->vio.lp, LDC_COPY_IN, + skb->data, copy_len, 0, + cookies, ncookies); + if (unlikely(err < 0)) { + dev->stats.rx_frame_errors++; + goto out_free_skb; + } + + skb_pull(skb, VNET_PACKET_SKIP); + skb_trim(skb, len); + skb->protocol = eth_type_trans(skb, dev); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + + netif_rx(skb); + + return 0; + +out_free_skb: + kfree_skb(skb); + +out_dropped: + dev->stats.rx_dropped++; + return err; +} + +static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, + u32 start, u32 end, u8 vio_dring_state) +{ + struct vio_dring_data hdr = { + .tag = { + .type = VIO_TYPE_DATA, + .stype = VIO_SUBTYPE_ACK, + .stype_env = VIO_DRING_DATA, + .sid = vio_send_sid(&port->vio), + }, + .dring_ident = dr->ident, + .start_idx = start, + .end_idx = end, + .state = vio_dring_state, + }; + int err, delay; + + hdr.seq = dr->snd_nxt; + delay = 1; + do { + err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); + if (err > 0) { + dr->snd_nxt++; + break; + } + udelay(delay); + if ((delay <<= 1) > 128) + delay = 128; + } while (err == -EAGAIN); + + return err; +} + +static u32 next_idx(u32 idx, struct vio_dring_state *dr) +{ + if (++idx == dr->num_entries) + idx = 0; + return idx; +} + +static u32 prev_idx(u32 idx, struct vio_dring_state *dr) +{ + if (idx == 0) + idx = dr->num_entries - 1; + else + idx--; + + return idx; +} + +static struct vio_net_desc *get_rx_desc(struct vnet_port *port, + struct vio_dring_state *dr, + u32 index) +{ + 
struct vio_net_desc *desc = port->vio.desc_buf; + int err; + + err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size, + (index * dr->entry_size), + dr->cookies, dr->ncookies); + if (err < 0) + return ERR_PTR(err); + + return desc; +} + +static int put_rx_desc(struct vnet_port *port, + struct vio_dring_state *dr, + struct vio_net_desc *desc, + u32 index) +{ + int err; + + err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size, + (index * dr->entry_size), + dr->cookies, dr->ncookies); + if (err < 0) + return err; + + return 0; +} + +static int vnet_walk_rx_one(struct vnet_port *port, + struct vio_dring_state *dr, + u32 index, int *needs_ack) +{ + struct vio_net_desc *desc = get_rx_desc(port, dr, index); + struct vio_driver_state *vio = &port->vio; + int err; + + if (IS_ERR(desc)) + return PTR_ERR(desc); + + viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%lx:%lx]\n", + desc->hdr.state, desc->hdr.ack, + desc->size, desc->ncookies, + desc->cookies[0].cookie_addr, + desc->cookies[0].cookie_size); + + if (desc->hdr.state != VIO_DESC_READY) + return 1; + err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies); + if (err == -ECONNRESET) + return err; + desc->hdr.state = VIO_DESC_DONE; + err = put_rx_desc(port, dr, desc, index); + if (err < 0) + return err; + *needs_ack = desc->hdr.ack; + return 0; +} + +static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, + u32 start, u32 end) +{ + struct vio_driver_state *vio = &port->vio; + int ack_start = -1, ack_end = -1; + + end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr); + + viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end); + + while (start != end) { + int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack); + if (err == -ECONNRESET) + return err; + if (err != 0) + break; + if (ack_start == -1) + ack_start = start; + ack_end = start; + start = next_idx(start, dr); + if (ack && start != end) { + err = vnet_send_ack(port, dr, ack_start, ack_end, + VIO_DRING_ACTIVE); + if (err == -ECONNRESET) + return err; + ack_start = -1; + } + } + if (unlikely(ack_start == -1)) + ack_start = ack_end = prev_idx(start, dr); + return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED); +} + +static int vnet_rx(struct vnet_port *port, void *msgbuf) +{ + struct vio_dring_data *pkt = msgbuf; + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; + struct vio_driver_state *vio = &port->vio; + + viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016lx] rcv_nxt[%016lx]\n", + pkt->tag.stype_env, pkt->seq, dr->rcv_nxt); + + if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) + return 0; + if (unlikely(pkt->seq != dr->rcv_nxt)) { + printk(KERN_ERR PFX "RX out of sequence seq[0x%lx] " + "rcv_nxt[0x%lx]\n", pkt->seq, dr->rcv_nxt); + return 0; + } + + dr->rcv_nxt++; + + /* XXX Validate pkt->start_idx and pkt->end_idx XXX */ + + return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx); +} + +static int idx_is_pending(struct vio_dring_state *dr, u32 end) +{ + u32 idx = dr->cons; + int found = 0; + + while (idx != dr->prod) { + if (idx == end) { + found = 1; + break; + } + idx = next_idx(idx, dr); + } + return found; +} + +static int vnet_ack(struct vnet_port *port, void *msgbuf) +{ + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct vio_dring_data *pkt = msgbuf; + struct net_device *dev; + struct vnet *vp; + u32 end; + + if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) + return 0; + + end = pkt->end_idx; + if (unlikely(!idx_is_pending(dr, end))) + 
return 0; + + dr->cons = next_idx(end, dr); + + vp = port->vp; + dev = vp->dev; + if (unlikely(netif_queue_stopped(dev) && + vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr))) + return 1; + + return 0; +} + +static int vnet_nack(struct vnet_port *port, void *msgbuf) +{ + /* XXX just reset or similar XXX */ + return 0; +} + +static int handle_mcast(struct vnet_port *port, void *msgbuf) +{ + struct vio_net_mcast_info *pkt = msgbuf; + + if (pkt->tag.stype != VIO_SUBTYPE_ACK) + printk(KERN_ERR PFX "%s: Got unexpected MCAST reply " + "[%02x:%02x:%04x:%08x]\n", + port->vp->dev->name, + pkt->tag.type, + pkt->tag.stype, + pkt->tag.stype_env, + pkt->tag.sid); + + return 0; +} + +static void maybe_tx_wakeup(struct vnet *vp) +{ + struct net_device *dev = vp->dev; + + netif_tx_lock(dev); + if (likely(netif_queue_stopped(dev))) { + struct vnet_port *port; + int wake = 1; + + list_for_each_entry(port, &vp->port_list, list) { + struct vio_dring_state *dr; + + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + if (vnet_tx_dring_avail(dr) < + VNET_TX_WAKEUP_THRESH(dr)) { + wake = 0; + break; + } + } + if (wake) + netif_wake_queue(dev); + } + netif_tx_unlock(dev); +} + +static void vnet_event(void *arg, int event) +{ + struct vnet_port *port = arg; + struct vio_driver_state *vio = &port->vio; + unsigned long flags; + int tx_wakeup, err; + + spin_lock_irqsave(&vio->lock, flags); + + if (unlikely(event == LDC_EVENT_RESET || + event == LDC_EVENT_UP)) { + vio_link_state_change(vio, event); + spin_unlock_irqrestore(&vio->lock, flags); + + if (event == LDC_EVENT_RESET) + vio_port_up(vio); + return; + } + + if (unlikely(event != LDC_EVENT_DATA_READY)) { + printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event); + spin_unlock_irqrestore(&vio->lock, flags); + return; + } + + tx_wakeup = err = 0; + while (1) { + union { + struct vio_msg_tag tag; + u64 raw[8]; + } msgbuf; + + err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); + if (unlikely(err < 0)) { + if (err == -ECONNRESET) + vio_conn_reset(vio); + break; + } + if (err == 0) + break; + viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n", + msgbuf.tag.type, + msgbuf.tag.stype, + msgbuf.tag.stype_env, + msgbuf.tag.sid); + err = vio_validate_sid(vio, &msgbuf.tag); + if (err < 0) + break; + + if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { + if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) { + err = vnet_rx(port, &msgbuf); + } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) { + err = vnet_ack(port, &msgbuf); + if (err > 0) + tx_wakeup |= err; + } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) { + err = vnet_nack(port, &msgbuf); + } + } else if (msgbuf.tag.type == VIO_TYPE_CTRL) { + if (msgbuf.tag.stype_env == VNET_MCAST_INFO) + err = handle_mcast(port, &msgbuf); + else + err = vio_control_pkt_engine(vio, &msgbuf); + if (err) + break; + } else { + err = vnet_handle_unknown(port, &msgbuf); + } + if (err == -ECONNRESET) + break; + } + spin_unlock(&vio->lock); + if (unlikely(tx_wakeup && err != -ECONNRESET)) + maybe_tx_wakeup(port->vp); + local_irq_restore(flags); +} + +static int __vnet_tx_trigger(struct vnet_port *port) +{ + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct vio_dring_data hdr = { + .tag = { + .type = VIO_TYPE_DATA, + .stype = VIO_SUBTYPE_INFO, + .stype_env = VIO_DRING_DATA, + .sid = vio_send_sid(&port->vio), + }, + .dring_ident = dr->ident, + .start_idx = dr->prod, + .end_idx = (u32) -1, + }; + int err, delay; + + hdr.seq = dr->snd_nxt; + delay = 1; + do { + err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); + if (err > 0) { + dr->snd_nxt++; 
+ break; + } + udelay(delay); + if ((delay <<= 1) > 128) + delay = 128; + } while (err == -EAGAIN); + + return err; +} + +struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) +{ + unsigned int hash = vnet_hashfn(skb->data); + struct hlist_head *hp = &vp->port_hash[hash]; + struct hlist_node *n; + struct vnet_port *port; + + hlist_for_each_entry(port, n, hp, hash) { + if (!compare_ether_addr(port->raddr, skb->data)) + return port; + } + port = NULL; + if (!list_empty(&vp->port_list)) + port = list_entry(vp->port_list.next, struct vnet_port, list); + + return port; +} + +struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb) +{ + struct vnet_port *ret; + unsigned long flags; + + spin_lock_irqsave(&vp->lock, flags); + ret = __tx_port_find(vp, skb); + spin_unlock_irqrestore(&vp->lock, flags); + + return ret; +} + +static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct vnet *vp = netdev_priv(dev); + struct vnet_port *port = tx_port_find(vp, skb); + struct vio_dring_state *dr; + struct vio_net_desc *d; + unsigned long flags; + unsigned int len; + void *tx_buf; + int i, err; + + if (unlikely(!port)) + goto out_dropped; + + spin_lock_irqsave(&port->vio.lock, flags); + + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + if (unlikely(vnet_tx_dring_avail(dr) < 2)) { + if (!netif_queue_stopped(dev)) { + netif_stop_queue(dev); + + /* This is a hard error, log it. */ + printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " + "queue awake!\n", dev->name); + dev->stats.tx_errors++; + } + spin_unlock_irqrestore(&port->vio.lock, flags); + return NETDEV_TX_BUSY; + } + + d = vio_dring_cur(dr); + + tx_buf = port->tx_bufs[dr->prod].buf; + skb_copy_from_linear_data(skb, tx_buf + VNET_PACKET_SKIP, skb->len); + + len = skb->len; + if (len < ETH_ZLEN) { + len = ETH_ZLEN; + memset(tx_buf+VNET_PACKET_SKIP+skb->len, 0, len - skb->len); + } + + d->hdr.ack = VIO_ACK_ENABLE; + d->size = len; + d->ncookies = port->tx_bufs[dr->prod].ncookies; + for (i = 0; i < d->ncookies; i++) + d->cookies[i] = port->tx_bufs[dr->prod].cookies[i]; + + /* This has to be a non-SMP write barrier because we are writing + * to memory which is shared with the peer LDOM. 
+ */ + wmb(); + + d->hdr.state = VIO_DESC_READY; + + err = __vnet_tx_trigger(port); + if (unlikely(err < 0)) { + printk(KERN_INFO PFX "%s: TX trigger error %d\n", + dev->name, err); + d->hdr.state = VIO_DESC_FREE; + dev->stats.tx_carrier_errors++; + goto out_dropped_unlock; + } + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); + if (unlikely(vnet_tx_dring_avail(dr) < 2)) { + netif_stop_queue(dev); + if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) + netif_wake_queue(dev); + } + + spin_unlock_irqrestore(&port->vio.lock, flags); + + dev_kfree_skb(skb); + + dev->trans_start = jiffies; + return NETDEV_TX_OK; + +out_dropped_unlock: + spin_unlock_irqrestore(&port->vio.lock, flags); + +out_dropped: + dev_kfree_skb(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; +} + +static void vnet_tx_timeout(struct net_device *dev) +{ + /* XXX Implement me XXX */ +} + +static int vnet_open(struct net_device *dev) +{ + netif_carrier_on(dev); + netif_start_queue(dev); + + return 0; +} + +static int vnet_close(struct net_device *dev) +{ + netif_stop_queue(dev); + netif_carrier_off(dev); + + return 0; +} + +static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr) +{ + struct vnet_mcast_entry *m; + + for (m = vp->mcast_list; m; m = m->next) { + if (!memcmp(m->addr, addr, ETH_ALEN)) + return m; + } + return NULL; +} + +static void __update_mc_list(struct vnet *vp, struct net_device *dev) +{ + struct dev_addr_list *p; + + for (p = dev->mc_list; p; p = p->next) { + struct vnet_mcast_entry *m; + + m = __vnet_mc_find(vp, p->dmi_addr); + if (m) { + m->hit = 1; + continue; + } + + if (!m) { + m = kzalloc(sizeof(*m), GFP_ATOMIC); + if (!m) + continue; + memcpy(m->addr, p->dmi_addr, ETH_ALEN); + m->hit = 1; + + m->next = vp->mcast_list; + vp->mcast_list = m; + } + } +} + +static void __send_mc_list(struct vnet *vp, struct vnet_port *port) +{ + struct vio_net_mcast_info info; + struct vnet_mcast_entry *m, **pp; + int n_addrs; + + memset(&info, 0, sizeof(info)); + + info.tag.type = VIO_TYPE_CTRL; + info.tag.stype = VIO_SUBTYPE_INFO; + info.tag.stype_env = VNET_MCAST_INFO; + info.tag.sid = vio_send_sid(&port->vio); + info.set = 1; + + n_addrs = 0; + for (m = vp->mcast_list; m; m = m->next) { + if (m->sent) + continue; + m->sent = 1; + memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], + m->addr, ETH_ALEN); + if (++n_addrs == VNET_NUM_MCAST) { + info.count = n_addrs; + + (void) vio_ldc_send(&port->vio, &info, + sizeof(info)); + n_addrs = 0; + } + } + if (n_addrs) { + info.count = n_addrs; + (void) vio_ldc_send(&port->vio, &info, sizeof(info)); + } + + info.set = 0; + + n_addrs = 0; + pp = &vp->mcast_list; + while ((m = *pp) != NULL) { + if (m->hit) { + m->hit = 0; + pp = &m->next; + continue; + } + + memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], + m->addr, ETH_ALEN); + if (++n_addrs == VNET_NUM_MCAST) { + info.count = n_addrs; + (void) vio_ldc_send(&port->vio, &info, + sizeof(info)); + n_addrs = 0; + } + + *pp = m->next; + kfree(m); + } + if (n_addrs) { + info.count = n_addrs; + (void) vio_ldc_send(&port->vio, &info, sizeof(info)); + } +} + +static void vnet_set_rx_mode(struct net_device *dev) +{ + struct vnet *vp = netdev_priv(dev); + struct vnet_port *port; + unsigned long flags; + + spin_lock_irqsave(&vp->lock, flags); + if (!list_empty(&vp->port_list)) { + port = list_entry(vp->port_list.next, struct vnet_port, list); + + if (port->switch_port) { + __update_mc_list(vp, dev); + __send_mc_list(vp, port); + } + } + 
spin_unlock_irqrestore(&vp->lock, flags); +} + +static int vnet_change_mtu(struct net_device *dev, int new_mtu) +{ + if (new_mtu != ETH_DATA_LEN) + return -EINVAL; + + dev->mtu = new_mtu; + return 0; +} + +static int vnet_set_mac_addr(struct net_device *dev, void *p) +{ + return -EINVAL; +} + +static void vnet_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_MODULE_NAME); + strcpy(info->version, DRV_MODULE_VERSION); +} + +static u32 vnet_get_msglevel(struct net_device *dev) +{ + struct vnet *vp = netdev_priv(dev); + return vp->msg_enable; +} + +static void vnet_set_msglevel(struct net_device *dev, u32 value) +{ + struct vnet *vp = netdev_priv(dev); + vp->msg_enable = value; +} + +static const struct ethtool_ops vnet_ethtool_ops = { + .get_drvinfo = vnet_get_drvinfo, + .get_msglevel = vnet_get_msglevel, + .set_msglevel = vnet_set_msglevel, + .get_link = ethtool_op_get_link, + .get_perm_addr = ethtool_op_get_perm_addr, +}; + +static void vnet_port_free_tx_bufs(struct vnet_port *port) +{ + struct vio_dring_state *dr; + int i; + + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + if (dr->base) { + ldc_free_exp_dring(port->vio.lp, dr->base, + (dr->entry_size * dr->num_entries), + dr->cookies, dr->ncookies); + dr->base = NULL; + dr->entry_size = 0; + dr->num_entries = 0; + dr->pending = 0; + dr->ncookies = 0; + } + + for (i = 0; i < VNET_TX_RING_SIZE; i++) { + void *buf = port->tx_bufs[i].buf; + + if (!buf) + continue; + + ldc_unmap(port->vio.lp, + port->tx_bufs[i].cookies, + port->tx_bufs[i].ncookies); + + kfree(buf); + port->tx_bufs[i].buf = NULL; + } +} + +static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port) +{ + struct vio_dring_state *dr; + unsigned long len; + int i, err, ncookies; + void *dring; + + for (i = 0; i < VNET_TX_RING_SIZE; i++) { + void *buf = kzalloc(ETH_FRAME_LEN + 8, GFP_KERNEL); + int map_len = (ETH_FRAME_LEN + 7) & ~7; + + err = -ENOMEM; + if (!buf) { + printk(KERN_ERR "TX buffer allocation failure\n"); + goto err_out; + } + err = -EFAULT; + if ((unsigned long)buf & (8UL - 1)) { + printk(KERN_ERR "TX buffer misaligned\n"); + kfree(buf); + goto err_out; + } + + err = ldc_map_single(port->vio.lp, buf, map_len, + port->tx_bufs[i].cookies, 2, + (LDC_MAP_SHADOW | + LDC_MAP_DIRECT | + LDC_MAP_RW)); + if (err < 0) { + kfree(buf); + goto err_out; + } + port->tx_bufs[i].buf = buf; + port->tx_bufs[i].ncookies = err; + } + + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + + len = (VNET_TX_RING_SIZE * + (sizeof(struct vio_net_desc) + + (sizeof(struct ldc_trans_cookie) * 2))); + + ncookies = VIO_MAX_RING_COOKIES; + dring = ldc_alloc_exp_dring(port->vio.lp, len, + dr->cookies, &ncookies, + (LDC_MAP_SHADOW | + LDC_MAP_DIRECT | + LDC_MAP_RW)); + if (IS_ERR(dring)) { + err = PTR_ERR(dring); + goto err_out; + } + + dr->base = dring; + dr->entry_size = (sizeof(struct vio_net_desc) + + (sizeof(struct ldc_trans_cookie) * 2)); + dr->num_entries = VNET_TX_RING_SIZE; + dr->prod = dr->cons = 0; + dr->pending = VNET_TX_RING_SIZE; + dr->ncookies = ncookies; + + return 0; + +err_out: + vnet_port_free_tx_bufs(port); + + return err; +} + +static LIST_HEAD(vnet_list); +static DEFINE_MUTEX(vnet_list_mutex); + +static struct vnet * __devinit vnet_new(const u64 *local_mac) +{ + struct net_device *dev; + struct vnet *vp; + int err, i; + + dev = alloc_etherdev(sizeof(*vp)); + if (!dev) { + printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); + return ERR_PTR(-ENOMEM); + } + + for (i = 0; i < ETH_ALEN; i++) + dev->dev_addr[i] = (*local_mac >> (5 
- i) * 8) & 0xff; + + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + + vp = netdev_priv(dev); + + spin_lock_init(&vp->lock); + vp->dev = dev; + + INIT_LIST_HEAD(&vp->port_list); + for (i = 0; i < VNET_PORT_HASH_SIZE; i++) + INIT_HLIST_HEAD(&vp->port_hash[i]); + INIT_LIST_HEAD(&vp->list); + vp->local_mac = *local_mac; + + dev->open = vnet_open; + dev->stop = vnet_close; + dev->set_multicast_list = vnet_set_rx_mode; + dev->set_mac_address = vnet_set_mac_addr; + dev->tx_timeout = vnet_tx_timeout; + dev->ethtool_ops = &vnet_ethtool_ops; + dev->watchdog_timeo = VNET_TX_TIMEOUT; + dev->change_mtu = vnet_change_mtu; + dev->hard_start_xmit = vnet_start_xmit; + + err = register_netdev(dev); + if (err) { + printk(KERN_ERR PFX "Cannot register net device, " + "aborting.\n"); + goto err_out_free_dev; + } + + printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name); + + for (i = 0; i < 6; i++) + printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':'); + + list_add(&vp->list, &vnet_list); + + return vp; + +err_out_free_dev: + free_netdev(dev); + + return ERR_PTR(err); +} + +static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac) +{ + struct vnet *iter, *vp; + + mutex_lock(&vnet_list_mutex); + vp = NULL; + list_for_each_entry(iter, &vnet_list, list) { + if (iter->local_mac == *local_mac) { + vp = iter; + break; + } + } + if (!vp) + vp = vnet_new(local_mac); + mutex_unlock(&vnet_list_mutex); + + return vp; +} + +static const char *local_mac_prop = "local-mac-address"; + +static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp, + u64 port_node) +{ + const u64 *local_mac = NULL; + u64 a; + + mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) { + u64 target = mdesc_arc_target(hp, a); + const char *name; + + name = mdesc_get_property(hp, target, "name", NULL); + if (!name || strcmp(name, "network")) + continue; + + local_mac = mdesc_get_property(hp, target, + local_mac_prop, NULL); + if (local_mac) + break; + } + if (!local_mac) + return ERR_PTR(-ENODEV); + + return vnet_find_or_create(local_mac); +} + +static struct ldc_channel_config vnet_ldc_cfg = { + .event = vnet_event, + .mtu = 64, + .mode = LDC_MODE_UNRELIABLE, +}; + +static struct vio_driver_ops vnet_vio_ops = { + .send_attr = vnet_send_attr, + .handle_attr = vnet_handle_attr, + .handshake_complete = vnet_handshake_complete, +}; + +static void print_version(void) +{ + static int version_printed; + + if (version_printed++ == 0) + printk(KERN_INFO "%s", version); +} + +const char *remote_macaddr_prop = "remote-mac-address"; + +static int __devinit vnet_port_probe(struct vio_dev *vdev, + const struct vio_device_id *id) +{ + struct mdesc_handle *hp; + struct vnet_port *port; + unsigned long flags; + struct vnet *vp; + const u64 *rmac; + int len, i, err, switch_port; + + print_version(); + + hp = mdesc_grab(); + + vp = vnet_find_parent(hp, vdev->mp); + if (IS_ERR(vp)) { + printk(KERN_ERR PFX "Cannot find port parent vnet.\n"); + err = PTR_ERR(vp); + goto err_out_put_mdesc; + } + + rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len); + err = -ENODEV; + if (!rmac) { + printk(KERN_ERR PFX "Port lacks %s property.\n", + remote_macaddr_prop); + goto err_out_put_mdesc; + } + + port = kzalloc(sizeof(*port), GFP_KERNEL); + err = -ENOMEM; + if (!port) { + printk(KERN_ERR PFX "Cannot allocate vnet_port.\n"); + goto err_out_put_mdesc; + } + + for (i = 0; i < ETH_ALEN; i++) + port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff; + + port->vp = vp; + + err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK, + 
vnet_versions, ARRAY_SIZE(vnet_versions), + &vnet_vio_ops, vp->dev->name); + if (err) + goto err_out_free_port; + + err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port); + if (err) + goto err_out_free_port; + + err = vnet_port_alloc_tx_bufs(port); + if (err) + goto err_out_free_ldc; + + INIT_HLIST_NODE(&port->hash); + INIT_LIST_HEAD(&port->list); + + switch_port = 0; + if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL) + switch_port = 1; + port->switch_port = switch_port; + + spin_lock_irqsave(&vp->lock, flags); + if (switch_port) + list_add(&port->list, &vp->port_list); + else + list_add_tail(&port->list, &vp->port_list); + hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]); + spin_unlock_irqrestore(&vp->lock, flags); + + dev_set_drvdata(&vdev->dev, port); + + printk(KERN_INFO "%s: PORT ( remote-mac ", vp->dev->name); + for (i = 0; i < 6; i++) + printk("%2.2x%c", port->raddr[i], i == 5 ? ' ' : ':'); + if (switch_port) + printk("switch-port "); + printk(")\n"); + + vio_port_up(&port->vio); + + mdesc_release(hp); + + return 0; + +err_out_free_ldc: + vio_ldc_free(&port->vio); + +err_out_free_port: + kfree(port); + +err_out_put_mdesc: + mdesc_release(hp); + return err; +} + +static int vnet_port_remove(struct vio_dev *vdev) +{ + struct vnet_port *port = dev_get_drvdata(&vdev->dev); + + if (port) { + struct vnet *vp = port->vp; + unsigned long flags; + + del_timer_sync(&port->vio.timer); + + spin_lock_irqsave(&vp->lock, flags); + list_del(&port->list); + hlist_del(&port->hash); + spin_unlock_irqrestore(&vp->lock, flags); + + vnet_port_free_tx_bufs(port); + vio_ldc_free(&port->vio); + + dev_set_drvdata(&vdev->dev, NULL); + + kfree(port); + } + return 0; +} + +static struct vio_device_id vnet_port_match[] = { + { + .type = "vnet-port", + }, + {}, +}; +MODULE_DEVICE_TABLE(vio, vnet_port_match); + +static struct vio_driver vnet_port_driver = { + .id_table = vnet_port_match, + .probe = vnet_port_probe, + .remove = vnet_port_remove, + .driver = { + .name = "vnet_port", + .owner = THIS_MODULE, + } +}; + +static int __init vnet_init(void) +{ + return vio_register_driver(&vnet_port_driver); +} + +static void __exit vnet_exit(void) +{ + vio_unregister_driver(&vnet_port_driver); +} + +module_init(vnet_init); +module_exit(vnet_exit); diff --git a/drivers/net/sunvnet.h b/drivers/net/sunvnet.h new file mode 100644 index 000000000000..d347a5bf24b0 --- /dev/null +++ b/drivers/net/sunvnet.h @@ -0,0 +1,83 @@ +#ifndef _SUNVNET_H +#define _SUNVNET_H + +#define DESC_NCOOKIES(entry_size) \ + ((entry_size) - sizeof(struct vio_net_desc)) + +/* length of time before we decide the hardware is borked, + * and dev->tx_timeout() should be called to fix the problem + */ +#define VNET_TX_TIMEOUT (5 * HZ) + +#define VNET_TX_RING_SIZE 512 +#define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4) + +/* VNET packets are sent in buffers with the first 6 bytes skipped + * so that after the ethernet header the IPv4/IPv6 headers are aligned + * properly. 
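+ * (Editor's note, not part of the original patch: the TX buffers
+ * allocated in vnet_port_alloc_tx_bufs() above are checked to be
+ * 8-byte aligned, so with a 6-byte skip the 14-byte Ethernet header
+ * occupies offsets 6-19 and the IPv4/IPv6 header begins at offset
+ * 20, a 4-byte boundary.)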
+ */ +#define VNET_PACKET_SKIP 6 + +struct vnet_tx_entry { + void *buf; + unsigned int ncookies; + struct ldc_trans_cookie cookies[2]; +}; + +struct vnet; +struct vnet_port { + struct vio_driver_state vio; + + struct hlist_node hash; + u8 raddr[ETH_ALEN]; + u8 switch_port; + u8 __pad; + + struct vnet *vp; + + struct vnet_tx_entry tx_bufs[VNET_TX_RING_SIZE]; + + struct list_head list; +}; + +static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio) +{ + return container_of(vio, struct vnet_port, vio); +} + +#define VNET_PORT_HASH_SIZE 16 +#define VNET_PORT_HASH_MASK (VNET_PORT_HASH_SIZE - 1) + +static inline unsigned int vnet_hashfn(u8 *mac) +{ + unsigned int val = mac[4] ^ mac[5]; + + return val & (VNET_PORT_HASH_MASK); +} + +struct vnet_mcast_entry { + u8 addr[ETH_ALEN]; + u8 sent; + u8 hit; + struct vnet_mcast_entry *next; +}; + +struct vnet { + /* Protects port_list and port_hash. */ + spinlock_t lock; + + struct net_device *dev; + + u32 msg_enable; + + struct list_head port_list; + + struct hlist_head port_hash[VNET_PORT_HASH_SIZE]; + + struct vnet_mcast_entry *mcast_list; + + struct list_head list; + u64 local_mac; +}; + +#endif /* _SUNVNET_H */ diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index 75655add3f34..7f94ca930988 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c @@ -626,7 +626,7 @@ static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev) return -ENODEV; } #else -static int __devinit tc35815_read_plat_dev_addr(struct device *dev) +static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev) { return -ENODEV; } diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 2f3184184ad9..887b9a5cfe48 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -64,8 +64,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.77" -#define DRV_MODULE_RELDATE "May 31, 2007" +#define DRV_MODULE_VERSION "3.79" +#define DRV_MODULE_RELDATE "July 18, 2007" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -721,6 +721,44 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val) return ret; } +static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable) +{ + u32 phy; + + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || + (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) + return; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + u32 ephy; + + if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) { + tg3_writephy(tp, MII_TG3_EPHY_TEST, + ephy | MII_TG3_EPHY_SHADOW_EN); + if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) { + if (enable) + phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX; + else + phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX; + tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy); + } + tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy); + } + } else { + phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC | + MII_TG3_AUXCTL_SHDWSEL_MISC; + if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) && + !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) { + if (enable) + phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; + else + phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; + phy |= MII_TG3_AUXCTL_MISC_WREN; + tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); + } + } +} + static void tg3_phy_set_wirespeed(struct tg3 *tp) { u32 val; @@ -1045,23 +1083,11 @@ out: } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - u32 phy_reg; - /* adjust output voltage */ tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12); - - if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phy_reg)) { - u32 phy_reg2; - - tg3_writephy(tp, MII_TG3_EPHY_TEST, - phy_reg | MII_TG3_EPHY_SHADOW_EN); - 
/* Enable auto-MDIX */ - if (!tg3_readphy(tp, 0x10, &phy_reg2)) - tg3_writephy(tp, 0x10, phy_reg2 | 0x4000); - tg3_writephy(tp, MII_TG3_EPHY_TEST, phy_reg); - } } + tg3_phy_toggle_automdix(tp, 1); tg3_phy_set_wirespeed(tp); return 0; } @@ -1162,6 +1188,19 @@ static void tg3_frob_aux_power(struct tg3 *tp) } } +static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) +{ + if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) + return 1; + else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) { + if (speed != SPEED_10) + return 1; + } else if (speed == SPEED_10) + return 1; + + return 0; +} + static int tg3_setup_phy(struct tg3 *, int); #define RESET_KIND_SHUTDOWN 0 @@ -1320,9 +1359,17 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) else mac_mode = MAC_MODE_PORT_MODE_MII; - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 || - !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)) - mac_mode |= MAC_MODE_LINK_POLARITY; + mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5700) { + u32 speed = (tp->tg3_flags & + TG3_FLAG_WOL_SPEED_100MB) ? + SPEED_100 : SPEED_10; + if (tg3_5700_link_polarity(tp, speed)) + mac_mode |= MAC_MODE_LINK_POLARITY; + else + mac_mode &= ~MAC_MODE_LINK_POLARITY; + } } else { mac_mode = MAC_MODE_PORT_MODE_TBI; } @@ -1990,15 +2037,12 @@ relink: if (tp->link_config.active_duplex == DUPLEX_HALF) tp->mac_mode |= MAC_MODE_HALF_DUPLEX; - tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { - if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) || - (current_link_up == 1 && - tp->link_config.active_speed == SPEED_10)) - tp->mac_mode |= MAC_MODE_LINK_POLARITY; - } else { - if (current_link_up == 1) + if (current_link_up == 1 && + tg3_5700_link_polarity(tp, tp->link_config.active_speed)) tp->mac_mode |= MAC_MODE_LINK_POLARITY; + else + tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; } /* ??? Without this setting Netgear GA302T PHY does not @@ -2639,6 +2683,9 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); udelay(40); + + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); } out: @@ -2698,10 +2745,6 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) else current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); - tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); - tp->hw_status->status = (SD_STATUS_UPDATED | (tp->hw_status->status & ~SD_STATUS_LINK_CHG)); @@ -3512,9 +3555,9 @@ static inline int tg3_irq_sync(struct tg3 *tp) */ static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) { + spin_lock_bh(&tp->lock); if (irq_sync) tg3_irq_quiesce(tp); - spin_lock_bh(&tp->lock); } static inline void tg3_full_unlock(struct tg3 *tp) @@ -4804,6 +4847,59 @@ static int tg3_poll_fw(struct tg3 *tp) return 0; } +/* Save PCI command register before chip reset */ +static void tg3_save_pci_state(struct tg3 *tp) +{ + u32 val; + + pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val); + tp->pci_cmd = val; +} + +/* Restore PCI state after chip reset */ +static void tg3_restore_pci_state(struct tg3 *tp) +{ + u32 val; + + /* Re-enable indirect register accesses. */ + pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + tp->misc_host_ctrl); + + /* Set MAX PCI retry to zero. 
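+ * (Editor's aside, not in the patch itself: tg3_save_pci_state() and
+ * tg3_restore_pci_state() replace the driver's earlier blanket
+ * pci_save_state()/pci_restore_state() usage; later hunks in this
+ * file delete those calls and restore only the registers that the
+ * GRC_MISC_CFG core-clock reset actually clobbers.)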
*/ + val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); + if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && + (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) + val |= PCISTATE_RETRY_SAME_DMA; + pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); + + pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd); + + /* Make sure PCI-X relaxed ordering bit is clear. */ + pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val); + val &= ~PCIX_CAPS_RELAXED_ORDERING; + pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val); + + if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { + u32 val; + + /* Chip reset on 5780 will reset MSI enable bit, + * so need to restore it. + */ + if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { + u16 ctrl; + + pci_read_config_word(tp->pdev, + tp->msi_cap + PCI_MSI_FLAGS, + &ctrl); + pci_write_config_word(tp->pdev, + tp->msi_cap + PCI_MSI_FLAGS, + ctrl | PCI_MSI_FLAGS_ENABLE); + val = tr32(MSGINT_MODE); + tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); + } + } +} + static void tg3_stop_fw(struct tg3 *); /* tp->lock is held. */ @@ -4820,6 +4916,12 @@ static int tg3_chip_reset(struct tg3 *tp) */ tp->nvram_lock_cnt = 0; + /* GRC_MISC_CFG core clock reset will clear the memory + * enable bit in PCI register 4 and the MSI enable bit + * on some chips, so we save relevant registers here. + */ + tg3_save_pci_state(tp); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) @@ -4918,50 +5020,14 @@ static int tg3_chip_reset(struct tg3 *tp) pci_write_config_dword(tp->pdev, 0xd8, 0xf5000); } - /* Re-enable indirect register accesses. */ - pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, - tp->misc_host_ctrl); - - /* Set MAX PCI retry to zero. */ - val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); - if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && - (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) - val |= PCISTATE_RETRY_SAME_DMA; - pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); - - pci_restore_state(tp->pdev); + tg3_restore_pci_state(tp); tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING; - /* Make sure PCI-X relaxed ordering bit is clear. */ - pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val); - val &= ~PCIX_CAPS_RELAXED_ORDERING; - pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val); - - if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { - u32 val; - - /* Chip reset on 5780 will reset MSI enable bit, - * so need to restore it. 
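- * (Editor's note: this removed MSI-restore block is not lost; it
- * moves essentially verbatim into tg3_restore_pci_state() above.)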
- */ - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { - u16 ctrl; - - pci_read_config_word(tp->pdev, - tp->msi_cap + PCI_MSI_FLAGS, - &ctrl); - pci_write_config_word(tp->pdev, - tp->msi_cap + PCI_MSI_FLAGS, - ctrl | PCI_MSI_FLAGS_ENABLE); - val = tr32(MSGINT_MODE); - tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); - } - + val = 0; + if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) val = tr32(MEMARB_MODE); - tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); - - } else - tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); + tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) { tg3_stop_fw(tp); @@ -6444,6 +6510,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && + !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) + tp->mac_mode |= MAC_MODE_LINK_POLARITY; tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); udelay(40); @@ -8271,7 +8341,7 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data) if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) - ethtool_op_set_tx_hw_csum(dev, data); + ethtool_op_set_tx_ipv6_csum(dev, data); else ethtool_op_set_tx_csum(dev, data); @@ -8805,7 +8875,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) return 0; mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | - MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY; + MAC_MODE_PORT_INT_LPBACK; + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) + mac_mode |= MAC_MODE_LINK_POLARITY; if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) mac_mode |= MAC_MODE_PORT_MODE_MII; else @@ -8824,19 +8896,18 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) phytest | MII_TG3_EPHY_SHADOW_EN); if (!tg3_readphy(tp, 0x1b, &phy)) tg3_writephy(tp, 0x1b, phy & ~0x20); - if (!tg3_readphy(tp, 0x10, &phy)) - tg3_writephy(tp, 0x10, phy & ~0x4000); tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest); } val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100; } else val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000; + tg3_phy_toggle_automdix(tp, 0); + tg3_writephy(tp, MII_BMCR, val); udelay(40); - mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | - MAC_MODE_LINK_POLARITY; + mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800); mac_mode |= MAC_MODE_PORT_MODE_MII; @@ -8849,8 +8920,11 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) udelay(10); tw32_f(MAC_RX_MODE, tp->rx_mode); } - if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { - mac_mode &= ~MAC_MODE_LINK_POLARITY; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { + if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) + mac_mode &= ~MAC_MODE_LINK_POLARITY; + else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) + mac_mode |= MAC_MODE_LINK_POLARITY; tg3_writephy(tp, MII_TG3_EXT_CTRL, MII_TG3_EXT_CTRL_LNK3_LED_MODE); } @@ -9116,10 +9190,10 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. 
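 * (Editor's inference, not stated in the changelog: the hunk below
 * moves tg3_full_unlock() after tg3_netif_start(), keeping the queue
 * restart under tp->lock so it cannot race another configuration
 * path.)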
*/ __tg3_set_rx_mode(dev); - tg3_full_unlock(tp); - if (netif_running(dev)) tg3_netif_start(tp); + + tg3_full_unlock(tp); } #endif @@ -9410,11 +9484,13 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) case FLASH_5755VENDOR_ATMEL_FLASH_1: case FLASH_5755VENDOR_ATMEL_FLASH_2: case FLASH_5755VENDOR_ATMEL_FLASH_3: + case FLASH_5755VENDOR_ATMEL_FLASH_5: tp->nvram_jedecnum = JEDEC_ATMEL; tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; tp->tg3_flags2 |= TG3_FLG2_FLASH; tp->nvram_pagesize = 264; - if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1) + if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || + nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) tp->nvram_size = (protect ? 0x3e200 : 0x80000); else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) tp->nvram_size = (protect ? 0x1f200 : 0x40000); @@ -10498,11 +10574,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) continue; } if (pci_id->rev != PCI_ANY_ID) { - u8 rev; - - pci_read_config_byte(bridge, PCI_REVISION_ID, - &rev); - if (rev > pci_id->rev) + if (bridge->revision > pci_id->rev) continue; } if (bridge->subordinate && @@ -11929,7 +12001,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, */ if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { - pci_save_state(tp->pdev); tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); } @@ -11944,12 +12015,11 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, * checksumming. */ if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) { + dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) - dev->features |= NETIF_F_HW_CSUM; - else - dev->features |= NETIF_F_IP_CSUM; - dev->features |= NETIF_F_SG; + dev->features |= NETIF_F_IPV6_CSUM; + tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; } else tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; @@ -11959,12 +12029,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, tg3_init_coal(tp); - /* Now that we have fully setup the chip, save away a snapshot - * of the PCI config space. We need to restore this after - * GRC_MISC_CFG core clock resets and some resume events. 
- */ - pci_save_state(tp->pdev); - pci_set_drvdata(pdev, dev); err = register_netdev(dev); diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index bd9f4f428e5b..5c21f49026c9 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h @@ -1467,6 +1467,7 @@ #define FLASH_5755VENDOR_ATMEL_FLASH_2 0x03400002 #define FLASH_5755VENDOR_ATMEL_FLASH_3 0x03400000 #define FLASH_5755VENDOR_ATMEL_FLASH_4 0x00000003 +#define FLASH_5755VENDOR_ATMEL_FLASH_5 0x02000003 #define FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ 0x03c00003 #define FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ 0x03c00002 #define FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ 0x03000003 @@ -1642,6 +1643,11 @@ #define MII_TG3_AUX_CTRL 0x18 /* auxilliary control register */ +#define MII_TG3_AUXCTL_MISC_WREN 0x8000 +#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200 +#define MII_TG3_AUXCTL_MISC_RDSEL_MISC 0x7000 +#define MII_TG3_AUXCTL_SHDWSEL_MISC 0x0007 + #define MII_TG3_AUX_STAT 0x19 /* auxilliary status register */ #define MII_TG3_AUX_STAT_LPASS 0x0004 #define MII_TG3_AUX_STAT_SPDMASK 0x0700 @@ -1667,6 +1673,9 @@ #define MII_TG3_EPHY_TEST 0x1f /* 5906 PHY register */ #define MII_TG3_EPHY_SHADOW_EN 0x80 +#define MII_TG3_EPHYTST_MISCCTRL 0x10 /* 5906 EPHY misc ctrl shadow register */ +#define MII_TG3_EPHYTST_MISCCTRL_MDIX 0x4000 + #define MII_TG3_TEST1 0x1e #define MII_TG3_TEST1_TRIM_EN 0x0010 #define MII_TG3_TEST1_CRC_EN 0x8000 @@ -2336,6 +2345,7 @@ struct tg3 { #define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */ u32 led_ctrl; + u32 pci_cmd; char board_part_number[24]; char fw_ver[16]; diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c index 106dc1ef0acb..74eb12107e68 100644 --- a/drivers/net/tlan.c +++ b/drivers/net/tlan.c @@ -533,7 +533,6 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, struct net_device *dev; TLanPrivateInfo *priv; - u8 pci_rev; u16 device_id; int reg, rc = -ENODEV; @@ -577,8 +576,6 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, goto err_out_free_dev; } - pci_read_config_byte ( pdev, PCI_REVISION_ID, &pci_rev); - for ( reg= 0; reg <= 5; reg ++ ) { if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) { pci_io_base = pci_resource_start(pdev, reg); @@ -595,7 +592,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, dev->base_addr = pci_io_base; dev->irq = pdev->irq; - priv->adapterRev = pci_rev; + priv->adapterRev = pdev->revision; pci_set_master(pdev); pci_set_drvdata(pdev, dev); diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c index 58d7e5d452fa..f83bb5cb0d3d 100644 --- a/drivers/net/tokenring/smctr.c +++ b/drivers/net/tokenring/smctr.c @@ -3692,7 +3692,6 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size, __u16 rcode, correlator; int err = 0; __u8 xframe = 1; - __u16 tx_fstatus; rmf->vl = SWAP_BYTES(rmf->vl); if(rx_status & FCB_RX_STATUS_DA_MATCHED) @@ -3783,7 +3782,9 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size, } break; - case TX_FORWARD: + case TX_FORWARD: { + __u16 uninitialized_var(tx_fstatus); + if((rcode = smctr_rcv_tx_forward(dev, rmf)) != POSITIVE_ACK) { @@ -3811,6 +3812,7 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size, } } break; + } /* Received MAC Frames Processed by CRS/REM/RPS. 
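 * (Editor's note: in the TX_FORWARD case above, tx_fstatus is now
 * scoped to the block and wrapped in uninitialized_var(), the kernel
 * annotation intended to silence a false "may be used uninitialized"
 * compiler warning without generating extra code.)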
*/ case RSP: diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c index 42fca26afc50..09902891a6e6 100644 --- a/drivers/net/tulip/de4x5.c +++ b/drivers/net/tulip/de4x5.c @@ -2134,7 +2134,7 @@ srom_search(struct net_device *dev, struct pci_dev *pdev) u_short vendor, status; u_int irq = 0, device; u_long iobase = 0; /* Clear upper 32 bits in Alphas */ - int i, j, cfrv; + int i, j; struct de4x5_private *lp = netdev_priv(dev); struct list_head *walk; @@ -2150,7 +2150,6 @@ srom_search(struct net_device *dev, struct pci_dev *pdev) /* Get the chip configuration revision register */ pb = this_dev->bus->number; - pci_read_config_dword(this_dev, PCI_REVISION_ID, &cfrv); /* Set the device number information */ lp->device = PCI_SLOT(this_dev->devfn); @@ -2158,7 +2157,8 @@ srom_search(struct net_device *dev, struct pci_dev *pdev) /* Set the chipset information */ if (is_DC2114x) { - device = ((cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143); + device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK + ? DC21142 : DC21143); } lp->chipset = device; @@ -2254,7 +2254,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev, } /* Get the chip configuration revision register */ - pci_read_config_dword(pdev, PCI_REVISION_ID, &lp->cfrv); + lp->cfrv = pdev->revision; /* Set the device number information */ lp->device = dev_num; diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c index 4ed67ff0e81e..dab74feb44bc 100644 --- a/drivers/net/tulip/dmfe.c +++ b/drivers/net/tulip/dmfe.c @@ -181,11 +181,12 @@ udelay(5); #define __CHK_IO_SIZE(pci_id, dev_rev) \ - (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? \ + (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \ DM9102A_IO_SIZE: DM9102_IO_SIZE) -#define CHK_IO_SIZE(pci_dev, dev_rev) \ - (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev)) +#define CHK_IO_SIZE(pci_dev) \ + (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \ + (pci_dev)->revision)) /* Sten Check */ #define DEVICE net_device @@ -205,7 +206,7 @@ struct rx_desc { struct dmfe_board_info { u32 chip_id; /* Chip vendor/Device ID */ - u32 chip_revision; /* Chip revision */ + u8 chip_revision; /* Chip revision */ struct DEVICE *next_dev; /* next device */ struct pci_dev *pdev; /* PCI device */ spinlock_t lock; @@ -359,7 +360,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, { struct dmfe_board_info *db; /* board information structure */ struct net_device *dev; - u32 dev_rev, pci_pmr; + u32 pci_pmr; int i, err; DMFE_DBUG(0, "dmfe_init_one()", 0); @@ -392,10 +393,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, goto err_out_disable; } - /* Read Chip revision */ - pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev); - - if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev, dev_rev)) ) { + if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) { printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n"); err = -ENODEV; goto err_out_disable; @@ -433,7 +431,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, db->chip_id = ent->driver_data; db->ioaddr = pci_resource_start(pdev, 0); - db->chip_revision = dev_rev; + db->chip_revision = pdev->revision; db->wol_mode = 0; db->pdev = pdev; @@ -455,7 +453,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, pci_read_config_dword(pdev, 0x50, &pci_pmr); pci_pmr &= 0x70000; - if ( (pci_pmr == 0x10000) && (dev_rev == 0x02000031) ) + if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) ) db->chip_type = 1; /* DM9102A E3 */ else db->chip_type = 
0; @@ -553,7 +551,7 @@ static int dmfe_open(struct DEVICE *dev) /* CR6 operation mode decision */ if ( !chkmode || (db->chip_id == PCI_DM9132_ID) || - (db->chip_revision >= 0x02000030) ) { + (db->chip_revision >= 0x30) ) { db->cr6_data |= DMFE_TXTH_256; db->cr0_data = CR0_DEFAULT; db->dm910x_chk_mode=4; /* Enter the normal mode */ @@ -1199,9 +1197,9 @@ static void dmfe_timer(unsigned long data) tmp_cr12 = inb(db->ioaddr + DCR12); /* DM9102/DM9102A */ if ( ((db->chip_id == PCI_DM9102_ID) && - (db->chip_revision == 0x02000030)) || + (db->chip_revision == 0x30)) || ((db->chip_id == PCI_DM9132_ID) && - (db->chip_revision == 0x02000010)) ) { + (db->chip_revision == 0x10)) ) { /* DM9102A Chip */ if (tmp_cr12 & 2) link_ok = 0; diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c index ea896777bcaf..53efd6694e75 100644 --- a/drivers/net/tulip/interrupt.c +++ b/drivers/net/tulip/interrupt.c @@ -197,8 +197,8 @@ int tulip_poll(struct net_device *dev, int *budget) tp->rx_buffers[entry].mapping, pkt_len, PCI_DMA_FROMDEVICE); #if ! defined(__alpha__) - eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data, - pkt_len, 0); + skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data, + pkt_len); skb_put(skb, pkt_len); #else memcpy(skb_put(skb, pkt_len), @@ -420,8 +420,8 @@ static int tulip_rx(struct net_device *dev) tp->rx_buffers[entry].mapping, pkt_len, PCI_DMA_FROMDEVICE); #if ! defined(__alpha__) - eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data, - pkt_len, 0); + skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data, + pkt_len); skb_put(skb, pkt_len); #else memcpy(skb_put(skb, pkt_len), diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index 041af63f2811..f87d76981ab7 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c @@ -1155,7 +1155,7 @@ static void __devinit tulip_mwi_config (struct pci_dev *pdev, /* set or disable MWI in the standard PCI command bit. * Check for the case where mwi is desired but not available */ - if (csr0 & MWI) pci_set_mwi(pdev); + if (csr0 & MWI) pci_try_set_mwi(pdev); else pci_clear_mwi(pdev); /* read result from hardware (in case bit refused to enable) */ @@ -1238,7 +1238,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, }; static int last_irq; static int multiport_cnt; /* For four-port boards w/one EEPROM */ - u8 chip_rev; int i, irq; unsigned short sum; unsigned char *ee_data; @@ -1274,10 +1273,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, if (pdev->vendor == 0x1282 && pdev->device == 0x9100) { - u32 dev_rev; /* Read Chip revision */ - pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev); - if(dev_rev < 0x02000030) + if (pdev->revision < 0x30) { printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n"); return -ENODEV; @@ -1360,8 +1357,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, if (!ioaddr) goto err_out_free_res; - pci_read_config_byte (pdev, PCI_REVISION_ID, &chip_rev); - /* * initialize private data structure 'tp' * it is zeroed and aligned in alloc_etherdev @@ -1382,7 +1377,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, tp->flags = tulip_tbl[chip_idx].flags; tp->pdev = pdev; tp->base_addr = ioaddr; - tp->revision = chip_rev; + tp->revision = pdev->revision; tp->csr0 = csr0; spin_lock_init(&tp->lock); spin_lock_init(&tp->mii_lock); @@ -1399,7 +1394,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, tulip_mwi_config (pdev, dev); #else /* MWI is broken for DC21143 rev 65... 
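 * (Editor's aside: here, as throughout this patch, explicit
 * pci_read_config_byte/dword(..., PCI_REVISION_ID, ...) reads give
 * way to pdev->revision, which the PCI core caches at enumeration
 * time; this is also why dmfe's revision thresholds shrink from
 * dword constants such as 0x02000030 to the bare byte 0x30.)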
*/ - if (chip_idx == DC21143 && chip_rev == 65) + if (chip_idx == DC21143 && pdev->revision == 65) tp->csr0 &= ~MWI; #endif @@ -1640,7 +1635,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, #else "Port" #endif - " %#llx,", dev->name, chip_name, chip_rev, + " %#llx,", dev->name, chip_name, pdev->revision, (unsigned long long) pci_resource_start(pdev, TULIP_BAR)); pci_set_drvdata(pdev, dev); diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index 38f3b99716b8..5824f6a35495 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c @@ -1232,7 +1232,7 @@ static int netdev_rx(struct net_device *dev) pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry], np->rx_skbuff[entry]->len, PCI_DMA_FROMDEVICE); - eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0); + skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len); skb_put(skb, pkt_len); pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry], np->rx_skbuff[entry]->len, diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c index 2470b1ee33c0..16a54e6b8d4f 100644 --- a/drivers/net/tulip/xircom_cb.c +++ b/drivers/net/tulip/xircom_cb.c @@ -205,7 +205,6 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_ { struct net_device *dev = NULL; struct xircom_private *private; - unsigned char chip_rev; unsigned long flags; unsigned short tmp16; enter("xircom_probe"); @@ -224,8 +223,6 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_ pci_read_config_word (pdev,PCI_STATUS, &tmp16); pci_write_config_word (pdev, PCI_STATUS,tmp16); - pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev); - if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) { printk(KERN_ERR "xircom_probe: failed to allocate io-region\n"); return -ENODEV; @@ -286,7 +283,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_ goto reg_fail; } - printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, chip_rev, pdev->irq); + printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, pdev->revision, pdev->irq); /* start the transmitter to get a heartbeat */ /* TODO: send 2 dummy packets here */ transceiver_voodoo(private); @@ -1208,7 +1205,7 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri goto out; } skb_reserve(skb, 2); - eth_copy_and_sum(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len, 0); + skb_copy_to_linear_data(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c index f64172927377..fc439f333350 100644 --- a/drivers/net/tulip/xircom_tulip_cb.c +++ b/drivers/net/tulip/xircom_tulip_cb.c @@ -524,7 +524,6 @@ static int __devinit xircom_init_one(struct pci_dev *pdev, const struct pci_devi int chip_idx = id->driver_data; long ioaddr; int i; - u8 chip_rev; /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -620,9 +619,8 @@ static int __devinit xircom_init_one(struct pci_dev *pdev, const struct pci_devi if (register_netdev(dev)) goto err_out_cleardev; - pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev); printk(KERN_INFO "%s: %s rev %d at %#3lx,", - dev->name, xircom_tbl[chip_idx].chip_name, chip_rev, ioaddr); + dev->name, xircom_tbl[chip_idx].chip_name, 
pdev->revision, ioaddr); for (i = 0; i < 6; i++) printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]); printk(", IRQ %d.\n", dev->irq); @@ -1242,8 +1240,8 @@ xircom_rx(struct net_device *dev) && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { skb_reserve(skb, 2); /* 16 byte align the IP header */ #if ! defined(__alpha__) - eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1), - pkt_len, 0); + skb_copy_to_linear_data(skb, bus_to_virt(tp->rx_ring[entry].buffer1), + pkt_len); skb_put(skb, pkt_len); #else memcpy(skb_put(skb, pkt_len), diff --git a/drivers/net/tun.c b/drivers/net/tun.c index a2c6caaaae93..62b2b3005019 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -432,6 +432,7 @@ static void tun_setup(struct net_device *dev) init_waitqueue_head(&tun->read_wait); tun->owner = -1; + tun->group = -1; SET_MODULE_OWNER(dev); dev->open = tun_net_open; @@ -467,8 +468,11 @@ static int tun_set_iff(struct file *file, struct ifreq *ifr) return -EBUSY; /* Check permissions */ - if (tun->owner != -1 && - current->euid != tun->owner && !capable(CAP_NET_ADMIN)) + if (((tun->owner != -1 && + current->euid != tun->owner) || + (tun->group != -1 && + current->egid != tun->group)) && + !capable(CAP_NET_ADMIN)) return -EPERM; } else if (__dev_get_by_name(ifr->ifr_name)) @@ -610,6 +614,13 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner); break; + case TUNSETGROUP: + /* Set group of the device */ + tun->group= (gid_t) arg; + + DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group); + break; + case TUNSETLINK: /* Only allow setting the type when the interface is down */ if (tun->dev->flags & IFF_UP) { diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index 15b2fb8aa492..03587205546e 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c @@ -1703,7 +1703,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready, pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); - eth_copy_and_sum(new_skb, skb->data, pkt_len, 0); + skb_copy_to_linear_data(new_skb, skb->data, pkt_len); pci_dma_sync_single_for_device(tp->pdev, dma_addr, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); @@ -2267,12 +2267,6 @@ need_resume: typhoon_resume(pdev); return -EBUSY; } - -static int -typhoon_enable_wake(struct pci_dev *pdev, pci_power_t state, int enable) -{ - return pci_enable_wake(pdev, state, enable); -} #endif static int __devinit @@ -2636,7 +2630,6 @@ static struct pci_driver typhoon_driver = { #ifdef CONFIG_PM .suspend = typhoon_suspend, .resume = typhoon_resume, - .enable_wake = typhoon_enable_wake, #endif }; diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 86e90c59d551..76752d84a30f 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -255,7 +255,7 @@ static void catc_rx_done(struct urb *urb) if (!(skb = dev_alloc_skb(pkt_len))) return; - eth_copy_and_sum(skb, pkt_start + pkt_offset, pkt_len, 0); + skb_copy_to_linear_data(skb, pkt_start + pkt_offset, pkt_len); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, catc->netdev); diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c index bc62b012602b..943988ed01d8 100644 --- a/drivers/net/usb/cdc_subset.c +++ b/drivers/net/usb/cdc_subset.c @@ -305,6 +305,9 @@ static const struct usb_device_id products [] = { USB_DEVICE (0x8086, 0x07d3), // "blob" bootloader .driver_info = (unsigned long) &blob_info, }, { + USB_DEVICE (0x1286, 0x8001), // "blob" bootloader + 
.driver_info = (unsigned long) &blob_info, +}, { // Linux Ethernet/RNDIS gadget on pxa210/25x/26x, second config // e.g. Gumstix, current OpenZaurus, ... USB_DEVICE_VER (0x0525, 0xa4a2, 0x0203, 0x0203), diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index 60d29440f316..524dc5f5e46d 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -635,7 +635,7 @@ static void kaweth_usb_receive(struct urb *urb) skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ - eth_copy_and_sum(skb, kaweth->rx_buf + 2, pkt_len, 0); + skb_copy_to_linear_data(skb, kaweth->rx_buf + 2, pkt_len); skb_put(skb, pkt_len); diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index adea290a9d5e..f51c2c138f10 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c @@ -622,7 +622,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, struct net_device *dev; struct rhine_private *rp; int i, rc; - u8 pci_rev; u32 quirks; long pioaddr; long memaddr; @@ -642,27 +641,25 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, printk(version); #endif - pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev); - io_size = 256; phy_id = 0; quirks = 0; name = "Rhine"; - if (pci_rev < VTunknown0) { + if (pdev->revision < VTunknown0) { quirks = rqRhineI; io_size = 128; } - else if (pci_rev >= VT6102) { + else if (pdev->revision >= VT6102) { quirks = rqWOL | rqForceReset; - if (pci_rev < VT6105) { + if (pdev->revision < VT6105) { name = "Rhine II"; quirks |= rqStatusWBRace; /* Rhine-II exclusive */ } else { phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */ - if (pci_rev >= VT6105_B0) + if (pdev->revision >= VT6105_B0) quirks |= rq6patterns; - if (pci_rev < VT6105M) + if (pdev->revision < VT6105M) name = "Rhine III"; else name = "Rhine III (Management Adapter)"; @@ -1492,9 +1489,9 @@ static int rhine_rx(struct net_device *dev, int limit) rp->rx_buf_sz, PCI_DMA_FROMDEVICE); - eth_copy_and_sum(skb, + skb_copy_to_linear_data(skb, rp->rx_skbuff[entry]->data, - pkt_len, 0); + pkt_len); skb_put(skb, pkt_len); pci_dma_sync_single_for_device(rp->pdev, rp->rx_skbuff_dma[entry], diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index b670b97bcfde..f331843d1102 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -890,8 +890,7 @@ static void __devinit velocity_init_info(struct pci_dev *pdev, static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev) { - if (pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0) - return -EIO; + vptr->rev_id = pdev->revision; pci_set_master(pdev); diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 4fc8681bc110..a3df09ee729f 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -61,7 +61,7 @@ config COSA # config LANMEDIA tristate "LanMedia Corp. 
SSI/V.35, T1/E1, HSSI, T3 boards" - depends on PCI + depends on PCI && VIRT_TO_BUS ---help--- Driver for the following Lan Media family of serial boards: diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c index 6b63b350cd52..8ead774d14c8 100644 --- a/drivers/net/wan/c101.c +++ b/drivers/net/wan/c101.c @@ -315,12 +315,11 @@ static int __init c101_run(unsigned long irq, unsigned long winbase) return -ENODEV; } - card = kmalloc(sizeof(card_t), GFP_KERNEL); + card = kzalloc(sizeof(card_t), GFP_KERNEL); if (card == NULL) { printk(KERN_ERR "c101: unable to allocate memory\n"); return -ENOBUFS; } - memset(card, 0, sizeof(card_t)); card->dev = alloc_hdlcdev(card); if (!card->dev) { diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 9ef49ce148b2..26058b4f8f36 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -572,13 +572,11 @@ static int cosa_probe(int base, int irq, int dma) sprintf(cosa->name, "cosa%d", cosa->num); /* Initialize the per-channel data */ - cosa->chan = kmalloc(sizeof(struct channel_data)*cosa->nchannels, - GFP_KERNEL); + cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL); if (!cosa->chan) { err = -ENOMEM; goto err_out3; } - memset(cosa->chan, 0, sizeof(struct channel_data)*cosa->nchannels); for (i=0; i<cosa->nchannels; i++) { cosa->chan[i].cosa = cosa; cosa->chan[i].num = i; diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c index 6e5f1c898517..a0e8611ad8e8 100644 --- a/drivers/net/wan/cycx_main.c +++ b/drivers/net/wan/cycx_main.c @@ -113,12 +113,10 @@ static int __init cycx_init(void) /* Verify number of cards and allocate adapter data space */ cycx_ncards = min_t(int, cycx_ncards, CYCX_MAX_CARDS); cycx_ncards = max_t(int, cycx_ncards, 1); - cycx_card_array = kmalloc(sizeof(struct cycx_device) * cycx_ncards, - GFP_KERNEL); + cycx_card_array = kcalloc(cycx_ncards, sizeof(struct cycx_device), GFP_KERNEL); if (!cycx_card_array) goto out; - memset(cycx_card_array, 0, sizeof(struct cycx_device) * cycx_ncards); /* Register adapters with WAN router */ for (cnt = 0; cnt < cycx_ncards; ++cnt) { diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c index 016b3ff3ea5e..a8af28b273d3 100644 --- a/drivers/net/wan/cycx_x25.c +++ b/drivers/net/wan/cycx_x25.c @@ -376,11 +376,10 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev, } /* allocate and initialize private data */ - chan = kmalloc(sizeof(struct cycx_x25_channel), GFP_KERNEL); + chan = kzalloc(sizeof(struct cycx_x25_channel), GFP_KERNEL); if (!chan) return -ENOMEM; - memset(chan, 0, sizeof(*chan)); strcpy(chan->name, conf->name); chan->card = card; chan->link = conf->port; diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index dca024471455..50d2f9108dca 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -890,12 +890,11 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr) struct dscc4_dev_priv *root; int i, ret = -ENOMEM; - root = kmalloc(dev_per_card*sizeof(*root), GFP_KERNEL); + root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL); if (!root) { printk(KERN_ERR "%s: can't allocate data\n", DRV_NAME); goto err_out; } - memset(root, 0, dev_per_card*sizeof(*root)); for (i = 0; i < dev_per_card; i++) { root[i].dev = alloc_hdlcdev(root + i); @@ -903,12 +902,11 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr) goto err_free_dev; } - ppriv = kmalloc(sizeof(*ppriv), GFP_KERNEL); + ppriv = kzalloc(sizeof(*ppriv), GFP_KERNEL); if (!ppriv) { 
printk(KERN_ERR "%s: can't allocate private data\n", DRV_NAME); goto err_free_dev; } - memset(ppriv, 0, sizeof(struct dscc4_pci_priv)); ppriv->root = root; spin_lock_init(&ppriv->lock); diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 58a53b6d9b42..12dae8e24844 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -2476,13 +2476,12 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) } /* Allocate driver private data */ - card = kmalloc(sizeof (struct fst_card_info), GFP_KERNEL); + card = kzalloc(sizeof (struct fst_card_info), GFP_KERNEL); if (card == NULL) { printk_err("FarSync card found but insufficient memory for" " driver storage\n"); return -ENOMEM; } - memset(card, 0, sizeof (struct fst_card_info)); /* Try to enable the device */ if ((err = pci_enable_device(pdev)) != 0) { diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c index 9ba3e4ee6ec7..bf5f8d9b5c83 100644 --- a/drivers/net/wan/hostess_sv11.c +++ b/drivers/net/wan/hostess_sv11.c @@ -231,11 +231,10 @@ static struct sv11_device *sv11_init(int iobase, int irq) return NULL; } - sv = kmalloc(sizeof(struct sv11_device), GFP_KERNEL); + sv = kzalloc(sizeof(struct sv11_device), GFP_KERNEL); if(!sv) goto fail3; - memset(sv, 0, sizeof(*sv)); sv->if_ptr=&sv->netdev; sv->netdev.dev = alloc_netdev(0, "hdlc%d", sv11_setup); diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c index 5c322dfb79f6..cbdf0b748bde 100644 --- a/drivers/net/wan/n2.c +++ b/drivers/net/wan/n2.c @@ -351,12 +351,11 @@ static int __init n2_run(unsigned long io, unsigned long irq, return -ENODEV; } - card = kmalloc(sizeof(card_t), GFP_KERNEL); + card = kzalloc(sizeof(card_t), GFP_KERNEL); if (card == NULL) { printk(KERN_ERR "n2: unable to allocate memory\n"); return -ENOBUFS; } - memset(card, 0, sizeof(card_t)); card->ports[0].dev = alloc_hdlcdev(&card->ports[0]); card->ports[1].dev = alloc_hdlcdev(&card->ports[1]); diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c index 999bf71937ca..99fee2f1d019 100644 --- a/drivers/net/wan/pc300_drv.c +++ b/drivers/net/wan/pc300_drv.c @@ -2833,6 +2833,8 @@ static int clock_rate_calc(uclong rate, uclong clock, int *br_io) int br, tc; int br_pwr, error; + *br_io = 0; + if (rate == 0) return (0); @@ -3439,7 +3441,6 @@ static int __devinit cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int first_time = 1; - ucchar cpc_rev_id; int err, eeprom_outdated = 0; ucshort device_id; pc300_t *card; @@ -3455,7 +3456,7 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if ((err = pci_enable_device(pdev)) < 0) return err; - card = kmalloc(sizeof(pc300_t), GFP_KERNEL); + card = kzalloc(sizeof(pc300_t), GFP_KERNEL); if (card == NULL) { printk("PC300 found at RAM 0x%016llx, " "but could not allocate card structure.\n", @@ -3463,7 +3464,6 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err = -ENOMEM; goto err_disable_dev; } - memset(card, 0, sizeof(pc300_t)); err = -ENODEV; @@ -3480,7 +3480,6 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) card->hw.falcsize = pci_resource_len(pdev, 4); card->hw.plxphys = pci_resource_start(pdev, 5); card->hw.plxsize = pci_resource_len(pdev, 5); - pci_read_config_byte(pdev, PCI_REVISION_ID, &cpc_rev_id); switch (device_id) { case PCI_DEVICE_ID_PC300_RX_1: @@ -3498,7 +3497,7 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } #ifdef PC300_DEBUG_PCI printk("cpc (bus=0x0%x,pci_id=0x%x,", 
pdev->bus->number, pdev->devfn); - printk("rev_id=%d) IRQ%d\n", cpc_rev_id, card->hw.irq); + printk("rev_id=%d) IRQ%d\n", pdev->revision, card->hw.irq); printk("cpc:found ramaddr=0x%08lx plxaddr=0x%08lx " "ctladdr=0x%08lx falcaddr=0x%08lx\n", card->hw.ramphys, card->hw.plxphys, card->hw.scaphys, diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c index aff05dba720a..6353cb5c658d 100644 --- a/drivers/net/wan/pc300too.c +++ b/drivers/net/wan/pc300too.c @@ -311,7 +311,6 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { card_t *card; - u8 rev_id; u32 __iomem *p; int i; u32 ramsize; @@ -335,14 +334,13 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev, return i; } - card = kmalloc(sizeof(card_t), GFP_KERNEL); + card = kzalloc(sizeof(card_t), GFP_KERNEL); if (card == NULL) { printk(KERN_ERR "pc300: unable to allocate memory\n"); pci_release_regions(pdev); pci_disable_device(pdev); return -ENOBUFS; } - memset(card, 0, sizeof(card_t)); pci_set_drvdata(pdev, card); if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 || @@ -366,7 +364,6 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev, return -ENOMEM; } - pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE || pci_resource_len(pdev, 2) != PC300_SCA_SIZE || pci_resource_len(pdev, 3) < 16384) { diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c index ca06a00d9d86..092e51d89036 100644 --- a/drivers/net/wan/pci200syn.c +++ b/drivers/net/wan/pci200syn.c @@ -289,7 +289,6 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { card_t *card; - u8 rev_id; u32 __iomem *p; int i; u32 ramsize; @@ -313,14 +312,13 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev, return i; } - card = kmalloc(sizeof(card_t), GFP_KERNEL); + card = kzalloc(sizeof(card_t), GFP_KERNEL); if (card == NULL) { printk(KERN_ERR "pci200syn: unable to allocate memory\n"); pci_release_regions(pdev); pci_disable_device(pdev); return -ENOBUFS; } - memset(card, 0, sizeof(card_t)); pci_set_drvdata(pdev, card); card->ports[0].dev = alloc_hdlcdev(&card->ports[0]); card->ports[1].dev = alloc_hdlcdev(&card->ports[1]); @@ -330,7 +328,6 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev, return -ENOMEM; } - pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); if (pci_resource_len(pdev, 0) != PCI200SYN_PLX_SIZE || pci_resource_len(pdev, 2) != PCI200SYN_SCA_SIZE || pci_resource_len(pdev, 3) < 16384) { diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c index 35eded7ffb2d..1cc18e787a65 100644 --- a/drivers/net/wan/sbni.c +++ b/drivers/net/wan/sbni.c @@ -595,8 +595,8 @@ recv_frame( struct net_device *dev ) u32 crc = CRC32_INITIAL; - unsigned framelen, frameno, ack; - unsigned is_first, frame_ok; + unsigned framelen = 0, frameno, ack; + unsigned is_first, frame_ok = 0; if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) { frame_ok = framelen > 4 @@ -604,8 +604,7 @@ recv_frame( struct net_device *dev ) : skip_tail( ioaddr, framelen, crc ); if( frame_ok ) interpret_ack( dev, ack ); - } else - frame_ok = 0; + } outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 ); if( frame_ok ) { diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c index 6a485f0556f4..792e588d7d61 100644 --- a/drivers/net/wan/sdla.c +++ b/drivers/net/wan/sdla.c @@ -1196,10 +1196,9 @@ static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int r if (read) { - 
temp = kmalloc(mem.len, GFP_KERNEL); + temp = kzalloc(mem.len, GFP_KERNEL); if (!temp) return(-ENOMEM); - memset(temp, 0, mem.len); sdla_read(dev, mem.addr, temp, mem.len); if(copy_to_user(mem.data, temp, mem.len)) { diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c index 131358108c5a..11276bf3149f 100644 --- a/drivers/net/wan/sealevel.c +++ b/drivers/net/wan/sealevel.c @@ -270,11 +270,10 @@ static __init struct slvl_board *slvl_init(int iobase, int irq, return NULL; } - b = kmalloc(sizeof(struct slvl_board), GFP_KERNEL); + b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL); if(!b) goto fail3; - memset(b, 0, sizeof(*b)); if (!(b->dev[0]= slvl_alloc(iobase, irq))) goto fail2; diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c index c73601574334..3c78f9856380 100644 --- a/drivers/net/wan/wanxl.c +++ b/drivers/net/wan/wanxl.c @@ -599,7 +599,7 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev, } alloc_size = sizeof(card_t) + ports * sizeof(port_t); - card = kmalloc(alloc_size, GFP_KERNEL); + card = kzalloc(alloc_size, GFP_KERNEL); if (card == NULL) { printk(KERN_ERR "wanXL %s: unable to allocate memory\n", pci_name(pdev)); @@ -607,7 +607,6 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev, pci_disable_device(pdev); return -ENOBUFS; } - memset(card, 0, alloc_size); pci_set_drvdata(pdev, card); card->pdev = pdev; diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 1c9edd97accd..c48b1cc63fd5 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -786,14 +786,12 @@ static int __init init_x25_asy(void) printk(KERN_INFO "X.25 async: version 0.00 ALPHA " "(dynamic channels, max=%d).\n", x25_asy_maxdev ); - x25_asy_devs = kmalloc(sizeof(struct net_device *)*x25_asy_maxdev, - GFP_KERNEL); + x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device*), GFP_KERNEL); if (!x25_asy_devs) { printk(KERN_WARNING "X25 async: Can't allocate x25_asy_ctrls[] " "array! Uaargh! (-> No X.25 available)\n"); return -ENOMEM; } - memset(x25_asy_devs, 0, sizeof(struct net_device *)*x25_asy_maxdev); return tty_register_ldisc(N_X25, &x25_ldisc); } diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index 2d3a180dada0..ee1cc14db389 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -52,6 +52,8 @@ #include "airo.h" +#define DRV_NAME "airo" + #ifdef CONFIG_PCI static struct pci_device_id card_ids[] = { { 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, }, @@ -71,7 +73,7 @@ static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state); static int airo_pci_resume(struct pci_dev *pdev); static struct pci_driver airo_driver = { - .name = "airo", + .name = DRV_NAME, .id_table = card_ids, .probe = airo_pci_probe, .remove = __devexit_p(airo_pci_remove), @@ -1092,7 +1094,7 @@ static int get_dec_u16( char *buffer, int *start, int limit ); static void OUT4500( struct airo_info *, u16 register, u16 value ); static unsigned short IN4500( struct airo_info *, u16 register ); static u16 setup_card(struct airo_info*, u8 *mac, int lock); -static int enable_MAC( struct airo_info *ai, Resp *rsp, int lock ); +static int enable_MAC(struct airo_info *ai, int lock); static void disable_MAC(struct airo_info *ai, int lock); static void enable_interrupts(struct airo_info*); static void disable_interrupts(struct airo_info*); @@ -1250,7 +1252,7 @@ static int flashputbuf(struct airo_info *ai); static int flashrestart(struct airo_info *ai,struct net_device *dev); #define airo_print(type, name, fmt, args...) 
\ - { printk(type "airo(%s): " fmt "\n", name, ##args); } + printk(type DRV_NAME "(%s): " fmt "\n", name, ##args) #define airo_print_info(name, fmt, args...) \ airo_print(KERN_INFO, name, fmt, ##args) @@ -1926,28 +1928,54 @@ static int readStatsRid(struct airo_info*ai, StatsRid *sr, int rid, int lock) { return rc; } +static void try_auto_wep(struct airo_info *ai) +{ + if (auto_wep && !(ai->flags & FLAG_RADIO_DOWN)) { + ai->expires = RUN_AT(3*HZ); + wake_up_interruptible(&ai->thr_wait); + } +} + static int airo_open(struct net_device *dev) { - struct airo_info *info = dev->priv; - Resp rsp; + struct airo_info *ai = dev->priv; + int rc = 0; - if (test_bit(FLAG_FLASHING, &info->flags)) + if (test_bit(FLAG_FLASHING, &ai->flags)) return -EIO; /* Make sure the card is configured. * Wireless Extensions may postpone config changes until the card * is open (to pipeline changes and speed-up card setup). If * those changes are not yet commited, do it now - Jean II */ - if (test_bit (FLAG_COMMIT, &info->flags)) { - disable_MAC(info, 1); - writeConfigRid(info, 1); + if (test_bit(FLAG_COMMIT, &ai->flags)) { + disable_MAC(ai, 1); + writeConfigRid(ai, 1); } - if (info->wifidev != dev) { + if (ai->wifidev != dev) { + clear_bit(JOB_DIE, &ai->jobs); + ai->airo_thread_task = kthread_run(airo_thread, dev, dev->name); + if (IS_ERR(ai->airo_thread_task)) + return (int)PTR_ERR(ai->airo_thread_task); + + rc = request_irq(dev->irq, airo_interrupt, IRQF_SHARED, + dev->name, dev); + if (rc) { + airo_print_err(dev->name, + "register interrupt %d failed, rc %d", + dev->irq, rc); + set_bit(JOB_DIE, &ai->jobs); + kthread_stop(ai->airo_thread_task); + return rc; + } + /* Power on the MAC controller (which may have been disabled) */ - clear_bit(FLAG_RADIO_DOWN, &info->flags); - enable_interrupts(info); + clear_bit(FLAG_RADIO_DOWN, &ai->flags); + enable_interrupts(ai); + + try_auto_wep(ai); } - enable_MAC(info, &rsp, 1); + enable_MAC(ai, 1); netif_start_queue(dev); return 0; @@ -2338,14 +2366,13 @@ static int airo_set_mac_address(struct net_device *dev, void *p) { struct airo_info *ai = dev->priv; struct sockaddr *addr = p; - Resp rsp; readConfigRid(ai, 1); memcpy (ai->config.macAddr, addr->sa_data, dev->addr_len); set_bit (FLAG_COMMIT, &ai->flags); disable_MAC(ai, 1); writeConfigRid (ai, 1); - enable_MAC(ai, &rsp, 1); + enable_MAC(ai, 1); memcpy (ai->dev->dev_addr, addr->sa_data, dev->addr_len); if (ai->wifidev) memcpy (ai->wifidev->dev_addr, addr->sa_data, dev->addr_len); @@ -2392,6 +2419,11 @@ static int airo_close(struct net_device *dev) { disable_MAC(ai, 1); #endif disable_interrupts( ai ); + + free_irq(dev->irq, dev); + + set_bit(JOB_DIE, &ai->jobs); + kthread_stop(ai->airo_thread_task); } return 0; } @@ -2403,7 +2435,6 @@ void stop_airo_card( struct net_device *dev, int freeres ) set_bit(FLAG_RADIO_DOWN, &ai->flags); disable_MAC(ai, 1); disable_interrupts(ai); - free_irq( dev->irq, dev ); takedown_proc_entry( dev, ai ); if (test_bit(FLAG_REGISTERED, &ai->flags)) { unregister_netdev( dev ); @@ -2414,9 +2445,6 @@ void stop_airo_card( struct net_device *dev, int freeres ) } clear_bit(FLAG_REGISTERED, &ai->flags); } - set_bit(JOB_DIE, &ai->jobs); - kthread_stop(ai->airo_thread_task); - /* * Clean out tx queue */ @@ -2554,8 +2582,7 @@ static int mpi_init_descriptors (struct airo_info *ai) * 2) Map PCI memory for issueing commands. * 3) Allocate memory (shared) to send and receive ethernet frames. 
*/ -static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci, - const char *name) +static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci) { unsigned long mem_start, mem_len, aux_start, aux_len; int rc = -1; @@ -2569,35 +2596,35 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci, aux_start = pci_resource_start(pci, 2); aux_len = AUXMEMSIZE; - if (!request_mem_region(mem_start, mem_len, name)) { - airo_print_err(ai->dev->name, "Couldn't get region %x[%x] for %s", - (int)mem_start, (int)mem_len, name); + if (!request_mem_region(mem_start, mem_len, DRV_NAME)) { + airo_print_err("", "Couldn't get region %x[%x]", + (int)mem_start, (int)mem_len); goto out; } - if (!request_mem_region(aux_start, aux_len, name)) { - airo_print_err(ai->dev->name, "Couldn't get region %x[%x] for %s", - (int)aux_start, (int)aux_len, name); + if (!request_mem_region(aux_start, aux_len, DRV_NAME)) { + airo_print_err("", "Couldn't get region %x[%x]", + (int)aux_start, (int)aux_len); goto free_region1; } ai->pcimem = ioremap(mem_start, mem_len); if (!ai->pcimem) { - airo_print_err(ai->dev->name, "Couldn't map region %x[%x] for %s", - (int)mem_start, (int)mem_len, name); + airo_print_err("", "Couldn't map region %x[%x]", + (int)mem_start, (int)mem_len); goto free_region2; } ai->pciaux = ioremap(aux_start, aux_len); if (!ai->pciaux) { - airo_print_err(ai->dev->name, "Couldn't map region %x[%x] for %s", - (int)aux_start, (int)aux_len, name); + airo_print_err("", "Couldn't map region %x[%x]", + (int)aux_start, (int)aux_len); goto free_memmap; } /* Reserve PKTSIZE for each fid and 2K for the Rids */ ai->shared = pci_alloc_consistent(pci, PCI_SHARED_LEN, &ai->shared_dma); if (!ai->shared) { - airo_print_err(ai->dev->name, "Couldn't alloc_consistent %d", - PCI_SHARED_LEN); + airo_print_err("", "Couldn't alloc_consistent %d", + PCI_SHARED_LEN); goto free_auxmap; } @@ -2742,7 +2769,7 @@ static int airo_networks_allocate(struct airo_info *ai) kzalloc(AIRO_MAX_NETWORK_COUNT * sizeof(BSSListElement), GFP_KERNEL); if (!ai->networks) { - airo_print_warn(ai->dev->name, "Out of memory allocating beacons"); + airo_print_warn("", "Out of memory allocating beacons"); return -ENOMEM; } @@ -2770,7 +2797,6 @@ static int airo_test_wpa_capable(struct airo_info *ai) { int status; CapabilityRid cap_rid; - const char *name = ai->dev->name; status = readCapabilityRid(ai, &cap_rid, 1); if (status != SUCCESS) return 0; @@ -2778,12 +2804,12 @@ static int airo_test_wpa_capable(struct airo_info *ai) /* Only firmware versions 5.30.17 or better can do WPA */ if ((cap_rid.softVer > 0x530) || ((cap_rid.softVer == 0x530) && (cap_rid.softSubVer >= 17))) { - airo_print_info(name, "WPA is supported."); + airo_print_info("", "WPA is supported."); return 1; } /* No WPA support */ - airo_print_info(name, "WPA unsupported (only firmware versions 5.30.17" + airo_print_info("", "WPA unsupported (only firmware versions 5.30.17" " and greater support WPA. Detected %s)", cap_rid.prodVer); return 0; } @@ -2797,23 +2823,19 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, int i, rc; /* Create the network device object. 
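 * (Editor's note: alloc_etherdev() becomes alloc_netdev(..., "",
 * ether_setup) below, and the "eth%d" template is only copied in
 * right before register_netdev(); since the device is nameless
 * during probe, the memory-region requests above now pass the
 * constant DRV_NAME instead of dev->name.)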
*/ - dev = alloc_etherdev(sizeof(*ai)); - if (!dev) { + dev = alloc_netdev(sizeof(*ai), "", ether_setup); + if (!dev) { airo_print_err("", "Couldn't alloc_etherdev"); return NULL; - } - if (dev_alloc_name(dev, dev->name) < 0) { - airo_print_err("", "Couldn't get name!"); - goto err_out_free; } ai = dev->priv; ai->wifidev = NULL; - ai->flags = 0; + ai->flags = 1 << FLAG_RADIO_DOWN; ai->jobs = 0; ai->dev = dev; if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) { - airo_print_dbg(dev->name, "Found an MPI350 card"); + airo_print_dbg("", "Found an MPI350 card"); set_bit(FLAG_MPI, &ai->flags); } spin_lock_init(&ai->aux_lock); @@ -2821,14 +2843,11 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, ai->config.len = 0; ai->pci = pci; init_waitqueue_head (&ai->thr_wait); - ai->airo_thread_task = kthread_run(airo_thread, dev, dev->name); - if (IS_ERR(ai->airo_thread_task)) - goto err_out_free; ai->tfm = NULL; add_airo_dev(ai); if (airo_networks_allocate (ai)) - goto err_out_thr; + goto err_out_free; airo_networks_initialize (ai); /* The Airo-specific entries in the device structure. */ @@ -2851,27 +2870,22 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, dev->base_addr = port; SET_NETDEV_DEV(dev, dmdev); + SET_MODULE_OWNER(dev); reset_card (dev, 1); msleep(400); - rc = request_irq( dev->irq, airo_interrupt, IRQF_SHARED, dev->name, dev ); - if (rc) { - airo_print_err(dev->name, "register interrupt %d failed, rc %d", - irq, rc); - goto err_out_nets; - } if (!is_pcmcia) { - if (!request_region( dev->base_addr, 64, dev->name )) { + if (!request_region(dev->base_addr, 64, DRV_NAME)) { rc = -EBUSY; airo_print_err(dev->name, "Couldn't request region"); - goto err_out_irq; + goto err_out_nets; } } if (test_bit(FLAG_MPI,&ai->flags)) { - if (mpi_map_card(ai, pci, dev->name)) { - airo_print_err(dev->name, "Could not map memory"); + if (mpi_map_card(ai, pci)) { + airo_print_err("", "Could not map memory"); goto err_out_res; } } @@ -2899,6 +2913,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, ai->bssListRidLen = sizeof(BSSListRid) - sizeof(BSSListRidExtra); } + strcpy(dev->name, "eth%d"); rc = register_netdev(dev); if (rc) { airo_print_err(dev->name, "Couldn't register_netdev"); @@ -2921,8 +2936,6 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, if (setup_proc_entry(dev, dev->priv) < 0) goto err_out_wifi; - netif_start_queue(dev); - SET_MODULE_OWNER(dev); return dev; err_out_wifi: @@ -2940,14 +2953,9 @@ err_out_map: err_out_res: if (!is_pcmcia) release_region( dev->base_addr, 64 ); -err_out_irq: - free_irq(dev->irq, dev); err_out_nets: airo_networks_free(ai); -err_out_thr: del_airo_dev(ai); - set_bit(JOB_DIE, &ai->jobs); - kthread_stop(ai->airo_thread_task); err_out_free: free_netdev(dev); return NULL; @@ -3078,7 +3086,8 @@ static int airo_thread(void *data) { struct net_device *dev = data; struct airo_info *ai = dev->priv; int locked; - + + set_freezable(); while(1) { /* make swsusp happy with our thread */ try_to_freeze(); @@ -3529,9 +3538,11 @@ static u16 IN4500( struct airo_info *ai, u16 reg ) { return rc; } -static int enable_MAC( struct airo_info *ai, Resp *rsp, int lock ) { +static int enable_MAC(struct airo_info *ai, int lock) +{ int rc; - Cmd cmd; + Cmd cmd; + Resp rsp; /* FLAG_RADIO_OFF : Radio disabled via /proc or Wireless Extensions * FLAG_RADIO_DOWN : Radio disabled via "ifconfig ethX down" @@ -3547,7 +3558,7 @@ static int enable_MAC( struct airo_info *ai, Resp *rsp, int lock ) { 
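
The set_freezable() call added to airo_thread() below opts the kthread back into the freezer: in this kernel generation kthreads are non-freezable unless they declare otherwise, so a driver thread that should park during suspend must both call set_freezable() once and poll try_to_freeze() in its loop. A minimal sketch of that loop shape (the work itself is elided):

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_thread(void *data)
{
	set_freezable();	/* opt in; kthreads default to non-freezable */

	while (!kthread_should_stop()) {
		try_to_freeze();	/* park here while the box suspends */

		/* ... wait for and service driver work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
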
if (!test_bit(FLAG_ENABLED, &ai->flags)) { memset(&cmd, 0, sizeof(cmd)); cmd.cmd = MAC_ENABLE; - rc = issuecommand(ai, &cmd, rsp); + rc = issuecommand(ai, &cmd, &rsp); if (rc == SUCCESS) set_bit(FLAG_ENABLED, &ai->flags); } else @@ -3557,8 +3568,12 @@ static int enable_MAC( struct airo_info *ai, Resp *rsp, int lock ) { up(&ai->sem); if (rc) - airo_print_err(ai->dev->name, "%s: Cannot enable MAC, err=%d", - __FUNCTION__, rc); + airo_print_err(ai->dev->name, "Cannot enable MAC"); + else if ((rsp.status & 0xFF00) != 0) { + airo_print_err(ai->dev->name, "Bad MAC enable reason=%x, " + "rid=%x, offset=%d", rsp.rsp0, rsp.rsp1, rsp.rsp2); + rc = ERROR; + } return rc; } @@ -3902,12 +3917,9 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) if ( status != SUCCESS ) return ERROR; } - status = enable_MAC(ai, &rsp, lock); - if ( status != SUCCESS || (rsp.status & 0xFF00) != 0) { - airo_print_err(ai->dev->name, "Bad MAC enable reason = %x, rid = %x," - " offset = %d", rsp.rsp0, rsp.rsp1, rsp.rsp2 ); + status = enable_MAC(ai, lock); + if (status != SUCCESS) return ERROR; - } /* Grab the initial wep key, we gotta save it for auto_wep */ rc = readWepKeyRid(ai, &wkr, 1, lock); @@ -3919,10 +3931,7 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) rc = readWepKeyRid(ai, &wkr, 0, lock); } while(lastindex != wkr.kindex); - if (auto_wep) { - ai->expires = RUN_AT(3*HZ); - wake_up_interruptible(&ai->thr_wait); - } + try_auto_wep(ai); return SUCCESS; } @@ -4004,7 +4013,7 @@ static int bap_setup(struct airo_info *ai, u16 rid, u16 offset, int whichbap ) } if ( !(max_tries--) ) { airo_print_err(ai->dev->name, - "airo: BAP setup error too many retries\n"); + "BAP setup error too many retries\n"); return ERROR; } // -- PC4500 missed it, try again @@ -5152,7 +5161,6 @@ static void proc_SSID_on_close( struct inode *inode, struct file *file ) { struct net_device *dev = dp->data; struct airo_info *ai = dev->priv; SsidRid SSID_rid; - Resp rsp; int i; int offset = 0; @@ -5177,7 +5185,7 @@ static void proc_SSID_on_close( struct inode *inode, struct file *file ) { SSID_rid.len = sizeof(SSID_rid); disable_MAC(ai, 1); writeSsidRid(ai, &SSID_rid, 1); - enable_MAC(ai, &rsp, 1); + enable_MAC(ai, 1); } static inline u8 hexVal(char c) { @@ -5193,7 +5201,6 @@ static void proc_APList_on_close( struct inode *inode, struct file *file ) { struct net_device *dev = dp->data; struct airo_info *ai = dev->priv; APListRid APList_rid; - Resp rsp; int i; if ( !data->writelen ) return; @@ -5218,18 +5225,17 @@ static void proc_APList_on_close( struct inode *inode, struct file *file ) { } disable_MAC(ai, 1); writeAPListRid(ai, &APList_rid, 1); - enable_MAC(ai, &rsp, 1); + enable_MAC(ai, 1); } /* This function wraps PC4500_writerid with a MAC disable */ static int do_writerid( struct airo_info *ai, u16 rid, const void *rid_data, int len, int dummy ) { int rc; - Resp rsp; disable_MAC(ai, 1); rc = PC4500_writerid(ai, rid, rid_data, len, 1); - enable_MAC(ai, &rsp, 1); + enable_MAC(ai, 1); return rc; } @@ -5260,7 +5266,6 @@ static int set_wep_key(struct airo_info *ai, u16 index, const char *key, u16 keylen, int perm, int lock ) { static const unsigned char macaddr[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; WepKeyRid wkr; - Resp rsp; memset(&wkr, 0, sizeof(wkr)); if (keylen == 0) { @@ -5280,7 +5285,7 @@ static int set_wep_key(struct airo_info *ai, u16 index, if (perm) disable_MAC(ai, lock); writeWepKeyRid(ai, &wkr, perm, lock); - if (perm) enable_MAC(ai, &rsp, lock); + if (perm) enable_MAC(ai, lock); return 0; } @@ -5548,7 +5553,6 
@@ static int proc_close( struct inode *inode, struct file *file ) changed. */ static void timer_func( struct net_device *dev ) { struct airo_info *apriv = dev->priv; - Resp rsp; /* We don't have a link so try changing the authtype */ readConfigRid(apriv, 0); @@ -5575,7 +5579,7 @@ static void timer_func( struct net_device *dev ) { } set_bit (FLAG_COMMIT, &apriv->flags); writeConfigRid(apriv, 0); - enable_MAC(apriv, &rsp, 0); + enable_MAC(apriv, 0); up(&apriv->sem); /* Schedule check to see if the change worked */ @@ -5597,8 +5601,10 @@ static int __devinit airo_pci_probe(struct pci_dev *pdev, dev = _init_airo_card(pdev->irq, pdev->resource[0].start, 0, pdev, &pdev->dev); else dev = _init_airo_card(pdev->irq, pdev->resource[2].start, 0, pdev, &pdev->dev); - if (!dev) + if (!dev) { + pci_disable_device(pdev); return -ENODEV; + } pci_set_drvdata(pdev, dev); return 0; @@ -5610,6 +5616,8 @@ static void __devexit airo_pci_remove(struct pci_dev *pdev) airo_print_info(dev->name, "Unregistering..."); stop_airo_card(dev, 1); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); } static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state) @@ -5646,7 +5654,6 @@ static int airo_pci_resume(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct airo_info *ai = dev->priv; - Resp rsp; pci_power_t prev_state = pdev->current_state; pci_set_power_state(pdev, PCI_D0); @@ -5679,7 +5686,7 @@ static int airo_pci_resume(struct pci_dev *pdev) ai->APList = NULL; } writeConfigRid(ai, 0); - enable_MAC(ai, &rsp, 0); + enable_MAC(ai, 0); ai->power = PMSG_ON; netif_device_attach(dev); netif_wake_queue(dev); @@ -5903,7 +5910,6 @@ static int airo_set_essid(struct net_device *dev, char *extra) { struct airo_info *local = dev->priv; - Resp rsp; SsidRid SSID_rid; /* SSIDs */ /* Reload the list of current SSID */ @@ -5935,7 +5941,7 @@ static int airo_set_essid(struct net_device *dev, /* Write it to the card */ disable_MAC(local, 1); writeSsidRid(local, &SSID_rid, 1); - enable_MAC(local, &rsp, 1); + enable_MAC(local, 1); return 0; } @@ -6000,7 +6006,7 @@ static int airo_set_wap(struct net_device *dev, memcpy(APList_rid.ap[0], awrq->sa_data, ETH_ALEN); disable_MAC(local, 1); writeAPListRid(local, &APList_rid, 1); - enable_MAC(local, &rsp, 1); + enable_MAC(local, 1); } return 0; } @@ -7454,7 +7460,6 @@ static int airo_config_commit(struct net_device *dev, char *extra) /* NULL */ { struct airo_info *local = dev->priv; - Resp rsp; if (!test_bit (FLAG_COMMIT, &local->flags)) return 0; @@ -7479,7 +7484,7 @@ static int airo_config_commit(struct net_device *dev, if (down_interruptible(&local->sem)) return -ERESTARTSYS; writeConfigRid(local, 0); - enable_MAC(local, &rsp, 0); + enable_MAC(local, 0); if (test_bit (FLAG_RESET, &local->flags)) airo_set_promisc(local); else @@ -7746,7 +7751,6 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) { unsigned char *iobuf; int len; struct airo_info *ai = dev->priv; - Resp rsp; if (test_bit(FLAG_FLASHING, &ai->flags)) return -EIO; @@ -7758,7 +7762,7 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) { if (test_bit(FLAG_COMMIT, &ai->flags)) { disable_MAC (ai, 1); writeConfigRid (ai, 1); - enable_MAC (ai, &rsp, 1); + enable_MAC(ai, 1); } break; case AIROGSLIST: ridcode = RID_SSID; break; @@ -7815,7 +7819,6 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) { struct airo_info *ai = dev->priv; int ridcode; int enabled; - Resp rsp; static int (* writer)(struct airo_info *, u16 rid, const void *, int, int); 
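
The airo_pci_probe()/airo_pci_remove() hunks above make the PCI enable/disable calls symmetric: a probe that fails after the device was enabled now disables it again, and remove clears the drvdata it set. Schematically, with example_setup()/example_teardown() standing in for the real initialization (only the balancing matters here):

#include <linux/pci.h>
#include <linux/slab.h>

static void *example_setup(struct pci_dev *pdev)
{
	return kzalloc(16, GFP_KERNEL);	/* placeholder private state */
}

static void example_teardown(void *priv)
{
	kfree(priv);
}

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void *priv;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	priv = example_setup(pdev);
	if (!priv) {
		pci_disable_device(pdev);	/* balance the enable on failure */
		return -ENODEV;
	}

	pci_set_drvdata(pdev, priv);
	return 0;
}

static void example_remove(struct pci_dev *pdev)
{
	example_teardown(pci_get_drvdata(pdev));
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);	/* mirror of probe */
}
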
unsigned char *iobuf; @@ -7849,7 +7852,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) { * same with MAC off */ case AIROPMACON: - if (enable_MAC(ai, &rsp, 1) != 0) + if (enable_MAC(ai, 1) != 0) return -EIO; return 0; diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c index ef6b253a92ce..c5d6753a55ea 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c @@ -3741,10 +3741,8 @@ static int bcm43xx_attach_board(struct bcm43xx_private *bcm) &bcm->board_type); if (err) goto err_iounmap; - err = bcm43xx_pci_read_config16(bcm, PCI_REVISION_ID, - &bcm->board_revision); - if (err) - goto err_iounmap; + + bcm->board_revision = bcm->pci_dev->revision; err = bcm43xx_chipset_attach(bcm); if (err) diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c index 0cd48d151f5e..7da3664b8515 100644 --- a/drivers/net/wireless/hostap/hostap_pci.c +++ b/drivers/net/wireless/hostap/hostap_pci.c @@ -453,8 +453,6 @@ static struct pci_driver prism2_pci_drv_id = { .suspend = prism2_pci_suspend, .resume = prism2_pci_resume, #endif /* CONFIG_PM */ - /* Linux 2.4.6 added save_state and enable_wake that are not used here - */ }; diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c index 0183df757b3e..040dc3e36410 100644 --- a/drivers/net/wireless/hostap/hostap_plx.c +++ b/drivers/net/wireless/hostap/hostap_plx.c @@ -613,9 +613,6 @@ static struct pci_driver prism2_plx_drv_id = { .id_table = prism2_plx_id_table, .probe = prism2_plx_probe, .remove = prism2_plx_remove, - .suspend = NULL, - .resume = NULL, - .enable_wake = NULL }; diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c index d51daf87450f..8990585bd228 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c @@ -1768,7 +1768,8 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) if (priv->stop_rf_kill) { priv->stop_rf_kill = 0; - queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ); + queue_delayed_work(priv->workqueue, &priv->rf_kill, + round_jiffies(HZ)); } deferred = 1; @@ -2098,7 +2099,7 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) /* Make sure the RF Kill check timer is running */ priv->stop_rf_kill = 0; cancel_delayed_work(&priv->rf_kill); - queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ); + queue_delayed_work(priv->workqueue, &priv->rf_kill, round_jiffies(HZ)); } static void isr_scan_complete(struct ipw2100_priv *priv, u32 status) @@ -4233,7 +4234,8 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio) /* Make sure the RF_KILL check timer is running */ priv->stop_rf_kill = 0; cancel_delayed_work(&priv->rf_kill); - queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ); + queue_delayed_work(priv->workqueue, &priv->rf_kill, + round_jiffies(HZ)); } else schedule_reset(priv); } @@ -5969,7 +5971,8 @@ static void ipw2100_rf_kill(struct work_struct *work) if (rf_kill_active(priv)) { IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); if (!priv->stop_rf_kill) - queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ); + queue_delayed_work(priv->workqueue, &priv->rf_kill, + round_jiffies(HZ)); goto exit_unlock; } @@ -7865,10 +7868,10 @@ static int ipw2100_wx_set_powermode(struct net_device *dev, goto done; } - if ((mode < 1) || (mode > POWER_MODES)) + if ((mode < 0) || (mode > POWER_MODES)) mode = IPW_POWER_AUTO; - if 
(priv->power_mode != mode) + if (IPW_POWER_LEVEL(priv->power_mode) != mode) err = ipw2100_set_power_mode(priv, mode); done: mutex_unlock(&priv->action_mutex); @@ -7899,7 +7902,7 @@ static int ipw2100_wx_get_powermode(struct net_device *dev, break; case IPW_POWER_AUTO: snprintf(extra, MAX_POWER_STRING, - "Power save level: %d (Auto)", 0); + "Power save level: %d (Auto)", level); break; default: timeout = timeout_duration[level - 1] / 1000; diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index 7cb2052a55a5..61497c467467 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c @@ -70,7 +70,7 @@ #define VQ #endif -#define IPW2200_VERSION "1.2.0" VK VD VM VP VR VQ +#define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver" #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" #define DRV_VERSION IPW2200_VERSION @@ -1751,7 +1751,7 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio) /* Make sure the RF_KILL check timer is running */ cancel_delayed_work(&priv->rf_kill); queue_delayed_work(priv->workqueue, &priv->rf_kill, - 2 * HZ); + round_jiffies(2 * HZ)); } else queue_work(priv->workqueue, &priv->up); } @@ -2506,7 +2506,7 @@ static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode) break; } - param = cpu_to_le32(mode); + param = cpu_to_le32(param); return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param), ¶m); } @@ -4690,7 +4690,8 @@ static void ipw_rx_notification(struct ipw_priv *priv, else if (priv->config & CFG_BACKGROUND_SCAN && priv->status & STATUS_ASSOCIATED) queue_delayed_work(priv->workqueue, - &priv->request_scan, HZ); + &priv->request_scan, + round_jiffies(HZ)); /* Send an empty event to user space. 
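
The ipw2100/ipw2200 hunks above replace bare one-second delays with round_jiffies(HZ). round_jiffies() rounds an absolute jiffies value to a whole second so that unrelated periodic timers expire together and an idle CPU wakes less often; for the relative delays that the delayed-work API takes, round_jiffies_relative() is the matching helper. A sketch of a self-rearming poll using it (the example_poll name is illustrative):

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

static void example_poll(struct work_struct *work)
{
	struct delayed_work *dwork =
		container_of(work, struct delayed_work, work);

	/* ... check hardware state, e.g. an rf-kill switch ... */

	/* Re-arm for ~1 s, rounded so the wakeup coalesces with other
	 * whole-second timers instead of firing at a random jiffy. */
	schedule_delayed_work(dwork, round_jiffies_relative(HZ));
}
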
* We don't send the received data on the event because @@ -9567,6 +9568,7 @@ static int ipw_wx_set_power(struct net_device *dev, priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY; else priv->power_mode = IPW_POWER_ENABLED | priv->power_mode; + err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode)); if (err) { IPW_DEBUG_WX("failed setting power mode.\n"); @@ -9603,22 +9605,19 @@ static int ipw_wx_set_powermode(struct net_device *dev, struct ipw_priv *priv = ieee80211_priv(dev); int mode = *(int *)extra; int err; + mutex_lock(&priv->mutex); - if ((mode < 1) || (mode > IPW_POWER_LIMIT)) { + if ((mode < 1) || (mode > IPW_POWER_LIMIT)) mode = IPW_POWER_AC; - priv->power_mode = mode; - } else { - priv->power_mode = IPW_POWER_ENABLED | mode; - } - if (priv->power_mode != mode) { + if (IPW_POWER_LEVEL(priv->power_mode) != mode) { err = ipw_send_power_mode(priv, mode); - if (err) { IPW_DEBUG_WX("failed setting power mode.\n"); mutex_unlock(&priv->mutex); return err; } + priv->power_mode = IPW_POWER_ENABLED | mode; } mutex_unlock(&priv->mutex); return 0; @@ -10554,7 +10553,7 @@ static irqreturn_t ipw_isr(int irq, void *data) spin_lock(&priv->irq_lock); if (!(priv->status & STATUS_INT_ENABLED)) { - /* Shared IRQ */ + /* IRQ is disabled */ goto none; } diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index 13f6528abb00..4a8f5dc70239 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c @@ -240,7 +240,7 @@ static int wlan_cmd_802_11_enable_rsn(wlan_private * priv, if (*enable) penableRSN->enable = cpu_to_le16(cmd_enable_rsn); else - penableRSN->enable = cpu_to_le16(cmd_enable_rsn); + penableRSN->enable = cpu_to_le16(cmd_disable_rsn); } lbs_deb_leave(LBS_DEB_CMD); diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 4a59306a3f05..9f366242c392 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -613,6 +613,7 @@ static int wlan_service_main_thread(void *data) init_waitqueue_entry(&wait, current); + set_freezable(); for (;;) { lbs_deb_thread( "main-thread 111: intcounter=%d " "currenttxskb=%p dnld_sent=%d\n", diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c index 88d9d2d787d5..769c86fb9509 100644 --- a/drivers/net/wireless/libertas/rx.c +++ b/drivers/net/wireless/libertas/rx.c @@ -439,7 +439,6 @@ static int process_rxed_802_11_packet(wlan_private * priv, struct sk_buff *skb) ret = 0; done: - skb->protocol = __constant_htons(0x0019); /* ETH_P_80211_RAW */ lbs_deb_leave_args(LBS_DEB_RX, "ret %d", ret); return ret; } diff --git a/drivers/net/wireless/libertas/version.h b/drivers/net/wireless/libertas/version.h deleted file mode 100644 index 8b137891791f..000000000000 --- a/drivers/net/wireless/libertas/version.h +++ /dev/null @@ -1 +0,0 @@ - diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c index f42b796b5e47..2fcc3bf21081 100644 --- a/drivers/net/wireless/libertas/wext.c +++ b/drivers/net/wireless/libertas/wext.c @@ -1719,9 +1719,6 @@ static int wlan_set_encodeext(struct net_device *dev, pkey->type = KEY_TYPE_ID_TKIP; } else if (alg == IW_ENCODE_ALG_CCMP) { pkey->type = KEY_TYPE_ID_AES; - } else { - ret = -EINVAL; - goto out; } /* If WPA isn't enabled yet, do that now */ diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c index 283be4a70524..585f5996d292 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.c +++ 
b/drivers/net/wireless/prism54/isl_ioctl.c @@ -1853,7 +1853,6 @@ prism54_del_mac(struct net_device *ndev, struct iw_request_info *info, islpci_private *priv = netdev_priv(ndev); struct islpci_acl *acl = &priv->acl; struct mac_entry *entry; - struct list_head *ptr; struct sockaddr *addr = (struct sockaddr *) extra; if (addr->sa_family != ARPHRD_ETHER) @@ -1861,11 +1860,9 @@ prism54_del_mac(struct net_device *ndev, struct iw_request_info *info, if (down_interruptible(&acl->sem)) return -ERESTARTSYS; - for (ptr = acl->mac_list.next; ptr != &acl->mac_list; ptr = ptr->next) { - entry = list_entry(ptr, struct mac_entry, _list); - + list_for_each_entry(entry, &acl->mac_list, _list) { if (memcmp(entry->addr, addr->sa_data, ETH_ALEN) == 0) { - list_del(ptr); + list_del(&entry->_list); acl->size--; kfree(entry); up(&acl->sem); @@ -1883,7 +1880,6 @@ prism54_get_mac(struct net_device *ndev, struct iw_request_info *info, islpci_private *priv = netdev_priv(ndev); struct islpci_acl *acl = &priv->acl; struct mac_entry *entry; - struct list_head *ptr; struct sockaddr *dst = (struct sockaddr *) extra; dwrq->length = 0; @@ -1891,9 +1887,7 @@ prism54_get_mac(struct net_device *ndev, struct iw_request_info *info, if (down_interruptible(&acl->sem)) return -ERESTARTSYS; - for (ptr = acl->mac_list.next; ptr != &acl->mac_list; ptr = ptr->next) { - entry = list_entry(ptr, struct mac_entry, _list); - + list_for_each_entry(entry, &acl->mac_list, _list) { memcpy(dst->sa_data, entry->addr, ETH_ALEN); dst->sa_family = ARPHRD_ETHER; dwrq->length++; @@ -1960,7 +1954,6 @@ prism54_get_policy(struct net_device *ndev, struct iw_request_info *info, static int prism54_mac_accept(struct islpci_acl *acl, char *mac) { - struct list_head *ptr; struct mac_entry *entry; int res = 0; @@ -1972,8 +1965,7 @@ prism54_mac_accept(struct islpci_acl *acl, char *mac) return 1; } - for (ptr = acl->mac_list.next; ptr != &acl->mac_list; ptr = ptr->next) { - entry = list_entry(ptr, struct mac_entry, _list); + list_for_each_entry(entry, &acl->mac_list, _list) { if (memcmp(entry->addr, mac, ETH_ALEN) == 0) { res = 1; break; @@ -2216,11 +2208,9 @@ prism54_wpa_bss_ie_init(islpci_private *priv) void prism54_wpa_bss_ie_clean(islpci_private *priv) { - struct list_head *ptr, *n; + struct islpci_bss_wpa_ie *bss, *n; - list_for_each_safe(ptr, n, &priv->bss_wpa_list) { - struct islpci_bss_wpa_ie *bss; - bss = list_entry(ptr, struct islpci_bss_wpa_ie, list); + list_for_each_entry_safe(bss, n, &priv->bss_wpa_list, list) { kfree(bss); } } diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c index 3dcb13bb7d57..af2e4f2405f2 100644 --- a/drivers/net/wireless/prism54/islpci_hotplug.c +++ b/drivers/net/wireless/prism54/islpci_hotplug.c @@ -87,7 +87,6 @@ static struct pci_driver prism54_driver = { .remove = prism54_remove, .suspend = prism54_suspend, .resume = prism54_resume, - /* .enable_wake ; we don't support this yet */ }; /****************************************************************************** @@ -167,8 +166,7 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); /* enable MWI */ - if (!pci_set_mwi(pdev)) - printk(KERN_INFO "%s: pci_set_mwi(pdev) succeeded\n", DRV_NAME); + pci_try_set_mwi(pdev); /* setup the network device interface and its structure */ if (!(ndev = islpci_setup(pdev))) { diff --git a/drivers/net/wireless/rtl8187_rtl8225.c b/drivers/net/wireless/rtl8187_rtl8225.c index e25a09f1b068..efc41207780e 100644 --- a/drivers/net/wireless/rtl8187_rtl8225.c 
+++ b/drivers/net/wireless/rtl8187_rtl8225.c @@ -67,7 +67,7 @@ static void rtl8225_write_bitbang(struct ieee80211_hw *dev, u8 addr, u16 data) msleep(2); } -static void rtl8225_write_8051(struct ieee80211_hw *dev, u8 addr, u16 data) +static void rtl8225_write_8051(struct ieee80211_hw *dev, u8 addr, __le16 data) { struct rtl8187_priv *priv = dev->priv; u16 reg80, reg82, reg84; @@ -106,7 +106,7 @@ void rtl8225_write(struct ieee80211_hw *dev, u8 addr, u16 data) struct rtl8187_priv *priv = dev->priv; if (priv->asic_rev) - rtl8225_write_8051(dev, addr, data); + rtl8225_write_8051(dev, addr, cpu_to_le16(data)); else rtl8225_write_bitbang(dev, addr, data); } diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index ce9230b2f630..c8b5c2271938 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c @@ -1011,7 +1011,7 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev, } else { skb->dev = dev; skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */ - eth_copy_and_sum(skb, (unsigned char *)&sig.daddr, 12, 0); + skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12); wl3501_receive(this, skb->data, pkt_len); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, dev); diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c index 5b624bfc01a6..c39f1984b84d 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.c +++ b/drivers/net/wireless/zd1211rw/zd_chip.c @@ -49,8 +49,9 @@ void zd_chip_clear(struct zd_chip *chip) ZD_MEMCLEAR(chip, sizeof(*chip)); } -static int scnprint_mac_oui(const u8 *addr, char *buffer, size_t size) +static int scnprint_mac_oui(struct zd_chip *chip, char *buffer, size_t size) { + u8 *addr = zd_usb_to_netdev(&chip->usb)->dev_addr; return scnprintf(buffer, size, "%02x-%02x-%02x", addr[0], addr[1], addr[2]); } @@ -61,10 +62,10 @@ static int scnprint_id(struct zd_chip *chip, char *buffer, size_t size) int i = 0; i = scnprintf(buffer, size, "zd1211%s chip ", - chip->is_zd1211b ? "b" : ""); + zd_chip_is_zd1211b(chip) ? "b" : ""); i += zd_usb_scnprint_id(&chip->usb, buffer+i, size-i); i += scnprintf(buffer+i, size-i, " "); - i += scnprint_mac_oui(chip->e2p_mac, buffer+i, size-i); + i += scnprint_mac_oui(chip, buffer+i, size-i); i += scnprintf(buffer+i, size-i, " "); i += zd_rf_scnprint_id(&chip->rf, buffer+i, size-i); i += scnprintf(buffer+i, size-i, " pa%1x %c%c%c%c%c", chip->pa_type, @@ -366,64 +367,9 @@ error: return r; } -static int _read_mac_addr(struct zd_chip *chip, u8 *mac_addr, - const zd_addr_t *addr) -{ - int r; - u32 parts[2]; - - r = zd_ioread32v_locked(chip, parts, (const zd_addr_t *)addr, 2); - if (r) { - dev_dbg_f(zd_chip_dev(chip), - "error: couldn't read e2p macs. 
Error number %d\n", r); - return r; - } - - mac_addr[0] = parts[0]; - mac_addr[1] = parts[0] >> 8; - mac_addr[2] = parts[0] >> 16; - mac_addr[3] = parts[0] >> 24; - mac_addr[4] = parts[1]; - mac_addr[5] = parts[1] >> 8; - - return 0; -} - -static int read_e2p_mac_addr(struct zd_chip *chip) -{ - static const zd_addr_t addr[2] = { E2P_MAC_ADDR_P1, E2P_MAC_ADDR_P2 }; - - ZD_ASSERT(mutex_is_locked(&chip->mutex)); - return _read_mac_addr(chip, chip->e2p_mac, (const zd_addr_t *)addr); -} - /* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and * CR_MAC_ADDR_P2 must be overwritten */ -void zd_get_e2p_mac_addr(struct zd_chip *chip, u8 *mac_addr) -{ - mutex_lock(&chip->mutex); - memcpy(mac_addr, chip->e2p_mac, ETH_ALEN); - mutex_unlock(&chip->mutex); -} - -static int read_mac_addr(struct zd_chip *chip, u8 *mac_addr) -{ - static const zd_addr_t addr[2] = { CR_MAC_ADDR_P1, CR_MAC_ADDR_P2 }; - return _read_mac_addr(chip, mac_addr, (const zd_addr_t *)addr); -} - -int zd_read_mac_addr(struct zd_chip *chip, u8 *mac_addr) -{ - int r; - - dev_dbg_f(zd_chip_dev(chip), "\n"); - mutex_lock(&chip->mutex); - r = read_mac_addr(chip, mac_addr); - mutex_unlock(&chip->mutex); - return r; -} - int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr) { int r; @@ -444,12 +390,6 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr) mutex_lock(&chip->mutex); r = zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs)); -#ifdef DEBUG - { - u8 tmp[ETH_ALEN]; - read_mac_addr(chip, tmp); - } -#endif /* DEBUG */ mutex_unlock(&chip->mutex); return r; } @@ -809,7 +749,7 @@ out: static int hw_reset_phy(struct zd_chip *chip) { - return chip->is_zd1211b ? zd1211b_hw_reset_phy(chip) : + return zd_chip_is_zd1211b(chip) ? zd1211b_hw_reset_phy(chip) : zd1211_hw_reset_phy(chip); } @@ -874,7 +814,7 @@ static int hw_init_hmac(struct zd_chip *chip) if (r) return r; - return chip->is_zd1211b ? + return zd_chip_is_zd1211b(chip) ?
zd1211b_hw_init_hmac(chip) : zd1211_hw_init_hmac(chip); } @@ -1136,8 +1076,15 @@ static int read_fw_regs_offset(struct zd_chip *chip) return 0; } +/* Read mac address using pre-firmware interface */ +int zd_chip_read_mac_addr_fw(struct zd_chip *chip, u8 *addr) +{ + dev_dbg_f(zd_chip_dev(chip), "\n"); + return zd_usb_read_fw(&chip->usb, E2P_MAC_ADDR_P1, addr, + ETH_ALEN); +} -int zd_chip_init_hw(struct zd_chip *chip, u8 device_type) +int zd_chip_init_hw(struct zd_chip *chip) { int r; u8 rf_type; @@ -1145,7 +1092,6 @@ int zd_chip_init_hw(struct zd_chip *chip, u8 device_type) dev_dbg_f(zd_chip_dev(chip), "\n"); mutex_lock(&chip->mutex); - chip->is_zd1211b = (device_type == DEVICE_ZD1211B) != 0; #ifdef DEBUG r = test_init(chip); @@ -1201,10 +1147,6 @@ int zd_chip_init_hw(struct zd_chip *chip, u8 device_type) goto out; #endif /* DEBUG */ - r = read_e2p_mac_addr(chip); - if (r) - goto out; - r = read_cal_int_tables(chip); if (r) goto out; @@ -1259,7 +1201,7 @@ static int update_channel_integration_and_calibration(struct zd_chip *chip, r = update_pwr_int(chip, channel); if (r) return r; - if (chip->is_zd1211b) { + if (zd_chip_is_zd1211b(chip)) { static const struct zd_ioreq16 ioreqs[] = { { CR69, 0x28 }, {}, diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h index 79d0288c193a..f4698576ab71 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.h +++ b/drivers/net/wireless/zd1211rw/zd_chip.h @@ -704,7 +704,6 @@ struct zd_chip { struct mutex mutex; /* Base address of FW_REG_ registers */ zd_addr_t fw_regs_base; - u8 e2p_mac[ETH_ALEN]; /* EepSetPoint in the vendor driver */ u8 pwr_cal_values[E2P_CHANNEL_COUNT]; /* integration values in the vendor driver */ @@ -715,7 +714,7 @@ struct zd_chip { unsigned int pa_type:4, patch_cck_gain:1, patch_cr157:1, patch_6m_band_edge:1, new_phy_layout:1, al2230s_bit:1, - is_zd1211b:1, supports_tx_led:1; + supports_tx_led:1; }; static inline struct zd_chip *zd_usb_to_chip(struct zd_usb *usb) @@ -734,9 +733,15 @@ void zd_chip_init(struct zd_chip *chip, struct net_device *netdev, struct usb_interface *intf); void zd_chip_clear(struct zd_chip *chip); -int zd_chip_init_hw(struct zd_chip *chip, u8 device_type); +int zd_chip_read_mac_addr_fw(struct zd_chip *chip, u8 *addr); +int zd_chip_init_hw(struct zd_chip *chip); int zd_chip_reset(struct zd_chip *chip); +static inline int zd_chip_is_zd1211b(struct zd_chip *chip) +{ + return chip->usb.is_zd1211b; +} + static inline int zd_ioread16v_locked(struct zd_chip *chip, u16 *values, const zd_addr_t *addresses, unsigned int count) @@ -825,8 +830,6 @@ static inline u8 _zd_chip_get_channel(struct zd_chip *chip) } u8 zd_chip_get_channel(struct zd_chip *chip); int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain); -void zd_get_e2p_mac_addr(struct zd_chip *chip, u8 *mac_addr); -int zd_read_mac_addr(struct zd_chip *chip, u8 *mac_addr); int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr); int zd_chip_switch_radio_on(struct zd_chip *chip); int zd_chip_switch_radio_off(struct zd_chip *chip); diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index 6753d240c168..f6c487aa8246 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c @@ -86,38 +86,46 @@ out: return r; } -int zd_mac_init_hw(struct zd_mac *mac, u8 device_type) +int zd_mac_preinit_hw(struct zd_mac *mac) { int r; - struct zd_chip *chip = &mac->chip; u8 addr[ETH_ALEN]; + + r = zd_chip_read_mac_addr_fw(&mac->chip, addr); + if (r) + return r; + + 
memcpy(mac->netdev->dev_addr, addr, ETH_ALEN); + return 0; +} + +int zd_mac_init_hw(struct zd_mac *mac) +{ + int r; + struct zd_chip *chip = &mac->chip; u8 default_regdomain; r = zd_chip_enable_int(chip); if (r) goto out; - r = zd_chip_init_hw(chip, device_type); + r = zd_chip_init_hw(chip); if (r) goto disable_int; - zd_get_e2p_mac_addr(chip, addr); - r = zd_write_mac_addr(chip, addr); - if (r) - goto disable_int; ZD_ASSERT(!irqs_disabled()); - spin_lock_irq(&mac->lock); - memcpy(mac->netdev->dev_addr, addr, ETH_ALEN); - spin_unlock_irq(&mac->lock); r = zd_read_regdomain(chip, &default_regdomain); if (r) goto disable_int; if (!zd_regdomain_supported(default_regdomain)) { - dev_dbg_f(zd_mac_dev(mac), - "Regulatory Domain %#04x is not supported.\n", - default_regdomain); - r = -EINVAL; - goto disable_int; + /* The vendor driver overrides the regulatory domain and + * allowed channel registers and unconditionally restricts + * available channels to 1-11 everywhere. Match their + * questionable behaviour only for regdomains which we don't + * recognise. */ + dev_warn(zd_mac_dev(mac), "Unrecognised regulatory domain: " + "%#04x. Defaulting to FCC.\n", default_regdomain); + default_regdomain = ZD_REGDOMAIN_FCC; } spin_lock_irq(&mac->lock); mac->regdomain = mac->default_regdomain = default_regdomain; @@ -164,14 +172,25 @@ int zd_mac_open(struct net_device *netdev) { struct zd_mac *mac = zd_netdev_mac(netdev); struct zd_chip *chip = &mac->chip; + struct zd_usb *usb = &chip->usb; int r; + if (!usb->initialized) { + r = zd_usb_init_hw(usb); + if (r) + goto out; + } + tasklet_enable(&mac->rx_tasklet); r = zd_chip_enable_int(chip); if (r < 0) goto out; + r = zd_write_mac_addr(chip, netdev->dev_addr); + if (r) + goto disable_int; + r = zd_chip_set_basic_rates(chip, CR_RATES_80211B | CR_RATES_80211G); if (r < 0) goto disable_int; @@ -251,9 +270,11 @@ int zd_mac_set_mac_address(struct net_device *netdev, void *p) dev_dbg_f(zd_mac_dev(mac), "Setting MAC to " MAC_FMT "\n", MAC_ARG(addr->sa_data)); - r = zd_write_mac_addr(chip, addr->sa_data); - if (r) - return r; + if (netdev->flags & IFF_UP) { + r = zd_write_mac_addr(chip, addr->sa_data); + if (r) + return r; + } spin_lock_irqsave(&mac->lock, flags); memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN); @@ -855,7 +876,7 @@ static int fill_ctrlset(struct zd_mac *mac, /* ZD1211B: Computing the length difference this way, gives us * flexibility to compute the packet length. */ - cs->packet_length = cpu_to_le16(mac->chip.is_zd1211b ? + cs->packet_length = cpu_to_le16(zd_chip_is_zd1211b(&mac->chip) ? 
packet_length - frag_len : packet_length); /* diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h index faf4c7828d4e..9f9344eb50f9 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.h +++ b/drivers/net/wireless/zd1211rw/zd_mac.h @@ -189,7 +189,8 @@ int zd_mac_init(struct zd_mac *mac, struct usb_interface *intf); void zd_mac_clear(struct zd_mac *mac); -int zd_mac_init_hw(struct zd_mac *mac, u8 device_type); +int zd_mac_preinit_hw(struct zd_mac *mac); +int zd_mac_init_hw(struct zd_mac *mac); int zd_mac_open(struct net_device *netdev); int zd_mac_stop(struct net_device *netdev); diff --git a/drivers/net/wireless/zd1211rw/zd_rf.c b/drivers/net/wireless/zd1211rw/zd_rf.c index 7407409b60b1..abe5d38f7f4d 100644 --- a/drivers/net/wireless/zd1211rw/zd_rf.c +++ b/drivers/net/wireless/zd1211rw/zd_rf.c @@ -34,7 +34,7 @@ static const char * const rfs[] = { [AL2210_RF] = "AL2210_RF", [MAXIM_NEW_RF] = "MAXIM_NEW_RF", [UW2453_RF] = "UW2453_RF", - [UNKNOWN_A_RF] = "UNKNOWN_A_RF", + [AL2230S_RF] = "AL2230S_RF", [RALINK_RF] = "RALINK_RF", [INTERSIL_RF] = "INTERSIL_RF", [RF2959_RF] = "RF2959_RF", @@ -77,6 +77,7 @@ int zd_rf_init_hw(struct zd_rf *rf, u8 type) r = zd_rf_init_rf2959(rf); break; case AL2230_RF: + case AL2230S_RF: r = zd_rf_init_al2230(rf); break; case AL7230B_RF: diff --git a/drivers/net/wireless/zd1211rw/zd_rf.h b/drivers/net/wireless/zd1211rw/zd_rf.h index c6dfd8227f6e..30502f26b71c 100644 --- a/drivers/net/wireless/zd1211rw/zd_rf.h +++ b/drivers/net/wireless/zd1211rw/zd_rf.h @@ -26,7 +26,7 @@ #define AL2210_RF 0x7 #define MAXIM_NEW_RF 0x8 #define UW2453_RF 0x9 -#define UNKNOWN_A_RF 0xa +#define AL2230S_RF 0xa #define RALINK_RF 0xb #define INTERSIL_RF 0xc #define RF2959_RF 0xd diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c index e7a4ecf7b6e2..006774de3202 100644 --- a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c +++ b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c @@ -21,6 +21,8 @@ #include "zd_usb.h" #include "zd_chip.h" +#define IS_AL2230S(chip) ((chip)->al2230s_bit || (chip)->rf.type == AL2230S_RF) + static const u32 zd1211_al2230_table[][3] = { RF_CHANNEL( 1) = { 0x03f790, 0x033331, 0x00000d, }, RF_CHANNEL( 2) = { 0x03f790, 0x0b3331, 0x00000d, }, @@ -176,7 +178,7 @@ static int zd1211_al2230_init_hw(struct zd_rf *rf) if (r) return r; - if (chip->al2230s_bit) { + if (IS_AL2230S(chip)) { r = zd_iowrite16a_locked(chip, ioreqs_init_al2230s, ARRAY_SIZE(ioreqs_init_al2230s)); if (r) @@ -188,7 +190,7 @@ static int zd1211_al2230_init_hw(struct zd_rf *rf) return r; /* improve band edge for AL2230S */ - if (chip->al2230s_bit) + if (IS_AL2230S(chip)) r = zd_rfwrite_locked(chip, 0x000824, RF_RV_BITS); else r = zd_rfwrite_locked(chip, 0x0005a4, RF_RV_BITS); @@ -314,7 +316,7 @@ static int zd1211b_al2230_init_hw(struct zd_rf *rf) if (r) return r; - if (chip->al2230s_bit) { + if (IS_AL2230S(chip)) { r = zd_iowrite16a_locked(chip, ioreqs_init_al2230s, ARRAY_SIZE(ioreqs_init_al2230s)); if (r) @@ -328,7 +330,7 @@ static int zd1211b_al2230_init_hw(struct zd_rf *rf) if (r) return r; - if (chip->al2230s_bit) + if (IS_AL2230S(chip)) r = zd_rfwrite_locked(chip, 0x241000, RF_RV_BITS); else r = zd_rfwrite_locked(chip, 0x25a000, RF_RV_BITS); @@ -422,7 +424,7 @@ int zd_rf_init_al2230(struct zd_rf *rf) struct zd_chip *chip = zd_rf_to_chip(rf); rf->switch_radio_off = al2230_switch_radio_off; - if (chip->is_zd1211b) { + if (zd_chip_is_zd1211b(chip)) { rf->init_hw = zd1211b_al2230_init_hw; rf->set_channel = 
zd1211b_al2230_set_channel; rf->switch_radio_on = zd1211b_al2230_switch_radio_on; diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c index f4e8b6ada854..73d0bb26f810 100644 --- a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c +++ b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c @@ -473,7 +473,7 @@ int zd_rf_init_al7230b(struct zd_rf *rf) { struct zd_chip *chip = zd_rf_to_chip(rf); - if (chip->is_zd1211b) { + if (zd_chip_is_zd1211b(chip)) { rf->init_hw = zd1211b_al7230b_init_hw; rf->switch_radio_on = zd1211b_al7230b_switch_radio_on; rf->set_channel = zd1211b_al7230b_set_channel; diff --git a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c index 2d736bdf707c..cc70d40684ea 100644 --- a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c +++ b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c @@ -265,7 +265,7 @@ int zd_rf_init_rf2959(struct zd_rf *rf) { struct zd_chip *chip = zd_rf_to_chip(rf); - if (chip->is_zd1211b) { + if (zd_chip_is_zd1211b(chip)) { dev_err(zd_chip_dev(chip), "RF2959 is currently not supported for ZD1211B" " devices\n"); diff --git a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c index 414e40d571ab..857dcf3eae61 100644 --- a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c +++ b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c @@ -486,7 +486,7 @@ static int uw2453_switch_radio_on(struct zd_rf *rf) if (r) return r; - if (chip->is_zd1211b) + if (zd_chip_is_zd1211b(chip)) ioreqs[1].value = 0x7f; return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index 740a2194fdde..a9c339ef116a 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c @@ -15,7 +15,6 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include <asm/unaligned.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> @@ -26,6 +25,7 @@ #include <linux/usb.h> #include <linux/workqueue.h> #include <net/ieee80211.h> +#include <asm/unaligned.h> #include "zd_def.h" #include "zd_netdev.h" @@ -71,6 +71,9 @@ static struct usb_device_id usb_ids[] = { { USB_DEVICE(0x0586, 0x3412), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0586, 0x3413), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x2019, 0x5303), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x129b, 0x1667), .driver_info = DEVICE_ZD1211B }, /* "Driverless" devices that need ejecting */ { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER }, @@ -195,26 +198,27 @@ static u16 get_word(const void *data, u16 offset) return le16_to_cpu(p[offset]); } -static char *get_fw_name(char *buffer, size_t size, u8 device_type, +static char *get_fw_name(struct zd_usb *usb, char *buffer, size_t size, const char* postfix) { scnprintf(buffer, size, "%s%s", - device_type == DEVICE_ZD1211B ? + usb->is_zd1211b ? 
FW_ZD1211B_PREFIX : FW_ZD1211_PREFIX, postfix); return buffer; } -static int handle_version_mismatch(struct usb_device *udev, u8 device_type, +static int handle_version_mismatch(struct zd_usb *usb, const struct firmware *ub_fw) { + struct usb_device *udev = zd_usb_to_usbdev(usb); const struct firmware *ur_fw = NULL; int offset; int r = 0; char fw_name[128]; r = request_fw_file(&ur_fw, - get_fw_name(fw_name, sizeof(fw_name), device_type, "ur"), + get_fw_name(usb, fw_name, sizeof(fw_name), "ur"), &udev->dev); if (r) goto error; @@ -237,11 +241,12 @@ error: return r; } -static int upload_firmware(struct usb_device *udev, u8 device_type) +static int upload_firmware(struct zd_usb *usb) { int r; u16 fw_bcdDevice; u16 bcdDevice; + struct usb_device *udev = zd_usb_to_usbdev(usb); const struct firmware *ub_fw = NULL; const struct firmware *uph_fw = NULL; char fw_name[128]; @@ -249,7 +254,7 @@ static int upload_firmware(struct usb_device *udev, u8 device_type) bcdDevice = get_bcdDevice(udev); r = request_fw_file(&ub_fw, - get_fw_name(fw_name, sizeof(fw_name), device_type, "ub"), + get_fw_name(usb, fw_name, sizeof(fw_name), "ub"), &udev->dev); if (r) goto error; @@ -264,7 +269,7 @@ static int upload_firmware(struct usb_device *udev, u8 device_type) dev_warn(&udev->dev, "device has old bootcode, please " "report success or failure\n"); - r = handle_version_mismatch(udev, device_type, ub_fw); + r = handle_version_mismatch(usb, ub_fw); if (r) goto error; } else { @@ -275,7 +280,7 @@ static int upload_firmware(struct usb_device *udev, u8 device_type) r = request_fw_file(&uph_fw, - get_fw_name(fw_name, sizeof(fw_name), device_type, "uphr"), + get_fw_name(usb, fw_name, sizeof(fw_name), "uphr"), &udev->dev); if (r) goto error; @@ -294,6 +299,30 @@ error: return r; } +/* Read data from device address space using "firmware interface" which does + * not require firmware to be loaded. */ +int zd_usb_read_fw(struct zd_usb *usb, zd_addr_t addr, u8 *data, u16 len) +{ + int r; + struct usb_device *udev = zd_usb_to_usbdev(usb); + + r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), + USB_REQ_FIRMWARE_READ_DATA, USB_DIR_IN | 0x40, addr, 0, + data, len, 5000); + if (r < 0) { + dev_err(&udev->dev, + "read over firmware interface failed: %d\n", r); + return r; + } else if (r != len) { + dev_err(&udev->dev, + "incomplete read over firmware interface: %d/%d\n", + r, len); + return -EIO; + } + + return 0; +} + #define urb_dev(urb) (&(urb)->dev->dev) static inline void handle_regs_int(struct urb *urb) @@ -920,9 +949,42 @@ static int eject_installer(struct usb_interface *intf) return 0; } +int zd_usb_init_hw(struct zd_usb *usb) +{ + int r; + struct zd_mac *mac = zd_usb_to_mac(usb); + + dev_dbg_f(zd_usb_dev(usb), "\n"); + + r = upload_firmware(usb); + if (r) { + dev_err(zd_usb_dev(usb), + "couldn't load firmware. Error number %d\n", r); + return r; + } + + r = usb_reset_configuration(zd_usb_to_usbdev(usb)); + if (r) { + dev_dbg_f(zd_usb_dev(usb), + "couldn't reset configuration. Error number %d\n", r); + return r; + } + + r = zd_mac_init_hw(mac); + if (r) { + dev_dbg_f(zd_usb_dev(usb), + "couldn't initialize mac. 
Error number %d\n", r); + return r; + } + + usb->initialized = 1; + return 0; +} + static int probe(struct usb_interface *intf, const struct usb_device_id *id) { int r; + struct zd_usb *usb; struct usb_device *udev = interface_to_usbdev(intf); struct net_device *netdev = NULL; @@ -950,26 +1012,10 @@ static int probe(struct usb_interface *intf, const struct usb_device_id *id) goto error; } - r = upload_firmware(udev, id->driver_info); - if (r) { - dev_err(&intf->dev, - "couldn't load firmware. Error number %d\n", r); - goto error; - } + usb = &zd_netdev_mac(netdev)->chip.usb; + usb->is_zd1211b = (id->driver_info == DEVICE_ZD1211B) != 0; - r = usb_reset_configuration(udev); - if (r) { - dev_dbg_f(&intf->dev, - "couldn't reset configuration. Error number %d\n", r); - goto error; - } - - /* At this point the interrupt endpoint is not generally enabled. We - * save the USB bandwidth until the network device is opened. But - * notify that the initialization of the MAC will require the - * interrupts to be temporary enabled. - */ - r = zd_mac_init_hw(zd_netdev_mac(netdev), id->driver_info); + r = zd_mac_preinit_hw(zd_netdev_mac(netdev)); if (r) { dev_dbg_f(&intf->dev, "couldn't initialize mac. Error number %d\n", r); diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h index 506ea6a74393..961a7a12ad68 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.h +++ b/drivers/net/wireless/zd1211rw/zd_usb.h @@ -188,6 +188,7 @@ struct zd_usb { struct zd_usb_rx rx; struct zd_usb_tx tx; struct usb_interface *intf; + u8 is_zd1211b:1, initialized:1; }; #define zd_usb_dev(usb) (&usb->intf->dev) @@ -236,6 +237,8 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits); +int zd_usb_read_fw(struct zd_usb *usb, zd_addr_t addr, u8 *data, u16 len); + extern struct workqueue_struct *zd_workqueue; #endif /* _ZD_USB_H */ diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c new file mode 100644 index 000000000000..489f69c5d6ca --- /dev/null +++ b/drivers/net/xen-netfront.c @@ -0,0 +1,1863 @@ +/* + * Virtual network driver for conversing with remote driver backends. + * + * Copyright (c) 2002-2005, K A Fraser + * Copyright (c) 2005, XenSource Ltd + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/ethtool.h> +#include <linux/if_ether.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <linux/moduleparam.h> +#include <linux/mm.h> +#include <net/ip.h> + +#include <xen/xenbus.h> +#include <xen/events.h> +#include <xen/page.h> +#include <xen/grant_table.h> + +#include <xen/interface/io/netif.h> +#include <xen/interface/memory.h> +#include <xen/interface/grant_table.h> + +static struct ethtool_ops xennet_ethtool_ops; + +struct netfront_cb { + struct page *page; + unsigned offset; +}; + +#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) + +#define RX_COPY_THRESHOLD 256 + +#define GRANT_INVALID_REF 0 + +#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE) +#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE) +#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) + +struct netfront_info { + struct list_head list; + struct net_device *netdev; + + struct net_device_stats stats; + + struct xen_netif_tx_front_ring tx; + struct xen_netif_rx_front_ring rx; + + spinlock_t tx_lock; + spinlock_t rx_lock; + + unsigned int evtchn; + + /* Receive-ring batched refills. */ +#define RX_MIN_TARGET 8 +#define RX_DFL_MIN_TARGET 64 +#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) + unsigned rx_min_target, rx_max_target, rx_target; + struct sk_buff_head rx_batch; + + struct timer_list rx_refill_timer; + + /* + * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries + * are linked from tx_skb_freelist through skb_entry.link. + * + * NB. Freelist index entries are always going to be less than + * PAGE_OFFSET, whereas pointers to skbs will always be equal or + * greater than PAGE_OFFSET: we use this property to distinguish + * them. + */ + union skb_entry { + struct sk_buff *skb; + unsigned link; + } tx_skbs[NET_TX_RING_SIZE]; + grant_ref_t gref_tx_head; + grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; + unsigned tx_skb_freelist; + + struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; + grant_ref_t gref_rx_head; + grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; + + struct xenbus_device *xbdev; + int tx_ring_ref; + int rx_ring_ref; + + unsigned long rx_pfn_array[NET_RX_RING_SIZE]; + struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; + struct mmu_update rx_mmu[NET_RX_RING_SIZE]; +}; + +struct netfront_rx_info { + struct xen_netif_rx_response rx; + struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; +}; + +/* + * Access macros for acquiring freeing slots in tx_skbs[]. 
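
The tx_skbs[] comment above describes how one array doubles as a free list: a slot holds either an skb pointer or the index of the next free slot, and the two are distinguished purely by magnitude, since array indices are small integers while kernel pointers always compare at or above PAGE_OFFSET. A hypothetical predicate, not part of the driver, that makes the discrimination explicit (it relies on the union skb_entry declared above):

#include <asm/page.h>

/* Free-list links are indices (< PAGE_OFFSET); stored skbs are
 * kernel pointers (>= PAGE_OFFSET). */
static int skb_entry_holds_skb(const union skb_entry *entry)
{
	return (unsigned long)entry->skb >= PAGE_OFFSET;
}
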
+ */ + +static void add_id_to_freelist(unsigned *head, union skb_entry *list, + unsigned short id) +{ + list[id].link = *head; + *head = id; +} + +static unsigned short get_id_from_freelist(unsigned *head, + union skb_entry *list) +{ + unsigned int id = *head; + *head = list[id].link; + return id; +} + +static int xennet_rxidx(RING_IDX idx) +{ + return idx & (NET_RX_RING_SIZE - 1); +} + +static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, + RING_IDX ri) +{ + int i = xennet_rxidx(ri); + struct sk_buff *skb = np->rx_skbs[i]; + np->rx_skbs[i] = NULL; + return skb; +} + +static grant_ref_t xennet_get_rx_ref(struct netfront_info *np, + RING_IDX ri) +{ + int i = xennet_rxidx(ri); + grant_ref_t ref = np->grant_rx_ref[i]; + np->grant_rx_ref[i] = GRANT_INVALID_REF; + return ref; +} + +#ifdef CONFIG_SYSFS +static int xennet_sysfs_addif(struct net_device *netdev); +static void xennet_sysfs_delif(struct net_device *netdev); +#else /* !CONFIG_SYSFS */ +#define xennet_sysfs_addif(dev) (0) +#define xennet_sysfs_delif(dev) do { } while (0) +#endif + +static int xennet_can_sg(struct net_device *dev) +{ + return dev->features & NETIF_F_SG; +} + + +static void rx_refill_timeout(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + netif_rx_schedule(dev); +} + +static int netfront_tx_slot_available(struct netfront_info *np) +{ + return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < + (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); +} + +static void xennet_maybe_wake_tx(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + + if (unlikely(netif_queue_stopped(dev)) && + netfront_tx_slot_available(np) && + likely(netif_running(dev))) + netif_wake_queue(dev); +} + +static void xennet_alloc_rx_buffers(struct net_device *dev) +{ + unsigned short id; + struct netfront_info *np = netdev_priv(dev); + struct sk_buff *skb; + struct page *page; + int i, batch_target, notify; + RING_IDX req_prod = np->rx.req_prod_pvt; + struct xen_memory_reservation reservation; + grant_ref_t ref; + unsigned long pfn; + void *vaddr; + int nr_flips; + struct xen_netif_rx_request *req; + + if (unlikely(!netif_carrier_ok(dev))) + return; + + /* + * Allocate skbuffs greedily, even though we batch updates to the + * receive ring. This creates a less bursty demand on the memory + * allocator, so should reduce the chance of failed allocation requests + * both for ourself and for other kernel subsystems. + */ + batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); + for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { + skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + goto no_skb; + + page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); + if (!page) { + kfree_skb(skb); +no_skb: + /* Any skbuffs queued for refill? Force them out. */ + if (i != 0) + goto refill; + /* Could not allocate any skbuffs. Try again later. */ + mod_timer(&np->rx_refill_timer, + jiffies + (HZ/10)); + break; + } + + skb_shinfo(skb)->frags[0].page = page; + skb_shinfo(skb)->nr_frags = 1; + __skb_queue_tail(&np->rx_batch, skb); + } + + /* Is the batch large enough to be worthwhile? */ + if (i < (np->rx_target/2)) { + if (req_prod > np->rx.sring->req_prod) + goto push; + return; + } + + /* Adjust our fill target if we risked running out of buffers. 
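
When the GFP_ATOMIC allocations in xennet_alloc_rx_buffers() above fail, the driver does not spin; it re-arms rx_refill_timer and retries roughly 100 ms later via the poll path. The general shape of such a deferred-retry timer with the 2.6-era timer API (the example_* names are illustrative):

#include <linux/netdevice.h>
#include <linux/timer.h>

static void example_refill_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;

	netif_rx_schedule(dev);	/* the poll routine will retry the refill */
}

static void example_refill_init(struct timer_list *t, struct net_device *dev)
{
	init_timer(t);
	t->data = (unsigned long)dev;
	t->function = example_refill_timeout;
}

static void example_refill_later(struct timer_list *t)
{
	mod_timer(t, jiffies + HZ / 10);	/* retry in ~100 ms */
}
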
*/ + if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && + ((np->rx_target *= 2) > np->rx_max_target)) + np->rx_target = np->rx_max_target; + + refill: + for (nr_flips = i = 0; ; i++) { + skb = __skb_dequeue(&np->rx_batch); + if (skb == NULL) + break; + + skb->dev = dev; + + id = xennet_rxidx(req_prod + i); + + BUG_ON(np->rx_skbs[id]); + np->rx_skbs[id] = skb; + + ref = gnttab_claim_grant_reference(&np->gref_rx_head); + BUG_ON((signed short)ref < 0); + np->grant_rx_ref[id] = ref; + + pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); + vaddr = page_address(skb_shinfo(skb)->frags[0].page); + + req = RING_GET_REQUEST(&np->rx, req_prod + i); + gnttab_grant_foreign_access_ref(ref, + np->xbdev->otherend_id, + pfn_to_mfn(pfn), + 0); + + req->id = id; + req->gref = ref; + } + + if (nr_flips != 0) { + reservation.extent_start = np->rx_pfn_array; + reservation.nr_extents = nr_flips; + reservation.extent_order = 0; + reservation.address_bits = 0; + reservation.domid = DOMID_SELF; + + if (!xen_feature(XENFEAT_auto_translated_physmap)) { + /* After all PTEs have been zapped, flush the TLB. */ + np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = + UVMF_TLB_FLUSH|UVMF_ALL; + + /* Give away a batch of pages. */ + np->rx_mcl[i].op = __HYPERVISOR_memory_op; + np->rx_mcl[i].args[0] = XENMEM_decrease_reservation; + np->rx_mcl[i].args[1] = (unsigned long)&reservation; + + /* Zap PTEs and give away pages in one big + * multicall. */ + (void)HYPERVISOR_multicall(np->rx_mcl, i+1); + + /* Check return status of HYPERVISOR_memory_op(). */ + if (unlikely(np->rx_mcl[i].result != i)) + panic("Unable to reduce memory reservation\n"); + } else { + if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, + &reservation) != i) + panic("Unable to reduce memory reservation\n"); + } + } else { + wmb(); /* barrier so backend sees requests */ + } + + /* Above is a suitable barrier to ensure backend will see requests. */ + np->rx.req_prod_pvt = req_prod + i; + push: + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); + if (notify) + notify_remote_via_irq(np->netdev->irq); +} + +static int xennet_open(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + + memset(&np->stats, 0, sizeof(np->stats)); + + spin_lock_bh(&np->rx_lock); + if (netif_carrier_ok(dev)) { + xennet_alloc_rx_buffers(dev); + np->rx.sring->rsp_event = np->rx.rsp_cons + 1; + if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) + netif_rx_schedule(dev); + } + spin_unlock_bh(&np->rx_lock); + + xennet_maybe_wake_tx(dev); + + return 0; +} + +static void xennet_tx_buf_gc(struct net_device *dev) +{ + RING_IDX cons, prod; + unsigned short id; + struct netfront_info *np = netdev_priv(dev); + struct sk_buff *skb; + + BUG_ON(!netif_carrier_ok(dev)); + + do { + prod = np->tx.sring->rsp_prod; + rmb(); /* Ensure we see responses up to 'rp'.
*/ + + for (cons = np->tx.rsp_cons; cons != prod; cons++) { + struct xen_netif_tx_response *txrsp; + + txrsp = RING_GET_RESPONSE(&np->tx, cons); + if (txrsp->status == NETIF_RSP_NULL) + continue; + + id = txrsp->id; + skb = np->tx_skbs[id].skb; + if (unlikely(gnttab_query_foreign_access( + np->grant_tx_ref[id]) != 0)) { + printk(KERN_ALERT "xennet_tx_buf_gc: warning " + "-- grant still in use by backend " + "domain.\n"); + BUG(); + } + gnttab_end_foreign_access_ref( + np->grant_tx_ref[id], GNTMAP_readonly); + gnttab_release_grant_reference( + &np->gref_tx_head, np->grant_tx_ref[id]); + np->grant_tx_ref[id] = GRANT_INVALID_REF; + add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id); + dev_kfree_skb_irq(skb); + } + + np->tx.rsp_cons = prod; + + /* + * Set a new event, then check for race with update of tx_cons. + * Note that it is essential to schedule a callback, no matter + * how few buffers are pending. Even if there is space in the + * transmit ring, higher layers may be blocked because too much + * data is outstanding: in such cases notification from Xen is + * likely to be the only kick that we'll get. + */ + np->tx.sring->rsp_event = + prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; + mb(); /* update shared area */ + } while ((cons == prod) && (prod != np->tx.sring->rsp_prod)); + + xennet_maybe_wake_tx(dev); +} + +static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, + struct xen_netif_tx_request *tx) +{ + struct netfront_info *np = netdev_priv(dev); + char *data = skb->data; + unsigned long mfn; + RING_IDX prod = np->tx.req_prod_pvt; + int frags = skb_shinfo(skb)->nr_frags; + unsigned int offset = offset_in_page(data); + unsigned int len = skb_headlen(skb); + unsigned int id; + grant_ref_t ref; + int i; + + /* While the header overlaps a page boundary (including being + larger than a page), split it into page-sized chunks. */ + while (len > PAGE_SIZE - offset) { + tx->size = PAGE_SIZE - offset; + tx->flags |= NETTXF_more_data; + len -= tx->size; + data += tx->size; + offset = 0; + + id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); + np->tx_skbs[id].skb = skb_get(skb); + tx = RING_GET_REQUEST(&np->tx, prod++); + tx->id = id; + ref = gnttab_claim_grant_reference(&np->gref_tx_head); + BUG_ON((signed short)ref < 0); + + mfn = virt_to_mfn(data); + gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, + mfn, GNTMAP_readonly); + + tx->gref = np->grant_tx_ref[id] = ref; + tx->offset = offset; + tx->size = len; + tx->flags = 0; + } + + /* Grant backend access to each skb fragment page.
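
Every page handed to the backend in this function goes through the same two-step grant dance: claim a reference from the pool reserved at connect time, then bind it to the page's machine frame. Distilled into one illustrative helper (np is the netfront_info used throughout this file; a read-only grant is assumed, as on the transmit side):

#include <xen/grant_table.h>
#include <xen/page.h>

/* Grant the backend read-only access to one page and return the ref. */
static grant_ref_t example_grant_page(struct netfront_info *np,
				      struct page *page)
{
	grant_ref_t ref = gnttab_claim_grant_reference(&np->gref_tx_head);

	BUG_ON((signed short)ref < 0);	/* pool was sized at connect time */
	gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
					pfn_to_mfn(page_to_pfn(page)),
					GNTMAP_readonly);
	return ref;
}
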
*/ + for (i = 0; i < frags; i++) { + skb_frag_t *frag = skb_shinfo(skb)->frags + i; + + tx->flags |= NETTXF_more_data; + + id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); + np->tx_skbs[id].skb = skb_get(skb); + tx = RING_GET_REQUEST(&np->tx, prod++); + tx->id = id; + ref = gnttab_claim_grant_reference(&np->gref_tx_head); + BUG_ON((signed short)ref < 0); + + mfn = pfn_to_mfn(page_to_pfn(frag->page)); + gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, + mfn, GNTMAP_readonly); + + tx->gref = np->grant_tx_ref[id] = ref; + tx->offset = frag->page_offset; + tx->size = frag->size; + tx->flags = 0; + } + + np->tx.req_prod_pvt = prod; +} + +static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + unsigned short id; + struct netfront_info *np = netdev_priv(dev); + struct xen_netif_tx_request *tx; + struct xen_netif_extra_info *extra; + char *data = skb->data; + RING_IDX i; + grant_ref_t ref; + unsigned long mfn; + int notify; + int frags = skb_shinfo(skb)->nr_frags; + unsigned int offset = offset_in_page(data); + unsigned int len = skb_headlen(skb); + + frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE; + if (unlikely(frags > MAX_SKB_FRAGS + 1)) { + printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", + frags); + dump_stack(); + goto drop; + } + + spin_lock_irq(&np->tx_lock); + + if (unlikely(!netif_carrier_ok(dev) || + (frags > 1 && !xennet_can_sg(dev)) || + netif_needs_gso(dev, skb))) { + spin_unlock_irq(&np->tx_lock); + goto drop; + } + + i = np->tx.req_prod_pvt; + + id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); + np->tx_skbs[id].skb = skb; + + tx = RING_GET_REQUEST(&np->tx, i); + + tx->id = id; + ref = gnttab_claim_grant_reference(&np->gref_tx_head); + BUG_ON((signed short)ref < 0); + mfn = virt_to_mfn(data); + gnttab_grant_foreign_access_ref( + ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); + tx->gref = np->grant_tx_ref[id] = ref; + tx->offset = offset; + tx->size = len; + extra = NULL; + + tx->flags = 0; + if (skb->ip_summed == CHECKSUM_PARTIAL) + /* local packet? */ + tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; + else if (skb->ip_summed == CHECKSUM_UNNECESSARY) + /* remote but checksummed. 
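get_id_from_freelist() and add_id_to_freelist(), used throughout the transmit path above, are defined earlier in this file: they thread a free list through the tx_skbs[] array itself, so a free slot stores the index of the next free slot in the same union that otherwise holds an skb pointer (values below PAGE_OFFSET are links, which is what xennet_release_tx_bufs() later relies on to skip free entries). A minimal userspace sketch of the idea; names and the ring size here are illustrative, not the driver's:

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 8

union slot {
	unsigned long link;	/* next free id, while the slot is free */
	void *skb;		/* in-flight packet, while the slot is used */
};

static unsigned long freelist;
static union slot slots[RING_SIZE];

static unsigned long get_id(void)
{
	unsigned long id = freelist;

	freelist = slots[id].link;	/* unlink the head */
	return id;
}

static void put_id(unsigned long id)
{
	slots[id].link = freelist;	/* push back on the head */
	freelist = id;
}

int main(void)
{
	unsigned long i, a, b;

	/* Chain every entry, exactly as the probe path initialises tx_skbs. */
	for (i = 0; i < RING_SIZE; i++)
		slots[i].link = i + 1;
	freelist = 0;

	a = get_id();
	b = get_id();
	printf("got %lu then %lu\n", a, b);	/* 0 then 1 */
	put_id(a);
	assert(get_id() == a);			/* LIFO reuse */
	return 0;
}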
*/ + tx->flags |= NETTXF_data_validated; + + if (skb_shinfo(skb)->gso_size) { + struct xen_netif_extra_info *gso; + + gso = (struct xen_netif_extra_info *) + RING_GET_REQUEST(&np->tx, ++i); + + if (extra) + extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; + else + tx->flags |= NETTXF_extra_info; + + gso->u.gso.size = skb_shinfo(skb)->gso_size; + gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; + gso->u.gso.pad = 0; + gso->u.gso.features = 0; + + gso->type = XEN_NETIF_EXTRA_TYPE_GSO; + gso->flags = 0; + extra = gso; + } + + np->tx.req_prod_pvt = i + 1; + + xennet_make_frags(skb, dev, tx); + tx->size = skb->len; + + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); + if (notify) + notify_remote_via_irq(np->netdev->irq); + + xennet_tx_buf_gc(dev); + + if (!netfront_tx_slot_available(np)) + netif_stop_queue(dev); + + spin_unlock_irq(&np->tx_lock); + + np->stats.tx_bytes += skb->len; + np->stats.tx_packets++; + + return 0; + + drop: + np->stats.tx_dropped++; + dev_kfree_skb(skb); + return 0; +} + +static int xennet_close(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + netif_stop_queue(np->netdev); + return 0; +} + +static struct net_device_stats *xennet_get_stats(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + return &np->stats; +} + +static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, + grant_ref_t ref) +{ + int new = xennet_rxidx(np->rx.req_prod_pvt); + + BUG_ON(np->rx_skbs[new]); + np->rx_skbs[new] = skb; + np->grant_rx_ref[new] = ref; + RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; + RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; + np->rx.req_prod_pvt++; +} + +static int xennet_get_extras(struct netfront_info *np, + struct xen_netif_extra_info *extras, + RING_IDX rp) + +{ + struct xen_netif_extra_info *extra; + struct device *dev = &np->netdev->dev; + RING_IDX cons = np->rx.rsp_cons; + int err = 0; + + do { + struct sk_buff *skb; + grant_ref_t ref; + + if (unlikely(cons + 1 == rp)) { + if (net_ratelimit()) + dev_warn(dev, "Missing extra info\n"); + err = -EBADR; + break; + } + + extra = (struct xen_netif_extra_info *) + RING_GET_RESPONSE(&np->rx, ++cons); + + if (unlikely(!extra->type || + extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { + if (net_ratelimit()) + dev_warn(dev, "Invalid extra type: %d\n", + extra->type); + err = -EINVAL; + } else { + memcpy(&extras[extra->type - 1], extra, + sizeof(*extra)); + } + + skb = xennet_get_rx_skb(np, cons); + ref = xennet_get_rx_ref(np, cons); + xennet_move_rx_slot(np, skb, ref); + } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); + + np->rx.rsp_cons = cons; + return err; +} + +static int xennet_get_responses(struct netfront_info *np, + struct netfront_rx_info *rinfo, RING_IDX rp, + struct sk_buff_head *list) +{ + struct xen_netif_rx_response *rx = &rinfo->rx; + struct xen_netif_extra_info *extras = rinfo->extras; + struct device *dev = &np->netdev->dev; + RING_IDX cons = np->rx.rsp_cons; + struct sk_buff *skb = xennet_get_rx_skb(np, cons); + grant_ref_t ref = xennet_get_rx_ref(np, cons); + int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); + int frags = 1; + int err = 0; + unsigned long ret; + + if (rx->flags & NETRXF_extra_info) { + err = xennet_get_extras(np, extras, rp); + cons = np->rx.rsp_cons; + } + + for (;;) { + if (unlikely(rx->status < 0 || + rx->offset + rx->status > PAGE_SIZE)) { + if (net_ratelimit()) + dev_warn(dev, "rx->offset: %x, size: %u\n", + rx->offset, rx->status); + xennet_move_rx_slot(np, skb, ref); + err = -EINVAL; + 
goto next;
+		}
+
+		/*
+		 * This definitely indicates a bug, either in this driver or in
+		 * the backend driver. In future this should flag the bad
+		 * situation to the system controller to reboot the backend.
+		 */
+		if (ref == GRANT_INVALID_REF) {
+			if (net_ratelimit())
+				dev_warn(dev, "Bad rx response id %d.\n",
+					 rx->id);
+			err = -EINVAL;
+			goto next;
+		}
+
+		ret = gnttab_end_foreign_access_ref(ref, 0);
+		BUG_ON(!ret);
+
+		gnttab_release_grant_reference(&np->gref_rx_head, ref);
+
+		__skb_queue_tail(list, skb);
+
+next:
+		if (!(rx->flags & NETRXF_more_data))
+			break;
+
+		if (cons + frags == rp) {
+			if (net_ratelimit())
+				dev_warn(dev, "Need more frags\n");
+			err = -ENOENT;
+			break;
+		}
+
+		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
+		skb = xennet_get_rx_skb(np, cons + frags);
+		ref = xennet_get_rx_ref(np, cons + frags);
+		frags++;
+	}
+
+	if (unlikely(frags > max)) {
+		if (net_ratelimit())
+			dev_warn(dev, "Too many frags\n");
+		err = -E2BIG;
+	}
+
+	if (unlikely(err))
+		np->rx.rsp_cons = cons + frags;
+
+	return err;
+}
+
+static int xennet_set_skb_gso(struct sk_buff *skb,
+			      struct xen_netif_extra_info *gso)
+{
+	if (!gso->u.gso.size) {
+		if (net_ratelimit())
+			printk(KERN_WARNING "GSO size must not be zero.\n");
+		return -EINVAL;
+	}
+
+	/* Currently only TCPv4 segmentation offload is supported. */
+	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+		if (net_ratelimit())
+			printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
+		return -EINVAL;
+	}
+
+	skb_shinfo(skb)->gso_size = gso->u.gso.size;
+	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
+	/* Header must be checked, and gso_segs computed. */
+	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
+	skb_shinfo(skb)->gso_segs = 0;
+
+	return 0;
+}
+
+static RING_IDX xennet_fill_frags(struct netfront_info *np,
+				  struct sk_buff *skb,
+				  struct sk_buff_head *list)
+{
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	int nr_frags = shinfo->nr_frags;
+	RING_IDX cons = np->rx.rsp_cons;
+	skb_frag_t *frag = shinfo->frags + nr_frags;
+	struct sk_buff *nskb;
+
+	while ((nskb = __skb_dequeue(list))) {
+		struct xen_netif_rx_response *rx =
+			RING_GET_RESPONSE(&np->rx, ++cons);
+
+		frag->page = skb_shinfo(nskb)->frags[0].page;
+		frag->page_offset = rx->offset;
+		frag->size = rx->status;
+
+		skb->data_len += rx->status;
+
+		skb_shinfo(nskb)->nr_frags = 0;
+		kfree_skb(nskb);
+
+		frag++;
+		nr_frags++;
+	}
+
+	shinfo->nr_frags = nr_frags;
+	return cons;
+}
+
+static int skb_checksum_setup(struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	unsigned char *th;
+	int err = -EPROTO;
+
+	if (skb->protocol != htons(ETH_P_IP))
+		goto out;
+
+	iph = (void *)skb->data;
+	th = skb->data + 4 * iph->ihl;
+	if (th >= skb_tail_pointer(skb))
+		goto out;
+
+	skb->csum_start = th - skb->head;
+	switch (iph->protocol) {
+	case IPPROTO_TCP:
+		skb->csum_offset = offsetof(struct tcphdr, check);
+		break;
+	case IPPROTO_UDP:
+		skb->csum_offset = offsetof(struct udphdr, check);
+		break;
+	default:
+		if (net_ratelimit())
+			printk(KERN_ERR "Attempting to checksum a non-"
+			       "TCP/UDP packet, dropping a protocol"
+			       " %d packet\n", iph->protocol);
+		goto out;
+	}
+
+	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
+		goto out;
+
+	err = 0;
+
+out:
+	return err;
+}
+
+static int handle_incoming_queue(struct net_device *dev,
+				 struct sk_buff_head *rxq)
+{
+	struct netfront_info *np = netdev_priv(dev);
+	int packets_dropped = 0;
+	struct sk_buff *skb;
+
+	while ((skb = __skb_dequeue(rxq)) != NULL) {
+		struct page *page = NETFRONT_SKB_CB(skb)->page;
+		void *vaddr = page_address(page);
+		unsigned 
offset = NETFRONT_SKB_CB(skb)->offset; + + memcpy(skb->data, vaddr + offset, + skb_headlen(skb)); + + if (page != skb_shinfo(skb)->frags[0].page) + __free_page(page); + + /* Ethernet work: Delayed to here as it peeks the header. */ + skb->protocol = eth_type_trans(skb, dev); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb_checksum_setup(skb)) { + kfree_skb(skb); + packets_dropped++; + np->stats.rx_errors++; + continue; + } + } + + np->stats.rx_packets++; + np->stats.rx_bytes += skb->len; + + /* Pass it up. */ + netif_receive_skb(skb); + dev->last_rx = jiffies; + } + + return packets_dropped; +} + +static int xennet_poll(struct net_device *dev, int *pbudget) +{ + struct netfront_info *np = netdev_priv(dev); + struct sk_buff *skb; + struct netfront_rx_info rinfo; + struct xen_netif_rx_response *rx = &rinfo.rx; + struct xen_netif_extra_info *extras = rinfo.extras; + RING_IDX i, rp; + int work_done, budget, more_to_do = 1; + struct sk_buff_head rxq; + struct sk_buff_head errq; + struct sk_buff_head tmpq; + unsigned long flags; + unsigned int len; + int err; + + spin_lock(&np->rx_lock); + + if (unlikely(!netif_carrier_ok(dev))) { + spin_unlock(&np->rx_lock); + return 0; + } + + skb_queue_head_init(&rxq); + skb_queue_head_init(&errq); + skb_queue_head_init(&tmpq); + + budget = *pbudget; + if (budget > dev->quota) + budget = dev->quota; + rp = np->rx.sring->rsp_prod; + rmb(); /* Ensure we see queued responses up to 'rp'. */ + + i = np->rx.rsp_cons; + work_done = 0; + while ((i != rp) && (work_done < budget)) { + memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); + memset(extras, 0, sizeof(rinfo.extras)); + + err = xennet_get_responses(np, &rinfo, rp, &tmpq); + + if (unlikely(err)) { +err: + while ((skb = __skb_dequeue(&tmpq))) + __skb_queue_tail(&errq, skb); + np->stats.rx_errors++; + i = np->rx.rsp_cons; + continue; + } + + skb = __skb_dequeue(&tmpq); + + if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { + struct xen_netif_extra_info *gso; + gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; + + if (unlikely(xennet_set_skb_gso(skb, gso))) { + __skb_queue_head(&tmpq, skb); + np->rx.rsp_cons += skb_queue_len(&tmpq); + goto err; + } + } + + NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page; + NETFRONT_SKB_CB(skb)->offset = rx->offset; + + len = rx->status; + if (len > RX_COPY_THRESHOLD) + len = RX_COPY_THRESHOLD; + skb_put(skb, len); + + if (rx->status > len) { + skb_shinfo(skb)->frags[0].page_offset = + rx->offset + len; + skb_shinfo(skb)->frags[0].size = rx->status - len; + skb->data_len = rx->status - len; + } else { + skb_shinfo(skb)->frags[0].page = NULL; + skb_shinfo(skb)->nr_frags = 0; + } + + i = xennet_fill_frags(np, skb, &tmpq); + + /* + * Truesize approximates the size of true data plus + * any supervisor overheads. Adding hypervisor + * overheads has been shown to significantly reduce + * achievable bandwidth with the default receive + * buffer size. It is therefore not wise to account + * for it here. + * + * After alloc_skb(RX_COPY_THRESHOLD), truesize is set + * to RX_COPY_THRESHOLD + the supervisor + * overheads. Here, we add the size of the data pulled + * in xennet_fill_frags(). + * + * We also adjust for any unused space in the main + * data area by subtracting (RX_COPY_THRESHOLD - + * len). This is especially important with drivers + * which split incoming packets into header and data, + * using only 66 bytes of the main data area (see the + * e1000 driver for example.) 
On such systems,
+		 * without this last adjustment, our achievable
+		 * receive throughput using the standard receive
+		 * buffer size was cut by 25%(!!!).
+		 */
+		skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
+		skb->len += skb->data_len;
+
+		if (rx->flags & NETRXF_csum_blank)
+			skb->ip_summed = CHECKSUM_PARTIAL;
+		else if (rx->flags & NETRXF_data_validated)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		__skb_queue_tail(&rxq, skb);
+
+		np->rx.rsp_cons = ++i;
+		work_done++;
+	}
+
+	while ((skb = __skb_dequeue(&errq)))
+		kfree_skb(skb);
+
+	work_done -= handle_incoming_queue(dev, &rxq);
+
+	/* If we get a callback with very few responses, reduce fill target. */
+	/* NB. Note exponential increase, linear decrease. */
+	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
+	     ((3*np->rx_target) / 4)) &&
+	    (--np->rx_target < np->rx_min_target))
+		np->rx_target = np->rx_min_target;
+
+	xennet_alloc_rx_buffers(dev);
+
+	*pbudget -= work_done;
+	dev->quota -= work_done;
+
+	if (work_done < budget) {
+		local_irq_save(flags);
+
+		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
+		if (!more_to_do)
+			__netif_rx_complete(dev);
+
+		local_irq_restore(flags);
+	}
+
+	spin_unlock(&np->rx_lock);
+
+	return more_to_do;
+}
+
+static int xennet_change_mtu(struct net_device *dev, int mtu)
+{
+	int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
+
+	if (mtu > max)
+		return -EINVAL;
+	dev->mtu = mtu;
+	return 0;
+}
+
+static void xennet_release_tx_bufs(struct netfront_info *np)
+{
+	struct sk_buff *skb;
+	int i;
+
+	for (i = 0; i < NET_TX_RING_SIZE; i++) {
+		/* Skip over entries which are actually freelist references */
+		if ((unsigned long)np->tx_skbs[i].skb < PAGE_OFFSET)
+			continue;
+
+		skb = np->tx_skbs[i].skb;
+		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
+					      GNTMAP_readonly);
+		gnttab_release_grant_reference(&np->gref_tx_head,
+					       np->grant_tx_ref[i]);
+		np->grant_tx_ref[i] = GRANT_INVALID_REF;
+		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
+		dev_kfree_skb_irq(skb);
+	}
+}
+
+static void xennet_release_rx_bufs(struct netfront_info *np)
+{
+	struct mmu_update *mmu = np->rx_mmu;
+	struct multicall_entry *mcl = np->rx_mcl;
+	struct sk_buff_head free_list;
+	struct sk_buff *skb;
+	unsigned long mfn;
+	int xfer = 0, noxfer = 0, unused = 0;
+	int id, ref;
+
+	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
+		 __func__);
+	return;
+
+	skb_queue_head_init(&free_list);
+
+	spin_lock_bh(&np->rx_lock);
+
+	for (id = 0; id < NET_RX_RING_SIZE; id++) {
+		ref = np->grant_rx_ref[id];
+		if (ref == GRANT_INVALID_REF) {
+			unused++;
+			continue;
+		}
+
+		skb = np->rx_skbs[id];
+		mfn = gnttab_end_foreign_transfer_ref(ref);
+		gnttab_release_grant_reference(&np->gref_rx_head, ref);
+		np->grant_rx_ref[id] = GRANT_INVALID_REF;
+
+		if (0 == mfn) {
+			skb_shinfo(skb)->nr_frags = 0;
+			dev_kfree_skb(skb);
+			noxfer++;
+			continue;
+		}
+
+		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+			/* Remap the page. 
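The receive fill target is adapted in two places: xennet_alloc_rx_buffers() (near the top of this excerpt) doubles rx_target when the ring runs low, and xennet_poll() above decrements it when a callback arrives with plenty of requests still queued — exponential increase, linear decrease, as the comment says. A sketch of the two adjustments side by side; the initial and bounding values are assumptions standing in for RX_DFL_MIN_TARGET and RX_MAX_TARGET:

#include <stdio.h>

struct rx_ctl {
	int target, min_target, max_target;
};

static void on_ring_low(struct rx_ctl *c)	/* exponential increase */
{
	c->target *= 2;
	if (c->target > c->max_target)
		c->target = c->max_target;
}

static void on_quiet_callback(struct rx_ctl *c)	/* linear decrease */
{
	if (--c->target < c->min_target)
		c->target = c->min_target;
}

int main(void)
{
	struct rx_ctl c = { .target = 8, .min_target = 8, .max_target = 256 };

	on_ring_low(&c);
	on_ring_low(&c);
	printf("after two low-water events: %d\n", c.target);	/* 32 */
	on_quiet_callback(&c);
	printf("after one quiet callback: %d\n", c.target);	/* 31 */
	return 0;
}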
*/ + struct page *page = skb_shinfo(skb)->frags[0].page; + unsigned long pfn = page_to_pfn(page); + void *vaddr = page_address(page); + + MULTI_update_va_mapping(mcl, (unsigned long)vaddr, + mfn_pte(mfn, PAGE_KERNEL), + 0); + mcl++; + mmu->ptr = ((u64)mfn << PAGE_SHIFT) + | MMU_MACHPHYS_UPDATE; + mmu->val = pfn; + mmu++; + + set_phys_to_machine(pfn, mfn); + } + __skb_queue_tail(&free_list, skb); + xfer++; + } + + dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n", + __func__, xfer, noxfer, unused); + + if (xfer) { + if (!xen_feature(XENFEAT_auto_translated_physmap)) { + /* Do all the remapping work and M2P updates. */ + MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu, + 0, DOMID_SELF); + mcl++; + HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl); + } + } + + while ((skb = __skb_dequeue(&free_list)) != NULL) + dev_kfree_skb(skb); + + spin_unlock_bh(&np->rx_lock); +} + +static void xennet_uninit(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + xennet_release_tx_bufs(np); + xennet_release_rx_bufs(np); + gnttab_free_grant_references(np->gref_tx_head); + gnttab_free_grant_references(np->gref_rx_head); +} + +static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev) +{ + int i, err; + struct net_device *netdev; + struct netfront_info *np; + + netdev = alloc_etherdev(sizeof(struct netfront_info)); + if (!netdev) { + printk(KERN_WARNING "%s> alloc_etherdev failed.\n", + __func__); + return ERR_PTR(-ENOMEM); + } + + np = netdev_priv(netdev); + np->xbdev = dev; + + spin_lock_init(&np->tx_lock); + spin_lock_init(&np->rx_lock); + + skb_queue_head_init(&np->rx_batch); + np->rx_target = RX_DFL_MIN_TARGET; + np->rx_min_target = RX_DFL_MIN_TARGET; + np->rx_max_target = RX_MAX_TARGET; + + init_timer(&np->rx_refill_timer); + np->rx_refill_timer.data = (unsigned long)netdev; + np->rx_refill_timer.function = rx_refill_timeout; + + /* Initialise tx_skbs as a free chain containing every entry. */ + np->tx_skb_freelist = 0; + for (i = 0; i < NET_TX_RING_SIZE; i++) { + np->tx_skbs[i].link = i+1; + np->grant_tx_ref[i] = GRANT_INVALID_REF; + } + + /* Clear out rx_skbs */ + for (i = 0; i < NET_RX_RING_SIZE; i++) { + np->rx_skbs[i] = NULL; + np->grant_rx_ref[i] = GRANT_INVALID_REF; + } + + /* A grant for every tx ring slot */ + if (gnttab_alloc_grant_references(TX_MAX_TARGET, + &np->gref_tx_head) < 0) { + printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); + err = -ENOMEM; + goto exit; + } + /* A grant for every rx ring slot */ + if (gnttab_alloc_grant_references(RX_MAX_TARGET, + &np->gref_rx_head) < 0) { + printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); + err = -ENOMEM; + goto exit_free_tx; + } + + netdev->open = xennet_open; + netdev->hard_start_xmit = xennet_start_xmit; + netdev->stop = xennet_close; + netdev->get_stats = xennet_get_stats; + netdev->poll = xennet_poll; + netdev->uninit = xennet_uninit; + netdev->change_mtu = xennet_change_mtu; + netdev->weight = 64; + netdev->features = NETIF_F_IP_CSUM; + + SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops); + SET_MODULE_OWNER(netdev); + SET_NETDEV_DEV(netdev, &dev->dev); + + np->netdev = netdev; + + netif_carrier_off(netdev); + + return netdev; + + exit_free_tx: + gnttab_free_grant_references(np->gref_tx_head); + exit: + free_netdev(netdev); + return ERR_PTR(err); +} + +/** + * Entry point to this code when a new device is created. 
Allocate the basic + * structures and the ring buffers for communication with the backend, and + * inform the backend of the appropriate details for those. + */ +static int __devinit netfront_probe(struct xenbus_device *dev, + const struct xenbus_device_id *id) +{ + int err; + struct net_device *netdev; + struct netfront_info *info; + + netdev = xennet_create_dev(dev); + if (IS_ERR(netdev)) { + err = PTR_ERR(netdev); + xenbus_dev_fatal(dev, err, "creating netdev"); + return err; + } + + info = netdev_priv(netdev); + dev->dev.driver_data = info; + + err = register_netdev(info->netdev); + if (err) { + printk(KERN_WARNING "%s: register_netdev err=%d\n", + __func__, err); + goto fail; + } + + err = xennet_sysfs_addif(info->netdev); + if (err) { + unregister_netdev(info->netdev); + printk(KERN_WARNING "%s: add sysfs failed err=%d\n", + __func__, err); + goto fail; + } + + return 0; + + fail: + free_netdev(netdev); + dev->dev.driver_data = NULL; + return err; +} + +static void xennet_end_access(int ref, void *page) +{ + /* This frees the page as a side-effect */ + if (ref != GRANT_INVALID_REF) + gnttab_end_foreign_access(ref, 0, (unsigned long)page); +} + +static void xennet_disconnect_backend(struct netfront_info *info) +{ + /* Stop old i/f to prevent errors whilst we rebuild the state. */ + spin_lock_bh(&info->rx_lock); + spin_lock_irq(&info->tx_lock); + netif_carrier_off(info->netdev); + spin_unlock_irq(&info->tx_lock); + spin_unlock_bh(&info->rx_lock); + + if (info->netdev->irq) + unbind_from_irqhandler(info->netdev->irq, info->netdev); + info->evtchn = info->netdev->irq = 0; + + /* End access and free the pages */ + xennet_end_access(info->tx_ring_ref, info->tx.sring); + xennet_end_access(info->rx_ring_ref, info->rx.sring); + + info->tx_ring_ref = GRANT_INVALID_REF; + info->rx_ring_ref = GRANT_INVALID_REF; + info->tx.sring = NULL; + info->rx.sring = NULL; +} + +/** + * We are reconnecting to the backend, due to a suspend/resume, or a backend + * driver restart. We tear down our netif structure and recreate it, but + * leave the device-layer structures intact so that this is transparent to the + * rest of the kernel. + */ +static int netfront_resume(struct xenbus_device *dev) +{ + struct netfront_info *info = dev->dev.driver_data; + + dev_dbg(&dev->dev, "%s\n", dev->nodename); + + xennet_disconnect_backend(info); + return 0; +} + +static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) +{ + char *s, *e, *macstr; + int i; + + macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); + if (IS_ERR(macstr)) + return PTR_ERR(macstr); + + for (i = 0; i < ETH_ALEN; i++) { + mac[i] = simple_strtoul(s, &e, 16); + if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { + kfree(macstr); + return -ENOENT; + } + s = e+1; + } + + kfree(macstr); + return 0; +} + +static irqreturn_t xennet_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct netfront_info *np = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&np->tx_lock, flags); + + if (likely(netif_carrier_ok(dev))) { + xennet_tx_buf_gc(dev); + /* Under tx_lock: protects access to rx shared-ring indexes. 
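xen_net_read_mac() above expects the xenstore "mac" node to hold six colon-separated hex bytes. A userspace mirror of the same parse, with strtoul standing in for the kernel's simple_strtoul:

#include <stdio.h>
#include <stdlib.h>

#define ETH_ALEN 6

/* Parse "aa:bb:cc:dd:ee:ff"; returns 0 on success, -1 if malformed. */
static int parse_mac(const char *s, unsigned char mac[ETH_ALEN])
{
	char *e;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = (unsigned char)strtoul(s, &e, 16);
		/* Each byte must end in ':', except the last, which ends the string. */
		if (s == e || *e != ((i == ETH_ALEN - 1) ? '\0' : ':'))
			return -1;
		s = e + 1;
	}
	return 0;
}

int main(void)
{
	unsigned char mac[ETH_ALEN];

	if (parse_mac("00:16:3e:2a:b0:01", mac) == 0)
		printf("last byte %02x\n", mac[ETH_ALEN - 1]);
	return 0;
}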
*/ + if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) + netif_rx_schedule(dev); + } + + spin_unlock_irqrestore(&np->tx_lock, flags); + + return IRQ_HANDLED; +} + +static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info) +{ + struct xen_netif_tx_sring *txs; + struct xen_netif_rx_sring *rxs; + int err; + struct net_device *netdev = info->netdev; + + info->tx_ring_ref = GRANT_INVALID_REF; + info->rx_ring_ref = GRANT_INVALID_REF; + info->rx.sring = NULL; + info->tx.sring = NULL; + netdev->irq = 0; + + err = xen_net_read_mac(dev, netdev->dev_addr); + if (err) { + xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); + goto fail; + } + + txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_KERNEL); + if (!txs) { + err = -ENOMEM; + xenbus_dev_fatal(dev, err, "allocating tx ring page"); + goto fail; + } + SHARED_RING_INIT(txs); + FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); + + err = xenbus_grant_ring(dev, virt_to_mfn(txs)); + if (err < 0) { + free_page((unsigned long)txs); + goto fail; + } + + info->tx_ring_ref = err; + rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_KERNEL); + if (!rxs) { + err = -ENOMEM; + xenbus_dev_fatal(dev, err, "allocating rx ring page"); + goto fail; + } + SHARED_RING_INIT(rxs); + FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); + + err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); + if (err < 0) { + free_page((unsigned long)rxs); + goto fail; + } + info->rx_ring_ref = err; + + err = xenbus_alloc_evtchn(dev, &info->evtchn); + if (err) + goto fail; + + err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt, + IRQF_SAMPLE_RANDOM, netdev->name, + netdev); + if (err < 0) + goto fail; + netdev->irq = err; + return 0; + + fail: + return err; +} + +/* Common code used when first setting up, and when resuming. */ +static int talk_to_backend(struct xenbus_device *dev, + struct netfront_info *info) +{ + const char *message; + struct xenbus_transaction xbt; + int err; + + /* Create shared ring, alloc event channel. 
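Both rings built by setup_netfront() are pushed with RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(), which kicks the event channel only when the peer actually asked to be woken. A sketch of the underlying test (the authoritative macro lives in Xen's ring.h): notify exactly when the peer's advertised wake-up index falls inside the batch just published, with unsigned subtraction keeping the comparison correct across index wraparound.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t RING_IDX;

/* True iff req_event lies in (old_prod, new_prod]. */
static int needs_notify(RING_IDX old_prod, RING_IDX new_prod,
			RING_IDX req_event)
{
	return (RING_IDX)(new_prod - req_event) < (RING_IDX)(new_prod - old_prod);
}

int main(void)
{
	printf("%d\n", needs_notify(10, 14, 12)); /* 1: wake-up point inside batch */
	printf("%d\n", needs_notify(10, 14, 20)); /* 0: backend still catching up */
	return 0;
}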
*/
+	err = setup_netfront(dev, info);
+	if (err)
+		goto out;
+
+again:
+	err = xenbus_transaction_start(&xbt);
+	if (err) {
+		xenbus_dev_fatal(dev, err, "starting transaction");
+		goto destroy_ring;
+	}
+
+	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
+			    info->tx_ring_ref);
+	if (err) {
+		message = "writing tx ring-ref";
+		goto abort_transaction;
+	}
+	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
+			    info->rx_ring_ref);
+	if (err) {
+		message = "writing rx ring-ref";
+		goto abort_transaction;
+	}
+	err = xenbus_printf(xbt, dev->nodename,
+			    "event-channel", "%u", info->evtchn);
+	if (err) {
+		message = "writing event-channel";
+		goto abort_transaction;
+	}
+
+	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
+			    1);
+	if (err) {
+		message = "writing request-rx-copy";
+		goto abort_transaction;
+	}
+
+	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
+	if (err) {
+		message = "writing feature-rx-notify";
+		goto abort_transaction;
+	}
+
+	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
+	if (err) {
+		message = "writing feature-sg";
+		goto abort_transaction;
+	}
+
+	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
+	if (err) {
+		message = "writing feature-gso-tcpv4";
+		goto abort_transaction;
+	}
+
+	err = xenbus_transaction_end(xbt, 0);
+	if (err) {
+		if (err == -EAGAIN)
+			goto again;
+		xenbus_dev_fatal(dev, err, "completing transaction");
+		goto destroy_ring;
+	}
+
+	return 0;
+
+ abort_transaction:
+	xenbus_transaction_end(xbt, 1);
+	xenbus_dev_fatal(dev, err, "%s", message);
+ destroy_ring:
+	xennet_disconnect_backend(info);
+ out:
+	return err;
+}
+
+static int xennet_set_sg(struct net_device *dev, u32 data)
+{
+	if (data) {
+		struct netfront_info *np = netdev_priv(dev);
+		int val;
+
+		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
+				 "%d", &val) < 0)
+			val = 0;
+		if (!val)
+			return -ENOSYS;
+	} else if (dev->mtu > ETH_DATA_LEN)
+		dev->mtu = ETH_DATA_LEN;
+
+	return ethtool_op_set_sg(dev, data);
+}
+
+static int xennet_set_tso(struct net_device *dev, u32 data)
+{
+	if (data) {
+		struct netfront_info *np = netdev_priv(dev);
+		int val;
+
+		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+				 "feature-gso-tcpv4", "%d", &val) < 0)
+			val = 0;
+		if (!val)
+			return -ENOSYS;
+	}
+
+	return ethtool_op_set_tso(dev, data);
+}
+
+static void xennet_set_features(struct net_device *dev)
+{
+	/* Turn off all GSO bits except ROBUST. */
+	dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
+	dev->features |= NETIF_F_GSO_ROBUST;
+	xennet_set_sg(dev, 0);
+
+	/* We need checksum offload to enable scatter/gather and TSO. */
+	if (!(dev->features & NETIF_F_IP_CSUM))
+		return;
+
+	if (!xennet_set_sg(dev, 1))
+		xennet_set_tso(dev, 1);
+}
+
+static int xennet_connect(struct net_device *dev)
+{
+	struct netfront_info *np = netdev_priv(dev);
+	int i, requeue_idx, err;
+	struct sk_buff *skb;
+	grant_ref_t ref;
+	struct xen_netif_rx_request *req;
+	unsigned int feature_rx_copy;
+
+	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+			   "feature-rx-copy", "%u", &feature_rx_copy);
+	if (err != 1)
+		feature_rx_copy = 0;
+
+	if (!feature_rx_copy) {
+		dev_info(&dev->dev,
+			 "backend does not support copying receive path");
+		return -ENODEV;
+	}
+
+	err = talk_to_backend(np->xbdev, np);
+	if (err)
+		return err;
+
+	xennet_set_features(dev);
+
+	spin_lock_bh(&np->rx_lock);
+	spin_lock_irq(&np->tx_lock);
+
+	/* Step 1: Discard all pending TX packet fragments. 
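talk_to_backend() above batches all of its xenstore writes in one transaction and replays the whole batch when the commit returns -EAGAIN, meaning another writer raced it. The retry shape, reduced to a self-contained sketch with stubbed stand-ins for the xenbus calls (the stubs fail the commit once, then succeed, purely for demonstration):

#include <errno.h>
#include <stdio.h>

static int attempts;

static int xenbus_transaction_start(void) { return 0; }	/* stub */
static int write_entries(void) { return 0; }		/* stub */
static int xenbus_transaction_end(void)			/* stub */
{
	return ++attempts < 2 ? -EAGAIN : 0;
}

static int publish_ring_refs(void)
{
	int err;

again:
	err = xenbus_transaction_start();
	if (err)
		return err;

	err = write_entries();
	if (err)
		return err;	/* the real code aborts the transaction here */

	err = xenbus_transaction_end();
	if (err == -EAGAIN)
		goto again;	/* lost a race; replay the whole batch */
	return err;
}

int main(void)
{
	int err = publish_ring_refs();

	printf("result %d after %d attempts\n", err, attempts);
	return 0;
}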
*/ + xennet_release_tx_bufs(np); + + /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ + for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { + if (!np->rx_skbs[i]) + continue; + + skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); + ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); + req = RING_GET_REQUEST(&np->rx, requeue_idx); + + gnttab_grant_foreign_access_ref( + ref, np->xbdev->otherend_id, + pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> + frags->page)), + 0); + req->gref = ref; + req->id = requeue_idx; + + requeue_idx++; + } + + np->rx.req_prod_pvt = requeue_idx; + + /* + * Step 3: All public and private state should now be sane. Get + * ready to start sending and receiving packets and give the driver + * domain a kick because we've probably just requeued some + * packets. + */ + netif_carrier_on(np->netdev); + notify_remote_via_irq(np->netdev->irq); + xennet_tx_buf_gc(dev); + xennet_alloc_rx_buffers(dev); + + spin_unlock_irq(&np->tx_lock); + spin_unlock_bh(&np->rx_lock); + + return 0; +} + +/** + * Callback received when the backend's state changes. + */ +static void backend_changed(struct xenbus_device *dev, + enum xenbus_state backend_state) +{ + struct netfront_info *np = dev->dev.driver_data; + struct net_device *netdev = np->netdev; + + dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); + + switch (backend_state) { + case XenbusStateInitialising: + case XenbusStateInitialised: + case XenbusStateConnected: + case XenbusStateUnknown: + case XenbusStateClosed: + break; + + case XenbusStateInitWait: + if (dev->state != XenbusStateInitialising) + break; + if (xennet_connect(netdev) != 0) + break; + xenbus_switch_state(dev, XenbusStateConnected); + break; + + case XenbusStateClosing: + xenbus_frontend_closed(dev); + break; + } +} + +static struct ethtool_ops xennet_ethtool_ops = +{ + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ethtool_op_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = xennet_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = xennet_set_tso, + .get_link = ethtool_op_get_link, +}; + +#ifdef CONFIG_SYSFS +static ssize_t show_rxbuf_min(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_dev(dev); + struct netfront_info *info = netdev_priv(netdev); + + return sprintf(buf, "%u\n", info->rx_min_target); +} + +static ssize_t store_rxbuf_min(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct net_device *netdev = to_net_dev(dev); + struct netfront_info *np = netdev_priv(netdev); + char *endp; + unsigned long target; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + target = simple_strtoul(buf, &endp, 0); + if (endp == buf) + return -EBADMSG; + + if (target < RX_MIN_TARGET) + target = RX_MIN_TARGET; + if (target > RX_MAX_TARGET) + target = RX_MAX_TARGET; + + spin_lock_bh(&np->rx_lock); + if (target > np->rx_max_target) + np->rx_max_target = target; + np->rx_min_target = target; + if (target > np->rx_target) + np->rx_target = target; + + xennet_alloc_rx_buffers(netdev); + + spin_unlock_bh(&np->rx_lock); + return len; +} + +static ssize_t show_rxbuf_max(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_dev(dev); + struct netfront_info *info = netdev_priv(netdev); + + return sprintf(buf, "%u\n", info->rx_max_target); +} + +static ssize_t store_rxbuf_max(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct 
net_device *netdev = to_net_dev(dev);
+	struct netfront_info *np = netdev_priv(netdev);
+	char *endp;
+	unsigned long target;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	target = simple_strtoul(buf, &endp, 0);
+	if (endp == buf)
+		return -EBADMSG;
+
+	if (target < RX_MIN_TARGET)
+		target = RX_MIN_TARGET;
+	if (target > RX_MAX_TARGET)
+		target = RX_MAX_TARGET;
+
+	spin_lock_bh(&np->rx_lock);
+	if (target < np->rx_min_target)
+		np->rx_min_target = target;
+	np->rx_max_target = target;
+	if (target < np->rx_target)
+		np->rx_target = target;
+
+	xennet_alloc_rx_buffers(netdev);
+
+	spin_unlock_bh(&np->rx_lock);
+	return len;
+}
+
+static ssize_t show_rxbuf_cur(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_dev(dev);
+	struct netfront_info *info = netdev_priv(netdev);
+
+	return sprintf(buf, "%u\n", info->rx_target);
+}
+
+static struct device_attribute xennet_attrs[] = {
+	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
+	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
+	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
+};
+
+static int xennet_sysfs_addif(struct net_device *netdev)
+{
+	int i;
+	int err;
+
+	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
+		err = device_create_file(&netdev->dev,
+					 &xennet_attrs[i]);
+		if (err)
+			goto fail;
+	}
+	return 0;
+
+ fail:
+	while (--i >= 0)
+		device_remove_file(&netdev->dev, &xennet_attrs[i]);
+	return err;
+}
+
+static void xennet_sysfs_delif(struct net_device *netdev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
+		device_remove_file(&netdev->dev, &xennet_attrs[i]);
+}
+
+#endif /* CONFIG_SYSFS */
+
+static struct xenbus_device_id netfront_ids[] = {
+	{ "vif" },
+	{ "" }
+};
+
+
+static int __devexit xennet_remove(struct xenbus_device *dev)
+{
+	struct netfront_info *info = dev->dev.driver_data;
+
+	dev_dbg(&dev->dev, "%s\n", dev->nodename);
+
+	unregister_netdev(info->netdev);
+
+	xennet_disconnect_backend(info);
+
+	del_timer_sync(&info->rx_refill_timer);
+
+	xennet_sysfs_delif(info->netdev);
+
+	free_netdev(info->netdev);
+
+	return 0;
+}
+
+static struct xenbus_driver netfront = {
+	.name = "vif",
+	.owner = THIS_MODULE,
+	.ids = netfront_ids,
+	.probe = netfront_probe,
+	.remove = __devexit_p(xennet_remove),
+	.resume = netfront_resume,
+	.otherend_changed = backend_changed,
+};
+
+static int __init netif_init(void)
+{
+	if (!is_running_on_xen())
+		return -ENODEV;
+
+	if (is_initial_xendomain())
+		return 0;
+
+	printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
+
+	return xenbus_register_frontend(&netfront);
+}
+module_init(netif_init);
+
+
+static void __exit netif_exit(void)
+{
+	if (is_initial_xendomain())
+		return;
+
+	return xenbus_unregister_driver(&netfront);
+}
+module_exit(netif_exit);
+
+MODULE_DESCRIPTION("Xen virtual network device frontend");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index f2a90a7fa2d6..870c5393c21a 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -1137,7 +1137,7 @@ static int yellowfin_rx(struct net_device *dev)
 			if (skb == NULL)
 				break;
 			skb_reserve(skb, 2);	/* 16 byte align the IP header */
-			eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0);
+			skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
 			skb_put(skb, pkt_len);
 			pci_dma_sync_single_for_device(yp->pci_dev, desc->addr,
 						       yp->rx_buf_sz,