author | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-03-24 05:44:19 +0100 |
---|---|---|
committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-03-24 05:44:19 +0100 |
commit | 1ebbe2b20091d306453a5cf480a87e6cd28ae76f (patch) | |
tree | f5cd7a0fa69b8b1938cb5a0faed2e7b0628072a5 /drivers/net | |
parent | Merge branch 'linus' (diff) | |
parent | [PATCH] w1: use kthread api. (diff) | |
download | linux-1ebbe2b20091d306453a5cf480a87e6cd28ae76f.tar.xz | linux-1ebbe2b20091d306453a5cf480a87e6cd28ae76f.zip |
Merge branch 'linus'
Diffstat (limited to 'drivers/net')
55 files changed, 5067 insertions, 3577 deletions
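
The first hunk in the diff below (drivers/net/8139too.c) replaces the old rtnl_shlock_nowait() call in the driver's periodic thread with rtnl_trylock(). A minimal sketch of that pattern, using a hypothetical worker function rather than the real rtl8139_thread():

```c
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical periodic worker: take the RTNL lock only if it is
 * uncontended; otherwise skip this round instead of blocking.
 */
static void example_periodic_work(struct net_device *dev)
{
	if (rtnl_trylock()) {		/* nonzero on success */
		/* ... do work that requires the RTNL lock ... */
		rtnl_unlock();
	} else {
		/* lock held elsewhere: reschedule and retry later */
	}
}
```

rtnl_trylock() returns 1 when the lock was acquired, which is why the success branch in the hunk no longer needs the `== 0` test that rtnl_shlock_nowait() required.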
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index e58d4c50c2e1..f5ee064ab6b2 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c @@ -1605,7 +1605,7 @@ static void rtl8139_thread (void *_data) if (tp->watchdog_fired) { tp->watchdog_fired = 0; rtl8139_tx_timeout_task(_data); - } else if (rtnl_shlock_nowait() == 0) { + } else if (rtnl_trylock()) { rtl8139_thread_iter (dev, tp, tp->mmio_addr); rtnl_unlock (); } else { diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index e0b11095b9da..e20b849a22e8 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1914,7 +1914,7 @@ config E1000_DISABLE_PACKET_SPLIT depends on E1000 help Say Y here if you want to use the legacy receive path for PCI express - hadware. + hardware. If in doubt, say N. @@ -2172,6 +2172,7 @@ config BNX2 config SPIDER_NET tristate "Spider Gigabit Ethernet driver" depends on PCI && PPC_CELL + select FW_LOADER help This driver supports the Gigabit Ethernet chips present on the Cell Processor-Based Blades from IBM. diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 00e72b12fb92..b90468aea077 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -58,8 +58,8 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o obj-$(CONFIG_FEALNX) += fealnx.o obj-$(CONFIG_TIGON3) += tg3.o obj-$(CONFIG_BNX2) += bnx2.o -spidernet-y += spider_net.o spider_net_ethtool.o sungem_phy.o -obj-$(CONFIG_SPIDER_NET) += spidernet.o +spidernet-y += spider_net.o spider_net_ethtool.o +obj-$(CONFIG_SPIDER_NET) += spidernet.o sungem_phy.o obj-$(CONFIG_TC35815) += tc35815.o obj-$(CONFIG_SKGE) += skge.o obj-$(CONFIG_SKY2) += sky2.o diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c index 53e3afc1b7b7..09d5c3f26985 100644 --- a/drivers/net/arm/am79c961a.c +++ b/drivers/net/arm/am79c961a.c @@ -696,7 +696,9 @@ static int __init am79c961_probe(struct platform_device *pdev) dev->base_addr = res->start; dev->irq = platform_get_irq(pdev, 0); - ret = -ENODEV; + ret = -ENODEV; + if (dev->irq < 0) + goto nodev; if (!request_region(dev->base_addr, 0x18, dev->name)) goto nodev; diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index cd0b1dccfb61..1363083b4d83 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -90,8 +90,6 @@ static void au1000_tx_timeout(struct net_device *); static int au1000_set_config(struct net_device *dev, struct ifmap *map); static void set_rx_mode(struct net_device *); static struct net_device_stats *au1000_get_stats(struct net_device *); -static inline void update_tx_stats(struct net_device *, u32, u32); -static inline void update_rx_stats(struct net_device *, u32); static void au1000_timer(unsigned long); static int au1000_ioctl(struct net_device *, struct ifreq *, int); static int mdio_read(struct net_device *, int, int); @@ -1825,16 +1823,11 @@ static void __exit au1000_cleanup_module(void) } } - -static inline void -update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len) +static void update_tx_stats(struct net_device *dev, u32 status) { struct au1000_private *aup = (struct au1000_private *) dev->priv; struct net_device_stats *ps = &aup->stats; - ps->tx_packets++; - ps->tx_bytes += pkt_len; - if (status & TX_FRAME_ABORTED) { if (dev->if_port == IF_PORT_100BASEFX) { if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) { @@ -1867,7 +1860,7 @@ static void au1000_tx_ack(struct net_device *dev) ptxd = aup->tx_dma_ring[aup->tx_tail]; while (ptxd->buff_stat & TX_T_DONE) { - update_tx_stats(dev, ptxd->status, ptxd->len & 0x3ff); + update_tx_stats(dev, 
ptxd->status); ptxd->buff_stat &= ~TX_T_DONE; ptxd->len = 0; au_sync(); @@ -1889,6 +1882,7 @@ static void au1000_tx_ack(struct net_device *dev) static int au1000_tx(struct sk_buff *skb, struct net_device *dev) { struct au1000_private *aup = (struct au1000_private *) dev->priv; + struct net_device_stats *ps = &aup->stats; volatile tx_dma_t *ptxd; u32 buff_stat; db_dest_t *pDB; @@ -1908,7 +1902,7 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev) return 1; } else if (buff_stat & TX_T_DONE) { - update_tx_stats(dev, ptxd->status, ptxd->len & 0x3ff); + update_tx_stats(dev, ptxd->status); ptxd->len = 0; } @@ -1928,6 +1922,9 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev) else ptxd->len = skb->len; + ps->tx_packets++; + ps->tx_bytes += ptxd->len; + ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE; au_sync(); dev_kfree_skb(skb); @@ -1936,7 +1933,6 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev) return 0; } - static inline void update_rx_stats(struct net_device *dev, u32 status) { struct au1000_private *aup = (struct au1000_private *) dev->priv; diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index b787b6582e50..2671da20a496 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -9,13 +9,54 @@ * Written by: Michael Chan (mchan@broadcom.com) */ +#include <linux/config.h> + +#include <linux/module.h> +#include <linux/moduleparam.h> + +#include <linux/kernel.h> +#include <linux/timer.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/dma-mapping.h> +#include <asm/bitops.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <linux/delay.h> +#include <asm/byteorder.h> +#include <linux/time.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#ifdef NETIF_F_HW_VLAN_TX +#include <linux/if_vlan.h> +#define BCM_VLAN 1 +#endif +#ifdef NETIF_F_TSO +#include <net/ip.h> +#include <net/tcp.h> +#include <net/checksum.h> +#define BCM_TSO 1 +#endif +#include <linux/workqueue.h> +#include <linux/crc32.h> +#include <linux/prefetch.h> +#include <linux/cache.h> + #include "bnx2.h" #include "bnx2_fw.h" #define DRV_MODULE_NAME "bnx2" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "1.4.31" -#define DRV_MODULE_RELDATE "January 19, 2006" +#define DRV_MODULE_VERSION "1.4.39" +#define DRV_MODULE_RELDATE "March 22, 2006" #define RUN_AT(x) (jiffies + (x)) @@ -313,8 +354,6 @@ bnx2_disable_int(struct bnx2 *bp) static void bnx2_enable_int(struct bnx2 *bp) { - u32 val; - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx); @@ -322,8 +361,7 @@ bnx2_enable_int(struct bnx2 *bp) REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx); - val = REG_RD(bp, BNX2_HC_COMMAND); - REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW); + REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); } static void @@ -360,15 +398,13 @@ bnx2_netif_start(struct bnx2 *bp) static void bnx2_free_mem(struct bnx2 *bp) { - if (bp->stats_blk) { - pci_free_consistent(bp->pdev, sizeof(struct statistics_block), - bp->stats_blk, bp->stats_blk_mapping); - bp->stats_blk = NULL; - } + int i; + if (bp->status_blk) { - pci_free_consistent(bp->pdev, sizeof(struct status_block), + pci_free_consistent(bp->pdev, 
bp->status_stats_size, bp->status_blk, bp->status_blk_mapping); bp->status_blk = NULL; + bp->stats_blk = NULL; } if (bp->tx_desc_ring) { pci_free_consistent(bp->pdev, @@ -378,25 +414,28 @@ bnx2_free_mem(struct bnx2 *bp) } kfree(bp->tx_buf_ring); bp->tx_buf_ring = NULL; - if (bp->rx_desc_ring) { - pci_free_consistent(bp->pdev, - sizeof(struct rx_bd) * RX_DESC_CNT, - bp->rx_desc_ring, bp->rx_desc_mapping); - bp->rx_desc_ring = NULL; - } - kfree(bp->rx_buf_ring); + for (i = 0; i < bp->rx_max_ring; i++) { + if (bp->rx_desc_ring[i]) + pci_free_consistent(bp->pdev, + sizeof(struct rx_bd) * RX_DESC_CNT, + bp->rx_desc_ring[i], + bp->rx_desc_mapping[i]); + bp->rx_desc_ring[i] = NULL; + } + vfree(bp->rx_buf_ring); bp->rx_buf_ring = NULL; } static int bnx2_alloc_mem(struct bnx2 *bp) { - bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT, - GFP_KERNEL); + int i, status_blk_size; + + bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT, + GFP_KERNEL); if (bp->tx_buf_ring == NULL) return -ENOMEM; - memset(bp->tx_buf_ring, 0, sizeof(struct sw_bd) * TX_DESC_CNT); bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, sizeof(struct tx_bd) * TX_DESC_CNT, @@ -404,34 +443,40 @@ bnx2_alloc_mem(struct bnx2 *bp) if (bp->tx_desc_ring == NULL) goto alloc_mem_err; - bp->rx_buf_ring = kmalloc(sizeof(struct sw_bd) * RX_DESC_CNT, - GFP_KERNEL); + bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT * + bp->rx_max_ring); if (bp->rx_buf_ring == NULL) goto alloc_mem_err; - memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT); - bp->rx_desc_ring = pci_alloc_consistent(bp->pdev, - sizeof(struct rx_bd) * - RX_DESC_CNT, - &bp->rx_desc_mapping); - if (bp->rx_desc_ring == NULL) - goto alloc_mem_err; + memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT * + bp->rx_max_ring); + + for (i = 0; i < bp->rx_max_ring; i++) { + bp->rx_desc_ring[i] = + pci_alloc_consistent(bp->pdev, + sizeof(struct rx_bd) * RX_DESC_CNT, + &bp->rx_desc_mapping[i]); + if (bp->rx_desc_ring[i] == NULL) + goto alloc_mem_err; + + } + + /* Combine status and statistics blocks into one allocation. 
*/ + status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block)); + bp->status_stats_size = status_blk_size + + sizeof(struct statistics_block); - bp->status_blk = pci_alloc_consistent(bp->pdev, - sizeof(struct status_block), + bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size, &bp->status_blk_mapping); if (bp->status_blk == NULL) goto alloc_mem_err; - memset(bp->status_blk, 0, sizeof(struct status_block)); + memset(bp->status_blk, 0, bp->status_stats_size); - bp->stats_blk = pci_alloc_consistent(bp->pdev, - sizeof(struct statistics_block), - &bp->stats_blk_mapping); - if (bp->stats_blk == NULL) - goto alloc_mem_err; + bp->stats_blk = (void *) ((unsigned long) bp->status_blk + + status_blk_size); - memset(bp->stats_blk, 0, sizeof(struct statistics_block)); + bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size; return 0; @@ -1520,7 +1565,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index) struct sk_buff *skb; struct sw_bd *rx_buf = &bp->rx_buf_ring[index]; dma_addr_t mapping; - struct rx_bd *rxbd = &bp->rx_desc_ring[index]; + struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)]; unsigned long align; skb = dev_alloc_skb(bp->rx_buf_size); @@ -1656,23 +1701,30 @@ static inline void bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb, u16 cons, u16 prod) { - struct sw_bd *cons_rx_buf = &bp->rx_buf_ring[cons]; - struct sw_bd *prod_rx_buf = &bp->rx_buf_ring[prod]; - struct rx_bd *cons_bd = &bp->rx_desc_ring[cons]; - struct rx_bd *prod_bd = &bp->rx_desc_ring[prod]; + struct sw_bd *cons_rx_buf, *prod_rx_buf; + struct rx_bd *cons_bd, *prod_bd; + + cons_rx_buf = &bp->rx_buf_ring[cons]; + prod_rx_buf = &bp->rx_buf_ring[prod]; pci_dma_sync_single_for_device(bp->pdev, pci_unmap_addr(cons_rx_buf, mapping), bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE); - prod_rx_buf->skb = cons_rx_buf->skb; - pci_unmap_addr_set(prod_rx_buf, mapping, - pci_unmap_addr(cons_rx_buf, mapping)); + bp->rx_prod_bseq += bp->rx_buf_use_size; - memcpy(prod_bd, cons_bd, 8); + prod_rx_buf->skb = skb; - bp->rx_prod_bseq += bp->rx_buf_use_size; + if (cons == prod) + return; + pci_unmap_addr_set(prod_rx_buf, mapping, + pci_unmap_addr(cons_rx_buf, mapping)); + + cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; + prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; + prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi; + prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; } static int @@ -1699,14 +1751,19 @@ bnx2_rx_int(struct bnx2 *bp, int budget) u32 status; struct sw_bd *rx_buf; struct sk_buff *skb; + dma_addr_t dma_addr; sw_ring_cons = RX_RING_IDX(sw_cons); sw_ring_prod = RX_RING_IDX(sw_prod); rx_buf = &bp->rx_buf_ring[sw_ring_cons]; skb = rx_buf->skb; - pci_dma_sync_single_for_cpu(bp->pdev, - pci_unmap_addr(rx_buf, mapping), + + rx_buf->skb = NULL; + + dma_addr = pci_unmap_addr(rx_buf, mapping); + + pci_dma_sync_single_for_cpu(bp->pdev, dma_addr, bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE); rx_hdr = (struct l2_fhdr *) skb->data; @@ -1747,8 +1804,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget) skb = new_skb; } else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) { - pci_unmap_single(bp->pdev, - pci_unmap_addr(rx_buf, mapping), + pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); skb_reserve(skb, bp->rx_offset); @@ -1794,8 +1850,6 @@ reuse_rx: rx_pkt++; next_rx: - rx_buf->skb = NULL; - sw_cons = NEXT_RX_BD(sw_cons); sw_prod = NEXT_RX_BD(sw_prod); @@ -1906,6 +1960,13 @@ bnx2_poll(struct net_device *dev, int *budget) 
spin_lock(&bp->phy_lock); bnx2_phy_int(bp); spin_unlock(&bp->phy_lock); + + /* This is needed to take care of transient status + * during link changes. + */ + REG_WR(bp, BNX2_HC_COMMAND, + bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + REG_RD(bp, BNX2_HC_COMMAND); } if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons) @@ -3287,6 +3348,8 @@ bnx2_init_chip(struct bnx2 *bp) udelay(20); + bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND); + return rc; } @@ -3340,27 +3403,35 @@ bnx2_init_rx_ring(struct bnx2 *bp) bp->hw_rx_cons = 0; bp->rx_prod_bseq = 0; - rxbd = &bp->rx_desc_ring[0]; - for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) { - rxbd->rx_bd_len = bp->rx_buf_use_size; - rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; - } + for (i = 0; i < bp->rx_max_ring; i++) { + int j; - rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping >> 32; - rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping & 0xffffffff; + rxbd = &bp->rx_desc_ring[i][0]; + for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) { + rxbd->rx_bd_len = bp->rx_buf_use_size; + rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; + } + if (i == (bp->rx_max_ring - 1)) + j = 0; + else + j = i + 1; + rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32; + rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] & + 0xffffffff; + } val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE; val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2; val |= 0x02 << 8; CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val); - val = (u64) bp->rx_desc_mapping >> 32; + val = (u64) bp->rx_desc_mapping[0] >> 32; CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val); - val = (u64) bp->rx_desc_mapping & 0xffffffff; + val = (u64) bp->rx_desc_mapping[0] & 0xffffffff; CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val); - for ( ;ring_prod < bp->rx_ring_size; ) { + for (i = 0; i < bp->rx_ring_size; i++) { if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) { break; } @@ -3375,6 +3446,29 @@ bnx2_init_rx_ring(struct bnx2 *bp) } static void +bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size) +{ + u32 num_rings, max; + + bp->rx_ring_size = size; + num_rings = 1; + while (size > MAX_RX_DESC_CNT) { + size -= MAX_RX_DESC_CNT; + num_rings++; + } + /* round to next power of 2 */ + max = MAX_RX_RINGS; + while ((max & num_rings) == 0) + max >>= 1; + + if (num_rings != max) + max <<= 1; + + bp->rx_max_ring = max; + bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1; +} + +static void bnx2_free_tx_skbs(struct bnx2 *bp) { int i; @@ -3419,7 +3513,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp) if (bp->rx_buf_ring == NULL) return; - for (i = 0; i < RX_DESC_CNT; i++) { + for (i = 0; i < bp->rx_max_ring_idx; i++) { struct sw_bd *rx_buf = &bp->rx_buf_ring[i]; struct sk_buff *skb = rx_buf->skb; @@ -3506,74 +3600,9 @@ bnx2_test_registers(struct bnx2 *bp) { 0x0c00, 0, 0x00000000, 0x00000001 }, { 0x0c04, 0, 0x00000000, 0x03ff0001 }, { 0x0c08, 0, 0x0f0ff073, 0x00000000 }, - { 0x0c0c, 0, 0x00ffffff, 0x00000000 }, - { 0x0c30, 0, 0x00000000, 0xffffffff }, - { 0x0c34, 0, 0x00000000, 0xffffffff }, - { 0x0c38, 0, 0x00000000, 0xffffffff }, - { 0x0c3c, 0, 0x00000000, 0xffffffff }, - { 0x0c40, 0, 0x00000000, 0xffffffff }, - { 0x0c44, 0, 0x00000000, 0xffffffff }, - { 0x0c48, 0, 0x00000000, 0x0007ffff }, - { 0x0c4c, 0, 0x00000000, 0xffffffff }, - { 0x0c50, 0, 0x00000000, 0xffffffff }, - { 0x0c54, 0, 0x00000000, 0xffffffff }, - { 0x0c58, 0, 0x00000000, 0xffffffff }, - { 0x0c5c, 0, 0x00000000, 0xffffffff }, - { 0x0c60, 0, 0x00000000, 0xffffffff }, - { 0x0c64, 0, 0x00000000, 0xffffffff }, - { 0x0c68, 0, 
0x00000000, 0xffffffff }, - { 0x0c6c, 0, 0x00000000, 0xffffffff }, - { 0x0c70, 0, 0x00000000, 0xffffffff }, - { 0x0c74, 0, 0x00000000, 0xffffffff }, - { 0x0c78, 0, 0x00000000, 0xffffffff }, - { 0x0c7c, 0, 0x00000000, 0xffffffff }, - { 0x0c80, 0, 0x00000000, 0xffffffff }, - { 0x0c84, 0, 0x00000000, 0xffffffff }, - { 0x0c88, 0, 0x00000000, 0xffffffff }, - { 0x0c8c, 0, 0x00000000, 0xffffffff }, - { 0x0c90, 0, 0x00000000, 0xffffffff }, - { 0x0c94, 0, 0x00000000, 0xffffffff }, - { 0x0c98, 0, 0x00000000, 0xffffffff }, - { 0x0c9c, 0, 0x00000000, 0xffffffff }, - { 0x0ca0, 0, 0x00000000, 0xffffffff }, - { 0x0ca4, 0, 0x00000000, 0xffffffff }, - { 0x0ca8, 0, 0x00000000, 0x0007ffff }, - { 0x0cac, 0, 0x00000000, 0xffffffff }, - { 0x0cb0, 0, 0x00000000, 0xffffffff }, - { 0x0cb4, 0, 0x00000000, 0xffffffff }, - { 0x0cb8, 0, 0x00000000, 0xffffffff }, - { 0x0cbc, 0, 0x00000000, 0xffffffff }, - { 0x0cc0, 0, 0x00000000, 0xffffffff }, - { 0x0cc4, 0, 0x00000000, 0xffffffff }, - { 0x0cc8, 0, 0x00000000, 0xffffffff }, - { 0x0ccc, 0, 0x00000000, 0xffffffff }, - { 0x0cd0, 0, 0x00000000, 0xffffffff }, - { 0x0cd4, 0, 0x00000000, 0xffffffff }, - { 0x0cd8, 0, 0x00000000, 0xffffffff }, - { 0x0cdc, 0, 0x00000000, 0xffffffff }, - { 0x0ce0, 0, 0x00000000, 0xffffffff }, - { 0x0ce4, 0, 0x00000000, 0xffffffff }, - { 0x0ce8, 0, 0x00000000, 0xffffffff }, - { 0x0cec, 0, 0x00000000, 0xffffffff }, - { 0x0cf0, 0, 0x00000000, 0xffffffff }, - { 0x0cf4, 0, 0x00000000, 0xffffffff }, - { 0x0cf8, 0, 0x00000000, 0xffffffff }, - { 0x0cfc, 0, 0x00000000, 0xffffffff }, - { 0x0d00, 0, 0x00000000, 0xffffffff }, - { 0x0d04, 0, 0x00000000, 0xffffffff }, { 0x1000, 0, 0x00000000, 0x00000001 }, { 0x1004, 0, 0x00000000, 0x000f0001 }, - { 0x1044, 0, 0x00000000, 0xffc003ff }, - { 0x1080, 0, 0x00000000, 0x0001ffff }, - { 0x1084, 0, 0x00000000, 0xffffffff }, - { 0x1088, 0, 0x00000000, 0xffffffff }, - { 0x108c, 0, 0x00000000, 0xffffffff }, - { 0x1090, 0, 0x00000000, 0xffffffff }, - { 0x1094, 0, 0x00000000, 0xffffffff }, - { 0x1098, 0, 0x00000000, 0xffffffff }, - { 0x109c, 0, 0x00000000, 0xffffffff }, - { 0x10a0, 0, 0x00000000, 0xffffffff }, { 0x1408, 0, 0x01c00800, 0x00000000 }, { 0x149c, 0, 0x8000ffff, 0x00000000 }, @@ -3585,111 +3614,9 @@ bnx2_test_registers(struct bnx2 *bp) { 0x14c4, 0, 0x00003fff, 0x00000000 }, { 0x14cc, 0, 0x00000000, 0x00000001 }, { 0x14d0, 0, 0xffffffff, 0x00000000 }, - { 0x1500, 0, 0x00000000, 0xffffffff }, - { 0x1504, 0, 0x00000000, 0xffffffff }, - { 0x1508, 0, 0x00000000, 0xffffffff }, - { 0x150c, 0, 0x00000000, 0xffffffff }, - { 0x1510, 0, 0x00000000, 0xffffffff }, - { 0x1514, 0, 0x00000000, 0xffffffff }, - { 0x1518, 0, 0x00000000, 0xffffffff }, - { 0x151c, 0, 0x00000000, 0xffffffff }, - { 0x1520, 0, 0x00000000, 0xffffffff }, - { 0x1524, 0, 0x00000000, 0xffffffff }, - { 0x1528, 0, 0x00000000, 0xffffffff }, - { 0x152c, 0, 0x00000000, 0xffffffff }, - { 0x1530, 0, 0x00000000, 0xffffffff }, - { 0x1534, 0, 0x00000000, 0xffffffff }, - { 0x1538, 0, 0x00000000, 0xffffffff }, - { 0x153c, 0, 0x00000000, 0xffffffff }, - { 0x1540, 0, 0x00000000, 0xffffffff }, - { 0x1544, 0, 0x00000000, 0xffffffff }, - { 0x1548, 0, 0x00000000, 0xffffffff }, - { 0x154c, 0, 0x00000000, 0xffffffff }, - { 0x1550, 0, 0x00000000, 0xffffffff }, - { 0x1554, 0, 0x00000000, 0xffffffff }, - { 0x1558, 0, 0x00000000, 0xffffffff }, - { 0x1600, 0, 0x00000000, 0xffffffff }, - { 0x1604, 0, 0x00000000, 0xffffffff }, - { 0x1608, 0, 0x00000000, 0xffffffff }, - { 0x160c, 0, 0x00000000, 0xffffffff }, - { 0x1610, 0, 0x00000000, 0xffffffff }, - { 0x1614, 0, 0x00000000, 0xffffffff 
}, - { 0x1618, 0, 0x00000000, 0xffffffff }, - { 0x161c, 0, 0x00000000, 0xffffffff }, - { 0x1620, 0, 0x00000000, 0xffffffff }, - { 0x1624, 0, 0x00000000, 0xffffffff }, - { 0x1628, 0, 0x00000000, 0xffffffff }, - { 0x162c, 0, 0x00000000, 0xffffffff }, - { 0x1630, 0, 0x00000000, 0xffffffff }, - { 0x1634, 0, 0x00000000, 0xffffffff }, - { 0x1638, 0, 0x00000000, 0xffffffff }, - { 0x163c, 0, 0x00000000, 0xffffffff }, - { 0x1640, 0, 0x00000000, 0xffffffff }, - { 0x1644, 0, 0x00000000, 0xffffffff }, - { 0x1648, 0, 0x00000000, 0xffffffff }, - { 0x164c, 0, 0x00000000, 0xffffffff }, - { 0x1650, 0, 0x00000000, 0xffffffff }, - { 0x1654, 0, 0x00000000, 0xffffffff }, { 0x1800, 0, 0x00000000, 0x00000001 }, { 0x1804, 0, 0x00000000, 0x00000003 }, - { 0x1840, 0, 0x00000000, 0xffffffff }, - { 0x1844, 0, 0x00000000, 0xffffffff }, - { 0x1848, 0, 0x00000000, 0xffffffff }, - { 0x184c, 0, 0x00000000, 0xffffffff }, - { 0x1850, 0, 0x00000000, 0xffffffff }, - { 0x1900, 0, 0x7ffbffff, 0x00000000 }, - { 0x1904, 0, 0xffffffff, 0x00000000 }, - { 0x190c, 0, 0xffffffff, 0x00000000 }, - { 0x1914, 0, 0xffffffff, 0x00000000 }, - { 0x191c, 0, 0xffffffff, 0x00000000 }, - { 0x1924, 0, 0xffffffff, 0x00000000 }, - { 0x192c, 0, 0xffffffff, 0x00000000 }, - { 0x1934, 0, 0xffffffff, 0x00000000 }, - { 0x193c, 0, 0xffffffff, 0x00000000 }, - { 0x1944, 0, 0xffffffff, 0x00000000 }, - { 0x194c, 0, 0xffffffff, 0x00000000 }, - { 0x1954, 0, 0xffffffff, 0x00000000 }, - { 0x195c, 0, 0xffffffff, 0x00000000 }, - { 0x1964, 0, 0xffffffff, 0x00000000 }, - { 0x196c, 0, 0xffffffff, 0x00000000 }, - { 0x1974, 0, 0xffffffff, 0x00000000 }, - { 0x197c, 0, 0xffffffff, 0x00000000 }, - { 0x1980, 0, 0x0700ffff, 0x00000000 }, - - { 0x1c00, 0, 0x00000000, 0x00000001 }, - { 0x1c04, 0, 0x00000000, 0x00000003 }, - { 0x1c08, 0, 0x0000000f, 0x00000000 }, - { 0x1c40, 0, 0x00000000, 0xffffffff }, - { 0x1c44, 0, 0x00000000, 0xffffffff }, - { 0x1c48, 0, 0x00000000, 0xffffffff }, - { 0x1c4c, 0, 0x00000000, 0xffffffff }, - { 0x1c50, 0, 0x00000000, 0xffffffff }, - { 0x1d00, 0, 0x7ffbffff, 0x00000000 }, - { 0x1d04, 0, 0xffffffff, 0x00000000 }, - { 0x1d0c, 0, 0xffffffff, 0x00000000 }, - { 0x1d14, 0, 0xffffffff, 0x00000000 }, - { 0x1d1c, 0, 0xffffffff, 0x00000000 }, - { 0x1d24, 0, 0xffffffff, 0x00000000 }, - { 0x1d2c, 0, 0xffffffff, 0x00000000 }, - { 0x1d34, 0, 0xffffffff, 0x00000000 }, - { 0x1d3c, 0, 0xffffffff, 0x00000000 }, - { 0x1d44, 0, 0xffffffff, 0x00000000 }, - { 0x1d4c, 0, 0xffffffff, 0x00000000 }, - { 0x1d54, 0, 0xffffffff, 0x00000000 }, - { 0x1d5c, 0, 0xffffffff, 0x00000000 }, - { 0x1d64, 0, 0xffffffff, 0x00000000 }, - { 0x1d6c, 0, 0xffffffff, 0x00000000 }, - { 0x1d74, 0, 0xffffffff, 0x00000000 }, - { 0x1d7c, 0, 0xffffffff, 0x00000000 }, - { 0x1d80, 0, 0x0700ffff, 0x00000000 }, - - { 0x2004, 0, 0x00000000, 0x0337000f }, - { 0x2008, 0, 0xffffffff, 0x00000000 }, - { 0x200c, 0, 0xffffffff, 0x00000000 }, - { 0x2010, 0, 0xffffffff, 0x00000000 }, - { 0x2014, 0, 0x801fff80, 0x00000000 }, - { 0x2018, 0, 0x000003ff, 0x00000000 }, { 0x2800, 0, 0x00000000, 0x00000001 }, { 0x2804, 0, 0x00000000, 0x00003f01 }, @@ -3707,16 +3634,6 @@ bnx2_test_registers(struct bnx2 *bp) { 0x2c00, 0, 0x00000000, 0x00000011 }, { 0x2c04, 0, 0x00000000, 0x00030007 }, - { 0x3000, 0, 0x00000000, 0x00000001 }, - { 0x3004, 0, 0x00000000, 0x007007ff }, - { 0x3008, 0, 0x00000003, 0x00000000 }, - { 0x300c, 0, 0xffffffff, 0x00000000 }, - { 0x3010, 0, 0xffffffff, 0x00000000 }, - { 0x3014, 0, 0xffffffff, 0x00000000 }, - { 0x3034, 0, 0xffffffff, 0x00000000 }, - { 0x3038, 0, 0xffffffff, 0x00000000 }, - { 0x3050, 0, 
0x00000001, 0x00000000 }, - { 0x3c00, 0, 0x00000000, 0x00000001 }, { 0x3c04, 0, 0x00000000, 0x00070000 }, { 0x3c08, 0, 0x00007f71, 0x07f00000 }, @@ -3726,88 +3643,11 @@ bnx2_test_registers(struct bnx2 *bp) { 0x3c18, 0, 0x00000000, 0xffffffff }, { 0x3c1c, 0, 0xfffff000, 0x00000000 }, { 0x3c20, 0, 0xffffff00, 0x00000000 }, - { 0x3c24, 0, 0xffffffff, 0x00000000 }, - { 0x3c28, 0, 0xffffffff, 0x00000000 }, - { 0x3c2c, 0, 0xffffffff, 0x00000000 }, - { 0x3c30, 0, 0xffffffff, 0x00000000 }, - { 0x3c34, 0, 0xffffffff, 0x00000000 }, - { 0x3c38, 0, 0xffffffff, 0x00000000 }, - { 0x3c3c, 0, 0xffffffff, 0x00000000 }, - { 0x3c40, 0, 0xffffffff, 0x00000000 }, - { 0x3c44, 0, 0xffffffff, 0x00000000 }, - { 0x3c48, 0, 0xffffffff, 0x00000000 }, - { 0x3c4c, 0, 0xffffffff, 0x00000000 }, - { 0x3c50, 0, 0xffffffff, 0x00000000 }, - { 0x3c54, 0, 0xffffffff, 0x00000000 }, - { 0x3c58, 0, 0xffffffff, 0x00000000 }, - { 0x3c5c, 0, 0xffffffff, 0x00000000 }, - { 0x3c60, 0, 0xffffffff, 0x00000000 }, - { 0x3c64, 0, 0xffffffff, 0x00000000 }, - { 0x3c68, 0, 0xffffffff, 0x00000000 }, - { 0x3c6c, 0, 0xffffffff, 0x00000000 }, - { 0x3c70, 0, 0xffffffff, 0x00000000 }, - { 0x3c74, 0, 0x0000003f, 0x00000000 }, - { 0x3c78, 0, 0x00000000, 0x00000000 }, - { 0x3c7c, 0, 0x00000000, 0x00000000 }, - { 0x3c80, 0, 0x3fffffff, 0x00000000 }, - { 0x3c84, 0, 0x0000003f, 0x00000000 }, - { 0x3c88, 0, 0x00000000, 0xffffffff }, - { 0x3c8c, 0, 0x00000000, 0xffffffff }, - - { 0x4000, 0, 0x00000000, 0x00000001 }, - { 0x4004, 0, 0x00000000, 0x00030000 }, - { 0x4008, 0, 0x00000ff0, 0x00000000 }, - { 0x400c, 0, 0xffffffff, 0x00000000 }, - { 0x4088, 0, 0x00000000, 0x00070303 }, - - { 0x4400, 0, 0x00000000, 0x00000001 }, - { 0x4404, 0, 0x00000000, 0x00003f01 }, - { 0x4408, 0, 0x7fff00ff, 0x00000000 }, - { 0x440c, 0, 0xffffffff, 0x00000000 }, - { 0x4410, 0, 0xffff, 0x0000 }, - { 0x4414, 0, 0xffff, 0x0000 }, - { 0x4418, 0, 0xffff, 0x0000 }, - { 0x441c, 0, 0xffff, 0x0000 }, - { 0x4428, 0, 0xffffffff, 0x00000000 }, - { 0x442c, 0, 0xffffffff, 0x00000000 }, - { 0x4430, 0, 0xffffffff, 0x00000000 }, - { 0x4434, 0, 0xffffffff, 0x00000000 }, - { 0x4438, 0, 0xffffffff, 0x00000000 }, - { 0x443c, 0, 0xffffffff, 0x00000000 }, - { 0x4440, 0, 0xffffffff, 0x00000000 }, - { 0x4444, 0, 0xffffffff, 0x00000000 }, - - { 0x4c00, 0, 0x00000000, 0x00000001 }, - { 0x4c04, 0, 0x00000000, 0x0000003f }, - { 0x4c08, 0, 0xffffffff, 0x00000000 }, - { 0x4c0c, 0, 0x0007fc00, 0x00000000 }, - { 0x4c10, 0, 0x80003fe0, 0x00000000 }, - { 0x4c14, 0, 0xffffffff, 0x00000000 }, - { 0x4c44, 0, 0x00000000, 0x9fff9fff }, - { 0x4c48, 0, 0x00000000, 0xb3009fff }, - { 0x4c4c, 0, 0x00000000, 0x77f33b30 }, - { 0x4c50, 0, 0x00000000, 0xffffffff }, { 0x5004, 0, 0x00000000, 0x0000007f }, { 0x5008, 0, 0x0f0007ff, 0x00000000 }, { 0x500c, 0, 0xf800f800, 0x07ff07ff }, - { 0x5400, 0, 0x00000008, 0x00000001 }, - { 0x5404, 0, 0x00000000, 0x0000003f }, - { 0x5408, 0, 0x0000001f, 0x00000000 }, - { 0x540c, 0, 0xffffffff, 0x00000000 }, - { 0x5410, 0, 0xffffffff, 0x00000000 }, - { 0x5414, 0, 0x0000ffff, 0x00000000 }, - { 0x5418, 0, 0x0000ffff, 0x00000000 }, - { 0x541c, 0, 0x0000ffff, 0x00000000 }, - { 0x5420, 0, 0x0000ffff, 0x00000000 }, - { 0x5428, 0, 0x000000ff, 0x00000000 }, - { 0x542c, 0, 0xff00ffff, 0x00000000 }, - { 0x5430, 0, 0x001fff80, 0x00000000 }, - { 0x5438, 0, 0xffffffff, 0x00000000 }, - { 0x543c, 0, 0xffffffff, 0x00000000 }, - { 0x5440, 0, 0xf800f800, 0x07ff07ff }, - { 0x5c00, 0, 0x00000000, 0x00000001 }, { 0x5c04, 0, 0x00000000, 0x0003000f }, { 0x5c08, 0, 0x00000003, 0x00000000 }, @@ -3949,7 +3789,6 @@ 
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) struct sk_buff *skb, *rx_skb; unsigned char *packet; u16 rx_start_idx, rx_idx; - u32 val; dma_addr_t map; struct tx_bd *txbd; struct sw_bd *rx_buf; @@ -3980,8 +3819,9 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) map = pci_map_single(bp->pdev, skb->data, pkt_size, PCI_DMA_TODEVICE); - val = REG_RD(bp, BNX2_HC_COMMAND); - REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + REG_WR(bp, BNX2_HC_COMMAND, + bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + REG_RD(bp, BNX2_HC_COMMAND); udelay(5); @@ -4005,8 +3845,9 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) udelay(100); - val = REG_RD(bp, BNX2_HC_COMMAND); - REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + REG_WR(bp, BNX2_HC_COMMAND, + bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + REG_RD(bp, BNX2_HC_COMMAND); udelay(5); @@ -4142,7 +3983,6 @@ static int bnx2_test_intr(struct bnx2 *bp) { int i; - u32 val; u16 status_idx; if (!netif_running(bp->dev)) @@ -4151,8 +3991,7 @@ bnx2_test_intr(struct bnx2 *bp) status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff; /* This register is not touched during run-time. */ - val = REG_RD(bp, BNX2_HC_COMMAND); - REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW); + REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); REG_RD(bp, BNX2_HC_COMMAND); for (i = 0; i < 10; i++) { @@ -4794,6 +4633,64 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) info->fw_version[5] = 0; } +#define BNX2_REGDUMP_LEN (32 * 1024) + +static int +bnx2_get_regs_len(struct net_device *dev) +{ + return BNX2_REGDUMP_LEN; +} + +static void +bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) +{ + u32 *p = _p, i, offset; + u8 *orig_p = _p; + struct bnx2 *bp = netdev_priv(dev); + u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c, + 0x0800, 0x0880, 0x0c00, 0x0c10, + 0x0c30, 0x0d08, 0x1000, 0x101c, + 0x1040, 0x1048, 0x1080, 0x10a4, + 0x1400, 0x1490, 0x1498, 0x14f0, + 0x1500, 0x155c, 0x1580, 0x15dc, + 0x1600, 0x1658, 0x1680, 0x16d8, + 0x1800, 0x1820, 0x1840, 0x1854, + 0x1880, 0x1894, 0x1900, 0x1984, + 0x1c00, 0x1c0c, 0x1c40, 0x1c54, + 0x1c80, 0x1c94, 0x1d00, 0x1d84, + 0x2000, 0x2030, 0x23c0, 0x2400, + 0x2800, 0x2820, 0x2830, 0x2850, + 0x2b40, 0x2c10, 0x2fc0, 0x3058, + 0x3c00, 0x3c94, 0x4000, 0x4010, + 0x4080, 0x4090, 0x43c0, 0x4458, + 0x4c00, 0x4c18, 0x4c40, 0x4c54, + 0x4fc0, 0x5010, 0x53c0, 0x5444, + 0x5c00, 0x5c18, 0x5c80, 0x5c90, + 0x5fc0, 0x6000, 0x6400, 0x6428, + 0x6800, 0x6848, 0x684c, 0x6860, + 0x6888, 0x6910, 0x8000 }; + + regs->version = 0; + + memset(p, 0, BNX2_REGDUMP_LEN); + + if (!netif_running(bp->dev)) + return; + + i = 0; + offset = reg_boundaries[0]; + p += offset; + while (offset < BNX2_REGDUMP_LEN) { + *p++ = REG_RD(bp, offset); + offset += 4; + if (offset == reg_boundaries[i + 1]) { + offset = reg_boundaries[i + 2]; + p = (u32 *) (orig_p + offset); + i += 2; + } + } +} + static void bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { @@ -4979,7 +4876,7 @@ bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { struct bnx2 *bp = netdev_priv(dev); - ering->rx_max_pending = MAX_RX_DESC_CNT; + ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT; ering->rx_mini_max_pending = 0; ering->rx_jumbo_max_pending = 0; @@ -4996,17 +4893,28 @@ bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { struct bnx2 *bp = netdev_priv(dev); - if ((ering->rx_pending > MAX_RX_DESC_CNT) || + if 
((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) || (ering->tx_pending > MAX_TX_DESC_CNT) || (ering->tx_pending <= MAX_SKB_FRAGS)) { return -EINVAL; } - bp->rx_ring_size = ering->rx_pending; + if (netif_running(bp->dev)) { + bnx2_netif_stop(bp); + bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); + bnx2_free_skbs(bp); + bnx2_free_mem(bp); + } + + bnx2_set_rx_ring_size(bp, ering->rx_pending); bp->tx_ring_size = ering->tx_pending; if (netif_running(bp->dev)) { - bnx2_netif_stop(bp); + int rc; + + rc = bnx2_alloc_mem(bp); + if (rc) + return rc; bnx2_init_nic(bp); bnx2_netif_start(bp); } @@ -5360,6 +5268,8 @@ static struct ethtool_ops bnx2_ethtool_ops = { .get_settings = bnx2_get_settings, .set_settings = bnx2_set_settings, .get_drvinfo = bnx2_get_drvinfo, + .get_regs_len = bnx2_get_regs_len, + .get_regs = bnx2_get_regs, .get_wol = bnx2_get_wol, .set_wol = bnx2_set_wol, .nway_reset = bnx2_nway_reset, @@ -5678,7 +5588,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->mac_addr[5] = (u8) reg; bp->tx_ring_size = MAX_TX_DESC_CNT; - bp->rx_ring_size = 100; + bnx2_set_rx_ring_size(bp, 100); bp->rx_csum = 1; @@ -5897,6 +5807,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state) if (!netif_running(dev)) return 0; + flush_scheduled_work(); bnx2_netif_stop(bp); netif_device_detach(dev); del_timer_sync(&bp->timer); diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index 9f691cbd666b..b87925f6a228 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h @@ -13,45 +13,6 @@ #ifndef BNX2_H #define BNX2_H -#include <linux/config.h> - -#include <linux/module.h> -#include <linux/moduleparam.h> - -#include <linux/kernel.h> -#include <linux/timer.h> -#include <linux/errno.h> -#include <linux/ioport.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/pci.h> -#include <linux/init.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/dma-mapping.h> -#include <asm/bitops.h> -#include <asm/io.h> -#include <asm/irq.h> -#include <linux/delay.h> -#include <asm/byteorder.h> -#include <linux/time.h> -#include <linux/ethtool.h> -#include <linux/mii.h> -#ifdef NETIF_F_HW_VLAN_TX -#include <linux/if_vlan.h> -#define BCM_VLAN 1 -#endif -#ifdef NETIF_F_TSO -#include <net/ip.h> -#include <net/tcp.h> -#include <net/checksum.h> -#define BCM_TSO 1 -#endif -#include <linux/workqueue.h> -#include <linux/crc32.h> -#include <linux/prefetch.h> - /* Hardware data structures and register definitions automatically * generated from RTL code. Do not modify. */ @@ -3792,8 +3753,10 @@ struct l2_fhdr { #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct tx_bd)) #define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) +#define MAX_RX_RINGS 4 #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct rx_bd)) #define MAX_RX_DESC_CNT (RX_DESC_CNT - 1) +#define MAX_TOTAL_RX_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_RINGS) #define NEXT_TX_BD(x) (((x) & (MAX_TX_DESC_CNT - 1)) == \ (MAX_TX_DESC_CNT - 1)) ? \ @@ -3805,8 +3768,10 @@ struct l2_fhdr { (MAX_RX_DESC_CNT - 1)) ? \ (x) + 2 : (x) + 1 -#define RX_RING_IDX(x) ((x) & MAX_RX_DESC_CNT) +#define RX_RING_IDX(x) ((x) & bp->rx_max_ring_idx) +#define RX_RING(x) (((x) & ~MAX_RX_DESC_CNT) >> 8) +#define RX_IDX(x) ((x) & MAX_RX_DESC_CNT) /* Context size. 
*/ #define CTX_SHIFT 7 @@ -3903,15 +3868,26 @@ struct bnx2 { struct status_block *status_blk; u32 last_status_idx; - struct tx_bd *tx_desc_ring; - struct sw_bd *tx_buf_ring; - u32 tx_prod_bseq; - u16 tx_prod; - u16 tx_cons; - int tx_ring_size; + u32 flags; +#define PCIX_FLAG 1 +#define PCI_32BIT_FLAG 2 +#define ONE_TDMA_FLAG 4 /* no longer used */ +#define NO_WOL_FLAG 8 +#define USING_DAC_FLAG 0x10 +#define USING_MSI_FLAG 0x20 +#define ASF_ENABLE_FLAG 0x40 - u16 hw_tx_cons; - u16 hw_rx_cons; + /* Put tx producer and consumer fields in separate cache lines. */ + + u32 tx_prod_bseq __attribute__((aligned(L1_CACHE_BYTES))); + u16 tx_prod; + + struct tx_bd *tx_desc_ring; + struct sw_bd *tx_buf_ring; + int tx_ring_size; + + u16 tx_cons __attribute__((aligned(L1_CACHE_BYTES))); + u16 hw_tx_cons; #ifdef BCM_VLAN struct vlan_group *vlgrp; @@ -3920,19 +3896,23 @@ struct bnx2 { u32 rx_offset; u32 rx_buf_use_size; /* useable size */ u32 rx_buf_size; /* with alignment */ - struct rx_bd *rx_desc_ring; - struct sw_bd *rx_buf_ring; + u32 rx_max_ring_idx; + u32 rx_prod_bseq; u16 rx_prod; u16 rx_cons; + u16 hw_rx_cons; u32 rx_csum; + struct sw_bd *rx_buf_ring; + struct rx_bd *rx_desc_ring[MAX_RX_RINGS]; + /* Only used to synchronize netif_stop_queue/wake_queue when tx */ /* ring is full */ spinlock_t tx_lock; - /* End of fileds used in the performance code paths. */ + /* End of fields used in the performance code paths. */ char *name; @@ -3945,15 +3925,6 @@ struct bnx2 { /* Used to synchronize phy accesses. */ spinlock_t phy_lock; - u32 flags; -#define PCIX_FLAG 1 -#define PCI_32BIT_FLAG 2 -#define ONE_TDMA_FLAG 4 /* no longer used */ -#define NO_WOL_FLAG 8 -#define USING_DAC_FLAG 0x10 -#define USING_MSI_FLAG 0x20 -#define ASF_ENABLE_FLAG 0x40 - u32 phy_flags; #define PHY_SERDES_FLAG 1 #define PHY_CRC_FIX_FLAG 2 @@ -4004,8 +3975,9 @@ struct bnx2 { dma_addr_t tx_desc_mapping; + int rx_max_ring; int rx_ring_size; - dma_addr_t rx_desc_mapping; + dma_addr_t rx_desc_mapping[MAX_RX_RINGS]; u16 tx_quick_cons_trip; u16 tx_quick_cons_trip_int; @@ -4029,6 +4001,7 @@ struct bnx2 { struct statistics_block *stats_blk; dma_addr_t stats_blk_mapping; + u32 hc_cmd; u32 rx_mode; u16 req_line_speed; @@ -4073,6 +4046,8 @@ struct bnx2 { struct flash_spec *flash_info; u32 flash_size; + + int status_stats_size; }; static u32 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset); diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 6e295fce5c6f..8f1573e658a5 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c @@ -91,6 +91,7 @@ #include <linux/mii.h> #include <linux/ip.h> #include <linux/tcp.h> +#include <linux/mutex.h> #include <net/checksum.h> @@ -3892,7 +3893,7 @@ static void cas_reset(struct cas *cp, int blkflag) spin_unlock(&cp->stat_lock[N_TX_RINGS]); } -/* Shut down the chip, must be called with pm_sem held. */ +/* Shut down the chip, must be called with pm_mutex held. */ static void cas_shutdown(struct cas *cp) { unsigned long flags; @@ -4311,11 +4312,11 @@ static int cas_open(struct net_device *dev) int hw_was_up, err; unsigned long flags; - down(&cp->pm_sem); + mutex_lock(&cp->pm_mutex); hw_was_up = cp->hw_running; - /* The power-management semaphore protects the hw_running + /* The power-management mutex protects the hw_running * etc. 
state so it is safe to do this bit without cp->lock */ if (!cp->hw_running) { @@ -4364,7 +4365,7 @@ static int cas_open(struct net_device *dev) cas_unlock_all_restore(cp, flags); netif_start_queue(dev); - up(&cp->pm_sem); + mutex_unlock(&cp->pm_mutex); return 0; err_spare: @@ -4372,7 +4373,7 @@ err_spare: cas_free_rxds(cp); err_tx_tiny: cas_tx_tiny_free(cp); - up(&cp->pm_sem); + mutex_unlock(&cp->pm_mutex); return err; } @@ -4382,7 +4383,7 @@ static int cas_close(struct net_device *dev) struct cas *cp = netdev_priv(dev); /* Make sure we don't get distracted by suspend/resume */ - down(&cp->pm_sem); + mutex_lock(&cp->pm_mutex); netif_stop_queue(dev); @@ -4399,7 +4400,7 @@ static int cas_close(struct net_device *dev) cas_spare_free(cp); cas_free_rxds(cp); cas_tx_tiny_free(cp); - up(&cp->pm_sem); + mutex_unlock(&cp->pm_mutex); return 0; } @@ -4834,10 +4835,10 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) unsigned long flags; int rc = -EOPNOTSUPP; - /* Hold the PM semaphore while doing ioctl's or we may collide + /* Hold the PM mutex while doing ioctl's or we may collide * with open/close and power management and oops. */ - down(&cp->pm_sem); + mutex_lock(&cp->pm_mutex); switch (cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = cp->phy_addr; @@ -4867,7 +4868,7 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) break; }; - up(&cp->pm_sem); + mutex_unlock(&cp->pm_mutex); return rc; } @@ -4994,7 +4995,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev, spin_lock_init(&cp->tx_lock[i]); } spin_lock_init(&cp->stat_lock[N_TX_RINGS]); - init_MUTEX(&cp->pm_sem); + mutex_init(&cp->pm_mutex); init_timer(&cp->link_timer); cp->link_timer.function = cas_link_timer; @@ -5116,10 +5117,10 @@ err_out_free_consistent: cp->init_block, cp->block_dvma); err_out_iounmap: - down(&cp->pm_sem); + mutex_lock(&cp->pm_mutex); if (cp->hw_running) cas_shutdown(cp); - up(&cp->pm_sem); + mutex_unlock(&cp->pm_mutex); iounmap(cp->regs); @@ -5152,11 +5153,11 @@ static void __devexit cas_remove_one(struct pci_dev *pdev) cp = netdev_priv(dev); unregister_netdev(dev); - down(&cp->pm_sem); + mutex_lock(&cp->pm_mutex); flush_scheduled_work(); if (cp->hw_running) cas_shutdown(cp); - up(&cp->pm_sem); + mutex_unlock(&cp->pm_mutex); #if 1 if (cp->orig_cacheline_size) { @@ -5183,10 +5184,7 @@ static int cas_suspend(struct pci_dev *pdev, pm_message_t state) struct cas *cp = netdev_priv(dev); unsigned long flags; - /* We hold the PM semaphore during entire driver - * sleep time - */ - down(&cp->pm_sem); + mutex_lock(&cp->pm_mutex); /* If the driver is opened, we stop the DMA */ if (cp->opened) { @@ -5206,6 +5204,7 @@ static int cas_suspend(struct pci_dev *pdev, pm_message_t state) if (cp->hw_running) cas_shutdown(cp); + mutex_unlock(&cp->pm_mutex); return 0; } @@ -5217,6 +5216,7 @@ static int cas_resume(struct pci_dev *pdev) printk(KERN_INFO "%s: resuming\n", dev->name); + mutex_lock(&cp->pm_mutex); cas_hard_reset(cp); if (cp->opened) { unsigned long flags; @@ -5229,7 +5229,7 @@ static int cas_resume(struct pci_dev *pdev) netif_device_attach(dev); } - up(&cp->pm_sem); + mutex_unlock(&cp->pm_mutex); return 0; } #endif /* CONFIG_PM */ diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h index 88063ef16cf6..ab55c7ee1012 100644 --- a/drivers/net/cassini.h +++ b/drivers/net/cassini.h @@ -4284,7 +4284,7 @@ struct cas { * (ie. 
not power managed) */ int hw_running; int opened; - struct semaphore pm_sem; /* open/close/suspend/resume */ + struct mutex pm_mutex; /* open/close/suspend/resume */ struct cas_init_block *init_block; struct cas_tx_desc *init_txds[MAX_TX_RINGS]; diff --git a/drivers/net/depca.c b/drivers/net/depca.c index 03804cc38be0..0941d40f046f 100644 --- a/drivers/net/depca.c +++ b/drivers/net/depca.c @@ -1412,7 +1412,7 @@ static int __init depca_mca_probe(struct device *device) irq = 11; break; default: - printk("%s: mca_probe IRQ error. You should never get here (%d).\n", dev->name, where); + printk("%s: mca_probe IRQ error. You should never get here (%d).\n", mdev->name, where); return -EINVAL; } diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index f39de16e6b97..49cd096a3c3d 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -920,7 +920,7 @@ e1000_remove(struct pci_dev *pdev) unregister_netdev(netdev); #ifdef CONFIG_E1000_NAPI for (i = 0; i < adapter->num_rx_queues; i++) - __dev_put(&adapter->polling_netdev[i]); + dev_put(&adapter->polling_netdev[i]); #endif if (!e1000_check_phy_reset_block(&adapter->hw)) diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c index e67b1d06611c..95e2bb8dd7b4 100644 --- a/drivers/net/fs_enet/mac-fcc.c +++ b/drivers/net/fs_enet/mac-fcc.c @@ -118,6 +118,8 @@ static int do_pd_setup(struct fs_enet_private *fep) /* Fill out IRQ field */ fep->interrupt = platform_get_irq(pdev, 0); + if (fep->interrupt < 0) + return -EINVAL; /* Attach the memory for the FCC Parameter RAM */ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram"); diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c index 2e8f44469699..3dad69dfdb2c 100644 --- a/drivers/net/fs_enet/mac-fec.c +++ b/drivers/net/fs_enet/mac-fec.c @@ -144,6 +144,8 @@ static int do_pd_setup(struct fs_enet_private *fep) /* Fill out IRQ field */ fep->interrupt = platform_get_irq_byname(pdev,"interrupt"); + if (fep->interrupt < 0) + return -EINVAL; r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); fep->fec.fecp =(void*)r->start; diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c index a3897fda71fa..a772b286f96d 100644 --- a/drivers/net/fs_enet/mac-scc.c +++ b/drivers/net/fs_enet/mac-scc.c @@ -118,6 +118,8 @@ static int do_pd_setup(struct fs_enet_private *fep) /* Fill out IRQ field */ fep->interrupt = platform_get_irq_byname(pdev, "interrupt"); + if (fep->interrupt < 0) + return -EINVAL; r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); fep->scc.sccp = (void *)r->start; diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 0e8e3fcde9ff..771e25d8c417 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -193,8 +193,12 @@ static int gfar_probe(struct platform_device *pdev) priv->interruptTransmit = platform_get_irq_byname(pdev, "tx"); priv->interruptReceive = platform_get_irq_byname(pdev, "rx"); priv->interruptError = platform_get_irq_byname(pdev, "error"); + if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0) + goto regs_fail; } else { priv->interruptTransmit = platform_get_irq(pdev, 0); + if (priv->interruptTransmit < 0) + goto regs_fail; } /* get a pointer to the register memory */ diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig index c81fe1c382d5..5e6d00752990 100644 --- a/drivers/net/irda/Kconfig +++ b/drivers/net/irda/Kconfig @@ -64,6 +64,14 @@ config TEKRAM_DONGLE dongles you will have 
to start irattach like this: "irattach -d tekram". +config TOIM3232_DONGLE + tristate "TOIM3232 IrDa dongle" + depends on DONGLE && IRDA + help + Say Y here if you want to build support for the Vishay/Temic + TOIM3232 and TOIM4232 based dongles. + To compile it as a module, choose M here. + config LITELINK_DONGLE tristate "Parallax LiteLink dongle" depends on DONGLE && IRDA diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile index 72cbfdc9cfcc..27ab75f20799 100644 --- a/drivers/net/irda/Makefile +++ b/drivers/net/irda/Makefile @@ -43,6 +43,7 @@ obj-$(CONFIG_OLD_BELKIN_DONGLE) += old_belkin-sir.o obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o +obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o # The SIR helper module sir-dev-objs := sir_dev.o sir_dongle.o sir_kthread.o diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c index 3137592d60c0..910c0cab35b0 100644 --- a/drivers/net/irda/donauboe.c +++ b/drivers/net/irda/donauboe.c @@ -1778,7 +1778,7 @@ static struct pci_driver donauboe_pci_driver = { static int __init donauboe_init (void) { - return pci_module_init(&donauboe_pci_driver); + return pci_register_driver(&donauboe_pci_driver); } static void __exit diff --git a/drivers/net/irda/ep7211_ir.c b/drivers/net/irda/ep7211_ir.c index 31896262d21c..4cba38f7e4a8 100644 --- a/drivers/net/irda/ep7211_ir.c +++ b/drivers/net/irda/ep7211_ir.c @@ -8,6 +8,7 @@ #include <linux/delay.h> #include <linux/tty.h> #include <linux/init.h> +#include <linux/spinlock.h> #include <net/irda/irda.h> #include <net/irda/irda_device.h> @@ -23,6 +24,8 @@ static void ep7211_ir_close(dongle_t *self); static int ep7211_ir_change_speed(struct irda_task *task); static int ep7211_ir_reset(struct irda_task *task); +static DEFINE_SPINLOCK(ep7211_lock); + static struct dongle_reg dongle = { .type = IRDA_EP7211_IR, .open = ep7211_ir_open, @@ -36,7 +39,7 @@ static void ep7211_ir_open(dongle_t *self, struct qos_info *qos) { unsigned int syscon1, flags; - save_flags(flags); cli(); + spin_lock_irqsave(&ep7211_lock, flags); /* Turn on the SIR encoder. */ syscon1 = clps_readl(SYSCON1); @@ -46,14 +49,14 @@ static void ep7211_ir_open(dongle_t *self, struct qos_info *qos) /* XXX: We should disable modem status interrupts on the first UART (interrupt #14). */ - restore_flags(flags); + spin_unlock_irqrestore(&ep7211_lock, flags); } static void ep7211_ir_close(dongle_t *self) { unsigned int syscon1, flags; - save_flags(flags); cli(); + spin_lock_irqsave(&ep7211_lock, flags); /* Turn off the SIR encoder. */ syscon1 = clps_readl(SYSCON1); @@ -63,7 +66,7 @@ static void ep7211_ir_close(dongle_t *self) /* XXX: If we've disabled the modem status interrupts, we should reset them back to their original state. 
*/ - restore_flags(flags); + spin_unlock_irqrestore(&ep7211_lock, flags); } /* diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 8936058a3cce..6e2ec56cde0b 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c @@ -740,7 +740,7 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs) struct sk_buff *newskb; struct sk_buff *dataskb; struct urb *next_urb; - int docopy; + unsigned int len, docopy; IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length); @@ -851,10 +851,11 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs) dataskb->dev = self->netdev; dataskb->mac.raw = dataskb->data; dataskb->protocol = htons(ETH_P_IRDA); + len = dataskb->len; netif_rx(dataskb); /* Keep stats up to date */ - self->stats.rx_bytes += dataskb->len; + self->stats.rx_bytes += len; self->stats.rx_packets++; self->netdev->last_rx = jiffies; diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c index 101750bf210f..6a98b7ae4975 100644 --- a/drivers/net/irda/irtty-sir.c +++ b/drivers/net/irda/irtty-sir.c @@ -33,6 +33,7 @@ #include <asm/uaccess.h> #include <linux/smp_lock.h> #include <linux/delay.h> +#include <linux/mutex.h> #include <net/irda/irda.h> #include <net/irda/irda_device.h> @@ -338,7 +339,7 @@ static inline void irtty_stop_receiver(struct tty_struct *tty, int stop) /*****************************************************************/ /* serialize ldisc open/close with sir_dev */ -static DECLARE_MUTEX(irtty_sem); +static DEFINE_MUTEX(irtty_mutex); /* notifier from sir_dev when irda% device gets opened (ifup) */ @@ -348,11 +349,11 @@ static int irtty_start_dev(struct sir_dev *dev) struct tty_struct *tty; /* serialize with ldisc open/close */ - down(&irtty_sem); + mutex_lock(&irtty_mutex); priv = dev->priv; if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) { - up(&irtty_sem); + mutex_unlock(&irtty_mutex); return -ESTALE; } @@ -363,7 +364,7 @@ static int irtty_start_dev(struct sir_dev *dev) /* Make sure we can receive more data */ irtty_stop_receiver(tty, FALSE); - up(&irtty_sem); + mutex_unlock(&irtty_mutex); return 0; } @@ -375,11 +376,11 @@ static int irtty_stop_dev(struct sir_dev *dev) struct tty_struct *tty; /* serialize with ldisc open/close */ - down(&irtty_sem); + mutex_lock(&irtty_mutex); priv = dev->priv; if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) { - up(&irtty_sem); + mutex_unlock(&irtty_mutex); return -ESTALE; } @@ -390,7 +391,7 @@ static int irtty_stop_dev(struct sir_dev *dev) if (tty->driver->stop) tty->driver->stop(tty); - up(&irtty_sem); + mutex_unlock(&irtty_mutex); return 0; } @@ -514,13 +515,13 @@ static int irtty_open(struct tty_struct *tty) priv->dev = dev; /* serialize with start_dev - in case we were racing with ifup */ - down(&irtty_sem); + mutex_lock(&irtty_mutex); dev->priv = priv; tty->disc_data = priv; tty->receive_room = 65536; - up(&irtty_sem); + mutex_unlock(&irtty_mutex); IRDA_DEBUG(0, "%s - %s: irda line discipline opened\n", __FUNCTION__, tty->name); diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c index ee717d0e939e..83141a3ff546 100644 --- a/drivers/net/irda/nsc-ircc.c +++ b/drivers/net/irda/nsc-ircc.c @@ -12,6 +12,7 @@ * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no> * Copyright (c) 1998 Lichen Wang, <lwang@actisys.com> * Copyright (c) 1998 Actisys Corp., www.actisys.com + * Copyright (c) 2000-2004 Jean Tourrilhes <jt@hpl.hp.com> * All Rights Reserved * * This program is free software; you can redistribute it and/or @@ -53,14 +54,13 @@ 
#include <linux/init.h> #include <linux/rtnetlink.h> #include <linux/dma-mapping.h> +#include <linux/pnp.h> +#include <linux/platform_device.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/byteorder.h> -#include <linux/pm.h> -#include <linux/pm_legacy.h> - #include <net/irda/wrapper.h> #include <net/irda/irda.h> #include <net/irda/irda_device.h> @@ -72,14 +72,27 @@ static char *driver_name = "nsc-ircc"; +/* Power Management */ +#define NSC_IRCC_DRIVER_NAME "nsc-ircc" +static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state); +static int nsc_ircc_resume(struct platform_device *dev); + +static struct platform_driver nsc_ircc_driver = { + .suspend = nsc_ircc_suspend, + .resume = nsc_ircc_resume, + .driver = { + .name = NSC_IRCC_DRIVER_NAME, + }, +}; + /* Module parameters */ static int qos_mtt_bits = 0x07; /* 1 ms or more */ static int dongle_id; /* Use BIOS settions by default, but user may supply module parameters */ -static unsigned int io[] = { ~0, ~0, ~0, ~0 }; -static unsigned int irq[] = { 0, 0, 0, 0, 0 }; -static unsigned int dma[] = { 0, 0, 0, 0, 0 }; +static unsigned int io[] = { ~0, ~0, ~0, ~0, ~0 }; +static unsigned int irq[] = { 0, 0, 0, 0, 0 }; +static unsigned int dma[] = { 0, 0, 0, 0, 0 }; static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info); static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info); @@ -87,6 +100,7 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info); static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info); static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info); static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info); +static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id); /* These are the known NSC chips */ static nsc_chip_t chips[] = { @@ -101,11 +115,12 @@ static nsc_chip_t chips[] = { /* Contributed by Jan Frey - IBM A30/A31 */ { "PC8739x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xea, 0xff, nsc_ircc_probe_39x, nsc_ircc_init_39x }, + { "IBM", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf4, 0xff, + nsc_ircc_probe_39x, nsc_ircc_init_39x }, { NULL } }; -/* Max 4 instances for now */ -static struct nsc_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL }; +static struct nsc_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL, NULL }; static char *dongle_types[] = { "Differential serial interface", @@ -126,8 +141,24 @@ static char *dongle_types[] = { "No dongle connected", }; +/* PNP probing */ +static chipio_t pnp_info; +static const struct pnp_device_id nsc_ircc_pnp_table[] = { + { .id = "NSC6001", .driver_data = 0 }, + { .id = "IBM0071", .driver_data = 0 }, + { } +}; + +MODULE_DEVICE_TABLE(pnp, nsc_ircc_pnp_table); + +static struct pnp_driver nsc_ircc_pnp_driver = { + .name = "nsc-ircc", + .id_table = nsc_ircc_pnp_table, + .probe = nsc_ircc_pnp_probe, +}; + /* Some prototypes */ -static int nsc_ircc_open(int i, chipio_t *info); +static int nsc_ircc_open(chipio_t *info); static int nsc_ircc_close(struct nsc_ircc_cb *self); static int nsc_ircc_setup(chipio_t *info); static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self); @@ -146,7 +177,10 @@ static int nsc_ircc_net_open(struct net_device *dev); static int nsc_ircc_net_close(struct net_device *dev); static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev); -static int nsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data); + +/* Globals */ +static int pnp_registered; +static int pnp_succeeded; /* * Function 
nsc_ircc_init () @@ -158,28 +192,36 @@ static int __init nsc_ircc_init(void) { chipio_t info; nsc_chip_t *chip; - int ret = -ENODEV; + int ret; int cfg_base; int cfg, id; int reg; int i = 0; + ret = platform_driver_register(&nsc_ircc_driver); + if (ret) { + IRDA_ERROR("%s, Can't register driver!\n", driver_name); + return ret; + } + + /* Register with PnP subsystem to detect disable ports */ + ret = pnp_register_driver(&nsc_ircc_pnp_driver); + + if (ret >= 0) + pnp_registered = 1; + + ret = -ENODEV; + /* Probe for all the NSC chipsets we know about */ - for (chip=chips; chip->name ; chip++) { + for (chip = chips; chip->name ; chip++) { IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __FUNCTION__, chip->name); /* Try all config registers for this chip */ - for (cfg=0; cfg<3; cfg++) { + for (cfg = 0; cfg < ARRAY_SIZE(chip->cfg); cfg++) { cfg_base = chip->cfg[cfg]; if (!cfg_base) continue; - - memset(&info, 0, sizeof(chipio_t)); - info.cfg_base = cfg_base; - info.fir_base = io[i]; - info.dma = dma[i]; - info.irq = irq[i]; /* Read index register */ reg = inb(cfg_base); @@ -194,24 +236,65 @@ static int __init nsc_ircc_init(void) if ((id & chip->cid_mask) == chip->cid_value) { IRDA_DEBUG(2, "%s() Found %s chip, revision=%d\n", __FUNCTION__, chip->name, id & ~chip->cid_mask); - /* - * If the user supplies the base address, then - * we init the chip, if not we probe the values - * set by the BIOS - */ - if (io[i] < 0x2000) { - chip->init(chip, &info); - } else - chip->probe(chip, &info); - if (nsc_ircc_open(i, &info) == 0) - ret = 0; + /* + * If we found a correct PnP setting, + * we first try it. + */ + if (pnp_succeeded) { + memset(&info, 0, sizeof(chipio_t)); + info.cfg_base = cfg_base; + info.fir_base = pnp_info.fir_base; + info.dma = pnp_info.dma; + info.irq = pnp_info.irq; + + if (info.fir_base < 0x2000) { + IRDA_MESSAGE("%s, chip->init\n", driver_name); + chip->init(chip, &info); + } else + chip->probe(chip, &info); + + if (nsc_ircc_open(&info) >= 0) + ret = 0; + } + + /* + * Opening based on PnP values failed. + * Let's fallback to user values, or probe + * the chip. 
+ */ + if (ret) { + IRDA_DEBUG(2, "%s, PnP init failed\n", driver_name); + memset(&info, 0, sizeof(chipio_t)); + info.cfg_base = cfg_base; + info.fir_base = io[i]; + info.dma = dma[i]; + info.irq = irq[i]; + + /* + * If the user supplies the base address, then + * we init the chip, if not we probe the values + * set by the BIOS + */ + if (io[i] < 0x2000) { + chip->init(chip, &info); + } else + chip->probe(chip, &info); + + if (nsc_ircc_open(&info) >= 0) + ret = 0; + } i++; } else { IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __FUNCTION__, id); } } - + } + + if (ret) { + platform_driver_unregister(&nsc_ircc_driver); + pnp_unregister_driver(&nsc_ircc_pnp_driver); + pnp_registered = 0; } return ret; @@ -227,12 +310,17 @@ static void __exit nsc_ircc_cleanup(void) { int i; - pm_unregister_all(nsc_ircc_pmproc); - - for (i=0; i < 4; i++) { + for (i = 0; i < ARRAY_SIZE(dev_self); i++) { if (dev_self[i]) nsc_ircc_close(dev_self[i]); } + + platform_driver_unregister(&nsc_ircc_driver); + + if (pnp_registered) + pnp_unregister_driver(&nsc_ircc_pnp_driver); + + pnp_registered = 0; } /* @@ -241,16 +329,26 @@ static void __exit nsc_ircc_cleanup(void) * Open driver instance * */ -static int __init nsc_ircc_open(int i, chipio_t *info) +static int __init nsc_ircc_open(chipio_t *info) { struct net_device *dev; struct nsc_ircc_cb *self; - struct pm_dev *pmdev; void *ret; - int err; + int err, chip_index; IRDA_DEBUG(2, "%s()\n", __FUNCTION__); + + for (chip_index = 0; chip_index < ARRAY_SIZE(dev_self); chip_index++) { + if (!dev_self[chip_index]) + break; + } + + if (chip_index == ARRAY_SIZE(dev_self)) { + IRDA_ERROR("%s(), maximum number of supported chips reached!\n", __FUNCTION__); + return -ENOMEM; + } + IRDA_MESSAGE("%s, Found chip at base=0x%03x\n", driver_name, info->cfg_base); @@ -271,8 +369,8 @@ static int __init nsc_ircc_open(int i, chipio_t *info) spin_lock_init(&self->lock); /* Need to store self somewhere */ - dev_self[i] = self; - self->index = i; + dev_self[chip_index] = self; + self->index = chip_index; /* Initialize IO */ self->io.cfg_base = info->cfg_base; @@ -351,7 +449,7 @@ static int __init nsc_ircc_open(int i, chipio_t *info) /* Check if user has supplied a valid dongle id or not */ if ((dongle_id <= 0) || - (dongle_id >= (sizeof(dongle_types) / sizeof(dongle_types[0]))) ) { + (dongle_id >= ARRAY_SIZE(dongle_types))) { dongle_id = nsc_ircc_read_dongle_id(self->io.fir_base); IRDA_MESSAGE("%s, Found dongle: %s\n", driver_name, @@ -364,11 +462,18 @@ static int __init nsc_ircc_open(int i, chipio_t *info) self->io.dongle_id = dongle_id; nsc_ircc_init_dongle_interface(self->io.fir_base, dongle_id); - pmdev = pm_register(PM_SYS_DEV, PM_SYS_IRDA, nsc_ircc_pmproc); - if (pmdev) - pmdev->data = self; + self->pldev = platform_device_register_simple(NSC_IRCC_DRIVER_NAME, + self->index, NULL, 0); + if (IS_ERR(self->pldev)) { + err = PTR_ERR(self->pldev); + goto out5; + } + platform_set_drvdata(self->pldev, self); - return 0; + return chip_index; + + out5: + unregister_netdev(dev); out4: dma_free_coherent(NULL, self->tx_buff.truesize, self->tx_buff.head, self->tx_buff_dma); @@ -379,7 +484,7 @@ static int __init nsc_ircc_open(int i, chipio_t *info) release_region(self->io.fir_base, self->io.fir_ext); out1: free_netdev(dev); - dev_self[i] = NULL; + dev_self[chip_index] = NULL; return err; } @@ -399,6 +504,8 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self) iobase = self->io.fir_base; + platform_device_unregister(self->pldev); + /* Remove netdevice */ unregister_netdev(self->netdev); @@ 
-806,6 +913,43 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info) return 0; } +/* PNP probing */ +static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id) +{ + memset(&pnp_info, 0, sizeof(chipio_t)); + pnp_info.irq = -1; + pnp_info.dma = -1; + pnp_succeeded = 1; + + /* There don't seem to be any way to get the cfg_base. + * On my box, cfg_base is in the PnP descriptor of the + * motherboard. Oh well... Jean II */ + + if (pnp_port_valid(dev, 0) && + !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) + pnp_info.fir_base = pnp_port_start(dev, 0); + + if (pnp_irq_valid(dev, 0) && + !(pnp_irq_flags(dev, 0) & IORESOURCE_DISABLED)) + pnp_info.irq = pnp_irq(dev, 0); + + if (pnp_dma_valid(dev, 0) && + !(pnp_dma_flags(dev, 0) & IORESOURCE_DISABLED)) + pnp_info.dma = pnp_dma(dev, 0); + + IRDA_DEBUG(0, "%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n", + __FUNCTION__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma); + + if((pnp_info.fir_base == 0) || + (pnp_info.irq == -1) || (pnp_info.dma == -1)) { + /* Returning an error will disable the device. Yuck ! */ + //return -EINVAL; + pnp_succeeded = 0; + } + + return 0; +} + /* * Function nsc_ircc_setup (info) * @@ -2161,45 +2305,83 @@ static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev) return &self->stats; } -static void nsc_ircc_suspend(struct nsc_ircc_cb *self) +static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state) { - IRDA_MESSAGE("%s, Suspending\n", driver_name); + struct nsc_ircc_cb *self = platform_get_drvdata(dev); + int bank; + unsigned long flags; + int iobase = self->io.fir_base; if (self->io.suspended) - return; + return 0; - nsc_ircc_net_close(self->netdev); + IRDA_DEBUG(1, "%s, Suspending\n", driver_name); + rtnl_lock(); + if (netif_running(self->netdev)) { + netif_device_detach(self->netdev); + spin_lock_irqsave(&self->lock, flags); + /* Save current bank */ + bank = inb(iobase+BSR); + + /* Disable interrupts */ + switch_bank(iobase, BANK0); + outb(0, iobase+IER); + + /* Restore bank register */ + outb(bank, iobase+BSR); + + spin_unlock_irqrestore(&self->lock, flags); + free_irq(self->io.irq, self->netdev); + disable_dma(self->io.dma); + } self->io.suspended = 1; + rtnl_unlock(); + + return 0; } -static void nsc_ircc_wakeup(struct nsc_ircc_cb *self) +static int nsc_ircc_resume(struct platform_device *dev) { + struct nsc_ircc_cb *self = platform_get_drvdata(dev); + unsigned long flags; + if (!self->io.suspended) - return; + return 0; + IRDA_DEBUG(1, "%s, Waking up\n", driver_name); + + rtnl_lock(); nsc_ircc_setup(&self->io); - nsc_ircc_net_open(self->netdev); - - IRDA_MESSAGE("%s, Waking up\n", driver_name); + nsc_ircc_init_dongle_interface(self->io.fir_base, self->io.dongle_id); + if (netif_running(self->netdev)) { + if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, + self->netdev->name, self->netdev)) { + IRDA_WARNING("%s, unable to allocate irq=%d\n", + driver_name, self->io.irq); + + /* + * Don't fail resume process, just kill this + * network interface + */ + unregister_netdevice(self->netdev); + } else { + spin_lock_irqsave(&self->lock, flags); + nsc_ircc_change_speed(self, self->io.speed); + spin_unlock_irqrestore(&self->lock, flags); + netif_device_attach(self->netdev); + } + + } else { + spin_lock_irqsave(&self->lock, flags); + nsc_ircc_change_speed(self, 9600); + spin_unlock_irqrestore(&self->lock, flags); + } self->io.suspended = 0; -} + rtnl_unlock(); -static int nsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data) 
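The new nsc_ircc_suspend()/nsc_ircc_resume() handlers above replace the old pm_register() callback, but the platform_driver they hang off is defined outside the hunks shown. A sketch of the assumed wiring, using only names that appear elsewhere in the patch:

/* Assumed shape of the platform driver registered in nsc_ircc_init(). */
static struct platform_driver nsc_ircc_driver = {
	.suspend	= nsc_ircc_suspend,	/* detach netdev, mask IER, free irq/dma */
	.resume		= nsc_ircc_resume,	/* re-run nsc_ircc_setup() and reattach */
	.driver		= {
		.name	= NSC_IRCC_DRIVER_NAME,
	},
};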
-{ - struct nsc_ircc_cb *self = (struct nsc_ircc_cb*) dev->data; - if (self) { - switch (rqst) { - case PM_SUSPEND: - nsc_ircc_suspend(self); - break; - case PM_RESUME: - nsc_ircc_wakeup(self); - break; - } - } - return 0; + return 0; } MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>"); diff --git a/drivers/net/irda/nsc-ircc.h b/drivers/net/irda/nsc-ircc.h index 6edf7e514624..dacf671abcd6 100644 --- a/drivers/net/irda/nsc-ircc.h +++ b/drivers/net/irda/nsc-ircc.h @@ -269,7 +269,7 @@ struct nsc_ircc_cb { __u32 new_speed; int index; /* Instance index */ - struct pm_dev *dev; + struct platform_device *pldev; }; static inline void switch_bank(int iobase, int bank) diff --git a/drivers/net/irda/sir_dongle.c b/drivers/net/irda/sir_dongle.c index 8d225921ae7b..d7e32d9554fc 100644 --- a/drivers/net/irda/sir_dongle.c +++ b/drivers/net/irda/sir_dongle.c @@ -16,6 +16,7 @@ #include <linux/init.h> #include <linux/smp_lock.h> #include <linux/kmod.h> +#include <linux/mutex.h> #include <net/irda/irda.h> @@ -28,7 +29,7 @@ */ static LIST_HEAD(dongle_list); /* list of registered dongle drivers */ -static DECLARE_MUTEX(dongle_list_lock); /* protects the list */ +static DEFINE_MUTEX(dongle_list_lock); /* protects the list */ int irda_register_dongle(struct dongle_driver *new) { @@ -38,25 +39,25 @@ int irda_register_dongle(struct dongle_driver *new) IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n", __FUNCTION__, new->driver_name, new->type); - down(&dongle_list_lock); + mutex_lock(&dongle_list_lock); list_for_each(entry, &dongle_list) { drv = list_entry(entry, struct dongle_driver, dongle_list); if (new->type == drv->type) { - up(&dongle_list_lock); + mutex_unlock(&dongle_list_lock); return -EEXIST; } } list_add(&new->dongle_list, &dongle_list); - up(&dongle_list_lock); + mutex_unlock(&dongle_list_lock); return 0; } EXPORT_SYMBOL(irda_register_dongle); int irda_unregister_dongle(struct dongle_driver *drv) { - down(&dongle_list_lock); + mutex_lock(&dongle_list_lock); list_del(&drv->dongle_list); - up(&dongle_list_lock); + mutex_unlock(&dongle_list_lock); return 0; } EXPORT_SYMBOL(irda_unregister_dongle); @@ -75,7 +76,7 @@ int sirdev_get_dongle(struct sir_dev *dev, IRDA_DONGLE type) return -EBUSY; /* serialize access to the list of registered dongles */ - down(&dongle_list_lock); + mutex_lock(&dongle_list_lock); list_for_each(entry, &dongle_list) { drv = list_entry(entry, struct dongle_driver, dongle_list); @@ -109,14 +110,14 @@ int sirdev_get_dongle(struct sir_dev *dev, IRDA_DONGLE type) if (!drv->open || (err=drv->open(dev))!=0) goto out_reject; /* failed to open driver */ - up(&dongle_list_lock); + mutex_unlock(&dongle_list_lock); return 0; out_reject: dev->dongle_drv = NULL; module_put(drv->owner); out_unlock: - up(&dongle_list_lock); + mutex_unlock(&dongle_list_lock); return err; } diff --git a/drivers/net/irda/toim3232-sir.c b/drivers/net/irda/toim3232-sir.c new file mode 100644 index 000000000000..aa1a9b0ed83e --- /dev/null +++ b/drivers/net/irda/toim3232-sir.c @@ -0,0 +1,375 @@ +/********************************************************************* + * + * Filename: toim3232-sir.c + * Version: 1.0 + * Description: Implementation of dongles based on the Vishay/Temic + * TOIM3232 SIR Endec chipset. Currently only the + * IRWave IR320ST-2 is tested, although it should work + * with any TOIM3232 or TOIM4232 chipset based RS232 + * dongle with minimal modification. + * Based heavily on the Tekram driver (tekram.c), + * with thanks to Dag Brattli and Martin Diehl. + * Status: Experimental. 
+ * Author: David Basden <davidb-irda@rcpt.to> + * Created at: Thu Feb 09 23:47:32 2006 + * + * Copyright (c) 2006 David Basden. + * Copyright (c) 1998-1999 Dag Brattli, + * Copyright (c) 2002 Martin Diehl, + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * Neither Dag Brattli nor University of Tromsø admit liability nor + * provide warranty for any of this software. This material is + * provided "AS-IS" and at no charge. + * + ********************************************************************/ + +/* + * This driver has currently only been tested on the IRWave IR320ST-2 + * + * PROTOCOL: + * + * The protocol for talking to the TOIM3232 is quite easy, and is + * designed to interface with RS232 with only level convertors. The + * BR/~D line on the chip is brought high to signal 'command mode', + * where a command byte is sent to select the baudrate of the RS232 + * interface and the pulse length of the IRDA output. When BR/~D + * is brought low, the dongle then changes to the selected baudrate, + * and the RS232 interface is used for data until BR/~D is brought + * high again. The initial speed for the TOIMx323 after RESET is + * 9600 baud. The baudrate for command-mode is the last selected + * baud-rate, or 9600 after a RESET. + * + * The dongle I have (below) adds some extra hardware on the front end, + * but this is mostly directed towards pariasitic power from the RS232 + * line rather than changing very much about how to communicate with + * the TOIM3232. + * + * The protocol to talk to the TOIM4232 chipset seems to be almost + * identical to the TOIM3232 (and the 4232 datasheet is more detailed) + * so this code will probably work on that as well, although I haven't + * tested it on that hardware. + * + * Target dongle variations that might be common: + * + * DTR and RTS function: + * The data sheet for the 4232 has a sample implementation that hooks the + * DTR and RTS lines to the RESET and BaudRate/~Data lines of the + * chip (through line-converters). Given both DTR and RTS would have to + * be held low in normal operation, and the TOIMx232 requires +5V to + * signal ground, most dongle designers would almost certainly choose + * an implementation that kept at least one of DTR or RTS high in + * normal operation to provide power to the dongle, but will likely + * vary between designs. + * + * User specified command bits: + * There are two user-controllable output lines from the TOIMx232 that + * can be set low or high by setting the appropriate bits in the + * high-nibble of the command byte (when setting speed and pulse length). + * These might be used to switch on and off added hardware or extra + * dongle features. + * + * + * Target hardware: IRWave IR320ST-2 + * + * The IRWave IR320ST-2 is a simple dongle based on the Vishay/Temic + * TOIM3232 SIR Endec and the Vishay/Temic TFDS4500 SIR IRDA transciever. + * It uses a hex inverter and some discrete components to buffer and + * line convert the RS232 down to 5V. + * + * The dongle is powered through a voltage regulator, fed by a large + * capacitor. To switch the dongle on, DTR is brought high to charge + * the capacitor and drive the voltage regulator. DTR isn't associated + * with any control lines on the TOIM3232. 
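As a concrete illustration of the command-mode handshake described above, here is the whole sequence collapsed into one blocking helper. This is a sketch only: the real driver spreads the same steps across the sir_dev state machine (see toim3232_change_speed() below), and the TOIM3232_* constants and the toim3232delay module parameter it relies on are the ones defined further down in this file.

/* Sketch: program the dongle for 19200 baud in one synchronous call. */
static void toim3232_program_19200_example(struct sir_dev *dev)
{
	u8 byte = TOIM3232_PW | TOIM3232_19200;	/* low nibble: baud, bit 4: pulse width */

	sirdev_set_dtr_rts(dev, TRUE, FALSE);	/* RTS low -> BR/~D high: command mode */
	udelay(14);				/* datasheet asks for at least 7us */
	sirdev_raw_write(dev, &byte, 1);	/* control byte selects baud and pulse */
	msleep(toim3232delay);			/* let the byte drain before switching */
	sirdev_set_dtr_rts(dev, TRUE, TRUE);	/* RTS high -> BR/~D low: back to data mode */
	dev->speed = 19200;
}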
Parisitic power is also taken + * from the RTS, TD and RD lines when brought high, but through resistors. + * When DTR is low, the circuit might lose power even with RTS high. + * + * RTS is inverted and attached to the BR/~D input pin. When RTS + * is high, BR/~D is low, and the TOIM3232 is in the normal 'data' mode. + * RTS is brought low, BR/~D is high, and the TOIM3232 is in 'command + * mode'. + * + * For some unknown reason, the RESET line isn't actually connected + * to anything. This means to reset the dongle to get it to a known + * state (9600 baud) you must drop DTR and RTS low, wait for the power + * capacitor to discharge, and then bring DTR (and RTS for data mode) + * high again, and wait for the capacitor to charge, the power supply + * to stabilise, and the oscillator clock to stabilise. + * + * Fortunately, if the current baudrate is known, the chipset can + * easily change speed by entering command mode without having to + * reset the dongle first. + * + * Major Components: + * + * - Vishay/Temic TOIM3232 SIR Endec to change RS232 pulse timings + * to IRDA pulse timings + * - 3.6864MHz crystal to drive TOIM3232 clock oscillator + * - DM74lS04M Inverting Hex line buffer for RS232 input buffering + * and level conversion + * - PJ2951AC 150mA voltage regulator + * - Vishay/Temic TFDS4500 SIR IRDA front-end transceiver + * + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/init.h> + +#include <net/irda/irda.h> + +#include "sir-dev.h" + +static int toim3232delay = 150; /* default is 150 ms */ +module_param(toim3232delay, int, 0); +MODULE_PARM_DESC(toim3232delay, "toim3232 dongle write complete delay"); + +#if 0 +static int toim3232flipdtr = 0; /* default is DTR high to reset */ +module_param(toim3232flipdtr, int, 0); +MODULE_PARM_DESC(toim3232flipdtr, "toim3232 dongle invert DTR (Reset)"); + +static int toim3232fliprts = 0; /* default is RTS high for baud change */ +module_param(toim3232fliptrs, int, 0); +MODULE_PARM_DESC(toim3232fliprts, "toim3232 dongle invert RTS (BR/D)"); +#endif + +static int toim3232_open(struct sir_dev *); +static int toim3232_close(struct sir_dev *); +static int toim3232_change_speed(struct sir_dev *, unsigned); +static int toim3232_reset(struct sir_dev *); + +#define TOIM3232_115200 0x00 +#define TOIM3232_57600 0x01 +#define TOIM3232_38400 0x02 +#define TOIM3232_19200 0x03 +#define TOIM3232_9600 0x06 +#define TOIM3232_2400 0x0A + +#define TOIM3232_PW 0x10 /* Pulse select bit */ + +static struct dongle_driver toim3232 = { + .owner = THIS_MODULE, + .driver_name = "Vishay TOIM3232", + .type = IRDA_TOIM3232_DONGLE, + .open = toim3232_open, + .close = toim3232_close, + .reset = toim3232_reset, + .set_speed = toim3232_change_speed, +}; + +static int __init toim3232_sir_init(void) +{ + if (toim3232delay < 1 || toim3232delay > 500) + toim3232delay = 200; + IRDA_DEBUG(1, "%s - using %d ms delay\n", + toim3232.driver_name, toim3232delay); + return irda_register_dongle(&toim3232); +} + +static void __exit toim3232_sir_cleanup(void) +{ + irda_unregister_dongle(&toim3232); +} + +static int toim3232_open(struct sir_dev *dev) +{ + struct qos_info *qos = &dev->qos; + + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); + + /* Pull the lines high to start with. + * + * For the IR320ST-2, we need to charge the main supply capacitor to + * switch the device on. We keep DTR high throughout to do this. + * When RTS, TD and RD are high, they will also trickle-charge the + * cap. RTS is high for data transmission, and low for baud rate select. 
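The dongle only becomes useful once a SIR port driver binds it through the registry that sir_dongle.c (converted to a mutex earlier in this patch) exposes. A hedged sketch of that attach path, assuming the sirdev_get_dongle()/sirdev_put_dongle() API from that file; the function and variable names here are illustrative:

/* Sketch: how a SIR port driver would attach this dongle.  'dev' is the
 * port's struct sir_dev; IRDA_TOIM3232_DONGLE is the type registered above. */
static int example_attach_toim3232(struct sir_dev *dev)
{
	int err;

	err = sirdev_get_dongle(dev, IRDA_TOIM3232_DONGLE);
	if (err)	/* the core may load the module via its "irda-dongle-12" alias */
		return err;

	/* ... use the link; on close the reference is dropped again: */
	sirdev_put_dongle(dev);
	return 0;
}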
+ * -- DGB + */ + sirdev_set_dtr_rts(dev, TRUE, TRUE); + + /* The TOI3232 supports many speeds between 1200bps and 115000bps. + * We really only care about those supported by the IRDA spec, but + * 38400 seems to be implemented in many places */ + qos->baud_rate.bits &= IR_2400|IR_9600|IR_19200|IR_38400|IR_57600|IR_115200; + + /* From the tekram driver. Not sure what a reasonable value is -- DGB */ + qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */ + irda_qos_bits_to_value(qos); + + /* irda thread waits 50 msec for power settling */ + + return 0; +} + +static int toim3232_close(struct sir_dev *dev) +{ + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); + + /* Power off dongle */ + sirdev_set_dtr_rts(dev, FALSE, FALSE); + + return 0; +} + +/* + * Function toim3232change_speed (dev, state, speed) + * + * Set the speed for the TOIM3232 based dongle. Warning, this + * function must be called with a process context! + * + * Algorithm + * 1. keep DTR high but clear RTS to bring into baud programming mode + * 2. wait at least 7us to enter programming mode + * 3. send control word to set baud rate and timing + * 4. wait at least 1us + * 5. bring RTS high to enter DATA mode (RS232 is passed through to transceiver) + * 6. should take effect immediately (although probably worth waiting) + */ + +#define TOIM3232_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED + 1) + +static int toim3232_change_speed(struct sir_dev *dev, unsigned speed) +{ + unsigned state = dev->fsm.substate; + unsigned delay = 0; + u8 byte; + static int ret = 0; + + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); + + switch(state) { + case SIRDEV_STATE_DONGLE_SPEED: + + /* Figure out what we are going to send as a control byte */ + switch (speed) { + case 2400: + byte = TOIM3232_PW|TOIM3232_2400; + break; + default: + speed = 9600; + ret = -EINVAL; + /* fall thru */ + case 9600: + byte = TOIM3232_PW|TOIM3232_9600; + break; + case 19200: + byte = TOIM3232_PW|TOIM3232_19200; + break; + case 38400: + byte = TOIM3232_PW|TOIM3232_38400; + break; + case 57600: + byte = TOIM3232_PW|TOIM3232_57600; + break; + case 115200: + byte = TOIM3232_115200; + break; + } + + /* Set DTR, Clear RTS: Go into baud programming mode */ + sirdev_set_dtr_rts(dev, TRUE, FALSE); + + /* Wait at least 7us */ + udelay(14); + + /* Write control byte */ + sirdev_raw_write(dev, &byte, 1); + + dev->speed = speed; + + state = TOIM3232_STATE_WAIT_SPEED; + delay = toim3232delay; + break; + + case TOIM3232_STATE_WAIT_SPEED: + /* Have transmitted control byte * Wait for 'at least 1us' */ + udelay(14); + + /* Set DTR, Set RTS: Go into normal data mode */ + sirdev_set_dtr_rts(dev, TRUE, TRUE); + + /* Wait (TODO: check this is needed) */ + udelay(50); + break; + + default: + printk(KERN_ERR "%s - undefined state %d\n", __FUNCTION__, state); + ret = -EINVAL; + break; + } + + dev->fsm.substate = state; + return (delay > 0) ? delay : ret; +} + +/* + * Function toim3232reset (driver) + * + * This function resets the toim3232 dongle. Warning, this function + * must be called with a process context!! + * + * What we should do is: + * 0. Pull RESET high + * 1. Wait for at least 7us + * 2. Pull RESET low + * 3. Wait for at least 7us + * 4. Pull BR/~D high + * 5. Wait for at least 7us + * 6. Send control byte to set baud rate + * 7. Wait at least 1us after stop bit + * 8. Pull BR/~D low + * 9. Should then be in data mode + * + * Because the IR320ST-2 doesn't have the RESET line connected for some reason, + * we'll have to do something else. 
+ * + * The default speed after a RESET is 9600, so let's try just bringing it up in + * data mode after switching it off, waiting for the supply capacitor to + * discharge, and then switch it back on. This isn't actually pulling RESET + * high, but it seems to have the same effect. + * + * This behaviour will probably work on dongles that have the RESET line connected, + * but if not, add a flag for the IR320ST-2, and implement the above-listed proper + * behaviour. + * + * RTS is inverted and then fed to BR/~D, so to put it in programming mode, we + * need to pull RTS low + */ + +static int toim3232_reset(struct sir_dev *dev) +{ + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); + + /* Switch off both DTR and RTS to switch off dongle */ + sirdev_set_dtr_rts(dev, FALSE, FALSE); + + /* Should sleep a while. This might be evil doing it this way.*/ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(50)); + + /* Set DTR, Set RTS (data mode) */ + sirdev_set_dtr_rts(dev, TRUE, TRUE); + + /* Wait at least 10 ms for power to stabilize again */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(10)); + + /* Speed should now be 9600 */ + dev->speed = 9600; + + return 0; +} + +MODULE_AUTHOR("David Basden <davidb-linux@rcpt.to>"); +MODULE_DESCRIPTION("Vishay/Temic TOIM3232 based dongle driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("irda-dongle-12"); /* IRDA_TOIM3232_DONGLE */ + +module_init(toim3232_sir_init); +module_exit(toim3232_sir_cleanup); diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index a9f49f058cfb..97a49e0be76b 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c @@ -1887,7 +1887,7 @@ static int __init vlsi_mod_init(void) vlsi_proc_root->owner = THIS_MODULE; } - ret = pci_module_init(&vlsi_irda_driver); + ret = pci_register_driver(&vlsi_irda_driver); if (ret && vlsi_proc_root) remove_proc_entry(PROC_DIR, NULL); diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c index d82651a97bae..6f7dce8eba51 100644 --- a/drivers/net/ixp2000/enp2611.c +++ b/drivers/net/ixp2000/enp2611.c @@ -16,7 +16,7 @@ #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/moduleparam.h> -#include <asm/arch/uengine.h> +#include <asm/hardware/uengine.h> #include <asm/mach-types.h> #include <asm/io.h> #include "ixpdev.h" diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c index 09f03f493bea..77f104a005f3 100644 --- a/drivers/net/ixp2000/ixpdev.c +++ b/drivers/net/ixp2000/ixpdev.c @@ -16,7 +16,7 @@ #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/moduleparam.h> -#include <asm/arch/uengine.h> +#include <asm/hardware/uengine.h> #include <asm/mach-types.h> #include <asm/io.h> #include "ixp2400_rx.ucode" diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 690a1aae0b34..0c13795dca38 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -172,11 +172,9 @@ static struct net_device_stats *get_stats(struct net_device *dev) memset(stats, 0, sizeof(struct net_device_stats)); - for (i=0; i < NR_CPUS; i++) { + for_each_cpu(i) { struct net_device_stats *lb_stats; - if (!cpu_possible(i)) - continue; lb_stats = &per_cpu(loopback_stats, i); stats->rx_bytes += lb_stats->rx_bytes; stats->tx_bytes += lb_stats->tx_bytes; diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h index 7754d1974b9e..4262c1da6d4a 100644 --- a/drivers/net/mv643xx_eth.h +++ b/drivers/net/mv643xx_eth.h @@ -42,13 +42,23 @@ #define MAX_DESCS_PER_SKB 1
#endif +/* + * The MV643XX HW requires 8-byte alignment. However, when I/O + * is non-cache-coherent, we need to ensure that the I/O buffers + * we use don't share cache lines with other data. + */ +#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_NOT_COHERENT_CACHE) +#define ETH_DMA_ALIGN L1_CACHE_BYTES +#else +#define ETH_DMA_ALIGN 8 +#endif + #define ETH_VLAN_HLEN 4 #define ETH_FCS_LEN 4 -#define ETH_DMA_ALIGN 8 /* hw requires 8-byte alignment */ -#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */ +#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */ #define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \ - ETH_VLAN_HLEN + ETH_FCS_LEN) -#define ETH_RX_SKB_SIZE ((dev->mtu + ETH_WRAPPER_LEN + 7) & ~0x7) + ETH_VLAN_HLEN + ETH_FCS_LEN) +#define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + ETH_DMA_ALIGN) #define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */ #define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */ diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 7e900572eaf8..9595f74da93f 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c @@ -22,12 +22,12 @@ *************************************************************************/ #define DRV_NAME "pcnet32" -#define DRV_VERSION "1.31c" -#define DRV_RELDATE "01.Nov.2005" +#define DRV_VERSION "1.32" +#define DRV_RELDATE "18.Mar.2006" #define PFX DRV_NAME ": " -static const char * const version = -DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n"; +static const char *const version = + DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n"; #include <linux/module.h> #include <linux/kernel.h> @@ -58,18 +58,23 @@ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n"; * PCI device identifiers for "new style" Linux PCI Device Drivers */ static struct pci_device_id pcnet32_pci_tbl[] = { - { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - /* - * Adapters that were sold with IBM's RS/6000 or pSeries hardware have - * the incorrect vendor id. - */ - { PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0 }, - { 0, } + { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + + /* + * Adapters that were sold with IBM's RS/6000 or pSeries hardware have + * the incorrect vendor id. + */ + { PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE, + PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0}, + + { } /* terminate list */ }; -MODULE_DEVICE_TABLE (pci, pcnet32_pci_tbl); +MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl); static int cards_found; @@ -77,13 +82,11 @@ static int cards_found; * VLB I/O addresses */ static unsigned int pcnet32_portlist[] __initdata = - { 0x300, 0x320, 0x340, 0x360, 0 }; - - + { 0x300, 0x320, 0x340, 0x360, 0 }; static int pcnet32_debug = 0; -static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */ -static int pcnet32vlb; /* check for VLB cards ? */ +static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */ +static int pcnet32vlb; /* check for VLB cards ? 
*/ static struct net_device *pcnet32_dev; @@ -110,32 +113,34 @@ static int rx_copybreak = 200; * to internal options */ static const unsigned char options_mapping[] = { - PCNET32_PORT_ASEL, /* 0 Auto-select */ - PCNET32_PORT_AUI, /* 1 BNC/AUI */ - PCNET32_PORT_AUI, /* 2 AUI/BNC */ - PCNET32_PORT_ASEL, /* 3 not supported */ - PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */ - PCNET32_PORT_ASEL, /* 5 not supported */ - PCNET32_PORT_ASEL, /* 6 not supported */ - PCNET32_PORT_ASEL, /* 7 not supported */ - PCNET32_PORT_ASEL, /* 8 not supported */ - PCNET32_PORT_MII, /* 9 MII 10baseT */ - PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */ - PCNET32_PORT_MII, /* 11 MII (autosel) */ - PCNET32_PORT_10BT, /* 12 10BaseT */ - PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */ - PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD, /* 14 MII 100BaseTx-FD */ - PCNET32_PORT_ASEL /* 15 not supported */ + PCNET32_PORT_ASEL, /* 0 Auto-select */ + PCNET32_PORT_AUI, /* 1 BNC/AUI */ + PCNET32_PORT_AUI, /* 2 AUI/BNC */ + PCNET32_PORT_ASEL, /* 3 not supported */ + PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */ + PCNET32_PORT_ASEL, /* 5 not supported */ + PCNET32_PORT_ASEL, /* 6 not supported */ + PCNET32_PORT_ASEL, /* 7 not supported */ + PCNET32_PORT_ASEL, /* 8 not supported */ + PCNET32_PORT_MII, /* 9 MII 10baseT */ + PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */ + PCNET32_PORT_MII, /* 11 MII (autosel) */ + PCNET32_PORT_10BT, /* 12 10BaseT */ + PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */ + /* 14 MII 100BaseTx-FD */ + PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD, + PCNET32_PORT_ASEL /* 15 not supported */ }; static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = { - "Loopback test (offline)" + "Loopback test (offline)" }; + #define PCNET32_TEST_LEN (sizeof(pcnet32_gstrings_test) / ETH_GSTRING_LEN) -#define PCNET32_NUM_REGS 168 +#define PCNET32_NUM_REGS 136 -#define MAX_UNITS 8 /* More are supported, limit only on options */ +#define MAX_UNITS 8 /* More are supported, limit only on options */ static int options[MAX_UNITS]; static int full_duplex[MAX_UNITS]; static int homepna[MAX_UNITS]; @@ -151,124 +156,6 @@ static int homepna[MAX_UNITS]; */ /* - * History: - * v0.01: Initial version - * only tested on Alpha Noname Board - * v0.02: changed IRQ handling for new interrupt scheme (dev_id) - * tested on a ASUS SP3G - * v0.10: fixed an odd problem with the 79C974 in a Compaq Deskpro XL - * looks like the 974 doesn't like stopping and restarting in a - * short period of time; now we do a reinit of the lance; the - * bug was triggered by doing ifconfig eth0 <ip> broadcast <addr> - * and hangs the machine (thanks to Klaus Liedl for debugging) - * v0.12: by suggestion from Donald Becker: Renamed driver to pcnet32, - * made it standalone (no need for lance.c) - * v0.13: added additional PCI detecting for special PCI devices (Compaq) - * v0.14: stripped down additional PCI probe (thanks to David C Niemi - * and sveneric@xs4all.nl for testing this on their Compaq boxes) - * v0.15: added 79C965 (VLB) probe - * added interrupt sharing for PCI chips - * v0.16: fixed set_multicast_list on Alpha machines - * v0.17: removed hack from dev.c; now pcnet32 uses ethif_probe in Space.c - * v0.19: changed setting of autoselect bit - * v0.20: removed additional Compaq PCI probe; there is now a working one - * in arch/i386/bios32.c - * v0.21: added endian conversion for ppc, from work by cort@cs.nmt.edu - * v0.22: added printing of status to ring dump - * 
v0.23: changed enet_statistics to net_devive_stats - * v0.90: added multicast filter - * added module support - * changed irq probe to new style - * added PCnetFast chip id - * added fix for receive stalls with Intel saturn chipsets - * added in-place rx skbs like in the tulip driver - * minor cleanups - * v0.91: added PCnetFast+ chip id - * back port to 2.0.x - * v1.00: added some stuff from Donald Becker's 2.0.34 version - * added support for byte counters in net_dev_stats - * v1.01: do ring dumps, only when debugging the driver - * increased the transmit timeout - * v1.02: fixed memory leak in pcnet32_init_ring() - * v1.10: workaround for stopped transmitter - * added port selection for modules - * detect special T1/E1 WAN card and setup port selection - * v1.11: fixed wrong checking of Tx errors - * v1.20: added check of return value kmalloc (cpeterso@cs.washington.edu) - * added save original kmalloc addr for freeing (mcr@solidum.com) - * added support for PCnetHome chip (joe@MIT.EDU) - * rewritten PCI card detection - * added dwio mode to get driver working on some PPC machines - * v1.21: added mii selection and mii ioctl - * v1.22: changed pci scanning code to make PPC people happy - * fixed switching to 32bit mode in pcnet32_open() (thanks - * to Michael Richard <mcr@solidum.com> for noticing this one) - * added sub vendor/device id matching (thanks again to - * Michael Richard <mcr@solidum.com>) - * added chip id for 79c973/975 (thanks to Zach Brown <zab@zabbo.net>) - * v1.23 fixed small bug, when manual selecting MII speed/duplex - * v1.24 Applied Thomas' patch to use TxStartPoint and thus decrease TxFIFO - * underflows. Added tx_start_pt module parameter. Increased - * TX_RING_SIZE from 16 to 32. Added #ifdef'd code to use DXSUFLO - * for FAST[+] chipsets. <kaf@fc.hp.com> - * v1.24ac Added SMP spinlocking - Alan Cox <alan@redhat.com> - * v1.25kf Added No Interrupt on successful Tx for some Tx's <kaf@fc.hp.com> - * v1.26 Converted to pci_alloc_consistent, Jamey Hicks / George France - * <jamey@crl.dec.com> - * - Fixed a few bugs, related to running the controller in 32bit mode. - * 23 Oct, 2000. Carsten Langgaard, carstenl@mips.com - * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. - * v1.26p Fix oops on rmmod+insmod; plug i/o resource leak - Paul Gortmaker - * v1.27 improved CSR/PROM address detection, lots of cleanups, - * new pcnet32vlb module option, HP-PARISC support, - * added module parameter descriptions, - * initial ethtool support - Helge Deller <deller@gmx.de> - * v1.27a Sun Feb 10 2002 Go Taniguchi <go@turbolinux.co.jp> - * use alloc_etherdev and register_netdev - * fix pci probe not increment cards_found - * FD auto negotiate error workaround for xSeries250 - * clean up and using new mii module - * v1.27b Sep 30 2002 Kent Yoder <yoder1@us.ibm.com> - * Added timer for cable connection state changes. - * v1.28 20 Feb 2004 Don Fry <brazilnut@us.ibm.com> - * Jon Mason <jonmason@us.ibm.com>, Chinmay Albal <albal@in.ibm.com> - * Now uses ethtool_ops, netif_msg_* and generic_mii_ioctl. - * Fixes bogus 'Bus master arbitration failure', pci_[un]map_single - * length errors, and transmit hangs. Cleans up after errors in open. - * Jim Lewis <jklewis@us.ibm.com> added ethernet loopback test. - * Thomas Munck Steenholdt <tmus@tmus.dk> non-mii ioctl corrections. - * v1.29 6 Apr 2004 Jim Lewis <jklewis@us.ibm.com> added physical - * identification code (blink led's) and register dump. 
- * Don Fry added timer for 971/972 so skbufs don't remain on tx ring - * forever. - * v1.30 18 May 2004 Don Fry removed timer and Last Transmit Interrupt - * (ltint) as they added complexity and didn't give good throughput. - * v1.30a 22 May 2004 Don Fry limit frames received during interrupt. - * v1.30b 24 May 2004 Don Fry fix bogus tx carrier errors with 79c973, - * assisted by Bruce Penrod <bmpenrod@endruntechnologies.com>. - * v1.30c 25 May 2004 Don Fry added netif_wake_queue after pcnet32_restart. - * v1.30d 01 Jun 2004 Don Fry discard oversize rx packets. - * v1.30e 11 Jun 2004 Don Fry recover after fifo error and rx hang. - * v1.30f 16 Jun 2004 Don Fry cleanup IRQ to allow 0 and 1 for PCI, - * expanding on suggestions from Ralf Baechle <ralf@linux-mips.org>, - * and Brian Murphy <brian@murphy.dk>. - * v1.30g 22 Jun 2004 Patrick Simmons <psimmons@flash.net> added option - * homepna for selecting HomePNA mode for PCNet/Home 79C978. - * v1.30h 24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32. - * v1.30i 28 Jun 2004 Don Fry change to use module_param. - * v1.30j 29 Apr 2005 Don Fry fix skb/map leak with loopback test. - * v1.31 02 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> added set_ringparam(). - * v1.31a 12 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> set min ring size to 4 - * to allow loopback test to work unchanged. - * v1.31b 06 Oct 2005 Don Fry changed alloc_ring to show name of device - * if allocation fails - * v1.31c 01 Nov 2005 Don Fry Allied Telesyn 2700/2701 FX are 100Mbit only. - * Force 100Mbit FD if Auto (ASEL) is selected. - * See Bugzilla 2669 and 4551. - */ - - -/* * Set the number of Tx and Rx buffers, using Log_2(# buffers). * Reasonable default values are 4 Tx buffers, and 16 Rx buffers. * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4). @@ -303,42 +190,42 @@ static int homepna[MAX_UNITS]; /* The PCNET32 Rx and Tx ring descriptors. */ struct pcnet32_rx_head { - u32 base; - s16 buf_length; - s16 status; - u32 msg_length; - u32 reserved; + u32 base; + s16 buf_length; + s16 status; + u32 msg_length; + u32 reserved; }; struct pcnet32_tx_head { - u32 base; - s16 length; - s16 status; - u32 misc; - u32 reserved; + u32 base; + s16 length; + s16 status; + u32 misc; + u32 reserved; }; /* The PCNET32 32-Bit initialization block, described in databook. */ struct pcnet32_init_block { - u16 mode; - u16 tlen_rlen; - u8 phys_addr[6]; - u16 reserved; - u32 filter[2]; - /* Receive and transmit ring base, along with extra bits. */ - u32 rx_ring; - u32 tx_ring; + u16 mode; + u16 tlen_rlen; + u8 phys_addr[6]; + u16 reserved; + u32 filter[2]; + /* Receive and transmit ring base, along with extra bits. */ + u32 rx_ring; + u32 tx_ring; }; /* PCnet32 access functions */ struct pcnet32_access { - u16 (*read_csr)(unsigned long, int); - void (*write_csr)(unsigned long, int, u16); - u16 (*read_bcr)(unsigned long, int); - void (*write_bcr)(unsigned long, int, u16); - u16 (*read_rap)(unsigned long); - void (*write_rap)(unsigned long, u16); - void (*reset)(unsigned long); + u16 (*read_csr) (unsigned long, int); + void (*write_csr) (unsigned long, int, u16); + u16 (*read_bcr) (unsigned long, int); + void (*write_bcr) (unsigned long, int, u16); + u16 (*read_rap) (unsigned long); + void (*write_rap) (unsigned long, u16); + void (*reset) (unsigned long); }; /* @@ -346,760 +233,794 @@ struct pcnet32_access { * so the structure should be allocated using pci_alloc_consistent(). 
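As the comment above notes, pcnet32_private embeds the init block, so the whole structure has to live in DMA-consistent memory. A minimal sketch of that allocation as it would appear in the probe path (variable names such as 'pdev' are illustrative; the real allocation is in pcnet32_probe1(), outside this hunk):

struct pcnet32_private *lp;
dma_addr_t lp_dma_addr;

/* init_block sits inside pcnet32_private, so the structure must come
 * from consistent (coherent) DMA memory, not ordinary kmalloc memory. */
lp = pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr);
if (lp == NULL)
	return -ENOMEM;
memset(lp, 0, sizeof(*lp));
lp->dma_addr = lp_dma_addr;	/* remembered for pci_free_consistent() */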
*/ struct pcnet32_private { - struct pcnet32_init_block init_block; - /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */ - struct pcnet32_rx_head *rx_ring; - struct pcnet32_tx_head *tx_ring; - dma_addr_t dma_addr; /* DMA address of beginning of this - object, returned by - pci_alloc_consistent */ - struct pci_dev *pci_dev; /* Pointer to the associated pci device - structure */ - const char *name; - /* The saved address of a sent-in-place packet/buffer, for skfree(). */ - struct sk_buff **tx_skbuff; - struct sk_buff **rx_skbuff; - dma_addr_t *tx_dma_addr; - dma_addr_t *rx_dma_addr; - struct pcnet32_access a; - spinlock_t lock; /* Guard lock */ - unsigned int cur_rx, cur_tx; /* The next free ring entry */ - unsigned int rx_ring_size; /* current rx ring size */ - unsigned int tx_ring_size; /* current tx ring size */ - unsigned int rx_mod_mask; /* rx ring modular mask */ - unsigned int tx_mod_mask; /* tx ring modular mask */ - unsigned short rx_len_bits; - unsigned short tx_len_bits; - dma_addr_t rx_ring_dma_addr; - dma_addr_t tx_ring_dma_addr; - unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ - struct net_device_stats stats; - char tx_full; - int options; - unsigned int shared_irq:1, /* shared irq possible */ - dxsuflo:1, /* disable transmit stop on uflo */ - mii:1; /* mii port available */ - struct net_device *next; - struct mii_if_info mii_if; - struct timer_list watchdog_timer; - struct timer_list blink_timer; - u32 msg_enable; /* debug message level */ + struct pcnet32_init_block init_block; + /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */ + struct pcnet32_rx_head *rx_ring; + struct pcnet32_tx_head *tx_ring; + dma_addr_t dma_addr;/* DMA address of beginning of this + object, returned by pci_alloc_consistent */ + struct pci_dev *pci_dev; + const char *name; + /* The saved address of a sent-in-place packet/buffer, for skfree(). */ + struct sk_buff **tx_skbuff; + struct sk_buff **rx_skbuff; + dma_addr_t *tx_dma_addr; + dma_addr_t *rx_dma_addr; + struct pcnet32_access a; + spinlock_t lock; /* Guard lock */ + unsigned int cur_rx, cur_tx; /* The next free ring entry */ + unsigned int rx_ring_size; /* current rx ring size */ + unsigned int tx_ring_size; /* current tx ring size */ + unsigned int rx_mod_mask; /* rx ring modular mask */ + unsigned int tx_mod_mask; /* tx ring modular mask */ + unsigned short rx_len_bits; + unsigned short tx_len_bits; + dma_addr_t rx_ring_dma_addr; + dma_addr_t tx_ring_dma_addr; + unsigned int dirty_rx, /* ring entries to be freed. 
*/ + dirty_tx; + + struct net_device_stats stats; + char tx_full; + char phycount; /* number of phys found */ + int options; + unsigned int shared_irq:1, /* shared irq possible */ + dxsuflo:1, /* disable transmit stop on uflo */ + mii:1; /* mii port available */ + struct net_device *next; + struct mii_if_info mii_if; + struct timer_list watchdog_timer; + struct timer_list blink_timer; + u32 msg_enable; /* debug message level */ + + /* each bit indicates an available PHY */ + u32 phymask; }; static void pcnet32_probe_vlbus(void); -static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); -static int pcnet32_probe1(unsigned long, int, struct pci_dev *); -static int pcnet32_open(struct net_device *); -static int pcnet32_init_ring(struct net_device *); -static int pcnet32_start_xmit(struct sk_buff *, struct net_device *); -static int pcnet32_rx(struct net_device *); -static void pcnet32_tx_timeout (struct net_device *dev); +static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); +static int pcnet32_probe1(unsigned long, int, struct pci_dev *); +static int pcnet32_open(struct net_device *); +static int pcnet32_init_ring(struct net_device *); +static int pcnet32_start_xmit(struct sk_buff *, struct net_device *); +static int pcnet32_rx(struct net_device *); +static void pcnet32_tx_timeout(struct net_device *dev); static irqreturn_t pcnet32_interrupt(int, void *, struct pt_regs *); -static int pcnet32_close(struct net_device *); +static int pcnet32_close(struct net_device *); static struct net_device_stats *pcnet32_get_stats(struct net_device *); static void pcnet32_load_multicast(struct net_device *dev); static void pcnet32_set_multicast_list(struct net_device *); -static int pcnet32_ioctl(struct net_device *, struct ifreq *, int); +static int pcnet32_ioctl(struct net_device *, struct ifreq *, int); static void pcnet32_watchdog(struct net_device *); static int mdio_read(struct net_device *dev, int phy_id, int reg_num); -static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val); +static void mdio_write(struct net_device *dev, int phy_id, int reg_num, + int val); static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits); static void pcnet32_ethtool_test(struct net_device *dev, - struct ethtool_test *eth_test, u64 *data); -static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1); + struct ethtool_test *eth_test, u64 * data); +static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1); static int pcnet32_phys_id(struct net_device *dev, u32 data); static void pcnet32_led_blink_callback(struct net_device *dev); static int pcnet32_get_regs_len(struct net_device *dev); static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, - void *ptr); + void *ptr); static void pcnet32_purge_tx_ring(struct net_device *dev); static int pcnet32_alloc_ring(struct net_device *dev, char *name); static void pcnet32_free_ring(struct net_device *dev); - +static void pcnet32_check_media(struct net_device *dev, int verbose); enum pci_flags_bit { - PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4, - PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3, + PCI_USES_IO = 1, PCI_USES_MEM = 2, PCI_USES_MASTER = 4, + PCI_ADDR0 = 0x10 << 0, PCI_ADDR1 = 0x10 << 1, PCI_ADDR2 = + 0x10 << 2, PCI_ADDR3 = 0x10 << 3, }; - -static u16 pcnet32_wio_read_csr (unsigned long addr, int index) +static u16 pcnet32_wio_read_csr(unsigned long addr, int index) { - outw (index, 
addr+PCNET32_WIO_RAP); - return inw (addr+PCNET32_WIO_RDP); + outw(index, addr + PCNET32_WIO_RAP); + return inw(addr + PCNET32_WIO_RDP); } -static void pcnet32_wio_write_csr (unsigned long addr, int index, u16 val) +static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val) { - outw (index, addr+PCNET32_WIO_RAP); - outw (val, addr+PCNET32_WIO_RDP); + outw(index, addr + PCNET32_WIO_RAP); + outw(val, addr + PCNET32_WIO_RDP); } -static u16 pcnet32_wio_read_bcr (unsigned long addr, int index) +static u16 pcnet32_wio_read_bcr(unsigned long addr, int index) { - outw (index, addr+PCNET32_WIO_RAP); - return inw (addr+PCNET32_WIO_BDP); + outw(index, addr + PCNET32_WIO_RAP); + return inw(addr + PCNET32_WIO_BDP); } -static void pcnet32_wio_write_bcr (unsigned long addr, int index, u16 val) +static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val) { - outw (index, addr+PCNET32_WIO_RAP); - outw (val, addr+PCNET32_WIO_BDP); + outw(index, addr + PCNET32_WIO_RAP); + outw(val, addr + PCNET32_WIO_BDP); } -static u16 pcnet32_wio_read_rap (unsigned long addr) +static u16 pcnet32_wio_read_rap(unsigned long addr) { - return inw (addr+PCNET32_WIO_RAP); + return inw(addr + PCNET32_WIO_RAP); } -static void pcnet32_wio_write_rap (unsigned long addr, u16 val) +static void pcnet32_wio_write_rap(unsigned long addr, u16 val) { - outw (val, addr+PCNET32_WIO_RAP); + outw(val, addr + PCNET32_WIO_RAP); } -static void pcnet32_wio_reset (unsigned long addr) +static void pcnet32_wio_reset(unsigned long addr) { - inw (addr+PCNET32_WIO_RESET); + inw(addr + PCNET32_WIO_RESET); } -static int pcnet32_wio_check (unsigned long addr) +static int pcnet32_wio_check(unsigned long addr) { - outw (88, addr+PCNET32_WIO_RAP); - return (inw (addr+PCNET32_WIO_RAP) == 88); + outw(88, addr + PCNET32_WIO_RAP); + return (inw(addr + PCNET32_WIO_RAP) == 88); } static struct pcnet32_access pcnet32_wio = { - .read_csr = pcnet32_wio_read_csr, - .write_csr = pcnet32_wio_write_csr, - .read_bcr = pcnet32_wio_read_bcr, - .write_bcr = pcnet32_wio_write_bcr, - .read_rap = pcnet32_wio_read_rap, - .write_rap = pcnet32_wio_write_rap, - .reset = pcnet32_wio_reset + .read_csr = pcnet32_wio_read_csr, + .write_csr = pcnet32_wio_write_csr, + .read_bcr = pcnet32_wio_read_bcr, + .write_bcr = pcnet32_wio_write_bcr, + .read_rap = pcnet32_wio_read_rap, + .write_rap = pcnet32_wio_write_rap, + .reset = pcnet32_wio_reset }; -static u16 pcnet32_dwio_read_csr (unsigned long addr, int index) +static u16 pcnet32_dwio_read_csr(unsigned long addr, int index) { - outl (index, addr+PCNET32_DWIO_RAP); - return (inl (addr+PCNET32_DWIO_RDP) & 0xffff); + outl(index, addr + PCNET32_DWIO_RAP); + return (inl(addr + PCNET32_DWIO_RDP) & 0xffff); } -static void pcnet32_dwio_write_csr (unsigned long addr, int index, u16 val) +static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val) { - outl (index, addr+PCNET32_DWIO_RAP); - outl (val, addr+PCNET32_DWIO_RDP); + outl(index, addr + PCNET32_DWIO_RAP); + outl(val, addr + PCNET32_DWIO_RDP); } -static u16 pcnet32_dwio_read_bcr (unsigned long addr, int index) +static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index) { - outl (index, addr+PCNET32_DWIO_RAP); - return (inl (addr+PCNET32_DWIO_BDP) & 0xffff); + outl(index, addr + PCNET32_DWIO_RAP); + return (inl(addr + PCNET32_DWIO_BDP) & 0xffff); } -static void pcnet32_dwio_write_bcr (unsigned long addr, int index, u16 val) +static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val) { - outl (index, addr+PCNET32_DWIO_RAP); - outl 
(val, addr+PCNET32_DWIO_BDP); + outl(index, addr + PCNET32_DWIO_RAP); + outl(val, addr + PCNET32_DWIO_BDP); } -static u16 pcnet32_dwio_read_rap (unsigned long addr) +static u16 pcnet32_dwio_read_rap(unsigned long addr) { - return (inl (addr+PCNET32_DWIO_RAP) & 0xffff); + return (inl(addr + PCNET32_DWIO_RAP) & 0xffff); } -static void pcnet32_dwio_write_rap (unsigned long addr, u16 val) +static void pcnet32_dwio_write_rap(unsigned long addr, u16 val) { - outl (val, addr+PCNET32_DWIO_RAP); + outl(val, addr + PCNET32_DWIO_RAP); } -static void pcnet32_dwio_reset (unsigned long addr) +static void pcnet32_dwio_reset(unsigned long addr) { - inl (addr+PCNET32_DWIO_RESET); + inl(addr + PCNET32_DWIO_RESET); } -static int pcnet32_dwio_check (unsigned long addr) +static int pcnet32_dwio_check(unsigned long addr) { - outl (88, addr+PCNET32_DWIO_RAP); - return ((inl (addr+PCNET32_DWIO_RAP) & 0xffff) == 88); + outl(88, addr + PCNET32_DWIO_RAP); + return ((inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88); } static struct pcnet32_access pcnet32_dwio = { - .read_csr = pcnet32_dwio_read_csr, - .write_csr = pcnet32_dwio_write_csr, - .read_bcr = pcnet32_dwio_read_bcr, - .write_bcr = pcnet32_dwio_write_bcr, - .read_rap = pcnet32_dwio_read_rap, - .write_rap = pcnet32_dwio_write_rap, - .reset = pcnet32_dwio_reset + .read_csr = pcnet32_dwio_read_csr, + .write_csr = pcnet32_dwio_write_csr, + .read_bcr = pcnet32_dwio_read_bcr, + .write_bcr = pcnet32_dwio_write_bcr, + .read_rap = pcnet32_dwio_read_rap, + .write_rap = pcnet32_dwio_write_rap, + .reset = pcnet32_dwio_reset }; #ifdef CONFIG_NET_POLL_CONTROLLER static void pcnet32_poll_controller(struct net_device *dev) { - disable_irq(dev->irq); - pcnet32_interrupt(0, dev, NULL); - enable_irq(dev->irq); + disable_irq(dev->irq); + pcnet32_interrupt(0, dev, NULL); + enable_irq(dev->irq); } #endif - static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { - struct pcnet32_private *lp = dev->priv; - unsigned long flags; - int r = -EOPNOTSUPP; - - if (lp->mii) { - spin_lock_irqsave(&lp->lock, flags); - mii_ethtool_gset(&lp->mii_if, cmd); - spin_unlock_irqrestore(&lp->lock, flags); - r = 0; - } - return r; + struct pcnet32_private *lp = dev->priv; + unsigned long flags; + int r = -EOPNOTSUPP; + + if (lp->mii) { + spin_lock_irqsave(&lp->lock, flags); + mii_ethtool_gset(&lp->mii_if, cmd); + spin_unlock_irqrestore(&lp->lock, flags); + r = 0; + } + return r; } static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { - struct pcnet32_private *lp = dev->priv; - unsigned long flags; - int r = -EOPNOTSUPP; + struct pcnet32_private *lp = dev->priv; + unsigned long flags; + int r = -EOPNOTSUPP; - if (lp->mii) { - spin_lock_irqsave(&lp->lock, flags); - r = mii_ethtool_sset(&lp->mii_if, cmd); - spin_unlock_irqrestore(&lp->lock, flags); - } - return r; + if (lp->mii) { + spin_lock_irqsave(&lp->lock, flags); + r = mii_ethtool_sset(&lp->mii_if, cmd); + spin_unlock_irqrestore(&lp->lock, flags); + } + return r; } -static void pcnet32_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +static void pcnet32_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) { - struct pcnet32_private *lp = dev->priv; - - strcpy (info->driver, DRV_NAME); - strcpy (info->version, DRV_VERSION); - if (lp->pci_dev) - strcpy (info->bus_info, pci_name(lp->pci_dev)); - else - sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr); + struct pcnet32_private *lp = dev->priv; + + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); 
+ if (lp->pci_dev) + strcpy(info->bus_info, pci_name(lp->pci_dev)); + else + sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr); } static u32 pcnet32_get_link(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; - unsigned long flags; - int r; - - spin_lock_irqsave(&lp->lock, flags); - if (lp->mii) { - r = mii_link_ok(&lp->mii_if); - } else { - ulong ioaddr = dev->base_addr; /* card base I/O address */ - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); - } - spin_unlock_irqrestore(&lp->lock, flags); + struct pcnet32_private *lp = dev->priv; + unsigned long flags; + int r; - return r; + spin_lock_irqsave(&lp->lock, flags); + if (lp->mii) { + r = mii_link_ok(&lp->mii_if); + } else { + ulong ioaddr = dev->base_addr; /* card base I/O address */ + r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); + } + spin_unlock_irqrestore(&lp->lock, flags); + + return r; } static u32 pcnet32_get_msglevel(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; - return lp->msg_enable; + struct pcnet32_private *lp = dev->priv; + return lp->msg_enable; } static void pcnet32_set_msglevel(struct net_device *dev, u32 value) { - struct pcnet32_private *lp = dev->priv; - lp->msg_enable = value; + struct pcnet32_private *lp = dev->priv; + lp->msg_enable = value; } static int pcnet32_nway_reset(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; - unsigned long flags; - int r = -EOPNOTSUPP; + struct pcnet32_private *lp = dev->priv; + unsigned long flags; + int r = -EOPNOTSUPP; - if (lp->mii) { - spin_lock_irqsave(&lp->lock, flags); - r = mii_nway_restart(&lp->mii_if); - spin_unlock_irqrestore(&lp->lock, flags); - } - return r; + if (lp->mii) { + spin_lock_irqsave(&lp->lock, flags); + r = mii_nway_restart(&lp->mii_if); + spin_unlock_irqrestore(&lp->lock, flags); + } + return r; } -static void pcnet32_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +static void pcnet32_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = dev->priv; - ering->tx_max_pending = TX_MAX_RING_SIZE - 1; - ering->tx_pending = lp->tx_ring_size - 1; - ering->rx_max_pending = RX_MAX_RING_SIZE - 1; - ering->rx_pending = lp->rx_ring_size - 1; + ering->tx_max_pending = TX_MAX_RING_SIZE - 1; + ering->tx_pending = lp->tx_ring_size - 1; + ering->rx_max_pending = RX_MAX_RING_SIZE - 1; + ering->rx_pending = lp->rx_ring_size - 1; } -static int pcnet32_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +static int pcnet32_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) { - struct pcnet32_private *lp = dev->priv; - unsigned long flags; - int i; - - if (ering->rx_mini_pending || ering->rx_jumbo_pending) - return -EINVAL; - - if (netif_running(dev)) - pcnet32_close(dev); - - spin_lock_irqsave(&lp->lock, flags); - pcnet32_free_ring(dev); - lp->tx_ring_size = min(ering->tx_pending, (unsigned int) TX_MAX_RING_SIZE); - lp->rx_ring_size = min(ering->rx_pending, (unsigned int) RX_MAX_RING_SIZE); - - /* set the minimum ring size to 4, to allow the loopback test to work - * unchanged. 
- */ - for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) { - if (lp->tx_ring_size <= (1 << i)) - break; - } - lp->tx_ring_size = (1 << i); - lp->tx_mod_mask = lp->tx_ring_size - 1; - lp->tx_len_bits = (i << 12); - - for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) { - if (lp->rx_ring_size <= (1 << i)) - break; - } - lp->rx_ring_size = (1 << i); - lp->rx_mod_mask = lp->rx_ring_size - 1; - lp->rx_len_bits = (i << 4); - - if (pcnet32_alloc_ring(dev, dev->name)) { + struct pcnet32_private *lp = dev->priv; + unsigned long flags; + int i; + + if (ering->rx_mini_pending || ering->rx_jumbo_pending) + return -EINVAL; + + if (netif_running(dev)) + pcnet32_close(dev); + + spin_lock_irqsave(&lp->lock, flags); pcnet32_free_ring(dev); - spin_unlock_irqrestore(&lp->lock, flags); - return -ENOMEM; - } + lp->tx_ring_size = + min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE); + lp->rx_ring_size = + min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE); + + /* set the minimum ring size to 4, to allow the loopback test to work + * unchanged. + */ + for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) { + if (lp->tx_ring_size <= (1 << i)) + break; + } + lp->tx_ring_size = (1 << i); + lp->tx_mod_mask = lp->tx_ring_size - 1; + lp->tx_len_bits = (i << 12); - spin_unlock_irqrestore(&lp->lock, flags); + for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) { + if (lp->rx_ring_size <= (1 << i)) + break; + } + lp->rx_ring_size = (1 << i); + lp->rx_mod_mask = lp->rx_ring_size - 1; + lp->rx_len_bits = (i << 4); + + if (pcnet32_alloc_ring(dev, dev->name)) { + pcnet32_free_ring(dev); + spin_unlock_irqrestore(&lp->lock, flags); + return -ENOMEM; + } - if (pcnet32_debug & NETIF_MSG_DRV) - printk(KERN_INFO PFX "%s: Ring Param Settings: RX: %d, TX: %d\n", - dev->name, lp->rx_ring_size, lp->tx_ring_size); + spin_unlock_irqrestore(&lp->lock, flags); - if (netif_running(dev)) - pcnet32_open(dev); + if (pcnet32_debug & NETIF_MSG_DRV) + printk(KERN_INFO PFX + "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name, + lp->rx_ring_size, lp->tx_ring_size); - return 0; + if (netif_running(dev)) + pcnet32_open(dev); + + return 0; } -static void pcnet32_get_strings(struct net_device *dev, u32 stringset, u8 *data) +static void pcnet32_get_strings(struct net_device *dev, u32 stringset, + u8 * data) { - memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test)); + memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test)); } static int pcnet32_self_test_count(struct net_device *dev) { - return PCNET32_TEST_LEN; + return PCNET32_TEST_LEN; } static void pcnet32_ethtool_test(struct net_device *dev, - struct ethtool_test *test, u64 *data) + struct ethtool_test *test, u64 * data) { - struct pcnet32_private *lp = dev->priv; - int rc; - - if (test->flags == ETH_TEST_FL_OFFLINE) { - rc = pcnet32_loopback_test(dev, data); - if (rc) { - if (netif_msg_hw(lp)) - printk(KERN_DEBUG "%s: Loopback test failed.\n", dev->name); - test->flags |= ETH_TEST_FL_FAILED; + struct pcnet32_private *lp = dev->priv; + int rc; + + if (test->flags == ETH_TEST_FL_OFFLINE) { + rc = pcnet32_loopback_test(dev, data); + if (rc) { + if (netif_msg_hw(lp)) + printk(KERN_DEBUG "%s: Loopback test failed.\n", + dev->name); + test->flags |= ETH_TEST_FL_FAILED; + } else if (netif_msg_hw(lp)) + printk(KERN_DEBUG "%s: Loopback test passed.\n", + dev->name); } else if (netif_msg_hw(lp)) - printk(KERN_DEBUG "%s: Loopback test passed.\n", dev->name); - } else if (netif_msg_hw(lp)) - printk(KERN_DEBUG "%s: No tests to run (specify 'Offline' on ethtool).", dev->name); -} /* 
end pcnet32_ethtool_test */ + printk(KERN_DEBUG + "%s: No tests to run (specify 'Offline' on ethtool).", + dev->name); +} /* end pcnet32_ethtool_test */ -static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1) +static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) { - struct pcnet32_private *lp = dev->priv; - struct pcnet32_access *a = &lp->a; /* access to registers */ - ulong ioaddr = dev->base_addr; /* card base I/O address */ - struct sk_buff *skb; /* sk buff */ - int x, i; /* counters */ - int numbuffs = 4; /* number of TX/RX buffers and descs */ - u16 status = 0x8300; /* TX ring status */ - u16 teststatus; /* test of ring status */ - int rc; /* return code */ - int size; /* size of packets */ - unsigned char *packet; /* source packet data */ - static const int data_len = 60; /* length of source packets */ - unsigned long flags; - unsigned long ticks; - - *data1 = 1; /* status of test, default to fail */ - rc = 1; /* default to fail */ - - if (netif_running(dev)) - pcnet32_close(dev); - - spin_lock_irqsave(&lp->lock, flags); - - /* Reset the PCNET32 */ - lp->a.reset (ioaddr); - - /* switch pcnet32 to 32bit mode */ - lp->a.write_bcr (ioaddr, 20, 2); - - lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); - lp->init_block.filter[0] = 0; - lp->init_block.filter[1] = 0; - - /* purge & init rings but don't actually restart */ - pcnet32_restart(dev, 0x0000); - - lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ - - /* Initialize Transmit buffers. */ - size = data_len + 15; - for (x=0; x<numbuffs; x++) { - if (!(skb = dev_alloc_skb(size))) { - if (netif_msg_hw(lp)) - printk(KERN_DEBUG "%s: Cannot allocate skb at line: %d!\n", - dev->name, __LINE__); - goto clean_up; - } else { - packet = skb->data; - skb_put(skb, size); /* create space for data */ - lp->tx_skbuff[x] = skb; - lp->tx_ring[x].length = le16_to_cpu(-skb->len); - lp->tx_ring[x].misc = 0; - - /* put DA and SA into the skb */ - for (i=0; i<6; i++) - *packet++ = dev->dev_addr[i]; - for (i=0; i<6; i++) - *packet++ = dev->dev_addr[i]; - /* type */ - *packet++ = 0x08; - *packet++ = 0x06; - /* packet number */ - *packet++ = x; - /* fill packet with data */ - for (i=0; i<data_len; i++) - *packet++ = i; - - lp->tx_dma_addr[x] = pci_map_single(lp->pci_dev, skb->data, - skb->len, PCI_DMA_TODEVICE); - lp->tx_ring[x].base = (u32)le32_to_cpu(lp->tx_dma_addr[x]); - wmb(); /* Make sure owner changes after all others are visible */ - lp->tx_ring[x].status = le16_to_cpu(status); - } - } - - x = a->read_bcr(ioaddr, 32); /* set internal loopback in BSR32 */ - x = x | 0x0002; - a->write_bcr(ioaddr, 32, x); - - lp->a.write_csr (ioaddr, 15, 0x0044); /* set int loopback in CSR15 */ - - teststatus = le16_to_cpu(0x8000); - lp->a.write_csr(ioaddr, 0, 0x0002); /* Set STRT bit */ - - /* Check status of descriptors */ - for (x=0; x<numbuffs; x++) { - ticks = 0; - rmb(); - while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) { - spin_unlock_irqrestore(&lp->lock, flags); - mdelay(1); - spin_lock_irqsave(&lp->lock, flags); - rmb(); - ticks++; - } - if (ticks == 200) { - if (netif_msg_hw(lp)) - printk("%s: Desc %d failed to reset!\n",dev->name,x); - break; - } - } - - lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ - wmb(); - if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { - printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name); - - for (x=0; x<numbuffs; x++) { - printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x); - skb = lp->rx_skbuff[x]; - for (i=0; i<size; i++) { - 
printk("%02x ", *(skb->data+i)); - } - printk("\n"); - } - } - - x = 0; - rc = 0; - while (x<numbuffs && !rc) { - skb = lp->rx_skbuff[x]; - packet = lp->tx_skbuff[x]->data; - for (i=0; i<size; i++) { - if (*(skb->data+i) != packet[i]) { - if (netif_msg_hw(lp)) - printk(KERN_DEBUG "%s: Error in compare! %2x - %02x %02x\n", - dev->name, i, *(skb->data+i), packet[i]); - rc = 1; - break; - } + struct pcnet32_private *lp = dev->priv; + struct pcnet32_access *a = &lp->a; /* access to registers */ + ulong ioaddr = dev->base_addr; /* card base I/O address */ + struct sk_buff *skb; /* sk buff */ + int x, i; /* counters */ + int numbuffs = 4; /* number of TX/RX buffers and descs */ + u16 status = 0x8300; /* TX ring status */ + u16 teststatus; /* test of ring status */ + int rc; /* return code */ + int size; /* size of packets */ + unsigned char *packet; /* source packet data */ + static const int data_len = 60; /* length of source packets */ + unsigned long flags; + unsigned long ticks; + + *data1 = 1; /* status of test, default to fail */ + rc = 1; /* default to fail */ + + if (netif_running(dev)) + pcnet32_close(dev); + + spin_lock_irqsave(&lp->lock, flags); + + /* Reset the PCNET32 */ + lp->a.reset(ioaddr); + + /* switch pcnet32 to 32bit mode */ + lp->a.write_bcr(ioaddr, 20, 2); + + lp->init_block.mode = + le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); + lp->init_block.filter[0] = 0; + lp->init_block.filter[1] = 0; + + /* purge & init rings but don't actually restart */ + pcnet32_restart(dev, 0x0000); + + lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ + + /* Initialize Transmit buffers. */ + size = data_len + 15; + for (x = 0; x < numbuffs; x++) { + if (!(skb = dev_alloc_skb(size))) { + if (netif_msg_hw(lp)) + printk(KERN_DEBUG + "%s: Cannot allocate skb at line: %d!\n", + dev->name, __LINE__); + goto clean_up; + } else { + packet = skb->data; + skb_put(skb, size); /* create space for data */ + lp->tx_skbuff[x] = skb; + lp->tx_ring[x].length = le16_to_cpu(-skb->len); + lp->tx_ring[x].misc = 0; + + /* put DA and SA into the skb */ + for (i = 0; i < 6; i++) + *packet++ = dev->dev_addr[i]; + for (i = 0; i < 6; i++) + *packet++ = dev->dev_addr[i]; + /* type */ + *packet++ = 0x08; + *packet++ = 0x06; + /* packet number */ + *packet++ = x; + /* fill packet with data */ + for (i = 0; i < data_len; i++) + *packet++ = i; + + lp->tx_dma_addr[x] = + pci_map_single(lp->pci_dev, skb->data, skb->len, + PCI_DMA_TODEVICE); + lp->tx_ring[x].base = + (u32) le32_to_cpu(lp->tx_dma_addr[x]); + wmb(); /* Make sure owner changes after all others are visible */ + lp->tx_ring[x].status = le16_to_cpu(status); + } + } + + x = a->read_bcr(ioaddr, 32); /* set internal loopback in BSR32 */ + x = x | 0x0002; + a->write_bcr(ioaddr, 32, x); + + lp->a.write_csr(ioaddr, 15, 0x0044); /* set int loopback in CSR15 */ + + teststatus = le16_to_cpu(0x8000); + lp->a.write_csr(ioaddr, 0, 0x0002); /* Set STRT bit */ + + /* Check status of descriptors */ + for (x = 0; x < numbuffs; x++) { + ticks = 0; + rmb(); + while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) { + spin_unlock_irqrestore(&lp->lock, flags); + mdelay(1); + spin_lock_irqsave(&lp->lock, flags); + rmb(); + ticks++; + } + if (ticks == 200) { + if (netif_msg_hw(lp)) + printk("%s: Desc %d failed to reset!\n", + dev->name, x); + break; + } + } + + lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ + wmb(); + if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { + printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name); + + for (x = 0; x < numbuffs; 
x++) { + printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x); + skb = lp->rx_skbuff[x]; + for (i = 0; i < size; i++) { + printk("%02x ", *(skb->data + i)); + } + printk("\n"); + } + } + + x = 0; + rc = 0; + while (x < numbuffs && !rc) { + skb = lp->rx_skbuff[x]; + packet = lp->tx_skbuff[x]->data; + for (i = 0; i < size; i++) { + if (*(skb->data + i) != packet[i]) { + if (netif_msg_hw(lp)) + printk(KERN_DEBUG + "%s: Error in compare! %2x - %02x %02x\n", + dev->name, i, *(skb->data + i), + packet[i]); + rc = 1; + break; + } + } + x++; + } + if (!rc) { + *data1 = 0; } - x++; - } - if (!rc) { - *data1 = 0; - } -clean_up: - pcnet32_purge_tx_ring(dev); - x = a->read_csr(ioaddr, 15) & 0xFFFF; - a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */ + clean_up: + pcnet32_purge_tx_ring(dev); + x = a->read_csr(ioaddr, 15) & 0xFFFF; + a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */ - x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ - x = x & ~0x0002; - a->write_bcr(ioaddr, 32, x); + x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ + x = x & ~0x0002; + a->write_bcr(ioaddr, 32, x); - spin_unlock_irqrestore(&lp->lock, flags); + spin_unlock_irqrestore(&lp->lock, flags); - if (netif_running(dev)) { - pcnet32_open(dev); - } else { - lp->a.write_bcr (ioaddr, 20, 4); /* return to 16bit mode */ - } + if (netif_running(dev)) { + pcnet32_open(dev); + } else { + lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ + } - return(rc); -} /* end pcnet32_loopback_test */ + return (rc); +} /* end pcnet32_loopback_test */ static void pcnet32_led_blink_callback(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; - struct pcnet32_access *a = &lp->a; - ulong ioaddr = dev->base_addr; - unsigned long flags; - int i; - - spin_lock_irqsave(&lp->lock, flags); - for (i=4; i<8; i++) { - a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000); - } - spin_unlock_irqrestore(&lp->lock, flags); - - mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT); + struct pcnet32_private *lp = dev->priv; + struct pcnet32_access *a = &lp->a; + ulong ioaddr = dev->base_addr; + unsigned long flags; + int i; + + spin_lock_irqsave(&lp->lock, flags); + for (i = 4; i < 8; i++) { + a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000); + } + spin_unlock_irqrestore(&lp->lock, flags); + + mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT); } static int pcnet32_phys_id(struct net_device *dev, u32 data) { - struct pcnet32_private *lp = dev->priv; - struct pcnet32_access *a = &lp->a; - ulong ioaddr = dev->base_addr; - unsigned long flags; - int i, regs[4]; - - if (!lp->blink_timer.function) { - init_timer(&lp->blink_timer); - lp->blink_timer.function = (void *) pcnet32_led_blink_callback; - lp->blink_timer.data = (unsigned long) dev; - } - - /* Save the current value of the bcrs */ - spin_lock_irqsave(&lp->lock, flags); - for (i=4; i<8; i++) { - regs[i-4] = a->read_bcr(ioaddr, i); - } - spin_unlock_irqrestore(&lp->lock, flags); - - mod_timer(&lp->blink_timer, jiffies); - set_current_state(TASK_INTERRUPTIBLE); - - if ((!data) || (data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))) - data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ); - - msleep_interruptible(data * 1000); - del_timer_sync(&lp->blink_timer); - - /* Restore the original value of the bcrs */ - spin_lock_irqsave(&lp->lock, flags); - for (i=4; i<8; i++) { - a->write_bcr(ioaddr, i, regs[i-4]); - } - spin_unlock_irqrestore(&lp->lock, flags); - - return 0; + struct pcnet32_private *lp = dev->priv; + struct pcnet32_access *a = &lp->a; + ulong ioaddr 
= dev->base_addr; + unsigned long flags; + int i, regs[4]; + + if (!lp->blink_timer.function) { + init_timer(&lp->blink_timer); + lp->blink_timer.function = (void *)pcnet32_led_blink_callback; + lp->blink_timer.data = (unsigned long)dev; + } + + /* Save the current value of the bcrs */ + spin_lock_irqsave(&lp->lock, flags); + for (i = 4; i < 8; i++) { + regs[i - 4] = a->read_bcr(ioaddr, i); + } + spin_unlock_irqrestore(&lp->lock, flags); + + mod_timer(&lp->blink_timer, jiffies); + set_current_state(TASK_INTERRUPTIBLE); + + if ((!data) || (data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ))) + data = (u32) (MAX_SCHEDULE_TIMEOUT / HZ); + + msleep_interruptible(data * 1000); + del_timer_sync(&lp->blink_timer); + + /* Restore the original value of the bcrs */ + spin_lock_irqsave(&lp->lock, flags); + for (i = 4; i < 8; i++) { + a->write_bcr(ioaddr, i, regs[i - 4]); + } + spin_unlock_irqrestore(&lp->lock, flags); + + return 0; } +#define PCNET32_REGS_PER_PHY 32 +#define PCNET32_MAX_PHYS 32 static int pcnet32_get_regs_len(struct net_device *dev) { - return(PCNET32_NUM_REGS * sizeof(u16)); + struct pcnet32_private *lp = dev->priv; + int j = lp->phycount * PCNET32_REGS_PER_PHY; + + return ((PCNET32_NUM_REGS + j) * sizeof(u16)); } static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, - void *ptr) + void *ptr) { - int i, csr0; - u16 *buff = ptr; - struct pcnet32_private *lp = dev->priv; - struct pcnet32_access *a = &lp->a; - ulong ioaddr = dev->base_addr; - int ticks; - unsigned long flags; - - spin_lock_irqsave(&lp->lock, flags); - - csr0 = a->read_csr(ioaddr, 0); - if (!(csr0 & 0x0004)) { /* If not stopped */ - /* set SUSPEND (SPND) - CSR5 bit 0 */ - a->write_csr(ioaddr, 5, 0x0001); - - /* poll waiting for bit to be set */ - ticks = 0; - while (!(a->read_csr(ioaddr, 5) & 0x0001)) { - spin_unlock_irqrestore(&lp->lock, flags); - mdelay(1); - spin_lock_irqsave(&lp->lock, flags); - ticks++; - if (ticks > 200) { - if (netif_msg_hw(lp)) - printk(KERN_DEBUG "%s: Error getting into suspend!\n", - dev->name); - break; - } - } - } + int i, csr0; + u16 *buff = ptr; + struct pcnet32_private *lp = dev->priv; + struct pcnet32_access *a = &lp->a; + ulong ioaddr = dev->base_addr; + int ticks; + unsigned long flags; - /* read address PROM */ - for (i=0; i<16; i += 2) - *buff++ = inw(ioaddr + i); + spin_lock_irqsave(&lp->lock, flags); - /* read control and status registers */ - for (i=0; i<90; i++) { - *buff++ = a->read_csr(ioaddr, i); - } + csr0 = a->read_csr(ioaddr, 0); + if (!(csr0 & 0x0004)) { /* If not stopped */ + /* set SUSPEND (SPND) - CSR5 bit 0 */ + a->write_csr(ioaddr, 5, 0x0001); + + /* poll waiting for bit to be set */ + ticks = 0; + while (!(a->read_csr(ioaddr, 5) & 0x0001)) { + spin_unlock_irqrestore(&lp->lock, flags); + mdelay(1); + spin_lock_irqsave(&lp->lock, flags); + ticks++; + if (ticks > 200) { + if (netif_msg_hw(lp)) + printk(KERN_DEBUG + "%s: Error getting into suspend!\n", + dev->name); + break; + } + } + } - *buff++ = a->read_csr(ioaddr, 112); - *buff++ = a->read_csr(ioaddr, 114); + /* read address PROM */ + for (i = 0; i < 16; i += 2) + *buff++ = inw(ioaddr + i); - /* read bus configuration registers */ - for (i=0; i<30; i++) { - *buff++ = a->read_bcr(ioaddr, i); - } - *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */ - for (i=31; i<36; i++) { - *buff++ = a->read_bcr(ioaddr, i); - } + /* read control and status registers */ + for (i = 0; i < 90; i++) { + *buff++ = a->read_csr(ioaddr, i); + } + + *buff++ = a->read_csr(ioaddr, 112); + *buff++ = a->read_csr(ioaddr, 114); 
- /* read mii phy registers */ - if (lp->mii) { - for (i=0; i<32; i++) { - lp->a.write_bcr(ioaddr, 33, ((lp->mii_if.phy_id) << 5) | i); - *buff++ = lp->a.read_bcr(ioaddr, 34); + /* read bus configuration registers */ + for (i = 0; i < 30; i++) { + *buff++ = a->read_bcr(ioaddr, i); + } + *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */ + for (i = 31; i < 36; i++) { + *buff++ = a->read_bcr(ioaddr, i); } - } - if (!(csr0 & 0x0004)) { /* If not stopped */ - /* clear SUSPEND (SPND) - CSR5 bit 0 */ - a->write_csr(ioaddr, 5, 0x0000); - } + /* read mii phy registers */ + if (lp->mii) { + int j; + for (j = 0; j < PCNET32_MAX_PHYS; j++) { + if (lp->phymask & (1 << j)) { + for (i = 0; i < PCNET32_REGS_PER_PHY; i++) { + lp->a.write_bcr(ioaddr, 33, + (j << 5) | i); + *buff++ = lp->a.read_bcr(ioaddr, 34); + } + } + } + } - i = buff - (u16 *)ptr; - for (; i < PCNET32_NUM_REGS; i++) - *buff++ = 0; + if (!(csr0 & 0x0004)) { /* If not stopped */ + /* clear SUSPEND (SPND) - CSR5 bit 0 */ + a->write_csr(ioaddr, 5, 0x0000); + } - spin_unlock_irqrestore(&lp->lock, flags); + spin_unlock_irqrestore(&lp->lock, flags); } static struct ethtool_ops pcnet32_ethtool_ops = { - .get_settings = pcnet32_get_settings, - .set_settings = pcnet32_set_settings, - .get_drvinfo = pcnet32_get_drvinfo, - .get_msglevel = pcnet32_get_msglevel, - .set_msglevel = pcnet32_set_msglevel, - .nway_reset = pcnet32_nway_reset, - .get_link = pcnet32_get_link, - .get_ringparam = pcnet32_get_ringparam, - .set_ringparam = pcnet32_set_ringparam, - .get_tx_csum = ethtool_op_get_tx_csum, - .get_sg = ethtool_op_get_sg, - .get_tso = ethtool_op_get_tso, - .get_strings = pcnet32_get_strings, - .self_test_count = pcnet32_self_test_count, - .self_test = pcnet32_ethtool_test, - .phys_id = pcnet32_phys_id, - .get_regs_len = pcnet32_get_regs_len, - .get_regs = pcnet32_get_regs, - .get_perm_addr = ethtool_op_get_perm_addr, + .get_settings = pcnet32_get_settings, + .set_settings = pcnet32_set_settings, + .get_drvinfo = pcnet32_get_drvinfo, + .get_msglevel = pcnet32_get_msglevel, + .set_msglevel = pcnet32_set_msglevel, + .nway_reset = pcnet32_nway_reset, + .get_link = pcnet32_get_link, + .get_ringparam = pcnet32_get_ringparam, + .set_ringparam = pcnet32_set_ringparam, + .get_tx_csum = ethtool_op_get_tx_csum, + .get_sg = ethtool_op_get_sg, + .get_tso = ethtool_op_get_tso, + .get_strings = pcnet32_get_strings, + .self_test_count = pcnet32_self_test_count, + .self_test = pcnet32_ethtool_test, + .phys_id = pcnet32_phys_id, + .get_regs_len = pcnet32_get_regs_len, + .get_regs = pcnet32_get_regs, + .get_perm_addr = ethtool_op_get_perm_addr, }; /* only probes for non-PCI devices, the rest are handled by * pci_register_driver via pcnet32_probe_pci */ -static void __devinit -pcnet32_probe_vlbus(void) +static void __devinit pcnet32_probe_vlbus(void) { - unsigned int *port, ioaddr; - - /* search for PCnet32 VLB cards at known addresses */ - for (port = pcnet32_portlist; (ioaddr = *port); port++) { - if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) { - /* check if there is really a pcnet chip on that ioaddr */ - if ((inb(ioaddr + 14) == 0x57) && (inb(ioaddr + 15) == 0x57)) { - pcnet32_probe1(ioaddr, 0, NULL); - } else { - release_region(ioaddr, PCNET32_TOTAL_SIZE); - } - } - } + unsigned int *port, ioaddr; + + /* search for PCnet32 VLB cards at known addresses */ + for (port = pcnet32_portlist; (ioaddr = *port); port++) { + if (request_region + (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) { + /* check if there is really a pcnet chip on 
that ioaddr */ + if ((inb(ioaddr + 14) == 0x57) + && (inb(ioaddr + 15) == 0x57)) { + pcnet32_probe1(ioaddr, 0, NULL); + } else { + release_region(ioaddr, PCNET32_TOTAL_SIZE); + } + } + } } - static int __devinit pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) { - unsigned long ioaddr; - int err; - - err = pci_enable_device(pdev); - if (err < 0) { - if (pcnet32_debug & NETIF_MSG_PROBE) - printk(KERN_ERR PFX "failed to enable device -- err=%d\n", err); - return err; - } - pci_set_master(pdev); + unsigned long ioaddr; + int err; + + err = pci_enable_device(pdev); + if (err < 0) { + if (pcnet32_debug & NETIF_MSG_PROBE) + printk(KERN_ERR PFX + "failed to enable device -- err=%d\n", err); + return err; + } + pci_set_master(pdev); + + ioaddr = pci_resource_start(pdev, 0); + if (!ioaddr) { + if (pcnet32_debug & NETIF_MSG_PROBE) + printk(KERN_ERR PFX + "card has no PCI IO resources, aborting\n"); + return -ENODEV; + } - ioaddr = pci_resource_start (pdev, 0); - if (!ioaddr) { - if (pcnet32_debug & NETIF_MSG_PROBE) - printk (KERN_ERR PFX "card has no PCI IO resources, aborting\n"); - return -ENODEV; - } + if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) { + if (pcnet32_debug & NETIF_MSG_PROBE) + printk(KERN_ERR PFX + "architecture does not support 32bit PCI busmaster DMA\n"); + return -ENODEV; + } + if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") == + NULL) { + if (pcnet32_debug & NETIF_MSG_PROBE) + printk(KERN_ERR PFX + "io address range already allocated\n"); + return -EBUSY; + } - if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) { - if (pcnet32_debug & NETIF_MSG_PROBE) - printk(KERN_ERR PFX "architecture does not support 32bit PCI busmaster DMA\n"); - return -ENODEV; - } - if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") == NULL) { - if (pcnet32_debug & NETIF_MSG_PROBE) - printk(KERN_ERR PFX "io address range already allocated\n"); - return -EBUSY; - } - - err = pcnet32_probe1(ioaddr, 1, pdev); - if (err < 0) { - pci_disable_device(pdev); - } - return err; + err = pcnet32_probe1(ioaddr, 1, pdev); + if (err < 0) { + pci_disable_device(pdev); + } + return err; } - /* pcnet32_probe1 * Called from both pcnet32_probe_vlbus and pcnet_probe_pci. * pdev will be NULL when called from pcnet32_probe_vlbus. 
@@ -1107,630 +1028,764 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) static int __devinit pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) { - struct pcnet32_private *lp; - dma_addr_t lp_dma_addr; - int i, media; - int fdx, mii, fset, dxsuflo; - int chip_version; - char *chipname; - struct net_device *dev; - struct pcnet32_access *a = NULL; - u8 promaddr[6]; - int ret = -ENODEV; - - /* reset the chip */ - pcnet32_wio_reset(ioaddr); - - /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */ - if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) { - a = &pcnet32_wio; - } else { - pcnet32_dwio_reset(ioaddr); - if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 && pcnet32_dwio_check(ioaddr)) { - a = &pcnet32_dwio; - } else - goto err_release_region; - } - - chip_version = a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr,89) << 16); - if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW)) - printk(KERN_INFO " PCnet chip version is %#x.\n", chip_version); - if ((chip_version & 0xfff) != 0x003) { - if (pcnet32_debug & NETIF_MSG_PROBE) - printk(KERN_INFO PFX "Unsupported chip version.\n"); - goto err_release_region; - } - - /* initialize variables */ - fdx = mii = fset = dxsuflo = 0; - chip_version = (chip_version >> 12) & 0xffff; - - switch (chip_version) { - case 0x2420: - chipname = "PCnet/PCI 79C970"; /* PCI */ - break; - case 0x2430: - if (shared) - chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */ - else - chipname = "PCnet/32 79C965"; /* 486/VL bus */ - break; - case 0x2621: - chipname = "PCnet/PCI II 79C970A"; /* PCI */ - fdx = 1; - break; - case 0x2623: - chipname = "PCnet/FAST 79C971"; /* PCI */ - fdx = 1; mii = 1; fset = 1; - break; - case 0x2624: - chipname = "PCnet/FAST+ 79C972"; /* PCI */ - fdx = 1; mii = 1; fset = 1; - break; - case 0x2625: - chipname = "PCnet/FAST III 79C973"; /* PCI */ - fdx = 1; mii = 1; - break; - case 0x2626: - chipname = "PCnet/Home 79C978"; /* PCI */ - fdx = 1; + struct pcnet32_private *lp; + dma_addr_t lp_dma_addr; + int i, media; + int fdx, mii, fset, dxsuflo; + int chip_version; + char *chipname; + struct net_device *dev; + struct pcnet32_access *a = NULL; + u8 promaddr[6]; + int ret = -ENODEV; + + /* reset the chip */ + pcnet32_wio_reset(ioaddr); + + /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */ + if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) { + a = &pcnet32_wio; + } else { + pcnet32_dwio_reset(ioaddr); + if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 + && pcnet32_dwio_check(ioaddr)) { + a = &pcnet32_dwio; + } else + goto err_release_region; + } + + chip_version = + a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16); + if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW)) + printk(KERN_INFO " PCnet chip version is %#x.\n", + chip_version); + if ((chip_version & 0xfff) != 0x003) { + if (pcnet32_debug & NETIF_MSG_PROBE) + printk(KERN_INFO PFX "Unsupported chip version.\n"); + goto err_release_region; + } + + /* initialize variables */ + fdx = mii = fset = dxsuflo = 0; + chip_version = (chip_version >> 12) & 0xffff; + + switch (chip_version) { + case 0x2420: + chipname = "PCnet/PCI 79C970"; /* PCI */ + break; + case 0x2430: + if (shared) + chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */ + else + chipname = "PCnet/32 79C965"; /* 486/VL bus */ + break; + case 0x2621: + chipname = "PCnet/PCI II 79C970A"; /* PCI */ + fdx = 1; + break; + case 0x2623: + 
chipname = "PCnet/FAST 79C971"; /* PCI */ + fdx = 1; + mii = 1; + fset = 1; + break; + case 0x2624: + chipname = "PCnet/FAST+ 79C972"; /* PCI */ + fdx = 1; + mii = 1; + fset = 1; + break; + case 0x2625: + chipname = "PCnet/FAST III 79C973"; /* PCI */ + fdx = 1; + mii = 1; + break; + case 0x2626: + chipname = "PCnet/Home 79C978"; /* PCI */ + fdx = 1; + /* + * This is based on specs published at www.amd.com. This section + * assumes that a card with a 79C978 wants to go into standard + * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode, + * and the module option homepna=1 can select this instead. + */ + media = a->read_bcr(ioaddr, 49); + media &= ~3; /* default to 10Mb ethernet */ + if (cards_found < MAX_UNITS && homepna[cards_found]) + media |= 1; /* switch to home wiring mode */ + if (pcnet32_debug & NETIF_MSG_PROBE) + printk(KERN_DEBUG PFX "media set to %sMbit mode.\n", + (media & 1) ? "1" : "10"); + a->write_bcr(ioaddr, 49, media); + break; + case 0x2627: + chipname = "PCnet/FAST III 79C975"; /* PCI */ + fdx = 1; + mii = 1; + break; + case 0x2628: + chipname = "PCnet/PRO 79C976"; + fdx = 1; + mii = 1; + break; + default: + if (pcnet32_debug & NETIF_MSG_PROBE) + printk(KERN_INFO PFX + "PCnet version %#x, no PCnet32 chip.\n", + chip_version); + goto err_release_region; + } + /* - * This is based on specs published at www.amd.com. This section - * assumes that a card with a 79C978 wants to go into standard - * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode, - * and the module option homepna=1 can select this instead. + * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit + * starting until the packet is loaded. Strike one for reliability, lose + * one for latency - although on PCI this isnt a big loss. Older chips + * have FIFO's smaller than a packet, so you can't do this. + * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn. */ - media = a->read_bcr(ioaddr, 49); - media &= ~3; /* default to 10Mb ethernet */ - if (cards_found < MAX_UNITS && homepna[cards_found]) - media |= 1; /* switch to home wiring mode */ - if (pcnet32_debug & NETIF_MSG_PROBE) - printk(KERN_DEBUG PFX "media set to %sMbit mode.\n", - (media & 1) ? "1" : "10"); - a->write_bcr(ioaddr, 49, media); - break; - case 0x2627: - chipname = "PCnet/FAST III 79C975"; /* PCI */ - fdx = 1; mii = 1; - break; - case 0x2628: - chipname = "PCnet/PRO 79C976"; - fdx = 1; mii = 1; - break; - default: - if (pcnet32_debug & NETIF_MSG_PROBE) - printk(KERN_INFO PFX "PCnet version %#x, no PCnet32 chip.\n", - chip_version); - goto err_release_region; - } - - /* - * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit - * starting until the packet is loaded. Strike one for reliability, lose - * one for latency - although on PCI this isnt a big loss. Older chips - * have FIFO's smaller than a packet, so you can't do this. - * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn. 
- */ - - if (fset) { - a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860)); - a->write_csr(ioaddr, 80, (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00); - dxsuflo = 1; - } - - dev = alloc_etherdev(0); - if (!dev) { + + if (fset) { + a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860)); + a->write_csr(ioaddr, 80, + (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00); + dxsuflo = 1; + } + + dev = alloc_etherdev(0); + if (!dev) { + if (pcnet32_debug & NETIF_MSG_PROBE) + printk(KERN_ERR PFX "Memory allocation failed.\n"); + ret = -ENOMEM; + goto err_release_region; + } + SET_NETDEV_DEV(dev, &pdev->dev); + if (pcnet32_debug & NETIF_MSG_PROBE) - printk(KERN_ERR PFX "Memory allocation failed.\n"); - ret = -ENOMEM; - goto err_release_region; - } - SET_NETDEV_DEV(dev, &pdev->dev); - - if (pcnet32_debug & NETIF_MSG_PROBE) - printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr); - - /* In most chips, after a chip reset, the ethernet address is read from the - * station address PROM at the base address and programmed into the - * "Physical Address Registers" CSR12-14. - * As a precautionary measure, we read the PROM values and complain if - * they disagree with the CSRs. Either way, we use the CSR values, and - * double check that they are valid. - */ - for (i = 0; i < 3; i++) { - unsigned int val; - val = a->read_csr(ioaddr, i+12) & 0x0ffff; - /* There may be endianness issues here. */ - dev->dev_addr[2*i] = val & 0x0ff; - dev->dev_addr[2*i+1] = (val >> 8) & 0x0ff; - } - - /* read PROM address and compare with CSR address */ - for (i = 0; i < 6; i++) - promaddr[i] = inb(ioaddr + i); - - if (memcmp(promaddr, dev->dev_addr, 6) - || !is_valid_ether_addr(dev->dev_addr)) { - if (is_valid_ether_addr(promaddr)) { - if (pcnet32_debug & NETIF_MSG_PROBE) { - printk(" warning: CSR address invalid,\n"); - printk(KERN_INFO " using instead PROM address of"); - } - memcpy(dev->dev_addr, promaddr, 6); - } - } - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - - /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ - if (!is_valid_ether_addr(dev->perm_addr)) - memset(dev->dev_addr, 0, sizeof(dev->dev_addr)); - - if (pcnet32_debug & NETIF_MSG_PROBE) { + printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr); + + /* In most chips, after a chip reset, the ethernet address is read from the + * station address PROM at the base address and programmed into the + * "Physical Address Registers" CSR12-14. + * As a precautionary measure, we read the PROM values and complain if + * they disagree with the CSRs. Either way, we use the CSR values, and + * double check that they are valid. + */ + for (i = 0; i < 3; i++) { + unsigned int val; + val = a->read_csr(ioaddr, i + 12) & 0x0ffff; + /* There may be endianness issues here. 
*/ + dev->dev_addr[2 * i] = val & 0x0ff; + dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff; + } + + /* read PROM address and compare with CSR address */ for (i = 0; i < 6; i++) - printk(" %2.2x", dev->dev_addr[i]); - - /* Version 0x2623 and 0x2624 */ - if (((chip_version + 1) & 0xfffe) == 0x2624) { - i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */ - printk("\n" KERN_INFO " tx_start_pt(0x%04x):",i); - switch(i>>10) { - case 0: printk(" 20 bytes,"); break; - case 1: printk(" 64 bytes,"); break; - case 2: printk(" 128 bytes,"); break; - case 3: printk("~220 bytes,"); break; - } - i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */ - printk(" BCR18(%x):",i&0xffff); - if (i & (1<<5)) printk("BurstWrEn "); - if (i & (1<<6)) printk("BurstRdEn "); - if (i & (1<<7)) printk("DWordIO "); - if (i & (1<<11)) printk("NoUFlow "); - i = a->read_bcr(ioaddr, 25); - printk("\n" KERN_INFO " SRAMSIZE=0x%04x,",i<<8); - i = a->read_bcr(ioaddr, 26); - printk(" SRAM_BND=0x%04x,",i<<8); - i = a->read_bcr(ioaddr, 27); - if (i & (1<<14)) printk("LowLatRx"); - } - } - - dev->base_addr = ioaddr; - /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */ - if ((lp = pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr)) == NULL) { - if (pcnet32_debug & NETIF_MSG_PROBE) - printk(KERN_ERR PFX "Consistent memory allocation failed.\n"); - ret = -ENOMEM; - goto err_free_netdev; - } - - memset(lp, 0, sizeof(*lp)); - lp->dma_addr = lp_dma_addr; - lp->pci_dev = pdev; - - spin_lock_init(&lp->lock); - - SET_MODULE_OWNER(dev); - SET_NETDEV_DEV(dev, &pdev->dev); - dev->priv = lp; - lp->name = chipname; - lp->shared_irq = shared; - lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ - lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */ - lp->tx_mod_mask = lp->tx_ring_size - 1; - lp->rx_mod_mask = lp->rx_ring_size - 1; - lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12); - lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4); - lp->mii_if.full_duplex = fdx; - lp->mii_if.phy_id_mask = 0x1f; - lp->mii_if.reg_num_mask = 0x1f; - lp->dxsuflo = dxsuflo; - lp->mii = mii; - lp->msg_enable = pcnet32_debug; - if ((cards_found >= MAX_UNITS) || (options[cards_found] > sizeof(options_mapping))) - lp->options = PCNET32_PORT_ASEL; - else - lp->options = options_mapping[options[cards_found]]; - lp->mii_if.dev = dev; - lp->mii_if.mdio_read = mdio_read; - lp->mii_if.mdio_write = mdio_write; - - if (fdx && !(lp->options & PCNET32_PORT_ASEL) && - ((cards_found>=MAX_UNITS) || full_duplex[cards_found])) - lp->options |= PCNET32_PORT_FD; - - if (!a) { - if (pcnet32_debug & NETIF_MSG_PROBE) - printk(KERN_ERR PFX "No access methods\n"); - ret = -ENODEV; - goto err_free_consistent; - } - lp->a = *a; - - /* prior to register_netdev, dev->name is not yet correct */ - if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) { - ret = -ENOMEM; - goto err_free_ring; - } - /* detect special T1/E1 WAN card by checking for MAC address */ - if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 + promaddr[i] = inb(ioaddr + i); + + if (memcmp(promaddr, dev->dev_addr, 6) + || !is_valid_ether_addr(dev->dev_addr)) { + if (is_valid_ether_addr(promaddr)) { + if (pcnet32_debug & NETIF_MSG_PROBE) { + printk(" warning: CSR address invalid,\n"); + printk(KERN_INFO + " using instead PROM address of"); + } + memcpy(dev->dev_addr, promaddr, 6); + } + } + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + + /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ + if 
(!is_valid_ether_addr(dev->perm_addr)) + memset(dev->dev_addr, 0, sizeof(dev->dev_addr)); + + if (pcnet32_debug & NETIF_MSG_PROBE) { + for (i = 0; i < 6; i++) + printk(" %2.2x", dev->dev_addr[i]); + + /* Version 0x2623 and 0x2624 */ + if (((chip_version + 1) & 0xfffe) == 0x2624) { + i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */ + printk("\n" KERN_INFO " tx_start_pt(0x%04x):", i); + switch (i >> 10) { + case 0: + printk(" 20 bytes,"); + break; + case 1: + printk(" 64 bytes,"); + break; + case 2: + printk(" 128 bytes,"); + break; + case 3: + printk("~220 bytes,"); + break; + } + i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */ + printk(" BCR18(%x):", i & 0xffff); + if (i & (1 << 5)) + printk("BurstWrEn "); + if (i & (1 << 6)) + printk("BurstRdEn "); + if (i & (1 << 7)) + printk("DWordIO "); + if (i & (1 << 11)) + printk("NoUFlow "); + i = a->read_bcr(ioaddr, 25); + printk("\n" KERN_INFO " SRAMSIZE=0x%04x,", i << 8); + i = a->read_bcr(ioaddr, 26); + printk(" SRAM_BND=0x%04x,", i << 8); + i = a->read_bcr(ioaddr, 27); + if (i & (1 << 14)) + printk("LowLatRx"); + } + } + + dev->base_addr = ioaddr; + /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */ + if ((lp = + pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr)) == NULL) { + if (pcnet32_debug & NETIF_MSG_PROBE) + printk(KERN_ERR PFX + "Consistent memory allocation failed.\n"); + ret = -ENOMEM; + goto err_free_netdev; + } + + memset(lp, 0, sizeof(*lp)); + lp->dma_addr = lp_dma_addr; + lp->pci_dev = pdev; + + spin_lock_init(&lp->lock); + + SET_MODULE_OWNER(dev); + SET_NETDEV_DEV(dev, &pdev->dev); + dev->priv = lp; + lp->name = chipname; + lp->shared_irq = shared; + lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ + lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */ + lp->tx_mod_mask = lp->tx_ring_size - 1; + lp->rx_mod_mask = lp->rx_ring_size - 1; + lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12); + lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4); + lp->mii_if.full_duplex = fdx; + lp->mii_if.phy_id_mask = 0x1f; + lp->mii_if.reg_num_mask = 0x1f; + lp->dxsuflo = dxsuflo; + lp->mii = mii; + lp->msg_enable = pcnet32_debug; + if ((cards_found >= MAX_UNITS) + || (options[cards_found] > sizeof(options_mapping))) + lp->options = PCNET32_PORT_ASEL; + else + lp->options = options_mapping[options[cards_found]]; + lp->mii_if.dev = dev; + lp->mii_if.mdio_read = mdio_read; + lp->mii_if.mdio_write = mdio_write; + + if (fdx && !(lp->options & PCNET32_PORT_ASEL) && + ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) + lp->options |= PCNET32_PORT_FD; + + if (!a) { + if (pcnet32_debug & NETIF_MSG_PROBE) + printk(KERN_ERR PFX "No access methods\n"); + ret = -ENODEV; + goto err_free_consistent; + } + lp->a = *a; + + /* prior to register_netdev, dev->name is not yet correct */ + if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) { + ret = -ENOMEM; + goto err_free_ring; + } + /* detect special T1/E1 WAN card by checking for MAC address */ + if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 && dev->dev_addr[2] == 0x75) - lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; - - lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. 
*/ - lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); - for (i = 0; i < 6; i++) - lp->init_block.phys_addr[i] = dev->dev_addr[i]; - lp->init_block.filter[0] = 0x00000000; - lp->init_block.filter[1] = 0x00000000; - lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr); - lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr); - - /* switch pcnet32 to 32bit mode */ - a->write_bcr(ioaddr, 20, 2); - - a->write_csr(ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private, - init_block)) & 0xffff); - a->write_csr(ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private, - init_block)) >> 16); - - if (pdev) { /* use the IRQ provided by PCI */ - dev->irq = pdev->irq; - if (pcnet32_debug & NETIF_MSG_PROBE) - printk(" assigned IRQ %d.\n", dev->irq); - } else { - unsigned long irq_mask = probe_irq_on(); + lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; - /* - * To auto-IRQ we enable the initialization-done and DMA error - * interrupts. For ISA boards we get a DMA error, but VLB and PCI - * boards will work. - */ - /* Trigger an initialization just for the interrupt. */ - a->write_csr (ioaddr, 0, 0x41); - mdelay (1); + lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */ + lp->init_block.tlen_rlen = + le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); + for (i = 0; i < 6; i++) + lp->init_block.phys_addr[i] = dev->dev_addr[i]; + lp->init_block.filter[0] = 0x00000000; + lp->init_block.filter[1] = 0x00000000; + lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr); + lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr); + + /* switch pcnet32 to 32bit mode */ + a->write_bcr(ioaddr, 20, 2); + + a->write_csr(ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private, + init_block)) & 0xffff); + a->write_csr(ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private, + init_block)) >> 16); + + if (pdev) { /* use the IRQ provided by PCI */ + dev->irq = pdev->irq; + if (pcnet32_debug & NETIF_MSG_PROBE) + printk(" assigned IRQ %d.\n", dev->irq); + } else { + unsigned long irq_mask = probe_irq_on(); + + /* + * To auto-IRQ we enable the initialization-done and DMA error + * interrupts. For ISA boards we get a DMA error, but VLB and PCI + * boards will work. + */ + /* Trigger an initialization just for the interrupt. 
*/ + a->write_csr(ioaddr, 0, 0x41); + mdelay(1); + + dev->irq = probe_irq_off(irq_mask); + if (!dev->irq) { + if (pcnet32_debug & NETIF_MSG_PROBE) + printk(", failed to detect IRQ line.\n"); + ret = -ENODEV; + goto err_free_ring; + } + if (pcnet32_debug & NETIF_MSG_PROBE) + printk(", probed IRQ %d.\n", dev->irq); + } - dev->irq = probe_irq_off (irq_mask); - if (!dev->irq) { - if (pcnet32_debug & NETIF_MSG_PROBE) - printk(", failed to detect IRQ line.\n"); - ret = -ENODEV; - goto err_free_ring; + /* Set the mii phy_id so that we can query the link state */ + if (lp->mii) { + /* lp->phycount and lp->phymask are set to 0 by memset above */ + + lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f; + /* scan for PHYs */ + for (i = 0; i < PCNET32_MAX_PHYS; i++) { + unsigned short id1, id2; + + id1 = mdio_read(dev, i, MII_PHYSID1); + if (id1 == 0xffff) + continue; + id2 = mdio_read(dev, i, MII_PHYSID2); + if (id2 == 0xffff) + continue; + if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624) + continue; /* 79C971 & 79C972 have phantom phy at id 31 */ + lp->phycount++; + lp->phymask |= (1 << i); + lp->mii_if.phy_id = i; + if (pcnet32_debug & NETIF_MSG_PROBE) + printk(KERN_INFO PFX + "Found PHY %04x:%04x at address %d.\n", + id1, id2, i); + } + lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5); + if (lp->phycount > 1) { + lp->options |= PCNET32_PORT_MII; + } } - if (pcnet32_debug & NETIF_MSG_PROBE) - printk(", probed IRQ %d.\n", dev->irq); - } - - /* Set the mii phy_id so that we can query the link state */ - if (lp->mii) - lp->mii_if.phy_id = ((lp->a.read_bcr (ioaddr, 33)) >> 5) & 0x1f; - - init_timer (&lp->watchdog_timer); - lp->watchdog_timer.data = (unsigned long) dev; - lp->watchdog_timer.function = (void *) &pcnet32_watchdog; - - /* The PCNET32-specific entries in the device structure. */ - dev->open = &pcnet32_open; - dev->hard_start_xmit = &pcnet32_start_xmit; - dev->stop = &pcnet32_close; - dev->get_stats = &pcnet32_get_stats; - dev->set_multicast_list = &pcnet32_set_multicast_list; - dev->do_ioctl = &pcnet32_ioctl; - dev->ethtool_ops = &pcnet32_ethtool_ops; - dev->tx_timeout = pcnet32_tx_timeout; - dev->watchdog_timeo = (5*HZ); + + init_timer(&lp->watchdog_timer); + lp->watchdog_timer.data = (unsigned long)dev; + lp->watchdog_timer.function = (void *)&pcnet32_watchdog; + + /* The PCNET32-specific entries in the device structure. */ + dev->open = &pcnet32_open; + dev->hard_start_xmit = &pcnet32_start_xmit; + dev->stop = &pcnet32_close; + dev->get_stats = &pcnet32_get_stats; + dev->set_multicast_list = &pcnet32_set_multicast_list; + dev->do_ioctl = &pcnet32_ioctl; + dev->ethtool_ops = &pcnet32_ethtool_ops; + dev->tx_timeout = pcnet32_tx_timeout; + dev->watchdog_timeo = (5 * HZ); #ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = pcnet32_poll_controller; + dev->poll_controller = pcnet32_poll_controller; #endif - /* Fill in the generic fields of the device structure. 
*/ - if (register_netdev(dev)) - goto err_free_ring; - - if (pdev) { - pci_set_drvdata(pdev, dev); - } else { - lp->next = pcnet32_dev; - pcnet32_dev = dev; - } - - if (pcnet32_debug & NETIF_MSG_PROBE) - printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name); - cards_found++; - - /* enable LED writes */ - a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000); - - return 0; - -err_free_ring: - pcnet32_free_ring(dev); -err_free_consistent: - pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); -err_free_netdev: - free_netdev(dev); -err_release_region: - release_region(ioaddr, PCNET32_TOTAL_SIZE); - return ret; -} + /* Fill in the generic fields of the device structure. */ + if (register_netdev(dev)) + goto err_free_ring; + + if (pdev) { + pci_set_drvdata(pdev, dev); + } else { + lp->next = pcnet32_dev; + pcnet32_dev = dev; + } + if (pcnet32_debug & NETIF_MSG_PROBE) + printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name); + cards_found++; + + /* enable LED writes */ + a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000); + + return 0; + + err_free_ring: + pcnet32_free_ring(dev); + err_free_consistent: + pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); + err_free_netdev: + free_netdev(dev); + err_release_region: + release_region(ioaddr, PCNET32_TOTAL_SIZE); + return ret; +} /* if any allocation fails, caller must also call pcnet32_free_ring */ static int pcnet32_alloc_ring(struct net_device *dev, char *name) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = dev->priv; - lp->tx_ring = pci_alloc_consistent(lp->pci_dev, - sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, - &lp->tx_ring_dma_addr); - if (lp->tx_ring == NULL) { - if (pcnet32_debug & NETIF_MSG_DRV) - printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n", - name); - return -ENOMEM; - } - - lp->rx_ring = pci_alloc_consistent(lp->pci_dev, - sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, - &lp->rx_ring_dma_addr); - if (lp->rx_ring == NULL) { - if (pcnet32_debug & NETIF_MSG_DRV) - printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n", - name); - return -ENOMEM; - } - - lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size, - GFP_ATOMIC); - if (!lp->tx_dma_addr) { - if (pcnet32_debug & NETIF_MSG_DRV) - printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name); - return -ENOMEM; - } - memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size); - - lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size, - GFP_ATOMIC); - if (!lp->rx_dma_addr) { - if (pcnet32_debug & NETIF_MSG_DRV) - printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name); - return -ENOMEM; - } - memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size); - - lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size, - GFP_ATOMIC); - if (!lp->tx_skbuff) { - if (pcnet32_debug & NETIF_MSG_DRV) - printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name); - return -ENOMEM; - } - memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size); - - lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size, - GFP_ATOMIC); - if (!lp->rx_skbuff) { - if (pcnet32_debug & NETIF_MSG_DRV) - printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name); - return -ENOMEM; - } - memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size); + lp->tx_ring = pci_alloc_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * + lp->tx_ring_size, + &lp->tx_ring_dma_addr); + if 
(lp->tx_ring == NULL) { + if (pcnet32_debug & NETIF_MSG_DRV) + printk("\n" KERN_ERR PFX + "%s: Consistent memory allocation failed.\n", + name); + return -ENOMEM; + } - return 0; -} + lp->rx_ring = pci_alloc_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * + lp->rx_ring_size, + &lp->rx_ring_dma_addr); + if (lp->rx_ring == NULL) { + if (pcnet32_debug & NETIF_MSG_DRV) + printk("\n" KERN_ERR PFX + "%s: Consistent memory allocation failed.\n", + name); + return -ENOMEM; + } + lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size, + GFP_ATOMIC); + if (!lp->tx_dma_addr) { + if (pcnet32_debug & NETIF_MSG_DRV) + printk("\n" KERN_ERR PFX + "%s: Memory allocation failed.\n", name); + return -ENOMEM; + } + memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size); + + lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size, + GFP_ATOMIC); + if (!lp->rx_dma_addr) { + if (pcnet32_debug & NETIF_MSG_DRV) + printk("\n" KERN_ERR PFX + "%s: Memory allocation failed.\n", name); + return -ENOMEM; + } + memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size); + + lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size, + GFP_ATOMIC); + if (!lp->tx_skbuff) { + if (pcnet32_debug & NETIF_MSG_DRV) + printk("\n" KERN_ERR PFX + "%s: Memory allocation failed.\n", name); + return -ENOMEM; + } + memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size); + + lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size, + GFP_ATOMIC); + if (!lp->rx_skbuff) { + if (pcnet32_debug & NETIF_MSG_DRV) + printk("\n" KERN_ERR PFX + "%s: Memory allocation failed.\n", name); + return -ENOMEM; + } + memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size); + + return 0; +} static void pcnet32_free_ring(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; + struct pcnet32_private *lp = dev->priv; - kfree(lp->tx_skbuff); - lp->tx_skbuff = NULL; + kfree(lp->tx_skbuff); + lp->tx_skbuff = NULL; - kfree(lp->rx_skbuff); - lp->rx_skbuff = NULL; + kfree(lp->rx_skbuff); + lp->rx_skbuff = NULL; - kfree(lp->tx_dma_addr); - lp->tx_dma_addr = NULL; + kfree(lp->tx_dma_addr); + lp->tx_dma_addr = NULL; - kfree(lp->rx_dma_addr); - lp->rx_dma_addr = NULL; + kfree(lp->rx_dma_addr); + lp->rx_dma_addr = NULL; - if (lp->tx_ring) { - pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, - lp->tx_ring, lp->tx_ring_dma_addr); - lp->tx_ring = NULL; - } + if (lp->tx_ring) { + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * + lp->tx_ring_size, lp->tx_ring, + lp->tx_ring_dma_addr); + lp->tx_ring = NULL; + } - if (lp->rx_ring) { - pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, - lp->rx_ring, lp->rx_ring_dma_addr); - lp->rx_ring = NULL; - } + if (lp->rx_ring) { + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * + lp->rx_ring_size, lp->rx_ring, + lp->rx_ring_dma_addr); + lp->rx_ring = NULL; + } } - -static int -pcnet32_open(struct net_device *dev) +static int pcnet32_open(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; - unsigned long ioaddr = dev->base_addr; - u16 val; - int i; - int rc; - unsigned long flags; - - if (request_irq(dev->irq, &pcnet32_interrupt, - lp->shared_irq ? 
SA_SHIRQ : 0, dev->name, (void *)dev)) { - return -EAGAIN; - } - - spin_lock_irqsave(&lp->lock, flags); - /* Check for a valid station address */ - if (!is_valid_ether_addr(dev->dev_addr)) { - rc = -EINVAL; - goto err_free_irq; - } - - /* Reset the PCNET32 */ - lp->a.reset (ioaddr); - - /* switch pcnet32 to 32bit mode */ - lp->a.write_bcr (ioaddr, 20, 2); - - if (netif_msg_ifup(lp)) - printk(KERN_DEBUG "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n", - dev->name, dev->irq, - (u32) (lp->tx_ring_dma_addr), - (u32) (lp->rx_ring_dma_addr), - (u32) (lp->dma_addr + offsetof(struct pcnet32_private, init_block))); - - /* set/reset autoselect bit */ - val = lp->a.read_bcr (ioaddr, 2) & ~2; - if (lp->options & PCNET32_PORT_ASEL) - val |= 2; - lp->a.write_bcr (ioaddr, 2, val); - - /* handle full duplex setting */ - if (lp->mii_if.full_duplex) { - val = lp->a.read_bcr (ioaddr, 9) & ~3; - if (lp->options & PCNET32_PORT_FD) { - val |= 1; - if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI)) + struct pcnet32_private *lp = dev->priv; + unsigned long ioaddr = dev->base_addr; + u16 val; + int i; + int rc; + unsigned long flags; + + if (request_irq(dev->irq, &pcnet32_interrupt, + lp->shared_irq ? SA_SHIRQ : 0, dev->name, + (void *)dev)) { + return -EAGAIN; + } + + spin_lock_irqsave(&lp->lock, flags); + /* Check for a valid station address */ + if (!is_valid_ether_addr(dev->dev_addr)) { + rc = -EINVAL; + goto err_free_irq; + } + + /* Reset the PCNET32 */ + lp->a.reset(ioaddr); + + /* switch pcnet32 to 32bit mode */ + lp->a.write_bcr(ioaddr, 20, 2); + + if (netif_msg_ifup(lp)) + printk(KERN_DEBUG + "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n", + dev->name, dev->irq, (u32) (lp->tx_ring_dma_addr), + (u32) (lp->rx_ring_dma_addr), + (u32) (lp->dma_addr + + offsetof(struct pcnet32_private, init_block))); + + /* set/reset autoselect bit */ + val = lp->a.read_bcr(ioaddr, 2) & ~2; + if (lp->options & PCNET32_PORT_ASEL) val |= 2; - } else if (lp->options & PCNET32_PORT_ASEL) { - /* workaround of xSeries250, turn on for 79C975 only */ - i = ((lp->a.read_csr(ioaddr, 88) | - (lp->a.read_csr(ioaddr,89) << 16)) >> 12) & 0xffff; - if (i == 0x2627) - val |= 3; - } - lp->a.write_bcr (ioaddr, 9, val); - } - - /* set/reset GPSI bit in test register */ - val = lp->a.read_csr (ioaddr, 124) & ~0x10; - if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI) - val |= 0x10; - lp->a.write_csr (ioaddr, 124, val); - - /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ - if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT && + lp->a.write_bcr(ioaddr, 2, val); + + /* handle full duplex setting */ + if (lp->mii_if.full_duplex) { + val = lp->a.read_bcr(ioaddr, 9) & ~3; + if (lp->options & PCNET32_PORT_FD) { + val |= 1; + if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI)) + val |= 2; + } else if (lp->options & PCNET32_PORT_ASEL) { + /* workaround of xSeries250, turn on for 79C975 only */ + i = ((lp->a.read_csr(ioaddr, 88) | + (lp->a. 
+ read_csr(ioaddr, 89) << 16)) >> 12) & 0xffff; + if (i == 0x2627) + val |= 3; + } + lp->a.write_bcr(ioaddr, 9, val); + } + + /* set/reset GPSI bit in test register */ + val = lp->a.read_csr(ioaddr, 124) & ~0x10; + if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI) + val |= 0x10; + lp->a.write_csr(ioaddr, 124, val); + + /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ + if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT && (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { - if (lp->options & PCNET32_PORT_ASEL) { - lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; - if (netif_msg_link(lp)) - printk(KERN_DEBUG "%s: Setting 100Mb-Full Duplex.\n", - dev->name); - } - } - { - /* - * 24 Jun 2004 according AMD, in order to change the PHY, - * DANAS (or DISPM for 79C976) must be set; then select the speed, - * duplex, and/or enable auto negotiation, and clear DANAS - */ - if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) { - lp->a.write_bcr(ioaddr, 32, - lp->a.read_bcr(ioaddr, 32) | 0x0080); - /* disable Auto Negotiation, set 10Mpbs, HD */ - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8; - if (lp->options & PCNET32_PORT_FD) - val |= 0x10; - if (lp->options & PCNET32_PORT_100) - val |= 0x08; - lp->a.write_bcr (ioaddr, 32, val); + if (lp->options & PCNET32_PORT_ASEL) { + lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; + if (netif_msg_link(lp)) + printk(KERN_DEBUG + "%s: Setting 100Mb-Full Duplex.\n", + dev->name); + } + } + if (lp->phycount < 2) { + /* + * 24 Jun 2004 according AMD, in order to change the PHY, + * DANAS (or DISPM for 79C976) must be set; then select the speed, + * duplex, and/or enable auto negotiation, and clear DANAS + */ + if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) { + lp->a.write_bcr(ioaddr, 32, + lp->a.read_bcr(ioaddr, 32) | 0x0080); + /* disable Auto Negotiation, set 10Mpbs, HD */ + val = lp->a.read_bcr(ioaddr, 32) & ~0xb8; + if (lp->options & PCNET32_PORT_FD) + val |= 0x10; + if (lp->options & PCNET32_PORT_100) + val |= 0x08; + lp->a.write_bcr(ioaddr, 32, val); + } else { + if (lp->options & PCNET32_PORT_ASEL) { + lp->a.write_bcr(ioaddr, 32, + lp->a.read_bcr(ioaddr, + 32) | 0x0080); + /* enable auto negotiate, setup, disable fd */ + val = lp->a.read_bcr(ioaddr, 32) & ~0x98; + val |= 0x20; + lp->a.write_bcr(ioaddr, 32, val); + } + } } else { - if (lp->options & PCNET32_PORT_ASEL) { - lp->a.write_bcr(ioaddr, 32, - lp->a.read_bcr(ioaddr, 32) | 0x0080); - /* enable auto negotiate, setup, disable fd */ - val = lp->a.read_bcr(ioaddr, 32) & ~0x98; - val |= 0x20; - lp->a.write_bcr(ioaddr, 32, val); - } + int first_phy = -1; + u16 bmcr; + u32 bcr9; + struct ethtool_cmd ecmd; + + /* + * There is really no good other way to handle multiple PHYs + * other than turning off all automatics + */ + val = lp->a.read_bcr(ioaddr, 2); + lp->a.write_bcr(ioaddr, 2, val & ~2); + val = lp->a.read_bcr(ioaddr, 32); + lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */ + + if (!(lp->options & PCNET32_PORT_ASEL)) { + /* setup ecmd */ + ecmd.port = PORT_MII; + ecmd.transceiver = XCVR_INTERNAL; + ecmd.autoneg = AUTONEG_DISABLE; + ecmd.speed = + lp-> + options & PCNET32_PORT_100 ? 
SPEED_100 : SPEED_10; + bcr9 = lp->a.read_bcr(ioaddr, 9); + + if (lp->options & PCNET32_PORT_FD) { + ecmd.duplex = DUPLEX_FULL; + bcr9 |= (1 << 0); + } else { + ecmd.duplex = DUPLEX_HALF; + bcr9 |= ~(1 << 0); + } + lp->a.write_bcr(ioaddr, 9, bcr9); + } + + for (i = 0; i < PCNET32_MAX_PHYS; i++) { + if (lp->phymask & (1 << i)) { + /* isolate all but the first PHY */ + bmcr = mdio_read(dev, i, MII_BMCR); + if (first_phy == -1) { + first_phy = i; + mdio_write(dev, i, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + } else { + mdio_write(dev, i, MII_BMCR, + bmcr | BMCR_ISOLATE); + } + /* use mii_ethtool_sset to setup PHY */ + lp->mii_if.phy_id = i; + ecmd.phy_address = i; + if (lp->options & PCNET32_PORT_ASEL) { + mii_ethtool_gset(&lp->mii_if, &ecmd); + ecmd.autoneg = AUTONEG_ENABLE; + } + mii_ethtool_sset(&lp->mii_if, &ecmd); + } + } + lp->mii_if.phy_id = first_phy; + if (netif_msg_link(lp)) + printk(KERN_INFO "%s: Using PHY number %d.\n", + dev->name, first_phy); } - } #ifdef DO_DXSUFLO - if (lp->dxsuflo) { /* Disable transmit stop on underflow */ - val = lp->a.read_csr (ioaddr, 3); - val |= 0x40; - lp->a.write_csr (ioaddr, 3, val); - } + if (lp->dxsuflo) { /* Disable transmit stop on underflow */ + val = lp->a.read_csr(ioaddr, 3); + val |= 0x40; + lp->a.write_csr(ioaddr, 3, val); + } #endif - lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); - pcnet32_load_multicast(dev); - - if (pcnet32_init_ring(dev)) { - rc = -ENOMEM; - goto err_free_ring; - } - - /* Re-initialize the PCNET32, and start it when done. */ - lp->a.write_csr (ioaddr, 1, (lp->dma_addr + - offsetof(struct pcnet32_private, init_block)) & 0xffff); - lp->a.write_csr (ioaddr, 2, (lp->dma_addr + - offsetof(struct pcnet32_private, init_block)) >> 16); - - lp->a.write_csr (ioaddr, 4, 0x0915); - lp->a.write_csr (ioaddr, 0, 0x0001); - - netif_start_queue(dev); - - /* If we have mii, print the link status and start the watchdog */ - if (lp->mii) { - mii_check_media (&lp->mii_if, netif_msg_link(lp), 1); - mod_timer (&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); - } - - i = 0; - while (i++ < 100) - if (lp->a.read_csr (ioaddr, 0) & 0x0100) - break; - /* - * We used to clear the InitDone bit, 0x0100, here but Mark Stockton - * reports that doing so triggers a bug in the '974. - */ - lp->a.write_csr (ioaddr, 0, 0x0042); - - if (netif_msg_ifup(lp)) - printk(KERN_DEBUG "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n", - dev->name, i, (u32) (lp->dma_addr + - offsetof(struct pcnet32_private, init_block)), - lp->a.read_csr(ioaddr, 0)); - - spin_unlock_irqrestore(&lp->lock, flags); - - return 0; /* Always succeed */ - -err_free_ring: - /* free any allocated skbuffs */ - for (i = 0; i < lp->rx_ring_size; i++) { - lp->rx_ring[i].status = 0; - if (lp->rx_skbuff[i]) { - pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2, - PCI_DMA_FROMDEVICE); - dev_kfree_skb(lp->rx_skbuff[i]); - } - lp->rx_skbuff[i] = NULL; - lp->rx_dma_addr[i] = 0; - } - - pcnet32_free_ring(dev); - - /* - * Switch back to 16bit mode to avoid problems with dumb - * DOS packet driver after a warm reboot - */ - lp->a.write_bcr (ioaddr, 20, 4); - -err_free_irq: - spin_unlock_irqrestore(&lp->lock, flags); - free_irq(dev->irq, dev); - return rc; + lp->init_block.mode = + le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); + pcnet32_load_multicast(dev); + + if (pcnet32_init_ring(dev)) { + rc = -ENOMEM; + goto err_free_ring; + } + + /* Re-initialize the PCNET32, and start it when done. 
*/ + lp->a.write_csr(ioaddr, 1, (lp->dma_addr + + offsetof(struct pcnet32_private, + init_block)) & 0xffff); + lp->a.write_csr(ioaddr, 2, + (lp->dma_addr + + offsetof(struct pcnet32_private, init_block)) >> 16); + + lp->a.write_csr(ioaddr, 4, 0x0915); + lp->a.write_csr(ioaddr, 0, 0x0001); + + netif_start_queue(dev); + + /* Print the link status and start the watchdog */ + pcnet32_check_media(dev, 1); + mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); + + i = 0; + while (i++ < 100) + if (lp->a.read_csr(ioaddr, 0) & 0x0100) + break; + /* + * We used to clear the InitDone bit, 0x0100, here but Mark Stockton + * reports that doing so triggers a bug in the '974. + */ + lp->a.write_csr(ioaddr, 0, 0x0042); + + if (netif_msg_ifup(lp)) + printk(KERN_DEBUG + "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n", + dev->name, i, + (u32) (lp->dma_addr + + offsetof(struct pcnet32_private, init_block)), + lp->a.read_csr(ioaddr, 0)); + + spin_unlock_irqrestore(&lp->lock, flags); + + return 0; /* Always succeed */ + + err_free_ring: + /* free any allocated skbuffs */ + for (i = 0; i < lp->rx_ring_size; i++) { + lp->rx_ring[i].status = 0; + if (lp->rx_skbuff[i]) { + pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], + PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); + dev_kfree_skb(lp->rx_skbuff[i]); + } + lp->rx_skbuff[i] = NULL; + lp->rx_dma_addr[i] = 0; + } + + pcnet32_free_ring(dev); + + /* + * Switch back to 16bit mode to avoid problems with dumb + * DOS packet driver after a warm reboot + */ + lp->a.write_bcr(ioaddr, 20, 4); + + err_free_irq: + spin_unlock_irqrestore(&lp->lock, flags); + free_irq(dev->irq, dev); + return rc; } /* @@ -1746,727 +1801,893 @@ err_free_irq: * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com */ -static void -pcnet32_purge_tx_ring(struct net_device *dev) +static void pcnet32_purge_tx_ring(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; - int i; - - for (i = 0; i < lp->tx_ring_size; i++) { - lp->tx_ring[i].status = 0; /* CPU owns buffer */ - wmb(); /* Make sure adapter sees owner change */ - if (lp->tx_skbuff[i]) { - pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], - lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE); - dev_kfree_skb_any(lp->tx_skbuff[i]); - } - lp->tx_skbuff[i] = NULL; - lp->tx_dma_addr[i] = 0; - } -} + struct pcnet32_private *lp = dev->priv; + int i; + for (i = 0; i < lp->tx_ring_size; i++) { + lp->tx_ring[i].status = 0; /* CPU owns buffer */ + wmb(); /* Make sure adapter sees owner change */ + if (lp->tx_skbuff[i]) { + pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], + lp->tx_skbuff[i]->len, + PCI_DMA_TODEVICE); + dev_kfree_skb_any(lp->tx_skbuff[i]); + } + lp->tx_skbuff[i] = NULL; + lp->tx_dma_addr[i] = 0; + } +} /* Initialize the PCNET32 Rx and Tx rings. 
*/ -static int -pcnet32_init_ring(struct net_device *dev) +static int pcnet32_init_ring(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; - int i; - - lp->tx_full = 0; - lp->cur_rx = lp->cur_tx = 0; - lp->dirty_rx = lp->dirty_tx = 0; - - for (i = 0; i < lp->rx_ring_size; i++) { - struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; - if (rx_skbuff == NULL) { - if (!(rx_skbuff = lp->rx_skbuff[i] = dev_alloc_skb (PKT_BUF_SZ))) { - /* there is not much, we can do at this point */ - if (pcnet32_debug & NETIF_MSG_DRV) - printk(KERN_ERR "%s: pcnet32_init_ring dev_alloc_skb failed.\n", - dev->name); - return -1; - } - skb_reserve (rx_skbuff, 2); - } - - rmb(); - if (lp->rx_dma_addr[i] == 0) - lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->data, - PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE); - lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]); - lp->rx_ring[i].buf_length = le16_to_cpu(2-PKT_BUF_SZ); - wmb(); /* Make sure owner changes after all others are visible */ - lp->rx_ring[i].status = le16_to_cpu(0x8000); - } - /* The Tx buffer address is filled in as needed, but we do need to clear - * the upper ownership bit. */ - for (i = 0; i < lp->tx_ring_size; i++) { - lp->tx_ring[i].status = 0; /* CPU owns buffer */ - wmb(); /* Make sure adapter sees owner change */ - lp->tx_ring[i].base = 0; - lp->tx_dma_addr[i] = 0; - } - - lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); - for (i = 0; i < 6; i++) - lp->init_block.phys_addr[i] = dev->dev_addr[i]; - lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr); - lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr); - wmb(); /* Make sure all changes are visible */ - return 0; + struct pcnet32_private *lp = dev->priv; + int i; + + lp->tx_full = 0; + lp->cur_rx = lp->cur_tx = 0; + lp->dirty_rx = lp->dirty_tx = 0; + + for (i = 0; i < lp->rx_ring_size; i++) { + struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; + if (rx_skbuff == NULL) { + if (! + (rx_skbuff = lp->rx_skbuff[i] = + dev_alloc_skb(PKT_BUF_SZ))) { + /* there is not much, we can do at this point */ + if (pcnet32_debug & NETIF_MSG_DRV) + printk(KERN_ERR + "%s: pcnet32_init_ring dev_alloc_skb failed.\n", + dev->name); + return -1; + } + skb_reserve(rx_skbuff, 2); + } + + rmb(); + if (lp->rx_dma_addr[i] == 0) + lp->rx_dma_addr[i] = + pci_map_single(lp->pci_dev, rx_skbuff->data, + PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); + lp->rx_ring[i].base = (u32) le32_to_cpu(lp->rx_dma_addr[i]); + lp->rx_ring[i].buf_length = le16_to_cpu(2 - PKT_BUF_SZ); + wmb(); /* Make sure owner changes after all others are visible */ + lp->rx_ring[i].status = le16_to_cpu(0x8000); + } + /* The Tx buffer address is filled in as needed, but we do need to clear + * the upper ownership bit. */ + for (i = 0; i < lp->tx_ring_size; i++) { + lp->tx_ring[i].status = 0; /* CPU owns buffer */ + wmb(); /* Make sure adapter sees owner change */ + lp->tx_ring[i].base = 0; + lp->tx_dma_addr[i] = 0; + } + + lp->init_block.tlen_rlen = + le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); + for (i = 0; i < 6; i++) + lp->init_block.phys_addr[i] = dev->dev_addr[i]; + lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr); + lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr); + wmb(); /* Make sure all changes are visible */ + return 0; } /* the pcnet32 has been issued a stop or reset. Wait for the stop bit * then flush the pending transmit operations, re-initialize the ring, * and tell the chip to initialize. 
*/ -static void -pcnet32_restart(struct net_device *dev, unsigned int csr0_bits) +static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits) { - struct pcnet32_private *lp = dev->priv; - unsigned long ioaddr = dev->base_addr; - int i; + struct pcnet32_private *lp = dev->priv; + unsigned long ioaddr = dev->base_addr; + int i; - /* wait for stop */ - for (i=0; i<100; i++) - if (lp->a.read_csr(ioaddr, 0) & 0x0004) - break; + /* wait for stop */ + for (i = 0; i < 100; i++) + if (lp->a.read_csr(ioaddr, 0) & 0x0004) + break; - if (i >= 100 && netif_msg_drv(lp)) - printk(KERN_ERR "%s: pcnet32_restart timed out waiting for stop.\n", - dev->name); + if (i >= 100 && netif_msg_drv(lp)) + printk(KERN_ERR + "%s: pcnet32_restart timed out waiting for stop.\n", + dev->name); - pcnet32_purge_tx_ring(dev); - if (pcnet32_init_ring(dev)) - return; + pcnet32_purge_tx_ring(dev); + if (pcnet32_init_ring(dev)) + return; - /* ReInit Ring */ - lp->a.write_csr (ioaddr, 0, 1); - i = 0; - while (i++ < 1000) - if (lp->a.read_csr (ioaddr, 0) & 0x0100) - break; + /* ReInit Ring */ + lp->a.write_csr(ioaddr, 0, 1); + i = 0; + while (i++ < 1000) + if (lp->a.read_csr(ioaddr, 0) & 0x0100) + break; - lp->a.write_csr (ioaddr, 0, csr0_bits); + lp->a.write_csr(ioaddr, 0, csr0_bits); } - -static void -pcnet32_tx_timeout (struct net_device *dev) +static void pcnet32_tx_timeout(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; - unsigned long ioaddr = dev->base_addr, flags; - - spin_lock_irqsave(&lp->lock, flags); - /* Transmitter timeout, serious problems. */ - if (pcnet32_debug & NETIF_MSG_DRV) - printk(KERN_ERR "%s: transmit timed out, status %4.4x, resetting.\n", - dev->name, lp->a.read_csr(ioaddr, 0)); - lp->a.write_csr (ioaddr, 0, 0x0004); - lp->stats.tx_errors++; - if (netif_msg_tx_err(lp)) { - int i; - printk(KERN_DEBUG " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.", - lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "", - lp->cur_rx); - for (i = 0 ; i < lp->rx_ring_size; i++) - printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", - le32_to_cpu(lp->rx_ring[i].base), - (-le16_to_cpu(lp->rx_ring[i].buf_length)) & 0xffff, - le32_to_cpu(lp->rx_ring[i].msg_length), - le16_to_cpu(lp->rx_ring[i].status)); - for (i = 0 ; i < lp->tx_ring_size; i++) - printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", - le32_to_cpu(lp->tx_ring[i].base), - (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff, - le32_to_cpu(lp->tx_ring[i].misc), - le16_to_cpu(lp->tx_ring[i].status)); - printk("\n"); - } - pcnet32_restart(dev, 0x0042); - - dev->trans_start = jiffies; - netif_wake_queue(dev); - - spin_unlock_irqrestore(&lp->lock, flags); -} + struct pcnet32_private *lp = dev->priv; + unsigned long ioaddr = dev->base_addr, flags; + + spin_lock_irqsave(&lp->lock, flags); + /* Transmitter timeout, serious problems. */ + if (pcnet32_debug & NETIF_MSG_DRV) + printk(KERN_ERR + "%s: transmit timed out, status %4.4x, resetting.\n", + dev->name, lp->a.read_csr(ioaddr, 0)); + lp->a.write_csr(ioaddr, 0, 0x0004); + lp->stats.tx_errors++; + if (netif_msg_tx_err(lp)) { + int i; + printk(KERN_DEBUG + " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.", + lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "", + lp->cur_rx); + for (i = 0; i < lp->rx_ring_size; i++) + printk("%s %08x %04x %08x %04x", i & 1 ? 
"" : "\n ", + le32_to_cpu(lp->rx_ring[i].base), + (-le16_to_cpu(lp->rx_ring[i].buf_length)) & + 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length), + le16_to_cpu(lp->rx_ring[i].status)); + for (i = 0; i < lp->tx_ring_size; i++) + printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", + le32_to_cpu(lp->tx_ring[i].base), + (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff, + le32_to_cpu(lp->tx_ring[i].misc), + le16_to_cpu(lp->tx_ring[i].status)); + printk("\n"); + } + pcnet32_restart(dev, 0x0042); + + dev->trans_start = jiffies; + netif_wake_queue(dev); + spin_unlock_irqrestore(&lp->lock, flags); +} -static int -pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev) +static int pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; - unsigned long ioaddr = dev->base_addr; - u16 status; - int entry; - unsigned long flags; + struct pcnet32_private *lp = dev->priv; + unsigned long ioaddr = dev->base_addr; + u16 status; + int entry; + unsigned long flags; - spin_lock_irqsave(&lp->lock, flags); + spin_lock_irqsave(&lp->lock, flags); - if (netif_msg_tx_queued(lp)) { - printk(KERN_DEBUG "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n", - dev->name, lp->a.read_csr(ioaddr, 0)); - } + if (netif_msg_tx_queued(lp)) { + printk(KERN_DEBUG + "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n", + dev->name, lp->a.read_csr(ioaddr, 0)); + } - /* Default status -- will not enable Successful-TxDone - * interrupt when that option is available to us. - */ - status = 0x8300; + /* Default status -- will not enable Successful-TxDone + * interrupt when that option is available to us. + */ + status = 0x8300; - /* Fill in a Tx ring entry */ + /* Fill in a Tx ring entry */ - /* Mask to ring buffer boundary. */ - entry = lp->cur_tx & lp->tx_mod_mask; + /* Mask to ring buffer boundary. */ + entry = lp->cur_tx & lp->tx_mod_mask; - /* Caution: the write order is important here, set the status - * with the "ownership" bits last. */ + /* Caution: the write order is important here, set the status + * with the "ownership" bits last. */ - lp->tx_ring[entry].length = le16_to_cpu(-skb->len); + lp->tx_ring[entry].length = le16_to_cpu(-skb->len); - lp->tx_ring[entry].misc = 0x00000000; + lp->tx_ring[entry].misc = 0x00000000; - lp->tx_skbuff[entry] = skb; - lp->tx_dma_addr[entry] = pci_map_single(lp->pci_dev, skb->data, skb->len, - PCI_DMA_TODEVICE); - lp->tx_ring[entry].base = (u32)le32_to_cpu(lp->tx_dma_addr[entry]); - wmb(); /* Make sure owner changes after all others are visible */ - lp->tx_ring[entry].status = le16_to_cpu(status); + lp->tx_skbuff[entry] = skb; + lp->tx_dma_addr[entry] = + pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); + lp->tx_ring[entry].base = (u32) le32_to_cpu(lp->tx_dma_addr[entry]); + wmb(); /* Make sure owner changes after all others are visible */ + lp->tx_ring[entry].status = le16_to_cpu(status); - lp->cur_tx++; - lp->stats.tx_bytes += skb->len; + lp->cur_tx++; + lp->stats.tx_bytes += skb->len; - /* Trigger an immediate send poll. */ - lp->a.write_csr (ioaddr, 0, 0x0048); + /* Trigger an immediate send poll. 
*/ + lp->a.write_csr(ioaddr, 0, 0x0048); - dev->trans_start = jiffies; + dev->trans_start = jiffies; - if (lp->tx_ring[(entry+1) & lp->tx_mod_mask].base != 0) { - lp->tx_full = 1; - netif_stop_queue(dev); - } - spin_unlock_irqrestore(&lp->lock, flags); - return 0; + if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) { + lp->tx_full = 1; + netif_stop_queue(dev); + } + spin_unlock_irqrestore(&lp->lock, flags); + return 0; } /* The PCNET32 interrupt handler. */ static irqreturn_t -pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs) +pcnet32_interrupt(int irq, void *dev_id, struct pt_regs *regs) { - struct net_device *dev = dev_id; - struct pcnet32_private *lp; - unsigned long ioaddr; - u16 csr0,rap; - int boguscnt = max_interrupt_work; - int must_restart; - - if (!dev) { - if (pcnet32_debug & NETIF_MSG_INTR) - printk (KERN_DEBUG "%s(): irq %d for unknown device\n", - __FUNCTION__, irq); - return IRQ_NONE; - } - - ioaddr = dev->base_addr; - lp = dev->priv; - - spin_lock(&lp->lock); - - rap = lp->a.read_rap(ioaddr); - while ((csr0 = lp->a.read_csr (ioaddr, 0)) & 0x8f00 && --boguscnt >= 0) { - if (csr0 == 0xffff) { - break; /* PCMCIA remove happened */ + struct net_device *dev = dev_id; + struct pcnet32_private *lp; + unsigned long ioaddr; + u16 csr0, rap; + int boguscnt = max_interrupt_work; + int must_restart; + + if (!dev) { + if (pcnet32_debug & NETIF_MSG_INTR) + printk(KERN_DEBUG "%s(): irq %d for unknown device\n", + __FUNCTION__, irq); + return IRQ_NONE; } - /* Acknowledge all of the current interrupt sources ASAP. */ - lp->a.write_csr (ioaddr, 0, csr0 & ~0x004f); - must_restart = 0; + ioaddr = dev->base_addr; + lp = dev->priv; - if (netif_msg_intr(lp)) - printk(KERN_DEBUG "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n", - dev->name, csr0, lp->a.read_csr (ioaddr, 0)); - - if (csr0 & 0x0400) /* Rx interrupt */ - pcnet32_rx(dev); - - if (csr0 & 0x0200) { /* Tx-done interrupt */ - unsigned int dirty_tx = lp->dirty_tx; - int delta; - - while (dirty_tx != lp->cur_tx) { - int entry = dirty_tx & lp->tx_mod_mask; - int status = (short)le16_to_cpu(lp->tx_ring[entry].status); - - if (status < 0) - break; /* It still hasn't been Txed */ - - lp->tx_ring[entry].base = 0; - - if (status & 0x4000) { - /* There was an major error, log it. */ - int err_status = le32_to_cpu(lp->tx_ring[entry].misc); - lp->stats.tx_errors++; - if (netif_msg_tx_err(lp)) - printk(KERN_ERR "%s: Tx error status=%04x err_status=%08x\n", - dev->name, status, err_status); - if (err_status & 0x04000000) lp->stats.tx_aborted_errors++; - if (err_status & 0x08000000) lp->stats.tx_carrier_errors++; - if (err_status & 0x10000000) lp->stats.tx_window_errors++; + spin_lock(&lp->lock); + + rap = lp->a.read_rap(ioaddr); + while ((csr0 = lp->a.read_csr(ioaddr, 0)) & 0x8f00 && --boguscnt >= 0) { + if (csr0 == 0xffff) { + break; /* PCMCIA remove happened */ + } + /* Acknowledge all of the current interrupt sources ASAP. */ + lp->a.write_csr(ioaddr, 0, csr0 & ~0x004f); + + must_restart = 0; + + if (netif_msg_intr(lp)) + printk(KERN_DEBUG + "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n", + dev->name, csr0, lp->a.read_csr(ioaddr, 0)); + + if (csr0 & 0x0400) /* Rx interrupt */ + pcnet32_rx(dev); + + if (csr0 & 0x0200) { /* Tx-done interrupt */ + unsigned int dirty_tx = lp->dirty_tx; + int delta; + + while (dirty_tx != lp->cur_tx) { + int entry = dirty_tx & lp->tx_mod_mask; + int status = + (short)le16_to_cpu(lp->tx_ring[entry]. 
+ status); + + if (status < 0) + break; /* It still hasn't been Txed */ + + lp->tx_ring[entry].base = 0; + + if (status & 0x4000) { + /* There was an major error, log it. */ + int err_status = + le32_to_cpu(lp->tx_ring[entry]. + misc); + lp->stats.tx_errors++; + if (netif_msg_tx_err(lp)) + printk(KERN_ERR + "%s: Tx error status=%04x err_status=%08x\n", + dev->name, status, + err_status); + if (err_status & 0x04000000) + lp->stats.tx_aborted_errors++; + if (err_status & 0x08000000) + lp->stats.tx_carrier_errors++; + if (err_status & 0x10000000) + lp->stats.tx_window_errors++; #ifndef DO_DXSUFLO - if (err_status & 0x40000000) { - lp->stats.tx_fifo_errors++; - /* Ackk! On FIFO errors the Tx unit is turned off! */ - /* Remove this verbosity later! */ - if (netif_msg_tx_err(lp)) - printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n", - dev->name, csr0); - must_restart = 1; - } + if (err_status & 0x40000000) { + lp->stats.tx_fifo_errors++; + /* Ackk! On FIFO errors the Tx unit is turned off! */ + /* Remove this verbosity later! */ + if (netif_msg_tx_err(lp)) + printk(KERN_ERR + "%s: Tx FIFO error! CSR0=%4.4x\n", + dev->name, csr0); + must_restart = 1; + } #else - if (err_status & 0x40000000) { - lp->stats.tx_fifo_errors++; - if (! lp->dxsuflo) { /* If controller doesn't recover ... */ - /* Ackk! On FIFO errors the Tx unit is turned off! */ - /* Remove this verbosity later! */ - if (netif_msg_tx_err(lp)) - printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n", - dev->name, csr0); - must_restart = 1; - } - } + if (err_status & 0x40000000) { + lp->stats.tx_fifo_errors++; + if (!lp->dxsuflo) { /* If controller doesn't recover ... */ + /* Ackk! On FIFO errors the Tx unit is turned off! */ + /* Remove this verbosity later! */ + if (netif_msg_tx_err + (lp)) + printk(KERN_ERR + "%s: Tx FIFO error! CSR0=%4.4x\n", + dev-> + name, + csr0); + must_restart = 1; + } + } #endif - } else { - if (status & 0x1800) - lp->stats.collisions++; - lp->stats.tx_packets++; + } else { + if (status & 0x1800) + lp->stats.collisions++; + lp->stats.tx_packets++; + } + + /* We must free the original skb */ + if (lp->tx_skbuff[entry]) { + pci_unmap_single(lp->pci_dev, + lp->tx_dma_addr[entry], + lp->tx_skbuff[entry]-> + len, PCI_DMA_TODEVICE); + dev_kfree_skb_irq(lp->tx_skbuff[entry]); + lp->tx_skbuff[entry] = NULL; + lp->tx_dma_addr[entry] = 0; + } + dirty_tx++; + } + + delta = + (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + + lp->tx_ring_size); + if (delta > lp->tx_ring_size) { + if (netif_msg_drv(lp)) + printk(KERN_ERR + "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n", + dev->name, dirty_tx, lp->cur_tx, + lp->tx_full); + dirty_tx += lp->tx_ring_size; + delta -= lp->tx_ring_size; + } + + if (lp->tx_full && + netif_queue_stopped(dev) && + delta < lp->tx_ring_size - 2) { + /* The ring is no longer full, clear tbusy. */ + lp->tx_full = 0; + netif_wake_queue(dev); + } + lp->dirty_tx = dirty_tx; + } + + /* Log misc errors. */ + if (csr0 & 0x4000) + lp->stats.tx_errors++; /* Tx babble. */ + if (csr0 & 0x1000) { + /* + * this happens when our receive ring is full. This shouldn't + * be a problem as we will see normal rx interrupts for the frames + * in the receive ring. But there are some PCI chipsets (I can + * reproduce this on SP3G with Intel saturn chipset) which have + * sometimes problems and will fill up the receive ring with + * error descriptors. In this situation we don't get a rx + * interrupt, but a missed frame interrupt sooner or later. + * So we try to clean up our receive ring here. 
+ */ + pcnet32_rx(dev); + lp->stats.rx_errors++; /* Missed a Rx frame. */ + } + if (csr0 & 0x0800) { + if (netif_msg_drv(lp)) + printk(KERN_ERR + "%s: Bus master arbitration failure, status %4.4x.\n", + dev->name, csr0); + /* unlike for the lance, there is no restart needed */ } - /* We must free the original skb */ - if (lp->tx_skbuff[entry]) { - pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[entry], - lp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(lp->tx_skbuff[entry]); - lp->tx_skbuff[entry] = NULL; - lp->tx_dma_addr[entry] = 0; + if (must_restart) { + /* reset the chip to clear the error condition, then restart */ + lp->a.reset(ioaddr); + lp->a.write_csr(ioaddr, 4, 0x0915); + pcnet32_restart(dev, 0x0002); + netif_wake_queue(dev); } - dirty_tx++; - } - - delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size); - if (delta > lp->tx_ring_size) { - if (netif_msg_drv(lp)) - printk(KERN_ERR "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n", - dev->name, dirty_tx, lp->cur_tx, lp->tx_full); - dirty_tx += lp->tx_ring_size; - delta -= lp->tx_ring_size; - } - - if (lp->tx_full && - netif_queue_stopped(dev) && - delta < lp->tx_ring_size - 2) { - /* The ring is no longer full, clear tbusy. */ - lp->tx_full = 0; - netif_wake_queue (dev); - } - lp->dirty_tx = dirty_tx; - } - - /* Log misc errors. */ - if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */ - if (csr0 & 0x1000) { - /* - * this happens when our receive ring is full. This shouldn't - * be a problem as we will see normal rx interrupts for the frames - * in the receive ring. But there are some PCI chipsets (I can - * reproduce this on SP3G with Intel saturn chipset) which have - * sometimes problems and will fill up the receive ring with - * error descriptors. In this situation we don't get a rx - * interrupt, but a missed frame interrupt sooner or later. - * So we try to clean up our receive ring here. - */ - pcnet32_rx(dev); - lp->stats.rx_errors++; /* Missed a Rx frame. */ - } - if (csr0 & 0x0800) { - if (netif_msg_drv(lp)) - printk(KERN_ERR "%s: Bus master arbitration failure, status %4.4x.\n", - dev->name, csr0); - /* unlike for the lance, there is no restart needed */ - } - - if (must_restart) { - /* reset the chip to clear the error condition, then restart */ - lp->a.reset(ioaddr); - lp->a.write_csr(ioaddr, 4, 0x0915); - pcnet32_restart(dev, 0x0002); - netif_wake_queue(dev); - } - } - - /* Set interrupt enable. */ - lp->a.write_csr (ioaddr, 0, 0x0040); - lp->a.write_rap (ioaddr,rap); - - if (netif_msg_intr(lp)) - printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n", - dev->name, lp->a.read_csr (ioaddr, 0)); - - spin_unlock(&lp->lock); - - return IRQ_HANDLED; + } + + /* Set interrupt enable. */ + lp->a.write_csr(ioaddr, 0, 0x0040); + lp->a.write_rap(ioaddr, rap); + + if (netif_msg_intr(lp)) + printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n", + dev->name, lp->a.read_csr(ioaddr, 0)); + + spin_unlock(&lp->lock); + + return IRQ_HANDLED; } -static int -pcnet32_rx(struct net_device *dev) +static int pcnet32_rx(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; - int entry = lp->cur_rx & lp->rx_mod_mask; - int boguscnt = lp->rx_ring_size / 2; - - /* If we own the next entry, it's a new packet. Send it up. */ - while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) { - int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8; - - if (status != 0x03) { /* There was an error. 
*/ - /* - * There is a tricky error noted by John Murphy, - * <murf@perftech.com> to Russ Nelson: Even with full-sized - * buffers it's possible for a jabber packet to use two - * buffers, with only the last correctly noting the error. - */ - if (status & 0x01) /* Only count a general error at the */ - lp->stats.rx_errors++; /* end of a packet.*/ - if (status & 0x20) lp->stats.rx_frame_errors++; - if (status & 0x10) lp->stats.rx_over_errors++; - if (status & 0x08) lp->stats.rx_crc_errors++; - if (status & 0x04) lp->stats.rx_fifo_errors++; - lp->rx_ring[entry].status &= le16_to_cpu(0x03ff); - } else { - /* Malloc up new buffer, compatible with net-2e. */ - short pkt_len = (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff)-4; - struct sk_buff *skb; - - /* Discard oversize frames. */ - if (unlikely(pkt_len > PKT_BUF_SZ - 2)) { - if (netif_msg_drv(lp)) - printk(KERN_ERR "%s: Impossible packet size %d!\n", - dev->name, pkt_len); - lp->stats.rx_errors++; - } else if (pkt_len < 60) { - if (netif_msg_rx_err(lp)) - printk(KERN_ERR "%s: Runt packet!\n", dev->name); - lp->stats.rx_errors++; - } else { - int rx_in_place = 0; - - if (pkt_len > rx_copybreak) { - struct sk_buff *newskb; - - if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) { - skb_reserve (newskb, 2); - skb = lp->rx_skbuff[entry]; - pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[entry], - PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE); - skb_put (skb, pkt_len); - lp->rx_skbuff[entry] = newskb; - newskb->dev = dev; - lp->rx_dma_addr[entry] = - pci_map_single(lp->pci_dev, newskb->data, - PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE); - lp->rx_ring[entry].base = le32_to_cpu(lp->rx_dma_addr[entry]); - rx_in_place = 1; - } else - skb = NULL; + struct pcnet32_private *lp = dev->priv; + int entry = lp->cur_rx & lp->rx_mod_mask; + int boguscnt = lp->rx_ring_size / 2; + + /* If we own the next entry, it's a new packet. Send it up. */ + while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) { + int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8; + + if (status != 0x03) { /* There was an error. */ + /* + * There is a tricky error noted by John Murphy, + * <murf@perftech.com> to Russ Nelson: Even with full-sized + * buffers it's possible for a jabber packet to use two + * buffers, with only the last correctly noting the error. + */ + if (status & 0x01) /* Only count a general error at the */ + lp->stats.rx_errors++; /* end of a packet. 
*/ + if (status & 0x20) + lp->stats.rx_frame_errors++; + if (status & 0x10) + lp->stats.rx_over_errors++; + if (status & 0x08) + lp->stats.rx_crc_errors++; + if (status & 0x04) + lp->stats.rx_fifo_errors++; + lp->rx_ring[entry].status &= le16_to_cpu(0x03ff); } else { - skb = dev_alloc_skb(pkt_len+2); - } - - if (skb == NULL) { - int i; - if (netif_msg_drv(lp)) - printk(KERN_ERR "%s: Memory squeeze, deferring packet.\n", - dev->name); - for (i = 0; i < lp->rx_ring_size; i++) - if ((short)le16_to_cpu(lp->rx_ring[(entry+i) - & lp->rx_mod_mask].status) < 0) - break; - - if (i > lp->rx_ring_size -2) { - lp->stats.rx_dropped++; - lp->rx_ring[entry].status |= le16_to_cpu(0x8000); - wmb(); /* Make sure adapter sees owner change */ - lp->cur_rx++; - } - break; - } - skb->dev = dev; - if (!rx_in_place) { - skb_reserve(skb,2); /* 16 byte align */ - skb_put(skb,pkt_len); /* Make room */ - pci_dma_sync_single_for_cpu(lp->pci_dev, - lp->rx_dma_addr[entry], - PKT_BUF_SZ-2, - PCI_DMA_FROMDEVICE); - eth_copy_and_sum(skb, - (unsigned char *)(lp->rx_skbuff[entry]->data), - pkt_len,0); - pci_dma_sync_single_for_device(lp->pci_dev, - lp->rx_dma_addr[entry], - PKT_BUF_SZ-2, - PCI_DMA_FROMDEVICE); + /* Malloc up new buffer, compatible with net-2e. */ + short pkt_len = + (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff) + - 4; + struct sk_buff *skb; + + /* Discard oversize frames. */ + if (unlikely(pkt_len > PKT_BUF_SZ - 2)) { + if (netif_msg_drv(lp)) + printk(KERN_ERR + "%s: Impossible packet size %d!\n", + dev->name, pkt_len); + lp->stats.rx_errors++; + } else if (pkt_len < 60) { + if (netif_msg_rx_err(lp)) + printk(KERN_ERR "%s: Runt packet!\n", + dev->name); + lp->stats.rx_errors++; + } else { + int rx_in_place = 0; + + if (pkt_len > rx_copybreak) { + struct sk_buff *newskb; + + if ((newskb = + dev_alloc_skb(PKT_BUF_SZ))) { + skb_reserve(newskb, 2); + skb = lp->rx_skbuff[entry]; + pci_unmap_single(lp->pci_dev, + lp-> + rx_dma_addr + [entry], + PKT_BUF_SZ - 2, + PCI_DMA_FROMDEVICE); + skb_put(skb, pkt_len); + lp->rx_skbuff[entry] = newskb; + newskb->dev = dev; + lp->rx_dma_addr[entry] = + pci_map_single(lp->pci_dev, + newskb->data, + PKT_BUF_SZ - + 2, + PCI_DMA_FROMDEVICE); + lp->rx_ring[entry].base = + le32_to_cpu(lp-> + rx_dma_addr + [entry]); + rx_in_place = 1; + } else + skb = NULL; + } else { + skb = dev_alloc_skb(pkt_len + 2); + } + + if (skb == NULL) { + int i; + if (netif_msg_drv(lp)) + printk(KERN_ERR + "%s: Memory squeeze, deferring packet.\n", + dev->name); + for (i = 0; i < lp->rx_ring_size; i++) + if ((short) + le16_to_cpu(lp-> + rx_ring[(entry + + i) + & lp-> + rx_mod_mask]. 
+ status) < 0) + break; + + if (i > lp->rx_ring_size - 2) { + lp->stats.rx_dropped++; + lp->rx_ring[entry].status |= + le16_to_cpu(0x8000); + wmb(); /* Make sure adapter sees owner change */ + lp->cur_rx++; + } + break; + } + skb->dev = dev; + if (!rx_in_place) { + skb_reserve(skb, 2); /* 16 byte align */ + skb_put(skb, pkt_len); /* Make room */ + pci_dma_sync_single_for_cpu(lp->pci_dev, + lp-> + rx_dma_addr + [entry], + PKT_BUF_SZ - + 2, + PCI_DMA_FROMDEVICE); + eth_copy_and_sum(skb, + (unsigned char *)(lp-> + rx_skbuff + [entry]-> + data), + pkt_len, 0); + pci_dma_sync_single_for_device(lp-> + pci_dev, + lp-> + rx_dma_addr + [entry], + PKT_BUF_SZ + - 2, + PCI_DMA_FROMDEVICE); + } + lp->stats.rx_bytes += skb->len; + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->last_rx = jiffies; + lp->stats.rx_packets++; + } } - lp->stats.rx_bytes += skb->len; - skb->protocol=eth_type_trans(skb,dev); - netif_rx(skb); - dev->last_rx = jiffies; - lp->stats.rx_packets++; - } + /* + * The docs say that the buffer length isn't touched, but Andrew Boyd + * of QNX reports that some revs of the 79C965 clear it. + */ + lp->rx_ring[entry].buf_length = le16_to_cpu(2 - PKT_BUF_SZ); + wmb(); /* Make sure owner changes after all others are visible */ + lp->rx_ring[entry].status |= le16_to_cpu(0x8000); + entry = (++lp->cur_rx) & lp->rx_mod_mask; + if (--boguscnt <= 0) + break; /* don't stay in loop forever */ } - /* - * The docs say that the buffer length isn't touched, but Andrew Boyd - * of QNX reports that some revs of the 79C965 clear it. - */ - lp->rx_ring[entry].buf_length = le16_to_cpu(2-PKT_BUF_SZ); - wmb(); /* Make sure owner changes after all others are visible */ - lp->rx_ring[entry].status |= le16_to_cpu(0x8000); - entry = (++lp->cur_rx) & lp->rx_mod_mask; - if (--boguscnt <= 0) break; /* don't stay in loop forever */ - } - - return 0; + + return 0; } -static int -pcnet32_close(struct net_device *dev) +static int pcnet32_close(struct net_device *dev) { - unsigned long ioaddr = dev->base_addr; - struct pcnet32_private *lp = dev->priv; - int i; - unsigned long flags; + unsigned long ioaddr = dev->base_addr; + struct pcnet32_private *lp = dev->priv; + int i; + unsigned long flags; - del_timer_sync(&lp->watchdog_timer); + del_timer_sync(&lp->watchdog_timer); - netif_stop_queue(dev); + netif_stop_queue(dev); - spin_lock_irqsave(&lp->lock, flags); + spin_lock_irqsave(&lp->lock, flags); - lp->stats.rx_missed_errors = lp->a.read_csr (ioaddr, 112); + lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); - if (netif_msg_ifdown(lp)) - printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", - dev->name, lp->a.read_csr (ioaddr, 0)); + if (netif_msg_ifdown(lp)) + printk(KERN_DEBUG + "%s: Shutting down ethercard, status was %2.2x.\n", + dev->name, lp->a.read_csr(ioaddr, 0)); - /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */ - lp->a.write_csr (ioaddr, 0, 0x0004); + /* We stop the PCNET32 here -- it occasionally polls memory if we don't. 
*/ + lp->a.write_csr(ioaddr, 0, 0x0004); - /* - * Switch back to 16bit mode to avoid problems with dumb - * DOS packet driver after a warm reboot - */ - lp->a.write_bcr (ioaddr, 20, 4); + /* + * Switch back to 16bit mode to avoid problems with dumb + * DOS packet driver after a warm reboot + */ + lp->a.write_bcr(ioaddr, 20, 4); - spin_unlock_irqrestore(&lp->lock, flags); + spin_unlock_irqrestore(&lp->lock, flags); - free_irq(dev->irq, dev); + free_irq(dev->irq, dev); - spin_lock_irqsave(&lp->lock, flags); + spin_lock_irqsave(&lp->lock, flags); - /* free all allocated skbuffs */ - for (i = 0; i < lp->rx_ring_size; i++) { - lp->rx_ring[i].status = 0; - wmb(); /* Make sure adapter sees owner change */ - if (lp->rx_skbuff[i]) { - pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2, - PCI_DMA_FROMDEVICE); - dev_kfree_skb(lp->rx_skbuff[i]); + /* free all allocated skbuffs */ + for (i = 0; i < lp->rx_ring_size; i++) { + lp->rx_ring[i].status = 0; + wmb(); /* Make sure adapter sees owner change */ + if (lp->rx_skbuff[i]) { + pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], + PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); + dev_kfree_skb(lp->rx_skbuff[i]); + } + lp->rx_skbuff[i] = NULL; + lp->rx_dma_addr[i] = 0; } - lp->rx_skbuff[i] = NULL; - lp->rx_dma_addr[i] = 0; - } - for (i = 0; i < lp->tx_ring_size; i++) { - lp->tx_ring[i].status = 0; /* CPU owns buffer */ - wmb(); /* Make sure adapter sees owner change */ - if (lp->tx_skbuff[i]) { - pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], - lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE); - dev_kfree_skb(lp->tx_skbuff[i]); + for (i = 0; i < lp->tx_ring_size; i++) { + lp->tx_ring[i].status = 0; /* CPU owns buffer */ + wmb(); /* Make sure adapter sees owner change */ + if (lp->tx_skbuff[i]) { + pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], + lp->tx_skbuff[i]->len, + PCI_DMA_TODEVICE); + dev_kfree_skb(lp->tx_skbuff[i]); + } + lp->tx_skbuff[i] = NULL; + lp->tx_dma_addr[i] = 0; } - lp->tx_skbuff[i] = NULL; - lp->tx_dma_addr[i] = 0; - } - spin_unlock_irqrestore(&lp->lock, flags); + spin_unlock_irqrestore(&lp->lock, flags); - return 0; + return 0; } -static struct net_device_stats * -pcnet32_get_stats(struct net_device *dev) +static struct net_device_stats *pcnet32_get_stats(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; - unsigned long ioaddr = dev->base_addr; - u16 saved_addr; - unsigned long flags; - - spin_lock_irqsave(&lp->lock, flags); - saved_addr = lp->a.read_rap(ioaddr); - lp->stats.rx_missed_errors = lp->a.read_csr (ioaddr, 112); - lp->a.write_rap(ioaddr, saved_addr); - spin_unlock_irqrestore(&lp->lock, flags); - - return &lp->stats; + struct pcnet32_private *lp = dev->priv; + unsigned long ioaddr = dev->base_addr; + u16 saved_addr; + unsigned long flags; + + spin_lock_irqsave(&lp->lock, flags); + saved_addr = lp->a.read_rap(ioaddr); + lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); + lp->a.write_rap(ioaddr, saved_addr); + spin_unlock_irqrestore(&lp->lock, flags); + + return &lp->stats; } /* taken from the sunlance driver, which it took from the depca driver */ -static void pcnet32_load_multicast (struct net_device *dev) +static void pcnet32_load_multicast(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; - volatile struct pcnet32_init_block *ib = &lp->init_block; - volatile u16 *mcast_table = (u16 *)&ib->filter; - struct dev_mc_list *dmi=dev->mc_list; - char *addrs; - int i; - u32 crc; - - /* set all multicast bits */ - if (dev->flags & IFF_ALLMULTI) { - ib->filter[0] = 0xffffffff; - ib->filter[1] 
= 0xffffffff; + struct pcnet32_private *lp = dev->priv; + volatile struct pcnet32_init_block *ib = &lp->init_block; + volatile u16 *mcast_table = (u16 *) & ib->filter; + struct dev_mc_list *dmi = dev->mc_list; + char *addrs; + int i; + u32 crc; + + /* set all multicast bits */ + if (dev->flags & IFF_ALLMULTI) { + ib->filter[0] = 0xffffffff; + ib->filter[1] = 0xffffffff; + return; + } + /* clear the multicast filter */ + ib->filter[0] = 0; + ib->filter[1] = 0; + + /* Add addresses */ + for (i = 0; i < dev->mc_count; i++) { + addrs = dmi->dmi_addr; + dmi = dmi->next; + + /* multicast address? */ + if (!(*addrs & 1)) + continue; + + crc = ether_crc_le(6, addrs); + crc = crc >> 26; + mcast_table[crc >> 4] = + le16_to_cpu(le16_to_cpu(mcast_table[crc >> 4]) | + (1 << (crc & 0xf))); + } return; - } - /* clear the multicast filter */ - ib->filter[0] = 0; - ib->filter[1] = 0; - - /* Add addresses */ - for (i = 0; i < dev->mc_count; i++) { - addrs = dmi->dmi_addr; - dmi = dmi->next; - - /* multicast address? */ - if (!(*addrs & 1)) - continue; - - crc = ether_crc_le(6, addrs); - crc = crc >> 26; - mcast_table [crc >> 4] = le16_to_cpu( - le16_to_cpu(mcast_table [crc >> 4]) | (1 << (crc & 0xf))); - } - return; } - /* * Set or clear the multicast filter for this adaptor. */ static void pcnet32_set_multicast_list(struct net_device *dev) { - unsigned long ioaddr = dev->base_addr, flags; - struct pcnet32_private *lp = dev->priv; - - spin_lock_irqsave(&lp->lock, flags); - if (dev->flags&IFF_PROMISC) { - /* Log any net taps. */ - if (netif_msg_hw(lp)) - printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name); - lp->init_block.mode = le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << 7); - } else { - lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); - pcnet32_load_multicast (dev); - } - - lp->a.write_csr (ioaddr, 0, 0x0004); /* Temporarily stop the lance. */ - pcnet32_restart(dev, 0x0042); /* Resume normal operation */ - netif_wake_queue(dev); - - spin_unlock_irqrestore(&lp->lock, flags); + unsigned long ioaddr = dev->base_addr, flags; + struct pcnet32_private *lp = dev->priv; + + spin_lock_irqsave(&lp->lock, flags); + if (dev->flags & IFF_PROMISC) { + /* Log any net taps. */ + if (netif_msg_hw(lp)) + printk(KERN_INFO "%s: Promiscuous mode enabled.\n", + dev->name); + lp->init_block.mode = + le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << + 7); + } else { + lp->init_block.mode = + le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); + pcnet32_load_multicast(dev); + } + + lp->a.write_csr(ioaddr, 0, 0x0004); /* Temporarily stop the lance. 
*/ + pcnet32_restart(dev, 0x0042); /* Resume normal operation */ + netif_wake_queue(dev); + + spin_unlock_irqrestore(&lp->lock, flags); } /* This routine assumes that the lp->lock is held */ static int mdio_read(struct net_device *dev, int phy_id, int reg_num) { - struct pcnet32_private *lp = dev->priv; - unsigned long ioaddr = dev->base_addr; - u16 val_out; + struct pcnet32_private *lp = dev->priv; + unsigned long ioaddr = dev->base_addr; + u16 val_out; - if (!lp->mii) - return 0; + if (!lp->mii) + return 0; - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); - val_out = lp->a.read_bcr(ioaddr, 34); + lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); + val_out = lp->a.read_bcr(ioaddr, 34); - return val_out; + return val_out; } /* This routine assumes that the lp->lock is held */ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val) { - struct pcnet32_private *lp = dev->priv; - unsigned long ioaddr = dev->base_addr; + struct pcnet32_private *lp = dev->priv; + unsigned long ioaddr = dev->base_addr; - if (!lp->mii) - return; + if (!lp->mii) + return; - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); - lp->a.write_bcr(ioaddr, 34, val); + lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); + lp->a.write_bcr(ioaddr, 34, val); } static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct pcnet32_private *lp = dev->priv; - int rc; - unsigned long flags; + struct pcnet32_private *lp = dev->priv; + int rc; + unsigned long flags; + + /* SIOC[GS]MIIxxx ioctls */ + if (lp->mii) { + spin_lock_irqsave(&lp->lock, flags); + rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL); + spin_unlock_irqrestore(&lp->lock, flags); + } else { + rc = -EOPNOTSUPP; + } + + return rc; +} + +static int pcnet32_check_otherphy(struct net_device *dev) +{ + struct pcnet32_private *lp = dev->priv; + struct mii_if_info mii = lp->mii_if; + u16 bmcr; + int i; - /* SIOC[GS]MIIxxx ioctls */ - if (lp->mii) { - spin_lock_irqsave(&lp->lock, flags); - rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL); - spin_unlock_irqrestore(&lp->lock, flags); - } else { - rc = -EOPNOTSUPP; - } + for (i = 0; i < PCNET32_MAX_PHYS; i++) { + if (i == lp->mii_if.phy_id) + continue; /* skip active phy */ + if (lp->phymask & (1 << i)) { + mii.phy_id = i; + if (mii_link_ok(&mii)) { + /* found PHY with active link */ + if (netif_msg_link(lp)) + printk(KERN_INFO + "%s: Using PHY number %d.\n", + dev->name, i); + + /* isolate inactive phy */ + bmcr = + mdio_read(dev, lp->mii_if.phy_id, MII_BMCR); + mdio_write(dev, lp->mii_if.phy_id, MII_BMCR, + bmcr | BMCR_ISOLATE); + + /* de-isolate new phy */ + bmcr = mdio_read(dev, i, MII_BMCR); + mdio_write(dev, i, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + + /* set new phy address */ + lp->mii_if.phy_id = i; + return 1; + } + } + } + return 0; +} + +/* + * Show the status of the media. Similar to mii_check_media however it + * correctly shows the link speed for all (tested) pcnet32 variants. + * Devices with no mii just report link state without speed. + * + * Caller is assumed to hold and release the lp->lock. + */ - return rc; +static void pcnet32_check_media(struct net_device *dev, int verbose) +{ + struct pcnet32_private *lp = dev->priv; + int curr_link; + int prev_link = netif_carrier_ok(dev) ? 
1 : 0; + u32 bcr9; + + if (lp->mii) { + curr_link = mii_link_ok(&lp->mii_if); + } else { + ulong ioaddr = dev->base_addr; /* card base I/O address */ + curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0); + } + if (!curr_link) { + if (prev_link || verbose) { + netif_carrier_off(dev); + if (netif_msg_link(lp)) + printk(KERN_INFO "%s: link down\n", dev->name); + } + if (lp->phycount > 1) { + curr_link = pcnet32_check_otherphy(dev); + prev_link = 0; + } + } else if (verbose || !prev_link) { + netif_carrier_on(dev); + if (lp->mii) { + if (netif_msg_link(lp)) { + struct ethtool_cmd ecmd; + mii_ethtool_gset(&lp->mii_if, &ecmd); + printk(KERN_INFO + "%s: link up, %sMbps, %s-duplex\n", + dev->name, + (ecmd.speed == SPEED_100) ? "100" : "10", + (ecmd.duplex == + DUPLEX_FULL) ? "full" : "half"); + } + bcr9 = lp->a.read_bcr(dev->base_addr, 9); + if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) { + if (lp->mii_if.full_duplex) + bcr9 |= (1 << 0); + else + bcr9 &= ~(1 << 0); + lp->a.write_bcr(dev->base_addr, 9, bcr9); + } + } else { + if (netif_msg_link(lp)) + printk(KERN_INFO "%s: link up\n", dev->name); + } + } } +/* + * Check for loss of link and link establishment. + * Can not use mii_check_media because it does nothing if mode is forced. + */ + static void pcnet32_watchdog(struct net_device *dev) { - struct pcnet32_private *lp = dev->priv; - unsigned long flags; + struct pcnet32_private *lp = dev->priv; + unsigned long flags; - /* Print the link status if it has changed */ - if (lp->mii) { + /* Print the link status if it has changed */ spin_lock_irqsave(&lp->lock, flags); - mii_check_media (&lp->mii_if, netif_msg_link(lp), 0); + pcnet32_check_media(dev, 0); spin_unlock_irqrestore(&lp->lock, flags); - } - mod_timer (&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); + mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); } static void __devexit pcnet32_remove_one(struct pci_dev *pdev) { - struct net_device *dev = pci_get_drvdata(pdev); - - if (dev) { - struct pcnet32_private *lp = dev->priv; - - unregister_netdev(dev); - pcnet32_free_ring(dev); - release_region(dev->base_addr, PCNET32_TOTAL_SIZE); - pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); - free_netdev(dev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - } + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + struct pcnet32_private *lp = dev->priv; + + unregister_netdev(dev); + pcnet32_free_ring(dev); + release_region(dev->base_addr, PCNET32_TOTAL_SIZE); + pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); + free_netdev(dev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + } } static struct pci_driver pcnet32_driver = { - .name = DRV_NAME, - .probe = pcnet32_probe_pci, - .remove = __devexit_p(pcnet32_remove_one), - .id_table = pcnet32_pci_tbl, + .name = DRV_NAME, + .probe = pcnet32_probe_pci, + .remove = __devexit_p(pcnet32_remove_one), + .id_table = pcnet32_pci_tbl, }; /* An additional parameter that may be passed in... 
*/ @@ -2477,9 +2698,11 @@ static int pcnet32_have_pci; module_param(debug, int, 0); MODULE_PARM_DESC(debug, DRV_NAME " debug level"); module_param(max_interrupt_work, int, 0); -MODULE_PARM_DESC(max_interrupt_work, DRV_NAME " maximum events handled per interrupt"); +MODULE_PARM_DESC(max_interrupt_work, + DRV_NAME " maximum events handled per interrupt"); module_param(rx_copybreak, int, 0); -MODULE_PARM_DESC(rx_copybreak, DRV_NAME " copy breakpoint for copy-only-tiny-frames"); +MODULE_PARM_DESC(rx_copybreak, + DRV_NAME " copy breakpoint for copy-only-tiny-frames"); module_param(tx_start_pt, int, 0); MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)"); module_param(pcnet32vlb, int, 0); @@ -2490,7 +2713,9 @@ module_param_array(full_duplex, int, NULL, 0); MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)"); /* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */ module_param_array(homepna, int, NULL, 0); -MODULE_PARM_DESC(homepna, DRV_NAME " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet"); +MODULE_PARM_DESC(homepna, + DRV_NAME + " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet"); MODULE_AUTHOR("Thomas Bogendoerfer"); MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards"); @@ -2500,44 +2725,44 @@ MODULE_LICENSE("GPL"); static int __init pcnet32_init_module(void) { - printk(KERN_INFO "%s", version); + printk(KERN_INFO "%s", version); - pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT); + pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT); - if ((tx_start_pt >= 0) && (tx_start_pt <= 3)) - tx_start = tx_start_pt; + if ((tx_start_pt >= 0) && (tx_start_pt <= 3)) + tx_start = tx_start_pt; - /* find the PCI devices */ - if (!pci_module_init(&pcnet32_driver)) - pcnet32_have_pci = 1; + /* find the PCI devices */ + if (!pci_module_init(&pcnet32_driver)) + pcnet32_have_pci = 1; - /* should we find any remaining VLbus devices ? */ - if (pcnet32vlb) - pcnet32_probe_vlbus(); + /* should we find any remaining VLbus devices ? */ + if (pcnet32vlb) + pcnet32_probe_vlbus(); - if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE)) - printk(KERN_INFO PFX "%d cards_found.\n", cards_found); + if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE)) + printk(KERN_INFO PFX "%d cards_found.\n", cards_found); - return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV; + return (pcnet32_have_pci + cards_found) ? 
0 : -ENODEV; } static void __exit pcnet32_cleanup_module(void) { - struct net_device *next_dev; - - while (pcnet32_dev) { - struct pcnet32_private *lp = pcnet32_dev->priv; - next_dev = lp->next; - unregister_netdev(pcnet32_dev); - pcnet32_free_ring(pcnet32_dev); - release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); - pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); - free_netdev(pcnet32_dev); - pcnet32_dev = next_dev; - } + struct net_device *next_dev; + + while (pcnet32_dev) { + struct pcnet32_private *lp = pcnet32_dev->priv; + next_dev = lp->next; + unregister_netdev(pcnet32_dev); + pcnet32_free_ring(pcnet32_dev); + release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); + pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); + free_netdev(pcnet32_dev); + pcnet32_dev = next_dev; + } - if (pcnet32_have_pci) - pci_unregister_driver(&pcnet32_driver); + if (pcnet32_have_pci) + pci_unregister_driver(&pcnet32_driver); } module_init(pcnet32_init_module); diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 0245e40b51a1..b2073fce8216 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -46,6 +46,7 @@ #include <linux/rwsem.h> #include <linux/stddef.h> #include <linux/device.h> +#include <linux/mutex.h> #include <net/slhc_vj.h> #include <asm/atomic.h> @@ -198,11 +199,11 @@ static unsigned int cardmap_find_first_free(struct cardmap *map); static void cardmap_destroy(struct cardmap **map); /* - * all_ppp_sem protects the all_ppp_units mapping. + * all_ppp_mutex protects the all_ppp_units mapping. * It also ensures that finding a ppp unit in the all_ppp_units map * and updating its file.refcnt field is atomic. */ -static DECLARE_MUTEX(all_ppp_sem); +static DEFINE_MUTEX(all_ppp_mutex); static struct cardmap *all_ppp_units; static atomic_t ppp_unit_count = ATOMIC_INIT(0); @@ -804,7 +805,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, /* Attach to an existing ppp unit */ if (get_user(unit, p)) break; - down(&all_ppp_sem); + mutex_lock(&all_ppp_mutex); err = -ENXIO; ppp = ppp_find_unit(unit); if (ppp != 0) { @@ -812,7 +813,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, file->private_data = &ppp->file; err = 0; } - up(&all_ppp_sem); + mutex_unlock(&all_ppp_mutex); break; case PPPIOCATTCHAN: @@ -1691,8 +1692,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) || ppp->npmode[npi] != NPMODE_PASS) { kfree_skb(skb); } else { - skb_pull(skb, 2); /* chop off protocol */ - skb_postpull_rcsum(skb, skb->data - 2, 2); + /* chop off protocol */ + skb_pull_rcsum(skb, 2); skb->dev = ppp->dev; skb->protocol = htons(npindex_to_ethertype[npi]); skb->mac.raw = skb->data; @@ -2446,7 +2447,7 @@ ppp_create_interface(int unit, int *retp) dev->do_ioctl = ppp_net_ioctl; ret = -EEXIST; - down(&all_ppp_sem); + mutex_lock(&all_ppp_mutex); if (unit < 0) unit = cardmap_find_first_free(all_ppp_units); else if (cardmap_get(all_ppp_units, unit) != NULL) @@ -2465,12 +2466,12 @@ ppp_create_interface(int unit, int *retp) atomic_inc(&ppp_unit_count); cardmap_set(&all_ppp_units, unit, ppp); - up(&all_ppp_sem); + mutex_unlock(&all_ppp_mutex); *retp = 0; return ppp; out2: - up(&all_ppp_sem); + mutex_unlock(&all_ppp_mutex); free_netdev(dev); out1: kfree(ppp); @@ -2500,7 +2501,7 @@ static void ppp_shutdown_interface(struct ppp *ppp) { struct net_device *dev; - down(&all_ppp_sem); + mutex_lock(&all_ppp_mutex); ppp_lock(ppp); dev = ppp->dev; ppp->dev = NULL; @@ -2514,7 +2515,7 @@ static void 
ppp_shutdown_interface(struct ppp *ppp) ppp->file.dead = 1; ppp->owner = NULL; wake_up_interruptible(&ppp->file.rwait); - up(&all_ppp_sem); + mutex_unlock(&all_ppp_mutex); } /* @@ -2556,7 +2557,7 @@ static void ppp_destroy_interface(struct ppp *ppp) /* * Locate an existing ppp unit. - * The caller should have locked the all_ppp_sem. + * The caller should have locked the all_ppp_mutex. */ static struct ppp * ppp_find_unit(int unit) @@ -2601,7 +2602,7 @@ ppp_connect_channel(struct channel *pch, int unit) int ret = -ENXIO; int hdrlen; - down(&all_ppp_sem); + mutex_lock(&all_ppp_mutex); ppp = ppp_find_unit(unit); if (ppp == 0) goto out; @@ -2626,7 +2627,7 @@ ppp_connect_channel(struct channel *pch, int unit) outl: write_unlock_bh(&pch->upl); out: - up(&all_ppp_sem); + mutex_unlock(&all_ppp_mutex); return ret; } diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 9369f811075d..475dc930380f 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ -337,8 +337,7 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) if (sk->sk_state & PPPOX_BOUND) { struct pppoe_hdr *ph = (struct pppoe_hdr *) skb->nh.raw; int len = ntohs(ph->length); - skb_pull(skb, sizeof(struct pppoe_hdr)); - skb_postpull_rcsum(skb, ph, sizeof(*ph)); + skb_pull_rcsum(skb, sizeof(struct pppoe_hdr)); if (pskb_trim_rcsum(skb, len)) goto abort_kfree; diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index a1cb07cdb60f..253440a98022 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c @@ -128,6 +128,7 @@ static const struct mii_chip_info { { "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN }, { "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN }, { "Altimata AC101LF PHY", 0x0022, 0x5520, LAN }, + { "ADM 7001 LAN PHY", 0x002e, 0xcc60, LAN }, { "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN }, { "AMD 79C901 HomePNA PHY", 0x0000, 0x6B90, HOME}, { "ICS LAN PHY", 0x0015, 0xF440, LAN }, diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c index a4b2b6975d6c..0784f558ca9a 100644 --- a/drivers/net/skfp/fplustm.c +++ b/drivers/net/skfp/fplustm.c @@ -549,12 +549,12 @@ void formac_tx_restart(struct s_smc *smc) static void enable_formac(struct s_smc *smc) { /* set formac IMSK : 0 enables irq */ - outpw(FM_A(FM_IMSK1U),~mac_imsk1u) ; - outpw(FM_A(FM_IMSK1L),~mac_imsk1l) ; - outpw(FM_A(FM_IMSK2U),~mac_imsk2u) ; - outpw(FM_A(FM_IMSK2L),~mac_imsk2l) ; - outpw(FM_A(FM_IMSK3U),~mac_imsk3u) ; - outpw(FM_A(FM_IMSK3L),~mac_imsk3l) ; + outpw(FM_A(FM_IMSK1U),(unsigned short)~mac_imsk1u); + outpw(FM_A(FM_IMSK1L),(unsigned short)~mac_imsk1l); + outpw(FM_A(FM_IMSK2U),(unsigned short)~mac_imsk2u); + outpw(FM_A(FM_IMSK2L),(unsigned short)~mac_imsk2l); + outpw(FM_A(FM_IMSK3U),(unsigned short)~mac_imsk3u); + outpw(FM_A(FM_IMSK3L),(unsigned short)~mac_imsk3l); } #if 0 /* Removed because the driver should use the ASICs TX complete IRQ. 
*/ diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 25e028b7ce48..35dbf05c7f06 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -44,7 +44,7 @@ #include "skge.h" #define DRV_NAME "skge" -#define DRV_VERSION "1.3" +#define DRV_VERSION "1.5" #define PFX DRV_NAME " " #define DEFAULT_TX_RING_SIZE 128 @@ -104,7 +104,6 @@ static const int txqaddr[] = { Q_XA1, Q_XA2 }; static const int rxqaddr[] = { Q_R1, Q_R2 }; static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; -static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 }; static int skge_get_regs_len(struct net_device *dev) { @@ -358,7 +357,7 @@ static struct net_device_stats *skge_get_stats(struct net_device *dev) skge->net_stats.rx_bytes = data[1]; skge->net_stats.tx_packets = data[2] + data[4] + data[6]; skge->net_stats.rx_packets = data[3] + data[5] + data[7]; - skge->net_stats.multicast = data[5] + data[7]; + skge->net_stats.multicast = data[3] + data[5]; skge->net_stats.collisions = data[10]; skge->net_stats.tx_aborted_errors = data[12]; @@ -728,19 +727,18 @@ static struct ethtool_ops skge_ethtool_ops = { * Allocate ring elements and chain them together * One-to-one association of board descriptors with ring elements */ -static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base) +static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) { struct skge_tx_desc *d; struct skge_element *e; int i; - ring->start = kmalloc(sizeof(*e)*ring->count, GFP_KERNEL); + ring->start = kcalloc(sizeof(*e), ring->count, GFP_KERNEL); if (!ring->start) return -ENOMEM; for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) { e->desc = d; - e->skb = NULL; if (i == ring->count - 1) { e->next = ring->start; d->next_offset = base; @@ -783,7 +781,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, * Note: DMA address is not changed by chip. * MTU not changed while receiver active. 
*/ -static void skge_rx_reuse(struct skge_element *e, unsigned int size) +static inline void skge_rx_reuse(struct skge_element *e, unsigned int size) { struct skge_rx_desc *rd = e->desc; @@ -831,7 +829,7 @@ static int skge_rx_fill(struct skge_port *skge) do { struct sk_buff *skb; - skb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN); + skb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_KERNEL); if (!skb) return -ENOMEM; @@ -849,8 +847,7 @@ static void skge_link_up(struct skge_port *skge) LED_BLK_OFF|LED_SYNC_OFF|LED_ON); netif_carrier_on(skge->netdev); - if (skge->tx_avail > MAX_SKB_FRAGS + 1) - netif_wake_queue(skge->netdev); + netif_wake_queue(skge->netdev); if (netif_msg_link(skge)) printk(KERN_INFO PFX @@ -2157,7 +2154,7 @@ static int skge_up(struct net_device *dev) printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); if (dev->mtu > RX_BUF_SIZE) - skge->rx_buf_size = dev->mtu + ETH_HLEN + NET_IP_ALIGN; + skge->rx_buf_size = dev->mtu + ETH_HLEN; else skge->rx_buf_size = RX_BUF_SIZE; @@ -2169,27 +2166,29 @@ static int skge_up(struct net_device *dev) if (!skge->mem) return -ENOMEM; + BUG_ON(skge->dma & 7); + + if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { + printk(KERN_ERR PFX "pci_alloc_consistent region crosses 4G boundary\n"); + err = -EINVAL; + goto free_pci_mem; + } + memset(skge->mem, 0, skge->mem_size); - if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma))) + err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma); + if (err) goto free_pci_mem; err = skge_rx_fill(skge); if (err) goto free_rx_ring; - if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, - skge->dma + rx_size))) + err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, + skge->dma + rx_size); + if (err) goto free_rx_ring; - skge->tx_avail = skge->tx_ring.count - 1; - - /* Enable IRQ from port */ - spin_lock_irq(&hw->hw_lock); - hw->intr_mask |= portirqmask[port]; - skge_write32(hw, B0_IMSK, hw->intr_mask); - spin_unlock_irq(&hw->hw_lock); - /* Initialize MAC */ spin_lock_bh(&hw->phy_lock); if (hw->chip_id == CHIP_ID_GENESIS) @@ -2246,11 +2245,6 @@ static int skge_down(struct net_device *dev) else yukon_stop(skge); - spin_lock_irq(&hw->hw_lock); - hw->intr_mask &= ~portirqmask[skge->port]; - skge_write32(hw, B0_IMSK, hw->intr_mask); - spin_unlock_irq(&hw->hw_lock); - /* Stop transmitter */ skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), @@ -2297,6 +2291,12 @@ static int skge_down(struct net_device *dev) return 0; } +static inline int skge_avail(const struct skge_ring *ring) +{ + return ((ring->to_clean > ring->to_use) ? 
0 : ring->count) + + (ring->to_clean - ring->to_use) - 1; +} + static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) { struct skge_port *skge = netdev_priv(dev); @@ -2307,27 +2307,24 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) int i; u32 control, len; u64 map; - unsigned long flags; skb = skb_padto(skb, ETH_ZLEN); if (!skb) return NETDEV_TX_OK; - local_irq_save(flags); if (!spin_trylock(&skge->tx_lock)) { - /* Collision - tell upper layer to requeue */ - local_irq_restore(flags); - return NETDEV_TX_LOCKED; - } + /* Collision - tell upper layer to requeue */ + return NETDEV_TX_LOCKED; + } - if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) { + if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) { if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); printk(KERN_WARNING PFX "%s: ring full when queue awake!\n", dev->name); } - spin_unlock_irqrestore(&skge->tx_lock, flags); + spin_unlock(&skge->tx_lock); return NETDEV_TX_BUSY; } @@ -2396,49 +2393,51 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) dev->name, e - ring->start, skb->len); ring->to_use = e->next; - skge->tx_avail -= skb_shinfo(skb)->nr_frags + 1; - if (skge->tx_avail <= MAX_SKB_FRAGS + 1) { + if (skge_avail(&skge->tx_ring) <= MAX_SKB_FRAGS + 1) { pr_debug("%s: transmit queue full\n", dev->name); netif_stop_queue(dev); } + mmiowb(); + spin_unlock(&skge->tx_lock); + dev->trans_start = jiffies; - spin_unlock_irqrestore(&skge->tx_lock, flags); return NETDEV_TX_OK; } -static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e) +static void skge_tx_complete(struct skge_port *skge, struct skge_element *last) { - /* This ring element can be skb or fragment */ - if (e->skb) { - pci_unmap_single(hw->pdev, - pci_unmap_addr(e, mapaddr), - pci_unmap_len(e, maplen), - PCI_DMA_TODEVICE); - dev_kfree_skb_any(e->skb); + struct pci_dev *pdev = skge->hw->pdev; + struct skge_element *e; + + for (e = skge->tx_ring.to_clean; e != last; e = e->next) { + struct sk_buff *skb = e->skb; + int i; + e->skb = NULL; - } else { - pci_unmap_page(hw->pdev, - pci_unmap_addr(e, mapaddr), - pci_unmap_len(e, maplen), - PCI_DMA_TODEVICE); + pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr), + skb_headlen(skb), PCI_DMA_TODEVICE); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + e = e->next; + pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr), + skb_shinfo(skb)->frags[i].size, + PCI_DMA_TODEVICE); + } + + dev_kfree_skb(skb); } + skge->tx_ring.to_clean = e; } static void skge_tx_clean(struct skge_port *skge) { - struct skge_ring *ring = &skge->tx_ring; - struct skge_element *e; - unsigned long flags; - spin_lock_irqsave(&skge->tx_lock, flags); - for (e = ring->to_clean; e != ring->to_use; e = e->next) { - ++skge->tx_avail; - skge_tx_free(skge->hw, e); - } - ring->to_clean = e; - spin_unlock_irqrestore(&skge->tx_lock, flags); + spin_lock_bh(&skge->tx_lock); + skge_tx_complete(skge, skge->tx_ring.to_use); + netif_wake_queue(skge->netdev); + spin_unlock_bh(&skge->tx_lock); } static void skge_tx_timeout(struct net_device *dev) @@ -2597,7 +2596,7 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge, goto error; if (len < RX_COPY_THRESHOLD) { - skb = dev_alloc_skb(len + 2); + skb = alloc_skb(len + 2, GFP_ATOMIC); if (!skb) goto resubmit; @@ -2612,10 +2611,11 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge, skge_rx_reuse(e, skge->rx_buf_size); } else { struct sk_buff *nskb; - nskb = dev_alloc_skb(skge->rx_buf_size + 
NET_IP_ALIGN); + nskb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_ATOMIC); if (!nskb) goto resubmit; + skb_reserve(nskb, NET_IP_ALIGN); pci_unmap_single(skge->hw->pdev, pci_unmap_addr(e, mapaddr), pci_unmap_len(e, maplen), @@ -2663,6 +2663,36 @@ resubmit: return NULL; } +static void skge_tx_done(struct skge_port *skge) +{ + struct skge_ring *ring = &skge->tx_ring; + struct skge_element *e, *last; + + spin_lock(&skge->tx_lock); + last = ring->to_clean; + for (e = ring->to_clean; e != ring->to_use; e = e->next) { + struct skge_tx_desc *td = e->desc; + + if (td->control & BMU_OWN) + break; + + if (td->control & BMU_EOF) { + last = e->next; + if (unlikely(netif_msg_tx_done(skge))) + printk(KERN_DEBUG PFX "%s: tx done slot %td\n", + skge->netdev->name, e - ring->start); + } + } + + skge_tx_complete(skge, last); + + skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); + + if (skge_avail(&skge->tx_ring) > MAX_SKB_FRAGS + 1) + netif_wake_queue(skge->netdev); + + spin_unlock(&skge->tx_lock); +} static int skge_poll(struct net_device *dev, int *budget) { @@ -2670,8 +2700,10 @@ static int skge_poll(struct net_device *dev, int *budget) struct skge_hw *hw = skge->hw; struct skge_ring *ring = &skge->rx_ring; struct skge_element *e; - unsigned int to_do = min(dev->quota, *budget); - unsigned int work_done = 0; + int to_do = min(dev->quota, *budget); + int work_done = 0; + + skge_tx_done(skge); for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) { struct skge_rx_desc *rd = e->desc; @@ -2683,15 +2715,14 @@ static int skge_poll(struct net_device *dev, int *budget) if (control & BMU_OWN) break; - skb = skge_rx_get(skge, e, control, rd->status, - le16_to_cpu(rd->csum2)); + skb = skge_rx_get(skge, e, control, rd->status, + le16_to_cpu(rd->csum2)); if (likely(skb)) { dev->last_rx = jiffies; netif_receive_skb(skb); ++work_done; - } else - skge_rx_reuse(e, skge->rx_buf_size); + } } ring->to_clean = e; @@ -2705,49 +2736,15 @@ static int skge_poll(struct net_device *dev, int *budget) if (work_done >= to_do) return 1; /* not done */ - spin_lock_irq(&hw->hw_lock); - __netif_rx_complete(dev); - hw->intr_mask |= portirqmask[skge->port]; + netif_rx_complete(dev); + mmiowb(); + + hw->intr_mask |= skge->port == 0 ? (IS_R1_F|IS_XA1_F) : (IS_R2_F|IS_XA2_F); skge_write32(hw, B0_IMSK, hw->intr_mask); - spin_unlock_irq(&hw->hw_lock); return 0; } -static inline void skge_tx_intr(struct net_device *dev) -{ - struct skge_port *skge = netdev_priv(dev); - struct skge_hw *hw = skge->hw; - struct skge_ring *ring = &skge->tx_ring; - struct skge_element *e; - - spin_lock(&skge->tx_lock); - for (e = ring->to_clean; prefetch(e->next), e != ring->to_use; e = e->next) { - struct skge_tx_desc *td = e->desc; - u32 control; - - rmb(); - control = td->control; - if (control & BMU_OWN) - break; - - if (unlikely(netif_msg_tx_done(skge))) - printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n", - dev->name, e - ring->start, td->status); - - skge_tx_free(hw, e); - e->skb = NULL; - ++skge->tx_avail; - } - ring->to_clean = e; - skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); - - if (skge->tx_avail > MAX_SKB_FRAGS + 1) - netif_wake_queue(dev); - - spin_unlock(&skge->tx_lock); -} - /* Parity errors seem to happen when Genesis is connected to a switch * with no other ports present. Heartbeat error?? */ @@ -2770,17 +2767,6 @@ static void skge_mac_parity(struct skge_hw *hw, int port) ? 
GMF_CLI_TX_FC : GMF_CLI_TX_PE); } -static void skge_pci_clear(struct skge_hw *hw) -{ - u16 status; - - pci_read_config_word(hw->pdev, PCI_STATUS, &status); - skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); - pci_write_config_word(hw->pdev, PCI_STATUS, - status | PCI_STATUS_ERROR_BITS); - skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); -} - static void skge_mac_intr(struct skge_hw *hw, int port) { if (hw->chip_id == CHIP_ID_GENESIS) @@ -2822,23 +2808,39 @@ static void skge_error_irq(struct skge_hw *hw) if (hwstatus & IS_M2_PAR_ERR) skge_mac_parity(hw, 1); - if (hwstatus & IS_R1_PAR_ERR) + if (hwstatus & IS_R1_PAR_ERR) { + printk(KERN_ERR PFX "%s: receive queue parity error\n", + hw->dev[0]->name); skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P); + } - if (hwstatus & IS_R2_PAR_ERR) + if (hwstatus & IS_R2_PAR_ERR) { + printk(KERN_ERR PFX "%s: receive queue parity error\n", + hw->dev[1]->name); skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P); + } if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) { - printk(KERN_ERR PFX "hardware error detected (status 0x%x)\n", - hwstatus); + u16 pci_status, pci_cmd; - skge_pci_clear(hw); + pci_read_config_word(hw->pdev, PCI_COMMAND, &pci_cmd); + pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status); + + printk(KERN_ERR PFX "%s: PCI error cmd=%#x status=%#x\n", + pci_name(hw->pdev), pci_cmd, pci_status); + + /* Write the error bits back to clear them. */ + pci_status &= PCI_STATUS_ERROR_BITS; + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + pci_write_config_word(hw->pdev, PCI_COMMAND, + pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY); + pci_write_config_word(hw->pdev, PCI_STATUS, pci_status); + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); /* if error still set then just ignore it */ hwstatus = skge_read32(hw, B0_HWE_ISRC); if (hwstatus & IS_IRQ_STAT) { - pr_debug("IRQ status %x: still set ignoring hardware errors\n", - hwstatus); + printk(KERN_INFO PFX "unable to clear error (so ignoring them)\n"); hw->intr_mask &= ~IS_HW_ERR; } } @@ -2855,12 +2857,11 @@ static void skge_extirq(unsigned long data) int port; spin_lock(&hw->phy_lock); - for (port = 0; port < 2; port++) { + for (port = 0; port < hw->ports; port++) { struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); - if (dev && netif_running(dev)) { - struct skge_port *skge = netdev_priv(dev); - + if (netif_running(dev)) { if (hw->chip_id != CHIP_ID_GENESIS) yukon_phy_intr(skge); else @@ -2869,38 +2870,39 @@ static void skge_extirq(unsigned long data) } spin_unlock(&hw->phy_lock); - spin_lock_irq(&hw->hw_lock); hw->intr_mask |= IS_EXT_REG; skge_write32(hw, B0_IMSK, hw->intr_mask); - spin_unlock_irq(&hw->hw_lock); } static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) { struct skge_hw *hw = dev_id; - u32 status = skge_read32(hw, B0_SP_ISRC); + u32 status; - if (status == 0 || status == ~0) /* hotplug or shared irq */ + /* Reading this register masks IRQ */ + status = skge_read32(hw, B0_SP_ISRC); + if (status == 0) return IRQ_NONE; - spin_lock(&hw->hw_lock); - if (status & IS_R1_F) { + if (status & IS_EXT_REG) { + hw->intr_mask &= ~IS_EXT_REG; + tasklet_schedule(&hw->ext_tasklet); + } + + if (status & (IS_R1_F|IS_XA1_F)) { skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F); - hw->intr_mask &= ~IS_R1_F; + hw->intr_mask &= ~(IS_R1_F|IS_XA1_F); netif_rx_schedule(hw->dev[0]); } - if (status & IS_R2_F) { + if (status & (IS_R2_F|IS_XA2_F)) { skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F); - hw->intr_mask &= ~IS_R2_F; + hw->intr_mask &= ~(IS_R2_F|IS_XA2_F); 
netif_rx_schedule(hw->dev[1]); } - if (status & IS_XA1_F) - skge_tx_intr(hw->dev[0]); - - if (status & IS_XA2_F) - skge_tx_intr(hw->dev[1]); + if (likely((status & hw->intr_mask) == 0)) + return IRQ_HANDLED; if (status & IS_PA_TO_RX1) { struct skge_port *skge = netdev_priv(hw->dev[0]); @@ -2929,13 +2931,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) if (status & IS_HW_ERR) skge_error_irq(hw); - if (status & IS_EXT_REG) { - hw->intr_mask &= ~IS_EXT_REG; - tasklet_schedule(&hw->ext_tasklet); - } - skge_write32(hw, B0_IMSK, hw->intr_mask); - spin_unlock(&hw->hw_lock); return IRQ_HANDLED; } @@ -3010,7 +3006,7 @@ static const char *skge_board_name(const struct skge_hw *hw) static int skge_reset(struct skge_hw *hw) { u32 reg; - u16 ctst; + u16 ctst, pci_status; u8 t8, mac_cfg, pmd_type, phy_type; int i; @@ -3021,8 +3017,13 @@ static int skge_reset(struct skge_hw *hw) skge_write8(hw, B0_CTST, CS_RST_CLR); /* clear PCI errors, if any */ - skge_pci_clear(hw); + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + skge_write8(hw, B2_TST_CTRL2, 0); + pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status); + pci_write_config_word(hw->pdev, PCI_STATUS, + pci_status | PCI_STATUS_ERROR_BITS); + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); skge_write8(hw, B0_CTST, CS_MRST_CLR); /* restore CLK_RUN bits (for Yukon-Lite) */ @@ -3081,7 +3082,10 @@ static int skge_reset(struct skge_hw *hw) else hw->ram_size = t8 * 4096; - hw->intr_mask = IS_HW_ERR | IS_EXT_REG; + hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1; + if (hw->ports > 1) + hw->intr_mask |= IS_PORT_2; + if (hw->chip_id == CHIP_ID_GENESIS) genesis_init(hw); else { @@ -3251,13 +3255,15 @@ static int __devinit skge_probe(struct pci_dev *pdev, struct skge_hw *hw; int err, using_dac = 0; - if ((err = pci_enable_device(pdev))) { + err = pci_enable_device(pdev); + if (err) { printk(KERN_ERR PFX "%s cannot enable PCI device\n", pci_name(pdev)); goto err_out; } - if ((err = pci_request_regions(pdev, DRV_NAME))) { + err = pci_request_regions(pdev, DRV_NAME); + if (err) { printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", pci_name(pdev)); goto err_out_disable_pdev; @@ -3265,22 +3271,18 @@ static int __devinit skge_probe(struct pci_dev *pdev, pci_set_master(pdev); - if (sizeof(dma_addr_t) > sizeof(u32) && - !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { + if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { using_dac = 1; err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); - if (err < 0) { - printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA " - "for consistent allocations\n", pci_name(pdev)); - goto err_out_free_regions; - } - } else { - err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); - if (err) { - printk(KERN_ERR PFX "%s no usable DMA configuration\n", - pci_name(pdev)); - goto err_out_free_regions; - } + } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { + using_dac = 0; + err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + } + + if (err) { + printk(KERN_ERR PFX "%s no usable DMA configuration\n", + pci_name(pdev)); + goto err_out_free_regions; } #ifdef __BIG_ENDIAN @@ -3304,7 +3306,6 @@ static int __devinit skge_probe(struct pci_dev *pdev, hw->pdev = pdev; spin_lock_init(&hw->phy_lock); - spin_lock_init(&hw->hw_lock); tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw); hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); @@ -3314,7 +3315,8 @@ static int __devinit skge_probe(struct pci_dev *pdev, goto err_out_free_hw; } - if ((err = request_irq(pdev->irq, skge_intr, 
SA_SHIRQ, DRV_NAME, hw))) { + err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw); + if (err) { printk(KERN_ERR PFX "%s: cannot assign irq %d\n", pci_name(pdev), pdev->irq); goto err_out_iounmap; @@ -3332,7 +3334,8 @@ static int __devinit skge_probe(struct pci_dev *pdev, if ((dev = skge_devinit(hw, 0, using_dac)) == NULL) goto err_out_led_off; - if ((err = register_netdev(dev))) { + err = register_netdev(dev); + if (err) { printk(KERN_ERR PFX "%s: cannot register net device\n", pci_name(pdev)); goto err_out_free_netdev; @@ -3387,7 +3390,6 @@ static void __devexit skge_remove(struct pci_dev *pdev) skge_write32(hw, B0_IMSK, 0); skge_write16(hw, B0_LED, LED_STAT_OFF); - skge_pci_clear(hw); skge_write8(hw, B0_CTST, CS_RST_SET); tasklet_kill(&hw->ext_tasklet); diff --git a/drivers/net/skge.h b/drivers/net/skge.h index 941f12a333b6..1f1ce88c8186 100644 --- a/drivers/net/skge.h +++ b/drivers/net/skge.h @@ -2402,7 +2402,6 @@ struct skge_hw { struct tasklet_struct ext_tasklet; spinlock_t phy_lock; - spinlock_t hw_lock; }; enum { @@ -2419,7 +2418,6 @@ struct skge_port { int port; spinlock_t tx_lock; - u32 tx_avail; struct skge_ring tx_ring; struct skge_ring rx_ring; diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 73260364cba3..68f9c206a620 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -51,7 +51,7 @@ #include "sky2.h" #define DRV_NAME "sky2" -#define DRV_VERSION "0.15" +#define DRV_VERSION "1.1" #define PFX DRV_NAME " " /* @@ -61,10 +61,6 @@ * a receive requires one (or two if using 64 bit dma). */ -#define is_ec_a1(hw) \ - unlikely((hw)->chip_id == CHIP_ID_YUKON_EC && \ - (hw)->chip_rev == CHIP_REV_YU_EC_A1) - #define RX_LE_SIZE 512 #define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le)) #define RX_MAX_PENDING (RX_LE_SIZE/2 - 2) @@ -96,6 +92,10 @@ static int copybreak __read_mostly = 256; module_param(copybreak, int, 0); MODULE_PARM_DESC(copybreak, "Receive copy threshold"); +static int disable_msi = 0; +module_param(disable_msi, int, 0); +MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); + static const struct pci_device_id sky2_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, @@ -504,9 +504,9 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) /* Force a renegotiation */ static void sky2_phy_reinit(struct sky2_port *sky2) { - down(&sky2->phy_sema); + spin_lock_bh(&sky2->phy_lock); sky2_phy_init(sky2->hw, sky2->port); - up(&sky2->phy_sema); + spin_unlock_bh(&sky2->phy_lock); } static void sky2_mac_init(struct sky2_hw *hw, unsigned port) @@ -571,9 +571,9 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); - down(&sky2->phy_sema); + spin_lock_bh(&sky2->phy_lock); sky2_phy_init(hw, port); - up(&sky2->phy_sema); + spin_unlock_bh(&sky2->phy_lock); /* MIB clear */ reg = gma_read16(hw, port, GM_PHY_ADDR); @@ -725,37 +725,11 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2) return le; } -/* - * This is a workaround code taken from SysKonnect sk98lin driver - * to deal with chip bug on Yukon EC rev 0 in the wraparound case. 
- */ -static void sky2_put_idx(struct sky2_hw *hw, unsigned q, - u16 idx, u16 *last, u16 size) +/* Update chip's next pointer */ +static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx) { wmb(); - if (is_ec_a1(hw) && idx < *last) { - u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX)); - - if (hwget == 0) { - /* Start prefetching again */ - sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 0xe0); - goto setnew; - } - - if (hwget == size - 1) { - /* set watermark to one list element */ - sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 8); - - /* set put index to first list element */ - sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), 0); - } else /* have hardware go to end of list */ - sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), - size - 1); - } else { -setnew: - sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); - } - *last = idx; + sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); mmiowb(); } @@ -878,7 +852,7 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (!netif_running(dev)) return -ENODEV; /* Phy still in reset */ - switch(cmd) { + switch (cmd) { case SIOCGMIIPHY: data->phy_id = PHY_ADDR_MARV; @@ -886,9 +860,9 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIREG: { u16 val = 0; - down(&sky2->phy_sema); + spin_lock_bh(&sky2->phy_lock); err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val); - up(&sky2->phy_sema); + spin_unlock_bh(&sky2->phy_lock); data->val_out = val; break; @@ -898,10 +872,10 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (!capable(CAP_NET_ADMIN)) return -EPERM; - down(&sky2->phy_sema); + spin_lock_bh(&sky2->phy_lock); err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f, data->val_in); - up(&sky2->phy_sema); + spin_unlock_bh(&sky2->phy_lock); break; } return err; @@ -1001,7 +975,6 @@ static int sky2_rx_start(struct sky2_port *sky2) /* Tell chip about available buffers */ sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put); - sky2->rx_last_put = sky2_read16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX)); return 0; nomem: sky2_rx_clean(sky2); @@ -1014,7 +987,7 @@ static int sky2_up(struct net_device *dev) struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; - u32 ramsize, rxspace; + u32 ramsize, rxspace, imask; int err = -ENOMEM; if (netif_msg_ifup(sky2)) @@ -1079,10 +1052,10 @@ static int sky2_up(struct net_device *dev) goto err_out; /* Enable interrupts from phy/mac for port */ - spin_lock_irq(&hw->hw_lock); - hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; - sky2_write32(hw, B0_IMSK, hw->intr_mask); - spin_unlock_irq(&hw->hw_lock); + imask = sky2_read32(hw, B0_IMSK); + imask |= (port == 0) ? 
Y2_IS_PORT_1 : Y2_IS_PORT_2; + sky2_write32(hw, B0_IMSK, imask); + return 0; err_out: @@ -1202,7 +1175,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) /* just drop the packet if non-linear expansion fails */ if (skb_header_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { - dev_kfree_skb_any(skb); + dev_kfree_skb(skb); goto out_unlock; } @@ -1299,8 +1272,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); } - sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod, - &sky2->tx_last_put, TX_RING_SIZE); + sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod); out_unlock: spin_unlock(&sky2->tx_lock); @@ -1332,7 +1304,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) struct tx_ring_info *re = sky2->tx_ring + put; struct sk_buff *skb = re->skb; - nxt = re->idx; + nxt = re->idx; BUG_ON(nxt >= TX_RING_SIZE); prefetch(sky2->tx_ring + nxt); @@ -1348,15 +1320,15 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) struct tx_ring_info *fre; fre = sky2->tx_ring + (put + i + 1) % TX_RING_SIZE; pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr), - skb_shinfo(skb)->frags[i].size, + skb_shinfo(skb)->frags[i].size, PCI_DMA_TODEVICE); } - dev_kfree_skb_any(skb); + dev_kfree_skb(skb); } sky2->tx_cons = put; - if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE) + if (tx_avail(sky2) > MAX_SKB_TX_LE) netif_wake_queue(dev); } @@ -1375,6 +1347,7 @@ static int sky2_down(struct net_device *dev) struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; u16 ctrl; + u32 imask; /* Never really got started! */ if (!sky2->tx_le) @@ -1386,14 +1359,6 @@ static int sky2_down(struct net_device *dev) /* Stop more packets from being queued */ netif_stop_queue(dev); - /* Disable port IRQ */ - spin_lock_irq(&hw->hw_lock); - hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2); - sky2_write32(hw, B0_IMSK, hw->intr_mask); - spin_unlock_irq(&hw->hw_lock); - - flush_scheduled_work(); - sky2_phy_reset(hw, port); /* Stop transmitter */ @@ -1437,6 +1402,11 @@ static int sky2_down(struct net_device *dev) sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); + /* Disable port IRQ */ + imask = sky2_read32(hw, B0_IMSK); + imask &= ~(sky2->port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; + sky2_write32(hw, B0_IMSK, imask); + /* turn off LED's */ sky2_write16(hw, B0_Y2LED, LED_STAT_OFF); @@ -1631,20 +1601,19 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux) return 0; } -/* - * Interrupt from PHY are handled outside of interrupt context - * because accessing phy registers requires spin wait which might - * cause excess interrupt latency. 
- */ -static void sky2_phy_task(void *arg) +/* Interrupt from PHY */ +static void sky2_phy_intr(struct sky2_hw *hw, unsigned port) { - struct sky2_port *sky2 = arg; - struct sky2_hw *hw = sky2->hw; + struct net_device *dev = hw->dev[port]; + struct sky2_port *sky2 = netdev_priv(dev); u16 istatus, phystat; - down(&sky2->phy_sema); - istatus = gm_phy_read(hw, sky2->port, PHY_MARV_INT_STAT); - phystat = gm_phy_read(hw, sky2->port, PHY_MARV_PHY_STAT); + spin_lock(&sky2->phy_lock); + istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); + phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); + + if (!netif_running(dev)) + goto out; if (netif_msg_intr(sky2)) printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n", @@ -1670,12 +1639,7 @@ static void sky2_phy_task(void *arg) sky2_link_down(sky2); } out: - up(&sky2->phy_sema); - - spin_lock_irq(&hw->hw_lock); - hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2; - sky2_write32(hw, B0_IMSK, hw->intr_mask); - spin_unlock_irq(&hw->hw_lock); + spin_unlock(&sky2->phy_lock); } @@ -1687,31 +1651,40 @@ static void sky2_tx_timeout(struct net_device *dev) struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned txq = txqaddr[sky2->port]; - u16 ridx; - - /* Maybe we just missed an status interrupt */ - spin_lock(&sky2->tx_lock); - ridx = sky2_read16(hw, - sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX); - sky2_tx_complete(sky2, ridx); - spin_unlock(&sky2->tx_lock); - - if (!netif_queue_stopped(dev)) { - if (net_ratelimit()) - pr_info(PFX "transmit interrupt missed? recovered\n"); - return; - } + u16 report, done; if (netif_msg_timer(sky2)) printk(KERN_ERR PFX "%s: tx timeout\n", dev->name); - sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP); - sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); + report = sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX); + done = sky2_read16(hw, Q_ADDR(txq, Q_DONE)); - sky2_tx_clean(sky2); + printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n", + dev->name, + sky2->tx_cons, sky2->tx_prod, report, done); + + if (report != done) { + printk(KERN_INFO PFX "status burst pending (irq moderation?)\n"); + + sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP); + sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); + } else if (report != sky2->tx_cons) { + printk(KERN_INFO PFX "status report lost?\n"); + + spin_lock_bh(&sky2->tx_lock); + sky2_tx_complete(sky2, report); + spin_unlock_bh(&sky2->tx_lock); + } else { + printk(KERN_INFO PFX "hardware hung? 
flushing\n"); - sky2_qset(hw, txq); - sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1); + sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP); + sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); + + sky2_tx_clean(sky2); + + sky2_qset(hw, txq); + sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1); + } } @@ -1730,6 +1703,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) struct sky2_hw *hw = sky2->hw; int err; u16 ctl, mode; + u32 imask; if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) return -EINVAL; @@ -1742,12 +1716,15 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) return 0; } + imask = sky2_read32(hw, B0_IMSK); sky2_write32(hw, B0_IMSK, 0); dev->trans_start = jiffies; /* prevent tx timeout */ netif_stop_queue(dev); netif_poll_disable(hw->dev[0]); + synchronize_irq(hw->pdev->irq); + ctl = gma_read16(hw, sky2->port, GM_GP_CTRL); gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); sky2_rx_stop(sky2); @@ -1766,7 +1743,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD); err = sky2_rx_start(sky2); - sky2_write32(hw, B0_IMSK, hw->intr_mask); + sky2_write32(hw, B0_IMSK, imask); if (err) dev_close(dev); @@ -1843,8 +1820,7 @@ resubmit: sky2_rx_add(sky2, re->mapaddr); /* Tell receiver about new buffers. */ - sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put, - &sky2->rx_last_put, RX_LE_SIZE); + sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put); return skb; @@ -1871,76 +1847,51 @@ error: goto resubmit; } -/* - * Check for transmit complete - */ -#define TX_NO_STATUS 0xffff - -static void sky2_tx_check(struct sky2_hw *hw, int port, u16 last) +/* Transmit complete */ +static inline void sky2_tx_done(struct net_device *dev, u16 last) { - if (last != TX_NO_STATUS) { - struct net_device *dev = hw->dev[port]; - if (dev && netif_running(dev)) { - struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_port *sky2 = netdev_priv(dev); - spin_lock(&sky2->tx_lock); - sky2_tx_complete(sky2, last); - spin_unlock(&sky2->tx_lock); - } + if (netif_running(dev)) { + spin_lock(&sky2->tx_lock); + sky2_tx_complete(sky2, last); + spin_unlock(&sky2->tx_lock); } } -/* - * Both ports share the same status interrupt, therefore there is only - * one poll routine. - */ -static int sky2_poll(struct net_device *dev0, int *budget) +/* Process status response ring */ +static int sky2_status_intr(struct sky2_hw *hw, int to_do) { - struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw; - unsigned int to_do = min(dev0->quota, *budget); - unsigned int work_done = 0; - u16 hwidx; - u16 tx_done[2] = { TX_NO_STATUS, TX_NO_STATUS }; - - sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); - - /* - * Kick the STAT_LEV_TIMER_CTRL timer. - * This fixes my hangs on Yukon-EC (0xb6) rev 1. - * The if clause is there to start the timer only if it has been - * configured correctly and not been disabled via ethtool. 
- */ - if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_START) { - sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP); - sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START); - } + int work_done = 0; - hwidx = sky2_read16(hw, STAT_PUT_IDX); - BUG_ON(hwidx >= STATUS_RING_SIZE); rmb(); - while (hwidx != hw->st_idx) { + for(;;) { struct sky2_status_le *le = hw->st_le + hw->st_idx; struct net_device *dev; struct sky2_port *sky2; struct sk_buff *skb; u32 status; u16 length; + u8 link, opcode; + + opcode = le->opcode; + if (!opcode) + break; + opcode &= ~HW_OWNER; - le = hw->st_le + hw->st_idx; hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE; - prefetch(hw->st_le + hw->st_idx); + le->opcode = 0; - BUG_ON(le->link >= 2); - dev = hw->dev[le->link]; - if (dev == NULL || !netif_running(dev)) - continue; + link = le->link; + BUG_ON(link >= 2); + dev = hw->dev[link]; sky2 = netdev_priv(dev); - status = le32_to_cpu(le->status); - length = le16_to_cpu(le->length); + length = le->length; + status = le->status; - switch (le->opcode & ~HW_OWNER) { + switch (opcode) { case OP_RXSTAT: skb = sky2_receive(sky2, length, status); if (!skb) @@ -1980,42 +1931,23 @@ static int sky2_poll(struct net_device *dev0, int *budget) case OP_TXINDEXLE: /* TX index reports status for both ports */ - tx_done[0] = status & 0xffff; - tx_done[1] = ((status >> 24) & 0xff) - | (u16)(length & 0xf) << 8; + sky2_tx_done(hw->dev[0], status & 0xffff); + if (hw->dev[1]) + sky2_tx_done(hw->dev[1], + ((status >> 24) & 0xff) + | (u16)(length & 0xf) << 8); break; default: if (net_ratelimit()) printk(KERN_WARNING PFX - "unknown status opcode 0x%x\n", le->opcode); + "unknown status opcode 0x%x\n", opcode); break; } } exit_loop: - sky2_tx_check(hw, 0, tx_done[0]); - sky2_tx_check(hw, 1, tx_done[1]); - - if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) { - sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP); - sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); - } - - if (likely(work_done < to_do)) { - spin_lock_irq(&hw->hw_lock); - __netif_rx_complete(dev0); - - hw->intr_mask |= Y2_IS_STAT_BMU; - sky2_write32(hw, B0_IMSK, hw->intr_mask); - spin_unlock_irq(&hw->hw_lock); - - return 0; - } else { - *budget -= work_done; - dev0->quota -= work_done; - return 1; - } + return work_done; } static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status) @@ -2134,57 +2066,97 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port) } } -static void sky2_phy_intr(struct sky2_hw *hw, unsigned port) +/* This should never happen it is a fatal situation */ +static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port, + const char *rxtx, u32 mask) { struct net_device *dev = hw->dev[port]; struct sky2_port *sky2 = netdev_priv(dev); + u32 imask; - hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2); - sky2_write32(hw, B0_IMSK, hw->intr_mask); + printk(KERN_ERR PFX "%s: %s descriptor error (hardware problem)\n", + dev ? 
dev->name : "<not registered>", rxtx); - schedule_work(&sky2->phy_task); + imask = sky2_read32(hw, B0_IMSK); + imask &= ~mask; + sky2_write32(hw, B0_IMSK, imask); + + if (dev) { + spin_lock(&sky2->phy_lock); + sky2_link_down(sky2); + spin_unlock(&sky2->phy_lock); + } } -static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs) +static int sky2_poll(struct net_device *dev0, int *budget) { - struct sky2_hw *hw = dev_id; - struct net_device *dev0 = hw->dev[0]; - u32 status; + struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw; + int work_limit = min(dev0->quota, *budget); + int work_done = 0; + u32 status = sky2_read32(hw, B0_Y2_SP_EISR); - status = sky2_read32(hw, B0_Y2_SP_ISRC2); - if (status == 0 || status == ~0) - return IRQ_NONE; + if (unlikely(status & ~Y2_IS_STAT_BMU)) { + if (status & Y2_IS_HW_ERR) + sky2_hw_intr(hw); - spin_lock(&hw->hw_lock); - if (status & Y2_IS_HW_ERR) - sky2_hw_intr(hw); + if (status & Y2_IS_IRQ_PHY1) + sky2_phy_intr(hw, 0); - /* Do NAPI for Rx and Tx status */ - if (status & Y2_IS_STAT_BMU) { - hw->intr_mask &= ~Y2_IS_STAT_BMU; - sky2_write32(hw, B0_IMSK, hw->intr_mask); + if (status & Y2_IS_IRQ_PHY2) + sky2_phy_intr(hw, 1); - if (likely(__netif_rx_schedule_prep(dev0))) { - prefetch(&hw->st_le[hw->st_idx]); - __netif_rx_schedule(dev0); - } + if (status & Y2_IS_IRQ_MAC1) + sky2_mac_intr(hw, 0); + + if (status & Y2_IS_IRQ_MAC2) + sky2_mac_intr(hw, 1); + + if (status & Y2_IS_CHK_RX1) + sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1); + + if (status & Y2_IS_CHK_RX2) + sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2); + + if (status & Y2_IS_CHK_TXA1) + sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1); + + if (status & Y2_IS_CHK_TXA2) + sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2); } - if (status & Y2_IS_IRQ_PHY1) - sky2_phy_intr(hw, 0); + if (status & Y2_IS_STAT_BMU) { + work_done = sky2_status_intr(hw, work_limit); + *budget -= work_done; + dev0->quota -= work_done; + + if (work_done >= work_limit) + return 1; - if (status & Y2_IS_IRQ_PHY2) - sky2_phy_intr(hw, 1); + sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); + } - if (status & Y2_IS_IRQ_MAC1) - sky2_mac_intr(hw, 0); + netif_rx_complete(dev0); - if (status & Y2_IS_IRQ_MAC2) - sky2_mac_intr(hw, 1); + status = sky2_read32(hw, B0_Y2_SP_LISR); + return 0; +} - sky2_write32(hw, B0_Y2_SP_ICR, 2); +static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs) +{ + struct sky2_hw *hw = dev_id; + struct net_device *dev0 = hw->dev[0]; + u32 status; + + /* Reading this mask interrupts as side effect */ + status = sky2_read32(hw, B0_Y2_SP_ISRC2); + if (status == 0 || status == ~0) + return IRQ_NONE; - spin_unlock(&hw->hw_lock); + prefetch(&hw->st_le[hw->st_idx]); + if (likely(__netif_rx_schedule_prep(dev0))) + __netif_rx_schedule(dev0); + else + printk(KERN_DEBUG PFX "irq race detected\n"); return IRQ_HANDLED; } @@ -2238,6 +2210,23 @@ static int sky2_reset(struct sky2_hw *hw) return -EOPNOTSUPP; } + hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4; + + /* This rev is really old, and requires untested workarounds */ + if (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == CHIP_REV_YU_EC_A1) { + printk(KERN_ERR PFX "%s: unsupported revision Yukon-%s (0x%x) rev %d\n", + pci_name(hw->pdev), yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL], + hw->chip_id, hw->chip_rev); + return -EOPNOTSUPP; + } + + /* This chip is new and not tested yet */ + if (hw->chip_id == CHIP_ID_YUKON_EC_U) { + pr_info(PFX "%s: is a version of Yukon 2 chipset that has 
not been tested yet.\n", + pci_name(hw->pdev)); + pr_info("Please report success/failure to maintainer <shemminger@osdl.org>\n"); + } + /* disable ASF */ if (hw->chip_id <= CHIP_ID_YUKON_EC) { sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); @@ -2258,7 +2247,7 @@ static int sky2_reset(struct sky2_hw *hw) sky2_write8(hw, B0_CTST, CS_MRST_CLR); /* clear any PEX errors */ - if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) + if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL); @@ -2271,7 +2260,6 @@ static int sky2_reset(struct sky2_hw *hw) if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC)) ++hw->ports; } - hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4; sky2_set_power_state(hw, PCI_D0); @@ -2337,30 +2325,18 @@ static int sky2_reset(struct sky2_hw *hw) /* Set the list last index */ sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1); - /* These status setup values are copied from SysKonnect's driver */ - if (is_ec_a1(hw)) { - /* WA for dev. #4.3 */ - sky2_write16(hw, STAT_TX_IDX_TH, 0xfff); /* Tx Threshold */ + sky2_write16(hw, STAT_TX_IDX_TH, 10); + sky2_write8(hw, STAT_FIFO_WM, 16); - /* set Status-FIFO watermark */ - sky2_write8(hw, STAT_FIFO_WM, 0x21); /* WA for dev. #4.18 */ - - /* set Status-FIFO ISR watermark */ - sky2_write8(hw, STAT_FIFO_ISR_WM, 0x07); /* WA for dev. #4.18 */ - sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 10000)); - } else { - sky2_write16(hw, STAT_TX_IDX_TH, 10); - sky2_write8(hw, STAT_FIFO_WM, 16); - - /* set Status-FIFO ISR watermark */ - if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0) - sky2_write8(hw, STAT_FIFO_ISR_WM, 4); - else - sky2_write8(hw, STAT_FIFO_ISR_WM, 16); + /* set Status-FIFO ISR watermark */ + if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0) + sky2_write8(hw, STAT_FIFO_ISR_WM, 4); + else + sky2_write8(hw, STAT_FIFO_ISR_WM, 16); - sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000)); - sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 7)); - } + sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000)); + sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20)); + sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100)); /* enable status unit */ sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON); @@ -2502,17 +2478,34 @@ static const struct sky2_stat { { "rx_unicast", GM_RXF_UC_OK }, { "tx_mac_pause", GM_TXF_MPAUSE }, { "rx_mac_pause", GM_RXF_MPAUSE }, - { "collisions", GM_TXF_SNG_COL }, + { "collisions", GM_TXF_COL }, { "late_collision",GM_TXF_LAT_COL }, { "aborted", GM_TXF_ABO_COL }, + { "single_collisions", GM_TXF_SNG_COL }, { "multi_collisions", GM_TXF_MUL_COL }, - { "fifo_underrun", GM_TXE_FIFO_UR }, - { "fifo_overflow", GM_RXE_FIFO_OV }, - { "rx_toolong", GM_RXF_LNG_ERR }, - { "rx_jabber", GM_RXF_JAB_PKT }, + + { "rx_short", GM_RXF_SHT }, { "rx_runt", GM_RXE_FRAG }, + { "rx_64_byte_packets", GM_RXF_64B }, + { "rx_65_to_127_byte_packets", GM_RXF_127B }, + { "rx_128_to_255_byte_packets", GM_RXF_255B }, + { "rx_256_to_511_byte_packets", GM_RXF_511B }, + { "rx_512_to_1023_byte_packets", GM_RXF_1023B }, + { "rx_1024_to_1518_byte_packets", GM_RXF_1518B }, + { "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ }, { "rx_too_long", GM_RXF_LNG_ERR }, + { "rx_fifo_overflow", GM_RXE_FIFO_OV }, + { "rx_jabber", GM_RXF_JAB_PKT }, { "rx_fcs_error", GM_RXF_FCS_ERR }, + + { "tx_64_byte_packets", GM_TXF_64B }, + { "tx_65_to_127_byte_packets", GM_TXF_127B }, + { "tx_128_to_255_byte_packets", GM_TXF_255B }, + { "tx_256_to_511_byte_packets", GM_TXF_511B 
}, + { "tx_512_to_1023_byte_packets", GM_TXF_1023B }, + { "tx_1024_to_1518_byte_packets", GM_TXF_1518B }, + { "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ }, + { "tx_fifo_underrun", GM_TXE_FIFO_UR }, }; static u32 sky2_get_rx_csum(struct net_device *dev) @@ -2614,7 +2607,7 @@ static struct net_device_stats *sky2_get_stats(struct net_device *dev) sky2->net_stats.rx_bytes = data[1]; sky2->net_stats.tx_packets = data[2] + data[4] + data[6]; sky2->net_stats.rx_packets = data[3] + data[5] + data[7]; - sky2->net_stats.multicast = data[5] + data[7]; + sky2->net_stats.multicast = data[3] + data[5]; sky2->net_stats.collisions = data[10]; sky2->net_stats.tx_aborted_errors = data[12]; @@ -2743,7 +2736,7 @@ static int sky2_phys_id(struct net_device *dev, u32 data) ms = data * 1000; /* save initial values */ - down(&sky2->phy_sema); + spin_lock_bh(&sky2->phy_lock); if (hw->chip_id == CHIP_ID_YUKON_XL) { u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); @@ -2759,9 +2752,9 @@ static int sky2_phys_id(struct net_device *dev, u32 data) sky2_led(hw, port, onoff); onoff = !onoff; - up(&sky2->phy_sema); + spin_unlock_bh(&sky2->phy_lock); interrupted = msleep_interruptible(250); - down(&sky2->phy_sema); + spin_lock_bh(&sky2->phy_lock); ms -= 250; } @@ -2776,7 +2769,7 @@ static int sky2_phys_id(struct net_device *dev, u32 data) gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); } - up(&sky2->phy_sema); + spin_unlock_bh(&sky2->phy_lock); return 0; } @@ -2806,38 +2799,6 @@ static int sky2_set_pauseparam(struct net_device *dev, return err; } -#ifdef CONFIG_PM -static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct sky2_port *sky2 = netdev_priv(dev); - - wol->supported = WAKE_MAGIC; - wol->wolopts = sky2->wol ? 
WAKE_MAGIC : 0; -} - -static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - struct sky2_port *sky2 = netdev_priv(dev); - struct sky2_hw *hw = sky2->hw; - - if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0) - return -EOPNOTSUPP; - - sky2->wol = wol->wolopts == WAKE_MAGIC; - - if (sky2->wol) { - memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN); - - sky2_write16(hw, WOL_CTRL_STAT, - WOL_CTL_ENA_PME_ON_MAGIC_PKT | - WOL_CTL_ENA_MAGIC_PKT_UNIT); - } else - sky2_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT); - - return 0; -} -#endif - static int sky2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ecmd) { @@ -2878,19 +2839,11 @@ static int sky2_set_coalesce(struct net_device *dev, { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; - const u32 tmin = sky2_clk2us(hw, 1); - const u32 tmax = 5000; + const u32 tmax = sky2_clk2us(hw, 0x0ffffff); - if (ecmd->tx_coalesce_usecs != 0 && - (ecmd->tx_coalesce_usecs < tmin || ecmd->tx_coalesce_usecs > tmax)) - return -EINVAL; - - if (ecmd->rx_coalesce_usecs != 0 && - (ecmd->rx_coalesce_usecs < tmin || ecmd->rx_coalesce_usecs > tmax)) - return -EINVAL; - - if (ecmd->rx_coalesce_usecs_irq != 0 && - (ecmd->rx_coalesce_usecs_irq < tmin || ecmd->rx_coalesce_usecs_irq > tmax)) + if (ecmd->tx_coalesce_usecs > tmax || + ecmd->rx_coalesce_usecs > tmax || + ecmd->rx_coalesce_usecs_irq > tmax) return -EINVAL; if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1) @@ -3025,10 +2978,6 @@ static struct ethtool_ops sky2_ethtool_ops = { .set_ringparam = sky2_set_ringparam, .get_pauseparam = sky2_get_pauseparam, .set_pauseparam = sky2_set_pauseparam, -#ifdef CONFIG_PM - .get_wol = sky2_get_wol, - .set_wol = sky2_set_wol, -#endif .phys_id = sky2_phys_id, .get_stats_count = sky2_get_stats_count, .get_ethtool_stats = sky2_get_ethtool_stats, @@ -3082,16 +3031,15 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, sky2->speed = -1; sky2->advertising = sky2_supported_modes(hw); - /* Receive checksum disabled for Yukon XL + /* Receive checksum disabled for Yukon XL * because of observed problems with incorrect * values when multiple packets are received in one interrupt */ sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL); - INIT_WORK(&sky2->phy_task, sky2_phy_task, sky2); - init_MUTEX(&sky2->phy_sema); + spin_lock_init(&sky2->phy_lock); sky2->tx_pending = TX_DEF_PENDING; - sky2->rx_pending = is_ec_a1(hw) ? 
8 : RX_DEF_PENDING; + sky2->rx_pending = RX_DEF_PENDING; sky2->rx_bufsize = sky2_buf_size(ETH_DATA_LEN); hw->dev[port] = dev; @@ -3133,6 +3081,66 @@ static void __devinit sky2_show_addr(struct net_device *dev) dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); } +/* Handle software interrupt used during MSI test */ +static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id, + struct pt_regs *regs) +{ + struct sky2_hw *hw = dev_id; + u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2); + + if (status == 0) + return IRQ_NONE; + + if (status & Y2_IS_IRQ_SW) { + hw->msi_detected = 1; + wake_up(&hw->msi_wait); + sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); + } + sky2_write32(hw, B0_Y2_SP_ICR, 2); + + return IRQ_HANDLED; +} + +/* Test interrupt path by forcing a a software IRQ */ +static int __devinit sky2_test_msi(struct sky2_hw *hw) +{ + struct pci_dev *pdev = hw->pdev; + int err; + + sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW); + + err = request_irq(pdev->irq, sky2_test_intr, SA_SHIRQ, DRV_NAME, hw); + if (err) { + printk(KERN_ERR PFX "%s: cannot assign irq %d\n", + pci_name(pdev), pdev->irq); + return err; + } + + init_waitqueue_head (&hw->msi_wait); + + sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ); + wmb(); + + wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10); + + if (!hw->msi_detected) { + /* MSI test failed, go back to INTx mode */ + printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, " + "switching to INTx mode. Please report this failure to " + "the PCI maintainer and include system chipset information.\n", + pci_name(pdev)); + + err = -EOPNOTSUPP; + sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); + } + + sky2_write32(hw, B0_IMSK, 0); + + free_irq(pdev->irq, hw); + + return err; +} + static int __devinit sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -3201,7 +3209,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev, goto err_out_free_hw; } hw->pm_cap = pm_cap; - spin_lock_init(&hw->hw_lock); #ifdef __BIG_ENDIAN /* byte swap descriptors in hardware */ @@ -3254,21 +3261,29 @@ static int __devinit sky2_probe(struct pci_dev *pdev, } } - err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw); + if (!disable_msi && pci_enable_msi(pdev) == 0) { + err = sky2_test_msi(hw); + if (err == -EOPNOTSUPP) + pci_disable_msi(pdev); + else if (err) + goto err_out_unregister; + } + + err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw); if (err) { printk(KERN_ERR PFX "%s: cannot assign irq %d\n", pci_name(pdev), pdev->irq); goto err_out_unregister; } - hw->intr_mask = Y2_IS_BASE; - sky2_write32(hw, B0_IMSK, hw->intr_mask); + sky2_write32(hw, B0_IMSK, Y2_IS_BASE); pci_set_drvdata(pdev, hw); return 0; err_out_unregister: + pci_disable_msi(pdev); if (dev1) { unregister_netdev(dev1); free_netdev(dev1); @@ -3311,6 +3326,7 @@ static void __devexit sky2_remove(struct pci_dev *pdev) sky2_read8(hw, B0_CTST); free_irq(pdev->irq, hw); + pci_disable_msi(pdev); pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index dce955c76f3c..2838f661b393 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h @@ -278,13 +278,11 @@ enum { Y2_IS_CHK_TXS1 = 1<<1, /* Descriptor error TXS 1 */ Y2_IS_CHK_TXA1 = 1<<0, /* Descriptor error TXA 1 */ - Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU | - Y2_IS_POLL_CHK | Y2_IS_TWSI_RDY | - Y2_IS_IRQ_SW | Y2_IS_TIMINT, - Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1 | - Y2_IS_CHK_RX1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXS1, - 
Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 | - Y2_IS_CHK_RX2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_TXS2, + Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU, + Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1 + | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1, + Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 + | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2, }; /* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ @@ -1375,23 +1373,23 @@ enum { GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */ GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */ GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */ +/* MIB Counters */ + GM_MIB_CNT_BASE = 0x0100, /* Base Address of MIB Counters */ + GM_MIB_CNT_SIZE = 256, }; -/* MIB Counters */ -#define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */ -#define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */ /* * MIB Counters base address definitions (low word) - * use offset 4 for access to high word (32 bit r/o) */ enum { - GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */ + GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */ GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */ GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */ GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */ GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */ - /* GM_MIB_CNT_BASE + 40: reserved */ + GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */ GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */ GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */ @@ -1399,37 +1397,36 @@ enum { GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */ GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */ GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */ - GM_RXF_127B = GM_MIB_CNT_BASE + 104, /* 65-127 Byte Rx Frame */ - GM_RXF_255B = GM_MIB_CNT_BASE + 112, /* 128-255 Byte Rx Frame */ - GM_RXF_511B = GM_MIB_CNT_BASE + 120, /* 256-511 Byte Rx Frame */ - GM_RXF_1023B = GM_MIB_CNT_BASE + 128, /* 512-1023 Byte Rx Frame */ - GM_RXF_1518B = GM_MIB_CNT_BASE + 136, /* 1024-1518 Byte Rx Frame */ - GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144, /* 1519-MaxSize Byte Rx Frame */ - GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152, /* Rx Frame too Long Error */ - GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160, /* Rx Jabber Packet Frame */ - /* GM_MIB_CNT_BASE + 168: reserved */ - GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176, /* Rx FIFO overflow Event */ - /* GM_MIB_CNT_BASE + 184: reserved */ - GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192, /* Unicast Frames Xmitted OK */ - GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200, /* Broadcast Frames Xmitted OK */ - GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208, /* Pause MAC Ctrl Frames Xmitted */ - GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216, /* Multicast Frames Xmitted OK */ - GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224, /* Octets Transmitted OK Low */ - GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232, /* Octets Transmitted OK High */ - GM_TXF_64B = GM_MIB_CNT_BASE + 240, /* 64 Byte Tx Frame */ - GM_TXF_127B = GM_MIB_CNT_BASE + 248, /* 65-127 Byte Tx Frame */ - GM_TXF_255B = GM_MIB_CNT_BASE + 256, /* 128-255 Byte Tx Frame */ - GM_TXF_511B = GM_MIB_CNT_BASE + 264, /* 256-511 Byte Tx Frame */ - GM_TXF_1023B = GM_MIB_CNT_BASE + 272, /* 512-1023 Byte Tx Frame */ - GM_TXF_1518B = GM_MIB_CNT_BASE + 280, /* 1024-1518 Byte Tx Frame */ - GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288, /* 1519-MaxSize Byte Tx Frame */ - - GM_TXF_COL = GM_MIB_CNT_BASE + 304, /* Tx Collision 
*/ - GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312, /* Tx Late Collision */ - GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320, /* Tx aborted due to Exces. Col. */ - GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328, /* Tx Multiple Collision */ - GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336, /* Tx Single Collision */ - GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344, /* Tx FIFO Underrun Event */ + GM_RXF_127B = GM_MIB_CNT_BASE + 104,/* 65-127 Byte Rx Frame */ + GM_RXF_255B = GM_MIB_CNT_BASE + 112,/* 128-255 Byte Rx Frame */ + GM_RXF_511B = GM_MIB_CNT_BASE + 120,/* 256-511 Byte Rx Frame */ + GM_RXF_1023B = GM_MIB_CNT_BASE + 128,/* 512-1023 Byte Rx Frame */ + GM_RXF_1518B = GM_MIB_CNT_BASE + 136,/* 1024-1518 Byte Rx Frame */ + GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144,/* 1519-MaxSize Byte Rx Frame */ + GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152,/* Rx Frame too Long Error */ + GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160,/* Rx Jabber Packet Frame */ + + GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176,/* Rx FIFO overflow Event */ + GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192,/* Unicast Frames Xmitted OK */ + GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200,/* Broadcast Frames Xmitted OK */ + GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208,/* Pause MAC Ctrl Frames Xmitted */ + GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216,/* Multicast Frames Xmitted OK */ + GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224,/* Octets Transmitted OK Low */ + GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232,/* Octets Transmitted OK High */ + GM_TXF_64B = GM_MIB_CNT_BASE + 240,/* 64 Byte Tx Frame */ + GM_TXF_127B = GM_MIB_CNT_BASE + 248,/* 65-127 Byte Tx Frame */ + GM_TXF_255B = GM_MIB_CNT_BASE + 256,/* 128-255 Byte Tx Frame */ + GM_TXF_511B = GM_MIB_CNT_BASE + 264,/* 256-511 Byte Tx Frame */ + GM_TXF_1023B = GM_MIB_CNT_BASE + 272,/* 512-1023 Byte Tx Frame */ + GM_TXF_1518B = GM_MIB_CNT_BASE + 280,/* 1024-1518 Byte Tx Frame */ + GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288,/* 1519-MaxSize Byte Tx Frame */ + + GM_TXF_COL = GM_MIB_CNT_BASE + 304,/* Tx Collision */ + GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312,/* Tx Late Collision */ + GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320,/* Tx aborted due to Exces. Col. 
*/ + GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328,/* Tx Multiple Collision */ + GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336,/* Tx Single Collision */ + GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344,/* Tx FIFO Underrun Event */ }; /* GMAC Bit Definitions */ @@ -1832,6 +1829,7 @@ struct sky2_port { struct net_device *netdev; unsigned port; u32 msg_enable; + spinlock_t phy_lock; spinlock_t tx_lock ____cacheline_aligned_in_smp; struct tx_ring_info *tx_ring; @@ -1840,7 +1838,6 @@ struct sky2_port { u16 tx_prod; /* next le to use */ u32 tx_addr64; u16 tx_pending; - u16 tx_last_put; u16 tx_last_mss; struct ring_info *rx_ring ____cacheline_aligned_in_smp; @@ -1849,7 +1846,6 @@ struct sky2_port { u16 rx_next; /* next re to check */ u16 rx_put; /* next le index to use */ u16 rx_pending; - u16 rx_last_put; u16 rx_bufsize; #ifdef SKY2_VLAN_TAG_USED u16 rx_tag; @@ -1865,20 +1861,15 @@ struct sky2_port { u8 rx_pause; u8 tx_pause; u8 rx_csum; - u8 wol; struct net_device_stats net_stats; - struct work_struct phy_task; - struct semaphore phy_sema; }; struct sky2_hw { void __iomem *regs; struct pci_dev *pdev; struct net_device *dev[2]; - spinlock_t hw_lock; - u32 intr_mask; int pm_cap; u8 chip_id; @@ -1889,6 +1880,8 @@ struct sky2_hw { struct sky2_status_le *st_le; u32 st_idx; dma_addr_t st_dma; + int msi_detected; + wait_queue_head_t msi_wait; }; /* Register accessor for memory mapped device */ diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index 7ec08127c9d6..0e9833adf9fe 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c @@ -215,15 +215,12 @@ struct smc_local { spinlock_t lock; -#ifdef SMC_CAN_USE_DATACS - u32 __iomem *datacs; -#endif - #ifdef SMC_USE_PXA_DMA /* DMA needs the physical address of the chip */ u_long physaddr; #endif void __iomem *base; + void __iomem *datacs; }; #if SMC_DEBUG > 0 @@ -2104,9 +2101,8 @@ static int smc_enable_device(struct platform_device *pdev) * Set the appropriate byte/word mode. 
*/ ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8; -#ifndef SMC_CAN_USE_16BIT - ecsr |= ECSR_IOIS8; -#endif + if (!SMC_CAN_USE_16BIT) + ecsr |= ECSR_IOIS8; writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT)); local_irq_restore(flags); @@ -2143,40 +2139,39 @@ static void smc_release_attrib(struct platform_device *pdev) release_mem_region(res->start, ATTRIB_SIZE); } -#ifdef SMC_CAN_USE_DATACS -static void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev) +static inline void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev) { - struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); - struct smc_local *lp = netdev_priv(ndev); + if (SMC_CAN_USE_DATACS) { + struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); + struct smc_local *lp = netdev_priv(ndev); - if (!res) - return; + if (!res) + return; - if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) { - printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME); - return; - } + if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) { + printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME); + return; + } - lp->datacs = ioremap(res->start, SMC_DATA_EXTENT); + lp->datacs = ioremap(res->start, SMC_DATA_EXTENT); + } } static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev) { - struct smc_local *lp = netdev_priv(ndev); - struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); + if (SMC_CAN_USE_DATACS) { + struct smc_local *lp = netdev_priv(ndev); + struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); - if (lp->datacs) - iounmap(lp->datacs); + if (lp->datacs) + iounmap(lp->datacs); - lp->datacs = NULL; + lp->datacs = NULL; - if (res) - release_mem_region(res->start, SMC_DATA_EXTENT); + if (res) + release_mem_region(res->start, SMC_DATA_EXTENT); + } } -#else -static void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev) {} -static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev) {} -#endif /* * smc_init(void) @@ -2221,6 +2216,10 @@ static int smc_drv_probe(struct platform_device *pdev) ndev->dma = (unsigned char)-1; ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq < 0) { + ret = -ENODEV; + goto out_free_netdev; + } ret = smc_request_attrib(pdev); if (ret) diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index e0efd1964e72..e1be1af51201 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -275,7 +275,10 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) #define SMC_insw(a,r,p,l) readsw ((void*) ((a) + (r)), p, l) #define SMC_outw(v,a,r) ({ writew ((v), (a) + (r)); LPD7A40X_IOBARRIER; }) -static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l) +#define SMC_outsw LPD7A40X_SMC_outsw + +static inline void LPD7A40X_SMC_outsw(unsigned long a, int r, + unsigned char* p, int l) { unsigned short* ps = (unsigned short*) p; while (l-- > 0) { @@ -342,10 +345,6 @@ static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l) #endif -#ifndef SMC_IRQ_FLAGS -#define SMC_IRQ_FLAGS SA_TRIGGER_RISING -#endif - #ifdef SMC_USE_PXA_DMA /* * Let's use the DMA engine on the XScale PXA2xx for RX packets. 
This is @@ -441,10 +440,85 @@ smc_pxa_dma_irq(int dma, void *dummy, struct pt_regs *regs) #endif /* SMC_USE_PXA_DMA */ -/* Because of bank switching, the LAN91x uses only 16 I/O ports */ +/* + * Everything a particular hardware setup needs should have been defined + * at this point. Add stubs for the undefined cases, mainly to avoid + * compilation warnings since they'll be optimized away, or to prevent buggy + * use of them. + */ + +#if ! SMC_CAN_USE_32BIT +#define SMC_inl(ioaddr, reg) ({ BUG(); 0; }) +#define SMC_outl(x, ioaddr, reg) BUG() +#define SMC_insl(a, r, p, l) BUG() +#define SMC_outsl(a, r, p, l) BUG() +#endif + +#if !defined(SMC_insl) || !defined(SMC_outsl) +#define SMC_insl(a, r, p, l) BUG() +#define SMC_outsl(a, r, p, l) BUG() +#endif + +#if ! SMC_CAN_USE_16BIT + +/* + * Any 16-bit access is performed with two 8-bit accesses if the hardware + * can't do it directly. Most registers are 16-bit so those are mandatory. + */ +#define SMC_outw(x, ioaddr, reg) \ + do { \ + unsigned int __val16 = (x); \ + SMC_outb( __val16, ioaddr, reg ); \ + SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\ + } while (0) +#define SMC_inw(ioaddr, reg) \ + ({ \ + unsigned int __val16; \ + __val16 = SMC_inb( ioaddr, reg ); \ + __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \ + __val16; \ + }) + +#define SMC_insw(a, r, p, l) BUG() +#define SMC_outsw(a, r, p, l) BUG() + +#endif + +#if !defined(SMC_insw) || !defined(SMC_outsw) +#define SMC_insw(a, r, p, l) BUG() +#define SMC_outsw(a, r, p, l) BUG() +#endif + +#if ! SMC_CAN_USE_8BIT +#define SMC_inb(ioaddr, reg) ({ BUG(); 0; }) +#define SMC_outb(x, ioaddr, reg) BUG() +#define SMC_insb(a, r, p, l) BUG() +#define SMC_outsb(a, r, p, l) BUG() +#endif + +#if !defined(SMC_insb) || !defined(SMC_outsb) +#define SMC_insb(a, r, p, l) BUG() +#define SMC_outsb(a, r, p, l) BUG() +#endif + +#ifndef SMC_CAN_USE_DATACS +#define SMC_CAN_USE_DATACS 0 +#endif + #ifndef SMC_IO_SHIFT #define SMC_IO_SHIFT 0 #endif + +#ifndef SMC_IRQ_FLAGS +#define SMC_IRQ_FLAGS SA_TRIGGER_RISING +#endif + +#ifndef SMC_INTERRUPT_PREAMBLE +#define SMC_INTERRUPT_PREAMBLE +#endif + + +/* Because of bank switching, the LAN91x uses only 16 I/O ports */ #define SMC_IO_EXTENT (16 << SMC_IO_SHIFT) #define SMC_DATA_EXTENT (4) @@ -817,6 +891,11 @@ static const char * chip_ids[ 16 ] = { * Note: the following macros do *not* select the bank -- this must * be done separately as needed in the main code. The SMC_REG() macro * only uses the bank argument for debugging purposes (when enabled). + * + * Note: despite inline functions being safer, everything leading to this + * should preferably be macros to let BUG() display the line number in + * the core source code since we're interested in the top call site + * not in any inline function location. 
*/ #if SMC_DEBUG > 0 @@ -834,62 +913,142 @@ static const char * chip_ids[ 16 ] = { #define SMC_REG(reg, bank) (reg<<SMC_IO_SHIFT) #endif -#if SMC_CAN_USE_8BIT -#define SMC_GET_PN() SMC_inb( ioaddr, PN_REG ) -#define SMC_SET_PN(x) SMC_outb( x, ioaddr, PN_REG ) -#define SMC_GET_AR() SMC_inb( ioaddr, AR_REG ) -#define SMC_GET_TXFIFO() SMC_inb( ioaddr, TXFIFO_REG ) -#define SMC_GET_RXFIFO() SMC_inb( ioaddr, RXFIFO_REG ) -#define SMC_GET_INT() SMC_inb( ioaddr, INT_REG ) -#define SMC_ACK_INT(x) SMC_outb( x, ioaddr, INT_REG ) -#define SMC_GET_INT_MASK() SMC_inb( ioaddr, IM_REG ) -#define SMC_SET_INT_MASK(x) SMC_outb( x, ioaddr, IM_REG ) -#else -#define SMC_GET_PN() (SMC_inw( ioaddr, PN_REG ) & 0xFF) -#define SMC_SET_PN(x) SMC_outw( x, ioaddr, PN_REG ) -#define SMC_GET_AR() (SMC_inw( ioaddr, PN_REG ) >> 8) -#define SMC_GET_TXFIFO() (SMC_inw( ioaddr, TXFIFO_REG ) & 0xFF) -#define SMC_GET_RXFIFO() (SMC_inw( ioaddr, TXFIFO_REG ) >> 8) -#define SMC_GET_INT() (SMC_inw( ioaddr, INT_REG ) & 0xFF) +/* + * Hack Alert: Some setups just can't write 8 or 16 bits reliably when not + * aligned to a 32 bit boundary. I tell you that does exist! + * Fortunately the affected register accesses can be easily worked around + * since we can write zeroes to the preceeding 16 bits without adverse + * effects and use a 32-bit access. + * + * Enforce it on any 32-bit capable setup for now. + */ +#define SMC_MUST_ALIGN_WRITE SMC_CAN_USE_32BIT + +#define SMC_GET_PN() \ + ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, PN_REG)) \ + : (SMC_inw(ioaddr, PN_REG) & 0xFF) ) + +#define SMC_SET_PN(x) \ + do { \ + if (SMC_MUST_ALIGN_WRITE) \ + SMC_outl((x)<<16, ioaddr, SMC_REG(0, 2)); \ + else if (SMC_CAN_USE_8BIT) \ + SMC_outb(x, ioaddr, PN_REG); \ + else \ + SMC_outw(x, ioaddr, PN_REG); \ + } while (0) + +#define SMC_GET_AR() \ + ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, AR_REG)) \ + : (SMC_inw(ioaddr, PN_REG) >> 8) ) + +#define SMC_GET_TXFIFO() \ + ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, TXFIFO_REG)) \ + : (SMC_inw(ioaddr, TXFIFO_REG) & 0xFF) ) + +#define SMC_GET_RXFIFO() \ + ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, RXFIFO_REG)) \ + : (SMC_inw(ioaddr, TXFIFO_REG) >> 8) ) + +#define SMC_GET_INT() \ + ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, INT_REG)) \ + : (SMC_inw(ioaddr, INT_REG) & 0xFF) ) + #define SMC_ACK_INT(x) \ do { \ - unsigned long __flags; \ - int __mask; \ - local_irq_save(__flags); \ - __mask = SMC_inw( ioaddr, INT_REG ) & ~0xff; \ - SMC_outw( __mask | (x), ioaddr, INT_REG ); \ - local_irq_restore(__flags); \ + if (SMC_CAN_USE_8BIT) \ + SMC_outb(x, ioaddr, INT_REG); \ + else { \ + unsigned long __flags; \ + int __mask; \ + local_irq_save(__flags); \ + __mask = SMC_inw( ioaddr, INT_REG ) & ~0xff; \ + SMC_outw( __mask | (x), ioaddr, INT_REG ); \ + local_irq_restore(__flags); \ + } \ + } while (0) + +#define SMC_GET_INT_MASK() \ + ( SMC_CAN_USE_8BIT ? 
(SMC_inb(ioaddr, IM_REG)) \ + : (SMC_inw( ioaddr, INT_REG ) >> 8) ) + +#define SMC_SET_INT_MASK(x) \ + do { \ + if (SMC_CAN_USE_8BIT) \ + SMC_outb(x, ioaddr, IM_REG); \ + else \ + SMC_outw((x) << 8, ioaddr, INT_REG); \ + } while (0) + +#define SMC_CURRENT_BANK() SMC_inw(ioaddr, BANK_SELECT) + +#define SMC_SELECT_BANK(x) \ + do { \ + if (SMC_MUST_ALIGN_WRITE) \ + SMC_outl((x)<<16, ioaddr, 12<<SMC_IO_SHIFT); \ + else \ + SMC_outw(x, ioaddr, BANK_SELECT); \ + } while (0) + +#define SMC_GET_BASE() SMC_inw(ioaddr, BASE_REG) + +#define SMC_SET_BASE(x) SMC_outw(x, ioaddr, BASE_REG) + +#define SMC_GET_CONFIG() SMC_inw(ioaddr, CONFIG_REG) + +#define SMC_SET_CONFIG(x) SMC_outw(x, ioaddr, CONFIG_REG) + +#define SMC_GET_COUNTER() SMC_inw(ioaddr, COUNTER_REG) + +#define SMC_GET_CTL() SMC_inw(ioaddr, CTL_REG) + +#define SMC_SET_CTL(x) SMC_outw(x, ioaddr, CTL_REG) + +#define SMC_GET_MII() SMC_inw(ioaddr, MII_REG) + +#define SMC_SET_MII(x) SMC_outw(x, ioaddr, MII_REG) + +#define SMC_GET_MIR() SMC_inw(ioaddr, MIR_REG) + +#define SMC_SET_MIR(x) SMC_outw(x, ioaddr, MIR_REG) + +#define SMC_GET_MMU_CMD() SMC_inw(ioaddr, MMU_CMD_REG) + +#define SMC_SET_MMU_CMD(x) SMC_outw(x, ioaddr, MMU_CMD_REG) + +#define SMC_GET_FIFO() SMC_inw(ioaddr, FIFO_REG) + +#define SMC_GET_PTR() SMC_inw(ioaddr, PTR_REG) + +#define SMC_SET_PTR(x) \ + do { \ + if (SMC_MUST_ALIGN_WRITE) \ + SMC_outl((x)<<16, ioaddr, SMC_REG(4, 2)); \ + else \ + SMC_outw(x, ioaddr, PTR_REG); \ } while (0) -#define SMC_GET_INT_MASK() (SMC_inw( ioaddr, INT_REG ) >> 8) -#define SMC_SET_INT_MASK(x) SMC_outw( (x) << 8, ioaddr, INT_REG ) -#endif -#define SMC_CURRENT_BANK() SMC_inw( ioaddr, BANK_SELECT ) -#define SMC_SELECT_BANK(x) SMC_outw( x, ioaddr, BANK_SELECT ) -#define SMC_GET_BASE() SMC_inw( ioaddr, BASE_REG ) -#define SMC_SET_BASE(x) SMC_outw( x, ioaddr, BASE_REG ) -#define SMC_GET_CONFIG() SMC_inw( ioaddr, CONFIG_REG ) -#define SMC_SET_CONFIG(x) SMC_outw( x, ioaddr, CONFIG_REG ) -#define SMC_GET_COUNTER() SMC_inw( ioaddr, COUNTER_REG ) -#define SMC_GET_CTL() SMC_inw( ioaddr, CTL_REG ) -#define SMC_SET_CTL(x) SMC_outw( x, ioaddr, CTL_REG ) -#define SMC_GET_MII() SMC_inw( ioaddr, MII_REG ) -#define SMC_SET_MII(x) SMC_outw( x, ioaddr, MII_REG ) -#define SMC_GET_MIR() SMC_inw( ioaddr, MIR_REG ) -#define SMC_SET_MIR(x) SMC_outw( x, ioaddr, MIR_REG ) -#define SMC_GET_MMU_CMD() SMC_inw( ioaddr, MMU_CMD_REG ) -#define SMC_SET_MMU_CMD(x) SMC_outw( x, ioaddr, MMU_CMD_REG ) -#define SMC_GET_FIFO() SMC_inw( ioaddr, FIFO_REG ) -#define SMC_GET_PTR() SMC_inw( ioaddr, PTR_REG ) -#define SMC_SET_PTR(x) SMC_outw( x, ioaddr, PTR_REG ) -#define SMC_GET_EPH_STATUS() SMC_inw( ioaddr, EPH_STATUS_REG ) -#define SMC_GET_RCR() SMC_inw( ioaddr, RCR_REG ) -#define SMC_SET_RCR(x) SMC_outw( x, ioaddr, RCR_REG ) -#define SMC_GET_REV() SMC_inw( ioaddr, REV_REG ) -#define SMC_GET_RPC() SMC_inw( ioaddr, RPC_REG ) -#define SMC_SET_RPC(x) SMC_outw( x, ioaddr, RPC_REG ) -#define SMC_GET_TCR() SMC_inw( ioaddr, TCR_REG ) -#define SMC_SET_TCR(x) SMC_outw( x, ioaddr, TCR_REG ) +#define SMC_GET_EPH_STATUS() SMC_inw(ioaddr, EPH_STATUS_REG) + +#define SMC_GET_RCR() SMC_inw(ioaddr, RCR_REG) + +#define SMC_SET_RCR(x) SMC_outw(x, ioaddr, RCR_REG) + +#define SMC_GET_REV() SMC_inw(ioaddr, REV_REG) + +#define SMC_GET_RPC() SMC_inw(ioaddr, RPC_REG) + +#define SMC_SET_RPC(x) \ + do { \ + if (SMC_MUST_ALIGN_WRITE) \ + SMC_outl((x)<<16, ioaddr, SMC_REG(8, 0)); \ + else \ + SMC_outw(x, ioaddr, RPC_REG); \ + } while (0) + +#define SMC_GET_TCR() SMC_inw(ioaddr, TCR_REG) + +#define SMC_SET_TCR(x) SMC_outw(x, 
ioaddr, TCR_REG) #ifndef SMC_GET_MAC_ADDR #define SMC_GET_MAC_ADDR(addr) \ @@ -920,151 +1079,84 @@ static const char * chip_ids[ 16 ] = { SMC_outw( mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4 ); \ } while (0) -#if SMC_CAN_USE_32BIT -/* - * Some setups just can't write 8 or 16 bits reliably when not aligned - * to a 32 bit boundary. I tell you that exists! - * We re-do the ones here that can be easily worked around if they can have - * their low parts written to 0 without adverse effects. - */ -#undef SMC_SELECT_BANK -#define SMC_SELECT_BANK(x) SMC_outl( (x)<<16, ioaddr, 12<<SMC_IO_SHIFT ) -#undef SMC_SET_RPC -#define SMC_SET_RPC(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(8, 0) ) -#undef SMC_SET_PN -#define SMC_SET_PN(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(0, 2) ) -#undef SMC_SET_PTR -#define SMC_SET_PTR(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(4, 2) ) -#endif - -#if SMC_CAN_USE_32BIT -#define SMC_PUT_PKT_HDR(status, length) \ - SMC_outl( (status) | (length) << 16, ioaddr, DATA_REG ) -#define SMC_GET_PKT_HDR(status, length) \ - do { \ - unsigned int __val = SMC_inl( ioaddr, DATA_REG ); \ - (status) = __val & 0xffff; \ - (length) = __val >> 16; \ - } while (0) -#else #define SMC_PUT_PKT_HDR(status, length) \ do { \ - SMC_outw( status, ioaddr, DATA_REG ); \ - SMC_outw( length, ioaddr, DATA_REG ); \ - } while (0) -#define SMC_GET_PKT_HDR(status, length) \ - do { \ - (status) = SMC_inw( ioaddr, DATA_REG ); \ - (length) = SMC_inw( ioaddr, DATA_REG ); \ + if (SMC_CAN_USE_32BIT) \ + SMC_outl((status) | (length)<<16, ioaddr, DATA_REG); \ + else { \ + SMC_outw(status, ioaddr, DATA_REG); \ + SMC_outw(length, ioaddr, DATA_REG); \ + } \ } while (0) -#endif -#if SMC_CAN_USE_32BIT -#define _SMC_PUSH_DATA(p, l) \ +#define SMC_GET_PKT_HDR(status, length) \ do { \ - char *__ptr = (p); \ - int __len = (l); \ - if (__len >= 2 && (unsigned long)__ptr & 2) { \ - __len -= 2; \ - SMC_outw( *(u16 *)__ptr, ioaddr, DATA_REG ); \ - __ptr += 2; \ - } \ - SMC_outsl( ioaddr, DATA_REG, __ptr, __len >> 2); \ - if (__len & 2) { \ - __ptr += (__len & ~3); \ - SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \ + if (SMC_CAN_USE_32BIT) { \ + unsigned int __val = SMC_inl(ioaddr, DATA_REG); \ + (status) = __val & 0xffff; \ + (length) = __val >> 16; \ + } else { \ + (status) = SMC_inw(ioaddr, DATA_REG); \ + (length) = SMC_inw(ioaddr, DATA_REG); \ } \ } while (0) -#define _SMC_PULL_DATA(p, l) \ - do { \ - char *__ptr = (p); \ - int __len = (l); \ - if ((unsigned long)__ptr & 2) { \ - /* \ - * We want 32bit alignment here. \ - * Since some buses perform a full 32bit \ - * fetch even for 16bit data we can't use \ - * SMC_inw() here. Back both source (on chip \ - * and destination) pointers of 2 bytes. \ - */ \ - __ptr -= 2; \ - __len += 2; \ - SMC_SET_PTR( 2|PTR_READ|PTR_RCV|PTR_AUTOINC ); \ - } \ - __len += 2; \ - SMC_insl( ioaddr, DATA_REG, __ptr, __len >> 2); \ - } while (0) -#elif SMC_CAN_USE_16BIT -#define _SMC_PUSH_DATA(p, l) SMC_outsw( ioaddr, DATA_REG, p, (l) >> 1 ) -#define _SMC_PULL_DATA(p, l) SMC_insw ( ioaddr, DATA_REG, p, (l) >> 1 ) -#elif SMC_CAN_USE_8BIT -#define _SMC_PUSH_DATA(p, l) SMC_outsb( ioaddr, DATA_REG, p, l ) -#define _SMC_PULL_DATA(p, l) SMC_insb ( ioaddr, DATA_REG, p, l ) -#endif -#if ! 
SMC_CAN_USE_16BIT -#define SMC_outw(x, ioaddr, reg) \ +#define SMC_PUSH_DATA(p, l) \ do { \ - unsigned int __val16 = (x); \ - SMC_outb( __val16, ioaddr, reg ); \ - SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\ + if (SMC_CAN_USE_32BIT) { \ + void *__ptr = (p); \ + int __len = (l); \ + void *__ioaddr = ioaddr; \ + if (__len >= 2 && (unsigned long)__ptr & 2) { \ + __len -= 2; \ + SMC_outw(*(u16 *)__ptr, ioaddr, DATA_REG); \ + __ptr += 2; \ + } \ + if (SMC_CAN_USE_DATACS && lp->datacs) \ + __ioaddr = lp->datacs; \ + SMC_outsl(__ioaddr, DATA_REG, __ptr, __len>>2); \ + if (__len & 2) { \ + __ptr += (__len & ~3); \ + SMC_outw(*((u16 *)__ptr), ioaddr, DATA_REG); \ + } \ + } else if (SMC_CAN_USE_16BIT) \ + SMC_outsw(ioaddr, DATA_REG, p, (l) >> 1); \ + else if (SMC_CAN_USE_8BIT) \ + SMC_outsb(ioaddr, DATA_REG, p, l); \ } while (0) -#define SMC_inw(ioaddr, reg) \ - ({ \ - unsigned int __val16; \ - __val16 = SMC_inb( ioaddr, reg ); \ - __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \ - __val16; \ - }) -#endif - -#ifdef SMC_CAN_USE_DATACS -#define SMC_PUSH_DATA(p, l) \ - if ( lp->datacs ) { \ - unsigned char *__ptr = (p); \ - int __len = (l); \ - if (__len >= 2 && (unsigned long)__ptr & 2) { \ - __len -= 2; \ - SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \ - __ptr += 2; \ - } \ - outsl(lp->datacs, __ptr, __len >> 2); \ - if (__len & 2) { \ - __ptr += (__len & ~3); \ - SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \ - } \ - } else { \ - _SMC_PUSH_DATA(p, l); \ - } #define SMC_PULL_DATA(p, l) \ - if ( lp->datacs ) { \ - unsigned char *__ptr = (p); \ - int __len = (l); \ - if ((unsigned long)__ptr & 2) { \ - /* \ - * We want 32bit alignment here. \ - * Since some buses perform a full 32bit \ - * fetch even for 16bit data we can't use \ - * SMC_inw() here. Back both source (on chip \ - * and destination) pointers of 2 bytes. \ - */ \ - __ptr -= 2; \ + do { \ + if (SMC_CAN_USE_32BIT) { \ + void *__ptr = (p); \ + int __len = (l); \ + void *__ioaddr = ioaddr; \ + if ((unsigned long)__ptr & 2) { \ + /* \ + * We want 32bit alignment here. \ + * Since some buses perform a full \ + * 32bit fetch even for 16bit data \ + * we can't use SMC_inw() here. \ + * Back both source (on-chip) and \ + * destination pointers of 2 bytes. \ + * This is possible since the call to \ + * SMC_GET_PKT_HDR() already advanced \ + * the source pointer of 4 bytes, and \ + * the skb_reserve(skb, 2) advanced \ + * the destination pointer of 2 bytes. 
\ + */ \ + __ptr -= 2; \ + __len += 2; \ + SMC_SET_PTR(2|PTR_READ|PTR_RCV|PTR_AUTOINC); \ + } \ + if (SMC_CAN_USE_DATACS && lp->datacs) \ + __ioaddr = lp->datacs; \ __len += 2; \ - SMC_SET_PTR( 2|PTR_READ|PTR_RCV|PTR_AUTOINC ); \ - } \ - __len += 2; \ - insl( lp->datacs, __ptr, __len >> 2); \ - } else { \ - _SMC_PULL_DATA(p, l); \ - } -#else -#define SMC_PUSH_DATA(p, l) _SMC_PUSH_DATA(p, l) -#define SMC_PULL_DATA(p, l) _SMC_PULL_DATA(p, l) -#endif - -#if !defined (SMC_INTERRUPT_PREAMBLE) -# define SMC_INTERRUPT_PREAMBLE -#endif + SMC_insl(__ioaddr, DATA_REG, __ptr, __len>>2); \ + } else if (SMC_CAN_USE_16BIT) \ + SMC_insw(ioaddr, DATA_REG, p, (l) >> 1); \ + else if (SMC_CAN_USE_8BIT) \ + SMC_insb(ioaddr, DATA_REG, p, l); \ + } while (0) #endif /* _SMC91X_H_ */ diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index 28ce47a02408..38cd30cb7c75 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c @@ -55,6 +55,7 @@ #include <linux/workqueue.h> #include <linux/if_vlan.h> #include <linux/bitops.h> +#include <linux/mutex.h> #include <asm/system.h> #include <asm/io.h> @@ -2284,7 +2285,7 @@ static void gem_reset_task(void *data) { struct gem *gp = (struct gem *) data; - down(&gp->pm_sem); + mutex_lock(&gp->pm_mutex); netif_poll_disable(gp->dev); @@ -2311,7 +2312,7 @@ static void gem_reset_task(void *data) netif_poll_enable(gp->dev); - up(&gp->pm_sem); + mutex_unlock(&gp->pm_mutex); } @@ -2320,14 +2321,14 @@ static int gem_open(struct net_device *dev) struct gem *gp = dev->priv; int rc = 0; - down(&gp->pm_sem); + mutex_lock(&gp->pm_mutex); /* We need the cell enabled */ if (!gp->asleep) rc = gem_do_start(dev); gp->opened = (rc == 0); - up(&gp->pm_sem); + mutex_unlock(&gp->pm_mutex); return rc; } @@ -2340,13 +2341,13 @@ static int gem_close(struct net_device *dev) * our caller (dev_close) already did it for us */ - down(&gp->pm_sem); + mutex_lock(&gp->pm_mutex); gp->opened = 0; if (!gp->asleep) gem_do_stop(dev, 0); - up(&gp->pm_sem); + mutex_unlock(&gp->pm_mutex); return 0; } @@ -2358,7 +2359,7 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state) struct gem *gp = dev->priv; unsigned long flags; - down(&gp->pm_sem); + mutex_lock(&gp->pm_mutex); netif_poll_disable(dev); @@ -2391,11 +2392,11 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state) /* Stop the link timer */ del_timer_sync(&gp->link_timer); - /* Now we release the semaphore to not block the reset task who + /* Now we release the mutex to not block the reset task who * can take it too. 
We are marked asleep, so there will be no * conflict here */ - up(&gp->pm_sem); + mutex_unlock(&gp->pm_mutex); /* Wait for a pending reset task to complete */ while (gp->reset_task_pending) @@ -2424,7 +2425,7 @@ static int gem_resume(struct pci_dev *pdev) printk(KERN_INFO "%s: resuming\n", dev->name); - down(&gp->pm_sem); + mutex_lock(&gp->pm_mutex); /* Keep the cell enabled during the entire operation, no need to * take a lock here tho since nothing else can happen while we are @@ -2440,7 +2441,7 @@ static int gem_resume(struct pci_dev *pdev) * still asleep, a new sleep cycle may bring it back */ gem_put_cell(gp); - up(&gp->pm_sem); + mutex_unlock(&gp->pm_mutex); return 0; } pci_set_master(gp->pdev); @@ -2486,7 +2487,7 @@ static int gem_resume(struct pci_dev *pdev) netif_poll_enable(dev); - up(&gp->pm_sem); + mutex_unlock(&gp->pm_mutex); return 0; } @@ -2591,7 +2592,7 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu) return 0; } - down(&gp->pm_sem); + mutex_lock(&gp->pm_mutex); spin_lock_irq(&gp->lock); spin_lock(&gp->tx_lock); dev->mtu = new_mtu; @@ -2602,7 +2603,7 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu) } spin_unlock(&gp->tx_lock); spin_unlock_irq(&gp->lock); - up(&gp->pm_sem); + mutex_unlock(&gp->pm_mutex); return 0; } @@ -2771,10 +2772,10 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) int rc = -EOPNOTSUPP; unsigned long flags; - /* Hold the PM semaphore while doing ioctl's or we may collide + /* Hold the PM mutex while doing ioctl's or we may collide * with power management. */ - down(&gp->pm_sem); + mutex_lock(&gp->pm_mutex); spin_lock_irqsave(&gp->lock, flags); gem_get_cell(gp); @@ -2812,7 +2813,7 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) gem_put_cell(gp); spin_unlock_irqrestore(&gp->lock, flags); - up(&gp->pm_sem); + mutex_unlock(&gp->pm_mutex); return rc; } @@ -3033,7 +3034,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev, spin_lock_init(&gp->lock); spin_lock_init(&gp->tx_lock); - init_MUTEX(&gp->pm_sem); + mutex_init(&gp->pm_mutex); init_timer(&gp->link_timer); gp->link_timer.function = gem_link_timer; diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h index 13006d759ad8..89847215d006 100644 --- a/drivers/net/sungem.h +++ b/drivers/net/sungem.h @@ -980,15 +980,15 @@ struct gem { int tx_new, tx_old; unsigned int has_wol : 1; /* chip supports wake-on-lan */ - unsigned int asleep : 1; /* chip asleep, protected by pm_sem */ + unsigned int asleep : 1; /* chip asleep, protected by pm_mutex */ unsigned int asleep_wol : 1; /* was asleep with WOL enabled */ - unsigned int opened : 1; /* driver opened, protected by pm_sem */ + unsigned int opened : 1; /* driver opened, protected by pm_mutex */ unsigned int running : 1; /* chip running, protected by lock */ /* cell enable count, protected by lock */ int cell_enabled; - struct semaphore pm_sem; + struct mutex pm_mutex; u32 msg_enable; u32 status; diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 6c6c5498899f..b5473325bff4 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -69,8 +69,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.49" -#define DRV_MODULE_RELDATE "Feb 2, 2006" +#define DRV_MODULE_VERSION "3.54" +#define DRV_MODULE_RELDATE "Mar 23, 2006" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -221,10 +221,26 @@ static struct pci_device_id tg3_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S, @@ -534,6 +550,9 @@ static void tg3_enable_ints(struct tg3 *tp) (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, (tp->last_tag << 24)); + if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, + (tp->last_tag << 24)); tg3_cond_int(tp); } @@ -1038,9 +1057,11 @@ static void tg3_frob_aux_power(struct tg3 *tp) struct net_device *dev_peer; dev_peer = pci_get_drvdata(tp->pdev_peer); + /* remove_one() may have been run on the peer. */ if (!dev_peer) - BUG(); - tp_peer = netdev_priv(dev_peer); + tp_peer = tp; + else + tp_peer = netdev_priv(dev_peer); } if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || @@ -1131,7 +1152,20 @@ static int tg3_halt_cpu(struct tg3 *, u32); static int tg3_nvram_lock(struct tg3 *); static void tg3_nvram_unlock(struct tg3 *); -static int tg3_set_power_state(struct tg3 *tp, int state) +static void tg3_power_down_phy(struct tg3 *tp) +{ + /* The PHY should not be powered down on some chips because + * of bugs. 
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 && + (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))) + return; + tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); +} + +static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) { u32 misc_host_ctrl; u16 power_control, power_caps; @@ -1150,7 +1184,7 @@ static int tg3_set_power_state(struct tg3 *tp, int state) power_control |= PCI_PM_CTRL_PME_STATUS; power_control &= ~(PCI_PM_CTRL_STATE_MASK); switch (state) { - case 0: + case PCI_D0: power_control |= 0; pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, @@ -1163,15 +1197,15 @@ static int tg3_set_power_state(struct tg3 *tp, int state) return 0; - case 1: + case PCI_D1: power_control |= 1; break; - case 2: + case PCI_D2: power_control |= 2; break; - case 3: + case PCI_D3hot: power_control |= 3; break; @@ -1310,8 +1344,7 @@ static int tg3_set_power_state(struct tg3 *tp, int state) tg3_writephy(tp, MII_TG3_EXT_CTRL, MII_TG3_EXT_CTRL_FORCE_LED_OFF); tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2); - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) - tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); + tg3_power_down_phy(tp); } } @@ -2680,6 +2713,12 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) err |= tg3_readphy(tp, MII_BMSR, &bmsr); err |= tg3_readphy(tp, MII_BMSR, &bmsr); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + bmsr |= BMSR_LSTATUS; + else + bmsr &= ~BMSR_LSTATUS; + } err |= tg3_readphy(tp, MII_BMCR, &bmcr); @@ -2748,6 +2787,13 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) bmcr = new_bmcr; err |= tg3_readphy(tp, MII_BMSR, &bmsr); err |= tg3_readphy(tp, MII_BMSR, &bmsr); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5714) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + bmsr |= BMSR_LSTATUS; + else + bmsr &= ~BMSR_LSTATUS; + } tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; } } @@ -3338,6 +3384,23 @@ static inline void tg3_full_unlock(struct tg3 *tp) spin_unlock_bh(&tp->lock); } +/* One-shot MSI handler - Chip automatically disables interrupt + * after sending MSI so driver doesn't have to do it. + */ +static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = dev_id; + struct tg3 *tp = netdev_priv(dev); + + prefetch(tp->hw_status); + prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); + + if (likely(!tg3_irq_sync(tp))) + netif_rx_schedule(dev); /* schedule NAPI poll */ + + return IRQ_HANDLED; +} + /* MSI ISR - No need to check for interrupt sharing and no need to * flush status block and interrupt mailbox. PCI ordering rules * guarantee that MSI will arrive after the status block. @@ -3628,11 +3691,139 @@ static void tg3_set_txd(struct tg3 *tp, int entry, txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT; } +/* hard_start_xmit for devices that don't have any bugs and + * support TG3_FLG2_HW_TSO_2 only. + */ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); dma_addr_t mapping; u32 len, entry, base_flags, mss; + + len = skb_headlen(skb); + + /* No BH disabling for tx_lock here. We are running in BH disabled + * context and TX reclaim runs via tp->poll inside of a software + * interrupt. Furthermore, IRQ processing runs lockless so we have + * no IRQ context deadlocks to worry about either. Rejoice! 
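tg3_set_power_state() now takes a pci_power_t instead of a bare integer, and the switch above maps PCI_D0/PCI_D1/PCI_D2/PCI_D3hot onto the PCI_PM_CTRL state bits. A stripped-down sketch of that pattern, with a made-up function name and all of the chip-specific work omitted, looks roughly like this:

static int example_set_pm_state(struct pci_dev *pdev, int pm_cap,
				pci_power_t state)
{
	u16 ctrl;

	pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &ctrl);
	ctrl &= ~PCI_PM_CTRL_STATE_MASK;

	switch (state) {
	case PCI_D0:			/* fully powered */
		break;
	case PCI_D1:
		ctrl |= 1;
		break;
	case PCI_D2:
		ctrl |= 2;
		break;
	case PCI_D3hot:
		ctrl |= 3;
		break;
	default:
		return -EINVAL;		/* D3cold etc. not handled here */
	}

	pci_write_config_word(pdev, pm_cap + PCI_PM_CTRL, ctrl);
	return 0;
}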
+ */ + if (!spin_trylock(&tp->tx_lock)) + return NETDEV_TX_LOCKED; + + if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { + if (!netif_queue_stopped(dev)) { + netif_stop_queue(dev); + + /* This is a hard error, log it. */ + printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " + "queue awake!\n", dev->name); + } + spin_unlock(&tp->tx_lock); + return NETDEV_TX_BUSY; + } + + entry = tp->tx_prod; + base_flags = 0; +#if TG3_TSO_SUPPORT != 0 + mss = 0; + if (skb->len > (tp->dev->mtu + ETH_HLEN) && + (mss = skb_shinfo(skb)->tso_size) != 0) { + int tcp_opt_len, ip_tcp_len; + + if (skb_header_cloned(skb) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { + dev_kfree_skb(skb); + goto out_unlock; + } + + tcp_opt_len = ((skb->h.th->doff - 5) * 4); + ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr); + + base_flags |= (TXD_FLAG_CPU_PRE_DMA | + TXD_FLAG_CPU_POST_DMA); + + skb->nh.iph->check = 0; + skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); + + skb->h.th->check = 0; + + mss |= (ip_tcp_len + tcp_opt_len) << 9; + } + else if (skb->ip_summed == CHECKSUM_HW) + base_flags |= TXD_FLAG_TCPUDP_CSUM; +#else + mss = 0; + if (skb->ip_summed == CHECKSUM_HW) + base_flags |= TXD_FLAG_TCPUDP_CSUM; +#endif +#if TG3_VLAN_TAG_USED + if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) + base_flags |= (TXD_FLAG_VLAN | + (vlan_tx_tag_get(skb) << 16)); +#endif + + /* Queue skb data, a.k.a. the main skb fragment. */ + mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); + + tp->tx_buffers[entry].skb = skb; + pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping); + + tg3_set_txd(tp, entry, mapping, len, base_flags, + (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); + + entry = NEXT_TX(entry); + + /* Now loop through additional data fragments, and queue them. */ + if (skb_shinfo(skb)->nr_frags > 0) { + unsigned int i, last; + + last = skb_shinfo(skb)->nr_frags - 1; + for (i = 0; i <= last; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + len = frag->size; + mapping = pci_map_page(tp->pdev, + frag->page, + frag->page_offset, + len, PCI_DMA_TODEVICE); + + tp->tx_buffers[entry].skb = NULL; + pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping); + + tg3_set_txd(tp, entry, mapping, len, + base_flags, (i == last) | (mss << 1)); + + entry = NEXT_TX(entry); + } + } + + /* Packets are ready, update Tx producer idx local and on card. */ + tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); + + tp->tx_prod = entry; + if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) { + netif_stop_queue(dev); + if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH) + netif_wake_queue(tp->dev); + } + +out_unlock: + mmiowb(); + spin_unlock(&tp->tx_lock); + + dev->trans_start = jiffies; + + return NETDEV_TX_OK; +} + +/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and + * support TG3_FLG2_HW_TSO_1 or firmware TSO only. + */ +static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + dma_addr_t mapping; + u32 len, entry, base_flags, mss; int would_hit_hwbug; len = skb_headlen(skb); @@ -4369,6 +4560,11 @@ static int tg3_chip_reset(struct tg3 *tp) tp->nvram_lock_cnt = 0; } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + tw32(GRC_FASTBOOT_PC, 0); + /* * We must avoid the readl() that normally takes place. 
* It locks machines, causes machine checks, and other @@ -5518,6 +5714,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + if (!netif_running(dev)) + return 0; + spin_lock_bh(&tp->lock); __tg3_set_mac_addr(tp); spin_unlock_bh(&tp->lock); @@ -5585,6 +5784,9 @@ static int tg3_reset_hw(struct tg3 *tp) tg3_abort_hw(tp, 1); } + if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) + tg3_phy_reset(tp); + err = tg3_chip_reset(tp); if (err) return err; @@ -5955,6 +6157,9 @@ static int tg3_reset_hw(struct tg3 *tp) gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | GRC_LCLCTRL_GPIO_OUTPUT3; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; + tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; /* GPIO1 must be driven high for eeprom write protect */ @@ -5993,6 +6198,11 @@ static int tg3_reset_hw(struct tg3 *tp) } } + /* Enable host coalescing bug fix */ + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)) + val |= (1 << 29); + tw32_f(WDMAC_MODE, val); udelay(40); @@ -6048,6 +6258,9 @@ static int tg3_reset_hw(struct tg3 *tp) udelay(100); tp->rx_mode = RX_MODE_ENABLE; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; + tw32_f(MAC_RX_MODE, tp->rx_mode); udelay(10); @@ -6097,6 +6310,17 @@ static int tg3_reset_hw(struct tg3 *tp) tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG; } + if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) { + u32 tmp; + + tmp = tr32(SERDES_RX_CTRL); + tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); + tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; + tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; + tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); + } + err = tg3_setup_phy(tp, 1); if (err) return err; @@ -6175,7 +6399,7 @@ static int tg3_init_hw(struct tg3 *tp) int err; /* Force the chip into D0. 
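The new tg3_start_xmit() above relies on the lockless-TX convention: try the TX lock, hand NETDEV_TX_LOCKED back to the core if it is contended, and stop/wake the queue around ring occupancy. A bare skeleton of that shape (example_priv, example_ring_space() and example_queue_skb() are invented for illustration) is:

static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *ep = netdev_priv(dev);

	/* LLTX style: no BH fiddling, let the core retry if contended */
	if (!spin_trylock(&ep->tx_lock))
		return NETDEV_TX_LOCKED;

	if (example_ring_space(ep) <= skb_shinfo(skb)->nr_frags + 1) {
		netif_stop_queue(dev);		/* ring full */
		spin_unlock(&ep->tx_lock);
		return NETDEV_TX_BUSY;
	}

	example_queue_skb(ep, skb);		/* map buffers, post descriptors */

	if (example_ring_space(ep) <= MAX_SKB_FRAGS + 1)
		netif_stop_queue(dev);		/* TX reclaim will wake us */

	spin_unlock(&ep->tx_lock);
	dev->trans_start = jiffies;
	return NETDEV_TX_OK;
}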
*/ - err = tg3_set_power_state(tp, 0); + err = tg3_set_power_state(tp, PCI_D0); if (err) goto out; @@ -6331,6 +6555,26 @@ static void tg3_timer(unsigned long __opaque) add_timer(&tp->timer); } +static int tg3_request_irq(struct tg3 *tp) +{ + irqreturn_t (*fn)(int, void *, struct pt_regs *); + unsigned long flags; + struct net_device *dev = tp->dev; + + if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { + fn = tg3_msi; + if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) + fn = tg3_msi_1shot; + flags = SA_SAMPLE_RANDOM; + } else { + fn = tg3_interrupt; + if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) + fn = tg3_interrupt_tagged; + flags = SA_SHIRQ | SA_SAMPLE_RANDOM; + } + return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev)); +} + static int tg3_test_interrupt(struct tg3 *tp) { struct net_device *dev = tp->dev; @@ -6367,16 +6611,7 @@ static int tg3_test_interrupt(struct tg3 *tp) free_irq(tp->pdev->irq, dev); - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) - err = request_irq(tp->pdev->irq, tg3_msi, - SA_SAMPLE_RANDOM, dev->name, dev); - else { - irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt; - if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) - fn = tg3_interrupt_tagged; - err = request_irq(tp->pdev->irq, fn, - SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev); - } + err = tg3_request_irq(tp); if (err) return err; @@ -6428,14 +6663,7 @@ static int tg3_test_msi(struct tg3 *tp) tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; - { - irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt; - if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) - fn = tg3_interrupt_tagged; - - err = request_irq(tp->pdev->irq, fn, - SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev); - } + err = tg3_request_irq(tp); if (err) return err; @@ -6462,6 +6690,10 @@ static int tg3_open(struct net_device *dev) tg3_full_lock(tp, 0); + err = tg3_set_power_state(tp, PCI_D0); + if (err) + return err; + tg3_disable_ints(tp); tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; @@ -6476,7 +6708,9 @@ static int tg3_open(struct net_device *dev) if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) && - (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) { + (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) && + !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) && + (tp->pdev_peer == tp->pdev))) { /* All MSI supporting chips should support tagged * status. Assert that this is the case. 
*/ @@ -6491,17 +6725,7 @@ static int tg3_open(struct net_device *dev) tp->tg3_flags2 |= TG3_FLG2_USING_MSI; } } - if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) - err = request_irq(tp->pdev->irq, tg3_msi, - SA_SAMPLE_RANDOM, dev->name, dev); - else { - irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt; - if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) - fn = tg3_interrupt_tagged; - - err = request_irq(tp->pdev->irq, fn, - SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev); - } + err = tg3_request_irq(tp); if (err) { if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { @@ -6566,6 +6790,14 @@ static int tg3_open(struct net_device *dev) return err; } + + if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { + if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) { + u32 val = tr32(0x7c04); + + tw32(0x7c04, val | (1 << 29)); + } + } } tg3_full_lock(tp, 0); @@ -6839,7 +7071,6 @@ static int tg3_close(struct net_device *dev) tp->tg3_flags &= ~(TG3_FLAG_INIT_COMPLETE | TG3_FLAG_GOT_SERDES_FLOWCTL); - netif_carrier_off(tp->dev); tg3_full_unlock(tp); @@ -6856,6 +7087,10 @@ static int tg3_close(struct net_device *dev) tg3_free_consistent(tp); + tg3_set_power_state(tp, PCI_D3hot); + + netif_carrier_off(tp->dev); + return 0; } @@ -7150,6 +7385,9 @@ static void tg3_set_rx_mode(struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); + if (!netif_running(dev)) + return; + tg3_full_lock(tp, 0); __tg3_set_rx_mode(dev); tg3_full_unlock(tp); @@ -7174,6 +7412,9 @@ static void tg3_get_regs(struct net_device *dev, memset(p, 0, TG3_REGDUMP_LEN); + if (tp->link_config.phy_is_low_power) + return; + tg3_full_lock(tp, 0); #define __GET_REG32(reg) (*(p)++ = tr32(reg)) @@ -7240,6 +7481,7 @@ static int tg3_get_eeprom_len(struct net_device *dev) } static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val); +static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val); static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { @@ -7248,6 +7490,9 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *pd; u32 i, offset, len, val, b_offset, b_count; + if (tp->link_config.phy_is_low_power) + return -EAGAIN; + offset = eeprom->offset; len = eeprom->len; eeprom->len = 0; @@ -7309,6 +7554,9 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u32 offset, len, b_offset, odd_len, start, end; u8 *buf; + if (tp->link_config.phy_is_low_power) + return -EAGAIN; + if (eeprom->magic != TG3_EEPROM_MAGIC) return -EINVAL; @@ -7442,6 +7690,7 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info strcpy(info->driver, DRV_MODULE_NAME); strcpy(info->version, DRV_MODULE_VERSION); + strcpy(info->fw_version, tp->fw_ver); strcpy(info->bus_info, pci_name(tp->pdev)); } @@ -7536,11 +7785,20 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam * ering->rx_max_pending = TG3_RX_RING_SIZE - 1; ering->rx_mini_max_pending = 0; - ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1; + if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) + ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1; + else + ering->rx_jumbo_max_pending = 0; + + ering->tx_max_pending = TG3_TX_RING_SIZE - 1; ering->rx_pending = tp->rx_pending; ering->rx_mini_pending = 0; - ering->rx_jumbo_pending = tp->rx_jumbo_pending; + if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) + ering->rx_jumbo_pending = tp->rx_jumbo_pending; + else + ering->rx_jumbo_pending = 0; + ering->tx_pending = tp->tx_pending; } @@ -7661,10 +7919,11 @@ static int 
tg3_set_tx_csum(struct net_device *dev, u32 data) return 0; } - if (data) - dev->features |= NETIF_F_IP_CSUM; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + ethtool_op_set_tx_hw_csum(dev, data); else - dev->features &= ~NETIF_F_IP_CSUM; + ethtool_op_set_tx_csum(dev, data); return 0; } @@ -7734,29 +7993,52 @@ static void tg3_get_ethtool_stats (struct net_device *dev, } #define NVRAM_TEST_SIZE 0x100 +#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14 static int tg3_test_nvram(struct tg3 *tp) { - u32 *buf, csum; - int i, j, err = 0; + u32 *buf, csum, magic; + int i, j, err = 0, size; + + if (tg3_nvram_read_swab(tp, 0, &magic) != 0) + return -EIO; + + if (magic == TG3_EEPROM_MAGIC) + size = NVRAM_TEST_SIZE; + else if ((magic & 0xff000000) == 0xa5000000) { + if ((magic & 0xe00000) == 0x200000) + size = NVRAM_SELFBOOT_FORMAT1_SIZE; + else + return 0; + } else + return -EIO; - buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL); + buf = kmalloc(size, GFP_KERNEL); if (buf == NULL) return -ENOMEM; - for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) { + err = -EIO; + for (i = 0, j = 0; i < size; i += 4, j++) { u32 val; if ((err = tg3_nvram_read(tp, i, &val)) != 0) break; buf[j] = cpu_to_le32(val); } - if (i < NVRAM_TEST_SIZE) + if (i < size) goto out; - err = -EIO; - if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) - goto out; + /* Selfboot format */ + if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) { + u8 *buf8 = (u8 *) buf, csum8 = 0; + + for (i = 0; i < size; i++) + csum8 += buf8[i]; + + if (csum8 == 0) + return 0; + return -EIO; + } /* Bootstrap checksum at offset 0x10 */ csum = calc_crc((unsigned char *) buf, 0x10); @@ -7802,7 +8084,7 @@ static int tg3_test_link(struct tg3 *tp) } /* Only test the commonly used registers */ -static const int tg3_test_registers(struct tg3 *tp) +static int tg3_test_registers(struct tg3 *tp) { int i, is_5705; u32 offset, read_mask, write_mask, val, save_val, read_val; @@ -8050,14 +8332,25 @@ static int tg3_test_memory(struct tg3 *tp) { 0x00008000, 0x02000}, { 0x00010000, 0x0e000}, { 0xffffffff, 0x00000} + }, mem_tbl_5755[] = { + { 0x00000200, 0x00008}, + { 0x00004000, 0x00800}, + { 0x00006000, 0x00800}, + { 0x00008000, 0x02000}, + { 0x00010000, 0x0c000}, + { 0xffffffff, 0x00000} }; struct mem_entry *mem_tbl; int err = 0; int i; - if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) - mem_tbl = mem_tbl_5705; - else + if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + mem_tbl = mem_tbl_5755; + else + mem_tbl = mem_tbl_5705; + } else mem_tbl = mem_tbl_570x; for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { @@ -8229,6 +8522,9 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, { struct tg3 *tp = netdev_priv(dev); + if (tp->link_config.phy_is_low_power) + tg3_set_power_state(tp, PCI_D0); + memset(data, 0, sizeof(u64) * TG3_NUM_TEST); if (tg3_test_nvram(tp) != 0) { @@ -8257,6 +8553,9 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, if (!err) tg3_nvram_unlock(tp); + if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) + tg3_phy_reset(tp); + if (tg3_test_registers(tp) != 0) { etest->flags |= ETH_TEST_FL_FAILED; data[2] = 1; @@ -8286,6 +8585,9 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, tg3_full_unlock(tp); } + if (tp->link_config.phy_is_low_power) + tg3_set_power_state(tp, PCI_D3hot); + } static int tg3_ioctl(struct net_device *dev, struct ifreq 
*ifr, int cmd) @@ -8305,6 +8607,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) break; /* We have no PHY */ + if (tp->link_config.phy_is_low_power) + return -EAGAIN; + spin_lock_bh(&tp->lock); err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval); spin_unlock_bh(&tp->lock); @@ -8321,6 +8626,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (!capable(CAP_NET_ADMIN)) return -EPERM; + if (tp->link_config.phy_is_low_power) + return -EAGAIN; + spin_lock_bh(&tp->lock); err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in); spin_unlock_bh(&tp->lock); @@ -8464,14 +8772,14 @@ static struct ethtool_ops tg3_ethtool_ops = { static void __devinit tg3_get_eeprom_size(struct tg3 *tp) { - u32 cursize, val; + u32 cursize, val, magic; tp->nvram_size = EEPROM_CHIP_SIZE; - if (tg3_nvram_read(tp, 0, &val) != 0) + if (tg3_nvram_read_swab(tp, 0, &magic) != 0) return; - if (swab32(val) != TG3_EEPROM_MAGIC) + if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000)) return; /* @@ -8479,13 +8787,13 @@ static void __devinit tg3_get_eeprom_size(struct tg3 *tp) * When we encounter our validation signature, we know the addressing * has wrapped around, and thus have our chip size. */ - cursize = 0x800; + cursize = 0x10; while (cursize < tp->nvram_size) { - if (tg3_nvram_read(tp, cursize, &val) != 0) + if (tg3_nvram_read_swab(tp, cursize, &val) != 0) return; - if (swab32(val) == TG3_EEPROM_MAGIC) + if (val == magic) break; cursize <<= 1; @@ -8498,6 +8806,15 @@ static void __devinit tg3_get_nvram_size(struct tg3 *tp) { u32 val; + if (tg3_nvram_read_swab(tp, 0, &val) != 0) + return; + + /* Selfboot format */ + if (val != TG3_EEPROM_MAGIC) { + tg3_get_eeprom_size(tp); + return; + } + if (tg3_nvram_read(tp, 0xf0, &val) == 0) { if (val != 0) { tp->nvram_size = (val >> 16) * 1024; @@ -8621,6 +8938,85 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) } } +static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) + tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ: + case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + break; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_5755VENDOR_ATMEL_FLASH_1: + case FLASH_5755VENDOR_ATMEL_FLASH_2: + case FLASH_5755VENDOR_ATMEL_FLASH_3: + case FLASH_5755VENDOR_ATMEL_FLASH_4: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->nvram_pagesize = 264; + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->nvram_pagesize = 256; + break; + } +} + +static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: + case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + 
tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + break; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_5755VENDOR_ATMEL_FLASH_1: + case FLASH_5755VENDOR_ATMEL_FLASH_2: + case FLASH_5755VENDOR_ATMEL_FLASH_3: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->nvram_pagesize = 264; + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->nvram_pagesize = 256; + break; + } +} + /* Chips other than 5700/5701 use the NVRAM for fetching info. */ static void __devinit tg3_nvram_init(struct tg3 *tp) { @@ -8656,6 +9052,10 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) tg3_get_5752_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + tg3_get_5755_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + tg3_get_5787_nvram_info(tp); else tg3_get_nvram_info(tp); @@ -8725,6 +9125,34 @@ static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) return 0; } +static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) +{ + if ((tp->tg3_flags & TG3_FLAG_NVRAM) && + (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && + (tp->tg3_flags2 & TG3_FLG2_FLASH) && + (tp->nvram_jedecnum == JEDEC_ATMEL)) + + addr = ((addr / tp->nvram_pagesize) << + ATMEL_AT45DB0X1B_PAGE_POS) + + (addr % tp->nvram_pagesize); + + return addr; +} + +static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) +{ + if ((tp->tg3_flags & TG3_FLAG_NVRAM) && + (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && + (tp->tg3_flags2 & TG3_FLG2_FLASH) && + (tp->nvram_jedecnum == JEDEC_ATMEL)) + + addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * + tp->nvram_pagesize) + + (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); + + return addr; +} + static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) { int ret; @@ -8737,14 +9165,7 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) return tg3_nvram_read_using_eeprom(tp, offset, val); - if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && - (tp->tg3_flags2 & TG3_FLG2_FLASH) && - (tp->nvram_jedecnum == JEDEC_ATMEL)) { - - offset = ((offset / tp->nvram_pagesize) << - ATMEL_AT45DB0X1B_PAGE_POS) + - (offset % tp->nvram_pagesize); - } + offset = tg3_nvram_phys_addr(tp, offset); if (offset > NVRAM_ADDR_MSK) return -EINVAL; @@ -8769,6 +9190,16 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) return ret; } +static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val) +{ + int err; + u32 tmp; + + err = tg3_nvram_read(tp, offset, &tmp); + *val = swab32(tmp); + return err; +} + static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, u32 offset, u32 len, u8 *buf) { @@ -8921,15 +9352,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, page_off = offset % tp->nvram_pagesize; - if ((tp->tg3_flags2 & TG3_FLG2_FLASH) && - (tp->nvram_jedecnum == JEDEC_ATMEL)) { - - phy_addr = ((offset / tp->nvram_pagesize) << - ATMEL_AT45DB0X1B_PAGE_POS) + page_off; - } - else { - phy_addr = offset; - } + phy_addr = tg3_nvram_phys_addr(tp, offset); tw32(NVRAM_ADDR, phy_addr); @@ -8944,6 +9367,8 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, 
u32 offset, u32 len, nvram_cmd |= NVRAM_CMD_LAST; if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) && + (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) && + (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) && (tp->nvram_jedecnum == JEDEC_ST) && (nvram_cmd & NVRAM_CMD_FIRST)) { @@ -9081,12 +9506,18 @@ static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp) return NULL; } -/* Since this function may be called in D3-hot power state during - * tg3_init_one(), only config cycles are allowed. - */ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) { u32 val; + u16 pmcsr; + + /* On some early chips the SRAM cannot be accessed in D3hot state, + * so need make sure we're in D0. + */ + pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr); + pmcsr &= ~PCI_PM_CTRL_STATE_MASK; + pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr); + msleep(1); /* Make sure register accesses (indirect or otherwise) * will function correctly. @@ -9347,6 +9778,7 @@ static void __devinit tg3_read_partno(struct tg3 *tp) { unsigned char vpd_data[256]; int i; + u32 magic; if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) { /* Sun decided not to put the necessary bits in the @@ -9356,16 +9788,43 @@ static void __devinit tg3_read_partno(struct tg3 *tp) return; } - for (i = 0; i < 256; i += 4) { - u32 tmp; + if (tg3_nvram_read_swab(tp, 0x0, &magic)) + return; - if (tg3_nvram_read(tp, 0x100 + i, &tmp)) - goto out_not_found; + if (magic == TG3_EEPROM_MAGIC) { + for (i = 0; i < 256; i += 4) { + u32 tmp; + + if (tg3_nvram_read(tp, 0x100 + i, &tmp)) + goto out_not_found; - vpd_data[i + 0] = ((tmp >> 0) & 0xff); - vpd_data[i + 1] = ((tmp >> 8) & 0xff); - vpd_data[i + 2] = ((tmp >> 16) & 0xff); - vpd_data[i + 3] = ((tmp >> 24) & 0xff); + vpd_data[i + 0] = ((tmp >> 0) & 0xff); + vpd_data[i + 1] = ((tmp >> 8) & 0xff); + vpd_data[i + 2] = ((tmp >> 16) & 0xff); + vpd_data[i + 3] = ((tmp >> 24) & 0xff); + } + } else { + int vpd_cap; + + vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD); + for (i = 0; i < 256; i += 4) { + u32 tmp, j = 0; + u16 tmp16; + + pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR, + i); + while (j++ < 100) { + pci_read_config_word(tp->pdev, vpd_cap + + PCI_VPD_ADDR, &tmp16); + if (tmp16 & 0x8000) + break; + msleep(1); + } + pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA, + &tmp); + tmp = cpu_to_le32(tmp); + memcpy(&vpd_data[i], &tmp, 4); + } } /* Now parse and find the part number. 
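tg3_nvram_phys_addr() and tg3_nvram_logical_addr() above only shuffle bits: Atmel DataFlash pages are 264 bytes long but start on a power-of-two boundary, so the page index is shifted up past the in-page offset (ATMEL_AT45DB0X1B_PAGE_POS, presumably 9 since a 264-byte page needs 9 offset bits) while the in-page offset is kept as-is. A standalone round-trip of the same arithmetic, with nothing taken from the driver beyond the formula:

#include <stdio.h>

static unsigned int to_phys(unsigned int addr, unsigned int pagesize,
			    unsigned int page_pos)
{
	return ((addr / pagesize) << page_pos) + (addr % pagesize);
}

static unsigned int to_logical(unsigned int addr, unsigned int pagesize,
			       unsigned int page_pos)
{
	return ((addr >> page_pos) * pagesize) +
	       (addr & ((1u << page_pos) - 1));
}

int main(void)
{
	unsigned int linear = 1000;			/* arbitrary offset */
	unsigned int phys = to_phys(linear, 264, 9);	/* page 3, offset 208 */

	printf("linear %u -> phys %u -> linear %u\n",
	       linear, phys, to_logical(phys, 264, 9));	/* 1000 -> 1744 -> 1000 */
	return 0;
}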
*/ @@ -9412,6 +9871,46 @@ out_not_found: strcpy(tp->board_part_number, "none"); } +static void __devinit tg3_read_fw_ver(struct tg3 *tp) +{ + u32 val, offset, start; + + if (tg3_nvram_read_swab(tp, 0, &val)) + return; + + if (val != TG3_EEPROM_MAGIC) + return; + + if (tg3_nvram_read_swab(tp, 0xc, &offset) || + tg3_nvram_read_swab(tp, 0x4, &start)) + return; + + offset = tg3_nvram_logical_addr(tp, offset); + if (tg3_nvram_read_swab(tp, offset, &val)) + return; + + if ((val & 0xfc000000) == 0x0c000000) { + u32 ver_offset, addr; + int i; + + if (tg3_nvram_read_swab(tp, offset + 4, &val) || + tg3_nvram_read_swab(tp, offset + 8, &ver_offset)) + return; + + if (val != 0) + return; + + addr = offset + ver_offset - start; + for (i = 0; i < 16; i += 4) { + if (tg3_nvram_read(tp, addr + i, &val)) + return; + + val = cpu_to_le32(val); + memcpy(tp->fw_ver + i, &val, 4); + } + } +} + #ifdef CONFIG_SPARC64 static int __devinit tg3_is_sun_570X(struct tg3 *tp) { @@ -9603,6 +10102,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; @@ -9610,12 +10111,20 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) tp->tg3_flags2 |= TG3_FLG2_5705_PLUS; - if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) - tp->tg3_flags2 |= TG3_FLG2_HW_TSO; + if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) { + tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; + tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; + } else + tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1; + } if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE; if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0) @@ -9771,8 +10280,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; + /* Force the chip into D0. 
*/ - err = tg3_set_power_state(tp, 0); + err = tg3_set_power_state(tp, PCI_D0); if (err) { printk(KERN_ERR PFX "(%s) transition to D0 failed\n", pci_name(tp->pdev)); @@ -9825,7 +10337,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; - if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) + if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && + (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) && + (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)) tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; tp->coalesce_mode = 0; @@ -9925,6 +10439,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) } tg3_read_partno(tp); + tg3_read_fw_ver(tp); if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; @@ -9960,10 +10475,14 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) else tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; - /* It seems all chips can get confused if TX buffers + /* All chips before 5787 can get confused if TX buffers * straddle the 4GB address boundary in some cases. */ - tp->dev->hard_start_xmit = tg3_start_xmit; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + tp->dev->hard_start_xmit = tg3_start_xmit; + else + tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug; tp->rx_offset = 2; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && @@ -10491,7 +11010,6 @@ static void __devinit tg3_init_link_config(struct tg3 *tp) tp->link_config.speed = SPEED_INVALID; tp->link_config.duplex = DUPLEX_INVALID; tp->link_config.autoneg = AUTONEG_ENABLE; - netif_carrier_off(tp->dev); tp->link_config.active_speed = SPEED_INVALID; tp->link_config.active_duplex = DUPLEX_INVALID; tp->link_config.phy_is_low_power = 0; @@ -10550,6 +11068,8 @@ static char * __devinit tg3_phy_string(struct tg3 *tp) case PHY_ID_BCM5752: return "5752"; case PHY_ID_BCM5714: return "5714"; case PHY_ID_BCM5780: return "5780"; + case PHY_ID_BCM5755: return "5755"; + case PHY_ID_BCM5787: return "5787"; case PHY_ID_BCM8002: return "8002/serdes"; case 0: return "serdes"; default: return "unknown"; @@ -10848,11 +11368,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; } - /* TSO is off by default, user can enable using ethtool. */ -#if 0 - if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) + /* TSO is on by default on chips that support hardware TSO. + * Firmware TSO on older chips gives lower performance, so it + * is off by default, but can be enabled using ethtool. + */ + if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) dev->features |= NETIF_F_TSO; -#endif #endif @@ -10896,7 +11417,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, * checksumming. */ if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) { - dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + dev->features |= NETIF_F_HW_CSUM; + else + dev->features |= NETIF_F_IP_CSUM; + dev->features |= NETIF_F_SG; tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; } else tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; @@ -10949,6 +11475,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, (pdev->dma_mask == DMA_32BIT_MASK) ? 32 : (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 
40 : 64)); + netif_carrier_off(tp->dev); + return 0; err_out_iounmap: @@ -11044,7 +11572,7 @@ static int tg3_resume(struct pci_dev *pdev) pci_restore_state(tp->pdev); - err = tg3_set_power_state(tp, 0); + err = tg3_set_power_state(tp, PCI_D0); if (err) return err; diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index 7e3b613afb29..c43cc3264202 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h @@ -138,6 +138,8 @@ #define ASIC_REV_5752 0x06 #define ASIC_REV_5780 0x08 #define ASIC_REV_5714 0x09 +#define ASIC_REV_5755 0x0a +#define ASIC_REV_5787 0x0b #define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) #define CHIPREV_5700_AX 0x70 #define CHIPREV_5700_BX 0x71 @@ -455,6 +457,7 @@ #define RX_MODE_PROMISC 0x00000100 #define RX_MODE_NO_CRC_CHECK 0x00000200 #define RX_MODE_KEEP_VLAN_TAG 0x00000400 +#define RX_MODE_IPV6_CSUM_ENABLE 0x01000000 #define MAC_RX_STATUS 0x0000046c #define RX_STATUS_REMOTE_TX_XOFFED 0x00000001 #define RX_STATUS_XOFF_RCVD 0x00000002 @@ -1339,6 +1342,7 @@ #define GRC_LCLCTRL_CLEARINT 0x00000002 #define GRC_LCLCTRL_SETINT 0x00000004 #define GRC_LCLCTRL_INT_ON_ATTN 0x00000008 +#define GRC_LCLCTRL_GPIO_UART_SEL 0x00000010 /* 5755 only */ #define GRC_LCLCTRL_USE_SIG_DETECT 0x00000010 /* 5714/5780 only */ #define GRC_LCLCTRL_USE_EXT_SIG_DETECT 0x00000020 /* 5714/5780 only */ #define GRC_LCLCTRL_GPIO_INPUT3 0x00000020 @@ -1393,6 +1397,7 @@ #define GRC_MDI_CTRL 0x00006844 #define GRC_SEEPROM_DELAY 0x00006848 /* 0x684c --> 0x6c00 unused */ +#define GRC_FASTBOOT_PC 0x00006894 /* 5752, 5755, 5787 */ /* 0x6c00 --> 0x7000 unused */ @@ -1436,6 +1441,16 @@ #define FLASH_5752VENDOR_ST_M45PE10 0x02400000 #define FLASH_5752VENDOR_ST_M45PE20 0x02400002 #define FLASH_5752VENDOR_ST_M45PE40 0x02400001 +#define FLASH_5755VENDOR_ATMEL_FLASH_1 0x03400001 +#define FLASH_5755VENDOR_ATMEL_FLASH_2 0x03400002 +#define FLASH_5755VENDOR_ATMEL_FLASH_3 0x03400000 +#define FLASH_5755VENDOR_ATMEL_FLASH_4 0x00000003 +#define FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ 0x03c00003 +#define FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ 0x03c00002 +#define FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ 0x03000003 +#define FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ 0x03000002 +#define FLASH_5787VENDOR_MICRO_EEPROM_64KHZ 0x03000000 +#define FLASH_5787VENDOR_MICRO_EEPROM_376KHZ 0x02000000 #define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000 #define FLASH_5752PAGE_SIZE_256 0x00000000 #define FLASH_5752PAGE_SIZE_512 0x10000000 @@ -2185,7 +2200,7 @@ struct tg3 { #define TG3_FLG2_PHY_SERDES 0x00002000 #define TG3_FLG2_CAPACITIVE_COUPLING 0x00004000 #define TG3_FLG2_FLASH 0x00008000 -#define TG3_FLG2_HW_TSO 0x00010000 +#define TG3_FLG2_HW_TSO_1 0x00010000 #define TG3_FLG2_SERDES_PREEMPHASIS 0x00020000 #define TG3_FLG2_5705_PLUS 0x00040000 #define TG3_FLG2_5750_PLUS 0x00080000 @@ -2198,6 +2213,9 @@ struct tg3 { #define TG3_FLG2_PARALLEL_DETECT 0x01000000 #define TG3_FLG2_ICH_WORKAROUND 0x02000000 #define TG3_FLG2_5780_CLASS 0x04000000 +#define TG3_FLG2_HW_TSO_2 0x08000000 +#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2) +#define TG3_FLG2_1SHOT_MSI 0x10000000 u32 split_mode_max_reqs; #define SPLIT_MODE_5704_MAX_REQ 3 @@ -2247,6 +2265,8 @@ struct tg3 { #define PHY_ID_BCM5752 0x60008100 #define PHY_ID_BCM5714 0x60008340 #define PHY_ID_BCM5780 0x60008350 +#define PHY_ID_BCM5755 0xbc050cc0 +#define PHY_ID_BCM5787 0xbc050ce0 #define PHY_ID_BCM8002 0x60010140 #define PHY_ID_INVALID 0xffffffff #define PHY_ID_REV_MASK 0x0000000f @@ -2258,6 +2278,7 @@ struct tg3 { u32 led_ctrl; char board_part_number[24]; + char fw_ver[16]; u32 
nic_sram_data_cfg; u32 pci_clock_ctrl; struct pci_dev *pdev_peer; @@ -2271,7 +2292,8 @@ struct tg3 { (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \ (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \ (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5714 || \ - (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM8002) + (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \ + (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM8002) struct tg3_hw_stats *hw_stats; dma_addr_t stats_mapping; diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index 6299e186c73f..e3dd144d326b 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c @@ -1327,11 +1327,11 @@ static void de_clean_rings (struct de_private *de) struct sk_buff *skb = de->tx_skb[i].skb; if ((skb) && (skb != DE_DUMMY_SKB)) { if (skb != DE_SETUP_SKB) { - dev_kfree_skb(skb); de->net_stats.tx_dropped++; pci_unmap_single(de->pdev, de->tx_skb[i].mapping, skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb(skb); } else { pci_unmap_single(de->pdev, de->tx_skb[i].mapping, diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c index db2c798ba89e..175ba13bce41 100644 --- a/drivers/net/wan/sbni.c +++ b/drivers/net/wan/sbni.c @@ -1495,8 +1495,7 @@ module_param(skip_pci_probe, bool, 0); MODULE_LICENSE("GPL"); -int -init_module( void ) +int __init init_module( void ) { struct net_device *dev; int err; diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 5b0a19a5058d..6a1033ec06cf 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -25,6 +25,15 @@ config NET_RADIO the tools from <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>. +config NET_WIRELESS_RTNETLINK + bool "Wireless Extension API over RtNetlink" + ---help--- + Support the Wireless Extension API over the RtNetlink socket + in addition to the traditional ioctl interface (selected above). + + For now, few tools use this facility, but it might grow in the + future. The only downside is that it adds 4.5 kB to your kernel. + # Note : the cards are obsolete (can't buy them anymore), but the drivers # are not, as people are still using them... 
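The de2104x hunk above is a use-after-free fix rather than a cosmetic reordering: pci_unmap_single() still needs skb->len, so the skb may only be freed once the unmap has run. The minimal safe ordering, shown here with hypothetical surrounding context, is simply:

static void example_free_tx_slot(struct pci_dev *pdev, dma_addr_t mapping,
				 struct sk_buff *skb)
{
	/* unmap first -- it still dereferences skb->len */
	pci_unmap_single(pdev, mapping, skb->len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);
}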
comment "Obsolete Wireless cards support (pre-802.11)" diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index 864937a409e5..108d9fed8f07 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -770,6 +770,11 @@ typedef struct { } BSSListRid; typedef struct { + BSSListRid bss; + struct list_head list; +} BSSListElement; + +typedef struct { u8 rssipct; u8 rssidBm; } tdsRssiEntry; @@ -902,6 +907,7 @@ static char swversion[] = "2.1"; #define NUM_MODULES 2 #define MIC_MSGLEN_MAX 2400 #define EMMH32_MSGLEN_MAX MIC_MSGLEN_MAX +#define AIRO_DEF_MTU 2312 typedef struct { u32 size; // size @@ -1119,6 +1125,8 @@ static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *pPacket, static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi); static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm); +static void airo_networks_free(struct airo_info *ai); + struct airo_info { struct net_device_stats stats; struct net_device *dev; @@ -1150,7 +1158,7 @@ struct airo_info { #define FLAG_COMMIT 13 #define FLAG_RESET 14 #define FLAG_FLASHING 15 -#define JOB_MASK 0x1ff0000 +#define JOB_MASK 0x2ff0000 #define JOB_DIE 16 #define JOB_XMIT 17 #define JOB_XMIT11 18 @@ -1160,6 +1168,7 @@ struct airo_info { #define JOB_EVENT 22 #define JOB_AUTOWEP 23 #define JOB_WSTATS 24 +#define JOB_SCAN_RESULTS 25 int (*bap_read)(struct airo_info*, u16 *pu16Dst, int bytelen, int whichbap); unsigned short *flash; @@ -1176,7 +1185,7 @@ struct airo_info { } xmit, xmit11; struct net_device *wifidev; struct iw_statistics wstats; // wireless stats - unsigned long scan_timestamp; /* Time started to scan */ + unsigned long scan_timeout; /* Time scan should be read */ struct iw_spy_data spy_data; struct iw_public_data wireless_data; /* MIC stuff */ @@ -1198,6 +1207,10 @@ struct airo_info { APListRid *APList; #define PCI_SHARED_LEN 2*MPI_MAX_FIDS*PKTSIZE+RIDSIZE char proc_name[IFNAMSIZ]; + + struct list_head network_list; + struct list_head network_free_list; + BSSListElement *networks; }; static inline int bap_read(struct airo_info *ai, u16 *pu16Dst, int bytelen, @@ -1216,6 +1229,22 @@ static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime); static int flashputbuf(struct airo_info *ai); static int flashrestart(struct airo_info *ai,struct net_device *dev); +#define airo_print(type, name, fmt, args...) \ + { printk(type "airo(%s): " fmt "\n", name, ##args); } + +#define airo_print_info(name, fmt, args...) \ + airo_print(KERN_INFO, name, fmt, ##args) + +#define airo_print_dbg(name, fmt, args...) \ + airo_print(KERN_DEBUG, name, fmt, ##args) + +#define airo_print_warn(name, fmt, args...) \ + airo_print(KERN_WARNING, name, fmt, ##args) + +#define airo_print_err(name, fmt, args...) 
\ + airo_print(KERN_ERR, name, fmt, ##args) + + /*********************************************************************** * MIC ROUTINES * *********************************************************************** @@ -1294,7 +1323,7 @@ static int micsetup(struct airo_info *ai) { ai->tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_REQ_MAY_SLEEP); if (ai->tfm == NULL) { - printk(KERN_ERR "airo: failed to load transform for AES\n"); + airo_print_err(ai->dev->name, "failed to load transform for AES"); return ERROR; } @@ -1726,11 +1755,11 @@ static int writeWepKeyRid(struct airo_info*ai, WepKeyRid *pwkr, int perm, int lo wkr.kindex = cpu_to_le16(wkr.kindex); wkr.klen = cpu_to_le16(wkr.klen); rc = PC4500_writerid(ai, RID_WEP_TEMP, &wkr, sizeof(wkr), lock); - if (rc!=SUCCESS) printk(KERN_ERR "airo: WEP_TEMP set %x\n", rc); + if (rc!=SUCCESS) airo_print_err(ai->dev->name, "WEP_TEMP set %x", rc); if (perm) { rc = PC4500_writerid(ai, RID_WEP_PERM, &wkr, sizeof(wkr), lock); if (rc!=SUCCESS) { - printk(KERN_ERR "airo: WEP_PERM set %x\n", rc); + airo_print_err(ai->dev->name, "WEP_PERM set %x", rc); } } return rc; @@ -1909,7 +1938,7 @@ static int mpi_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct airo_info *ai = dev->priv; if (!skb) { - printk(KERN_ERR "airo: %s: skb==NULL\n",__FUNCTION__); + airo_print_err(dev->name, "%s: skb == NULL!",__FUNCTION__); return 0; } npacks = skb_queue_len (&ai->txq); @@ -1955,8 +1984,8 @@ static int mpi_send_packet (struct net_device *dev) /* get a packet to send */ if ((skb = skb_dequeue(&ai->txq)) == 0) { - printk (KERN_ERR - "airo: %s: Dequeue'd zero in send_packet()\n", + airo_print_err(dev->name, + "%s: Dequeue'd zero in send_packet()", __FUNCTION__); return 0; } @@ -2108,7 +2137,7 @@ static int airo_start_xmit(struct sk_buff *skb, struct net_device *dev) { u32 *fids = priv->fids; if ( skb == NULL ) { - printk( KERN_ERR "airo: skb == NULL!!!\n" ); + airo_print_err(dev->name, "%s: skb == NULL!", __FUNCTION__); return 0; } @@ -2179,7 +2208,7 @@ static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) { } if ( skb == NULL ) { - printk( KERN_ERR "airo: skb == NULL!!!\n" ); + airo_print_err(dev->name, "%s: skb == NULL!", __FUNCTION__); return 0; } @@ -2364,6 +2393,8 @@ void stop_airo_card( struct net_device *dev, int freeres ) dev_kfree_skb(skb); } + airo_networks_free (ai); + kfree(ai->flash); kfree(ai->rssi); kfree(ai->APList); @@ -2434,7 +2465,7 @@ static int mpi_init_descriptors (struct airo_info *ai) cmd.parm2 = MPI_MAX_FIDS; rc=issuecommand(ai, &cmd, &rsp); if (rc != SUCCESS) { - printk(KERN_ERR "airo: Couldn't allocate RX FID\n"); + airo_print_err(ai->dev->name, "Couldn't allocate RX FID"); return rc; } @@ -2462,7 +2493,7 @@ static int mpi_init_descriptors (struct airo_info *ai) rc=issuecommand(ai, &cmd, &rsp); if (rc != SUCCESS) { - printk(KERN_ERR "airo: Couldn't allocate TX FID\n"); + airo_print_err(ai->dev->name, "Couldn't allocate TX FID"); return rc; } @@ -2476,7 +2507,7 @@ static int mpi_init_descriptors (struct airo_info *ai) cmd.parm2 = 1; /* Magic number... 
*/ rc=issuecommand(ai, &cmd, &rsp); if (rc != SUCCESS) { - printk(KERN_ERR "airo: Couldn't allocate RID\n"); + airo_print_err(ai->dev->name, "Couldn't allocate RID"); return rc; } @@ -2508,25 +2539,25 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci, aux_len = AUXMEMSIZE; if (!request_mem_region(mem_start, mem_len, name)) { - printk(KERN_ERR "airo: Couldn't get region %x[%x] for %s\n", + airo_print_err(ai->dev->name, "Couldn't get region %x[%x] for %s", (int)mem_start, (int)mem_len, name); goto out; } if (!request_mem_region(aux_start, aux_len, name)) { - printk(KERN_ERR "airo: Couldn't get region %x[%x] for %s\n", + airo_print_err(ai->dev->name, "Couldn't get region %x[%x] for %s", (int)aux_start, (int)aux_len, name); goto free_region1; } ai->pcimem = ioremap(mem_start, mem_len); if (!ai->pcimem) { - printk(KERN_ERR "airo: Couldn't map region %x[%x] for %s\n", + airo_print_err(ai->dev->name, "Couldn't map region %x[%x] for %s", (int)mem_start, (int)mem_len, name); goto free_region2; } ai->pciaux = ioremap(aux_start, aux_len); if (!ai->pciaux) { - printk(KERN_ERR "airo: Couldn't map region %x[%x] for %s\n", + airo_print_err(ai->dev->name, "Couldn't map region %x[%x] for %s", (int)aux_start, (int)aux_len, name); goto free_memmap; } @@ -2534,7 +2565,7 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci, /* Reserve PKTSIZE for each fid and 2K for the Rids */ ai->shared = pci_alloc_consistent(pci, PCI_SHARED_LEN, &ai->shared_dma); if (!ai->shared) { - printk(KERN_ERR "airo: Couldn't alloc_consistent %d\n", + airo_print_err(ai->dev->name, "Couldn't alloc_consistent %d", PCI_SHARED_LEN); goto free_auxmap; } @@ -2626,7 +2657,7 @@ static void wifi_setup(struct net_device *dev) dev->type = ARPHRD_IEEE80211; dev->hard_header_len = ETH_HLEN; - dev->mtu = 2312; + dev->mtu = AIRO_DEF_MTU; dev->addr_len = ETH_ALEN; dev->tx_queue_len = 100; @@ -2670,6 +2701,42 @@ static int reset_card( struct net_device *dev , int lock) { return 0; } +#define MAX_NETWORK_COUNT 64 +static int airo_networks_allocate(struct airo_info *ai) +{ + if (ai->networks) + return 0; + + ai->networks = + kzalloc(MAX_NETWORK_COUNT * sizeof(BSSListElement), + GFP_KERNEL); + if (!ai->networks) { + airo_print_warn(ai->dev->name, "Out of memory allocating beacons"); + return -ENOMEM; + } + + return 0; +} + +static void airo_networks_free(struct airo_info *ai) +{ + if (!ai->networks) + return; + kfree(ai->networks); + ai->networks = NULL; +} + +static void airo_networks_initialize(struct airo_info *ai) +{ + int i; + + INIT_LIST_HEAD(&ai->network_free_list); + INIT_LIST_HEAD(&ai->network_list); + for (i = 0; i < MAX_NETWORK_COUNT; i++) + list_add_tail(&ai->networks[i].list, + &ai->network_free_list); +} + static struct net_device *_init_airo_card( unsigned short irq, int port, int is_pcmcia, struct pci_dev *pci, struct device *dmdev ) @@ -2681,22 +2748,22 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, /* Create the network device object. 
*/ dev = alloc_etherdev(sizeof(*ai)); if (!dev) { - printk(KERN_ERR "airo: Couldn't alloc_etherdev\n"); + airo_print_err("", "Couldn't alloc_etherdev"); return NULL; } if (dev_alloc_name(dev, dev->name) < 0) { - printk(KERN_ERR "airo: Couldn't get name!\n"); + airo_print_err("", "Couldn't get name!"); goto err_out_free; } ai = dev->priv; ai->wifidev = NULL; ai->flags = 0; + ai->dev = dev; if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) { - printk(KERN_DEBUG "airo: Found an MPI350 card\n"); + airo_print_dbg(dev->name, "Found an MPI350 card"); set_bit(FLAG_MPI, &ai->flags); } - ai->dev = dev; spin_lock_init(&ai->aux_lock); sema_init(&ai->sem, 1); ai->config.len = 0; @@ -2711,6 +2778,10 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, if (rc) goto err_out_thr; + if (airo_networks_allocate (ai)) + goto err_out_unlink; + airo_networks_initialize (ai); + /* The Airo-specific entries in the device structure. */ if (test_bit(FLAG_MPI,&ai->flags)) { skb_queue_head_init (&ai->txq); @@ -2732,33 +2803,33 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, SET_NETDEV_DEV(dev, dmdev); - reset_card (dev, 1); msleep(400); rc = request_irq( dev->irq, airo_interrupt, SA_SHIRQ, dev->name, dev ); if (rc) { - printk(KERN_ERR "airo: register interrupt %d failed, rc %d\n", irq, rc ); + airo_print_err(dev->name, "register interrupt %d failed, rc %d", + irq, rc); goto err_out_unlink; } if (!is_pcmcia) { if (!request_region( dev->base_addr, 64, dev->name )) { rc = -EBUSY; - printk(KERN_ERR "airo: Couldn't request region\n"); + airo_print_err(dev->name, "Couldn't request region"); goto err_out_irq; } } if (test_bit(FLAG_MPI,&ai->flags)) { if (mpi_map_card(ai, pci, dev->name)) { - printk(KERN_ERR "airo: Could not map memory\n"); + airo_print_err(dev->name, "Could not map memory"); goto err_out_res; } } if (probe) { if ( setup_card( ai, dev->dev_addr, 1 ) != SUCCESS ) { - printk( KERN_ERR "airo: MAC could not be enabled\n" ); + airo_print_err(dev->name, "MAC could not be enabled" ); rc = -EIO; goto err_out_map; } @@ -2769,21 +2840,20 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, rc = register_netdev(dev); if (rc) { - printk(KERN_ERR "airo: Couldn't register_netdev\n"); + airo_print_err(dev->name, "Couldn't register_netdev"); goto err_out_map; } ai->wifidev = init_wifidev(ai, dev); set_bit(FLAG_REGISTERED,&ai->flags); - printk( KERN_INFO "airo: MAC enabled %s %x:%x:%x:%x:%x:%x\n", - dev->name, + airo_print_info(dev->name, "MAC enabled %x:%x:%x:%x:%x:%x", dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5] ); /* Allocate the transmit buffers */ if (probe && !test_bit(FLAG_MPI,&ai->flags)) for( i = 0; i < MAX_FIDS; i++ ) - ai->fids[i] = transmit_allocate(ai,2312,i>=MAX_FIDS/2); + ai->fids[i] = transmit_allocate(ai,AIRO_DEF_MTU,i>=MAX_FIDS/2); setup_proc_entry( dev, dev->priv ); /* XXX check for failure */ netif_start_queue(dev); @@ -2840,16 +2910,16 @@ int reset_airo_card( struct net_device *dev ) return -1; if ( setup_card(ai, dev->dev_addr, 1 ) != SUCCESS ) { - printk( KERN_ERR "airo: MAC could not be enabled\n" ); + airo_print_err(dev->name, "MAC could not be enabled"); return -1; } - printk( KERN_INFO "airo: MAC enabled %s %x:%x:%x:%x:%x:%x\n", dev->name, + airo_print_info(dev->name, "MAC enabled %x:%x:%x:%x:%x:%x", dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); /* Allocate the transmit buffers if needed */ if 
(!test_bit(FLAG_MPI,&ai->flags)) for( i = 0; i < MAX_FIDS; i++ ) - ai->fids[i] = transmit_allocate (ai,2312,i>=MAX_FIDS/2); + ai->fids[i] = transmit_allocate (ai,AIRO_DEF_MTU,i>=MAX_FIDS/2); enable_interrupts( ai ); netif_wake_queue(dev); @@ -2875,6 +2945,65 @@ static void airo_send_event(struct net_device *dev) { wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); } +static void airo_process_scan_results (struct airo_info *ai) { + union iwreq_data wrqu; + BSSListRid BSSList; + int rc; + BSSListElement * loop_net; + BSSListElement * tmp_net; + + /* Blow away current list of scan results */ + list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) { + list_move_tail (&loop_net->list, &ai->network_free_list); + /* Don't blow away ->list, just BSS data */ + memset (loop_net, 0, sizeof (loop_net->bss)); + } + + /* Try to read the first entry of the scan result */ + rc = PC4500_readrid(ai, RID_BSSLISTFIRST, &BSSList, sizeof(BSSList), 0); + if((rc) || (BSSList.index == 0xffff)) { + /* No scan results */ + goto out; + } + + /* Read and parse all entries */ + tmp_net = NULL; + while((!rc) && (BSSList.index != 0xffff)) { + /* Grab a network off the free list */ + if (!list_empty(&ai->network_free_list)) { + tmp_net = list_entry(ai->network_free_list.next, + BSSListElement, list); + list_del(ai->network_free_list.next); + } + + if (tmp_net != NULL) { + memcpy(tmp_net, &BSSList, sizeof(tmp_net->bss)); + list_add_tail(&tmp_net->list, &ai->network_list); + tmp_net = NULL; + } + + /* Read next entry */ + rc = PC4500_readrid(ai, RID_BSSLISTNEXT, + &BSSList, sizeof(BSSList), 0); + } + +out: + ai->scan_timeout = 0; + clear_bit(JOB_SCAN_RESULTS, &ai->flags); + up(&ai->sem); + + /* Send an empty event to user space. + * We don't send the received data on + * the event because it would require + * us to do complex transcoding, and + * we want to minimise the work done in + * the irq handler. 
Use a request to + * extract the data - Jean II */ + wrqu.data.length = 0; + wrqu.data.flags = 0; + wireless_send_event(ai->dev, SIOCGIWSCAN, &wrqu, NULL); +} + static int airo_thread(void *data) { struct net_device *dev = data; struct airo_info *ai = dev->priv; @@ -2904,13 +3033,26 @@ static int airo_thread(void *data) { set_current_state(TASK_INTERRUPTIBLE); if (ai->flags & JOB_MASK) break; - if (ai->expires) { - if (time_after_eq(jiffies,ai->expires)){ + if (ai->expires || ai->scan_timeout) { + if (ai->scan_timeout && + time_after_eq(jiffies,ai->scan_timeout)){ + set_bit(JOB_SCAN_RESULTS,&ai->flags); + break; + } else if (ai->expires && + time_after_eq(jiffies,ai->expires)){ set_bit(JOB_AUTOWEP,&ai->flags); break; } if (!signal_pending(current)) { - schedule_timeout(ai->expires - jiffies); + unsigned long wake_at; + if (!ai->expires || !ai->scan_timeout) { + wake_at = max(ai->expires, + ai->scan_timeout); + } else { + wake_at = min(ai->expires, + ai->scan_timeout); + } + schedule_timeout(wake_at - jiffies); continue; } } else if (!signal_pending(current)) { @@ -2953,6 +3095,10 @@ static int airo_thread(void *data) { airo_send_event(dev); else if (test_bit(JOB_AUTOWEP, &ai->flags)) timer_func(dev); + else if (test_bit(JOB_SCAN_RESULTS, &ai->flags)) + airo_process_scan_results(ai); + else /* Shouldn't get here, but we make sure to unlock */ + up(&ai->sem); } complete_and_exit (&ai->thr_exited, 0); } @@ -3047,19 +3193,15 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs) * and reassociations as valid status * Jean II */ if(newStatus == ASSOCIATED) { - if (apriv->scan_timestamp) { - /* Send an empty event to user space. - * We don't send the received data on - * the event because it would require - * us to do complex transcoding, and - * we want to minimise the work done in - * the irq handler. Use a request to - * extract the data - Jean II */ - wrqu.data.length = 0; - wrqu.data.flags = 0; - wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL); - apriv->scan_timestamp = 0; +#if 0 + /* FIXME: Grabbing scan results here + * seems to be too early??? Just wait for + * timeout instead. */ + if (apriv->scan_timeout > 0) { + set_bit(JOB_SCAN_RESULTS, &apriv->flags); + wake_up_interruptible(&apriv->thr_wait); } +#endif if (down_trylock(&apriv->sem) != 0) { set_bit(JOB_EVENT, &apriv->flags); wake_up_interruptible(&apriv->thr_wait); @@ -3117,8 +3259,8 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs) } len = le16_to_cpu(hdr.len); - if (len > 2312) { - printk( KERN_ERR "airo: Bad size %d\n", len ); + if (len > AIRO_DEF_MTU) { + airo_print_err(apriv->dev->name, "Bad size %d", len); goto badrx; } if (len == 0) @@ -3161,10 +3303,12 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs) bap_read (apriv, &gap, sizeof(gap), BAP0); gap = le16_to_cpu(gap); if (gap) { - if (gap <= 8) + if (gap <= 8) { bap_read (apriv, tmpbuf, gap, BAP0); - else - printk(KERN_ERR "airo: gaplen too big. Problems will follow...\n"); + } else { + airo_print_err(apriv->dev->name, "gaplen too " + "big. 
Problems will follow..."); + } } bap_read (apriv, buffer + hdrlen/2, len, BAP0); } else { @@ -3281,12 +3425,13 @@ exitrx: } } else { OUT4500( apriv, EVACK, status & (EV_TX | EV_TXCPY | EV_TXEXC)); - printk( KERN_ERR "airo: Unallocated FID was used to xmit\n" ); + airo_print_err(apriv->dev->name, "Unallocated FID was " + "used to xmit" ); } } exittx: if ( status & ~STATUS_INTS & ~IGNORE_INTS ) - printk( KERN_WARNING "airo: Got weird status %x\n", + airo_print_warn(apriv->dev->name, "Got weird status %x", status & ~STATUS_INTS & ~IGNORE_INTS ); } @@ -3359,8 +3504,8 @@ static int enable_MAC( struct airo_info *ai, Resp *rsp, int lock ) { up(&ai->sem); if (rc) - printk(KERN_ERR "%s: Cannot enable MAC, err=%d\n", - __FUNCTION__,rc); + airo_print_err(ai->dev->name, "%s: Cannot enable MAC, err=%d", + __FUNCTION__, rc); return rc; } @@ -3489,8 +3634,8 @@ void mpi_receive_802_11 (struct airo_info *ai) if (ai->wifidev == NULL) hdr.len = 0; len = le16_to_cpu(hdr.len); - if (len > 2312) { - printk( KERN_ERR "airo: Bad size %d\n", len ); + if (len > AIRO_DEF_MTU) { + airo_print_err(ai->dev->name, "Bad size %d", len); goto badrx; } if (len == 0) @@ -3531,8 +3676,8 @@ void mpi_receive_802_11 (struct airo_info *ai) if (gap <= 8) ptr += gap; else - printk(KERN_ERR - "airo: gaplen too big. Problems will follow...\n"); + airo_print_err(ai->dev->name, + "gaplen too big. Problems will follow..."); } memcpy ((char *)buffer + hdrlen, ptr, len); ptr += len; @@ -3604,15 +3749,15 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) if (issuecommand(ai, &cmd, &rsp) != SUCCESS) { if (lock) up(&ai->sem); - printk(KERN_ERR "airo: Error checking for AUX port\n"); + airo_print_err(ai->dev->name, "Error checking for AUX port"); return ERROR; } if (!aux_bap || rsp.status & 0xff00) { ai->bap_read = fast_bap_read; - printk(KERN_DEBUG "airo: Doing fast bap_reads\n"); + airo_print_dbg(ai->dev->name, "Doing fast bap_reads"); } else { ai->bap_read = aux_bap_read; - printk(KERN_DEBUG "airo: Doing AUX bap_reads\n"); + airo_print_dbg(ai->dev->name, "Doing AUX bap_reads"); } } if (lock) @@ -3643,7 +3788,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) if (cap_rid.softCap & 8) ai->config.rmode |= RXMODE_NORMALIZED_RSSI; else - printk(KERN_WARNING "airo: unknown received signal level scale\n"); + airo_print_warn(ai->dev->name, "unknown received signal " + "level scale"); } ai->config.opmode = adhoc ? 
MODE_STA_IBSS : MODE_STA_ESS; ai->config.authType = AUTH_OPEN; @@ -3706,7 +3852,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) status = enable_MAC(ai, &rsp, lock); if ( status != SUCCESS || (rsp.status & 0xFF00) != 0) { - printk( KERN_ERR "airo: Bad MAC enable reason = %x, rid = %x, offset = %d\n", rsp.rsp0, rsp.rsp1, rsp.rsp2 ); + airo_print_err(ai->dev->name, "Bad MAC enable reason = %x, rid = %x," + " offset = %d", rsp.rsp0, rsp.rsp1, rsp.rsp2 ); return ERROR; } @@ -3749,8 +3896,8 @@ static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) { } if ( max_tries == -1 ) { - printk( KERN_ERR - "airo: Max tries exceeded when issueing command\n" ); + airo_print_err(ai->dev->name, + "Max tries exceeded when issueing command"); if (IN4500(ai, COMMAND) & COMMAND_BUSY) OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY); return ERROR; @@ -3762,11 +3909,11 @@ static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) { pRsp->rsp1 = IN4500(ai, RESP1); pRsp->rsp2 = IN4500(ai, RESP2); if ((pRsp->status & 0xff00)!=0 && pCmd->cmd != CMD_SOFTRESET) { - printk (KERN_ERR "airo: cmd= %x\n", pCmd->cmd); - printk (KERN_ERR "airo: status= %x\n", pRsp->status); - printk (KERN_ERR "airo: Rsp0= %x\n", pRsp->rsp0); - printk (KERN_ERR "airo: Rsp1= %x\n", pRsp->rsp1); - printk (KERN_ERR "airo: Rsp2= %x\n", pRsp->rsp2); + airo_print_err(ai->dev->name, "cmd= %x\n", pCmd->cmd); + airo_print_err(ai->dev->name, "status= %x\n", pRsp->status); + airo_print_err(ai->dev->name, "Rsp0= %x\n", pRsp->rsp0); + airo_print_err(ai->dev->name, "Rsp1= %x\n", pRsp->rsp1); + airo_print_err(ai->dev->name, "Rsp2= %x\n", pRsp->rsp2); } // clear stuck command busy if necessary @@ -3799,15 +3946,15 @@ static int bap_setup(struct airo_info *ai, u16 rid, u16 offset, int whichbap ) } } else if ( status & BAP_ERR ) { /* invalid rid or offset */ - printk( KERN_ERR "airo: BAP error %x %d\n", + airo_print_err(ai->dev->name, "BAP error %x %d", status, whichbap ); return ERROR; } else if (status & BAP_DONE) { // success return SUCCESS; } if ( !(max_tries--) ) { - printk( KERN_ERR - "airo: BAP setup error too many retries\n" ); + airo_print_err(ai->dev->name, + "airo: BAP setup error too many retries\n"); return ERROR; } // -- PC4500 missed it, try again @@ -3962,8 +4109,8 @@ static int PC4500_readrid(struct airo_info *ai, u16 rid, void *pBuf, int len, in len = min(len, (int)le16_to_cpu(*(u16*)pBuf)) - 2; if ( len <= 2 ) { - printk( KERN_ERR - "airo: Rid %x has a length of %d which is too short\n", + airo_print_err(ai->dev->name, + "Rid %x has a length of %d which is too short", (int)rid, (int)len ); rc = ERROR; goto done; @@ -3996,8 +4143,8 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid, Resp rsp; if (test_bit(FLAG_ENABLED, &ai->flags) && (RID_WEP_TEMP != rid)) - printk(KERN_ERR - "%s: MAC should be disabled (rid=%04x)\n", + airo_print_err(ai->dev->name, + "%s: MAC should be disabled (rid=%04x)", __FUNCTION__, rid); memset(&cmd, 0, sizeof(cmd)); memset(&rsp, 0, sizeof(rsp)); @@ -4013,7 +4160,7 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid, &ai->config_desc.rid_desc, sizeof(Rid)); if (len < 4 || len > 2047) { - printk(KERN_ERR "%s: len=%d\n",__FUNCTION__,len); + airo_print_err(ai->dev->name, "%s: len=%d", __FUNCTION__, len); rc = -1; } else { memcpy((char *)ai->config_desc.virtual_host_addr, @@ -4021,10 +4168,10 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid, rc = issuecommand(ai, &cmd, &rsp); if ((rc & 0xff00) != 0) { - printk(KERN_ERR "%s: Write rid Error %d\n", - 
__FUNCTION__,rc); - printk(KERN_ERR "%s: Cmd=%04x\n", - __FUNCTION__,cmd.cmd); + airo_print_err(ai->dev->name, "%s: Write rid Error %d", + __FUNCTION__, rc); + airo_print_err(ai->dev->name, "%s: Cmd=%04x", + __FUNCTION__, cmd.cmd); } if ((rsp.status & 0x7f00)) @@ -4123,7 +4270,7 @@ static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket) len >>= 16; if (len <= ETH_ALEN * 2) { - printk( KERN_WARNING "Short packet %d\n", len ); + airo_print_warn(ai->dev->name, "Short packet %d", len); return ERROR; } len -= ETH_ALEN * 2; @@ -4187,7 +4334,7 @@ static int transmit_802_11_packet(struct airo_info *ai, int len, char *pPacket) } if (len < hdrlen) { - printk( KERN_WARNING "Short packet %d\n", len ); + airo_print_warn(ai->dev->name, "Short packet %d", len); return ERROR; } @@ -4584,15 +4731,14 @@ static int proc_stats_rid_open( struct inode *inode, i*4<stats.len; i++){ if (!statsLabels[i]) continue; if (j+strlen(statsLabels[i])+16>4096) { - printk(KERN_WARNING - "airo: Potentially disasterous buffer overflow averted!\n"); + airo_print_warn(apriv->dev->name, + "Potentially disasterous buffer overflow averted!"); break; } j+=sprintf(data->rbuffer+j, "%s: %u\n", statsLabels[i], vals[i]); } if (i*4>=stats.len){ - printk(KERN_WARNING - "airo: Got a short rid\n"); + airo_print_warn(apriv->dev->name, "Got a short rid"); } data->readlen = j; return 0; @@ -4754,7 +4900,7 @@ static void proc_config_on_close( struct inode *inode, struct file *file ) { line += 14; v = get_dec_u16(line, &i, 4); - v = (v<0) ? 0 : ((v>2312) ? 2312 : v); + v = (v<0) ? 0 : ((v>AIRO_DEF_MTU) ? AIRO_DEF_MTU : v); ai->config.rtsThres = (u16)v; set_bit (FLAG_COMMIT, &ai->flags); } else if ( !strncmp( line, "TXMSDULifetime: ", 16 ) ) { @@ -4788,7 +4934,7 @@ static void proc_config_on_close( struct inode *inode, struct file *file ) { line += 15; v = get_dec_u16(line, &i, 4); - v = (v<256) ? 256 : ((v>2312) ? 2312 : v); + v = (v<256) ? 256 : ((v>AIRO_DEF_MTU) ? 
AIRO_DEF_MTU : v); v = v & 0xfffe; /* Make sure its even */ ai->config.fragThresh = (u16)v; set_bit (FLAG_COMMIT, &ai->flags); @@ -4798,8 +4944,7 @@ static void proc_config_on_close( struct inode *inode, struct file *file ) { case 'd': ai->config.modulation=MOD_DEFAULT; set_bit(FLAG_COMMIT, &ai->flags); break; case 'c': ai->config.modulation=MOD_CCK; set_bit(FLAG_COMMIT, &ai->flags); break; case 'm': ai->config.modulation=MOD_MOK; set_bit(FLAG_COMMIT, &ai->flags); break; - default: - printk( KERN_WARNING "airo: Unknown modulation\n" ); + default: airo_print_warn(ai->dev->name, "Unknown modulation"); } } else if (!strncmp(line, "Preamble: ", 10)) { line += 10; @@ -4807,10 +4952,10 @@ static void proc_config_on_close( struct inode *inode, struct file *file ) { case 'a': ai->config.preamble=PREAMBLE_AUTO; set_bit(FLAG_COMMIT, &ai->flags); break; case 'l': ai->config.preamble=PREAMBLE_LONG; set_bit(FLAG_COMMIT, &ai->flags); break; case 's': ai->config.preamble=PREAMBLE_SHORT; set_bit(FLAG_COMMIT, &ai->flags); break; - default: printk(KERN_WARNING "airo: Unknown preamble\n"); + default: airo_print_warn(ai->dev->name, "Unknown preamble"); } } else { - printk( KERN_WARNING "Couldn't figure out %s\n", line ); + airo_print_warn(ai->dev->name, "Couldn't figure out %s", line); } while( line[0] && line[0] != '\n' ) line++; if ( line[0] ) line++; @@ -5076,7 +5221,7 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) { } j = 2; } else { - printk(KERN_ERR "airo: WepKey passed invalid key index\n"); + airo_print_err(ai->dev->name, "WepKey passed invalid key index"); return; } @@ -5489,17 +5634,16 @@ static int __init airo_init_module( void ) airo_entry->gid = proc_gid; for( i = 0; i < 4 && io[i] && irq[i]; i++ ) { - printk( KERN_INFO - "airo: Trying to configure ISA adapter at irq=%d io=0x%x\n", - irq[i], io[i] ); + airo_print_info("", "Trying to configure ISA adapter at irq=%d " + "io=0x%x", irq[i], io[i] ); if (init_airo_card( irq[i], io[i], 0, NULL )) have_isa_dev = 1; } #ifdef CONFIG_PCI - printk( KERN_INFO "airo: Probing for PCI adapters\n" ); + airo_print_info("", "Probing for PCI adapters"); pci_register_driver(&airo_driver); - printk( KERN_INFO "airo: Finished probing for PCI adapters\n" ); + airo_print_info("", "Finished probing for PCI adapters"); #endif /* Always exit with success, as we are a library module @@ -5511,7 +5655,7 @@ static int __init airo_init_module( void ) static void __exit airo_cleanup_module( void ) { while( airo_devices ) { - printk( KERN_INFO "airo: Unregistering %s\n", airo_devices->dev->name ); + airo_print_info(airo_devices->dev->name, "Unregistering...\n"); stop_airo_card( airo_devices->dev, 1 ); } #ifdef CONFIG_PCI @@ -5622,7 +5766,8 @@ static int airo_set_freq(struct net_device *dev, /* We should do a better check than that, * based on the card capability !!! 
*/ if((channel < 1) || (channel > 14)) { - printk(KERN_DEBUG "%s: New channel value of %d is invalid!\n", dev->name, fwrq->m); + airo_print_dbg(dev->name, "New channel value of %d is invalid!", + fwrq->m); rc = -EINVAL; } else { readConfigRid(local, 1); @@ -5946,8 +6091,8 @@ static int airo_set_rts(struct net_device *dev, int rthr = vwrq->value; if(vwrq->disabled) - rthr = 2312; - if((rthr < 0) || (rthr > 2312)) { + rthr = AIRO_DEF_MTU; + if((rthr < 0) || (rthr > AIRO_DEF_MTU)) { return -EINVAL; } readConfigRid(local, 1); @@ -5970,7 +6115,7 @@ static int airo_get_rts(struct net_device *dev, readConfigRid(local, 1); vwrq->value = local->config.rtsThres; - vwrq->disabled = (vwrq->value >= 2312); + vwrq->disabled = (vwrq->value >= AIRO_DEF_MTU); vwrq->fixed = 1; return 0; @@ -5989,8 +6134,8 @@ static int airo_set_frag(struct net_device *dev, int fthr = vwrq->value; if(vwrq->disabled) - fthr = 2312; - if((fthr < 256) || (fthr > 2312)) { + fthr = AIRO_DEF_MTU; + if((fthr < 256) || (fthr > AIRO_DEF_MTU)) { return -EINVAL; } fthr &= ~0x1; /* Get an even value - is it really needed ??? */ @@ -6014,7 +6159,7 @@ static int airo_get_frag(struct net_device *dev, readConfigRid(local, 1); vwrq->value = local->config.fragThresh; - vwrq->disabled = (vwrq->value >= 2312); + vwrq->disabled = (vwrq->value >= AIRO_DEF_MTU); vwrq->fixed = 1; return 0; @@ -6709,9 +6854,9 @@ static int airo_get_range(struct net_device *dev, range->throughput = 1500 * 1000; range->min_rts = 0; - range->max_rts = 2312; + range->max_rts = AIRO_DEF_MTU; range->min_frag = 256; - range->max_frag = 2312; + range->max_frag = AIRO_DEF_MTU; if(cap_rid.softCap & 2) { // WEP: RC4 40 bits @@ -6972,6 +7117,7 @@ static int airo_set_scan(struct net_device *dev, struct airo_info *ai = dev->priv; Cmd cmd; Resp rsp; + int wake = 0; /* Note : you may have realised that, as this is a SET operation, * this is privileged and therefore a normal user can't @@ -6981,17 +7127,25 @@ static int airo_set_scan(struct net_device *dev, * Jean II */ if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN; + if (down_interruptible(&ai->sem)) + return -ERESTARTSYS; + + /* If there's already a scan in progress, don't + * trigger another one. */ + if (ai->scan_timeout > 0) + goto out; + /* Initiate a scan command */ memset(&cmd, 0, sizeof(cmd)); cmd.cmd=CMD_LISTBSS; - if (down_interruptible(&ai->sem)) - return -ERESTARTSYS; issuecommand(ai, &cmd, &rsp); - ai->scan_timestamp = jiffies; - up(&ai->sem); - - /* At this point, just return to the user. */ + ai->scan_timeout = RUN_AT(3*HZ); + wake = 1; +out: + up(&ai->sem); + if (wake) + wake_up_interruptible(&ai->thr_wait); return 0; } @@ -7111,59 +7265,38 @@ static int airo_get_scan(struct net_device *dev, char *extra) { struct airo_info *ai = dev->priv; - BSSListRid BSSList; - int rc; + BSSListElement *net; + int err = 0; char *current_ev = extra; - /* When we are associated again, the scan has surely finished. - * Just in case, let's make sure enough time has elapsed since - * we started the scan. - Javier */ - if(ai->scan_timestamp && time_before(jiffies,ai->scan_timestamp+3*HZ)) { - /* Important note : we don't want to block the caller - * until results are ready for various reasons. - * First, managing wait queues is complex and racy - * (there may be multiple simultaneous callers). - * Second, we grab some rtnetlink lock before comming - * here (in dev_ioctl()). 
- * Third, the caller can wait on the Wireless Event - * - Jean II */ + /* If a scan is in-progress, return -EAGAIN */ + if (ai->scan_timeout > 0) return -EAGAIN; - } - ai->scan_timestamp = 0; - /* There's only a race with proc_BSSList_open(), but its - * consequences are begnign. So I don't bother fixing it - Javier */ - - /* Try to read the first entry of the scan result */ - rc = PC4500_readrid(ai, RID_BSSLISTFIRST, &BSSList, sizeof(BSSList), 1); - if((rc) || (BSSList.index == 0xffff)) { - /* Client error, no scan results... - * The caller need to restart the scan. */ - return -ENODATA; - } + if (down_interruptible(&ai->sem)) + return -EAGAIN; - /* Read and parse all entries */ - while((!rc) && (BSSList.index != 0xffff)) { + list_for_each_entry (net, &ai->network_list, list) { /* Translate to WE format this entry */ current_ev = airo_translate_scan(dev, current_ev, extra + dwrq->length, - &BSSList); + &net->bss); /* Check if there is space for one more entry */ if((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) { /* Ask user space to try again with a bigger buffer */ - return -E2BIG; + err = -E2BIG; + goto out; } - - /* Read next entry */ - rc = PC4500_readrid(ai, RID_BSSLISTNEXT, - &BSSList, sizeof(BSSList), 1); } + /* Length of data */ dwrq->length = (current_ev - extra); dwrq->flags = 0; /* todo */ - return 0; +out: + up(&ai->sem); + return err; } /*------------------------------------------------------------------*/ @@ -7711,7 +7844,7 @@ static int cmdreset(struct airo_info *ai) { disable_MAC(ai, 1); if(!waitbusy (ai)){ - printk(KERN_INFO "Waitbusy hang before RESET\n"); + airo_print_info(ai->dev->name, "Waitbusy hang before RESET"); return -EBUSY; } @@ -7720,7 +7853,7 @@ static int cmdreset(struct airo_info *ai) { ssleep(1); /* WAS 600 12/7/00 */ if(!waitbusy (ai)){ - printk(KERN_INFO "Waitbusy hang AFTER RESET\n"); + airo_print_info(ai->dev->name, "Waitbusy hang AFTER RESET"); return -EBUSY; } return 0; @@ -7748,7 +7881,7 @@ static int setflashmode (struct airo_info *ai) { if(!waitbusy(ai)) { clear_bit (FLAG_FLASHING, &ai->flags); - printk(KERN_INFO "Waitbusy hang after setflash mode\n"); + airo_print_info(ai->dev->name, "Waitbusy hang after setflash mode"); return -EIO; } return 0; @@ -7777,7 +7910,7 @@ static int flashpchar(struct airo_info *ai,int byte,int dwelltime) { /* timeout for busy clear wait */ if(waittime <= 0 ){ - printk(KERN_INFO "flash putchar busywait timeout! 
\n"); + airo_print_info(ai->dev->name, "flash putchar busywait timeout!"); return -EBUSY; } @@ -7866,7 +7999,7 @@ static int flashrestart(struct airo_info *ai,struct net_device *dev){ if (!test_bit(FLAG_MPI,&ai->flags)) for( i = 0; i < MAX_FIDS; i++ ) { ai->fids[i] = transmit_allocate - ( ai, 2312, i >= MAX_FIDS / 2 ); + ( ai, AIRO_DEF_MTU, i >= MAX_FIDS / 2 ); } ssleep(1); /* Added 12/7/00 */ diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index 753a1de6664b..06c3fa32b310 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c @@ -3141,7 +3141,7 @@ int hostap_add_sta(struct ap_data *ap, u8 *sta_addr) if (ret == 1) { sta = ap_add_sta(ap, sta_addr); if (!sta) - ret = -1; + return -1; sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC; sta->ap = 1; memset(sta->supported_rates, 0, sizeof(sta->supported_rates)); diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c index f8f4503475f9..d335b250923a 100644 --- a/drivers/net/wireless/hostap/hostap_cs.c +++ b/drivers/net/wireless/hostap/hostap_cs.c @@ -585,8 +585,6 @@ static int prism2_config(dev_link_t *link) parse = kmalloc(sizeof(cisparse_t), GFP_KERNEL); hw_priv = kmalloc(sizeof(*hw_priv), GFP_KERNEL); if (parse == NULL || hw_priv == NULL) { - kfree(parse); - kfree(hw_priv); ret = -ENOMEM; goto failed; } diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c index b1f142d9e232..328e9a1d13b5 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c @@ -928,15 +928,15 @@ static int hfa384x_set_rid(struct net_device *dev, u16 rid, void *buf, int len) res = hfa384x_cmd(dev, HFA384X_CMDCODE_ACCESS_WRITE, rid, NULL, NULL); up(&local->rid_bap_sem); + if (res) { printk(KERN_DEBUG "%s: hfa384x_set_rid: CMDCODE_ACCESS_WRITE " "failed (res=%d, rid=%04x, len=%d)\n", dev->name, res, rid, len); - return res; - } - if (res == -ETIMEDOUT) - prism2_hw_reset(dev); + if (res == -ETIMEDOUT) + prism2_hw_reset(dev); + } return res; } diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c index f3e0ce1ee037..8b37e824dfcb 100644 --- a/drivers/net/wireless/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/hostap/hostap_ioctl.c @@ -3358,10 +3358,6 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev, if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { if (!sta_ptr) local->tx_keyidx = i; - else if (i) { - ret = -EINVAL; - goto done; - } } diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c index 2e85bdced2dd..194f07097581 100644 --- a/drivers/net/wireless/hostap/hostap_pci.c +++ b/drivers/net/wireless/hostap/hostap_pci.c @@ -307,7 +307,7 @@ static int prism2_pci_probe(struct pci_dev *pdev, memset(hw_priv, 0, sizeof(*hw_priv)); if (pci_enable_device(pdev)) - return -EIO; + goto err_out_free; phymem = pci_resource_start(pdev, 0); @@ -368,6 +368,8 @@ static int prism2_pci_probe(struct pci_dev *pdev, err_out_disable: pci_disable_device(pdev); prism2_free_local_data(dev); + + err_out_free: kfree(hw_priv); return -ENODEV; diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c index 94fe2449f099..edaaa943eb8f 100644 --- a/drivers/net/wireless/hostap/hostap_plx.c +++ b/drivers/net/wireless/hostap/hostap_plx.c @@ -368,7 +368,7 @@ static int prism2_plx_check_cis(void __iomem *attr_mem, int attr_len, switch (cis[pos]) { case CISTPL_CONFIG: - 
if (cis[pos + 1] < 1) + if (cis[pos + 1] < 2) goto cis_error; rmsz = (cis[pos + 2] & 0x3c) >> 2; rasz = cis[pos + 2] & 0x03; @@ -390,7 +390,7 @@ static int prism2_plx_check_cis(void __iomem *attr_mem, int attr_len, break; case CISTPL_MANFID: - if (cis[pos + 1] < 4) + if (cis[pos + 1] < 5) goto cis_error; manfid1 = cis[pos + 2] + (cis[pos + 3] << 8); manfid2 = cis[pos + 4] + (cis[pos + 5] << 8); @@ -452,7 +452,7 @@ static int prism2_plx_probe(struct pci_dev *pdev, memset(hw_priv, 0, sizeof(*hw_priv)); if (pci_enable_device(pdev)) - return -EIO; + goto err_out_free; /* National Datacomm NCP130 based on TMD7160, not PLX9052. */ tmd7160 = (pdev->vendor == 0x15e8) && (pdev->device == 0x0131); @@ -567,9 +567,6 @@ static int prism2_plx_probe(struct pci_dev *pdev, return hostap_hw_ready(dev); fail: - prism2_free_local_data(dev); - kfree(hw_priv); - if (irq_registered && dev) free_irq(dev->irq, dev); @@ -577,6 +574,10 @@ static int prism2_plx_probe(struct pci_dev *pdev, iounmap(attr_mem); pci_disable_device(pdev); + prism2_free_local_data(dev); + + err_out_free: + kfree(hw_priv); return -ENODEV; } |